#Python Project. Beginner Level. Level 1
# Dice Rolling game.....
import random
import tkinter as bijoy
root = bijoy.Tk() # main window
root.geometry("750x475") # Resize
root.title('Roll Dice') #title
ll = bijoy.Label(root, text = '', font = ("times new roman", 260)) #label to display the dice face
# Callback for the ROLL DICE button: pick a random Unicode die face and show it on the label
def roll():
    number = ['\u2680', '\u2681', '\u2682', '\u2683', '\u2684', '\u2685']  # die faces 1-6
    ll.configure(text = f'{random.choice(number)}')
ll.pack()
b1 = bijoy.Button(root, text = "ROLL DICE",font = ('times new roman', 15), foreground = 'white', background = 'navyblue' ,command = roll) #button
b1.place(x = 317, y = 340)
root.mainloop() #keep window running
|
import time
from datetime import datetime, date
import boto3
class CE(object):
def __init__(self, credentials=None):
super().__init__()
self.credentials = credentials
c = credentials.get() if credentials is not None else {}
self.ce_client = boto3.client('ce', **c)
@staticmethod
def _get_start_end(period_str):
dt = datetime.strptime(period_str, '%m/%Y')
year, month, day = dt.timetuple()[:3]
return dt, date(year + int(month / 12), ((month+1) % 12) or 12, day)  # end is the first day of the following month
def get_month(self, period_str):
dt_start, dt_end = self._get_start_end(period_str)
timeperiod = {
'Start': dt_start.strftime('%Y-%m-%d'),
'End': dt_end.strftime('%Y-%m-%d')
}
req = {
'TimePeriod': timeperiod,
'Dimension': 'LINKED_ACCOUNT',
'Context': 'COST_AND_USAGE'
}
data_dims = []
while True:
response = self.ce_client.get_dimension_values(**req)
data_dims = data_dims + response["DimensionValues"]
if 'NextPageToken' in response:
req['NextPageToken'] = response['NextPageToken']
else:
break
# throttle a bit
time.sleep(0.5)
linkedaccounts = [ x["Value"] for x in data_dims ]
req = {
'TimePeriod': timeperiod,
'Granularity': 'MONTHLY',
'Filter': {
'Dimensions': {
'Key': 'LINKED_ACCOUNT',
'Values': []
}
},
'Metrics': [ 'UnblendedCost' ]
}
data_cu = {}
for account in linkedaccounts:
req['Filter']['Dimensions']['Values'] = [ account ]
response = self.ce_client.get_cost_and_usage(**req)
v = None
for x in response['ResultsByTime']:
w = float(x['Total']['UnblendedCost']['Amount'])  # Amount is returned as a string, convert before summing
v = w if v is None else v + w
data_cu[account] = v
# throttle a bit
time.sleep(0.5)
return data_cu
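# Usage sketch (added for illustration, not part of the original file); assumes the
# default boto3 credential chain and a sample billing period string.
if __name__ == '__main__':
    ce = CE()  # falls back to the default boto3 credential chain
    costs = ce.get_month('01/2021')  # unblended cost per linked account for January 2021
    for account, amount in costs.items():
        print(account, amount)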
|
'''
This file uses the geopy library.
I found this API before using Yandex; after I understood the Yandex API I focused on it.
'''
from flask import Blueprint, current_app
from geopy.geocoders import Nominatim
from geopy import distance
PREFIX = '/geopy'
geopy_lib = Blueprint('geopy_lib', __name__, url_prefix=PREFIX)
geolocator = Nominatim(user_agent="Distance Measure")
@geopy_lib.route("/", methods=["GET"])
def home():
return '''
<table>
<tr>
<td>/(address)</td>
<td>calculate the distance from the Moscow Ring Road to the given address</td>
</tr>
<tr>
<td>/(address1)/(address2)</td>
<td>calculate the distance between the two addresses</td>
</tr>
</table>
'''
@geopy_lib.route("/<address>")
def distance_from_mkad(address):
address1 = "Moscow Ring Road"
address2 = address
return distance_two_points(address1, address2)
@geopy_lib.route("/<address1>/<address2>")
def distance_two_points(address1, address2):
target1 = geolocator.geocode(address1)
target2 = geolocator.geocode(address2)
distance_target = distance.distance((target1.latitude, target1.longitude), (target2.latitude, target2.longitude)).km
log = address1 + ' ' + str([target1.latitude, target1.longitude]) + ' - ' \
+ address2 + ' ' + str([target2.latitude, target2.longitude]) + ' ' \
+ 'distance: ' + str(distance_target) + ' km'
current_app.logger.info(log)
return str(distance_target)
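# Usage sketch (added for illustration, not part of the original module): mount the
# blueprint on a minimal Flask app so the routes above can be exercised locally.
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(geopy_lib)
    # e.g. GET /geopy/Berlin/Paris returns the distance in kilometres as plain text
    app.run(debug=True)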
|
#!/usr/bin/env python3
import config
import binascii
from datetime import datetime  # needed by TempHumDelegate.parseData for timestamps
import requests
import logging
from logging.handlers import RotatingFileHandler
from bluepy.btle import UUID, Peripheral, ADDR_TYPE_PUBLIC, DefaultDelegate, Scanner, BTLEInternalError
battery_level = -1
sensor_id = -1
class TempHumDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
def handleNotification(self, cHandle, data):
temperature = round(int.from_bytes(data[0:2],byteorder='little',signed=True)/100, config.TEMPERATURE_PREC)
logger.info(f"Temp: {temperature}")
humidity = int.from_bytes(data[2:3],byteorder='little')
logger.info(f"Hum: {humidity}")
voltage=int.from_bytes(data[3:5],byteorder='little') / 1000.
logger.info(f"Voltage: {voltage}")
batteryLevel = min(int(round((voltage - 2.1),2) * 100), 100) #3.1 or above --> 100% 2.1 --> 0 %
logger.info(f"Battery level: {batteryLevel}")
comfort_type = get_comfort_type(humidity)
logger.info(f"Comfort type: {comfort_type}")
if (sensor_id != -1 and batteryLevel > -1):
for number, sensor in config.sensors.items():
if sensor['TH_IDX'] == sensor_id:
request_url = create_TH_request(config.DOMOTICZ_SERVER_IP,config.DOMOTICZ_SERVER_PORT,sensor_id,temperature,humidity,comfort_type,batteryLevel)
send_to_domoticz(request_url)
sensor['UPDATED'] = True
if sensor['VOLTAGE_IDX'] != -1:
request_url = create_VOLTAGE_request(config.DOMOTICZ_SERVER_IP,config.DOMOTICZ_SERVER_PORT,sensor['VOLTAGE_IDX'],voltage)
send_to_domoticz(request_url)
def handleDiscovery(self, dev, isNewDev, isNewData):
for (sdid, desc, val) in dev.getScanData():
if self.isTemperature(dev.addr, sdid, val):
logger.info(f"Discovery data from MAC: {dev.addr.upper()}")
bytes = [int(val[i:i+2], 16) for i in range(0, len(val), 2)]
temperature = (bytes[8] * 256 + bytes[9]) / 10
logger.info(f"Temp: {temperature}")
humidity = bytes[10]
logger.info(f"Hum: {humidity}")
comfort_type = get_comfort_type(humidity)
logger.info(f"Comfort type: {comfort_type}")
batteryLevel = bytes[11]
logger.info(f"Battery level: {batteryLevel}")
voltage = (bytes[12] * 256 + bytes[13]) / 1000
logger.info(f"Voltage: {voltage}")
for number, sensor in config.sensors.items():
if (sensor['MAC'].upper() == dev.addr.upper()) and (sensor['UPDATED'] == False):
request_url = create_TH_request(config.DOMOTICZ_SERVER_IP,config.DOMOTICZ_SERVER_PORT,sensor['TH_IDX'],temperature,humidity,comfort_type,batteryLevel)
send_to_domoticz(request_url)
sensor['UPDATED'] = True
if sensor['VOLTAGE_IDX'] != -1:
request_url = create_VOLTAGE_request(config.DOMOTICZ_SERVER_IP,config.DOMOTICZ_SERVER_PORT,sensor['VOLTAGE_IDX'],voltage)
send_to_domoticz(request_url)
#break
def isTemperature(self, addr, sdid, val):
if sdid != 22:
return False
if len(val) != 30:
return False
return True
def parseData(self, val):
bytes = [int(val[i:i+2], 16) for i in range(0, len(val), 2)]
return {
'timestamp': datetime.now().astimezone().replace(microsecond=0).isoformat(),
'mac': ":".join(["{:02X}".format(bytes[i]) for i in range(2,8)]),
'temperature': (bytes[8] * 256 + bytes[9]) / 10,
'humidity': bytes[10],
'battery_percent': bytes[11],
'battery_volt': (bytes[12] * 256 + bytes[13]) / 1000,
'count': bytes[14],
}
def create_TH_request(server, port, idx, temp, hum, comfort, battery):
url = ''
url = (
f"http://{server}:{port}"
f"/json.htm?type=command¶m=udevice&idx={idx}"
f"&nvalue=0&svalue={temp};{hum};{comfort}"
f"&battery={battery}")
logger.info(f"The request is {url}")
return url
def create_VOLTAGE_request(server, port, idx, voltage):
url = ''
url = (
f"http://{server}:{port}"
f"/json.htm?type=command¶m=udevice&idx={idx}"
f"&nvalue=0&svalue={voltage}")
logger.info(f"The request is {url}")
return url
def send_to_domoticz(url):
resp = requests.get(url, auth=(config.DOMOTICZ_USERNAME, config.DOMOTICZ_PASSWORD))
logger.info(f"The response is {resp}")
def get_comfort_type(humidity):
comfort_type = '0'
if float(humidity) < 40:
comfort_type = '2'
elif float(humidity) <= 70:
comfort_type = '1'
elif float(humidity) > 70:
comfort_type = '3'
return comfort_type
def handle_temp_hum_value():
while True:
if p.waitForNotifications(10.0):
break
"""
Creates a rotating log
"""
logger = logging.getLogger("Rotating Log")
formatter = logging.Formatter(fmt="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
logger.setLevel(logging.INFO)
# add a rotating handler
handler = RotatingFileHandler(config.LOG_FILE_NAME, maxBytes=config.LOG_FILE_SIZE, backupCount=1)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info("***************************************")
logger.info("Start script...")
logger.info( f"Input parameters:\r\n"
f"Domoticz Server IP: {config.DOMOTICZ_SERVER_IP}\r\n"
f"Domoticz Server Port: {config.DOMOTICZ_SERVER_PORT}\r\n"
f"Domoticz Server User: {config.DOMOTICZ_USERNAME}\r\n"
f"Domoticz Server Password: {config.DOMOTICZ_PASSWORD}")
try:
scanner = Scanner().withDelegate(TempHumDelegate())
scanner.scan(10.0, passive=True)
except Exception as e:
logger.error(str(e))
pass
for number, sensor in config.sensors.items():
try:
if sensor['UPDATED'] == False:
sensor_id = sensor['TH_IDX']
logger.info(f"TH_IDX:{sensor['TH_IDX']}")
p = Peripheral(sensor['MAC'])
p.writeCharacteristic(0x0038, b'\x01\x00', True) #enable notifications of Temperature, Humidity and Battery voltage
p.writeCharacteristic(0x0046, b'\xf4\x01\x00', True)
p.withDelegate(TempHumDelegate())
handle_temp_hum_value()
p.disconnect()
except Exception as e:
logger.error(str(e))
pass
|
# coding=utf-8
import pandas as pd
ques_importance = {"I easily adapt to day-to-day changes of my life and manage my responsibilities well.": 0.911,
"I care for things that are important to me, not what is important to others.": 0.868,
"I feel I am a sensible person.": 0.862,
"I am flexible.": 0.825,
"I understand the expectation from me.": 0.822,
"I feel I am capable of decision-making.": 0.821,
"I feel depressed from the stress and demands of day-to-day life. *": 0.820,
"I believe that I have a purpose and direction in life.": 0.803,
"I think life is a continuous process of learning": 0.766,
"I am a confident person.": 0.755,
"I am an important part of my team and organization.": 0.880,
"People are trustworthy in my team.": 0.854,
"I am close to my teammates in my organization.": 0.828,
"My team is a great source of social support.": 0.823,
"My views are well accepted by my teammates.": 0.816,
"People in my team don’t help each other in difficult times. *": 0.799,
"I take active part in important decision-making activities of my team.": 0.796,
"I love to spend time with my teammates.": 0.783,
"I can freely share my problems with my colleagues.": 0.782,
"My day-to-day activities contribute towards the benefits of my team.": 0.768,
"I am quite satisfied with my job.": 0.862,
"I enjoy meaningful work": 0.825,
"I attach lots of value to my work.": 0.817,
"My work achievement often acts as a source of motivation.": 0.806,
"My workplace is very conducive.": 0.778,
"My job provides ample scope for career growth.": 0.763,
"I used to maintain a balance between work and home life": 0.702,
"My employer does care a lot about their employees.": 0.667,
"My work offers challenges to advance my skills.": 0.582,
"Mostly I feel happy.": 0.825,
"I am an optimistic person.": 0.777,
"I feel good about myself.": 0.722,
"My life is mostly sorrowful. *": 0.593
}
questions_df = pd.DataFrame(
data={'question_text': list(ques_importance.keys()), 'importance': list(ques_importance.values())})
questions_df["reversed"] = [False] * questions_df.shape[0]
questions_df.loc[questions_df.question_text.str.contains(r"*", regex=False), "reversed"] = True
questions_df["question_text"] = questions_df["question_text"].str.replace("*", "")
questions_df.to_csv("questions_importance.csv", index=False)
|
"""Top-level package for geo-prof."""
__author__ = """Shakur"""
__email__ = 'shakurgds@gmail.com'
__version__ = '0.0.2'
|
from floodsystem.geo import rivers_with_stations
from floodsystem.geo import stations_by_river
from floodsystem.stationdata import build_station_list
stat = build_station_list()
#from floodsystem import geo
print(len(rivers_with_stations(stat)), "Rivers with one or more stations")
print((sorted(rivers_with_stations(stat))[:10]))
print('')
print("River Aire has stations: ", sorted((stations_by_river(stat))['River Aire']))
print('')
print("River Thames has stations: ", sorted((stations_by_river(stat))['River Thames']))
#print(sorted(rivers_with_stations(stat)))
#print(rivers_with_stations(stat))
#print(rivers_morethanone_station(stat))
|
import numpy as np
import os
import traceback
import yaml
from edflow.hooks.hook import Hook
from edflow.util import walk, retrieve, contains_key
from edflow.custom_logging import get_logger
class RuntimeInputHook(Hook):
"""Given a textfile reads that at each step and passes the results to
a callback function."""
def __init__(self, update_file, callback):
"""Args:
update_file (str): path/to/yaml-file containing the parameters of
interest.
callback (Callable): Each time something changes in the update_file
this function is called with the content of the file as
argument.
"""
self.logger = get_logger(self)
self.ufile = update_file
self.callback = callback
self.last_updates = None
if not os.path.exists(self.ufile):
msg = (
"# Automatically created file. Changes made in here will "
"be recognized during runtime."
)
with open(self.ufile, "w+") as f:
f.write(msg)
def before_step(self, *args, **kwargs):
"""Checks if something changed and if yes runs the callback."""
try:
updates = yaml.full_load(open(self.ufile, "r"))
if self.last_updates is not None:
changes = {}
def is_changed(key, val, changes=changes):
if contains_key(key, updates):
other_val = retrieve(key, updates)
change = np.any(val != other_val)
else:
# This key is new -> Changes did happen!
change = True
changes[key] = change
self.logger.debug("Pre CHANGES: {}".format(changes))
walk(self.last_updates, is_changed, pass_key=True)
self.logger.debug("Post CHANGES: {}".format(changes))
if np.any(list(changes.values())):
self.callback(updates)
self.logger.debug("Runtime inputs received.")
self.logger.debug("{}".format(updates))
self.last_updates = updates
else:
if updates is not None:
self.callback(updates)
self.logger.info("Runtime inputs received.")
self.logger.debug("{}".format(updates))
self.last_updates = updates
except Exception as e:
self.logger.error("Something bad happend :(")
self.logger.error("{}".format(e))
self.logger.error(traceback.format_exc())
|
class TimedCycle:
def __init__(self, max_frame, ticks, movements, start_frame=0):
self.current_tick = 0
self.max_frame = max_frame
self.frame = start_frame
self.movements = movements
self.max_ticks = ticks
self.config = (max_frame, start_frame, movements, ticks)
self.one = False
def tick(self):
self.current_tick += 1
if self.current_tick > self.max_ticks[self.frame]:
self.frame += 1
self.current_tick = 0
if self.frame >= self.max_frame:
self.frame = 0
self.one = True
def get_movement(self):
return self.movements[self.frame]
def get_frame(self):
return self.frame
def reset(self):
self.current_tick = 0
self.max_frame, self.frame, self.movements, self.max_ticks = self.config
self.one = False
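# Usage sketch (added for illustration, not part of the original file): a three-frame
# cycle where each frame lasts a different number of ticks; the movement tuples are
# placeholder values, not taken from the original project.
if __name__ == "__main__":
    cycle = TimedCycle(max_frame=3, ticks=[5, 10, 5], movements=[(0, 1), (1, 0), (0, -1)])
    for _ in range(25):
        cycle.tick()
    print(cycle.get_frame(), cycle.get_movement(), cycle.one)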
|
"""
Class Features
Name: drv_dataset_hmc_io_static
Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
Date: '20200401'
Version: '3.0.0'
"""
#######################################################################################
# Library
import logging
import os
import xarray as xr
import pandas as pd
from copy import deepcopy
from hmc.algorithm.io.lib_data_geo_ascii import read_data_raster, read_data_grid
from hmc.algorithm.utils.lib_utils_dict import get_dict_nested_value, get_dict_value, lookup_dict_keys
from hmc.algorithm.utils.lib_utils_string import fill_tags2string
from hmc.driver.dataset.drv_dataset_hmc_io_type import DSetReader, DSetWriter, DSetComposer
from hmc.algorithm.default.lib_default_args import logger_name
# Log
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Class to read datasets
class DSetManager:
# -------------------------------------------------------------------------------------
# Method to initialize class
def __init__(self, dset, template_static_ref=None, template_run_ref=None, template_run_def=None,
model_tag='hmc', datasets_tag='datasets',
dset_list_format=None, dset_list_filter=None, **kwargs):
if dset_list_format is None:
dset_list_format = ['Shapefile', 'Point', 'Gridded']
self.dset = dset
self.dset_list_format = dset_list_format
self.template_static_ref = template_static_ref
self.template_run_ref = template_run_ref
self.template_run_def = list(template_run_def.values())[0]
self.file_name_tag = 'file_name'
self.folder_name_tag = 'folder_name'
self.dset_ref_tag = ['Gridded', 'Terrain']
self.model_tag = model_tag
self.datasets_tag = datasets_tag
dset_obj = {}
for dset_format in dset_list_format:
if dset_format in self.dset:
dset_obj[dset_format] = {}
dset_tmp = self.dset[dset_format]
file_name = dset_tmp['hmc_file_name']
file_folder = dset_tmp['hmc_file_folder']
file_format = dset_tmp['hmc_file_format']
file_frequency = dset_tmp['hmc_file_frequency']
file_vars = dset_tmp['hmc_file_variable']
if 'hmc_file_filter' in list(dset_tmp.keys()):
file_filters = dset_tmp['hmc_file_filter']
else:
file_filters = None
if file_vars.__len__() > 0:
for var_key, var_data in file_vars.items():
dset_obj[dset_format][var_key] = {}
var_name = var_data['var_name']
var_check = var_data['check']
var_mandatory = var_data['mandatory']
dset_obj[dset_format][var_key][self.file_name_tag] = file_name
dset_obj[dset_format][var_key][self.folder_name_tag] = file_folder
dset_obj[dset_format][var_key]['format'] = file_format
dset_obj[dset_format][var_key]['frequency'] = file_frequency
dset_obj[dset_format][var_key]['filter'] = file_filters
dset_obj[dset_format][var_key]['var_name'] = var_name
dset_obj[dset_format][var_key]['var_check'] = var_check
dset_obj[dset_format][var_key]['var_mandatory'] = var_mandatory
else:
dset_obj[dset_format] = None
self.dset_obj = dset_obj
self.dset_ref_obj = get_dict_nested_value(self.dset_obj, self.dset_ref_tag)
template_static_def = dict.fromkeys(list(self.template_static_ref.keys()), self.dset_ref_obj['var_name'])
template_ref_merge = {**self.template_run_ref, **template_static_ref}
template_def_merge = {**self.template_run_def, **template_static_def}
folder_name_ref = fill_tags2string(self.dset_ref_obj[self.folder_name_tag],
template_ref_merge, template_def_merge)
file_name_ref = fill_tags2string(self.dset_ref_obj[self.file_name_tag], template_ref_merge, template_def_merge)
file_path_ref = os.path.join(folder_name_ref, file_name_ref)
if os.path.exists(file_path_ref):
# self.dset_static_ref = read_data_raster(file_path_ref)
self.dset_static_ref = read_data_grid(file_path_ref, output_format='dictionary')
else:
log_stream.error(' ===> Reference static datasets is not available')
raise IOError('File is not found in the selected folder')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to collect datasets
def collect_data(self, dset_source_static, data_source_static=None):
# Starting information
log_stream.info(' -----> Collect static datasets ... ')
var_name_list = list(dset_source_static.index)
file_name_list = dset_source_static['file_name'].values
file_check_list = dset_source_static['file_check'].values
file_mandatory_list = dset_source_static['file_mandatory'].values
file_format_list = dset_source_static['file_format'].values
file_filter_list = dset_source_static['file_filter'].values
file_var_list = dset_source_static['file_var'].values
var_frame = {}
dset_source = None
for var_name, file_name, file_check, file_mandatory, file_format, file_filter, file_var in zip(
var_name_list, file_name_list, file_check_list, file_mandatory_list, file_format_list,
file_filter_list, file_var_list):
file_info = {'var_format': file_format, 'var_mandatory': file_mandatory,
'var_filter': file_filter,
'var_check': file_check, 'var_file': file_var}
var_data = data_source_static[var_name]
log_stream.info(' ------> Variable ' + var_name + ' ... ')
if var_data is None:
if (file_name is not None) and (os.path.exists(file_name)):
driver_hmc_reader = DSetReader(file_name, file_info, None, time_src_info=None)
obj_var = driver_hmc_reader.read_filename_static(var_name)
elif ((file_name is not None) and (not os.path.exists(file_name))) and \
(var_name == 'Longitude' or var_name == 'Latitude'):
log_stream.warning(' ===> Static datasets for variable ' +
var_name + ' not found. Datasets will be created using the terrain reference.')
driver_hmc_writer = DSetWriter(file_name, file_info, None, time_dst_info=None)
obj_var = driver_hmc_writer.write_filename_static(var_name, self.dset_static_ref)
elif ((file_name is not None) and (not os.path.exists(file_name))) and (var_name == 'Cell_Area'):
log_stream.warning(' ===> Static datasets for variable ' +
var_name + ' not found. Datasets will be created using a default method.')
driver_hmc_composer = DSetComposer(file_name, file_info, None, time_dst_info=None)
obj_var = driver_hmc_composer.compute_data_static(var_name, self.dset_static_ref)
else:
if file_mandatory:
log_stream.error(' ===> Static datasets for variable ' +
var_name + ' in ' + file_format + ' format is mandatory. Exit.')
if file_name is not None:
raise IOError('File ' + file_name + ' not found!')
else:
raise IOError('File is declared using a None value!')
else:
log_stream.warning(' ===> Static datasets for variable ' +
var_name + ' in ' + file_format + ' format is ancillary')
if file_format == 'shapefile':
log_stream.warning(' ===> Static datasets for shapefile case will be initialized to None.')
obj_var = None
elif file_format == 'ascii_point':
if file_name is not None:
log_stream.warning(' ===> Static datasets for ascii point case will be initialized '
'using a default method.')
driver_hmc_reader = DSetReader(file_name, file_info, None, time_src_info=None)
driver_hmc_reader.write_filename_undefined(file_name, var_name)
else:
log_stream.warning(' ===> Filename for ascii point case is declared '
'using a None value!')
obj_var = None
elif file_format == 'ascii_grid':
log_stream.warning(' ===> Static datasets for ascii grid case will be initialized to None.')
obj_var = None
else:
log_stream.error(' ===> Static format ' + file_format + ' is not allowed. Exit.')
raise NotImplementedError('Case not implemented yet')
elif var_data is not None:
if not os.path.exists(file_name):
driver_hmc_writer = DSetWriter(file_name, file_info, None, time_dst_info=None)
obj_var = driver_hmc_writer.write_filename_static(var_name, var_data)
else:
driver_hmc_reader = DSetReader(file_name, file_info, None, time_src_info=None)
check_data = driver_hmc_reader.read_filename_static(var_name)
log_stream.info(' ------> Check variable ' + var_name + ' ... ')
log_stream.info(' -------> File name ' + file_name + ' for variable ' + var_name +
' is already available')
len_check_data = check_data.__len__()
len_var_data = list(var_data.keys()).__len__()
# for key, values in var_data.items(): # commented for section obj format changes
# len_var_data = values.__len__()
# break
if len_check_data == len_var_data:
log_stream.info(' -------> The loaded datasets and the stored datasets have the same length')
log_stream.info(' -------> The instance will use the stored datasets.')
# Merge information of the dictionaries
common_data = {}
for var_key, var_fields_step in check_data.items():
if var_key in list(var_data.keys()):
var_fields_tmp = var_data[var_key]
var_fields_common = {**var_fields_tmp, **var_fields_step}
common_data[var_key] = var_fields_common
else:
log_stream.error(' ===> Variable key ' + var_key + ' is not a common field.')
raise IOError('Obj key in merging procedures is not valid')
obj_var = deepcopy(common_data)
log_stream.info(' ------> Check variable ' + var_name + ' ... DONE')
else:
log_stream.error(' -------> The loaded datasets and the stored datasets have different lengths')
log_stream.error(' -------> The instance will exit for this reason.')
log_stream.error(' ------> Check variable ' + var_name + ' ... FAILED')
raise IOError('Object static length is not valid')
else:
log_stream.error(' ===> Variable data format is not allowed')
raise NotImplementedError('Object static format is not valid')
if dset_source is None:
dset_source = {}
if obj_var is not None:
if isinstance(obj_var, (xr.DataArray, dict, list, tuple)):
    dset_source[var_name] = obj_var
else:
log_stream.error(' ===> Data static object is not allowed')
raise NotImplementedError('Object static type is not valid')
else:
dset_source[var_name] = obj_var
log_stream.info(' ------> Variable ' + var_name + ' ... DONE')
var_frame[self.datasets_tag] = dset_source
# Ending information
log_stream.info(' -----> Collect static datasets ... DONE')
return var_frame
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to collect filename
def collect_filename(self, template_ref, template_filled_run):
# Starting information
log_stream.info(' -----> Collect static filename ... ')
dset_obj = self.dset_obj
list_key_filled = list(template_filled_run.keys())
ws_vars = {}
for dset_format, dset_workspace in dset_obj.items():
log_stream.info(' ------> Type ' + dset_format + ' ... ')
dset_key_list = []
file_path_list = []
file_format_list = []
file_check_list = []
file_filter_list = []
file_mandatory_list = []
file_var_list = []
for dset_key, dset_item in dset_workspace.items():
log_stream.info(' -------> Variable ' + dset_key + ' ... ')
folder_name_raw = dset_item[self.folder_name_tag]
file_name_raw = dset_item[self.file_name_tag]
file_format = dset_item['format']
file_filter = dset_item['filter']
file_var = dset_item['var_name']
file_check = dset_item['var_check']
file_mandatory = dset_item['var_mandatory']
template_tmp = deepcopy(template_ref)
for key in list_key_filled:
if key in template_tmp:
template_tmp.pop(key)
template_filled_tmp = dict.fromkeys(list(template_tmp.keys()), file_var)
template_filled_merge = {**template_filled_run, **template_filled_tmp}
if (folder_name_raw is not None) and (file_name_raw is not None):
folder_name_tmp = fill_tags2string(folder_name_raw, template_ref, template_filled_merge)
file_name_tmp = fill_tags2string(file_name_raw, template_ref, template_filled_merge)
file_path_list.append(os.path.join(folder_name_tmp, file_name_tmp))
else:
file_path_list.append(None)
log_stream.warning(' ===> Folder or/and filename is/are undefined. Initialize fields with null')
file_format_list.append(file_format)
file_check_list.append(file_check)
file_filter_list.append(file_filter)
file_mandatory_list.append(file_mandatory)
file_var_list.append(file_var)
dset_key_list.append(dset_key)
log_stream.info(' -------> Variable ' + dset_key + ' ... DONE')
df_vars = pd.DataFrame(
{'dset_name': dset_key_list,
'file_name': file_path_list,
'file_filter': file_filter_list,
'file_format': file_format_list,
'file_check': file_check_list,
'file_mandatory': file_mandatory_list,
'file_var': file_var_list,
})
df_vars = df_vars.reset_index()
df_vars = df_vars.set_index('dset_name')
ws_vars[dset_format] = df_vars
log_stream.info(' ------> Type ' + dset_format + ' ... DONE')
# Ending information
log_stream.info(' -----> Collect static filename ... DONE')
return ws_vars
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
|
"""
Created on Nov 6, 2021
@author: reganto
"""
from tornado.web import Application
from tests.base import BaseTest
from database.models import migrator, Thread, Channel
from database.model_factory import Factory
class ChannelTest(BaseTest):
def get_app(self):
return Application()
def setUp(self):
super().setUp()
migrator.upgrade()
def tearDown(self):
super().tearDown()
migrator.downgrade()
def test_a_channel_consist_of_a_thread(self):
thread = Factory(Thread).create()
channel = Factory(Channel).create()
thread.channel = channel
thread.save()
self.assertIsInstance(thread.channel, Channel)
|
from typing import Union
from record_keeper.module.admin.query import (
get_listeners,
remove_listener,
update_listener,
)
from record_keeper.utilities.message import MessageWrapper
class AdminRelay:
def __init__(self):
self.admin_options = [
"dice",
"help",
"battle-keeper",
"record-keeper",
"trade-keeper",
"friend-keeper",
"iv-ranker",
"message-cleanup",
"training-wheels",
"deletable-data",
]
async def relay(
self,
msg: MessageWrapper,
) -> Union[str, None]:
if not msg.guild or not msg.from_admin:
# wrong scope or missing permission; do not continue
return None
response = None
delete_after = 120
# help prompts
if msg.cmd == "setup":
response = self.setup()
delete_after = 600
# supported commands
elif msg.cmd == "activate" and msg.arguments:
response = self.activate(msg)
elif msg.cmd == "deactivate":
response = self.deactivate(msg)
elif msg.cmd == "active":
response = self.list_listener(msg)
# send the response to discord
if response:
return await msg.send_message(
response,
delete_after,
new_message=True,
)
return None
def setup(self):
return (
"__**Setup**__\n"
"*run these commands in channels you would like to modify*\n"
"_**Add a listener to channel**_\n"
"\t!activate <*listener*>\n"
"_**Remove a listener from a channel**_\n"
"\t!deactivate <*listener*>\n"
"_**View all listener for a channel**_\n"
"\t!active\n"
"_**available listeners**_\n"
" - **help**: activates !help\n"
" - **training-wheels**: activates errors on bad commands\n"
" - **record-keeper**: a record keeper for medals\n"
" - **trade-keeper**: a trading want manager\n"
" - **iv-ranker**: a pokemon rater\n"
" - **message-cleanup**: cleans up bot messages on a timer\n"
" - **deletable-data**: activates ability to delete\n"
" - **dice**: activates !roll <*sides*>\n"
"**Note**: it is recommend to turn on *message-cleanup*, "
"*training-wheels* and *help* \n"
"in addition to any other listener (via `!activate default`)\n"
"---------------------------------------------\n"
)
def activate(self, msg: MessageWrapper) -> str:
settings = []
if msg.arguments[0] == "all":
settings = self.admin_options
elif msg.arguments[0] == "default":
settings = ["help", "message-cleanup", "training-wheels"]
else:
settings.extend(msg.arguments)
for setting in settings:
if setting in self.admin_options:
update_listener(msg.guild_id, msg.channel_id, setting.lower())
return "Valid listener's status have activated for this channel!"
def deactivate(self, msg: MessageWrapper) -> str:
settings = []
settings.extend(msg.arguments)
for setting in settings:
if setting in self.admin_options:
remove_listener(msg.guild_id, msg.channel_id, setting.lower())
return "Valid listener's status have deactivated for this channel!"
def list_listener(self, msg: MessageWrapper) -> str:
response = (
"```active listeners on this channel \n\n"
"channel | type \n"
"-------------------+------------\n"
)
listeners = get_listeners(msg.guild_id, msg.channel_id)
for (uuid, server, toggle, channel) in listeners:
response += f"{channel} | {toggle} \n"
response += "```"
return response
|
# Copyright 2018 Science and Technology Facilities Council
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the MessageDirectory class."""
from __future__ import print_function
import logging
import os
import uuid
# logging configuration
log = logging.getLogger(__name__)
class MessageDirectory(object):
"""A structure for holding Accounting messages in a directory."""
def __init__(self, path):
"""Create a new directory structure for holding Accounting messages."""
self.directory_path = path
def add(self, data):
"""Add the passed data to a new file and return it's name."""
# Create a unique file name so APEL admins can pair sent and recieved
# messages easily (as the file name appears in the sender and receiver
# logs as the message ID).
name = uuid.uuid4()
# Open the file and write the provided data into the file.
with open("%s/%s" % (self.directory_path, name), 'w') as message:
message.write(data)
# Return the name of the created file as a string,
# to keep the dirq like interface.
return "%s" % name
def count(self):
"""
Return the number of elements in the queue.
Regardless of their state.
"""
return len(self._get_messages())
def get(self, name):
"""Return the content of the named message."""
with open("%s/%s" % (self.directory_path, name)) as message:
content = message.read()
return content
def lock(self, _name):
"""Return True to simulate a successful lock. Does nothing else."""
return True
def purge(self):
"""
Do nothing, as there are no old/intermediate directories to purge.
Only included to preserve dirq interface.
"""
log.debug("purge called, but purge does nothing for non-dirq sending.")
def remove(self, name):
"""Remove the named message."""
os.unlink("%s/%s" % (self.directory_path, name))
def _get_messages(self, sort_by_mtime=False):
"""
Get the messages stored in this MessageDirectory.
if sort_by_mtime is set to True, the returned list is guaranteed to be
in increasing order of modification time.
mtime is used because (apparently) there is not way to find the
original date of file creation due to a limitation
of the underlying filesystem
"""
try:
# Get a list of files under self.directory_path
# in an arbitrary order.
file_name_list = os.listdir(self.directory_path)
if sort_by_mtime:
# Working space to hold the unsorted messages
# as file paths and mtimes.
unsorted_messages = []
# Working space to hold the sorted messages as file names.
sorted_messages = []
# Work out the mtime of each file.
for file_name in file_name_list:
file_path = os.path.join(self.directory_path, file_name)
# Store the file path and the time
# the file was last modified.
unsorted_messages.append((file_name,
os.path.getmtime(file_path)))
# Sort the file paths by mtime and
# then only store the file name.
for (file_name, _mtime) in sorted(unsorted_messages,
key=lambda tup: tup[1]):
# Store the sorted file paths in a class element.
sorted_messages.append(file_name)
# Return the sorted list.
return sorted_messages
# If we get here, just return the arbitrarily ordered list.
return file_name_list
except (IOError, OSError) as error:
log.error(error)
# Return an empty file list.
return []
def __iter__(self):
"""Return an iterable of files currently in the MessageDirectory."""
return self._get_messages(sort_by_mtime=True).__iter__()
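# Usage sketch (added for illustration, not part of the original module); the temporary
# directory and message body are placeholders.
if __name__ == '__main__':
    import tempfile
    outbox = MessageDirectory(tempfile.mkdtemp())
    name = outbox.add("example accounting message")
    print(outbox.count(), outbox.get(name))
    outbox.remove(name)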
|
import os
import csv
def search_user(userid, guildid):
""" Check if entry exists in database """
if os.path.isfile(f'userdata/{guildid}.csv'):
with open(f'userdata/{guildid}.csv', 'rt') as file:
reader = csv.reader(file)
for row in reader:
if str(userid) in row[0]:
return True
return False
else:
return False
def register_new_user(userid, profile, guildid):
""" Register new entry, only executed if user is indeed unique """
with open(f'userdata/{guildid}.csv', 'a', newline='') as file:
csv.writer(file).writerow([userid, profile])
return
def lookup_user(userid, guildid):
"""Lookup existing entry as requested by user"""
if search_user(userid, guildid):
with open(f'userdata/{guildid}.csv', 'rt') as file:
for row in file:
if str(userid) in row:
return row.rstrip()[row.find(',')+1::]
else:
return
def remove_user(userid, guildid):
""" Remove entry by mounting csv content to RAM and then rewriting entire csv """
with open(f'userdata/{guildid}.csv', 'r') as file:
reader = csv.reader(file)
new = list(reader)
# filter out the matching row; removing items while iterating would skip entries
new = [line for line in new if str(userid) not in line]
with open(f'userdata/{guildid}.csv', 'w', newline='') as file2:
for line in new:
csv.writer(file2).writerow(line)
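# Usage sketch (added for illustration, not part of the original module); the ids and
# profile string are placeholders, and a 'userdata' directory is assumed to exist.
if __name__ == '__main__':
    os.makedirs('userdata', exist_ok=True)
    if not search_user(1234, 42):
        register_new_user(1234, 'example-profile', 42)
    print(lookup_user(1234, 42))
    remove_user(1234, 42)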
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetRestorePointCollectionResult',
'AwaitableGetRestorePointCollectionResult',
'get_restore_point_collection',
]
@pulumi.output_type
class GetRestorePointCollectionResult:
"""
Create or update Restore Point collection parameters.
"""
def __init__(__self__, id=None, location=None, name=None, provisioning_state=None, restore_point_collection_id=None, restore_points=None, source=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if restore_point_collection_id and not isinstance(restore_point_collection_id, str):
raise TypeError("Expected argument 'restore_point_collection_id' to be a str")
pulumi.set(__self__, "restore_point_collection_id", restore_point_collection_id)
if restore_points and not isinstance(restore_points, list):
raise TypeError("Expected argument 'restore_points' to be a list")
pulumi.set(__self__, "restore_points", restore_points)
if source and not isinstance(source, dict):
raise TypeError("Expected argument 'source' to be a dict")
pulumi.set(__self__, "source", source)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the restore point collection.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="restorePointCollectionId")
def restore_point_collection_id(self) -> str:
"""
The unique id of the restore point collection.
"""
return pulumi.get(self, "restore_point_collection_id")
@property
@pulumi.getter(name="restorePoints")
def restore_points(self) -> Sequence['outputs.RestorePointResponse']:
"""
A list containing all restore points created under this restore point collection.
"""
return pulumi.get(self, "restore_points")
@property
@pulumi.getter
def source(self) -> Optional['outputs.RestorePointCollectionSourcePropertiesResponse']:
"""
The properties of the source resource that this restore point collection is created from.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetRestorePointCollectionResult(GetRestorePointCollectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRestorePointCollectionResult(
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
restore_point_collection_id=self.restore_point_collection_id,
restore_points=self.restore_points,
source=self.source,
tags=self.tags,
type=self.type)
def get_restore_point_collection(expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
restore_point_collection_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRestorePointCollectionResult:
"""
Create or update Restore Point collection parameters.
API Version: 2021-03-01.
:param str expand: The expand expression to apply on the operation. If expand=restorePoints, server will return all contained restore points in the restorePointCollection.
:param str resource_group_name: The name of the resource group.
:param str restore_point_collection_name: The name of the restore point collection.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['restorePointCollectionName'] = restore_point_collection_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:compute:getRestorePointCollection', __args__, opts=opts, typ=GetRestorePointCollectionResult).value
return AwaitableGetRestorePointCollectionResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
restore_point_collection_id=__ret__.restore_point_collection_id,
restore_points=__ret__.restore_points,
source=__ret__.source,
tags=__ret__.tags,
type=__ret__.type)
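# Usage sketch (added for illustration, not generator output): inside a Pulumi program
# the helper could be invoked like this; the resource group and collection names are
# placeholders, not values from the source.
# example = get_restore_point_collection(
#     resource_group_name="example-rg",
#     restore_point_collection_name="example-rpc",
#     expand="restorePoints")
# pulumi.export("provisioningState", example.provisioning_state)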
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint
from frappe.model.naming import validate_name
@frappe.whitelist()
def rename_doc(doctype, old, new, force=False, merge=False, ignore_permissions=False):
"""
Renames a doc(dt, old) to doc(dt, new) and
updates all linked fields of type "Link"
"""
if not frappe.db.exists(doctype, old):
return
force = cint(force)
merge = cint(merge)
meta = frappe.get_meta(doctype)
# call before_rename
old_doc = frappe.get_doc(doctype, old)
out = old_doc.run_method("before_rename", old, new, merge) or {}
new = (out.get("new") or new) if isinstance(out, dict) else (out or new)
new = validate_rename(doctype, new, meta, merge, force, ignore_permissions)
if not merge:
rename_parent_and_child(doctype, old, new, meta)
# update link fields' values
link_fields = get_link_fields(doctype)
update_link_field_values(link_fields, old, new, doctype)
rename_dynamic_links(doctype, old, new)
if doctype=='DocType':
rename_doctype(doctype, old, new, force)
update_comments(doctype, old, new, force)
update_attachments(doctype, old, new)
if merge:
frappe.delete_doc(doctype, old)
# call after_rename
new_doc = frappe.get_doc(doctype, new)
# copy any flags if required
new_doc._local = getattr(old_doc, "_local", None)
new_doc.run_method("after_rename", old, new, merge)
rename_versions(doctype, old, new)
# update user_permissions
frappe.db.sql("""update tabDefaultValue set defvalue=%s where parenttype='User Permission'
and defkey=%s and defvalue=%s""", (new, doctype, old))
frappe.clear_cache()
return new
def update_attachments(doctype, old, new):
try:
frappe.db.sql("""update `tabFile Data` set attached_to_name=%s
where attached_to_name=%s and attached_to_doctype=%s""", (new, old, doctype))
except Exception as e:
if e.args[0]!=1054: # in patch?
raise
def rename_versions(doctype, old, new):
frappe.db.sql("""update tabVersion set docname=%s where ref_doctype=%s and docname=%s""",
(new, doctype, old))
def rename_parent_and_child(doctype, old, new, meta):
# rename the doc
frappe.db.sql("update `tab%s` set name=%s where name=%s" \
% (doctype, '%s', '%s'), (new, old))
update_child_docs(old, new, meta)
def validate_rename(doctype, new, meta, merge, force, ignore_permissions):
# using for update so that it gets locked and someone else cannot edit it while this rename is going on!
exists = frappe.db.sql("select name from `tab{doctype}` where name=%s for update".format(doctype=doctype), new)
exists = exists[0][0] if exists else None
if merge and not exists:
frappe.msgprint(_("{0} {1} does not exist, select a new target to merge").format(doctype, new), raise_exception=1)
if (not merge) and exists == new:
frappe.msgprint(_("Another {0} with name {1} exists, select another name").format(doctype, new), raise_exception=1)
if not (ignore_permissions or frappe.has_permission(doctype, "write")):
frappe.msgprint(_("You need write permission to rename"), raise_exception=1)
if not force and not meta.allow_rename:
frappe.msgprint(_("{0} not allowed to be renamed").format(_(doctype)), raise_exception=1)
# validate naming like it's done in doc.py
new = validate_name(doctype, new, merge=merge)
return new
def rename_doctype(doctype, old, new, force=False):
# change options for fieldtype Table
update_options_for_fieldtype("Table", old, new)
update_options_for_fieldtype("Link", old, new)
# change options where select options are hardcoded i.e. listed
select_fields = get_select_fields(old, new)
update_link_field_values(select_fields, old, new, doctype)
update_select_field_values(old, new)
# change parenttype for fieldtype Table
update_parenttype_values(old, new)
# rename comments
frappe.db.sql("""update tabComment set comment_doctype=%s where comment_doctype=%s""",
(new, old))
def update_comments(doctype, old, new, force=False):
frappe.db.sql("""update `tabComment` set comment_docname=%s
where comment_doctype=%s and comment_docname=%s""", (new, doctype, old))
def update_child_docs(old, new, meta):
# update "parent"
for df in meta.get_table_fields():
frappe.db.sql("update `tab%s` set parent=%s where parent=%s" \
% (df.options, '%s', '%s'), (new, old))
def update_link_field_values(link_fields, old, new, doctype):
for field in link_fields:
if field['issingle']:
frappe.db.sql("""\
update `tabSingles` set value=%s
where doctype=%s and field=%s and value=%s""",
(new, field['parent'], field['fieldname'], old))
else:
if field['parent']!=new:
frappe.db.sql("""\
update `tab%s` set `%s`=%s
where `%s`=%s""" \
% (field['parent'], field['fieldname'], '%s',
field['fieldname'], '%s'),
(new, old))
def get_link_fields(doctype):
# get link fields from tabDocField
link_fields = frappe.db.sql("""\
select parent, fieldname,
(select ifnull(issingle, 0) from tabDocType dt
where dt.name = df.parent) as issingle
from tabDocField df
where
df.options=%s and df.fieldtype='Link'""", (doctype,), as_dict=1)
# get link fields from tabCustom Field
custom_link_fields = frappe.db.sql("""\
select dt as parent, fieldname,
(select ifnull(issingle, 0) from tabDocType dt
where dt.name = df.dt) as issingle
from `tabCustom Field` df
where
df.options=%s and df.fieldtype='Link'""", (doctype,), as_dict=1)
# add custom link fields list to link fields list
link_fields += custom_link_fields
# remove fields whose options have been changed using property setter
property_setter_link_fields = frappe.db.sql("""\
select ps.doc_type as parent, ps.field_name as fieldname,
(select ifnull(issingle, 0) from tabDocType dt
where dt.name = ps.doc_type) as issingle
from `tabProperty Setter` ps
where
ps.property_type='options' and
ps.field_name is not null and
ps.value=%s""", (doctype,), as_dict=1)
link_fields += property_setter_link_fields
return link_fields
def update_options_for_fieldtype(fieldtype, old, new):
frappe.db.sql("""update `tabDocField` set options=%s
where fieldtype=%s and options=%s""", (new, fieldtype, old))
frappe.db.sql("""update `tabCustom Field` set options=%s
where fieldtype=%s and options=%s""", (new, fieldtype, old))
frappe.db.sql("""update `tabProperty Setter` set value=%s
where property='options' and value=%s""", (new, old))
def get_select_fields(old, new):
"""
get select type fields where doctype's name is hardcoded as
new line separated list
"""
# get link fields from tabDocField
select_fields = frappe.db.sql("""\
select parent, fieldname,
(select ifnull(issingle, 0) from tabDocType dt
where dt.name = df.parent) as issingle
from tabDocField df
where
df.parent != %s and df.fieldtype = 'Select' and
df.options like "%%%%%s%%%%" """ \
% ('%s', old), (new,), as_dict=1)
# get link fields from tabCustom Field
custom_select_fields = frappe.db.sql("""\
select dt as parent, fieldname,
(select ifnull(issingle, 0) from tabDocType dt
where dt.name = df.dt) as issingle
from `tabCustom Field` df
where
df.dt != %s and df.fieldtype = 'Select' and
df.options like "%%%%%s%%%%" """ \
% ('%s', old), (new,), as_dict=1)
# add custom link fields list to link fields list
select_fields += custom_select_fields
# remove fields whose options have been changed using property setter
property_setter_select_fields = frappe.db.sql("""\
select ps.doc_type as parent, ps.field_name as fieldname,
(select ifnull(issingle, 0) from tabDocType dt
where dt.name = ps.doc_type) as issingle
from `tabProperty Setter` ps
where
ps.doc_type != %s and
ps.property_type='options' and
ps.field_name is not null and
ps.value like "%%%%%s%%%%" """ \
% ('%s', old), (new,), as_dict=1)
select_fields += property_setter_select_fields
return select_fields
def update_select_field_values(old, new):
frappe.db.sql("""\
update `tabDocField` set options=replace(options, %s, %s)
where
parent != %s and fieldtype = 'Select' and
(options like "%%%%\\n%s%%%%" or options like "%%%%%s\\n%%%%")""" % \
('%s', '%s', '%s', old, old), (old, new, new))
frappe.db.sql("""\
update `tabCustom Field` set options=replace(options, %s, %s)
where
dt != %s and fieldtype = 'Select' and
(options like "%%%%\\n%s%%%%" or options like "%%%%%s\\n%%%%")""" % \
('%s', '%s', '%s', old, old), (old, new, new))
frappe.db.sql("""\
update `tabProperty Setter` set value=replace(value, %s, %s)
where
doc_type != %s and field_name is not null and
property='options' and
(value like "%%%%\\n%s%%%%" or value like "%%%%%s\\n%%%%")""" % \
('%s', '%s', '%s', old, old), (old, new, new))
def update_parenttype_values(old, new):
child_doctypes = frappe.db.sql("""\
select options, fieldname from `tabDocField`
where parent=%s and fieldtype='Table'""", (new,), as_dict=1)
custom_child_doctypes = frappe.db.sql("""\
select options, fieldname from `tabCustom Field`
where dt=%s and fieldtype='Table'""", (new,), as_dict=1)
child_doctypes += custom_child_doctypes
fields = [d['fieldname'] for d in child_doctypes]
property_setter_child_doctypes = frappe.db.sql("""\
select value as options from `tabProperty Setter`
where doc_type=%s and property='options' and
field_name in ("%s")""" % ('%s', '", "'.join(fields)),
(new,))
child_doctypes += property_setter_child_doctypes
child_doctypes = (d['options'] for d in child_doctypes)
for doctype in child_doctypes:
frappe.db.sql("""\
update `tab%s` set parenttype=%s
where parenttype=%s""" % (doctype, '%s', '%s'),
(new, old))
dynamic_link_queries = [
"""select parent, fieldname, options from tabDocField where fieldtype='Dynamic Link'""",
"""select dt as parent, fieldname, options from `tabCustom Field` where fieldtype='Dynamic Link'""",
]
def rename_dynamic_links(doctype, old, new):
for query in dynamic_link_queries:
for df in frappe.db.sql(query, as_dict=True):
# dynamic link in single, just one value to check
if frappe.get_meta(df.parent).issingle:
refdoc = frappe.db.get_singles_dict(df.parent)
if refdoc.get(df.options)==doctype and refdoc.get(df.fieldname)==old:
frappe.db.sql("""update tabSingles set value=%s where
field=%s and value=%s and doctype=%s""", (new, df.fieldname, old, df.parent))
else:
# replace for each value where renamed
for to_change in frappe.db.sql_list("""select name from `tab{parent}` where
{options}=%s and {fieldname}=%s""".format(**df), (doctype, old)):
frappe.db.sql("""update `tab{parent}` set {fieldname}=%s
where name=%s""".format(**df), (new, to_change))
|
from arm.logicnode.arm_nodes import *
class MergeNode(ArmLogicTreeNode):
"""Activates the output when at least one connected input is activated.
If multiple inputs are active, the behaviour is specified by the
`Execution Mode` option.
@output Active Input Index: [*Available if Execution Mode is set to
Once Per Input*] The index of the last input that activated the output,
-1 if there was no execution yet on the current frame.
@option Execution Mode: The node's behaviour if multiple inputs are
active on the same frame.
- `Once Per Input`: If multiple inputs are active on one frame, activate
the output for each active input individually (simple forwarding).
- `Once Per Frame`: If multiple inputs are active on one frame,
trigger the output only once.
@option New: Add a new input socket.
@option X Button: Remove the lowermost input socket."""
bl_idname = 'LNMergeNode'
bl_label = 'Merge'
arm_section = 'flow'
arm_version = 2
def update_exec_mode(self, context):
self.outputs['Active Input Index'].hide = self.property0 == 'once_per_frame'
property0: HaxeEnumProperty(
'property0',
name='Execution Mode',
description='The node\'s behaviour if multiple inputs are active on the same frame',
items=[('once_per_input', 'Once Per Input',
'If multiple inputs are active on one frame, activate the'
' output for each active input individually (simple forwarding)'),
('once_per_frame', 'Once Per Frame',
'If multiple inputs are active on one frame, trigger the output only once')],
default='once_per_input',
update=update_exec_mode,
)
def __init__(self):
super(MergeNode, self).__init__()
array_nodes[str(id(self))] = self
def arm_init(self, context):
self.add_output('ArmNodeSocketAction', 'Out')
self.add_output('ArmIntSocket', 'Active Input Index')
def draw_buttons(self, context, layout):
layout.prop(self, 'property0', text='')
row = layout.row(align=True)
op = row.operator('arm.node_add_input', text='New', icon='PLUS', emboss=True)
op.node_index = str(id(self))
op.socket_type = 'ArmNodeSocketAction'
op2 = row.operator('arm.node_remove_input', text='', icon='X', emboss=True)
op2.node_index = str(id(self))
def draw_label(self) -> str:
if len(self.inputs) == 0:
return self.bl_label
return f'{self.bl_label}: [{len(self.inputs)}]'
def get_replacement_node(self, node_tree: bpy.types.NodeTree):
if self.arm_version not in (0, 1):
raise LookupError()
newnode = node_tree.nodes.new('LNMergeNode')
newnode.property0 = self.property0
# Recreate all original inputs
array_nodes[str(id(newnode))] = newnode
for idx, input in enumerate(self.inputs):
bpy.ops.arm.node_add_input('EXEC_DEFAULT', node_index=str(id(newnode)), socket_type='ArmNodeSocketAction')
for link in input.links:
node_tree.links.new(link.from_socket, newnode.inputs[idx])
# Recreate outputs
for link in self.outputs[0].links:
node_tree.links.new(newnode.outputs[0], link.to_socket)
return newnode
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A very simple MNIST classifier.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
tf.app.flags.DEFINE_integer('training_iteration', 1000,
'number of training iterations.')
tf.app.flags.DEFINE_integer('model_version', 1, 'version number of the model.')
tf.app.flags.DEFINE_string('data_dir', '/tmp/model/mnist/data', 'Working directory.')
tf.app.flags.DEFINE_string('model_dir', '/opt/mnist/model', 'export model directory.')
tf.app.flags.DEFINE_string('summary_dir', '/opt/mnist/summaries', 'summaries directory.')
FLAGS = tf.app.flags.FLAGS
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
VERSION = 1
def launch_tensorboard(summary_dir):
command = 'tensorboard --logdir=' + summary_dir + ' &'
if summary_dir:
import os
os.system(command)
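# Illustrative sketch (not part of the original script, never called): the comment
# above the cross_entropy definition in main() explains that the raw softmax/log
# formulation is numerically unstable. This plain-numpy helper shows the usual
# log-sum-exp shift, which is the same kind of stabilization that
# tf.nn.softmax_cross_entropy_with_logits applies internally.
def _stable_cross_entropy_sketch(logits, labels):
  """Mean cross-entropy from shifted logits so exp() never overflows."""
  import numpy as np
  shifted = logits - logits.max(axis=1, keepdims=True)
  log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
  return -(labels * log_probs).sum(axis=1).mean()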
def main(_):
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
MODEL_EXPORT_PATH = FLAGS.model_dir
MODEL_SUMMARY_DIR = FLAGS.summary_dir
VERSION = FLAGS.model_version
iterations = FLAGS.training_iteration
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, W) + b
#launch_tensorboard(MODEL_SUMMARY_DIR)
variable_summaries(W)
variable_summaries(b)
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
with tf.name_scope('input_reshape'):
image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
tf.summary.image('input', image_shaped_input, 10)
# The raw formulation of cross-entropy,
#
# tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
# reduction_indices=[1]))
#
# can be numerically unstable.
#
# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
# outputs of 'y', and then average across the batch.
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
tf.summary.scalar('cross_entropy', cross_entropy)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
pred = tf.argmax(y, axis=1)
correct_prediction = tf.equal(pred, tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
sess = tf.InteractiveSession()
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(MODEL_SUMMARY_DIR + '/log' + '/train', sess.graph)
test_writer = tf.summary.FileWriter(MODEL_SUMMARY_DIR + '/log' + '/test')
tf.global_variables_initializer().run()
# Train
for i in range(iterations):
batch_xs, batch_ys = mnist.train.next_batch(100)
if i % 10 == 0: # Record summaries and test-set accuracy
      summary, acc = sess.run([merged, accuracy], feed_dict={x: batch_xs, y_: batch_ys})
test_writer.add_summary(summary, i)
else: # Record train set summaries, and train
if i % 100 == 99: # Record execution stats
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, _ = sess.run([merged, train_step],
feed_dict={x: batch_xs, y_: batch_ys},
options=run_options,
run_metadata=run_metadata)
train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
train_writer.add_summary(summary, i)
print('Adding run metadata for', i)
else: # Record a summary
summary, _ = sess.run([merged, train_step], feed_dict={x: batch_xs, y_: batch_ys})
train_writer.add_summary(summary, i)
# Test trained model
pred = tf.argmax(y, axis=1)
correct_prediction = tf.equal(pred, tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images,
y_: mnist.test.labels}))
saver = tf.train.Saver(sharded=True)
model_exporter = exporter.Exporter(saver)
model_exporter.init(
sess.graph.as_graph_def(),
named_graph_signatures={
'inputs': exporter.generic_signature({'x': x}),
'outputs': exporter.generic_signature({'pred': pred})})
model_exporter.export(MODEL_EXPORT_PATH, tf.constant(VERSION), sess)
train_writer.close()
test_writer.close()
if __name__ == '__main__':
tf.app.run()
|
import numpy as np
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils import try_import_torch
torch, nn = try_import_torch()
class DQNTorchModel(TorchModelV2):
"""Extension of standard TorchModelV2 to provide dueling-Q functionality.
"""
def __init__(
self,
obs_space,
action_space,
num_outputs,
model_config,
name,
*,
dueling=False,
q_hiddens=(256, ),
dueling_activation="relu",
use_noisy=False,
sigma0=0.5,
# TODO(sven): Move `add_layer_norm` into ModelCatalog as
# generic option, then error if we use ParameterNoise as
# Exploration type and do not have any LayerNorm layers in
# the net.
add_layer_norm=False):
"""Initialize variables of this model.
Extra model kwargs:
dueling (bool): Whether to build the advantage(A)/value(V) heads
for DDQN. If True, Q-values are calculated as:
Q = (A - mean[A]) + V. If False, raw NN output is interpreted
as Q-values.
q_hiddens (List[int]): List of layer-sizes after(!) the
Advantages(A)/Value(V)-split. Hence, each of the A- and V-
branches will have this structure of Dense layers. To define
the NN before this A/V-split, use - as always -
config["model"]["fcnet_hiddens"].
dueling_activation (str): The activation to use for all dueling
layers (A- and V-branch). One of "relu", "tanh", "linear".
use_noisy (bool): use noisy nets
sigma0 (float): initial value of noisy nets
add_layer_norm (bool): Enable layer norm (for param noise).
"""
super(DQNTorchModel, self).__init__(obs_space, action_space,
num_outputs, model_config, name)
self.dueling = dueling
ins = num_outputs
# Dueling case: Build the shared (advantages and value) fc-network.
advantage_module = nn.Sequential()
value_module = None
if self.dueling:
value_module = nn.Sequential()
for i, n in enumerate(q_hiddens):
advantage_module.add_module("dueling_A_{}".format(i),
nn.Linear(ins, n))
value_module.add_module("dueling_V_{}".format(i),
nn.Linear(ins, n))
# Add activations if necessary.
if dueling_activation == "relu":
advantage_module.add_module("dueling_A_act_{}".format(i),
nn.ReLU())
value_module.add_module("dueling_V_act_{}".format(i),
nn.ReLU())
elif dueling_activation == "tanh":
advantage_module.add_module("dueling_A_act_{}".format(i),
nn.Tanh())
value_module.add_module("dueling_V_act_{}".format(i),
nn.Tanh())
# Add LayerNorm after each Dense.
if add_layer_norm:
advantage_module.add_module("LayerNorm_A_{}".format(i),
nn.LayerNorm(n))
value_module.add_module("LayerNorm_V_{}".format(i),
nn.LayerNorm(n))
ins = n
# Actual Advantages layer (nodes=num-actions) and
# value layer (nodes=1).
advantage_module.add_module("A", nn.Linear(ins, action_space.n))
value_module.add_module("V", nn.Linear(ins, 1))
# Non-dueling:
# Q-value layer (use main module's outputs as Q-values).
else:
pass
self.advantage_module = advantage_module
self.value_module = value_module
def get_advantages_or_q_values(self, model_out):
"""Returns distributional values for Q(s, a) given a state embedding.
Override this in your custom model to customize the Q output head.
Arguments:
model_out (Tensor): embedding from the model layers
Returns:
(action_scores, logits, dist) if num_atoms == 1, otherwise
(action_scores, z, support_logits_per_action, logits, dist)
"""
return self.advantage_module(model_out)
def get_state_value(self, model_out):
"""Returns the state value prediction for the given state embedding."""
return self.value_module(model_out)
def _noisy_layer(self, action_in, out_size, sigma0, non_linear=True):
"""
a common dense layer: y = w^{T}x + b
a noisy layer: y = (w + \\epsilon_w*\\sigma_w)^{T}x +
(b+\\epsilon_b*\\sigma_b)
where \epsilon are random variables sampled from factorized normal
distributions and \\sigma are trainable variables which are expected to
vanish along the training procedure
"""
in_size = int(action_in.shape[1])
epsilon_in = torch.normal(
mean=torch.zeros([in_size]), std=torch.ones([in_size]))
epsilon_out = torch.normal(
mean=torch.zeros([out_size]), std=torch.ones([out_size]))
epsilon_in = self._f_epsilon(epsilon_in)
epsilon_out = self._f_epsilon(epsilon_out)
epsilon_w = torch.matmul(
torch.unsqueeze(epsilon_in, -1),
other=torch.unsqueeze(epsilon_out, 0))
epsilon_b = epsilon_out
        sigma_w = torch.tensor(
            np.random.uniform(
                low=-1.0 / np.sqrt(float(in_size)),
                high=1.0 / np.sqrt(float(in_size)),
                size=[in_size, out_size]),
            dtype=torch.float32,
            requires_grad=True)
        # TF noise generation can be unreliable on GPU
        # If generating the noise on the CPU,
        # lowering sigma0 to 0.1 may be helpful
        sigma_b = torch.tensor(
            np.full(
                shape=[out_size], fill_value=sigma0 / np.sqrt(float(in_size))),
            dtype=torch.float32,
            requires_grad=True)
        w = torch.tensor(
            np.full(
                shape=[in_size, out_size],
                fill_value=6 / np.sqrt(float(in_size) + float(out_size))),
            dtype=torch.float32,
            requires_grad=True)
        b = torch.tensor(np.zeros([out_size]), dtype=torch.float32, requires_grad=True)
action_activation = torch.matmul(action_in, w + sigma_w * epsilon_w) \
+ b + sigma_b * epsilon_b
if not non_linear:
return action_activation
return nn.functional.relu(action_activation)
def _f_epsilon(self, x):
return torch.sign(x) * torch.pow(torch.abs(x), 0.5)
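# A minimal sketch (not part of the original class) of how the dueling heads
# described in the constructor docstring are typically combined into Q-values,
# i.e. Q = V + (A - mean(A)). The shapes and the call site are illustrative
# assumptions only; in RLlib the actual combination happens in the policy code.
def _dueling_q_values_sketch(advantages, values):
    """advantages: [batch, num_actions], values: [batch, 1] -> Q: [batch, num_actions]."""
    return values + (advantages - advantages.mean(dim=1, keepdim=True))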
|
def inverse(s):
return "AM" if (s == 'PM') else "PM"
def add_time(start, duration, day=''):
shift_hours, shift_mins = duration.split(':')
start_hour = start.split(' ')[0].split(':')[0]
start_min = start.split(' ')[0].split(':')[1]
start_part = start.split(' ')[1]
# print(f'adding {shift_hours} hours and {shift_mins} mins to {start_hour}:{start_min} {start_part}')
new_hours = int(start_hour)
new_mins = int(start_min)
new_part = start_part
days_later = 0
# Add the minutes first
for i in range(int(shift_mins)):
new_mins += 1
if new_mins == 60:
new_mins = 0;
shift_hours = int(shift_hours) + 1
# then add the hours
for i in range(int(shift_hours)):
new_hours += 1
if new_hours == 12:
new_part = inverse(new_part)
if new_part == 'AM':
days_later += 1
if new_hours == 13:
new_hours = 1
# If a day was provided, need to take care of that as well
if day!='':
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
index = days.index(day.capitalize())
day = days[(index+days_later)%7]
# Final checks before returning
if days_later > 0:
# Going to future days, need to format the string accordingly
if days_later == 1:
days_later = 'next day'
else:
days_later = str(days_later) + ' days later'
if day != '':
return (f'{new_hours}:{new_mins:02d} {new_part}, {day} ({days_later})')
else:
return (f'{new_hours}:{new_mins:02d} {new_part} ({days_later})')
if day != '':
return (f'{new_hours}:{new_mins:02d} {new_part}, {day}')
else:
return (f'{new_hours}:{new_mins:02d} {new_part}')
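# A couple of quick, hand-checked examples (a minimal sketch; guarded so the
# module can still be imported without side effects):
if __name__ == '__main__':
    print(add_time('3:00 PM', '3:10'))             # expected: 6:10 PM
    print(add_time('11:30 AM', '2:32', 'Monday'))  # expected: 2:02 PM, Monday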
|
from networkx import MultiDiGraph
from pyformlang.cfg import CFG
from scipy.sparse import dok_matrix
from typing import Set, Tuple
from project.utils.cfg_utils import transform_cfg_to_wcnf, is_wcnf
__all__ = ["matrix_based"]
def matrix_based(cfg: CFG, graph: MultiDiGraph) -> Set[Tuple[int, str, int]]:
"""
Matrix Based algorithm for Context Free Path Querying
Parameters
----------
cfg: CFG
Query given in Context Free Grammar form
graph: MultiDiGraph
Labeled graph for the Path Querying task
Returns
-------
r: set
Set of triplets (node, variable, node)
"""
    wcnf = cfg if is_wcnf(cfg) else transform_cfg_to_wcnf(cfg)
eps_prod_heads = [p.head.value for p in wcnf.productions if not p.body]
term_productions = {p for p in wcnf.productions if len(p.body) == 1}
var_productions = {p for p in wcnf.productions if len(p.body) == 2}
nodes_num = graph.number_of_nodes()
matrices = {
v.value: dok_matrix((nodes_num, nodes_num), dtype=bool) for v in wcnf.variables
}
for i, j, data in graph.edges(data=True):
l = data["label"]
for v in {p.head.value for p in term_productions if p.body[0].value == l}:
matrices[v][i, j] = True
for i in range(nodes_num):
for v in eps_prod_heads:
matrices[v][i, i] = True
any_changing = True
while any_changing:
any_changing = False
for p in var_productions:
old_nnz = matrices[p.head.value].nnz
matrices[p.head.value] += (
matrices[p.body[0].value] @ matrices[p.body[1].value]
)
new_nnz = matrices[p.head.value].nnz
any_changing = any_changing or old_nnz != new_nnz
return {
(u, variable, v)
for variable, matrix in matrices.items()
for u, v in zip(*matrix.nonzero())
}
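# A minimal usage sketch (guarded so it only runs when this module is executed
# directly; it assumes transform_cfg_to_wcnf/is_wcnf behave as imported above).
# With the grammar S -> a b and the labeled path 0 -a-> 1 -b-> 2, the returned
# set should contain the triple (0, 'S', 2).
if __name__ == "__main__":
    demo_graph = MultiDiGraph()
    demo_graph.add_edge(0, 1, label="a")
    demo_graph.add_edge(1, 2, label="b")
    demo_cfg = CFG.from_text("S -> a b")
    print(matrix_based(demo_cfg, demo_graph))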
|
#
# Copyright (C) 2000-2005 by Yasushi Saito (yasushi.saito@gmail.com)
#
# Jockey is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Jockey is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
import math
import sys
import time
import re
import font
import pychart_util
import theme
import version
from scaling import *
def _compute_bounding_box(points):
"""Given the list of coordinates (x,y), this procedure computes
the smallest rectangle that covers all the points."""
(xmin, ymin, xmax, ymax) = (999999, 999999, -999999, -999999)
for p in points:
xmin = min(xmin, p[0])
xmax = max(xmax, p[0])
ymin = min(ymin, p[1])
ymax = max(ymax, p[1])
return (xmin, ymin, xmax, ymax)
def _intersect_box(b1, b2):
xmin = max(b1[0], b2[0])
ymin = max(b1[1], b2[1])
xmax = min(b1[2], b2[2])
ymax = min(b1[3], b2[3])
return (xmin, ymin, xmax, ymax)
def invisible_p(x, y):
"""Return true if the point (X, Y) is visible in the canvas."""
if x < -499999 or y < -499999:
return 1
return 0
def to_radian(deg):
return deg*2*math.pi / 360.0
def midpoint(p1, p2):
return ( (p1[0]+p2[0])/2.0, (p1[1]+p2[1])/2.0 )
active_canvases = []
InvalidCoord = 999999
class T(object):
def __init__(self):
global active_canvases
self.__xmax = -InvalidCoord
self.__xmin = InvalidCoord
self.__ymax = -InvalidCoord
self.__ymin = InvalidCoord
self.__clip_box = (-InvalidCoord, -InvalidCoord, InvalidCoord, InvalidCoord)
self.__clip_stack = []
self.__nr_gsave = 0
self.title = theme.title or re.sub("(.*)\\.py$", "\\1", sys.argv[0])
self.creator = theme.creator or "pychart %s" % (version.version,)
self.creation_date = theme.creation_date or \
time.strftime("(%m/%d/%y) (%I:%M %p)")
self.author = theme.author
self.aux_comments = theme.aux_comments or ""
active_canvases.append(self)
def set_title(self, s):
"""Define the string to be shown in EPS/PDF "Title" field. The default value is the name of the script that creates the EPS/PDF file."""
self.title = s
def set_creator(self, tag):
"""Define the string to be shown in EPS %%Creator or PDF Producer field. The default value is "pychart"."""
self.creator = tag
def set_creation_date(self, s):
"""Define the string to be shown in EPS/PDF "CreationDate" field. Defalt value of this field is the current time."""
self.creation_date = s
def set_author(self, s):
"""Set the author string. Unless this method is called, the Author field is not output in EPS or PDF."""
self.author = s
def add_aux_comments(self, s):
"""Define an auxiliary comments to be output to the file, just after the required headers"""
self.aux_comments += s
def close(self):
"""This method closes the canvas and writes
contents to the associated file.
Calling this procedure is optional, because
Pychart calls this procedure for every open canvas on normal exit."""
for i in range(0, len(active_canvases)):
if active_canvases[i] == self:
del active_canvases[i]
return
def open_output(self, fname):
"""Open the output file FNAME. Returns tuple (FD, NEED_CLOSE),
where FD is a file (or file-like) object, and NEED_CLOSE is a
boolean flag that tells whether FD.close() should be called
after finishing writing to the file.
FNAME can be one of the three things:
(1) None, in which case (sys.stdout, False) is returned.
(2) A file-like object, in which case (fname, False) is returned.
(3) A string, in which case this procedure opens the file and returns
(fd, True)."""
if not fname:
return (sys.stdout, False)
elif isinstance(fname, str):
return (file(fname, "wb"), True)
else:
if not hasattr(fname, "write"):
raise Exception, "Expecting either a filename or a file-like object, but got %s" % fname
return (fname, False)
def setbb(self, x, y):
"""Call this method when point (X,Y) is to be drawn in the
canvas. This methods expands the bounding box to include
this point."""
self.__xmin = min(self.__xmin, max(x, self.__clip_box[0]))
self.__xmax = max(self.__xmax, min(x, self.__clip_box[2]))
self.__ymin = min(self.__ymin, max(y, self.__clip_box[1]))
self.__ymax = max(self.__ymax, min(y, self.__clip_box[3]))
def fill_with_pattern(self, pat, x1, y1, x2, y2):
if invisible_p(x2, y2): return
self.comment("FILL pat=%s (%d %d)-(%d %d)\n" % (pat, x1, y1, x2, y2))
self.set_fill_color(pat.bgcolor)
self._path_polygon([(x1, y1), (x1, y2), (x2, y2), (x2, y1)])
self.fill()
pat.draw(self, x1, y1, x2, y2)
self.comment("end FILL.\n")
def _path_polygon(self, points):
"Low-level polygon-drawing routine."
(xmin, ymin, xmax, ymax) = _compute_bounding_box(points)
if invisible_p(xmax, ymax):
return
self.setbb(xmin, ymin)
self.setbb(xmax, ymax)
self.newpath()
self.moveto(xscale(points[0][0]), yscale(points[0][1]))
for point in points[1:]:
self.lineto(xscale(point[0]), yscale(point[1]))
self.closepath()
def polygon(self, edge_style, pat, points, shadow = None):
"""Draw a polygon with EDGE_STYLE, fill with PAT, and the edges
POINTS. POINTS is a sequence of coordinates, e.g., ((10,10), (15,5),
(20,8)). SHADOW is either None or a tuple (XDELTA, YDELTA,
fillstyle). If non-null, a shadow of FILLSTYLE is drawn beneath
the polygon at the offset of (XDELTA, YDELTA)."""
if pat:
self.comment("POLYGON points=[%s] pat=[%s]"
% (str(points), str(pat)))
(xmin, ymin, xmax, ymax) = _compute_bounding_box(points)
if shadow:
xoff, yoff, shadow_pat = shadow
self.gsave()
self._path_polygon(map(lambda p, xoff=xoff, yoff=yoff: (p[0]+xoff, p[1]+yoff), points))
self.clip_sub()
self.fill_with_pattern(shadow_pat, xmin+xoff, ymin+yoff,
xmax+xoff, ymax+yoff)
self.grestore()
self.gsave()
self._path_polygon(points)
self.clip_sub()
self.fill_with_pattern(pat, xmin, ymin, xmax, ymax)
self.grestore()
if edge_style:
self.comment("POLYGON points=[%s] edge=[%s]"
% (str(points), str(edge_style)))
self.set_line_style(edge_style)
self._path_polygon(points)
self.stroke()
def set_background(self, pat, x1, y1, x2, y2):
xmax, xmin, ymax, ymin = self.__xmax, self.__xmin, self.__ymax, self.__ymin
self.rectangle(None, pat, x1, y1, x2, y2)
self.__xmax, self.__xmin, self.__ymax, self.__ymin = xmax, xmin, ymax, ymin
def rectangle(self, edge_style, pat, x1, y1, x2, y2, shadow = None):
"""Draw a rectangle with EDGE_STYLE, fill with PAT, and the
bounding box (X1, Y1, X2, Y2). SHADOW is either None or a
tuple (XDELTA, YDELTA, fillstyle). If non-null, a shadow of
FILLSTYLE is drawn beneath the polygon at the offset of
(XDELTA, YDELTA)."""
self.polygon(edge_style, pat, [(x1,y1), (x1,y2), (x2,y2), (x2, y1)],
shadow)
def _path_ellipsis(self, x, y, radius, ratio, start_angle, end_angle):
self.setbb(x - radius, y - radius*ratio)
self.setbb(x + radius, y + radius*ratio)
oradius = nscale(radius)
centerx, centery = xscale(x), yscale(y)
startx, starty = centerx+oradius * math.cos(to_radian(start_angle)), \
centery+oradius * math.sin(to_radian(start_angle))
self.moveto(centerx, centery)
if start_angle % 360 != end_angle % 360:
self.moveto(centerx, centery)
self.lineto(startx, starty)
else:
self.moveto(startx, starty)
self.path_arc(xscale(x), yscale(y), nscale(radius),
ratio, start_angle, end_angle)
self.closepath()
def ellipsis(self, line_style, pattern, x, y, radius, ratio = 1.0,
start_angle=0, end_angle=360, shadow=None):
"""Draw an ellipsis with line_style and fill PATTERN. The center is \
(X, Y), X radius is RADIUS, and Y radius is RADIUS*RATIO, whose \
default value is 1.0. SHADOW is either None or a tuple (XDELTA,
YDELTA, fillstyle). If non-null, a shadow of FILLSTYLE is drawn
beneath the polygon at the offset of (XDELTA, YDELTA)."""
if invisible_p(x + radius, y + radius*ratio):
return
if pattern:
if shadow:
x_off, y_off, shadow_pat = shadow
self.gsave()
self.newpath()
self._path_ellipsis(x+x_off, y+y_off, radius, ratio,
start_angle, end_angle)
self.clip_sub()
self.fill_with_pattern(shadow_pat,
x-radius*2+x_off,
y-radius*ratio*2+y_off,
x+radius*2+x_off,
y+radius*ratio*2+y_off)
self.grestore()
self.gsave()
self.newpath()
self._path_ellipsis(x, y, radius, ratio, start_angle, end_angle)
self.clip_sub()
self.fill_with_pattern(pattern,
(x-radius*2), (y-radius*ratio*2),
(x+radius*2), (y+radius*ratio*2))
self.grestore()
if line_style:
self.set_line_style(line_style)
self.newpath()
self._path_ellipsis(x, y, radius, ratio, start_angle, end_angle)
self.stroke()
def clip_ellipsis(self, x, y, radius, ratio = 1.0):
"""Create an elliptical clip region. You must call endclip() after
you completed drawing. See also the ellipsis method."""
self.gsave()
self.newpath()
self.moveto(xscale(x)+nscale(radius), yscale(y))
self.path_arc(xscale(x), yscale(y), nscale(radius), ratio, 0, 360)
self.closepath()
self.__clip_stack.append(self.__clip_box)
self.clip_sub()
def clip_polygon(self, points):
"""Create a polygonal clip region. You must call endclip() after
you completed drawing. See also the polygon method."""
self.gsave()
self._path_polygon(points)
self.__clip_stack.append(self.__clip_box)
self.__clip_box = _intersect_box(self.__clip_box, _compute_bounding_box(points))
self.clip_sub()
def clip(self, x1, y1, x2, y2):
"""Activate a rectangular clip region, (X1, Y1) - (X2, Y2).
You must call endclip() after you completed drawing.
canvas.clip(x,y,x2,y2)
draw something ...
canvas.endclip()
"""
self.__clip_stack.append(self.__clip_box)
self.__clip_box = _intersect_box(self.__clip_box, (x1, y1, x2, y2))
self.gsave()
self.newpath()
self.moveto(xscale(x1), yscale(y1))
self.lineto(xscale(x1), yscale(y2))
self.lineto(xscale(x2), yscale(y2))
self.lineto(xscale(x2), yscale(y1))
self.closepath()
self.clip_sub()
def endclip(self):
"""End the current clip region. When clip calls are nested, it
        ends the most recently created clip region."""
self.__clip_box = self.__clip_stack[-1]
del self.__clip_stack[-1]
self.grestore()
def curve(self, style, points):
for p in points:
self.setbb(p[0], p[1])
self.newpath()
self.set_line_style(style)
self.moveto(xscale(points[0][0]), xscale(points[0][1]))
i = 1
n = 1
while i < len(points):
if n == 1:
x2 = points[i]
n += 1
elif n == 2:
x3 = points[i]
n += 1
elif n == 3:
x4 = midpoint(x3, points[i])
self.curveto(xscale(x2[0]), xscale(x2[1]),
xscale(x3[0]), xscale(x3[1]),
xscale(x4[0]), xscale(x4[1]))
n = 1
i += 1
if n == 1:
pass
if n == 2:
self.lineto(xscale(x2[0]), xscale(x2[1]))
if n == 3:
self.curveto(xscale(x2[0]), xscale(x2[1]),
xscale(x2[0]), xscale(x2[1]),
xscale(x3[0]), xscale(x3[1]))
self.stroke()
def line(self, style, x1, y1, x2, y2):
if not style:
return
if invisible_p(x2, y2) and invisible_p(x1, y1):
return
self.setbb(x1, y1)
self.setbb(x2, y2)
self.newpath()
self.set_line_style(style)
self.moveto(xscale(x1), yscale(y1))
self.lineto(xscale(x2), yscale(y2))
self.stroke()
def lines(self, style, segments):
if not style:
return
(xmin, ymin, xmax, ymax) = _compute_bounding_box(segments)
if invisible_p(xmax, ymax):
return
self.setbb(xmin, ymin)
self.setbb(xmax, ymax)
self.newpath()
self.set_line_style(style)
        self.moveto(xscale(segments[0][0]), yscale(segments[0][1]))
for i in range(1, len(segments)):
self.lineto(xscale(segments[i][0]), yscale(segments[i][1]))
self.stroke()
def _path_round_rectangle(self, x1, y1, x2, y2, radius):
self.moveto(xscale(x1 + radius), yscale(y1))
self.lineto(xscale(x2 - radius), yscale(y1))
self.path_arc(xscale(x2-radius), yscale(y1+radius), nscale(radius), 1, 270, 360)
self.lineto(xscale(x2), yscale(y2-radius))
self.path_arc(xscale(x2-radius), yscale(y2-radius), nscale(radius), 1, 0, 90)
self.lineto(xscale(x1+radius), yscale(y2))
self.path_arc(xscale(x1 + radius), yscale(y2 - radius), nscale(radius), 1, 90, 180)
        self.lineto(xscale(x1), yscale(y1+radius))
self.path_arc(xscale(x1 + radius), yscale(y1 + radius), nscale(radius), 1, 180, 270)
def round_rectangle(self, style, fill, x1, y1, x2, y2, radius, shadow=None):
"""Draw a rectangle with rounded four corners. Parameter <radius> specifies the radius of each corner."""
if invisible_p(x2, y2):
return
self.setbb(x1, y1)
self.setbb(x2, y2)
if fill:
if shadow:
x_off, y_off, shadow_fill = shadow
self.gsave();
self.newpath()
self._path_round_rectangle(x1+x_off, y1+y_off, x2+x_off, y2+y_off,
radius)
self.closepath()
self.clip_sub()
self.fill_with_pattern(shadow_fill, x1+x_off, y1+y_off,
x2+x_off, y2+y_off)
self.grestore()
self.gsave();
self.newpath()
self._path_round_rectangle(x1, y1, x2, y2, radius)
self.closepath()
self.clip_sub()
self.fill_with_pattern(fill, x1, y1, x2, y2)
self.grestore()
if style:
self.set_line_style(style)
self.newpath()
self._path_round_rectangle(x1, y1, x2, y2, radius)
self.closepath()
self.stroke()
def show(self, x, y, str):
global out
y_org = y
org_str = str
if invisible_p(x, y):
return
(xmin, xmax, ymin, ymax) = font.get_dimension(str)
# rectangle(line_style.default, None, x+xmin, y+ymin, x+xmax, y+ymax)
# ellipsis(line_style.default, None, x, y, 1)
self.setbb(x+xmin, y+ymin)
self.setbb(x+xmax, y+ymax)
(halign, valign, angle) = font.get_align(str)
base_x = x
base_y = y
# Handle vertical alignment
if valign == "B":
y = font.unaligned_text_height(str)
elif valign == "T":
y = 0
elif valign == "M":
y = font.unaligned_text_height(str) / 2.0
(xmin, xmax, ymin, ymax) = font.get_dimension(org_str)
self.setbb(x+xmin, y_org+y+ymin)
self.setbb(x+xmax, y_org+y+ymax)
itr = font.text_iterator(None)
max_width = 0
lines = []
for line in str.split('\n'):
cur_width = 0
cur_height = 0
itr.reset(line)
strs = []
while 1:
elem = itr.next()
if not elem:
break
(font_name, size, line_height, color, _h, _v, _a, str) = elem
cur_width += font.line_width(font_name, size, str)
max_width = max(cur_width, max_width)
cur_height = max(cur_height, line_height)
# replace '(' -> '\(', ')' -> '\)' to make
# Postscript string parser happy.
str = str.replace("(", "\\(")
str = str.replace(")", "\\)")
strs.append((font_name, size, color, str))
lines.append((cur_width, cur_height, strs))
for line in lines:
cur_width, cur_height, strs = line
cur_y = y - cur_height
y = y - cur_height
self.comment("cury: %d hei %d str %s\n" % (cur_y, cur_height, strs))
if halign == 'C':
cur_x = -cur_width/2.0
elif halign == 'R':
cur_x = -cur_width
else:
cur_x = 0
rel_x, rel_y = pychart_util.rotate(cur_x, cur_y, angle)
self.text_begin()
self.text_moveto(xscale(base_x + rel_x),
yscale(base_y + rel_y), angle)
for segment in strs:
font_name, size, color, str = segment
self.text_show(font_name, nscale(size), color, str)
self.text_end()
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""Demo based on the demo mclist.tcl included with tk source distribution."""
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.font as tkFont
import re
split_symbol = '|'
settings_content = ""
case_columns = ("name", "instructions")
case_data = [
("ReImgInf", "Read Image information"),
("ReWBTMAC", "Read Wi-Fi and BT Address"),
]
tree_columns = ("key", "value")
tree_data = [
("Argentina", "Buenos Aires", "ARS"),
("Australia", "Canberra", "AUD"),
("Brazil", "Brazilia", "BRL"),
("Canada", "Ottawa", "CAD"),
("China", "Beijing", "CNY"),
("France", "Paris", "EUR"),
("Germany", "Berlin", "EUR"),
("India", "New Delhi", "INR"),
("Italy", "Rome", "EUR"),
("Japan", "Tokyo", "JPY"),
("Mexico", "Mexico City", "MXN"),
("Russia", "Moscow", "RUB"),
("South Africa", "Pretoria", "ZAR"),
("United Kingdom", "London", "GBP"),
("United States", "Washington, D.C.", "USD")
]
def sortby(tree, col, descending):
"""Sort tree contents when a column is clicked on."""
# grab values to sort
data = [(tree.set(child, col), child) for child in tree.get_children('')]
# reorder data
data.sort(reverse=descending)
for indx, item in enumerate(data):
tree.move(item[1], '', indx)
# switch the heading so that it will sort in the opposite direction
tree.heading(col,
command=lambda col=col: sortby(tree, col, int(not descending)))
class App(object):
def __init__(self):
self.tree = None
self._setup_widgets()
# self._build_tree()
def _setup_widgets(self):
# '''
self.msg = ttk.Label(wraplength="4i", justify="left", anchor="nw",
padding=(10, 2, 10, 6),
text=("Ttk is the new Tk themed widget set. One of the widgets it "
"includes is a tree widget, which can be configured to "
"display multiple columns of informational data without "
"displaying the tree itself. This is a simple way to build "
"a listbox that has multiple columns. Clicking on the "
"heading for a column will sort the data by that column. "
"You can also change the width of the columns by dragging "
"the boundary between them."))
self.msg['text'] = settings_content
self.msg.pack(fill='x')
# '''
container = ttk.Frame()
container.pack(fill='both', expand=True)
self.tree = ttk.Treeview(columns=tree_columns, selectmode="extended")
vsb = ttk.Scrollbar(orient="vertical", command=self.tree.yview)
hsb = ttk.Scrollbar(orient="horizontal", command=self.tree.xview)
self.tree.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set)
self.tree.grid(column=0, row=2, sticky='nsew', in_=container)
vsb.grid(column=1, row=2, sticky='ns', in_=container)
# hsb.grid(column=0, row=3, sticky='ew', in_=container)
container.grid_columnconfigure(0, weight=1)
container.grid_rowconfigure(0, weight=1)
helvetica = tkFont.Font(family='Helvetica', size=10, weight=tkFont.BOLD)
# search group
self.SearchFrame = tk.Frame()
self.SearchFrame.configure(borderwidth = 3, background = "green")
self.SearchFrame.pack(side='top', fill='both', expand=1)
self._toSearch = tk.StringVar()
self.entry = tk.Entry(self.SearchFrame, textvariable=self._toSearch, background="#9fcceb", font=helvetica)
self._toSearch.set( "" )
self.entry.pack(side='left', fill='both', expand=1)
self.entry.focus()
# '''
self.number = tk.StringVar()
self.chosen = ttk.Combobox(self.SearchFrame, width=12, textvariable=self.number)
self.chosen['values'] = tree_columns
self.chosen.pack(side='left', fill='both', expand=1)
self.chosen.current(0)
# self.chosen.bind('"<<ComboboxSelected>>', lambda _: None)
# '''
self.button = tk.Button(self.SearchFrame, text='Search', fg="white", bg="black", font=helvetica, command=self.OnSearch)
self.button.pack(side='left', fill='both', expand=1)
self.tree.bind('<Double-1>', self.OnDoubleClick_tree)
# self.tree.bind("<<TreeviewSelect>>", self.OnClick)
def OnSearch(self, item=''):
for item in self.tree.selection():
self.tree.selection_remove(item)
# print( "you search "{}" by {}".format(self._toSearch.get().strip(), self.chosen.get()) )
children = self.tree.get_children(item)
for child in children:
text = self.tree.item(child, 'values')[self.chosen.current()]
# print text
if text.startswith(self._toSearch.get().strip()):
# self.tree.selection_set(child)
self.tree.selection_add(child)
self.tree.focus(child)
# return True
else:
self.tree.selection_remove(child)
res = self.OnSearch(child)
if res:
return True
def OnDoubleClick_tree(self, event):
row_idx = re.sub('I00','',str(self.tree.identify_row(self.tree.winfo_pointerxy()[1]-self.tree.winfo_rooty())))
column_idx = re.sub(r'#','',str(self.tree.identify_column(self.tree.winfo_pointerxy()[0]-self.tree.winfo_rootx())))
# print 'Row: {} & Column: {} '.format(row_idx, column_idx)
# curItem = self.tree.focus()
# print self.tree.item(curItem)
item = self.tree.identify('item',event.x,event.y)
print("you clicked on", self.tree.item(item,"values"))
'''
for item in self.tree.selection():
item_text = self.tree.item(item,"values")
print(item_text)
'''
# self._toSearch.set(self.tree.item(item,"text"))
# self.chosen.current(0)
def _build_tree(self):
for col in tree_columns:
self.tree.heading(col, text=col.title(),
command=lambda c=col: sortby(self.tree, c, 0))
for item in case_data:
root = []
if item.find(split_symbol) != -1:
root = item.split(split_symbol)
parent = self.tree.insert('', 'end', text=root[0], open=True, values=(root[1].strip(),''))
for column_data in tree_data:
column_data = column_data.split(' ')
if root[0].strip() == column_data[0].strip():
self.tree.insert(parent, 'end', values=(column_data[1], column_data[2]), tags=('ttk', 'simple'))
def get_keys(dl, keys_list):
    '''
    result = []
    get_keys(tree_data, result)
    print(tree_data)
    '''
    if isinstance(dl, dict):
        keys_list += dl.keys()
        for v in dl.values():
            get_keys(v, keys_list)
    elif isinstance(dl, list):
        for x in dl:
            get_keys(x, keys_list)
class DictQuery(dict):
def get(self, path, default = None):
keys = path.split("/")
val = None
for key in keys:
if val:
if isinstance(val, list):
val = [ v.get(key, default) if v else None for v in val]
else:
val = val.get(key, default)
else:
val = dict.get(self, key, default)
if not val:
break;
return val
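# DictQuery usage, as a minimal hand-checked sketch (comments only, nothing executed):
#   DictQuery({"a": {"b": 1}}).get("a/b")  -> 1
#   DictQuery({"a": {"b": 1}}).get("a/c")  -> None (lookup stops at the missing key)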
def run_test(idx):
    print(TestCaseTab[idx].test_id, ">>")
    Logger.append_test(TestCaseTab[idx].test_id)
    TestCaseTab[idx].pre()
    def on_compelet(result):
        print('[========Complete========]: ')
        print('test_id: ', TestCaseTab[idx].test_id)
        print("test_case: ", idx)
        print("test_result: ", result)
        if not isinstance(result, bool):
            for key, value in result.items():
                print(" ", key, "->", value)
                if TestCaseTab[idx].test_id != 'TestDutCompleted' and TestCaseTab[idx].test_id != 'TestRecordDutID':
                    # tree_raw = "{} {} {} {} {}".format(TestCaseTab[idx].test_id, split_symbol, key, split_symbol, value)
                    tree_raw = "{} {} {}".format(TestCaseTab[idx].test_id, key, value)
                    tree_data.append(tree_raw)
        # print("\n", end='')
        print('[========================]: ')
        TestCaseTab[idx].post()
    TestCaseTab[idx].action(on_compelet)  # push to work queue
'''
def main():
root = tk.Tk()
root.wm_title("Multi-Column List")
root.wm_iconname("mclist")
# optional?
# import plastik_theme
# plastik_theme.install('~/tile-themes/plastik/plastik')
app = App()
root.mainloop()
if __name__ == "__main__":
main()
'''
# --------------------- main ---------------------
import test_bench
import time
import sys
import os
import background_thread as job_runner
import environ as env
def settings_parser(params, filename):
    if 'ADB' in params:
        yield "ADB: {}, {}\n".format(params.get('ADB')['SID'], params.get('ADB')['MODE'])
    if 'SN' in params:
        yield "SN: {:^16}\n".format(params.get('SN'))
    if 'UART' in params:
        yield "UART: {}, {}\n".format(params.get('UART')['COM'], params.get('UART')['BAUD'])
    yield '\n'
    yield filename
do_SaveAsFiles_Flog = False
TestCaseTab = test_bench.TestCaseTab
Logger = test_bench.Logger
Settings = test_bench.settings
Logger.dut_id = Settings.get('SN')
settings_content = ''.join(settings_parser(Settings, sys.argv[1]))
tree_data = []
case_data = []
for idx in range(len(TestCaseTab)):
if TestCaseTab[idx].test_id != 'TestDutCompleted':
case_raw = "{} {} {}".format(TestCaseTab[idx].test_id, split_symbol, TestCaseTab[idx].instructions)
case_data.append(case_raw)
if len(sys.argv) >= 3 and sys.argv[2].lower()=='true':
do_SaveAsFiles_Flog = True
if do_SaveAsFiles_Flog:
env.create_result_folder( Logger.dut_id )
print('[Start]')
'''
# Sakia note: no effect, really implemented in test_bench.py
TestCaseTab[1].values = sys.argv[2::]
'''
for idx in range(len(TestCaseTab)):
    try:
        run_test(idx)
        while True:
            job = test_bench.dispatch_next()  # get on_compelet job
            if job is not None:
                print()
                job()
                break
            time.sleep(.1)
            # print('>', end='')
        time.sleep(.1)
    except:
        print("main except!")
        break
print('[END]')
if do_SaveAsFiles_Flog:
env.backup_result_folder( Logger.dut_id )
test_bench.shutdown()
root = tk.Tk()
root.wm_title("mft runner result")
root.wm_iconname("mclist")
app = App()
app._build_tree()
#root.resizable(0,0)
root.resizable(False, False)
root.withdraw()
root.update_idletasks()
x = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2
y = (root.winfo_screenheight() - root.winfo_reqheight()) / 2
root.geometry("+%d+%d" % (x, y))
root.deiconify()
root.mainloop()
#print('\nMFT Runner Done.')
|
#!/usr/bin/env python3
import bitstruct
import numpy as np
from bitstruct import *
src = open("input.txt", "r").read()
example = "D2FE28"
example = "38006F45291200"
example = "A0016C880162017C3686B18A3D4780"
example = "9C0141080250320F1802104A08"
# src = example
buf = bytearray.fromhex(src)
literal = 4
# def pad(o):
# return 8 - o % 8
offset = 0
def unpack(fmt):
global offset, buf
vals = bitstruct.unpack_from(fmt, buf, offset=offset)
offset += bitstruct.calcsize(fmt)
if len(vals) == 1:
return vals[0]
return vals
ver_sum = 0
def read_packet():
global offset, ver_sum
v, t = unpack("u3u3")
ver_sum += v
if t == literal:
sv = ""
while True:
c, v = unpack("u1u4")
sv += f"{v:x}"
if c == 0:
break
return int(sv, 16)
else:
length_type = unpack("u1")
results = []
if length_type == 0:
sub_packet_length = unpack("u15")
current_offset = offset
while offset < current_offset + sub_packet_length:
results.append(read_packet())
if length_type == 1:
for _ in range(unpack("u11")):
results.append(read_packet())
if t == 0:
return np.sum(results)
if t == 1:
        return np.prod(results)
if t == 2:
return np.min(results)
if t == 3:
return np.max(results)
assert len(results) == 2
if t == 5:
return 1 if results[0] > results[1] else 0
if t == 6:
return 1 if results[0] < results[1] else 0
if t == 7:
return 1 if results[0] == results[1] else 0
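# Hand-checked sanity notes from the puzzle statement (comments only, nothing executed):
# "D2FE28" is a single literal packet with value 2021, and
# "9C0141080250320F1802104A08" evaluates to 1, since it encodes 1 + 3 == 2 * 2.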
v = read_packet()
print("part1:", ver_sum)
print("part2:", v)
|
# coding: utf-8
# flake8: noqa
"""
Twitter APIs
Call Twitter APIs.<BR />[Endpoint] https://api.apitore.com/api/23 # noqa: E501
OpenAPI spec version: 0.0.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from swagger_client.api.twitter_simple_controller_api import TwitterSimpleControllerApi
# import ApiClient
from swagger_client.api_client import ApiClient
from swagger_client.configuration import Configuration
# import models into sdk package
from swagger_client.models.tweet_entity import TweetEntity
from swagger_client.models.twitter_response_entity import TwitterResponseEntity
|
# -*- coding: utf-8 -*-
import asyncio
import logging
import logging.config
import os
import queue
import random
import re
import shutil
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
import discord
from ...core.arguments import get_args
from ...binders import available_formats
from ...core.app import App
from ...sources import crawler_list
from ...utils.uploader import upload
from .config import signal
from .message_handler import MessageHandler
logger = logging.getLogger(__name__)
class DiscordBot(discord.Client):
def __init__(self, *args, loop=None, **options):
options['shard_id'] = get_args().shard_id
options['shard_count'] = get_args().shard_count
options['heartbeat_timeout'] = 300
options['guild_subscriptions'] = False
options['fetch_offline_members'] = False
super().__init__(*args, loop=loop, **options)
# end def
def start_bot(self):
self.run(os.getenv('DISCORD_TOKEN'))
# end def
async def on_ready(self):
# Initialize handler cache
self.handlers = {}
        print('Discord bot is online!')
activity = discord.Activity(name='for 🔥%slncrawl🔥' % signal,
type=discord.ActivityType.watching)
await self.change_presence(activity=activity,
status=discord.Status.online)
# end def
async def on_message(self, message):
if message.author == self.user:
return # I am not crazy to talk with myself
# end if
if message.author.bot:
return # Other bots are not edible
# end if
try:
# Cleanup unused handlers
self.cleanup_handlers()
text = message.content
if isinstance(message.channel, discord.abc.PrivateChannel):
await self.handle_message(message)
elif text.startswith(signal) and len(text.split(signal)) == 2:
uid = message.author.id
if uid in self.handlers:
self.handlers[uid].destroy()
# end if
await self.send_public_text(message, random.choice([
"Sending you a private message",
"Look for direct message",
]))
await self.handle_message(message)
# end if
except IndexError as ex:
            logger.exception('Index error reported: %s', ex)
except Exception:
logger.exception('Something went wrong processing message')
# end try
# end def
async def send_public_text(self, message, text):
async with message.channel.typing():
await message.channel.send(text + (" <@%s>" % str(message.author.id)))
# end def
async def handle_message(self, message):
if self.is_closed():
return
# end if
try:
uid = str(message.author.id)
logger.info("Processing message from %s", message.author.name)
if uid not in self.handlers:
self.handlers[uid] = MessageHandler(self)
await message.author.send(
'-' * 25 + '\n' +
('Hello %s\n' % message.author.name) +
'-' * 25 + '\n'
)
logger.info("New handler for %s", message.author.name)
# end if
self.handlers[uid].process(message)
except Exception as err:
logger.exception('While handling this message: %s', message)
# end try
# end def
def cleanup_handlers(self):
try:
cur_time = datetime.now()
for uid, handler in self.handlers.items():
last_time = getattr(handler, 'last_activity', cur_time)
if (cur_time - last_time).days > 1:
handler.destroy()
# end if
# end for
except Exception:
logger.exception('Failed to cleanup handlers')
# end try
# end def
# end class
|
import sys
def reverse(num):
count = num
result = 0
while count != 0:
n = count % 10
count = count // 10
result = result*10 + n
return result
def firstdigit(num):
while num > 9:
num = num // 10
return num
def lastdigit(num):
return num % 10
line = "121"
result = int(line)
count = 1
rev = reverse(result)
result += rev
while(firstdigit(result) != lastdigit(result)):
rev = reverse(result)
result += rev
count += 1
print(count, result)
|
class Identifier:
"""
A class that encapsulates the column names of a dataset for use in Mobipy functions.
...
Attributes
----------
lat_name : str
the latitude column name
lon_name : str
the longitude column name
timestamp : str
the timestamp column name, used in some mobipy functions
start_time : str
the start_time column name, used in some mobipy functions
end_time : str
the end_time column name, used in some mobipy functions
item_id : str
the item or user/group id column name, used in some mobipy functions
"""
def __init__(self, lat_name, lon_name, timestamp, start_time, end_time, item_id):
"""
Parameters
----------
lat_name : str
the latitude column name
lon_name : str
the longitude column name
timestamp : str
the timestamp column name, used in some mobipy functions
start_time : str
the start_time column name, used in some mobipy functions
end_time : str
the end_time column name, used in some mobipy functions
item_id : str
the item or user/group id column name, used in some mobipy functions
"""
self.latitude = lat_name
self.longitude = lon_name
self.timestamp = timestamp
self.start_time = start_time
self.end_time = end_time
self.item_id = item_id
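# A minimal construction sketch (guarded so importing the module has no side
# effects); the column names below are hypothetical examples, not names required
# by mobipy.
if __name__ == "__main__":
    example = Identifier(
        lat_name="lat", lon_name="lon", timestamp="timestamp",
        start_time="start_time", end_time="end_time", item_id="user_id",
    )
    print(example.latitude, example.longitude, example.item_id)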
|
import json
with open('reposfinal') as data_file:
data = json.load(data_file)
i = 0
for repo in data:
i += 1
print "repos = " + str(i)
j = 0
for repo in data:
for commit in repo['commit']:
j+=1
print(j)
|
import os
import tempfile
from io import StringIO
from django.contrib.gis.geos import MultiPolygon
from django.test import TestCase
from organisations.boundaries.management.commands.boundaryline_import_boundaries import (
Command,
)
from organisations.models import (
DivisionGeography,
OrganisationGeography,
OrganisationDivision,
)
def count_divs_by_source(source):
return DivisionGeography.objects.all().filter(source=source).count()
class ImportBoundariesTests(TestCase):
fixtures = [
"croydon-metadata-gsscodes.json",
"croydon-geographies.json",
"tintagel-metadata.json",
]
def setUp(self):
super().setUp()
self.opts = {
"url": None,
"s3": None,
"file": os.path.abspath(
"every_election/apps/organisations/boundaries/fixtures/boundaryline_subset"
),
"verbosity": 1,
"source": "imported in unit test",
"all": False,
"code": None,
}
# sanity checks on init state
# should start off with 28 boundaries from LGBCE
self.assertEqual(28, count_divs_by_source("lgbce"))
# ..and 24 from some unknown source
self.assertEqual(24, count_divs_by_source("unknown"))
self.assertEqual(0, count_divs_by_source("imported in unit test"))
def run_command_with_test_data(self):
cmd = Command()
cmd.stdout = StringIO()
cmd.handle(**self.opts)
cmd.stdout.seek(0)
output = cmd.stdout.read()
return output
def test_import_division_not_found(self):
# gss:X01000001 is not a valid division in our DB fixture
self.opts["code"] = "gss:X01000001"
output = self.run_command_with_test_data()
self.assertIn("FAILED", output)
self.assertIn(
"X01000001: OrganisationDivision matching query does not exist", output
)
# DB content should not have changed
self.assertEqual(28, count_divs_by_source("lgbce"))
self.assertEqual(24, count_divs_by_source("unknown"))
self.assertEqual(0, count_divs_by_source("imported in unit test"))
def test_import_boundary_not_found(self):
# gss:E05000148 is a valid division in our DB fixture
# but it doesn't exist in our BoundaryLine fixture
self.opts["code"] = "gss:E05000148"
output = self.run_command_with_test_data()
self.assertIn("FAILED", output)
self.assertIn("Expected one match for E05000148, found 0", output)
# DB content should not have changed
self.assertEqual(28, count_divs_by_source("lgbce"))
self.assertEqual(24, count_divs_by_source("unknown"))
self.assertEqual(0, count_divs_by_source("imported in unit test"))
def test_import_single_boundary_overwrite(self):
# gss:E05011464 already has a geography
self.assertEqual(
"lgbce",
OrganisationDivision.objects.all()
.get(official_identifier="gss:E05011464")
.geography.source,
)
# but we're going to overwrite it with a new one from BoundaryLine
self.opts["code"] = "gss:E05011464"
output = self.run_command_with_test_data()
self.assertIn("0 Failures", output)
self.assertEqual(27, count_divs_by_source("lgbce"))
self.assertEqual(
"imported in unit test",
OrganisationDivision.objects.all()
.get(official_identifier="gss:E05011464")
.geography.source,
)
def test_import_single_boundary_create(self):
# this time we'll delete the geography record
# for gss:E05011464 before we start
OrganisationDivision.objects.all().get(
official_identifier="gss:E05011464"
).geography.delete()
# importing from BoundaryLine should create a new record
self.opts["code"] = "gss:E05011464"
output = self.run_command_with_test_data()
self.assertIn("0 Failures", output)
self.assertEqual(27, count_divs_by_source("lgbce"))
self.assertEqual(
"imported in unit test",
OrganisationDivision.objects.all()
.get(official_identifier="gss:E05011464")
.geography.source,
)
def test_import_boundary_with_detached_parts(self):
self.opts["code"] = "gss:E05009271"
output = self.run_command_with_test_data()
self.assertIn("0 Failures", output)
imported_geo = (
OrganisationDivision.objects.all()
.get(official_identifier="gss:E05009271")
.geography.geography
)
# In our input fixture, E05009271 matches 2 records:
# one is a MultiPolygon with 3 polygons in it
# the other is a single polygon object
# Importing this should have consolidated that into
# a single MultiPolygon object with 4 polygons in it
self.assertIsInstance(imported_geo, MultiPolygon)
self.assertEqual(4, len(imported_geo))
def test_import_multiple_boundaries(self):
# import 3 boundaries by passing a list of 3 codes
        # as a JSON file containing an array
tmp = tempfile.NamedTemporaryFile(suffix=".json")
tmp.write(
b"""[
"gss:E05011462",
"gss:E05011463",
"gss:E05011464"
]"""
)
tmp.seek(0)
self.opts["codes"] = tmp.name
output = self.run_command_with_test_data()
tmp.close()
self.assertIn("Imported 3 boundaries", output)
self.assertIn("0 Failures", output)
self.assertEqual(25, count_divs_by_source("lgbce"))
self.assertEqual(3, count_divs_by_source("imported in unit test"))
def test_import_invalid_ids(self):
        # should throw errors on various spurious/invalid inputs
self.opts["code"] = "cheese"
with self.assertRaises(ValueError):
self.run_command_with_test_data()
self.opts["code"] = "gss:cheese"
with self.assertRaises(ValueError):
self.run_command_with_test_data()
self.opts["code"] = "unit_id:12345"
with self.assertRaises(ValueError):
self.run_command_with_test_data()
def test_import_multiple_matches(self):
# to set this test up, we need a slightly more contrived example
# so I'm going to overwrite gss:E05000148 with gss:E05011464
div = OrganisationDivision.objects.all().get(
official_identifier="gss:E05000148"
)
div.official_identifier = "gss:E05011464"
div.save()
# this now gives us a situation where we've got 2 divisions
# in the DB which both have the same GSS code
        # which are members of 2 different DivisionSets:
self.assertEqual(
2,
OrganisationDivision.objects.all()
.filter(official_identifier="gss:E05011464")
.count(),
)
# if we try to import without the --all flag
self.opts["code"] = "gss:E05011464"
output = self.run_command_with_test_data()
# this should throw an error and tell us what to do
self.assertIn("Imported 0 boundaries", output)
self.assertIn(
"E05011464: get() returned more than one OrganisationDivision -- it returned 2!",
output,
)
self.assertIn(
"To import this boundary against all occurrences of this code, re-run the command with the --all flag",
output,
)
# and the DB content should not have changed
self.assertEqual(28, count_divs_by_source("lgbce"))
self.assertEqual(24, count_divs_by_source("unknown"))
self.assertEqual(0, count_divs_by_source("imported in unit test"))
# but if we run it again with the --all flag
self.opts["all"] = True
output = self.run_command_with_test_data()
# this time it should import the boundary
# against both matching division objects
self.assertIn("Imported 1 boundaries", output)
self.assertEqual(27, count_divs_by_source("lgbce"))
self.assertEqual(23, count_divs_by_source("unknown"))
self.assertEqual(2, count_divs_by_source("imported in unit test"))
def test_import_organisation(self):
# we can import Organisation boundaries as well as divisions
self.assertEqual(
"unknown", OrganisationGeography.objects.get(gss="E09000008").source
)
self.opts["code"] = "gss:E09000008"
output = self.run_command_with_test_data()
self.assertIn("Imported 1 boundaries", output)
self.assertEqual(
"imported in unit test",
OrganisationGeography.objects.get(gss="E09000008").source,
)
|
passagem = 1500
print(passagem)
custo_por_dia = 350
dias = 2
custo_total = passagem * 2 + custo_por_dia * dias
print(custo_total)
print ("O custo de sua passagem será: R$ 3700,00. ")
print ("O custo de sua passagem será: R$", custo_total)
dólar = 5.01
print("O custo de sua passagem será: US$", custo_total/dólar)
print("Qual é o nome do senhor?")
nome = input()
print(nome,"," "o custo total de sua passagem é de:", custo_total)
print("O senhor vai querer parcelar?")
parcelar = input()
print("Podemos parcelar até em 6 vezes!")
print("Ok, por favor, pode parcelar em 6 vezes então!", "Obrigado.")
print(nome, "o preço de sua passagem parcelada será de:")
preço_parcela = float(input())
print("Muito obrigado, Sr.", nome, "por escolher a Viaje Comigo como agência de sua viagem," "o preço da parcela ficará em 6x de", preço_parcela, "pagos em cartão de crédito.")
print(nome, "Podemos fechar o contrato?")
contrato = input()
print("Perfeito, mais uma vez muito obrigado pela preferência, Sr", nome, ".", "Tenha um ótimo e excelente dia.")
|
from libai.config import LazyCall
from modeling.moco import MoCo_ViT
from modeling.vit import VisionTransformer
base_encoder = LazyCall(VisionTransformer)(
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
drop_path_rate=0.1,
global_pool=False,
stop_grad_conv1=True,
)
momentum_encoder = LazyCall(VisionTransformer)(
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
drop_path_rate=0.1,
global_pool=False,
stop_grad_conv1=True,
)
model = LazyCall(MoCo_ViT)(
base_encoder=base_encoder,
momentum_encoder=momentum_encoder,
dim=256,
mlp_dim=4096,
T=0.2,
m=0.99,
)
|
def remove_every_other(lst):
"""Return a new list of other item.
>>> lst = [1, 2, 3, 4, 5]
>>> remove_every_other(lst)
[1, 3, 5]
This should return a list, not mutate the original:
>>> lst
[1, 2, 3, 4, 5]
"""
    return [item for i, item in enumerate(lst) if i % 2 == 0]
# Answer key. NEED TO remember slicing: return lst[::2]
|
network="lora"
""" LoRa example configuration options """
_MAX_CONNECTION_ATTEMPT_TIME_SEC = 60
""" LoRa-related configuration options """
_DEV_EUI = "<lora-dev-eui>"
_APP_EUI = "<lora-app-eui>"
_APP_KEY = "<lora-app-key>"
from network import LoRa
_LORA_REGION = LoRa.<lora-region>
_LORA_ADR = <lora-adr>
_LORA_DR = <lora-dr>
_LORA_CONFIRMED = <lora-confirmed>
_LORA_TX_RETRIES = <lora-retries>
_LORA_SOCKET_TIMEOUT = 30
_LISTEN_DL_MSG = False
_LORA_SOCKET_BUFFER_SIZE = 128
|
import os
import sys
import unittest
REGRESSION_TEST_DIRNAME = 'regressiontests'
REGRESSION_TEST_DIR = REGRESSION_TEST_DIRNAME
sys.path.insert(0, '../src/')
def load_suite_tests(only=None):
    only_module, only_test_case, only_function = None, None, None
if only:
args = only.split(".")
only_module, only_test_case, only_function = args[0], (args[1:] and args[1] or None), (args[2:] and args[2] or None)
suites = []
for dirpath, dirnames, filenames in os.walk(REGRESSION_TEST_DIR):
for f in filenames:
basename, ext = os.path.splitext(f)
if (ext == '.py') and (not only_module or (only_module == basename)):
modname = "%s.%s" % ('.'.join(dirpath.split('/')), basename)
package = __import__(modname, globals(), locals(), [], 0)
mod = sys.modules[modname]
if hasattr(mod, 'suite'):
suite = mod.suite()
if only_test_case:
suite._tests = [t for t in suite._tests if t.__class__.__name__ == only_test_case]
if only_function:
suite._tests = [t for t in suite._tests if t._testMethodName == only_function]
suites.append(suite)
return suites
if __name__ == '__main__':
# Doctests.
import doctest
doctest_response = doctest.testfile("../src/colbert/daterange.py")
only = None
if len(sys.argv) > 1:
only = sys.argv[1]
suites = load_suite_tests(only=only)
suite = unittest.TestSuite(suites)
unittest_response = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(doctest_response.failed or unittest_response.errors))
|
from utils import misc
class NetTracker:
def __init__(self):
self.peer_servers_set = set()
def add_peer(self, peer_server):
self.peer_servers_set.add(peer_server)
def remove_peer(self, peer_server):
try:
self.peer_servers_set.remove(peer_server)
except KeyError:
misc.print_log ("[i] {} is not in the list of connected peers!".format(peer_server))
def get_peer_servers_list(self):
return self.peer_servers_set
|
from banklite.dtypes import BaseCustomer, RetailCustomer, CommercialCustomer
def test_base_customer():
customer = BaseCustomer("1111111111", 11111, "555-555-5555", "email@mail.com")
assert customer.customer_id is not None
assert customer.zip == 11111
assert customer.phone == "555-555-5555"
assert customer.email == "email@mail.com"
def test_retail_customer():
customer = RetailCustomer(
"1111111111", 11111, "555-555-5555", "email@mail.com", "111-11-1111", "John", "Doe"
)
assert customer.customer_id is not None
assert customer.zip == 11111
assert customer.phone == "555-555-5555"
assert customer.email == "email@mail.com"
assert customer.ein is None
assert customer.ssn == "111-11-1111"
assert customer.first_name == "John"
assert customer.last_name == "Doe"
def test_commercial_customer():
customer = CommercialCustomer(
"1111111111", 11111, "555-555-5555", "email@mail.com", "12-3456789", "Acme, Inc."
)
assert customer.customer_id is not None
assert customer.zip == 11111
assert customer.phone == "555-555-5555"
assert customer.email == "email@mail.com"
assert customer.ein == "12-3456789"
assert customer.ssn is None
assert customer.first_name == "Acme, Inc."
assert customer.last_name is None
|
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import os.path
# The directory containing this file
HERE = os.path.abspath(os.path.dirname(__file__))
# The text of the README file
with open(os.path.join(HERE, "README.md")) as fid:
README = fid.read()
def package_files(directory):
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths
extra_files = package_files(HERE+'/tension_inflation/resources')
print(extra_files)
setup(name='tension_inflation',
version='1.0.0',
description='Software controlling the tension-inflation device',
long_description=README, # Optional
long_description_content_type='text/markdown',
url='https://github.com/JosephBrunet/tension_inflation.git',
author='Joseph Brunet',
author_email='jo.brunet73@gmail.com',
license='MIT',
package_dir={'': 'tension_inflation'},
packages=find_packages(where='tension_inflation'),
package_data={'': extra_files},
python_requires='>=3',
install_requires=['PyQt5','pyserial','pyqtgraph','simple-pid','PIPython'],
entry_points={
'gui_scripts':['tension_inflation=tension_inflation.GUI_main:main',],
'console_scripts': ['tension_inflation_console=tension_inflation.GUI_main:main',],
},
)
|
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views
from django.conf import settings
from django.conf.urls.static import static
from django_registration.backends.one_step.views import RegistrationView
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('neighbour.urls')),
path('accounts/register/', RegistrationView.as_view(success_url='/'),name='django_registration_register'),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('django_registration.backends.one_step.urls')),
]
|
"""
Package for the video comment API
"""
|
"""Generate Lights Out puzzle.
Ref: https://github.com/pmneila/Lights-Out
"""
# Standard library imports
from operator import add
from itertools import chain, combinations
from functools import reduce
# Third party imports
import numpy as np
from numpy import eye, hstack, vectorize, vstack, int32
from numpy.core.numeric import array, ndarray, where, zeros
from numpy.random import randint
class GF2(object):
"""Galois field GF(2).
Ref: https://github.com/pmneila/Lights-Out
"""
def __init__(self, a=0):
self.value = int(a) % 2
def __add__(self, rhs):
return GF2(self.value + GF2(rhs).value)
def __mul__(self, rhs):
return GF2(self.value * GF2(rhs).value)
def __sub__(self, rhs):
return GF2(self.value - GF2(rhs).value)
def __truediv__(self, rhs):
return GF2(self.value / GF2(rhs).value)
def __repr__(self):
return str(self.value)
def __eq__(self, rhs):
if isinstance(rhs, GF2):
return self.value == rhs.value
return self.value == rhs
def __le__(self, rhs):
if isinstance(rhs, GF2):
return self.value <= rhs.value
return self.value <= rhs
def __lt__(self, rhs):
if isinstance(rhs, GF2):
return self.value < rhs.value
return self.value < rhs
def __int__(self):
return self.value
def __long__(self):
return self.value
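# Illustration of the field arithmetic above: GF2(1) + GF2(1) == GF2(0) and
# GF2(1) * GF2(1) == GF2(1), i.e. addition behaves as XOR and multiplication
# as AND over {0, 1}.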
GF2array = vectorize(GF2)
def powerset(iterable):
"""Calculate power set.
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
Ref: https://github.com/pmneila/Lights-Out
"""
s = list(iterable)
return chain.from_iterable(
combinations(s, r) for r in range(len(s) + 1))
class ManageLightsOutPuzzle(object):
"""Manage Lights Out Puzzle."""
def __init__(self):
self.n_lights_1axis = 0
self.mat_inv = array([])
self.mat_null = array([])
self.mat_puzzle = array([])
self.mat_solution = array([])
@staticmethod
def state_transition_matrix_lightsout(n_lights_1axis):
"""Calculate state trasition matrix of light out."""
matrix = zeros((n_lights_1axis * n_lights_1axis,
n_lights_1axis * n_lights_1axis))
for idx_row in range(1, n_lights_1axis + 1):
for idx_col in range(1, n_lights_1axis + 1):
vector = zeros((n_lights_1axis + 2, n_lights_1axis + 2))
vector[idx_row - 1, idx_col + 0] = 1
vector[idx_row + 1, idx_col + 0] = 1
vector[idx_row + 0, idx_col + 0] = 1
vector[idx_row + 0, idx_col + 1] = 1
vector[idx_row + 0, idx_col - 1] = 1
vector = vector[1:n_lights_1axis + 1, 1:n_lights_1axis + 1]
matrix[(idx_row - 1) * n_lights_1axis
+ (idx_col - 1), :] = vector.ravel()
return matrix
@staticmethod
def inv_by_gauss_elimination(mat):
"""Caculate inverse matrix by gauss elimination.
Parameters
----------
mat : ndarray
matrix.
Returns
-------
mat_inv : ndarray
inverse matrix.
mat_null : ndarray
null space matrix.
"""
n_row, n_col = mat.shape
if n_row != n_col:
raise ValueError("n_row and n_col are different.")
data = GF2array(hstack([mat, eye(n_row)]))
n_null_dim = 0
mat_null = array([])
# Row echelon form
for idx_row_src in range(n_row - 1):
idx_pivot_candidate = where(data[idx_row_src:, idx_row_src] == 1)[
0]
if len(idx_pivot_candidate) > 0:
idx_pivot = idx_pivot_candidate[0] + idx_row_src
else:
n_null_dim += 1
continue
if idx_pivot != idx_row_src:
tmp = data[idx_row_src, :].copy()
data[idx_row_src, :] = data[idx_pivot, :]
data[idx_pivot, :] = tmp
for idx_row_dst in range(idx_row_src + 1, n_row):
data[idx_row_dst, :] += (data[idx_row_src, :]
* data[idx_row_dst, idx_row_src])
if np.sum(data[-1, :n_col]) == 0:
n_null_dim += 1
# inverse matrix
for idx_row_src in range(n_row - 1, 0, -1):
for idx_row_dst in range(idx_row_src - 1, -1, -1):
data[idx_row_dst, :] += (data[idx_row_src, :]
* data[idx_row_dst, idx_row_src])
# Find Null space
if n_null_dim > 0:
mat_diag = data[:, :n_col]
mat_null = vstack(
[mat_diag[:n_row - n_null_dim, -n_null_dim:],
GF2array(eye(n_null_dim))])
mat_inv = data[-n_row:, -n_col:]
return mat_inv, mat_null
@staticmethod
def check_solvable(lights_mat, mat_null):
"""Check if the problem is solved.
Parameters
----------
lights_mat : ndarray
            matrix of the Lights Out puzzle.
mat_null : ndarray
null space matrix.
Returns
-------
is_solvable: bool
return True if lights_mat is solvable.
"""
is_solvable = True
if len(mat_null) > 0:
ret = np.sum((int32(lights_mat.ravel()) @ int32(mat_null)) % 2)
if ret != 0:
is_solvable = False
return is_solvable
def new_puzzle(self, n_lights_1axis):
"""Generate New Puzzle."""
if self.n_lights_1axis != n_lights_1axis:
self.n_lights_1axis = n_lights_1axis
state_mat = self.state_transition_matrix_lightsout(n_lights_1axis)
(self.mat_inv,
self.mat_null) = self.inv_by_gauss_elimination(state_mat)
self.mat_puzzle = randint(0, 2, size=(n_lights_1axis, n_lights_1axis))
while (not self.check_solvable(self.mat_puzzle, self.mat_null)
or np.sum(self.mat_puzzle) == 0):
self.mat_puzzle = randint(0, 2,
size=(n_lights_1axis, n_lights_1axis))
self.calculate_solution()
def calculate_solution(self):
"""Calculate solution."""
n_lights = self.n_lights_1axis
solution_1st = (int32(self.mat_inv) @ int32(self.mat_puzzle.ravel()))
solution_1st %= 2
# Given a solution, we can find more valid solutions
# adding any combination of the null vectors.
# Find the solution with the minimum number of 1's.
# Ref: https://github.com/pmneila/Lights-Out
solutions = [(solution_1st + reduce(add, nvs, 0)) % 2 for nvs in
powerset(int32(self.mat_null.T))]
solution_final = min(solutions, key=lambda x: x.sum())
# print([x.sum() for x in solutions])
self.mat_solution = solution_final.reshape(n_lights, n_lights)
def count_1_of_solution(self):
"""Count 1 of solution."""
return self.mat_solution.sum()
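# A minimal usage sketch of the class above; the 5x5 board size is an
# arbitrary choice for illustration.
if __name__ == "__main__":
    demo = ManageLightsOutPuzzle()
    demo.new_puzzle(5)                   # random, guaranteed-solvable 5x5 board
    print(demo.mat_puzzle)               # current 0/1 light states
    print(demo.mat_solution)             # cells to press (fewest presses found)
    print(demo.count_1_of_solution())    # number of presses in that solution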
|
#!/usr/bin/env python
# The Notices and Disclaimers for Ocean Worlds Autonomy Testbed for Exploration
# Research and Simulation can be found in README.md in the root directory of
# this repository.
import constants
import math
import copy
from tf.transformations import quaternion_from_euler
from utils import is_shou_yaw_goal_in_range
from activity_full_digging_traj import move_to_pre_trench_configuration
from activity_full_digging_traj import go_to_Z_coordinate, change_joint_value
def plan_cartesian_path_lin(move_arm, length):
waypoints = []
wpose = move_arm.get_current_pose().pose
wpose.position.x += length
waypoints.append(copy.deepcopy(wpose))
(plan, fraction) = move_arm.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # end effector follow step (meters)
0.0) # jump threshold
return plan, fraction
def grind(move_arm, move_limbs, args):
x_start = args[1]
y_start = args[2]
depth = args[3]
length = args[4]
ground_position = args[5]
pre_move_complete = move_to_pre_trench_configuration(move_arm, x_start, y_start)
if pre_move_complete == False:
return False
# rotate hand
change_joint_value(move_arm, constants.J_HAND_YAW, -2*math.pi/3)
# approaching and entering terrain - along -Z
z_start = ground_position + constants.GRINDER_HEIGHT - depth
go_to_Z_coordinate(move_limbs, x_start, y_start, z_start)
# grinding ice - along +X
cartesian_plan, fraction = plan_cartesian_path_lin(move_arm, length)
move_limbs.execute(cartesian_plan, wait=True)
move_limbs.stop()
# exiting terrain - along +Z
z_start = ground_position + constants.GRINDER_OFFSET - depth
go_to_Z_coordinate(move_arm, x_start, y_start, z_start)
return True
|
# coding: utf-8
"""
Xero Payroll AU
This is the Xero Payroll API for orgs in Australia region. # noqa: E501
OpenAPI spec version: 2.4.0
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class SettingsTrackingCategories(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"employee_groups": "SettingsTrackingCategoriesEmployeeGroups",
"timesheet_categories": "SettingsTrackingCategoriesTimesheetCategories",
}
attribute_map = {
"employee_groups": "EmployeeGroups",
"timesheet_categories": "TimesheetCategories",
}
def __init__(self, employee_groups=None, timesheet_categories=None): # noqa: E501
"""SettingsTrackingCategories - a model defined in OpenAPI""" # noqa: E501
self._employee_groups = None
self._timesheet_categories = None
self.discriminator = None
if employee_groups is not None:
self.employee_groups = employee_groups
if timesheet_categories is not None:
self.timesheet_categories = timesheet_categories
@property
def employee_groups(self):
"""Gets the employee_groups of this SettingsTrackingCategories. # noqa: E501
:return: The employee_groups of this SettingsTrackingCategories. # noqa: E501
:rtype: SettingsTrackingCategoriesEmployeeGroups
"""
return self._employee_groups
@employee_groups.setter
def employee_groups(self, employee_groups):
"""Sets the employee_groups of this SettingsTrackingCategories.
:param employee_groups: The employee_groups of this SettingsTrackingCategories. # noqa: E501
:type: SettingsTrackingCategoriesEmployeeGroups
"""
self._employee_groups = employee_groups
@property
def timesheet_categories(self):
"""Gets the timesheet_categories of this SettingsTrackingCategories. # noqa: E501
:return: The timesheet_categories of this SettingsTrackingCategories. # noqa: E501
:rtype: SettingsTrackingCategoriesTimesheetCategories
"""
return self._timesheet_categories
@timesheet_categories.setter
def timesheet_categories(self, timesheet_categories):
"""Sets the timesheet_categories of this SettingsTrackingCategories.
:param timesheet_categories: The timesheet_categories of this SettingsTrackingCategories. # noqa: E501
:type: SettingsTrackingCategoriesTimesheetCategories
"""
self._timesheet_categories = timesheet_categories
|
from .utils import sorted_by_key
from .station import MonitoringStation
#return the stations whose relative water level is above the given threshold tol
def stations_level_over_threshold(stations, tol):
#create an empty list of tuples
tuples = []
#loop through each station
for station in stations:
waterlevel = station.relative_water_level()
if waterlevel is not None and waterlevel > tol:
# Add tuple to list
tuples.append((station, waterlevel))
# Sort list
return sorted_by_key(tuples, 1, True)
def stations_highest_rel_level(stations, N):
#create list of stations with their relative level
stations_level = []
for station in stations:
waterlevel = station.relative_water_level()
if waterlevel is not None:
stations_level.append((station, waterlevel))
#sort the list of stations in descending order of water level
stations_level = sorted_by_key(stations_level, 1, True)
#return first N
return [x[0] for x in stations_level[:N]]
def towns_average_level(stations):
""" Returns towns with the average relative water level """
towns_stations = {}
for station in stations:
        #add the station to the dictionary mapping each town to its stations
        try:
            towns_stations[station.town].append(station)
        except KeyError:
            towns_stations[station.town] = [station]
towns_average_level = []
for town, stations in towns_stations.items():
#find the mean water level of all the stations in the town
mean_level = 0
number = 0
for station in stations:
            if station.relative_water_level() is not None:
mean_level += station.relative_water_level()
number += 1
if number > 0:
mean_level /= number
towns_average_level.append((town, mean_level))
#sort list of towns by their water level
towns_average_level = sorted_by_key(towns_average_level, 1, True)
return towns_average_level
def towns_flooding_risk(stations, N):
""" Return list of towns and risk of flooding """
#find the flooding level of each town
towns_level = towns_average_level(stations)[:N]
towns_risk = []
#give a rating of each water level in town
for town, level in towns_level:
risk = "Low"
if level > 2:
risk = "Severe"
elif level > 1.5:
risk = "High"
elif level > 1:
risk = "Moderate"
towns_risk.append((town, risk))
return towns_risk
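# A worked example of the thresholds above (illustrative numbers): if the
# stations in Cambridge average a relative level of 2.3 and those in Ely
# average 1.2, towns_flooding_risk(stations, 2) returns
# [('Cambridge', 'Severe'), ('Ely', 'Moderate')].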
|
from typing import List
import re
import numpy as np
import pandas as pd
from eunjeon import Mecab # Uses mecab for better performance
from sklearn.base import BaseEstimator, TransformerMixin
__all__ = ['ColumnSelector', 'ColumnMerger', 'WordUnifier',
'RegExReplacer', 'DuplicateRemover', 'StopWordRemover',
'WordLower', 'MorphTokenizer', 'NounTokenizer', 'PosTokenizer']
############################
# 1. DataFrame Preprocessing
# - ColumnSelector
# - ColumnMerger
############################
class ColumnSelector(BaseEstimator, TransformerMixin):
"""
    Select the column of the given DataFrame that the Pipeline will operate on
Example
>>> df = pd.DataFrame(data={ "과일" : ['사과','배','딸기'],"시장" : ['명동','상정','죽도']})
>>> cs = ColumnSelector("과일")
>>> cs.transform(df)
0 사과
1 배
2 딸기
Name: 과일, dtype: object
"""
def __init__(self, col_name):
self.col_name = col_name
def fit(self, X, y=None):
if self.col_name not in X.columns:
raise ValueError("DataFrame 내에 {}가 없습니다.".format(self.col_name))
return self
def transform(self, X):
return X[self.col_name]
class ColumnMerger(BaseEstimator, TransformerMixin):
"""
    Join the strings of the given DataFrame columns into one string per row
Example
>>> df = pd.DataFrame(data={ "과일" : ['사과','배','딸기'],"시장" : ['명동','상정','죽도']})
>>> cs = ColumnMerger(['과일','시장'])
>>> cs.transform(df)
0 사과 명동
1 배 상정
2 딸기 죽도
dtype: object
"""
def __init__(self, col_names=[]):
self.col_names = col_names
def fit(self, X, y=None):
for col_name in self.col_names:
if col_name not in X.columns:
raise ValueError("DataFrame 내에 {}가 없습니다.".format(col_name))
return self
def transform(self, X):
return X[self.col_names].apply(lambda x: " ".join(x), axis=1)
############################
# 2. Basic NLP Preprocssing
# - WordUnifier
#
# - DuplicateRemover
# - StopWordRemover
# - RegExReplacer
#
# - WordLower
############################
class WordUnifier(BaseEstimator, TransformerMixin):
"""
    Unify different spellings of the same word into one canonical form
    # TODO : easy to implement, but a careless implementation will likely cause speed issues.
    # A version without performance problems is wanted!
Example
>>> sample = np.array(['삼성전자 노트북', "노트북 삼성", "samsung 스마트폰", 'lg 폰', "엘지전자 상거래"])
>>> wu = WordUnifier([["삼성","삼성전자",'samsung'], ["엘지",'엘지전자','lg']])
>>> wu.transform(sample)
array(['삼성 노트북', "노트북 삼성", "삼성 스마트폰", '엘지 폰', "엘지 상거래"], dtype=object)
"""
def __init__(self, words_list=[]):
self._words_list = words_list
def fit(self, X, y=None):
return self
def transform(self, X):
if isinstance(X, np.ndarray):
x_shape = X.shape
return np.array([self._transform(phrase)
for phrase in X.ravel()]).reshape(x_shape)
elif isinstance(X, pd.Series):
return X.map(self._transform)
elif isinstance(X, pd.DataFrame):
return X.applymap(self._transform)
elif isinstance(X, list) or isinstance(X, tuple):
return [self._transform(phrase) for phrase in X]
else:
raise TypeError("적절하지 못한 DataType이 들어왔습니다.")
    def _transform(self, phrase):
        # Map every variant in a group onto the group's first (canonical) word.
        # Plain substring replacement: simple, though not the fastest option.
        for group in self._words_list:
            canonical = group[0]
            for variant in group[1:]:
                phrase = phrase.replace(variant, canonical)
        return phrase
class RegExReplacer(BaseEstimator, TransformerMixin):
"""
    Replace words using regular expressions
    Words matching the given patterns are substituted with a placeholder token
    Example
    >>> sample = ["쌀 10kg 판매", "물 10L 판매", "신제품 노트북"]
    >>> rr = RegExReplacer()
    >>> rr.transform(sample)
    ['쌀 <단위> 판매', '물 <부피단위> 판매', '신제품 노트북']
"""
def __init__(self, regex_list=[]):
self._regex_list = regex_list
def fit(self, X, y=None):
        return self
def transform(self, X):
if isinstance(X, np.ndarray):
x_shape = X.shape
return np.array([self._transform(phrase)
for phrase in X.ravel()]).reshape(x_shape)
elif isinstance(X, pd.Series):
return X.map(self._transform)
elif isinstance(X, pd.DataFrame):
return X.applymap(self._transform)
elif isinstance(X, list) or isinstance(X, tuple):
return [self._transform(phrase) for phrase in X]
else:
raise TypeError("적절하지 못한 DataType이 들어왔습니다.")
@staticmethod
def _transform(phrase) -> List:
if re.search(r'[0-9]+(kg|KG|Kg)', phrase) is not None:
result = re.sub(r'[0-9]+(kg|KG|Kg)', '<단위>', phrase)
elif re.search(r'[0-9]+.(L)', phrase) is not None:
result = re.sub(r'[0-9]+(L)', '<부피단위>', phrase)
else:
result = phrase
return result
class DuplicateRemover(BaseEstimator, TransformerMixin):
"""
    Remove duplicated words
Example
>>> sample = np.array(['청동 사과 할인 특가 사과', "삼성 컴퓨터 특가 세일 삼성", "완전 싸다 완전 초대박 싸다"])
>>> dr = DuplicateRemover()
>>> dr.transform(sample)
array(['청동 사과 할인 특가', '삼성 컴퓨터 특가 세일', '완전 싸다 초대박'], dtype='<U12')
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
if isinstance(X, np.ndarray):
x_shape = X.shape
return np.array([self._transform(phrase)
for phrase in X.ravel()]).reshape(x_shape)
elif isinstance(X, pd.Series):
return X.map(self._transform)
elif isinstance(X, pd.DataFrame):
return X.applymap(self._transform)
elif isinstance(X, list) or isinstance(X, tuple):
return [self._transform(phrase) for phrase in X]
else:
            raise TypeError("An unsupported data type was passed.")
@staticmethod
def _transform(phrase):
return " ".join(list(dict.fromkeys(phrase.split(" "))))
class StopWordRemover(BaseEstimator, TransformerMixin):
"""
    Remove stop words
Example
>>> sample = ["노트북 할인 판매", "옷 기타 완전 세일", "비아그라 할인", "클래식기타 판매 세일", "판매왕의 판매"]
>>> transformer = StopWordRemover(['판매', '기타'])
>>> transformer.transform(sample)
["노트북 할인", "옷 완전 세일", "비아그라 할인", "클래식기타 세일", "판매왕의"]
"""
def __init__(self, stop_words=[]):
self._stop_words = stop_words
self._sw_regex = re.compile(r'\b%s\b' %
r'\b|\b'.join(map(re.escape, self._stop_words)))
self._ds_regex = re.compile(r"\s+")
def fit(self, X, y=None):
return self
def transform(self, X):
if isinstance(X, np.ndarray):
x_shape = X.shape
return np.array([self._transform(phrase)
for phrase in X.ravel()]).reshape(x_shape)
elif isinstance(X, pd.Series):
return X.map(self._transform)
elif isinstance(X, pd.DataFrame):
return X.applymap(self._transform)
elif isinstance(X, list) or isinstance(X, tuple):
return [self._transform(phrase) for phrase in X]
else:
            raise TypeError("An unsupported data type was passed.")
def _transform(self, phrase):
_phrase = self._sw_regex.sub("", phrase)
return self._ds_regex.sub(" ", _phrase).strip()
class WordLower(BaseEstimator, TransformerMixin):
"""
    Lowercase all words
>>> sample = np.array(['Kang', "KAM", "Kan"])
>>> wl = WordLower()
>>> wl.transform(sample)
array(['kang', 'kam', 'kan'], dtype='<U4')
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
if isinstance(X, np.ndarray):
x_shape = X.shape
return np.array([self._transform(phrase)
for phrase in X.ravel()]).reshape(x_shape)
elif isinstance(X, pd.Series):
return X.map(self._transform)
elif isinstance(X, pd.DataFrame):
return X.applymap(self._transform)
elif isinstance(X, list) or isinstance(X, tuple):
return [self._transform(phrase) for phrase in X]
else:
            raise TypeError("An unsupported data type was passed.")
@staticmethod
def _transform(word):
return word.lower()
############################
# 3. Tokenizer
# - MorphTokenizer
# - NounTokenizer
# - PosTokenizer
# TODO : this part will be refactored once the transform code is finished.
# Open question:
# the plan is to compose this by wrapping konlpy, but the design assumes the
# Twitter tokenizer is the main one in use (not a great assumption, and it
# hurts the flexibility of the code). Still unsure how this should be
# extended so the code ends up in better shape.
############################
class MorphTokenizer(BaseEstimator, TransformerMixin):
def __init__(self):
self._mecab = Mecab()
def fit(self, X, y=None):
return self
def transform(self, X):
if isinstance(X, np.ndarray):
x_shape = X.shape
return np.array([self._transform(phrase)
for phrase in X.ravel()]).reshape(x_shape)
elif isinstance(X, pd.Series):
return X.map(self._transform)
elif isinstance(X, pd.DataFrame):
return X.applymap(self._transform)
elif isinstance(X, list) or isinstance(X, tuple):
return [self._transform(phrase) for phrase in X]
else:
            raise TypeError("An unsupported data type was passed.")
def _transform(self, phrase):
return " ".join(self._mecab.morphs(phrase))
class NounTokenizer(BaseEstimator, TransformerMixin):
def __init__(self):
self._mecab = Mecab()
def fit(self, X, y=None):
return self
def transform(self, X):
if isinstance(X, np.ndarray):
x_shape = X.shape
return np.array([self._transform(phrase)
for phrase in X.ravel()]).reshape(x_shape)
elif isinstance(X, pd.Series):
return X.map(self._transform)
elif isinstance(X, pd.DataFrame):
return X.applymap(self._transform)
elif isinstance(X, list) or isinstance(X, tuple):
return [self._transform(phrase) for phrase in X]
else:
            raise TypeError("An unsupported data type was passed.")
def _transform(self, phrase):
return " ".join(self._mecab.nouns(phrase))
class PosTokenizer(BaseEstimator, TransformerMixin):
def __init__(self, norm=False, stem=False,
excludes=['Punctuation', 'Number', 'Foreign']):
self._norm = norm
self._stem = stem
self._excludes = excludes
self._mecab = Mecab()
def fit(self, X, y=None):
return self
def transform(self, X):
if isinstance(X, np.ndarray):
x_shape = X.shape
return np.array([self._transform(phrase)
for phrase in X.ravel()]).reshape(x_shape)
elif isinstance(X, pd.Series):
return X.map(self._transform)
elif isinstance(X, pd.DataFrame):
return X.applymap(self._transform)
elif isinstance(X, list) or isinstance(X, tuple):
return [self._transform(phrase) for phrase in X]
else:
            raise TypeError("An unsupported data type was passed.")
def _transform(self, phrase):
pos_list = self._mecab.pos(phrase)
pos_drop = list(filter(
lambda pos: pos[1] not in self._excludes, pos_list))
if len(pos_drop) == 0:
return ""
else:
return " ".join(list(zip(*pos_drop))[0])
|
# -*- coding: utf-8 -*-
import scrapy
class KindleTelegramSpider(scrapy.Spider):
name = 'kindle_telegram'
    allowed_domains = ['amazon.com.br']
start_urls = ['https://www.amazon.com.br/gp/product/B0773XBMB6/']
def parse(self, response):
        price = response.xpath('//span[@id="priceblock_ourprice"]/text()').get()
        self.log('Kindle Paperwhite price: %s' % price)
|
#!usr/bin/env python
from sprite import *
import universal_var
import timer
import bar
import projectile
class World_camera(object):
world_location = [0, 0] #player position from the origin
original_position = [250, 200]
x = original_position[0]
y = original_position[1]
x_offset = 0
y_offset = 0
all_timers = timer.Timer()
all_timers.add_ID('camera_shake_interval', 0)
target_focus = None
static = False
@classmethod
def follow(cls, sprite_surf=None):
if sprite_surf == None:
cls.static = True
else:
cls.target_focus = sprite_surf
if cls.static:
xdist = 0
ydist = 0
else:
xdist = cls.x - cls.target_focus.x
ydist = cls.y - cls.target_focus.y
for obj in Sprite_surface.all_sprite_surfaces:
#readjust position
if isinstance(obj, bar.Energy_bar) != True:
obj.x += xdist
obj.display_offset = [cls.x_offset, cls.y_offset]
if obj.ID != 'megaman':
obj.spawn_point[0] += xdist
if isinstance(obj, projectile.Projectile):
obj.init_x += xdist
Sprite_surface.update(obj)
#display onto screen
try:
if obj.is_on_screen() and obj.is_active:
obj.display(Sprite_surface.display_screen)
except AttributeError:
#obj.display_collboxes(Sprite_surface.display_screen)
pass
if universal_var.debug:
obj.display_collboxes(Sprite_surface.display_screen)
if universal_var.debug == True:
for obj in Sprite_surface.all_sprite_surfaces:
if isinstance(obj, bar.Energy_bar) != True:
obj.y += ydist
if obj.ID != 'megaman':
obj.spawn_point[1] += ydist
if isinstance(obj, projectile.Projectile):
obj.init_y += ydist
Sprite_surface.update(obj)
cls.world_location[0] -= xdist
if universal_var.debug == True:
cls.world_location[1] -= ydist
@classmethod
def move(cls, speed, direction):
x = 0
y = 0
if direction == 'right':
cls.x -= speed
x -= speed
if direction == 'left':
cls.x += speed
x += speed
if direction == 'down':
cls.y -= speed
y -= speed
if direction == 'up':
cls.y += speed
y += speed
for obj in Sprite_surface.all_sprite_surfaces:
if isinstance(obj, bar.Energy_bar) != True:
obj.x += x
obj.y += y
if obj != cls.target_focus:
obj.spawn_point[0] += x
obj.spawn_point[1] += y
if isinstance(obj, projectile.Projectile):
obj.init_x += x
obj.init_y += y
cls.world_location[0] -= x
cls.world_location[1] -= y
@classmethod
def shake(cls, x_offset, y_offset, speed=5):
half_way_time = cls.all_timers.get_ID('camera_shake_interval')['origin']//2
if cls.all_timers.is_almost_finished('camera_shake_interval', half_way_time):
cls.x_offset = x_offset
cls.y_offset = y_offset
else:
cls.x_offset = -x_offset
cls.y_offset = -y_offset
cls.all_timers.countdown('camera_shake_interval', speed, loop=True)
@classmethod
def update(cls):
if cls.target_focus != None and universal_var.debug != True and universal_var.hitbox in cls.target_focus.collbox_dict:
if check_camerabox_collision(cls.target_focus) != True and Transition_box.in_transition_mode != True and cls.target_focus.is_active:
cls.static = False
elif check_camerabox_collision(cls.target_focus) or Transition_box.in_transition_mode:
cls.static = True
cls.follow(cls.target_focus)
if universal_var.game_reset != True:
check_transitionbox_collision(cls.target_focus)
if Transition_box.current_box != None and Transition_box.in_transition_mode == True:
transition_screen()
elif universal_var.debug:
cls.static = False
cls.follow(cls.target_focus)
else:
cls.static = True
cls.follow()
#reset position
cls.x, cls.y = cls.original_position[0], cls.original_position[1]
cls.x_offset, cls.y_offset = 0, 0
#--------------------------------------
class Camera_box(Sprite_surface):
all_camera_box = []
def __init__(self, ID, x, y, width, height, display_layer=1, colour=(62, 48, 255)):
coll_boxes = [Collision_box(universal_var.hitbox, 300, 400, width, height, colour=colour)]
super().__init__(ID, x, y, None, coll_boxes, display_layer)
Camera_box.add_to_class_lst(self, Camera_box.all_camera_box, ID)
#------------------------------------------------------------------------------------------
class Transition_box(Sprite_surface): #Use to transition the full screen in a direction
all_transition_box = []
all_timers = timer.Timer()
all_timers.add_ID('transition_start', 20)
all_timers.add_ID('transition_end', 20)
in_transition_mode = False #If the screen is currently in transition
current_box = None #Whichever box activated the transition
transition_speed = 10
def __init__(self, ID, x, y, display_layer=1, direction='left', size=200):
if direction == 'left' or direction == 'right':
width = 10
height = size
else:
width = size
height = 10
coll_boxes = [Collision_box(universal_var.hitbox, 300, 400, width, height, colour=(200, 255, 100))]
super().__init__(ID, x, y, None, coll_boxes, display_layer)
Transition_box.add_to_class_lst(self, Transition_box.all_transition_box, ID)
self.all_timers = timer.Timer()
self.original_direction = direction
self.direction = direction
if direction == 'left' or direction == 'right':
Transition_box.all_timers.add_ID(ID, universal_var.screen_width) #Timer for how long the transition should be
else:
Transition_box.all_timers.add_ID(ID, universal_var.screen_height)
def switch_dir(self):
if self.direction == 'left':
self.direction = 'right'
elif self.direction == 'right':
self.direction = 'left'
elif self.direction == 'up':
self.direction = 'down'
else:
self.direction = 'up'
#-------------------------------------------------------------
#--Functions--
def check_camerabox_collision(sprite_surf):
collisions = sprite_surf.check_collision_lst(Camera_box.all_camera_box, universal_var.hitbox, universal_var.hitbox, quota=1)
return collisions.is_empty() != True
def check_transitionbox_collision(sprite_surf):
collisions = sprite_surf.check_collision_lst(Transition_box.all_transition_box, universal_var.hitbox, universal_var.hitbox, quota=1) #returns stack of 1 collision
if collisions.is_empty() != True and Transition_box.in_transition_mode != True:
tbox = collisions.pop()
Transition_box.current_box = tbox
Transition_box.in_transition_mode = True
Transition_box.all_timers.replenish_timer(tbox.ID)
def transition_screen():
tbox = Transition_box.current_box
if Transition_box.all_timers.is_finished('transition_start') != True:
Transition_box.all_timers.countdown('transition_start') #Wait for a little pause before transitioning
universal_var.game_pause = True
else:
if Transition_box.all_timers.is_finished(tbox.ID) != True: #If timer is not empty keep transitioning
universal_var.game_pause = False
World_camera.move(Transition_box.transition_speed, tbox.direction)
sprite_surf = World_camera.target_focus
if tbox.direction == 'right': #move megaman
sprite_surf.follow(x=tbox.collbox_dict[universal_var.hitbox].x + 30, x_vel=2)
elif tbox.direction == 'left':
sprite_surf.follow(x=(tbox.collbox_dict[universal_var.hitbox].x - World_camera.target_focus.width - 30), x_vel=2)
elif tbox.direction == 'up':
sprite_surf.follow(y=(tbox.collbox_dict[universal_var.hitbox].y - World_camera.target_focus.height), y_vel=2)
else:
sprite_surf.follow(y=tbox.collbox_dict[universal_var.hitbox].y + 25, y_vel=2)
Transition_box.all_timers.countdown(tbox.ID, countdown_speed=Transition_box.transition_speed)
else:
if Transition_box.all_timers.is_finished('transition_end') != True:
Transition_box.all_timers.countdown('transition_end') #wait for a little pause again before finishing transition
universal_var.game_pause = True
else:
universal_var.game_pause = False
Transition_box.in_transition_mode = False
tbox.switch_dir()
Transition_box.all_timers.replenish_timer('transition_start')
Transition_box.all_timers.replenish_timer('transition_end')
def camera_transitioning():
return Transition_box.in_transition_mode
def transition_start(): #returns true if the transition phase is at the beginning
if camera_transitioning():
return Transition_box.all_timers.is_finished('transition_start') != True
else:
return False
def transition_end():
if camera_transitioning():
return Transition_box.all_timers.is_finished(Transition_box.current_box.ID)
else:
return False
|
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DATASET_ROOT = os.path.join(PROJECT_ROOT, 'data')
|
import inspect
from functools import partial
from remake.operators.base import BaseOperator
class ParameterOperator(BaseOperator):
def __init__(self, name, sources, transform: callable, default_value, helper, dtype="_right"):
super().__init__(name, sources, priority="LAZY", order=1, type="PARALLEL")
self.transform = transform
self.default_value = default_value
self.helper = helper
self.dtype = dtype
def __call__(self, value):
pass
def Parameter(name, identifier=None, default_value=None, transform=None, helper=None, dtype=None):
class_name = inspect.stack()[1][0].f_locals.get("__qualname__", None)
if not class_name:
return
transformer_name = to_transformer_name(class_name)
if identifier is None:
identifier = name
SchematicCache.add_parameter(transformer_name, name, ParameterContainer(name=name,
identifier=identifier,
default_value=default_value,
transform=transform,
helper=helper,
dtype=dtype))
def parameter(function=None, helper=None, dtype=None, default_value=None):
if function is None:
return partial(parameter, helper=helper, dtype=dtype, default_value=default_value)
transformer, param_name, input_columns = get_info(function)
SchematicCache.add_parameter(transformer, param_name, ParameterContainer(name=input_columns[0],
identifier=param_name,
transform=function,
dtype=dtype,
helper=helper,
default_value=default_value))
return function
|
from modeltranslation.translator import translator, TranslationOptions
from .models import MetaTag
class MetaTagTranslationOptions(TranslationOptions):
fields = ('title', 'keywords', 'description')
translator.register(MetaTag, MetaTagTranslationOptions)
|
import logging
import numpy as np
import pandas as pd
from countess.plugins.scoring import BaseScorerPlugin
from countess.plugins.options import Options
from countess.base.constants import WILD_TYPE_VARIANT
from countess.base.utils import log_message
from countess.base.constants import IDENTIFIERS, VARIANTS
options = Options()
options.add_option(
name="Normalization Method",
varname="logr_method",
dtype=str,
default="Wild Type",
choices={"Wild Type": "wt", "Full": "full", "Complete": "complete"},
hidden=False,
)
class RatiosScorer(BaseScorerPlugin):
name = "Ratios"
version = "1.0"
author = "Alan Rubin, Daniel Esposito"
def compute_scores(self):
for label in self.store_labels():
self.calc_ratios(label)
def calc_ratios(self, label):
"""
Calculate frequency ratios and standard errors between the
last timepoint and the input. Ratios can be calculated using
one of three methods:
- wt
- complete
- full
"""
if self.store_check("/main/{}/scores".format(label)):
return
log_message(
logging_callback=logging.info,
msg="Calculating ratios ({})".format(label),
extra={"oname": self.name},
)
c_last = "c_{}".format(self.store_timepoints()[-1])
df = self.store_select(
key="/main/{}/counts".format(label), columns=["c_0", "{}".format(c_last)]
)
if self.logr_method == "wt":
if VARIANTS in self.store_labels():
wt_label = VARIANTS
elif IDENTIFIERS in self.store_labels():
wt_label = IDENTIFIERS
else:
raise ValueError(
"Failed to use wild type log "
"ratio method, suitable data "
"table not present [{}]".format(self.name)
)
shared_counts = self.store_select(
key="/main/{}/counts".format(wt_label),
columns=["c_0", "{}".format(c_last)],
where="index='{}'".format(WILD_TYPE_VARIANT),
)
# wild type not found
if len(shared_counts) == 0:
raise ValueError(
"Failed to use wild type log "
"ratio method, wild type "
"sequence not present [{}]".format(self.name)
)
shared_counts = shared_counts.values + 0.5
elif self.logr_method == "complete":
shared_counts = (
self.store_select(
key="/main/{}/counts".format(label),
columns=["c_0", "{}".format(c_last)],
)
.sum(axis="index")
.values
+ 0.5
)
elif self.logr_method == "full":
shared_counts = (
self.store_select(
key="/main/{}/counts_unfiltered".format(label),
columns=["c_0", "{}".format(c_last)],
)
.sum(axis="index", skipna=True)
.values
+ 0.5
)
else:
raise ValueError(
'Invalid log ratio method "{}" '
"[{}]".format(self.logr_method, self.name)
)
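        # The log ratio computed below is
        #   log[(c_last + 0.5) / (shared_last + 0.5)] - log[(c_0 + 0.5) / (shared_0 + 0.5)]
        # and the variance sums 1 / (count + 0.5) over the variant's two
        # timepoints plus the corresponding shared (normalizing) counts.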
ratios = np.log(df[["c_0", c_last]].values + 0.5) - np.log(shared_counts)
ratios = ratios[:, 1] - ratios[:, 0] # selected - input
ratios = pd.DataFrame(ratios, index=df.index, columns=["logratio"])
shared_variance = np.sum(1.0 / shared_counts)
summed = np.sum(1.0 / (df[["c_0", c_last]].values + 0.5), axis=1)
ratios["variance"] = summed + shared_variance
ratios["score"] = ratios["logratio"]
ratios["SE"] = np.sqrt(ratios["variance"])
# re-order columns
ratios = ratios[["score", "SE", "logratio", "variance"]]
self.store_put(
key="/main/{}/scores".format(label),
value=ratios,
data_columns=ratios.columns,
)
|
# Cryptopals
# Set 4 Challenge 25
# Break "Random Access Read/Write" AES CTR
import base64
from Crypto.Cipher import AES
from random import randint
def random_bytes(n):
random_ints = []
for i in range(n):
random_ints.append(randint(0,255))
return bytes(random_ints)
def binary_xOR(byte_code1,byte_code2):
result = b''
for i in range(len(byte_code1)):
result += bytes([byte_code1[i] ^ byte_code2[i]])
return result
# Encrypt with AES in ECB
def AES_ECB_encrypt(bytes_code, key):
aes_cipher = AES.new(key, AES.MODE_ECB)
return aes_cipher.encrypt(bytes_code)
def CTR_mode(key, nonce, blocksize, message):
ciphertext = b''
counter = 0
while len(ciphertext) < len(message):
key_stream = AES_ECB_encrypt(nonce + (counter).to_bytes(8, byteorder="little"), key)
block = message[blocksize * counter:blocksize * counter + blocksize]
ciphertext += binary_xOR(key_stream[:len(block)], block)
counter += 1
return ciphertext
def CTR_edit(ciphertext, key, nonce, blocksize, offset, newtext):
new_ciphertext = b''
counter = 0
offset_counter = offset
while len(new_ciphertext) < len(newtext):
keystream = AES_ECB_encrypt(nonce + (offset_counter).to_bytes(8, byteorder="little"), key)
block = newtext[blocksize * counter:blocksize * counter + blocksize]
new_ciphertext += binary_xOR(keystream[:len(block)], block)
counter += 1
offset_counter += 1
return ciphertext[:blocksize * offset] + new_ciphertext + ciphertext[(blocksize * offset) + len(new_ciphertext):]
RANDOM_KEY = random_bytes(16)
NONCE = random_bytes(8)
plaintexts = open("Set3/Challenge20Codes.txt", "r")
plaintext = b''
for line in plaintexts:
plaintext += base64.b64decode(line)
ciphertext = CTR_mode(RANDOM_KEY, NONCE, 16, plaintext)
recovered_plaintext = b''
for i in range(int(len(ciphertext) / 16) + 1):
known_block = b'\x00' * 16
keystream_block = CTR_edit(ciphertext, RANDOM_KEY, NONCE, 16, i, known_block)[i * 16:(i * 16) + 16]
ciphertext_block = ciphertext[i * 16:(i * 16) + 16]
recovered_plaintext += binary_xOR(keystream_block[:len(ciphertext_block)], ciphertext_block)
print(recovered_plaintext)
if (recovered_plaintext == plaintext):
print("It worked!")
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
import imutils
img1 = cv2.imread('qr_code.png',0) # queryImage
img2 = cv2.imread('qr_code_rotated90.jpg',0) # trainImage
img2 = imutils.resize(img2, width=600)
# Initiate ORB detector
orb = cv2.ORB_create(nfeatures=1000, scoreType=cv2.ORB_FAST_SCORE)
# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
print(kp1)
# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(des1, des2)
# Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)
# Draw the first 50 matches.
img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:50], None, flags=2)
plt.imshow(img3),plt.show()
|
# Indic library
import sys
from indicnlp import common
INDIC_NLP_LIB_HOME=r"indic_nlp_library"
INDIC_NLP_RESOURCES=r"indic_nlp_resources"
sys.path.append(r'{}\src'.format(INDIC_NLP_LIB_HOME))
common.set_resources_path(INDIC_NLP_RESOURCES)
from indicnlp.tokenize import indic_tokenize
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as T
from transformers import ViTFeatureExtractor, ViTModel
from utils.dataset import HVGDataset
from utils.custom_transform import ToSequence
from utils.config import Config
def train (feature_extractor, model, decoder, dataloader):
    for image, caption, target, target_seq_len in dataloader:
# print (f'image shape - {image.shape}')
# print (f'caption - {caption.shape}')
# print (f'target - {target.shape}')
# print (f'target_seq_len shape- {target_seq_len.shape}')
# print (f'target_seq_len - {target_seq_len}')
# print (f'image[0].shape {image [0].shape}')
# print (f'max - {image.max ()}')
# print (f'min - {image.min ()}')
images_list = [image [i] for i in range (config.batch_sz)]
# print (type (images_list))
# print (type (images_list [0]))
# print (images_list [0].shape)
inputs = feature_extractor(images=images_list, return_tensors="pt")
outputs = model(**inputs, output_attentions=False, output_hidden_states=False)
last_hidden_states = outputs.last_hidden_state
print (f'output shape - {last_hidden_states.shape}')
break
if __name__ == '__main__':
config = Config ()
text_transform = ToSequence (tokenizer=indic_tokenize.trivial_tokenize)
image_transform = T.Compose ([T.ToTensor(), T.Resize ((224, 224))])
train_dataset = HVGDataset (config.train_captions, config.word_to_index_path, config.index_to_word_path, config.images_path, config.max_len, text_transform=text_transform, image_transform=image_transform)
train_dataloader = DataLoader (train_dataset, batch_size=config.batch_sz, shuffle=True)
feature_extractor = ViTFeatureExtractor.from_pretrained(config.pretrained_vitfe_path)
model = ViTModel.from_pretrained(config.pretrained_vit_path)
    # NOTE: no decoder is built in this script yet, so pass None for now;
    # train() accepts the argument but does not use it.
    train (feature_extractor=feature_extractor, \
           model=model, \
           decoder=None, \
           dataloader=train_dataloader)
|
from model.contact import Contact
class ContactHelper:
def __init__(self, app):
self.app = app
def create(self, contact):
wd = self.app.wd
# fill contact page
self.open_contact_page()
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(contact.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lastname)
# submit creation
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.return_to_home_page_contact()
def return_to_home_page_contact(self):
wd = self.app.wd
wd.find_element_by_link_text("home page").click()
def open_contact_page(self):
wd = self.app.wd
if not (wd.current_url.endswith ("/edit.php") and len(wd.find_elements_by_name("submit")) > 0):
wd.find_element_by_link_text("add new").click()
def delete(self):
wd = self.app.wd
        # select and delete the first contact
wd.find_element_by_name("selected[]").click()
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
wd.find_element_by_link_text("home").click()
def edit(self, contact):
wd = self.app.wd
# fill contact page
wd.find_element_by_xpath("//img[@alt='Edit']").click()
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(contact.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lastname)
wd.find_element_by_name("update").click()
self.return_to_home_page_contact()
def count_contact(self):
wd = self.app.wd
self.open_contact_page()
        # count the rows that have a selection checkbox
return len(wd.find_elements_by_name("selected[]"))
def get_contact_list(self):
wd = self.app.wd
self.app.open_home_page()
contacts = []
for element in wd.find_elements_by_xpath("//tr[./td]"):
id = element.find_element_by_name("selected[]").get_attribute("id")
firstname = element.find_elements_by_tag_name("td")[2].text
lastname = element.find_elements_by_tag_name("td")[1].text
contacts.append(Contact(id=id, firstname=firstname, lastname=lastname))
return contacts
|
import argparse
import datetime
import json
import logging
import shutil
import sys
import lmdb
from dredis import db
from dredis.keyspace import Keyspace
from dredis.path import Path
logger = logging.getLogger(__name__)
BACKEND = 'lmdb'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dir', help='dredis data directory', required=True)
parser.add_argument('--output', choices=['lmdb', 'rdb'], help='output file type', required=True)
parser.add_argument('--backend-option', action='append',
help='database backend options (e.g., --backend-option map_size=BYTES)')
args = parser.parse_args()
db_backend_options = {}
if args.backend_option:
for option in args.backend_option:
if '=' not in option:
logger.error('Expected `key=value` pairs for --backend-option parameter')
sys.exit(1)
key, value = map(str.strip, option.split('='))
db_backend_options[key] = json.loads(value)
logger.info("Copying LMDB files...")
output_dir = copy_dirs(args.dir, db_backend_options)
if args.output == 'rdb':
logger.info("Saving RDB file...")
save_rdb(output_dir, db_backend_options)
shutil.rmtree(output_dir)
logger.info("Done!")
def save_rdb(output_dir, db_backend_options):
db.DB_MANAGER.setup_dbs(output_dir, BACKEND, db_backend_options)
keyspace = Keyspace()
keyspace.save()
def copy_dirs(input_basedir, db_backend_options):
lmdb_options = {
'readonly': True,
}
lmdb_options.update(db_backend_options)
output_basedir = datetime.datetime.utcnow().strftime('lmdb-backup_%Y-%m-%dT%H:%M:%S')
for dbid in range(db.NUMBER_OF_REDIS_DATABASES):
env_dirname = Path(input_basedir).join(str(dbid))
env = lmdb.open(bytes(env_dirname), **lmdb_options)
output_dir = Path(output_basedir).join(str(dbid))
output_dir.makedirs()
env.copy(bytes(output_dir))
return output_basedir
def setup_logging(level):
logger.setLevel(level)
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
if __name__ == '__main__':
setup_logging(logging.INFO)
main()
|
from sortedintersect.intersect import *
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from os.path import dirname
import os
import networkx as nx
""" Given a set of simulation runs and a threshold graph (output from Tills tool
gml2tg) for a arbitrary threshold and weight, generate one gml file with
networkx for each unique complex = each node in the threshold graph (Tills
tool contains an isomorphism check and all occurring nodes are unique complexes
from the input files).
Further write a list containing all filenames and some basic stats.
"""
def get_numbers_from_name(name):
""" Extract the filenumer and the graphnumber from the node name
"""
name = name.split("_")
file_number = int(name[3][:-4]) # -> remove .gml from number
graph_number = int(name[-1])
return(file_number, graph_number)
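# e.g. get_numbers_from_name("output_0.005_2.5_7.gml_870") -> (7, 870)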
def parse_complexes(labels, path_input_graphs, prefix_for_output_gmls, output_file):
""" Parse the complexes for each label and write a single gml file as well
as some stats.
"""
filenames_to_numbers = defaultdict(list)
for l in labels:
filename = "_".join(l.split("_")[:4])
graph_number = int(l.split("_")[-1])
filenames_to_numbers[filename].append(graph_number)
output = open(output_file, "w")
for filename in filenames_to_numbers:
current_file = open(path_input_graphs+filename[:-4]+".nx.gml", "r") # .nx.gml because of duplication for renaming, see below
count = -1
lines = []
current_graphs = sorted(filenames_to_numbers[filename])
i = 0
current_graph = current_graphs[i]
for line in current_file:
if line.strip("\n") == "graph [":
count += 1
if count == current_graph:
lines.append(line)
else:
if lines != []:
graph = nx.parse_gml(lines)
path = prefix_for_output_gmls+"{}_{}".format(filename, current_graph)
nx.write_gml(graph, path+".nx.gml")
os.system("sed '/label/d' {0}.nx.gml | sed \"s/name/label/\" > {0}.gml".format(path))
proteinnames = sorted(list(nx.get_node_attributes(graph,'name').values()))
print("{}_{}".format(filename, current_graph), graph.number_of_nodes(), graph.number_of_edges(), proteinnames, sep="\t", file=output)
lines = []
i += 1
if i < len(current_graphs):
current_graph = current_graphs[i]
if count == current_graph:
lines.append(line)
else:
break
output.close()
if __name__ == "__main__":
path_threshold_graph = snakemake.input[0]
path_input_graphs = snakemake.params.input_graphs
prefix_for_output_gmls = dirname(snakemake.output[0])+"/"
output_file = snakemake.output[0]
threshold_graph = nx.read_gml(path_threshold_graph)
labels = threshold_graph.nodes(data=False) # current format output_0.005_2.5_7.gml_870 with filenumber 7 and graphnumber 870
parse_complexes(labels, path_input_graphs, prefix_for_output_gmls, output_file)
""" networkx does not accept multiple labels in gml format, so protein names
are stored in the attribute "name" and the label is a unique id. The
standard format demands them to be "label", so the following preprocessing
is required before the tools from Till and Nils can use the gml files:
for f in *.gml; do cp $f $f.bak; sed '/label/d' $f.bak | sed "s/name/label/" > $f; done
"""
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def buildTree(self, inorder, postorder):
"""
:type inorder: List[int]
:type postorder: List[int]
:rtype: TreeNode
"""
inorderDict = {num: i for i, num in enumerate(inorder)}
def helper(start, end):
if start > end:
return None
rootVal = postorder.pop()
root = TreeNode(rootVal)
idx = inorderDict[rootVal]
root.right = helper(idx + 1, end)
root.left = helper(start, idx - 1)
return root
return helper(0, len(inorder) - 1)
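if __name__ == "__main__":
    # Quick sanity check on the classic example tree; the input lists below
    # are illustrative values, not part of the original solution.
    root = Solution().buildTree([9, 3, 15, 20, 7], [9, 15, 7, 20, 3])
    print(root.val, root.left.val, root.right.val)  # expected: 3 9 20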
|
"""Test eliminate common subexpr pass"""
from tvm import relay
from tvm.relay.op import register_alter_op_layout
from tvm.relay import ir_pass
def test_simple():
def before():
x = relay.var("x", shape=(1, 16))
y1 = relay.nn.relu(x)
y2 = relay.nn.relu(x)
y1 = relay.add(y1, relay.const(1.0, "float32"))
y2 = relay.add(y2, relay.const(1.0, "float32"))
y = relay.add(y1, y2)
f = relay.Function([x], y)
return f
def expected():
x = relay.var("x", shape=(1, 16))
y = relay.nn.relu(x)
y = relay.add(y, relay.const(1.0, "float32"))
y = relay.add(y, y)
f = relay.Function([x], y)
return f
z = before()
z = ir_pass.eliminate_common_subexpr(z)
assert ir_pass.alpha_equal(z, expected())
def test_callback():
def before():
x = relay.var("x", shape=(1, 16))
y1 = relay.nn.relu(x)
y2 = relay.nn.relu(x)
y1 = relay.add(y1, relay.const(1.0, "float32"))
y2 = relay.add(y2, relay.const(1.0, "float32"))
y = relay.add(y1, y2)
f = relay.Function([x], y)
return f
def expected():
x = relay.var("x", shape=(1, 16))
y = relay.nn.relu(x)
y1 = relay.add(y, relay.const(1.0, "float32"))
y2 = relay.add(y, relay.const(1.0, "float32"))
y = relay.add(y1, y2)
f = relay.Function([x], y)
return f
def fskip(expr):
if isinstance(expr, relay.expr.Call) and expr.op.name == 'add':
return True
return False
z = before()
z = ir_pass.eliminate_common_subexpr(z, fskip)
assert ir_pass.alpha_equal(z, expected())
if __name__ == "__main__":
test_simple()
test_callback()
|
# Copyright IBM Corp, 2016
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Django settings for client project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import ldap
import secret
from django_auth_ldap.config import LDAPSearch
from django_auth_ldap.config import LDAPSearchUnion
from django_auth_ldap.config import GroupOfNamesType
import project.projectdata.settings as dataSettings
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = secret.SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'account',
'project',
'project.projectdata.scm_git',
'project.projectdata.builder_shell',
'partner',
'multipleselection',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'file': {
# 'level': 'ERROR',
# 'class': 'logging.FileHandler',
# 'filename': '/var/log/client/client.log'
# },
# },
# 'loggers': {
# 'client': {
# 'handlers': ['file'],
# 'level': 'ERROR',
# 'propagate': True
# }
# }
# }
ROOT_URLCONF = 'client.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'client.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'clientdb', 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.' +
'UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.' +
'MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.' +
'CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.' +
'NumericPasswordValidator',
},
]
# Authentication backend
# https://docs.djangoproject.com/en/1.9/ref/settings/#authentication-backends
# https://pythonhosted.org/django-auth-ldap/install.html
AUTHENTICATION_BACKENDS = [
'django_auth_ldap.backend.LDAPBackend',
]
# LDAP authentication settings
# https://pythonhosted.org/django-auth-ldap/authentication.html
LDAP_ROOT_DN = 'dc=example,dc=com'
AUTH_LDAP_SERVER_URI = secret.LDAP_URI
AUTH_LDAP_START_TLS = True
LDAP_CACERTFILE = '/etc/ssl/certs/ca-chain.pem'
AUTH_LDAP_GLOBAL_OPTIONS = {
ldap.OPT_X_TLS_REQUIRE_CERT: ldap.OPT_X_TLS_DEMAND,
ldap.OPT_X_TLS_CACERTFILE: LDAP_CACERTFILE,
}
AUTH_LDAP_BIND_AS_AUTHENTICATING_USER = True
AUTH_LDAP_BIND_DN = secret.LDAP_BIND_DN
AUTH_LDAP_BIND_PASSWORD = secret.LDAP_BIND_PASSWORD
AUTH_LDAP_USER_SEARCH = LDAPSearchUnion(
LDAPSearch('ou=Users,' + LDAP_ROOT_DN,
ldap.SCOPE_SUBTREE, '(uid=%(user)s)'),
LDAPSearch('ou=Users,' + LDAP_ROOT_DN,
ldap.SCOPE_SUBTREE, '(mail=%(user)s)')
)
AUTH_LDAP_USER_ATTR_MAP = {
'username': 'uid',
'first_name': 'cn',
'last_name': 'sn',
'email': 'mail'
}
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
'ou=Groups,' + LDAP_ROOT_DN,
ldap.SCOPE_SUBTREE, '(objectClass=groupOfNames)'
)
AUTH_LDAP_GROUP_TYPE = GroupOfNamesType(name_attr='cn')
LDAP_USER_DN_TEMPLATE = 'uid=%s,ou=Users,' + LDAP_ROOT_DN
LDAP_GROUP_DN_TEMPLATE = 'cn=%s,ou=Groups,' + LDAP_ROOT_DN
# Gearman options
GEARMAN_HOST = '192.168.122.231'
GEARMAN_PORT = 4730
GEARMAN_SSH_PORT = '2200'
GEARMAN_SSH_USER = 'client'
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
REMOVE_DANGLING_PROJECTS = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
LOGIN_URL = '/account/signin'
dataSettings.DATA_LIST = {
'shell_script': [
'project.projectdata.scm_git.base.ScmGitData',
'project.projectdata.builder_shell.base.BuilderShellData',
],
}
dataSettings.PROJECT_TYPES = {
'shell_script': 'Write a shell script to build my project.',
}
# vim:set ft=python:
|
def intersect(nums1, nums2):
ret_list = []
if (nums1 is None or nums2 is None):
return ret_list
    nums1.sort()  # sort both inputs so the two-pointer walk below works
    nums2.sort()
i = 0
j = 0
while(i < len(nums1) and j < len(nums2)):
if(nums1[i] == nums2[j]):
ret_list.append(nums1[i])
i += 1
j += 1
elif(nums1[i] > nums2[j]):
j += 1
elif(nums1[i] < nums2[j]):
i += 1
return ret_list
def intersect_x(nums1,nums2):
res = []
for i in nums1:
if i in nums2:
res.append(i)
nums2.remove(i)
return res
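# Note: intersect() sorts both lists and merges them with two pointers,
# roughly O(n log n + m log m); intersect_x() uses repeated membership tests
# and removals on nums2, which is O(n * m) and mutates nums2.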
print(intersect([1,2,3,4],[1,1,4]))
|
"""Detection of country for authors affiliations."""
import requests
from bs4 import BeautifulSoup
# from typing import Union, List, Dict, Any
def get_references_from_hal(hal_id: str):
url = "https://hal.archives-ouvertes.fr/{}/html_references".format(hal_id)
try:
r = requests.get(url)
soup = BeautifulSoup(r.text, 'lxml')
references = []
for doi_reference in soup.find_all(class_='doi-reference-value'):
references.append('doi' + doi_reference.get_text().lower())
references = list(set(references))
return references
except Exception:
print("error in hal getting references for id hal {}".format(hal_id))
return []
|
import numpy as np
import itertools
def getBasePrefix(prefix):
return prefix.split(":")[0].split("-")[0].split("@")[0].split("#")[0].upper()
def getScopeGroups(trainPrefixes, scopeFname, ligandReceptorOrder=True, considerPairsInsteadMonomer=True):
'''
    Scope independence is enforced on pairs of families: e.g. (A-B) and (A-B') are independent, whereas (A-B) and (A'-B') are not.
    Given the path to a file that contains scope info as below, split the train and test prefixes
    in a leave-one-scope-family-out fashion.
<<<< scopeFname example >>>
1qfw IM AB b.1.1.1:l.1.1.1;b.1.1.1 g.17.1.4;g.17.1.4 2
2jel HL P b.1.1.1:b.1.1.2;b.1.1.1:b.1.1.2 d.94.1.1 2
1avx A B b.47.1.2 b.42.4.1 2
1ay7 A B d.1.1.2 c.9.1.1 2
1buh A B d.144.1.7 d.97.1.1 2
1bvn P T b.71.1.1:c.1.8.1 b.5.1.1 2
1clv A I b.71.1.1:c.1.8.1 g.3.2.1 4
1d6r A I b.47.1.2 g.3.13.1 2
1dfj E I d.5.1.1 c.10.1.1 2
1e6e A B c.3.1.1:c.4.1.1 d.15.4.1 2
or json file like
{"1AK4": {"r": [["A", "c-134"]], "l": [["D", "c-1694"]]}, "3K58": {"r": [["A", "c-545"]], "l": [["B", "c-719"]]}, "4H03": {"r": [["A", "c-33"]], "l": [["A", "c-136"]]}
    :param ligandReceptorOrder: True if the first scope column is for the ligand and the second for the receptor;
    if False, the first scope column is for the receptor and the second for the ligand.
    :param considerPairsInsteadMonomer: ensure independence at the level of scope pairs instead of individual monomers
'''
print("ensuring scope independency")
if scopeFname.endswith(".json"):
prefixToScope= loadFromJson(scopeFname)
else:
prefixToScope= loadFromTable(scopeFname, ligandReceptorOrder)
scopeGroups={ prefix: (set([prefix]),i) for i, prefix in enumerate(trainPrefixes)}
for i, prefix_i in enumerate(trainPrefixes):
test_ix=[]
train_ix=[]
prefix_i_base= getBasePrefix(prefix_i)
if prefix_i_base in prefixToScope:
scope_L, scope_R= prefixToScope[prefix_i_base]
for j, prefix_other in enumerate(trainPrefixes):
prefix_other_base= getBasePrefix(prefix_other)
if prefix_other_base in prefixToScope:
scope_L_other, scope_R_other= prefixToScope[prefix_other_base]
if considerPairsInsteadMonomer:
condition= (scope_L.intersection(scope_L_other) and scope_R.intersection(scope_R_other) or
scope_L.intersection(scope_R_other) and scope_R.intersection(scope_L_other) )
else:
scopes1= scope_L.union(scope_R)
scopes2= scope_L_other.union(scope_R_other)
condition= scopes1.intersection(scopes2)
if condition:
#Then merge scope groups
group_i, idx_i= scopeGroups[prefix_i]
group_other, idx_others= scopeGroups[prefix_other]
merged_group= group_i.union(group_other)
merged_idx= min(idx_i, idx_others)
scopeGroups[prefix_i]= (merged_group, merged_idx)
scopeGroups[prefix_other]= (merged_group, merged_idx)
prefix_to_idx= { prefix:idx for idx, prefix in enumerate(trainPrefixes)}
# print(prefix_to_idx)
groups=[ 0 for elem in trainPrefixes]
last_g_id=0
for i, (prefixes, g_id) in enumerate(sorted([ (prefixes, p_id) for prefixes, p_id in scopeGroups.values() ],
key=lambda x: x[1])):
if g_id!= last_g_id:
last_g_id= g_id
for prefix in prefixes:
groups[ prefix_to_idx[prefix] ] = g_id
return groups
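# Usage sketch (assumes scikit-learn is available; not part of the original pipeline):
# the group labels returned by getScopeGroups can be passed to sklearn's GroupKFold so that
# dependent scope family pairs never end up split across training and validation folds.
def exampleGroupKFoldSplit(trainPrefixes, scopeFname, n_splits=5):
    from sklearn.model_selection import GroupKFold
    groups = getScopeGroups(trainPrefixes, scopeFname)
    # GroupKFold only needs row indices, so a dummy design matrix of the right length suffices
    dummy_X = np.zeros((len(trainPrefixes), 1))
    for train_idx, val_idx in GroupKFold(n_splits=n_splits).split(dummy_X, groups=groups):
        yield ([trainPrefixes[i] for i in train_idx],
               [trainPrefixes[i] for i in val_idx])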
def loadFromTable(fname, ligandReceptorOrder):
prefixToScope={}
with open(fname) as f:
for line in f:
if line.startswith("#"): continue
lineArray= line.split()
if ligandReceptorOrder:
prefix, chainsL, chainsR, scopesR, scopesL= lineArray[:5]
else:
prefix, chainsR, chainsL, scopesR, scopesL= lineArray[:5]
prefix= prefix.upper()
scopesL= set( itertools.chain.from_iterable([elem.split(":") for elem in scopesL.split(";")] ))
scopesR= set( itertools.chain.from_iterable([elem.split(":") for elem in scopesR.split(";")] ))
prefixToScope[prefix]= ( scopesL, scopesR )
return prefixToScope
def loadFromJson(fname):
import json
prefixToScope={}
with open(fname) as f:
dataDict= json.load(f)
for prefix in dataDict:
prefix= prefix.upper()
scopesL= set( itertools.chain.from_iterable([ dataDict[prefix]["l"][chain] for chain in dataDict[prefix]["l"] ] ))
scopesR= set( itertools.chain.from_iterable([ dataDict[prefix]["r"][chain] for chain in dataDict[prefix]["r"] ] ))
prefixToScope[prefix]= ( scopesL, scopesR )
return prefixToScope
if __name__=="__main__":
import sys, json
scopeFname = sys.argv[1]
trainTestOutName = sys.argv[2]
nGroupsToSample=-1
if len(sys.argv)==4:
nGroupsToSample= int(sys.argv[3])
testPrefixes=[]
trainPrefixes=[]
with open(scopeFname) as f:
for line in f:
lineArray= line.split()
if len(lineArray)>0:
if lineArray[0].islower():
prefix = lineArray[0].strip() + "-" + lineArray[1] + lineArray[2]
trainPrefixes.append( prefix )
else:
prefix = lineArray[0].strip() # + "-" + lineArray[1] + lineArray[2]
testPrefixes.append( prefix )
scopeGroups=getScopeGroups(trainPrefixes+testPrefixes, scopeFname, ligandReceptorOrder=True, considerPairsInsteadMonomer=True)
nTrain= len(trainPrefixes)
trainGroups= scopeGroups[:nTrain]
    testGroups = scopeGroups[nTrain:]
    independentGroups = set(trainGroups).difference(testGroups)
    nItems = [len(trainGroups), len(testGroups), len(independentGroups)]
print( nItems )
independentTrainSet={}
for prefix, group in zip(trainPrefixes, trainGroups):
if group in independentGroups:
if group not in independentTrainSet:
independentTrainSet[group]=[]
independentTrainSet[group].append(prefix)
if nGroupsToSample>1:
        samplingIndex = np.random.choice(list(independentTrainSet.keys()), size=nGroupsToSample, replace=False)
independentTestSet_sampled= {key:independentTrainSet[key] for key in samplingIndex}
independentTrainSet=independentTestSet_sampled
data={"train":[], "test":[prefix for prefix in testPrefixes]}
for pdbIds in independentTrainSet.values():
data["train"].append( np.random.choice(pdbIds))
with open(trainTestOutName, "w") as f:
json.dump([data], f)
|
import a2s
from tensor_site import auth_tokens
from discord_webhook import DiscordWebhook, DiscordEmbed
import re
from servers.models import Server, PlayerCount
from django.core.management.base import BaseCommand
def send_discord_announce(server, successful):
webhook = DiscordWebhook(url=auth_tokens.Discord_Webhook_Server_Status)
if successful:
title = 'Server is back online'
description = "A server query was successful. The server might is back online."
color = 786176
else:
title = 'Server query failed'
description = "A server query failed. The server might be down."
color = 16711680
embed = DiscordEmbed(title=title,
description=description,
color=color,
url="https://data.tensor.fr/servers/")
embed.add_embed_field(name='Server', value=server.name, inline=True)
embed.add_embed_field(name='IP', value="{}:{}".format(
server.ip, server.port), inline=True)
webhook.add_embed(embed)
webhook.execute()
# Function tied to a cronjob that queries server telemetry
def playercounter():
labels = []
number = []
for server in Server.objects.all():
address = (server.ip, server.port)
attempts = 0
isdown = True
# Attempt to query the server 3 times maximum
while attempts < 3 and isdown:
try:
query = a2s.info(address, timeout=5, encoding=None)
playernumber = query.player_count
maxplayer = query.max_players
current_map = query.map_name.decode('utf-8')
HasWorkshop = re.search("^workshop/[0-9]*/", current_map)
servername = query.server_name.decode('utf-8')
if HasWorkshop:
current_map = current_map.replace(HasWorkshop[0], "")
isdown = False
queryset = PlayerCount.objects.filter(
server=server).order_by("-id")
if queryset and queryset[0].isdown:
try:
send_discord_announce(server, True)
except Exception as e:
continue
attempts += 1
except:
attempts += 1
continue
# If the queries failed 3 times in a row, send a discord message
if attempts == 3:
playernumber = 0
maxplayer = 64
current_map = "de_mirage"
isdown = True
servername = ""
# Discord webhook message
# Check if the server was down before to avoid spam
queryset = PlayerCount.objects.filter(
server=server).order_by("-id")
if not queryset:
try:
send_discord_announce(server, False)
except Exception as e:
continue
else:
if not queryset[0].isdown:
try:
send_discord_announce(server, False)
except Exception as e:
continue
        # Store the query results in the Django database so the data is not lost on restart.
serverstat = PlayerCount(player_count=playernumber,
max_player=maxplayer, server=server, current_map=current_map, isdown=isdown)
serverstat.save()
if servername != "" and servername != server.name:
server.name = servername
server.save()
Name_id = Server.objects.get(name=server.name)
queryset = PlayerCount.objects.filter(server=Name_id)
        # Delete the oldest entries once there are too many,
        # so that only a fixed time window of data is kept
if queryset.count() >= 36:
to_delete = PlayerCount.objects.values()[:1].get()
PlayerCount.objects.filter(id=to_delete['id']).delete()
class Command(BaseCommand):
help = 'Query the servers'
def handle(self, *args, **options):
playercounter()
|
"""
Module defining the class `Emulator`, from which emulators inherit
"""
from abc import ABC, abstractmethod
from typing import Callable
import numpy as np # type: ignore
def raise_not_implemented_error(*args, **kwargs):
    raise NotImplementedError()
class BaseEmulator(ABC):
"""
Base class from which emulators should inherit
This class is abstract.
The child class must implement the marked methods.
"""
def __init__(self):
        # The emulator functions should not exist until the model is trained
        self.emul_func: Callable[[np.ndarray], float] = raise_not_implemented_error
        self.emul_error: Callable[[np.ndarray], float] = raise_not_implemented_error
@abstractmethod
def set_emul_func(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
pass
@abstractmethod
def set_emul_error_func(self, x_cv: np.ndarray, y_cv_err: np.ndarray) -> None:
pass
def add_data(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
"""
Add data to the training set on the fly
By default this method does nothing.
Overriding this method is not mandatory (some emulators require a long
time to train, so adding a couple new points may not make sense).
Parameters
----------
x_train : np.ndarray
Array of x-values
y_train : np.ndarray
Array of y-values
"""
pass
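# Minimal concrete subclass, shown only as an illustration (not part of the original module):
# a nearest-neighbour emulator implementing the two abstract methods required above.
class NearestNeighbourEmulator(BaseEmulator):
    """Toy emulator that returns the target value of the closest training point."""
    def set_emul_func(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
        def emul_func(x: np.ndarray) -> float:
            # index of the training point closest to x
            idx = int(np.argmin(np.linalg.norm(x_train - x, axis=-1)))
            return float(y_train[idx])
        self.emul_func = emul_func
    def set_emul_error_func(self, x_cv: np.ndarray, y_cv_err: np.ndarray) -> None:
        def emul_error(x: np.ndarray) -> float:
            # error of the cross-validation point closest to x
            idx = int(np.argmin(np.linalg.norm(x_cv - x, axis=-1)))
            return float(y_cv_err[idx])
        self.emul_error = emul_error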
|
##
# This class represents a node within the network
#
import sys
import time
import warnings
from collections import deque
import numpy as np
import tensorflow as tf
from dgp_aepmcm.layers.gp_layer import GPLayer
from dgp_aepmcm.layers.input_layer import InputLayer
from dgp_aepmcm.layers.noise_layer import NoiseLayer
from dgp_aepmcm.layers.output_layer_classification import OutputLayerClassification
from dgp_aepmcm.layers.output_layer_regression import OutputLayerRegression
from .utils import (
ProblemType,
calculate_ETA_str,
extend_dimension_if_1d,
memory_used,
valid_q_initializations,
)
class DGPNetwork:
"""Creates a new Deep GP network using Approximate Expectation propagation and Monte Carlo Methods
Args:
x_train (ndarray): Training points (X)
y_train (ndarray): Training targets (y)
inducing_points (ndarray): If not None, initializations for the inducing points (Z) of the GP nodes
share_z_within_layer (Boolean): If True all the nodes in the GP same layer share
the same inducing points
share_kernel_params_within_layer (Boolean): If True all the nodes in the same GP layer
share the same kernel parameters but still using ARD kernel.
n_samples_training (int): Number of samples to use when training
n_samples_prediction (int): Number of samples to use when predicting
show_debug_info (Boolean): Show Epoch information when training
sacred_exp (): _run variable of sacred experiment information,
see: http://sacred.readthedocs.io/en/latest/collected_information.html
seed (int): Seed to use in random number generation functions
jitter (float): Jitter level to add to the diagonal of Kxx, bigger jitters improve numerical stability
minibatch_size (int): Minibatch size to use when initializing, training and predicting.
Smaller minibatches makes the training use less memory.
dtype (type): Type to use for inputs (X) of the network. Either np.float32/np.float64.
float64 will make the network more stable but slower.
"""
def __init__(
self,
x_train,
y_train,
inducing_points=None,
share_z_within_layer=False,
share_kernel_params_within_layer=False,
n_samples_training=20,
n_samples_prediction=100,
show_debug_info=True,
sacred_exp=None,
seed=None,
jitter=1e-5,
minibatch_size=100,
dtype=np.float32,
):
# Sometimes the Tensorflow graph is not deleted when the class is destroyed.
tf.reset_default_graph()
self.seed = seed
self.dtype = dtype
if seed is not None:
print(f"Random seed set: {seed}")
tf.set_random_seed(seed)
np.random.seed(seed)
self.x_train = x_train
self.y_train = y_train
self.inducing_points = inducing_points
self.share_z = share_z_within_layer
self.share_kernel_params = share_kernel_params_within_layer
self.show_debug_info = show_debug_info
# To store sacred experiments data (_run dictionary).
# More info: https://sacred.readthedocs.io/en/latest/collected_information.html
self.sacred_exp = sacred_exp
self.x_train = extend_dimension_if_1d(self.x_train)
self.y_train = extend_dimension_if_1d(self.y_train)
self.n_points = self.x_train.shape[0]
self.problem_dim = self.x_train.shape[1]
# Minibatch size to use in the network and reduce memory usage.
self.minibatch_size = min(self.n_points, minibatch_size)
# Three possible values, regression, bin_classification, multi_classification
self.problem_type = None
self.jitter = jitter
if self.inducing_points is not None:
self.inducing_points = extend_dimension_if_1d(self.inducing_points)
assert (
self.inducing_points.shape[1] == self.x_train.shape[1]
), "The inducing points dimensions must be the same as the X dimensions"
self.inducing_points = self.inducing_points.astype(self.dtype)
self.z_running_tf = self.inducing_points
self.x_tf = tf.placeholder(
self.dtype, name="x_input", shape=[None, self.x_train.shape[1]]
)
# If targets are integer -> classification problem
# If targets are -1 and 1 -> binary classification
# If targets have values from 0, 1, 2,.. n_classes - 1 -> multiclass classification
if np.sum(np.mod(self.y_train, 1)) == 0:
# There is no decimal in y training , we are probably in a classification problem
self.y_train = self.y_train.astype(np.int32)
if np.issubdtype(self.y_train.dtype, np.integer):
self.n_classes = np.max(self.y_train) + 1
# self.n_classes = len(np.unique(self.y_train)) # This one works even if the classes start at 1
y_type = tf.int32
if self.show_debug_info:
print(
f"Creating DGP network for classification problem with {self.n_classes} classes"
)
if self.n_classes == 2:
self.problem_type = ProblemType.BINARY_CLASSIFICATION
else:
self.problem_type = ProblemType.MULTICLASS_CLASSIFICATION
else:
if self.show_debug_info:
print(f"Creating DGP network for regression problem")
self.problem_type = ProblemType.REGRESSION
y_type = self.dtype
        # TODO: merge these two placeholders into one, as with x_tf
self.y_train_tf = tf.placeholder(
y_type, name="y_training", shape=[None, self.y_train.shape[1]]
)
        self.y_test_tf = tf.placeholder(
            y_type, name="y_test", shape=[None, self.y_train.shape[1]]
        )
self.y_train_mean_tf = None
self.y_train_std_tf = None
self.layers = []
self.initialized = False
self._predict_function = None
self.session_saved = False
self.n_samples_dict = {
"training": n_samples_training, # num samples for training
"prediction": n_samples_prediction, # num samples for prediction
}
# Placeholder for the status of the network.
# 1 -> Training 0 -> Prediction
# Tells the network the right number of samples to use (training or prediction)
# and uses either the cavity (training) or the posterior (prediction) in the GP node
self.network_set_for_training_tf = tf.placeholder(
self.dtype, shape=(), name="network_set_for_training"
)
self.x_running_tf = tf.cast(self.x_train, self.dtype)
self.sess = tf.Session()
self.saver = None
self.objective_energy_function = None
self.trainable_params = None
self.gradient_optimization_step = None
def add_input_layer(self):
"""Adds an input layer to the network.
The input layer is in charge of replicating the x_train of shape (N,D) to shape (S,N,D)
"""
assert not self.layers, "Network should be empty"
with tf.variable_scope("Input_layer"):
new_layer = InputLayer(
self.x_tf,
self.problem_dim,
self.n_samples_dict,
self.network_set_for_training_tf,
)
self._stack_new_layer(new_layer)
def add_noise_layer(self, noise_initial_value=0.01):
"""Adds noise to the variance of the output of the layer
Args:
noise_initial_value (float): Initial value for the noise
"""
assert self.layers, "Network should have an input node"
# TODO: Reduce default noise?
new_layer = NoiseLayer(self.dtype, noise_initial_value)
self._stack_new_layer(new_layer, self.layers[-1])
def add_gp_layer(
self, n_inducing_points, n_nodes=1, q_initializations="random", W=None
):
"""Adds a Gaussian processes layer
Args:
n_inducing_points (int): Number of inducing points (Z)
n_nodes (int): Number of GP nodes of the layer, the number of nodes will be the output dim. of the layer
q_initializations (str): Initializations of the posterior approximation q(u) params. Valid values are:
'random' (default): Mean and covariance initialized from random normal.
'deterministic': Mean initialized to mean of the prior p(u) and cov. to 1e-5 * Kzz (1e-5 * prior cov)
'prior': Mean and cov. initialized to the prior covariance.
W (ndarray): Mean function weights of the GP m(x) = XW, if None, the identity matrix will be used
"""
if q_initializations not in valid_q_initializations():
raise ValueError(
f"initializations should take a value from {valid_q_initializations()}"
)
assert self.layers, "Network should have an input node"
with tf.variable_scope(f"Layer_{len(self.layers)}_GP"):
is_first_layer = len(self.layers) == 1
# The dim of the layer is the number of nodes in the last one
input_dim_layer = self.layers[-1].n_nodes
output_dim_layer = n_nodes
Z = None
if self.inducing_points is not None:
Z = tf.identity(self.z_running_tf)
# set mean function weights of the layer.
# W should have the same dimension [1] as the number of nodes in the layer
if W is None:
W = self._linear_mean_function(input_dim_layer, output_dim_layer)
else:
W = tf.cast(W, self.dtype)
self.x_running_tf = tf.matmul(self.x_running_tf, W)
if self.inducing_points is not None:
self.z_running_tf = self.z_running_tf @ W
assert W.shape == [input_dim_layer, output_dim_layer], (
f"The given mean weights must be of shape [input_d({input_dim_layer}), output_d({output_dim_layer})], "
f"Given: {W.shape}"
)
new_layer = GPLayer(
W=W,
n_inducing_points=n_inducing_points,
n_points=self.n_points,
n_nodes=output_dim_layer,
input_d=input_dim_layer,
first_gp_layer=is_first_layer,
jitter=self.jitter,
share_z=self.share_z,
share_kernel_params=self.share_kernel_params,
q_initializations=q_initializations,
z_initializations=Z,
seed=self.seed,
dtype=self.dtype,
)
self._stack_new_layer(new_layer, self.layers[-1])
def _linear_mean_function(self, input_dim_layer, output_dim_layer):
""" Sets the W for the mean function m(X) = XW
The last GP layer will have m(X) = 0. This method is based on:
Doubly Stochastic Variational Inference for Deep Gaussian Processes https://arxiv.org/abs/1705.08933
Args:
input_dim_layer (int): Input dimension to the layer. (Number of nodes in the last layer)
output_dim_layer (int): Dimension of the layer. (Number of nodes in the layer)
"""
if input_dim_layer == output_dim_layer:
W = tf.eye(output_dim_layer, dtype=self.dtype)
elif output_dim_layer > input_dim_layer:
zeros = tf.zeros(
(input_dim_layer, output_dim_layer - input_dim_layer), dtype=self.dtype
)
W = tf.concat([tf.eye(input_dim_layer, dtype=self.dtype), zeros], 1)
self.x_running_tf = tf.matmul(self.x_running_tf, W)
if self.inducing_points is not None:
self.z_running_tf = self.z_running_tf @ W
elif output_dim_layer < input_dim_layer:
_, _, V = tf.svd(self.x_running_tf)
# Using the first output_dim_layer values of the input X
W = tf.transpose(V[:output_dim_layer, :])
self.x_running_tf = tf.matmul(self.x_running_tf, W)
if self.inducing_points is not None:
self.z_running_tf = self.z_running_tf @ W
return W
def _set_mean_function_last_layer(self):
        # Set the mean function of the last GP layer to zero
for layer in reversed(self.layers):
if isinstance(layer, GPLayer):
for node in layer.get_node_list():
node.W = tf.zeros_like(node.W)
return
def _stack_new_layer(self, new_layer, previous_layer=None):
# Previous layer should be None only when adding the input layer
if previous_layer is not None:
new_layer.stack_on_previous_layer(previous_layer)
self.layers.append(new_layer)
def add_output_layer_regression(self):
""" Add an output layer for regression to the network
This mean that a Gaussian Likelihood is used.
"""
assert self.layers, "Network should have an input node"
self._require_normalized_y()
with tf.variable_scope(f"Layer_{len(self.layers)}_Out"):
new_layer = OutputLayerRegression(
self.y_train_tf,
self.y_test_tf,
self.y_train_mean_tf,
self.y_train_std_tf,
self.n_samples_dict,
self.dtype,
)
new_layer.stack_on_previous_layer(self.layers[-1])
self.layers.append(new_layer)
def add_output_layer_binary_classification(self, use_norm_cdf=False):
"""Adds an output layer for binary classification to the network.
Args:
use_norm_cdf (Boolean): Add bias term (+1) to the variance of f^L (+0 if False).
if use_norm_cdf == True then likelihood p(y | f^L) will be norm.cdf(y_train * f^L)
if use_norm_cdf == False then likelihood p(y | f^L) will be heavyside(y_train * f^L)
"""
self._add_output_layer_classification(use_norm_cdf=use_norm_cdf)
def add_output_layer_multiclass_classification(
self, noise_in_labels=False, noise_in_labels_trainable=True
):
"""Adds an output layer for multiclass classification to the network.
Args:
noise_in_labels (Boolean): If true the likelihood will take into account
that there may be wrong labeled examples.
Using a robust multiclass likelihood (as in GPflow when using Multiclass likelihood).
noise_in_labels_trainable (Boolean): Specifies if the noise in labels is a trainable parameter.
Note: For fair comparison with DGP-VI it should be set to False,
for other tasks it should be set to True as it makes the network more robust.
This parameter is ignored is noise_in_labels=False.
"""
self._add_output_layer_classification(
noise_in_labels=noise_in_labels,
noise_in_labels_trainable=noise_in_labels_trainable,
)
def _add_output_layer_classification(
self, *, use_norm_cdf=False, noise_in_labels=False, noise_in_labels_trainable=True
):
""" Private function. Refer to either:
add_output_layer_binary_classification()
add_output_layer_multiclass_classification()
"""
assert self.layers, "Network should have an input node"
variance_bias = (
tf.constant(1.0, dtype=self.dtype)
if use_norm_cdf
else tf.constant(0.0, dtype=self.dtype)
)
with tf.variable_scope("Layer_{}_Out".format(len(self.layers))):
new_layer = OutputLayerClassification(
self.y_train_tf,
self.y_test_tf,
self.n_samples_dict,
self.n_classes,
variance_bias,
noise_in_labels,
noise_in_labels_trainable,
self.dtype,
)
new_layer.stack_on_previous_layer(self.layers[-1])
self.layers.append(new_layer)
def add_output_layer_regression_multioutput(self, n_outputs):
raise NotImplementedError()
# assert self.layers, "Network should have an input node"
# new_layer = OutputLayerRegressionMultioutput(self.y_train, n_outputs)
# new_layer.stack_on_previous_layer(self.layers[-1])
# self.layers.append(new_layer)
def _require_normalized_y(self):
# This function should be called when the network requires normalized observations
# (regression)
if self.y_train_mean_tf is None:
self.y_train_mean_tf = tf.placeholder(
self.dtype, name="y_train_mean", shape=(1,)
)
if self.y_train_std_tf is None:
self.y_train_std_tf = tf.placeholder(
self.dtype, name="y_train_std", shape=(1,)
)
def _initialize_network(self, learning_rate=1e-3):
assert len(self.layers) > 1
if self.initialized:
return
if self.show_debug_info:
print("Initializing network")
self._set_mean_function_last_layer()
        # Do a forward pass through the network to 'connect the graph'
self.objective_energy_function = -self._get_network_energy()
# Params to optimize
self.trainable_params = self.get_params()
self.gradient_optimization_step = tf.train.AdamOptimizer(
learning_rate=learning_rate
).minimize(self.objective_energy_function, var_list=self.trainable_params)
self.sess.run(tf.global_variables_initializer())
# All inits operations remaining
tf_operations = []
ops_returned = None
for layer in self.layers:
with tf.control_dependencies(tf_operations):
ops_returned = layer.initialize_params_layer()
if ops_returned is not None:
tf_operations += ops_returned
# If minibatch size is smaller than N
# Use part of the data to initialize the network and be memory efficient
batch_indexes = np.random.choice(
self.n_points, min(int(self.minibatch_size), self.n_points), replace=False
)
self.sess.run(
tf_operations,
feed_dict={
self.x_tf: self.x_train[batch_indexes],
self.y_train_tf: self.y_train[batch_indexes],
self.network_set_for_training_tf: 1.0,
},
)
self._load_functions_to_graph()
self.initialized = True
def _load_functions_to_graph(self):
"""Load Symbolic tensorflow functions
"""
# Predict function
self._predict_function = self.layers[-1].get_predicted_values()
if self.problem_type == ProblemType.REGRESSION:
# TODO: Implement some of these for classification
# Calculate rmse function
self._rmse_likelihood_function = self.layers[-1].calculate_loglikehood_rmse()
# Sample from predictive dist.
self._sample_from_predictive_function = self.layers[
-1
].sample_from_predictive_distribution()
# Get PDF for point function
self.y_range_tf = tf.placeholder(
self.dtype, name="y_range", shape=[None, self.y_train.shape[1]]
)
self._pdf_function = (
self.layers[-1].get_predictive_distribution_fixed_x(self.y_range_tf),
)
if self.problem_type == ProblemType.BINARY_CLASSIFICATION:
self._log_likelihood_function = self.layers[-1].calculate_log_likelihood()
self._sample_from_last_layer = self.layers[-1].sample_from_latent()
if self.problem_type == ProblemType.MULTICLASS_CLASSIFICATION:
self._log_likelihood_function = self.layers[-1].calculate_log_likelihood()
self._init_saver()
def _get_network_energy(self):
"""Returns the tensorflow operation to calculate the energy of the network
The energy is the approximation to the marginal likelihood of the AEP algorithm
Returns:
Tensor -- Symbolic operation to calculate the energy
"""
energy = 0.0
for layer in self.layers:
layer.forward_pass_computations()
energy += layer.get_layer_contribution_to_energy()
return energy[0, 0]
def get_params(self):
"""Returns all trainable parameters of the network
Returns:
list -- List of Tensor, with all the parameters
"""
assert len(self.layers) > 1
if self.trainable_params is not None:
return self.trainable_params
params = []
for layer in self.layers:
params += layer.get_params()
return params
def train_via_adam(self, max_epochs=1000, learning_rate=1e-3, step_callback=None):
""" Finalizes the graph and trains the DGP AEPMCM network using Adam optimizer.
Args:
            max_epochs (int): Maximum number of epochs to train for.
An epoch is a full pass through all the minibatches (whole dataset)
learning_rate (float): Learning rate to use. Default = 1e-3
step_callback (function): If set, function to call every gradient step.
This function should accept at least one parameter, the iteration number.
"""
assert len(self.layers) > 1
if self.show_debug_info:
print("Compiling adam updates")
self._initialize_network(learning_rate)
# self.sess.graph.finalize()
# Main loop of the optimization
n_batches = int(np.ceil(self.n_points / self.minibatch_size))
if self.show_debug_info:
print(
f"Training for {max_epochs} epochs, {max_epochs * n_batches} iterations"
)
sys.stdout.flush()
start = time.time()
# Object that keeps maxlen epoch times, for ETA prediction.
last_epoch_times = deque(maxlen=20)
for j in range(max_epochs):
shuffle = np.random.choice(self.n_points, self.n_points, replace=False)
shuffled_x_train = self.x_train[shuffle, :]
shuffled_y_train = self.y_train[shuffle, :]
avg_energy = 0.0
start_epoch = time.time()
for i in range(n_batches):
start_index = i * self.minibatch_size
end_index = min((i + 1) * self.minibatch_size, self.n_points)
minibatch_x = shuffled_x_train[start_index:end_index, :]
minibatch_y = shuffled_y_train[start_index:end_index, :]
current_energy = self.sess.run(
[self.gradient_optimization_step, self.objective_energy_function],
feed_dict={
self.x_tf: minibatch_x,
self.y_train_tf: minibatch_y,
self.network_set_for_training_tf: 1.0,
},
)[1]
if step_callback is not None:
step_callback(self, j * n_batches + i)
avg_energy += current_energy / (minibatch_x.shape[0] * n_batches)
elapsed_time_epoch = time.time() - start_epoch
last_epoch_times.append(elapsed_time_epoch)
if self.show_debug_info:
eta = calculate_ETA_str(last_epoch_times, j, max_epochs)
print(
"Epoch: {: <4}| Energy: {: <11.6f} | Time: {: >8.4f}s | Memory: {: >2.2f} GB | ETA: {}".format(
j, avg_energy, elapsed_time_epoch, memory_used(), eta
)
)
sys.stdout.flush()
if self.sacred_exp is not None:
self.sacred_exp.log_scalar("train.energy", round(avg_energy, 4))
elapsed_time = time.time() - start
if self.show_debug_info:
print("Total time: {}".format(elapsed_time))
# Log final energy to sacred
if self.sacred_exp is not None:
if self.sacred_exp.info.get("last_train_energies") is None:
self.sacred_exp.info.update(
{"last_train_energies": [round(avg_energy, 4)]}
)
else:
self.sacred_exp.info.get("last_train_energies").append(
round(avg_energy, 4)
)
def predict(self, x_test):
""" Returns predictions for a given x
Args:
x_test (ndarray): K x D matrix with locations for predictions.
With K the number of test points and D the dimension.
D should be the same as the one in the original training data.
"""
x_test = extend_dimension_if_1d(x_test)
assert x_test.shape[1] == self.problem_dim
x_test = x_test.astype(self.dtype)
        # Use minibatches to predict
n_batches = int(np.ceil(x_test.shape[0] / self.minibatch_size))
pred, uncert = [], []
current_batch = 0
for x_test_batch in np.array_split(x_test, n_batches):
if self.show_debug_info and n_batches > 1:
current_batch += 1
print(f"Predicting batch {current_batch}/{n_batches}")
pred_batch, uncert_batch = self.sess.run(
self._predict_function,
feed_dict={
self.x_tf: x_test_batch,
self.network_set_for_training_tf: 0.0,
},
)
pred.append(pred_batch)
uncert.append(uncert_batch)
pred_uncert_values = np.concatenate(pred, 0), np.concatenate(uncert, 0)
return pred_uncert_values
def sample_from_predictive_distribution(self, x_locations):
assert x_locations.shape[1] == self.problem_dim
x_locations = x_locations.astype(self.dtype)
samples = self.sess.run(
self._sample_from_predictive_function,
feed_dict={self.x_tf: x_locations, self.network_set_for_training_tf: 0.0},
)
return samples
def get_predictive_distribution_for_x(self, x_value, y_range):
""" Returns the probability of each y value for a fixed x. p(y | x)
It returns the predictive distribution for a fixed x.
Useful to plot the PDF of the predictive distribution
Args:
x_value (ndarray): Single point to which calculate the PDF
y_range (ndarray): All the plausible y values to test. suggested: np.linspace()
"""
assert x_value.shape[1] == self.problem_dim
x_value = x_value.astype(self.dtype)
pdf = self.sess.run(
self._pdf_function,
feed_dict={
self.x_tf: x_value,
self.y_range_tf: y_range,
self.network_set_for_training_tf: 0.0,
},
)
return pdf[0]
def calculate_log_likelihood(
self, x_test, y_test, y_train_mean=None, y_train_std=None
):
if self.problem_type == ProblemType.REGRESSION:
raise NotImplementedError()
elif (
self.problem_type == ProblemType.BINARY_CLASSIFICATION
or self.problem_type == ProblemType.MULTICLASS_CLASSIFICATION
):
n_batches = int(np.ceil(x_test.shape[0] / self.minibatch_size))
lik = []
for X_batch, Y_batch in zip(
np.array_split(x_test, n_batches), np.array_split(y_test, n_batches)
):
l = self.sess.run(
self._log_likelihood_function,
feed_dict={
self.x_tf: X_batch,
self.y_test_tf: Y_batch,
self.network_set_for_training_tf: 0.0,
},
)
lik.append(l)
# (N, 1), still need to calculate the average likelihood for all the dataset
lik = np.concatenate(lik, 0)
return np.mean(lik)
else:
raise NotImplementedError()
def save_model(self, path_to_save, name):
save_path = self.saver.save(self.sess, f"{path_to_save}/{name}.ckpt")
print(f"Model saved in path: {save_path}")
def restore_model(self, model_path, name):
if not self.initialized:
self._initialize_network()
self.saver.restore(self.sess, f"{model_path}/{name}.ckpt")
def _init_saver(self):
if self.saver is None:
self.saver = tf.train.Saver()
def calculate_loglikehood_rmse(self, x_test, y_test, y_train_mean, y_train_std):
# TODO: As we will normally want log likelihood for classification too
# this function should be separated.
# The calculate_log_likelihood valid for all kind of problems
# and the RMSE one valid just for regression.
# We expect unnormalized y_test
if not np.allclose(np.mean(x_test), 0, atol=0.1) or not np.allclose(
np.std(x_test), 1.0, atol=0.1
):
warnings.warn(
f"x_test should be normalized current mean = {np.mean(x_test)} and std = {np.std(x_test)}"
)
if self.problem_type != ProblemType.REGRESSION:
raise NotImplementedError()
n_batches = int(np.ceil(x_test.shape[0] / self.minibatch_size))
lik, sq_diff = [], []
for X_batch, Y_batch in zip(
np.array_split(x_test, n_batches), np.array_split(y_test, n_batches)
):
l, sq = self.sess.run(
self._rmse_likelihood_function,
feed_dict={
self.x_tf: X_batch,
self.y_test_tf: Y_batch,
self.y_train_mean_tf: y_train_mean.flatten(),
self.y_train_std_tf: y_train_std.flatten(),
self.network_set_for_training_tf: 0.0,
},
)
lik.append(l)
sq_diff.append(sq)
# (N, 1), still need to calculate the average likelihood for all the dataset
lik = np.concatenate(lik, 0)
sq_diff = np.array(np.concatenate(sq_diff, 0), dtype=self.dtype)
return np.average(lik), np.average(sq_diff) ** 0.5
def __enter__(self):
return self
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
self.sess.close()
tf.reset_default_graph()
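# Usage sketch (data shapes and hyperparameters are hypothetical, chosen only for illustration):
# build a two-hidden-layer DGP for binary classification, train it with Adam and predict.
def _example_binary_classification(x_train, y_train, x_test):
    # y_train is expected to hold the labels -1 and 1 for a binary problem (see __init__ above)
    with DGPNetwork(x_train, y_train, minibatch_size=100) as net:
        net.add_input_layer()
        net.add_gp_layer(n_inducing_points=50, n_nodes=2)
        net.add_gp_layer(n_inducing_points=50, n_nodes=1)
        net.add_output_layer_binary_classification()
        net.train_via_adam(max_epochs=100, learning_rate=1e-3)
        pred, uncert = net.predict(x_test)
    return pred, uncert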
|
from functools import lru_cache
from typing import Dict, Iterable, Set, Tuple
from .interfaces import Conflict, Layer, Match, Tree
from .utils import depth, is_import_allowed, match_submodule
def match_layer(tree: Tree, layer: Layer, module: str) -> Match:
match = Match(module, layer)
for import_ in layer.imports:
match.chains.extend(tree.find_chains(module, import_))
for submodule in layer.submodules:
if match_submodule(module, submodule):
match.submodules.add(submodule)
return match
def match_modules(tree: Tree, layers: Set[Layer]) -> Iterable[Conflict]:
module_matches: Dict[str, Match] = {}
modules: Set[str] = set()
ordered_layers = sorted(layers, key=lambda l: (depth(l), l.name))
max_depth = 0
for module in tree.walk():
modules.add(module)
for layer in ordered_layers:
if match := match_layer(tree, layer, module):
try:
yield Conflict(module_matches[module], match)
except KeyError:
max_depth = max(max_depth, depth(layer))
module_matches[module] = match
@lru_cache(None)
def key(module: str) -> Tuple[int, str, str]:
try:
layer = module_matches[module].layer
except KeyError:
return max_depth + 1, "", module
return depth(layer), layer.name, module
for module in sorted(modules, key=key, reverse=True):
current_match = module_matches.get(module)
for imported in sorted(tree.find_upstream_modules(module), key=key):
try:
imported_match = module_matches[imported]
except KeyError:
continue
if current_match is None:
module_matches[module] = current_match = Match(
module=module,
layer=imported_match.layer,
chains=list(tree.find_chains(module, imported)),
)
continue
if is_import_allowed(current_match.layer, imported_match.layer):
continue
yield Conflict(
main=current_match,
dupe=Match(
module=current_match.module,
layer=imported_match.layer,
chains=list(tree.find_chains(module, imported)),
),
)
|
import threading
import pexpect
class Connection:
def __init__(self, intro, conn, numb):
self.open = False
self.type = intro
self.connection = conn
self.listening = 1
self.number = int(numb) + 1
self.thread = threading.Thread(target=self.wait)
self.thread.start()
def __str__(self):
string = ""
if(self.open):
string += "Open\n"
else:
string += "Closed\n"
string += self.type
#string += self.connection + "\n"
return string
def wait(self):
try:
self.connection.expect("Connection Received.")
print("\nSession " + str(self.number) + " has recieved a connection. Type sessions -i " + str(self.number) + " to interact.")
self.open = True
except:
self.open = False
def interact(self):
if self.connection.isalive():
if self.listening == 0:
self.connection.sendline()
print("Press Control + ] to exit the shell."),
else:
self.listening = 0
print("Press Control + ] to exit the shell."),
self.connection.interact(escape_character='\x1d', input_filter=None, output_filter=None)
else:
print("The connection has been lost")
|
# encoding: utf-8
"""
@author: gallupliu
@contact: gallup-liu@hotmail.com
@version: 1.0
@license: Apache Licence
@file: test.py
@time: 2017/12/16 19:15
"""
from data.test import DataSuper
class wikiqa(DataSuper):
def gen_train(self):
print("gen wikiqa!")
def gen_embeddings(self):
print("gen vocab")
|
import pytest
from luminos.plugins.bluetooth import (Bluetooth)
instance = None
@pytest.fixture
def bluetooth():
global instance
if instance is None:
instance = Bluetooth()
return instance
class TestBluetooth:
def test_discover_devices(self, bluetooth):
devices = bluetooth.discoverDevices(None)
assert devices is not None
|
from machine.exceptions.machine import MachineError
class UnsupportedResponseTypeError(MachineError):
message = "Client doesn't support required content types"
status_code = 403
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 2011-04-11 10:58:17
###############################################################################
# Copyright (c) 2010, Vadim Shlyakhov
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
from __future__ import with_statement
import os
import re
import sys
import logging
import math
from optparse import OptionParser
from tiler_functions import *
from reader_backend import *
def kml_parm(hdr,name,lst=False):
l=re.split('</?%s>' % name,hdr)
# return only even elements as they are inside <name> </name>
return [i.strip() for i in l[1::2]] if lst else l[1].strip()
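# Illustrative examples of what kml_parm extracts:
#   kml_parm('<north>12.5</north>', 'north')         -> '12.5'
#   kml_parm('<a>1</a> x <a>2</a>', 'a', lst=True)    -> ['1', '2']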
class KmlMap(SrcMap):
magic='<kml xmlns'
def load_data(self):
'load datum definitions, ellipses, projections from a file'
# http://trac.osgeo.org/proj/wiki/FAQ#ChangingEllipsoidWhycantIconvertfromWGS84toGoogleEarthVirtualGlobeMercator
self.proj="+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"
def get_header(self):
'read map header'
header=[]
        with open(self.file, 'r', encoding='utf-8', errors='ignore') as f:
            header = f.read()
if '<GroundOverlay>' not in header:
raise Exception(" Invalid file: %s" % self.file)
return header
def get_layers(self):
for layer_data in kml_parm(self.header,'GroundOverlay', lst=True): # get list of <GroundOverlay> content
yield KmlLayer(self,layer_data)
# KmlMap
class KmlLayer(SrcLayer):
def get_refs(self):
'get a list of geo refs in tuples'
layer=self.data
if '<gx:LatLonQuad>' in layer:
src_refs=[list(map(float,i.split(','))) for i in kml_parm(layer,'coordinates').split()]
else: # assume LatLonBox
assert '<LatLonBox>' in layer
north,south,east,west=[float(kml_parm(layer,parm)) for parm in ('north','south','east','west')]
src_refs=[(west,south),(east,south),(east,north),(west,north)]
dst_refs=MyTransformer(SRC_SRS=proj_cs2geog_cs(self.map.proj),DST_SRS=self.map.proj).transform(src_refs)
if '<rotation>' in layer:
north,south,east,west=[float(dst_refs[i][j]) for i,j in ((2,1),(0,1),(1,0),(0,0))]
angle=math.radians(float(kml_parm(layer,'rotation')))
dx=east-west
dy=north-south
            xc = (west + east) / 2
            yc = (south + north) / 2
x1=dy*math.sin(angle)
x2=dx*math.cos(angle)
y1=dy*math.cos(angle)
y2=dx*math.sin(angle)
            x0 = xc - (x1 + x2) / 2
            y0 = yc - (y1 + y2) / 2
dst_refs=[(x0+x1,y0),(x0+x1+x2,y0+y2),(x0+x2,y0+y1+y2),(x0,y0+y1)]
ld(dst_refs)
w, h=(self.raster_ds.RasterXSize,self.raster_ds.RasterYSize)
ld('w, h',w, h)
corners=[(0,h),(w,h),(w,0),(0,0)]
ids=[str(i+1) for i in range(4)]
refs=RefPoints(self,
            ids=ids,
            pixels=corners,
cartesian=dst_refs)
return refs
def get_plys(self):
'boundary polygon'
mpointlst=shape2mpointlst(self.map.file,self.map.proj,self.name)
if not mpointlst:
return None
plys=RefPoints(self,cartesian=mpointlst[0])
return plys
def get_srs(self):
return self.map.proj, None
def get_raster(self):
img_ref=kml_parm(self.data,'href')
map_dir=os.path.split(self.map.file)[0]
if not map_dir:
map_dir=u'.'
imp_path_slashed=img_ref.replace('\\','/') # replace windows slashes
imp_path_lst=imp_path_slashed.split('/')
img_patt=imp_path_lst[-1].lower()
match=[i for i in os.listdir(map_dir) if i.lower() == img_patt]
try:
return os.path.join(map_dir, match[0])
        except IndexError:
            raise Exception("*** Image file not found: %s" % img_ref)
def get_name(self):
return kml_parm(self.data,'name')
# KmlLayer
if __name__=='__main__':
print('\nPlease use convert2gdal.py\n')
sys.exit(1)
|
"""
Created on Mon Feb 26 14:29:11 2018
@author: Christian Bender
@license: MIT-license
This module contains some useful classes and functions for dealing
with linear algebra in python.
Overview:
- class Vector
- function zeroVector(dimension)
- function unitBasisVector(dimension,pos)
- function axpy(scalar,vector1,vector2)
- function randomVector(N,a,b)
- class Matrix
- function squareZeroMatrix(N)
- function randomMatrix(W,H,a,b)
"""
from __future__ import annotations
import math
import random
from typing import Collection, overload
class Vector:
"""
This class represents a vector of arbitrary size.
You need to give the vector components.
Overview about the methods:
constructor(components : list) : init the vector
set(components : list) : changes the vector components.
__str__() : toString method
component(i : int): gets the i-th component (start by 0)
__len__() : gets the size of the vector (number of components)
euclidLength() : returns the euclidean length of the vector.
operator + : vector addition
operator - : vector subtraction
operator * : scalar multiplication and dot product
copy() : copies this vector and returns it.
changeComponent(pos,value) : changes the specified component.
TODO: compare-operator
"""
def __init__(self, components: Collection[float] | None = None) -> None:
"""
input: components or nothing
simple constructor for init the vector
"""
if components is None:
components = []
self.__components = list(components)
def set(self, components: Collection[float]) -> None:
"""
input: new components
changes the components of the vector.
replace the components with newer one.
"""
if len(components) > 0:
self.__components = list(components)
else:
raise Exception("please give any vector")
def __str__(self) -> str:
"""
returns a string representation of the vector
"""
return "(" + ",".join(map(str, self.__components)) + ")"
def component(self, i: int) -> float:
"""
input: index (start at 0)
output: the i-th component of the vector.
"""
if type(i) is int and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception("index out of range")
def __len__(self) -> int:
"""
returns the size of the vector
"""
return len(self.__components)
def euclidLength(self) -> float:
"""
returns the euclidean length of the vector
"""
summe: float = 0
for c in self.__components:
summe += c ** 2
return math.sqrt(summe)
def __add__(self, other: Vector) -> Vector:
"""
input: other vector
assumes: other vector has the same size
returns a new vector that represents the sum.
"""
size = len(self)
if size == len(other):
result = [self.__components[i] + other.component(i) for i in range(size)]
return Vector(result)
else:
raise Exception("must have the same size")
def __sub__(self, other: Vector) -> Vector:
"""
input: other vector
assumes: other vector has the same size
returns a new vector that represents the difference.
"""
size = len(self)
if size == len(other):
result = [self.__components[i] - other.component(i) for i in range(size)]
return Vector(result)
else: # error case
raise Exception("must have the same size")
@overload
def __mul__(self, other: float) -> Vector:
...
@overload
def __mul__(self, other: Vector) -> float:
...
def __mul__(self, other: float | Vector) -> float | Vector:
"""
mul implements the scalar multiplication
and the dot-product
"""
if isinstance(other, float) or isinstance(other, int):
ans = [c * other for c in self.__components]
return Vector(ans)
elif isinstance(other, Vector) and (len(self) == len(other)):
size = len(self)
summe: float = 0
for i in range(size):
summe += self.__components[i] * other.component(i)
return summe
else: # error case
raise Exception("invalid operand!")
def magnitude(self) -> float:
"""
Magnitude of a Vector
>>> Vector([2, 3, 4]).magnitude()
5.385164807134504
"""
return sum([i ** 2 for i in self.__components]) ** (1 / 2)
def angle(self, other: Vector, deg: bool = False) -> float:
"""
find angle between two Vector (self, Vector)
>>> Vector([3, 4, -1]).angle(Vector([2, -1, 1]))
1.4906464636572374
>>> Vector([3, 4, -1]).angle(Vector([2, -1, 1]), deg = True)
85.40775111366095
>>> Vector([3, 4, -1]).angle(Vector([2, -1]))
Traceback (most recent call last):
...
Exception: invalid operand!
"""
num = self * other
den = self.magnitude() * other.magnitude()
if deg:
return math.degrees(math.acos(num / den))
else:
return math.acos(num / den)
def copy(self) -> Vector:
"""
copies this vector and returns it.
"""
return Vector(self.__components)
def changeComponent(self, pos: int, value: float) -> None:
"""
input: an index (pos) and a value
changes the specified component (pos) with the
'value'
"""
# precondition
assert -len(self.__components) <= pos < len(self.__components)
self.__components[pos] = value
def zeroVector(dimension: int) -> Vector:
"""
returns a zero-vector of size 'dimension'
"""
# precondition
assert isinstance(dimension, int)
return Vector([0] * dimension)
def unitBasisVector(dimension: int, pos: int) -> Vector:
"""
returns a unit basis vector with a One
at index 'pos' (indexing at 0)
"""
# precondition
assert isinstance(dimension, int) and (isinstance(pos, int))
ans = [0] * dimension
ans[pos] = 1
return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
"""
input: a 'scalar' and two vectors 'x' and 'y'
output: a vector
computes the axpy operation
"""
# precondition
assert (
isinstance(x, Vector)
and (isinstance(y, Vector))
and (isinstance(scalar, int) or isinstance(scalar, float))
)
return x * scalar + y
def randomVector(N: int, a: int, b: int) -> Vector:
"""
input: size (N) of the vector.
random range (a,b)
output: returns a random vector of size N, with
random integer components between 'a' and 'b'.
"""
random.seed(None)
ans = [random.randint(a, b) for _ in range(N)]
return Vector(ans)
class Matrix:
"""
class: Matrix
    This class represents an arbitrary matrix.
Overview about the methods:
__str__() : returns a string representation
operator * : implements the matrix vector multiplication
implements the matrix-scalar multiplication.
changeComponent(x,y,value) : changes the specified component.
component(x,y) : returns the specified component.
width() : returns the width of the matrix
height() : returns the height of the matrix
operator + : implements the matrix-addition.
    operator - : implements the matrix-subtraction
"""
def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
"""
simple constructor for initializing
the matrix with components.
"""
self.__matrix = matrix
self.__width = w
self.__height = h
def __str__(self) -> str:
"""
returns a string representation of this
matrix.
"""
ans = ""
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def changeComponent(self, x: int, y: int, value: float) -> None:
"""
changes the x-y component of this matrix
"""
if 0 <= x < self.__height and 0 <= y < self.__width:
self.__matrix[x][y] = value
else:
raise Exception("changeComponent: indices out of bounds")
def component(self, x: int, y: int) -> float:
"""
returns the specified (x,y) component
"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("changeComponent: indices out of bounds")
def width(self) -> int:
"""
getter for the width
"""
return self.__width
def height(self) -> int:
"""
getter for the height
"""
return self.__height
    def determinate(self) -> float:
        """
        returns the determinant of an nxn matrix using Laplace expansion
        along the first row
        >>> Matrix([[1, 2], [3, 4]], 2, 2).determinate()
        -2
        """
        if self.__height == self.__width and self.__width >= 2:
            if self.__width > 2:
                total = 0
                # expand along the first row; each minor drops row 0 and column y
                for y in range(self.__width):
                    minor = [row[:y] + row[y + 1 :] for row in self.__matrix[1:]]
                    total += (
                        self.__matrix[0][y]
                        * (-1) ** y
                        * Matrix(
                            minor, self.__width - 1, self.__height - 1
                        ).determinate()
                    )
                return total
            else:
                return (
                    self.__matrix[0][0] * self.__matrix[1][1]
                    - self.__matrix[0][1] * self.__matrix[1][0]
                )
        else:
            raise Exception("matrix is not square")
@overload
def __mul__(self, other: float) -> Matrix:
...
@overload
def __mul__(self, other: Vector) -> Vector:
...
def __mul__(self, other: float | Vector) -> Vector | Matrix:
"""
implements the matrix-vector multiplication.
implements the matrix-scalar multiplication
"""
        if isinstance(other, Vector):  # matrix-vector product
if len(other) == self.__width:
ans = zeroVector(self.__height)
for i in range(self.__height):
summe: float = 0
for j in range(self.__width):
summe += other.component(j) * self.__matrix[i][j]
ans.changeComponent(i, summe)
summe = 0
return ans
else:
raise Exception(
"vector must have the same size as the "
+ "number of columns of the matrix!"
)
        elif isinstance(other, int) or isinstance(other, float):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("invalid operand!")
def __add__(self, other: Matrix) -> Matrix:
"""
implements the matrix-addition.
"""
if self.__width == other.width() and self.__height == other.height():
matrix = []
for i in range(self.__height):
row = []
for j in range(self.__width):
row.append(self.__matrix[i][j] + other.component(i, j))
matrix.append(row)
return Matrix(matrix, self.__width, self.__height)
else:
raise Exception("matrix must have the same dimension!")
def __sub__(self, other: Matrix) -> Matrix:
"""
implements the matrix-subtraction.
"""
if self.__width == other.width() and self.__height == other.height():
matrix = []
for i in range(self.__height):
row = []
for j in range(self.__width):
row.append(self.__matrix[i][j] - other.component(i, j))
matrix.append(row)
return Matrix(matrix, self.__width, self.__height)
else:
raise Exception("matrix must have the same dimension!")
def squareZeroMatrix(N: int) -> Matrix:
"""
returns a square zero-matrix of dimension NxN
"""
ans: list[list[float]] = [[0] * N for _ in range(N)]
return Matrix(ans, N, N)
def randomMatrix(W: int, H: int, a: int, b: int) -> Matrix:
"""
returns a random matrix WxH with integer components
between 'a' and 'b'
"""
random.seed(None)
matrix: list[list[float]] = [
[random.randint(a, b) for _ in range(W)] for _ in range(H)
]
return Matrix(matrix, W, H)
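# Small usage sketch (illustrative values only) exercising the classes defined above.
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)               # (5,7,9)
    print(v * w)               # dot product: 32
    print(v * 2)               # scalar multiplication: (2,4,6)
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m * Vector([1, 1]))  # matrix-vector product: (3,7)
    print(m.determinate())     # -2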
|
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from flask import Blueprint
import handlers
decodeinvoice_api = Blueprint('decodeinvoice_api', __name__)
@decodeinvoice_api.route('/decodeinvoice', methods=['GET'])
def decodeinvoice_get():
"""
Decode a BOLT-11 compatible Lightning invoice into a human readable format.
Uses the implementation of the connected Lightning node so the client app
doesn't have to implement this.
It is handler for GET /decodeinvoice
"""
return handlers.decodeinvoice_getHandler()
|
import pygame, sys
from copy import deepcopy
from pygame.locals import *
from constants import *
setfile = open("settings.txt", 'r')
GAMESIZE = GAMESIZES[int(setfile.readline())]
WINDOWWIDTH = int(800*GAMESIZE)
WINDOWHEIGHT = int(600*GAMESIZE)
SQUARESIZE = int(50*GAMESIZE)
BOARDSIZE = 9
XLMARGIN = int((3/4*WINDOWWIDTH - BOARDSIZE*SQUARESIZE)/2)
XRMARGIN = int((3/4*WINDOWWIDTH - BOARDSIZE*SQUARESIZE)/2 + 1/4 * WINDOWWIDTH)
YMARGIN = int((3/4*WINDOWWIDTH - BOARDSIZE*SQUARESIZE)/2)
NUMBEROFLEVELS = len(LEVELS)
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
ROWS = int(WINDOWWIDTH/SQUARESIZE)
COLS = int(WINDOWHEIGHT/SQUARESIZE)
pygame.init()
BASICFONT = pygame.font.Font('freesansbold.ttf', int(20*GAMESIZE))
SUBSCRIPTFONT = pygame.font.Font('freesansbold.ttf', int(15*GAMESIZE))
TITLEFONT = pygame.font.SysFont('arial', int(70*GAMESIZE))
WALL = pygame.image.load('graphics/wall.png')
WALL = pygame.transform.scale(WALL, (SQUARESIZE, SQUARESIZE))
BOX = pygame.image.load('graphics/box.png')
BOX = pygame.transform.scale(BOX, (SQUARESIZE, SQUARESIZE))
TARGET = pygame.image.load('graphics/target.png')
TARGET = pygame.transform.scale(TARGET, (SQUARESIZE, SQUARESIZE))
FLOOR = pygame.image.load('graphics/floor.png')
FLOOR = pygame.transform.scale(FLOOR, (SQUARESIZE, SQUARESIZE))
PLAYER = pygame.image.load('graphics/player.png')
PLAYER = pygame.transform.scale(PLAYER, (SQUARESIZE, SQUARESIZE))
BOXONTARGET = pygame.image.load('graphics/boxontarget.png')
BOXONTARGET = pygame.transform.scale(BOXONTARGET, (SQUARESIZE, SQUARESIZE))
def makeText(text, position, font = BASICFONT, color = DARKGRAY, bgcolor = None):
Surf = font.render(text, 1, color, bgcolor)
Rect = Surf.get_rect()
Rect.center = (position[0], position[1])
return Surf, Rect
titleSurf, titleRect = makeText('SOKOBAN', (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2)), TITLEFONT)
newGameSurf, newGameRect = makeText('New Game', (int(135*GAMESIZE), int(450*GAMESIZE)), bgcolor = GRAY)
continueSurf, continueRect = makeText('Continue', (int(310*GAMESIZE), int(450*GAMESIZE)), bgcolor = GRAY)
optionsSurf, optionsRect = makeText('Options', (int(485*GAMESIZE), int(450*GAMESIZE)), bgcolor = GRAY)
bestscoresSurf, bestscoresRect = makeText('Best Scores', (int(660*GAMESIZE), int(450*GAMESIZE)), bgcolor = GRAY)
RECTS = (newGameRect, continueRect, optionsRect, bestscoresRect)
def main():
global DISPLAYSURF, FPSCLOCK
with open('settings.txt', 'r') as file:
data = file.readlines()
isMusic = int(data[1])
isSound = int(data[2])
savedLevel = int(data[3])
savedScore = int(data[4])
pygame.mixer.pre_init(22050, -16, 2, 1)
pygame.mixer.init()
pygame.init()
pygame.mixer.music.load('sounds/background.wav')
if isMusic == 1:
pygame.mixer.music.play(-1, 0.0)
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
pygame.display.set_caption('SOKOBAN')
mousex = 0
mousey = 0
drawStartMenu()
while True:
mouseClicked = False
for event in pygame.event.get():
if event.type == QUIT:
terminate()
if event.type == MOUSEMOTION:
mousex, mousey = event.pos
elif event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
mouseClicked = True
if newGameRect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (newGameRect.left-5, newGameRect.top-5, newGameRect.width + 10, newGameRect.height +10), 4)
if mouseClicked:
if savedLevel == 1 or (savedLevel != 1 and confirmationBox()):
newGame(isMusic, isSound)
elif continueRect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (continueRect.left-5, continueRect.top-5, continueRect.width + 10, continueRect.height +10), 4)
if mouseClicked:
newGame(isMusic, isSound, savedLevel, savedScore)
elif optionsRect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (optionsRect.left-5, optionsRect.top-5, optionsRect.width + 10, optionsRect.height +10), 4)
if mouseClicked:
isSound = settings(isMusic, isSound)
elif bestscoresRect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (bestscoresRect.left-5, bestscoresRect.top-5, bestscoresRect.width + 10, bestscoresRect.height +10), 4)
if mouseClicked:
readBestScores()
else:
drawStartMenu()
pygame.display.update()
def terminate():
pygame.quit()
sys.exit()
def drawStartMenu():
DISPLAYSURF.fill(BGCOLOR)
for i in range(ROWS):
for j in range(COLS):
if i in (0, ROWS - 1) or j in (0, COLS - 1):
DISPLAYSURF.blit(WALL, (i*SQUARESIZE, j*SQUARESIZE))
else:
DISPLAYSURF.blit(FLOOR, (i*SQUARESIZE, j*SQUARESIZE))
DISPLAYSURF.blit(PLAYER, (int(175*GAMESIZE), int(150*GAMESIZE)))
DISPLAYSURF.blit(BOX, (int(375*GAMESIZE), int(150*GAMESIZE)))
DISPLAYSURF.blit(TARGET, (int(575*GAMESIZE), int(150*GAMESIZE)))
DISPLAYSURF.blit(titleSurf, titleRect)
    DISPLAYSURF.blit(newGameSurf, newGameRect)
DISPLAYSURF.blit(continueSurf, continueRect)
DISPLAYSURF.blit(optionsSurf, optionsRect)
DISPLAYSURF.blit(bestscoresSurf, bestscoresRect)
pygame.display.update()
def readBestScores():
with open('highscores.txt', 'r') as file:
data = file.readlines()
display = True
k = 0
DISPLAYSURF.fill(GRAY)
scoreSurf, scoreRect = makeText('BEST SCORES', (int(WINDOWWIDTH/2), int((100+k*50)*GAMESIZE)), BASICFONT, color = BROWN)
DISPLAYSURF.blit(scoreSurf, scoreRect)
for d in data:
k += 1
surf, rect = makeText(d[:-1], (int(WINDOWWIDTH/2), int((100+k*50)*GAMESIZE)), BASICFONT, color = BROWN)
DISPLAYSURF.blit(surf, rect)
k += 1
surf, rect = makeText('BACK', (int(WINDOWWIDTH/2), int((100+k*50)*GAMESIZE)), BASICFONT)
DISPLAYSURF.blit(surf, rect)
pygame.display.update()
while display:
mouseClicked = False
for event in pygame.event.get():
pygame.draw.rect(DISPLAYSURF, GRAY, (rect.left-5, rect.top-5, rect.width + 10, rect.height +10), 4)
if event.type == QUIT:
terminate()
if event.type == MOUSEMOTION:
mousex, mousey = event.pos
elif event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
mouseClicked = True
if rect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (rect.left-5, rect.top-5, rect.width + 10, rect.height +10), 4)
if mouseClicked:
display = False
pygame.display.update()
def settings(isMusic, isSound):
mousex = 0
mousey = 0
display = True
aSurf, aRect, bSurf, bRect, cSurf, cRect = checkSettings('settings.txt')
dSurf, dRect = makeText('BACK', (int(WINDOWWIDTH/2), int(500*GAMESIZE)), BASICFONT, color = DARKGRAY)
eSurf, eRect = makeText('Change in resolution will be applied after game restart', (int(WINDOWWIDTH/2), int(250*GAMESIZE)), SUBSCRIPTFONT, color = BROWN)
drawSettings(aSurf, aRect, bSurf, bRect, cSurf, cRect, dSurf, dRect, eSurf, eRect)
while display:
mouseClicked = False
for event in pygame.event.get():
if event.type == QUIT:
terminate()
if event.type == MOUSEMOTION:
mousex, mousey = event.pos
elif event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
mouseClicked = True
if aRect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (aRect.left-5, aRect.top-5, aRect.width + 10, aRect.height +10), 4)
if mouseClicked:
isSound = switchSettings(0, 3)
elif bRect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (bRect.left-5, bRect.top-5, bRect.width + 10, bRect.height +10), 4)
if mouseClicked:
isSound = switchSettings(1, 2, isMusic)
elif cRect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (cRect.left-5, cRect.top-5, cRect.width + 10, cRect.height +10), 4)
if mouseClicked:
isSound = switchSettings(2, 2, isSound)
elif dRect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (dRect.left-5, dRect.top-5, dRect.width + 10, dRect.height +10), 4)
if mouseClicked:
display = False
else:
aSurf, aRect, bSurf, bRect, cSurf, cRect = checkSettings('settings.txt')
drawSettings(aSurf, aRect, bSurf, bRect, cSurf, cRect, dSurf, dRect, eSurf, eRect)
pygame.display.update()
return isSound
def checkSettings(data):
with open(data, 'r') as file:
d = file.readlines()
k = 1
if int(d[0]) == 0:
aSurf, aRect = makeText('RESOLUTION: 640 x 480', (int(WINDOWWIDTH/2), int((100+k*100)*GAMESIZE)), BASICFONT, color = BROWN, bgcolor = SILVER)
elif int(d[0]) == 1:
aSurf, aRect = makeText('RESOLUTION: 800 x 600', (int(WINDOWWIDTH/2), int((100+k*100)*GAMESIZE)), BASICFONT, color = BROWN, bgcolor = SILVER)
elif int(d[0]) == 2:
aSurf, aRect = makeText('RESOLUTION: 1200 x 900', (int(WINDOWWIDTH/2), int((100+k*100)*GAMESIZE)), BASICFONT, color = BROWN, bgcolor = SILVER)
k += 1
if int(d[1]) == 0:
bSurf, bRect = makeText('MUSIC: OFF', (int(WINDOWWIDTH/2), int((100+k*100)*GAMESIZE)), BASICFONT, color = BROWN, bgcolor = SILVER)
if int(d[1]) == 1:
bSurf, bRect = makeText('MUSIC: ON', (int(WINDOWWIDTH/2), int((100+k*100)*GAMESIZE)), BASICFONT, color = BROWN, bgcolor = SILVER)
k += 1
if int(d[2]) == 0:
cSurf, cRect = makeText('SOUND EFFECTS: OFF', (int(WINDOWWIDTH/2), int((100+k*100)*GAMESIZE)), BASICFONT, color = BROWN, bgcolor = SILVER)
if int(d[2]) == 1:
cSurf, cRect = makeText('SOUND EFFECTS: ON', (int(WINDOWWIDTH/2), int((100+k*100)*GAMESIZE)), BASICFONT, color = BROWN, bgcolor = SILVER)
return aSurf, aRect, bSurf, bRect, cSurf, cRect
def drawSettings(aSurf, aRect, bSurf, bRect, cSurf, cRect, dSurf, dRect, eSurf, eRect):
DISPLAYSURF.fill(GRAY)
optSurf, optRect = makeText('SETTINGS', (int(WINDOWWIDTH/2), int(100*GAMESIZE)), BASICFONT, color = BROWN)
DISPLAYSURF.blit(optSurf, optRect)
DISPLAYSURF.blit(aSurf, aRect)
DISPLAYSURF.blit(bSurf, bRect)
DISPLAYSURF.blit(cSurf, cRect)
DISPLAYSURF.blit(dSurf, dRect)
DISPLAYSURF.blit(eSurf, eRect)
pygame.display.update()
def switchSettings(line, pos, isMusic = None, isSound = None):
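    # Cycle the value stored on the given line of settings.txt modulo `pos`, write the file back,
    # start or stop the music when the music line changes, and return the (possibly updated) sound flag.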
with open('settings.txt', 'r') as file:
data = file.readlines()
data[line] = str((int(data[line])+1)%pos)+'\n'
with open('settings.txt', W) as file:
file.writelines( data )
if line == 1:
isMusic = int(data[line])
if isMusic == 1:
pygame.mixer.music.play(-1, 0.0)
else:
pygame.mixer.music.stop()
if line == 2:
isSound = int(data[line])
return isSound
def confirmationBox():
ySurf, yRect = makeText('YES', (int(300*GAMESIZE), int(400*GAMESIZE)), BASICFONT, color = BROWN)
nSurf, nRect = makeText('NO', (int(500*GAMESIZE), int(400*GAMESIZE)), BASICFONT, color = BROWN)
drawBox(ySurf, yRect, nSurf, nRect)
display = True
newGame = False
mousex = 0
mousey = 0
while display:
mouseClicked = False
for event in pygame.event.get():
if event.type == QUIT:
terminate()
if event.type == MOUSEMOTION:
mousex, mousey = event.pos
elif event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
mouseClicked = True
if yRect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (yRect.left-5, yRect.top-5, yRect.width + 10, yRect.height +10), 4)
if mouseClicked:
display = False
newGame = True
elif nRect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (nRect.left-5, nRect.top-5, nRect.width + 10, nRect.height +10), 4)
if mouseClicked:
display = False
newGame = False
else:
aSurf, aRect, bSurf, bRect, cSurf, cRect = checkSettings('settings.txt')
drawBox(ySurf, yRect, nSurf, nRect)
pygame.display.update()
return newGame
def drawBox(ySurf, yRect, nSurf, nRect):
DISPLAYSURF.fill(GRAY)
surf, rect = makeText('Starting new game will erase your save. Proceed?', (int(WINDOWWIDTH/2), int(WINDOWHEIGHT/2)), BASICFONT, color = BROWN)
DISPLAYSURF.blit(surf, rect)
DISPLAYSURF.blit(ySurf, yRect)
DISPLAYSURF.blit(nSurf, nRect)
pygame.display.update()
def newGame(isMusic, isSound, level = 1, score = 0):
if level == 1:
autoSave(0, 1)
lvl = deepcopy(LEVELS[level-1])
sc = score
soundFanfare = pygame.mixer.Sound('sounds/fanfare.wav')
resetSurf, resetRect = makeText('RESET LEVEL', (int((WINDOWWIDTH - XRMARGIN)+XRMARGIN/2), int(3/4*WINDOWHEIGHT)), BASICFONT)
drawBoard(level - 1, sc, resetSurf, resetRect)
drawBoardState(lvl)
while True:
for event in pygame.event.get():
mouseClicked = False
mousex = 0
mousey = 0
if event.type == QUIT:
terminate()
if event.type == MOUSEMOTION:
mousex, mousey = event.pos
elif event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
mouseClicked = True
if resetRect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (resetRect.left-5, resetRect.top-5, resetRect.width + 10, resetRect.height +10), 4)
if mouseClicked:
newGame(isMusic, isSound, level, score)
return
if event.type == KEYDOWN:
px, py = getPlayerPosition(lvl)
if event.key == K_ESCAPE:
isSound = settings(isMusic, isSound)
if event.key in (K_w, K_UP):
if validMove(lvl, UP, px, py, isSound) == 1:
lvl, sc = updateBoardState(lvl, LEVELS[level-1], UP, px, py), sc + 1
elif validMove(lvl, UP, px, py, isSound) == 2:
lvl, sc = updateBoardState(lvl, LEVELS[level-1], UP, px, py, True), sc + 1
elif event.key in (K_s, K_DOWN):
if validMove(lvl, DOWN, px, py, isSound) == 1:
lvl, sc = updateBoardState(lvl, LEVELS[level-1], DOWN, px, py), sc + 1
elif validMove(lvl, DOWN, px, py, isSound) == 2:
lvl, sc = updateBoardState(lvl, LEVELS[level-1], DOWN, px, py, True), sc + 1
elif event.key in (K_d, K_RIGHT):
if validMove(lvl, RIGHT, px, py, isSound) == 1:
lvl, sc = updateBoardState(lvl, LEVELS[level-1], RIGHT, px, py), sc + 1
elif validMove(lvl, RIGHT, px, py, isSound) == 2:
lvl, sc = updateBoardState(lvl, LEVELS[level-1], RIGHT, px, py, True), sc + 1
elif event.key in (K_a, K_LEFT):
if validMove(lvl, LEFT, px, py, isSound) == 1:
lvl, sc = updateBoardState(lvl, LEVELS[level-1], LEFT, px, py), sc + 1
elif validMove(lvl, LEFT, px, py, isSound) == 2:
lvl, sc = updateBoardState(lvl, LEVELS[level-1], LEFT, px, py, True), sc + 1
drawBoard(level - 1, sc, resetSurf, resetRect)
drawBoardState(lvl)
if checkSolution(lvl):
if isSound == 1:
soundFanfare.play()
pygame.time.delay(4000)
if level < NUMBEROFLEVELS:
autoSave(sc)
newGame(isMusic, isSound, level+1, sc)
else:
highscore = False
with open('highscores.txt', 'r') as file:
data = file.readlines()
for line in range(len(data)):
data[line] = int(data[line])
data.append(sc)
data.sort()
if data[len(data)-1] != sc:
highscore = True
data = data[:-1]
for line in range(len(data)):
data[line] = str(data[line])+'\n'
with open('highscores.txt', W) as file:
file.writelines( data )
display = True
mouseClicked = False
mousex = 0
mousey = 0
surf, rect = makeText('BACK', (int(WINDOWWIDTH/2), int(7/8*WINDOWHEIGHT)), BASICFONT)
drawFinal(highscore, sc, surf, rect)
while display:
for event in pygame.event.get():
if event.type == QUIT:
terminate()
if event.type == MOUSEMOTION:
mousex, mousey = event.pos
elif event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
mouseClicked = True
if rect.collidepoint(mousex, mousey):
pygame.draw.rect(DISPLAYSURF, DARKGRAY, (rect.left-5, rect.top-5, rect.width + 10, rect.height +10), 4)
if mouseClicked:
display = False
pygame.display.update()
if checkSolution(lvl):
return
pygame.display.update()
FPSCLOCK.tick(FPS)
def drawFinal(highscore, sc, surf, rect):
DISPLAYSURF.fill(GRAY)
conSurf, conRect = makeText('CONGRATULATIONS!!!', (int(WINDOWWIDTH/2), int(1/8*WINDOWHEIGHT)), TITLEFONT)
descSurf, descRect = makeText('You have solved all of the puzzles!', (int(WINDOWWIDTH/2), conRect.bottom + SQUARESIZE), BASICFONT)
desc2Surf, desc2Rect = makeText('Your score of:', (int(WINDOWWIDTH/2), descRect.bottom + SQUARESIZE), BASICFONT)
scString = str(sc) + ' total moves'
scSurf, scRect = makeText(scString, (int(WINDOWWIDTH/2), desc2Rect.bottom + 2 * SQUARESIZE), TITLEFONT)
if highscore:
hsString = 'is good enough to be put on a highscores list! Great Job!!!'
else:
hsString = 'is not good enough to be put on a highscores list. Try again!'
hsSurf, hsRect = makeText(hsString, (int(WINDOWWIDTH/2), scRect.bottom + 2 * SQUARESIZE), BASICFONT)
DISPLAYSURF.blit(conSurf, conRect)
DISPLAYSURF.blit(descSurf, descRect)
DISPLAYSURF.blit(desc2Surf, desc2Rect)
DISPLAYSURF.blit(scSurf, scRect)
DISPLAYSURF.blit(hsSurf, hsRect)
DISPLAYSURF.blit(surf, rect)
pygame.display.update()
def drawBoard(level, score, resetSurf, resetRect):
board = LEVELS[level]
DISPLAYSURF.fill(BGCOLOR)
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j]:
if board[i][j] == W:
DISPLAYSURF.blit(WALL, (XLMARGIN + i*SQUARESIZE, YMARGIN + j*SQUARESIZE))
else:
DISPLAYSURF.blit(FLOOR, (XLMARGIN + i*SQUARESIZE, YMARGIN + j*SQUARESIZE))
lSurf, lRect = makeText('LEVEL: ' + str(level+1), (int((WINDOWWIDTH - XRMARGIN) + XRMARGIN/2), int(1/4*WINDOWHEIGHT)), BASICFONT)
sSurf, sRect = makeText('TOTAL MOVES: ' + str(score), (int((WINDOWWIDTH - XRMARGIN)+XRMARGIN/2), int(1/2*WINDOWHEIGHT)), BASICFONT)
    DISPLAYSURF.blit(lSurf, lRect)
    DISPLAYSURF.blit(sSurf, sRect)
    DISPLAYSURF.blit(resetSurf, resetRect)
pygame.display.update()
def drawBoardState(lvl):
for i in range(len(lvl)):
for j in range(len(lvl[0])):
if lvl[i][j]:
if lvl[i][j] == P:
DISPLAYSURF.blit(PLAYER, (XLMARGIN + i*SQUARESIZE, YMARGIN + j*SQUARESIZE))
elif lvl[i][j] == B:
DISPLAYSURF.blit(BOX, (XLMARGIN + i*SQUARESIZE, YMARGIN + j*SQUARESIZE))
elif lvl[i][j] == T:
DISPLAYSURF.blit(TARGET, (XLMARGIN + i*SQUARESIZE, YMARGIN + j*SQUARESIZE))
elif lvl[i][j] == K:
DISPLAYSURF.blit(BOXONTARGET, (XLMARGIN + i*SQUARESIZE, YMARGIN + j*SQUARESIZE))
elif lvl[i][j] == M:
DISPLAYSURF.blit(TARGET, (XLMARGIN + i*SQUARESIZE, YMARGIN + j*SQUARESIZE))
DISPLAYSURF.blit(PLAYER, (XLMARGIN + i*SQUARESIZE, YMARGIN + j*SQUARESIZE))
pygame.display.update()
def getPlayerPosition(lvl):
for x in range(len(lvl)):
        for y in range(len(lvl[x])):
if lvl[x][y] == P or lvl[x][y] == M:
return (x,y)
def validMove(lvl, direction, px, py, isSound):
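    # Returns 1 for a plain step onto floor/target, 2 for a box push, or None when the move is blocked;
    # it also plays the matching sound effect when sound is enabled.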
k = None
soundStep = pygame.mixer.Sound('sounds/step2.wav')
soundPush = pygame.mixer.Sound('sounds/push2.wav')
if direction == UP:
if lvl[px][py-1] in (F, T):
k = 1
elif lvl[px][py-1] in (B, K) and lvl[px][py-2] in (F, T):
k = 2
elif direction == DOWN:
if lvl[px][py+1] in (F, T):
k = 1
elif lvl[px][py+1] in (B, K) and lvl[px][py+2] in (F, T):
k = 2
elif direction == RIGHT:
if lvl[px+1][py] in (F, T):
k = 1
elif lvl[px+1][py] in (B, K) and lvl[px+2][py] in (F, T):
k = 2
elif direction == LEFT:
if lvl[px-1][py] in (F, T):
k = 1
elif lvl[px-1][py] in (B, K) and lvl[px-2][py] in (F, T):
k = 2
if k == 1 and isSound == 1:
soundStep.play()
if k == 2 and isSound == 1:
soundPush.play()
return k
def updateBoardState(lvl, default, direction, px, py, push = False):
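    # Move the player one square in `direction` (pushing a box ahead of it when `push` is True),
    # restoring the vacated square from the unmodified `default` level layout.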
if direction == UP:
if default[px][py-1] in (T, K):
lvl[px][py-1] = M
else:
lvl[px][py-1] = P
if default[px][py] in (B, F, P):
lvl[px][py] = F
else:
lvl[px][py] = T
if push:
if default[px][py-2] in (T, K):
lvl[px][py-2] = K
else:
lvl[px][py-2] = B
elif direction == DOWN:
if default[px][py+1] in (T, K):
lvl[px][py+1] = M
else:
lvl[px][py+1] = P
if default[px][py] in (B, F, P):
lvl[px][py] = F
else:
lvl[px][py] = T
if push:
if default[px][py+2] in (T, K):
lvl[px][py+2] = K
else:
lvl[px][py+2] = B
elif direction == RIGHT:
if default[px+1][py] in (T, K):
lvl[px+1][py] = M
else:
lvl[px+1][py] = P
if default[px][py] in (B, F, P):
lvl[px][py] = F
else:
lvl[px][py] = T
if push:
if default[px+2][py] in (T, K):
lvl[px+2][py] = K
else:
lvl[px+2][py] = B
elif direction == LEFT:
if default[px-1][py] in (T, K):
lvl[px-1][py] = M
else:
lvl[px-1][py] = P
if default[px][py] in (B, F, P):
lvl[px][py] = F
else:
lvl[px][py] = T
if push:
if default[px-2][py] in (T, K):
lvl[px-2][py] = K
else:
lvl[px-2][py] = B
return lvl
def checkSolution(lvl):
for i in range(len(lvl)):
        for j in range(len(lvl[i])):
if lvl[i][j] == 'b':
return False
return True
def autoSave(sc, level = 0):
with open('settings.txt', 'r') as file:
data = file.readlines()
if level == 0:
data[3] = str(int(data[3])+1)+ '\n'
data[4] = str(sc)+ '\n'
else:
data[3] = str(1) + '\n'
data[4] = str(0) + '\n'
with open('settings.txt', W) as file:
file.writelines( data )
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-21 07:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customercorner', '0004_auto_20171021_0703'),
]
operations = [
migrations.AddField(
model_name='preproductinfo',
name='Investment_cost',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AlterField(
model_name='preproductinfo',
name='Soil_type',
field=models.CharField(choices=[(b'alluvial', b'Alluvial'), (b'loamy', b'Loamy')], default=None, max_length=7),
),
]
|
#
# Test if network forward function runs
# Copyright EAVISE
#
import inspect
import pytest
import torch
import lightnet as ln
classification_networks = ['Darknet', 'Darknet19', 'Darknet53', 'MobileDarknet19', 'MobilenetV1', 'MobilenetV2']
anchor_detection_networks = ['DYolo', 'MobilenetYolo', 'MobileYoloV2', 'MobileYoloV2Upsample', 'TinyYoloV2', 'TinyYoloV3', 'YoloV2', 'YoloV2Upsample', 'YoloV3', 'Yolt']
corner_detection_networks = ['Cornernet', 'CornernetSqueeze']
special_networks = ['YoloFusion']
@pytest.fixture(scope='module')
def input_tensor():
def _input_tensor(dimension, channels=3, batch=1):
return torch.rand(batch, channels, dimension, dimension)
return _input_tensor
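# The tests below feed each network a square input whose side length equals the network's
# `inner_stride`, so the spatial size divides cleanly through every downsampling stage.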
# Base classification networks
@pytest.mark.parametrize('network', classification_networks)
def test_classification_cpu(network, input_tensor):
uut = getattr(ln.models, network)(1000).eval()
it = input_tensor(uut.inner_stride)
output_tensor = uut(it)
assert output_tensor.dim() == 2
assert output_tensor.shape[0] == it.shape[0]
assert output_tensor.shape[1] == uut.num_classes
@pytest.mark.parametrize('network', classification_networks)
@pytest.mark.cuda
@pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
def test_classification_cuda(network, input_tensor):
uut = getattr(ln.models, network)(1000).to('cuda')
it = input_tensor(uut.inner_stride, batch=2).to('cuda')
output_tensor = uut(it)
assert output_tensor.dim() == 2
assert output_tensor.shape[0] == it.shape[0]
assert output_tensor.shape[1] == uut.num_classes
# Anchor detection networks
@pytest.mark.parametrize('network', anchor_detection_networks)
def test_anchor_detection_cpu(network, input_tensor):
uut = getattr(ln.models, network)(20).eval()
it = input_tensor(uut.inner_stride)
output_tensor = uut(it)
if isinstance(output_tensor, torch.Tensor):
assert output_tensor.dim() == 4
assert output_tensor.shape[0] == it.shape[0]
assert output_tensor.shape[1] == len(uut.anchors) * (5 + uut.num_classes)
assert output_tensor.shape[2] == it.shape[2] // uut.stride
assert output_tensor.shape[3] == it.shape[3] // uut.stride
else:
for i, tensor in enumerate(output_tensor):
assert tensor.dim() == 4
assert tensor.shape[0] == it.shape[0]
assert tensor.shape[1] == len(uut.anchors[i]) * (5 + uut.num_classes)
assert tensor.shape[2] == it.shape[2] // uut.stride[i]
assert tensor.shape[3] == it.shape[3] // uut.stride[i]
@pytest.mark.parametrize('network', anchor_detection_networks)
@pytest.mark.cuda
@pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
def test_anchor_detection_cuda(network, input_tensor):
uut = getattr(ln.models, network)(20).to('cuda')
it = input_tensor(uut.inner_stride, batch=2).to('cuda')
output_tensor = uut(it)
if isinstance(output_tensor, torch.Tensor):
assert output_tensor.dim() == 4
assert output_tensor.shape[0] == it.shape[0]
assert output_tensor.shape[1] == len(uut.anchors) * (5 + uut.num_classes)
assert output_tensor.shape[2] == it.shape[2] // uut.stride
assert output_tensor.shape[3] == it.shape[3] // uut.stride
else:
for i, tensor in enumerate(output_tensor):
assert tensor.dim() == 4
assert tensor.shape[0] == it.shape[0]
assert tensor.shape[1] == len(uut.anchors[i]) * (5 + uut.num_classes)
assert tensor.shape[2] == it.shape[2] // uut.stride[i]
assert tensor.shape[3] == it.shape[3] // uut.stride[i]
# Corner detection networks
@pytest.mark.parametrize('network', corner_detection_networks)
def test_corner_detection_cpu(network, input_tensor):
uut = getattr(ln.models, network)(20).eval()
it = input_tensor(uut.inner_stride)
output_tensor = uut(it)
if isinstance(output_tensor, torch.Tensor):
assert output_tensor.dim() == 4
assert output_tensor.shape[0] == it.shape[0]
assert output_tensor.shape[1] == (uut.num_classes + 3) * 2
assert output_tensor.shape[2] == it.shape[2] // uut.stride
assert output_tensor.shape[3] == it.shape[3] // uut.stride
else:
for tensor in output_tensor:
assert tensor.dim() == 4
assert tensor.shape[0] == it.shape[0]
assert tensor.shape[1] == (uut.num_classes + 3) * 2
assert tensor.shape[2] == it.shape[2] // uut.stride
assert tensor.shape[3] == it.shape[3] // uut.stride
@pytest.mark.parametrize('network', corner_detection_networks)
@pytest.mark.cuda
@pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
def test_corner_detection_cuda(network, input_tensor):
uut = getattr(ln.models, network)(20).to('cuda')
it = input_tensor(uut.inner_stride, batch=2).to('cuda')
output_tensor = uut(it)
if isinstance(output_tensor, torch.Tensor):
assert output_tensor.dim() == 4
assert output_tensor.shape[0] == it.shape[0]
assert output_tensor.shape[1] == (uut.num_classes + 3) * 2
assert output_tensor.shape[2] == it.shape[2] // uut.stride
assert output_tensor.shape[3] == it.shape[3] // uut.stride
else:
for tensor in output_tensor:
assert tensor.dim() == 4
assert tensor.shape[0] == it.shape[0]
assert tensor.shape[1] == (uut.num_classes + 3) * 2
assert tensor.shape[2] == it.shape[2] // uut.stride
assert tensor.shape[3] == it.shape[3] // uut.stride
# YoloFusion
def test_yolofusion_cpu(input_tensor):
it = input_tensor(ln.models.YoloFusion.inner_stride, 4)
for fusion in (0, 1, 10, 22, 27):
uut = ln.models.YoloFusion(20, fuse_layer=fusion).eval()
output_tensor = uut(it)
assert output_tensor.dim() == 4
assert output_tensor.shape[0] == it.shape[0]
assert output_tensor.shape[1] == len(uut.anchors) * (5 + uut.num_classes)
assert output_tensor.shape[2] == it.shape[2] // uut.stride
assert output_tensor.shape[3] == it.shape[3] // uut.stride
@pytest.mark.cuda
@pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
def test_yolofusion_cuda(input_tensor):
it = input_tensor(ln.models.YoloFusion.inner_stride * 2, 4).to('cuda')
for fusion in (0, 1, 10, 22, 27):
uut = ln.models.YoloFusion(20, fuse_layer=fusion).to('cuda')
output_tensor = uut(it)
assert output_tensor.dim() == 4
assert output_tensor.shape[0] == it.shape[0]
assert output_tensor.shape[1] == len(uut.anchors) * (5 + uut.num_classes)
assert output_tensor.shape[2] == it.shape[2] // uut.stride
assert output_tensor.shape[3] == it.shape[3] // uut.stride
# All networks tested?
def test_all_networks_tested():
networks = [
net for net in dir(ln.models)
if (inspect.isclass(getattr(ln.models, net)))
and (issubclass(getattr(ln.models, net), torch.nn.Module))
]
tested_networks = set(
anchor_detection_networks
+ corner_detection_networks
+ classification_networks
+ special_networks
)
for net in networks:
if net not in tested_networks:
raise NotImplementedError(f'Network [{net}] is not being tested!')
|
from .job import Job
from .email_sub import EmailSub
from .user import User
|
#!/usr/bin/env python3
import pyqtgraph as pg
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets
from pyqtgraph.widgets.PlotWidget import *
from pyqtgraph.imageview import *
from pyqtgraph.widgets.GraphicsLayoutWidget import GraphicsLayoutWidget
from pyqtgraph.graphicsItems.GradientEditorItem import addGradientListToDocstring
from pyqtgraph.widgets.GraphicsView import GraphicsView
import matplotlib.cm
import collections.abc
QAPP = None
class ImageViewPfaff(pg.ImageView):
images = []
def image(*args, **kargs):
"""
Create and return an :class:`ImageWindow <pyqtgraph.ImageWindow>`
(this is just a window with :class:`ImageView <pyqtgraph.ImageView>` widget inside), show image data inside.
Will show 2D or 3D image data.
Accepts a *title* argument to set the title of the window.
All other arguments are used to show data. (see :func:`ImageView.setImage() <pyqtgraph.ImageView.setImage>`)
"""
mkQApp()
w = ImageWindow(*args, **kargs)
images.append(w)
w.show()
return w
def buildMenu(self):
super(ImageViewPfaff, self).buildMenu()
        self.trendAction = QtWidgets.QAction("Trend", self.menu)
self.trendAction.setCheckable(True)
self.trendAction.toggled.connect(self.trendToggled)
self.menu.addAction(self.trendAction)
def __init__(self,additionalCmaps=[], setColormap=None, **kargs):
super(ImageViewPfaff, self).__init__(**kargs)
self.trendroi=pg.LineROI([0, 60], [20, 80], width=5)
self.trendroi.setZValue(30)
self.view.addItem(self.trendroi)
self.trendroi.hide()
self.gradientEditorItem = self.ui.histogram.item.gradient
self.activeCm = "grey"
self.mplCmaps = {}
if len(additionalCmaps) > 0:
self.registerCmap(additionalCmaps)
if setColormap is not None:
self.gradientEditorItem.restoreState(setColormap)
def registerCmap(self, cmapNames):
""" Add matplotlib cmaps to the GradientEditors context menu"""
self.gradientEditorItem.menu.addSeparator()
savedLength = self.gradientEditorItem.length
self.gradientEditorItem.length = 100
        # iterate over the list of cmap names and check if they're available in MPL
for cmapName in cmapNames:
if not hasattr(matplotlib.cm, cmapName):
print('[MplCmapImageView] Unknown cmap name: \'{}\'. Your Matplotlib installation might be outdated.'.format(cmapName))
else:
# create a Dictionary just as the one at the top of GradientEditorItem.py
cmap = getattr(matplotlib.cm, cmapName)
self.mplCmaps[cmapName] = {'ticks': cmapToColormap(cmap), 'mode': 'rgb'}
# Create the menu entries
# The following code is copied from pyqtgraph.ImageView.__init__() ...
px = QtGui.QPixmap(100, 15)
p = QtGui.QPainter(px)
self.gradientEditorItem.restoreState(self.mplCmaps[cmapName])
grad = self.gradientEditorItem.getGradient()
brush = QtGui.QBrush(grad)
p.fillRect(QtCore.QRect(0, 0, 100, 15), brush)
p.end()
                label = QtWidgets.QLabel()
                label.setPixmap(px)
                label.setContentsMargins(1, 1, 1, 1)
                act = QtWidgets.QWidgetAction(self.gradientEditorItem)
act.setDefaultWidget(label)
act.triggered.connect(self.cmapClicked)
act.name = cmapName
self.gradientEditorItem.menu.addAction(act)
self.gradientEditorItem.length = savedLength
def cmapClicked(self, b=None):
"""onclick handler for our custom entries in the GradientEditorItem's context menu"""
act = self.sender()
self.gradientEditorItem.restoreState(self.mplCmaps[act.name])
self.activeCm = act.name
def setColorMap(self, colormap):
"""Set the color map.
============= =========================================================
**Arguments**
colormap (A ColorMap() instance) The ColorMap to use for coloring
images.
============= =========================================================
"""
self.ui.histogram.gradient.setColorMap(colormap)
def getProcessedImage(self):
"""Returns the image data after it has been processed by any normalization options in use.
"""
if self.imageDisp is None:
image = self.normalize(self.image)
self.imageDisp = image
self._imageLevels = self.quickMinMax(self.imageDisp)
self.levelMin = 0
self.levelMax = 2
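            # Note: the display levels are pinned to the fixed range [0, 2] rather than the quickMinMax estimate.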
return self.imageDisp
@addGradientListToDocstring()
def setPredefinedGradient(self, name):
"""Set one of the gradients defined in :class:`GradientEditorItem <pyqtgraph.graphicsItems.GradientEditorItem>`.
Currently available gradients are:
"""
self.ui.histogram.gradient.loadPreset(name)
def trendToggled(self):
showRoiPlot = False
if self.trendAction.isChecked():
print('showing trendroi')
showRoiPlot = True
self.trendroi.show()
#self.ui.roiPlot.show()
self.ui.roiPlot.setMouseEnabled(True, True)
            self.ui.splitter.setSizes([int(self.height()*0.6), int(self.height()*0.4)])
self.roiCurve.show()
self.roiChanged()
self.ui.roiPlot.showAxis('left')
else:
self.trendroi.hide()
self.ui.roiPlot.setMouseEnabled(False, False)
self.roiCurve.hide()
self.ui.roiPlot.hideAxis('left')
if self.hasTimeAxis():
showRoiPlot = True
mn = self.tVals.min()
mx = self.tVals.max()
self.ui.roiPlot.setXRange(mn, mx, padding=0.01)
self.timeLine.show()
self.timeLine.setBounds([mn, mx])
self.ui.roiPlot.show()
if not self.trendAction.isChecked():
self.ui.splitter.setSizes([self.height()-35, 35])
else:
self.timeLine.hide()
#self.ui.roiPlot.hide()
self.ui.roiPlot.setVisible(showRoiPlot)
# def normalize(self, image):
# """
# Process *image* using the normalization options configured in the
# control panel.
# This can be repurposed to process any data through the same filter.
# """
# if self.ui.normOffRadio.isChecked():
# return image
# div = self.ui.normDivideRadio.isChecked()
# norm = image.view(np.ndarray).copy()
# #if div:
# #norm = ones(image.shape)
# #else:
# #norm = zeros(image.shape)
# if div:
# norm = norm.astype(np.float64)
# if self.ui.normTimeRangeCheck.isChecked() and image.ndim == 3:
# (sind, start) = self.timeIndex(self.normRgn.lines[0])
# (eind, end) = self.timeIndex(self.normRgn.lines[1])
# #print start, end, sind, eind
# #n = image[sind:eind+1].mean(axis=0)
# print('averaging time range...')
# if eind<sind: #swap order if it is wrong
# sind,eind=eind,sind
# n = np.nanmean(image[sind:eind+1],axis=0)
# n.shape = (1,) + n.shape
# if div:
# print('performing division...')
# norm /= n
# else:
# norm=norm.astype(np.float64)
# norm -= n
# if self.ui.normFrameCheck.isChecked() and image.ndim == 3:
# n = image.mean(axis=1).mean(axis=1)
# n.shape = n.shape + (1, 1)
# if div:
# norm /= n
# else:
# norm -= n
# if self.ui.normROICheck.isChecked() and image.ndim == 3:
# n = self.normRoi.getArrayRegion(norm, self.imageItem, (1, 2)).mean(axis=1).mean(axis=1)
# n = n[:,np.newaxis,np.newaxis]
# #print start, end, sind, eind
# if div:
# norm /= n
# else:
# norm -= n
# return norm
def quickMinMax(self, data):
"""
Estimate the min/max values of *data* by subsampling.
"""
while data.size > 1e6:
ax = np.argmax(data.shape)
sl = [slice(None)] * data.ndim
sl[ax] = slice(None, None, 2)
            data = data[tuple(sl)]
if data.dtype=='float64':
data[~np.isfinite(data)] = np.nan
return np.nanmin(data), np.nanmax(data)
# def updateNorm(self):
# if self.ui.normTimeRangeCheck.isChecked():
# self.normRgn.show()
# else:
# self.normRgn.hide()
# if self.ui.normROICheck.isChecked():
# self.normRoi.show()
# else:
# self.normRoi.hide()
# if not self.ui.normOffRadio.isChecked():
# self.imageDisp = None
# self.updateImage(autoHistogramRange=False)
# self.autoLevels()
# self.roiChanged()
# self.sigProcessingChanged.emit(self)
def mkQApp():
    if QtWidgets.QApplication.instance() is None:
        global QAPP
        QAPP = QtWidgets.QApplication([])
class ImageWindow(ImageViewPfaff):
#sigClosed = QtCore.Signal(object)
"""
(deprecated; use :class:`~pyqtgraph.ImageView` instead)
"""
def __init__(self, *args, **kargs):
mkQApp()
ImageView.__init__(self)
if 'title' in kargs:
self.setWindowTitle(kargs['title'])
del kargs['title']
if len(args) > 0 or len(kargs) > 0:
self.setImage(*args, **kargs)
self.show()
def closeEvent(self, event):
ImageView.closeEvent(self, event)
self.sigClosed.emit(self)
def cmapToColormap(cmap, nTicks=16):
"""
Converts a Matplotlib cmap to pyqtgraphs colormaps. No dependency on matplotlib.
Parameters:
*cmap*: Cmap object. Imported from matplotlib.cm.*
*nTicks*: Number of ticks to create when dict of functions is used. Otherwise unused.
"""
# Case #1: a dictionary with 'red'/'green'/'blue' values as list of ranges (e.g. 'jet')
# The parameter 'cmap' is a 'matplotlib.colors.LinearSegmentedColormap' instance ...
if hasattr(cmap, '_segmentdata'):
colordata = getattr(cmap, '_segmentdata')
        if ('red' in colordata) and isinstance(colordata['red'], collections.abc.Sequence):
# print("[cmapToColormap] RGB dicts with ranges")
# collect the color ranges from all channels into one dict to get unique indices
posDict = {}
for idx, channel in enumerate(('red', 'green', 'blue')):
for colorRange in colordata[channel]:
posDict.setdefault(colorRange[0], [-1, -1, -1])[idx] = colorRange[2]
indexList = list(posDict.keys())
indexList.sort()
# interpolate missing values (== -1)
for channel in range(3): # R,G,B
startIdx = indexList[0]
emptyIdx = []
for curIdx in indexList:
if posDict[curIdx][channel] == -1:
emptyIdx.append(curIdx)
elif curIdx != indexList[0]:
for eIdx in emptyIdx:
rPos = (eIdx - startIdx) / (curIdx - startIdx)
vStart = posDict[startIdx][channel]
vRange = (posDict[curIdx][channel] - posDict[startIdx][channel])
posDict[eIdx][channel] = rPos * vRange + vStart
startIdx = curIdx
del emptyIdx[:]
for channel in range(3): # R,G,B
for curIdx in indexList:
posDict[curIdx][channel] *= 255
posList = [[i, posDict[i]] for i in indexList]
return posList
# Case #2: a dictionary with 'red'/'green'/'blue' values as functions (e.g. 'gnuplot')
        elif ('red' in colordata) and isinstance(colordata['red'], collections.abc.Callable):
# print("[cmapToColormap] RGB dict with functions")
indices = np.linspace(0., 1., nTicks)
            luts = [np.clip(np.array(colordata[rgb](indices), dtype=float), 0, 1) * 255 \
for rgb in ('red', 'green', 'blue')]
return list(zip(indices, list(zip(*luts))))
# If the parameter 'cmap' is a 'matplotlib.colors.ListedColormap' instance, with the attributes 'colors' and 'N'
elif hasattr(cmap, 'colors') and hasattr(cmap, 'N'):
colordata = getattr(cmap, 'colors')
# Case #3: a list with RGB values (e.g. 'seismic')
if len(colordata[0]) == 3:
# print("[cmapToColormap] list with RGB values")
indices = np.linspace(0., 1., len(colordata))
scaledRgbTuples = [(rgbTuple[0] * 255, rgbTuple[1] * 255, rgbTuple[2] * 255) for rgbTuple in colordata]
return list(zip(indices, scaledRgbTuples))
# Case #4: a list of tuples with positions and RGB-values (e.g. 'terrain')
# -> this section is probably not needed anymore!?
elif len(colordata[0]) == 2:
# print("[cmapToColormap] list with positions and RGB-values. Just scale the values.")
scaledCmap = [(idx, (vals[0] * 255, vals[1] * 255, vals[2] * 255)) for idx, vals in colordata]
return scaledCmap
# Case #X: unknown format or datatype was the wrong object type
else:
raise ValueError("[cmapToColormap] Unknown cmap format or not a cmap!")
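# Example usage (sketch; `image_view` stands for any ImageViewPfaff instance):
#   state = {'ticks': cmapToColormap(matplotlib.cm.viridis), 'mode': 'rgb'}
#   image_view.ui.histogram.item.gradient.restoreState(state)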
images = []
def image(*args, **kargs):
"""
Create and return an :class:`ImageWindow <pyqtgraph.ImageWindow>`
(this is just a window with :class:`ImageView <pyqtgraph.ImageView>` widget inside), show image data inside.
Will show 2D or 3D image data.
Accepts a *title* argument to set the title of the window.
All other arguments are used to show data. (see :func:`ImageView.setImage() <pyqtgraph.ImageView.setImage>`)
"""
mkQApp()
w = ImageWindow(*args, **kargs)
images.append(w)
w.show()
return w
|
#!/usr/bin/env python
import mock
import os
import socket
import threading
import time
import unittest
from uhppote_rfid import ControllerSocket, SocketConnectionException, SocketTransmitException
class TestControllerSocket(unittest.TestCase):
"""
Tests UHPPOTE socket transmission by emulating the control board's server.
"""
def setUp(self):
"""
.. function:: setUp()
Runs a server locally on port 60000 to listen for connections to respond accordingly.
"""
self.server = socket.socket()
self.server.bind(('127.0.0.1', 60000))
self.server.listen(1)
self.socket = ControllerSocket('127.0.0.1')
def tearDown(self):
"""
.. function:: tearDown()
Cleanly shuts down the test suite's server.
"""
self.socket.close()
        self.socket = None
self.server.close()
self.server = None
# Socket.__init__
def test_constructor_NegativePort_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('127.0.0.1', -1)
def test_constructor_ZeroPort_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('127.0.0.1', 0)
def test_constructor_LargePort_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('127.0.0.1', 65535 + 1)
def test_constructor_BlankPort_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('127.0.0.1', '')
def test_constructor_NonIntStringPort_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('127.0.0.1', 'ab')
def test_constructor_FloatPort_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('127.0.0.1', 1.1)
def test_constructor_ByteArrayPort_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('127.0.0.1', bytearray([0, 5, 2]))
def test_constructor_EmptyHost_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('')
def test_constructor_IntegerHost_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket(55)
def test_constructor_ByteArrayHost_TooLongException(self):
with self.assertRaises(ValueError):
ControllerSocket(bytearray([127, 0, 0, 1, 5]))
def test_constructor_ByteArrayHost_TooShortException(self):
with self.assertRaises(ValueError):
ControllerSocket(bytearray([127, 0, 0]))
def test_constructor_NegativeIP01_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('-1.0.0.0')
def test_constructor_NegativeIP02_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('0.-1.0.0')
def test_constructor_NegativeIP03_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('0.0.-3.0')
def test_constructor_NegativeIP04_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('0.0.0.-1')
def test_constructor_TooLongHost_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.longhost.long')
def test_constructor_BadChar_Exception(self):
with self.assertRaises(ValueError):
ControllerSocket('Hello*World')
def test_constructor_DefaultPort_Valid(self):
self.assertEquals(self.socket.getPort(), 60000)
def test_constructor_IntegerPort_Valid(self):
socket = ControllerSocket("127.0.0.1", 59)
self.assertEquals(socket.getPort(), 59)
def test_constructor_StringIntegerPort_Valid(self):
socket = ControllerSocket("127.0.0.1", '128')
self.assertEquals(socket.getPort(), 128)
def test_constructor_StringHost_Valid(self):
self.assertEquals(self.socket.getHost(), "127.0.0.1")
def test_constructor_ByteArrayHost_Valid(self):
socket = ControllerSocket(bytearray([127, 0, 0, 1]))
self.assertEquals(socket.getHost(), "127.0.0.1")
def test_constructor_DotAtEndHost_Valid(self):
socket = ControllerSocket("localhost.")
self.assertEquals(socket.getHost(), "localhost")
# Socket.connect
def test_connect_ZeroAttempts_Exception(self):
with self.assertRaises(ValueError):
self.socket.connect(0)
def test_connect_NegativeAttempts_Exception(self):
with self.assertRaises(ValueError):
self.socket.connect(-1)
def test_connect_DefaultAttemptsFail_Exception(self):
socket = ControllerSocket('badhost')
with self.assertRaises(SocketConnectionException):
socket.connect()
def test_connect_ConnectLocal_Success(self):
try:
self.socket.connect()
except SocketConnectionException, e:
self.fail("Unexpected SocketConnectionException raisesd: %s" % str(e))
# Socket.close
def test_close_CloseInactive_Success(self):
try:
self.socket.close()
except Exception, e:
self.fail("Unexpected Exception raised: %s" % str(e))
def test_close_CloseActive_Success(self):
self.socket.connect()
try:
self.socket.close()
except Exception, e:
self.fail("Unexpected Exception raisesd: %s" % str(e))
def test_close_ClosedNotConnected_Success(self):
self.assertFalse(self.socket.isConnected())
self.socket.connect()
self.assertTrue(self.socket.isConnected())
self.socket.close()
self.assertFalse(self.socket.isConnected())
# Socket.send
def test_send_Integer_Exception(self):
with self.assertRaises(ValueError):
self.socket.send(42)
def test_send_Float_Exception(self):
with self.assertRaises(ValueError):
self.socket.send(4.2)
def test_send_Complex_Exception(self):
with self.assertRaises(ValueError):
self.socket.send(complex(4, 2))
def test_send_Tuple_Exception(self):
with self.assertRaises(ValueError):
self.socket.send((4, 2))
def test_send_List_Exception(self):
with self.assertRaises(ValueError):
self.socket.send([4, 2])
def test_send_Dict_Exception(self):
with self.assertRaises(ValueError):
self.socket.send({
'a': 4,
'b': 2,
})
def test_send_Set_Exception(self):
with self.assertRaises(ValueError):
self.socket.send(set([4, 2]))
def test_send_FrozenSet_Exception(self):
with self.assertRaises(ValueError):
self.socket.send(frozenset([4, 2]))
def test_send_EmptyString_Exception(self):
with self.assertRaises(ValueError):
self.socket.send('')
def test_send_EmptyByteArray_Exception(self):
with self.assertRaises(ValueError):
self.socket.send(bytearray())
def test_send_EmptyBytes_Exception(self):
with self.assertRaises(ValueError):
self.socket.send(bytes(''))
def test_send_ClosedSocket_Exception(self):
self.socket.close()
with self.assertRaises(SocketConnectionException):
self.socket.send('hello')
def test_send_Interrupt_Exception(self):
with mock.patch('uhppote_rfid.controller_socket.socket') as mock_socket:
mockSocket = ControllerSocket('127.0.0.1')
mockSocket.socket.send.return_value = 0
mockSocket.connect()
with self.assertRaises(SocketTransmitException):
mockSocket.send('hello')
def test_send_String_Valid(self):
data = 'Hello World'
with mock.patch('uhppote_rfid.controller_socket.socket') as mock_socket:
mockSocket = ControllerSocket('127.0.0.1')
mockSocket.socket.send.return_value = len(data)
mockSocket.connect()
mockSocket.send(data)
mockSocket.socket.send.assert_called_with(data)
def test_send_ByteArray_Valid(self):
data = bytearray(['h', 'e', 'l', 'l', 'o'])
with mock.patch('uhppote_rfid.controller_socket.socket') as mock_socket:
mockSocket = ControllerSocket('127.0.0.1')
mockSocket.socket.send.return_value = len(data)
mockSocket.connect()
mockSocket.send(data)
mockSocket.socket.send.assert_called_with(data)
def test_send_Bytes_Valid(self):
data = bytes([10, 20, 30, 40])
with mock.patch('uhppote_rfid.controller_socket.socket') as mock_socket:
mockSocket = ControllerSocket('127.0.0.1')
mockSocket.socket.send.return_value = len(data)
mockSocket.connect()
mockSocket.send(data)
mockSocket.socket.send.assert_called_with(data)
# Socket.receive
def test_receive_NegativeLength_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive(-1)
def test_receive_ZeroLength_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive(0)
def test_receive_NotMultipleOf8_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive(50)
def test_receive_FloatLength_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive(8.8)
def test_receive_ComplexLength_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive(complex(4, 2))
def test_receive_TupleLength_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive((4, 2))
def test_receive_ListLength_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive([4])
def test_receive_DictLength_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive({
'a': 1
})
def test_receive_SetLength_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive(set([4, 2]))
def test_receive_FrozenSetLength_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive(frozenset([4, 2]))
def test_receive_StringEmpty_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive('')
def test_receive_StringAlpha_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive('a')
def test_receive_StringZero_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive('0')
def test_receive_StringNegative_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive('-1')
def test_receive_StringNotMultipleOf8_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive('50')
def test_receive_StringFloat_Exception(self):
with self.assertRaises(ValueError):
self.socket.receive('8.8')
def test_receive_StringSize_Valid(self):
pass
def test_receive_ClosedSocket_Exception(self):
self.socket.close()
with self.assertRaises(SocketConnectionException):
self.socket.receive()
def test_receive_Cutoff_Exception(self):
with mock.patch('uhppote_rfid.controller_socket.socket') as mock_socket:
mockSocket = ControllerSocket('127.0.0.1')
mockSocket.socket.recv.return_value = ''
mockSocket.connect()
with self.assertRaises(SocketTransmitException):
mockSocket.receive()
def test_receive_DefaultLength_Valid(self):
arr = [1, 2, 3, 4, 5, 6, 7, 8]
with mock.patch('uhppote_rfid.controller_socket.socket') as mock_socket:
mockSocket = ControllerSocket('127.0.0.1')
mockSocket.socket.recv.return_value = bytearray(arr)
data = bytearray()
for i in range(0, 8):
data.extend(arr)
mockSocket.connect()
self.assertEquals(mockSocket.receive(), data)
def test_receive_SetLength_Valid(self):
data = bytearray([1, 2, 3, 4, 5, 6, 7, 8])
with mock.patch('uhppote_rfid.controller_socket.socket') as mock_socket:
mockSocket = ControllerSocket('127.0.0.1')
mockSocket.socket.recv.return_value = data
mockSocket.connect()
self.assertEquals(mockSocket.receive(len(data)), data)
if __name__ == '__main__':
unittest.main()
|
from rest_framework import serializers
from .models import Game, Move, Player
from .statements.ifs import STATEMENTS as IF_STATEMENTS
from .statements.thens import STATEMENTS as THEN_STATEMENTS
class MoveSerializer(serializers.ModelSerializer):
"""
Model serializer for Move
"""
if_statement = serializers.SerializerMethodField()
if_statement_options = serializers.SerializerMethodField()
then_statement = serializers.SerializerMethodField()
then_statement_options = serializers.SerializerMethodField()
class Meta:
model = Move
fields = (
"guid",
"if_user",
"if_statement_options",
"if_statement",
"then_user",
"then_statement_options",
"then_statement",
"is_complete",
)
def get_if_statement(self, obj):
"""
Return if_statement if the move has been completed, otherwise empty string
"""
if obj.is_complete:
return {
"statement": IF_STATEMENTS[obj.if_statement].description,
"id": obj.if_statement,
}
return None
def get_if_statement_options(self, obj):
"""
Return if_statement_options
"""
return [
{"statement": IF_STATEMENTS[statement_id].description, "id": statement_id}
for statement_id in obj.if_statement_options.split(",")
]
def get_then_statement(self, obj):
"""
Return then_statement if the move has been completed, otherwise empty string
"""
if obj.is_complete:
return {
"statement": THEN_STATEMENTS[obj.then_statement].description,
"id": obj.then_statement,
}
return None
def get_then_statement_options(self, obj):
"""
Return then_statement_options
"""
return [
{"statement": THEN_STATEMENTS[statement_id].description, "id": statement_id}
for statement_id in obj.then_statement_options.split(",")
]
class PlayerSerializer(serializers.ModelSerializer):
"""
Model serializer for Player
"""
class Meta:
model = Player
fields = ("hp", "attack", "defense", "agility")
class GameSerializer(serializers.ModelSerializer):
"""
Model serializer for Game
"""
state = serializers.CharField()
player1_initial_stats = serializers.SerializerMethodField()
player2_initial_stats = serializers.SerializerMethodField()
moves = serializers.SerializerMethodField()
class Meta:
model = Game
fields = (
"state",
"guid",
"player1_user",
"player1_initial_stats",
"player2_user",
"player2_initial_stats",
"moves",
"winner",
"loser",
"is_draw",
)
def get_player1_initial_stats(self, obj):
"""
Return player1 stats fetched directly from database
"""
try:
return PlayerSerializer(Player.objects.get(id=obj.player1.id)).data
except (Player.DoesNotExist, AttributeError):
return None
def get_player2_initial_stats(self, obj):
"""
Return player2 stats fetched directly from database
"""
try:
return PlayerSerializer(Player.objects.get(id=obj.player2.id)).data
except (Player.DoesNotExist, AttributeError):
return None
def get_moves(self, obj):
"""
Assemble previous moves with Move stats and Player stats
"""
moves = []
for move in obj.move_set.order_by("id"):
if move.is_complete:
obj.play_single_move(move)
moves.append(
{
"move": MoveSerializer(move).data,
"player1_stats": PlayerSerializer(obj.player1).data,
"player2_stats": PlayerSerializer(obj.player2).data,
}
)
return moves
|
import jsonfile, json
def get_all_data():
reader = jsonfile.JSONFile()
reader.open_file('/home/pi/TrailSafe/Device/temp_data/buffer.data')
return reader.read()
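# The buffer file is expected to hold a JSON object of the form {"buffer": [...]}.
# queue() appends an entry to that list; dequeue() pops the most recent entry (LIFO) and rewrites the file.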
def dequeue():
try:
writer = jsonfile.JSONFile()
writer.open_file('/home/pi/TrailSafe/Device/temp_data/buffer.data')
buffers = get_all_data()
data = buffers['buffer'].pop()
writer.write(buffers)
return dict(data)
    except Exception:
return None
def queue(data):
writer = jsonfile.JSONFile()
writer.open_file('/home/pi/TrailSafe/Device/temp_data/buffer.data')
buffers = get_all_data()
buffers['buffer'].append(data)
writer.write(buffers)
|
# @Rexhino_Kovaci
# This is an extra exercise I challenged myself with during the Google Kickstart 2020 online sessions.
# The program takes user input for the root and its left and right subtrees and compares them,
# which lets us maintain a sorted list of numbers.
# It also reports whether the tree is balanced, i.e. whether the height difference between the left and right subtrees is 0 or 1.
class Tree(object):
from binarytree import Node
root = Node(int(input("Enter Root: ")))
root.left = Node(int(input("Enter Left Subtree: ")))
root.right = Node(int(input("Enter Right Subtree: ")))
print('Binary tree :', root)
print('List of nodes :', list(root))
print('Inorder of nodes :', root.inorder)
print('Size of tree :', root.size)
print('Height of tree :', root.height)
print('Properties of tree : \n', root.properties)
    # Print the node list, inorder traversal, size, height, and other properties of our balanced search tree.
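    # A direct balance check could look like this (sketch, assuming the binarytree package's Node.height attribute):
    #   def is_balanced(node):
    #       if node is None:
    #           return True
    #       left_h = node.left.height if node.left else -1
    #       right_h = node.right.height if node.right else -1
    #       return abs(left_h - right_h) <= 1 and is_balanced(node.left) and is_balanced(node.right)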
|
from _group_category import GroupCategorytBackend
from _user_category import UserCategorytBackend
from enums import CategoryEnum
CATEGORY_BACKEND_MAP = {
CategoryEnum.GROUP: GroupCategorytBackend,
CategoryEnum.USER: UserCategorytBackend,
CategoryEnum.OFFICE: UserCategorytBackend,
CategoryEnum.TASK: UserCategorytBackend,
CategoryEnum.SUB_TASK: UserCategorytBackend,
CategoryEnum.FIXTURE: UserCategorytBackend,
CategoryEnum.PROJECT: UserCategorytBackend,
}
|
import komand
from .schema import AssignLicenseToUserInput, AssignLicenseToUserOutput, Input, Output, Component
# Custom imports below
import requests
from komand.exceptions import PluginException
class AssignLicenseToUser(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="assign_license_to_user",
description=Component.DESCRIPTION,
input=AssignLicenseToUserInput(),
output=AssignLicenseToUserOutput())
def run(self, params={}):
user_principal_name = params.get(Input.USER_PRINCIPAL_NAME)
sku_id = params.get(Input.SKU_ID)
token = self.connection.access_token
base_url = "https://graph.microsoft.com/v1.0/users/%s/assignLicense" % user_principal_name
headers = {"Authorization": "Bearer %s" % token, "Content-Type": "application/json",}
body = {
"addLicenses": [{
"disabledPlans": [],
"skuId": sku_id
}],
"removeLicenses": []
}
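        # assignLicense payload: licenses to add (each with a skuId and optional disabled service plans)
        # and a list of license sku ids to remove.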
try:
response = requests.post(base_url, json=body, headers=headers)
        except requests.RequestException as error:
            raise PluginException(cause=f"There was an issue with the Assign License request. Double-check the user name: {user_principal_name}",
                                  data=str(error))
if response.status_code == 200:
return {Output.SUCCESS: True}
else:
raise PluginException(f"The response from Office365 indicated something went wrong: {response.status_code}",
data=response.text)
|
# Copyright 2016 The Spitfire Authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import copy
import optparse
import string
import cStringIO
import StringIO
import sys
import timeit
try:
import spitfire
import spitfire.compiler.util
import spitfire.compiler.options
except ImportError:
spitfire = None
try:
import Cheetah
import Cheetah.Template
except ImportError:
Cheetah = None
try:
import django
import django.conf
import django.template
except ImportError:
django = None
try:
import jinja2
except ImportError:
jinja2 = None
try:
import mako
import mako.template
except ImportError:
mako = None
TABLE_DATA = [
dict(a=1,
b=2,
c=3,
d=4,
e=5,
f=6,
g=7,
h=8,
i=9,
j=10) for x in range(1000)
]
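# Every benchmark below renders this 1000-row, 10-column table with one of the template engines.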
def get_spitfire_tests():
if not spitfire:
return []
tmpl_src = """
<table>
#for $row in $table
<tr>
#for $column in $row.values()
<td>$column</td>
#end for
</tr>
#end for
</table>
"""
tmpl_search_list = [{'table': TABLE_DATA}]
default_opts = spitfire.compiler.options.default_options
o1_opts = spitfire.compiler.options.o1_options
o2_opts = spitfire.compiler.options.o2_options
o3_opts = spitfire.compiler.options.o3_options
def _spitfire_baked_opts(o):
o = copy.copy(o)
o.baked_mode = True
o.generate_unicode = False
return o
baked_opts = _spitfire_baked_opts(default_opts)
baked_o1_opts = _spitfire_baked_opts(o1_opts)
baked_o2_opts = _spitfire_baked_opts(o2_opts)
baked_o3_opts = _spitfire_baked_opts(o3_opts)
tmpl = spitfire.compiler.util.load_template(tmpl_src,
'tmpl',
analyzer_options=default_opts)
tmpl_o1 = spitfire.compiler.util.load_template(tmpl_src,
'tmpl_o1',
analyzer_options=o1_opts)
tmpl_o2 = spitfire.compiler.util.load_template(tmpl_src,
'tmpl_o2',
analyzer_options=o2_opts)
tmpl_o3 = spitfire.compiler.util.load_template(tmpl_src,
'tmpl_o3',
analyzer_options=o3_opts)
tmpl_baked = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_baked',
analyzer_options=baked_opts)
tmpl_baked_o1 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_baked_o1',
analyzer_options=baked_o1_opts)
tmpl_baked_o2 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_baked_o2',
analyzer_options=baked_o2_opts)
tmpl_baked_o3 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_baked_o3',
analyzer_options=baked_o3_opts)
tmpl_unfiltered = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_unfiltered',
analyzer_options=default_opts,
compiler_options={'enable_filters': False})
tmpl_unfiltered_o1 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_unfiltered_o1',
analyzer_options=o1_opts,
compiler_options={'enable_filters': False})
tmpl_unfiltered_o2 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_unfiltered_o2',
analyzer_options=o2_opts,
compiler_options={'enable_filters': False})
tmpl_unfiltered_o3 = spitfire.compiler.util.load_template(
tmpl_src,
'tmpl_unfiltered_o3',
analyzer_options=o3_opts,
compiler_options={'enable_filters': False})
def test_spitfire():
"""Spitfire template"""
tmpl(search_list=tmpl_search_list).main()
def test_spitfire_o1():
"""Spitfire template -O1"""
tmpl_o1(search_list=tmpl_search_list).main()
def test_spitfire_o2():
"""Spitfire template -O2"""
tmpl_o2(search_list=tmpl_search_list).main()
def test_spitfire_o3():
"""Spitfire template -O3"""
tmpl_o3(search_list=tmpl_search_list).main()
def test_spitfire_baked():
"""Spitfire template baked"""
tmpl_baked(search_list=tmpl_search_list).main()
def test_spitfire_baked_o1():
"""Spitfire template baked -O1"""
        tmpl_baked_o1(search_list=tmpl_search_list).main()
def test_spitfire_baked_o2():
"""Spitfire template baked -O2"""
tmpl_baked_o2(search_list=tmpl_search_list).main()
def test_spitfire_baked_o3():
"""Spitfire template baked -O3"""
tmpl_baked_o3(search_list=tmpl_search_list).main()
def test_spitfire_unfiltered():
"""Spitfire template unfiltered"""
tmpl_unfiltered(search_list=tmpl_search_list).main()
def test_spitfire_unfiltered_o1():
"""Spitfire template unfiltered -O1"""
        tmpl_unfiltered_o1(search_list=tmpl_search_list).main()
def test_spitfire_unfiltered_o2():
"""Spitfire template unfiltered -O2"""
tmpl_unfiltered_o2(search_list=tmpl_search_list).main()
def test_spitfire_unfiltered_o3():
"""Spitfire template unfiltered -O3"""
tmpl_unfiltered_o3(search_list=tmpl_search_list).main()
return [
test_spitfire,
test_spitfire_o1,
test_spitfire_o2,
test_spitfire_o3,
test_spitfire_baked,
test_spitfire_baked_o1,
test_spitfire_baked_o2,
test_spitfire_baked_o3,
test_spitfire_unfiltered,
test_spitfire_unfiltered_o1,
test_spitfire_unfiltered_o2,
test_spitfire_unfiltered_o3,
]
def get_python_tests():
tmpl_table = string.Template('<table>\n$table\n</table>\n')
tmpl_row = string.Template('<tr>\n$row\n</tr>\n')
tmpl_column = string.Template('<td>$column</td>\n')
def _buffer_fn(write, table):
write('<table>\n')
for row in table:
write('<tr>\n')
for column in row.itervalues():
write('<td>')
write('%s' % column)
write('</td>\n')
write('</tr>\n')
write('</table>\n')
def test_python_template():
"""Python string template"""
rows = ''
for row in TABLE_DATA:
columns = ''
for column in row.itervalues():
columns = columns + tmpl_column.substitute(column=column)
rows = rows + tmpl_row.substitute(row=columns)
return tmpl_table.substitute(table=rows)
def test_python_stringio():
"""Python StringIO buffer"""
buffer = StringIO.StringIO()
_buffer_fn(buffer.write, TABLE_DATA)
return buffer.getvalue()
def test_python_cstringio():
"""Python cStringIO buffer"""
buffer = cStringIO.StringIO()
_buffer_fn(buffer.write, TABLE_DATA)
return buffer.getvalue()
def test_python_list():
"""Python list concatenation"""
buffer = []
_buffer_fn(buffer.append, TABLE_DATA)
return ''.join(buffer)
return [
test_python_template,
test_python_stringio,
test_python_cstringio,
test_python_list,
]
def get_cheetah_tests():
if not Cheetah:
return []
tmpl_src = """
<table>
#for $row in $table
<tr>
#for $column in $row.values()
<td>$column</td>
#end for
</tr>
#end for
</table>
"""
tmpl_search_list = [{'table': TABLE_DATA}]
tmpl = Cheetah.Template.Template(tmpl_src, searchList=tmpl_search_list)
def test_cheetah():
"""Cheetah template"""
tmpl.respond()
return [
test_cheetah,
]
def get_django_tests():
if not django:
return []
django.conf.settings.configure()
django.setup()
tmpl_src = """
<table>
{% for row in table %}
<tr>
{% for column in row.values %}
<td>{{ column }}</td>
{% endfor %}
</tr>
{% endfor %}
</table>
"""
tmpl_autoescaped_src = ('{% autoescape on %}' +
tmpl_src +
'{% endautoescape %}')
tmpl = django.template.Template(tmpl_src)
tmpl_autoescaped = django.template.Template(tmpl_autoescaped_src)
tmpl_context = django.template.Context({'table': TABLE_DATA})
def test_django():
"""Django template"""
tmpl.render(tmpl_context)
def test_django_autoescaped():
"""Django template autoescaped"""
tmpl_autoescaped.render(tmpl_context)
return [
test_django,
test_django_autoescaped,
]
def get_jinja2_tests():
if not jinja2:
return []
tmpl_src = """
<table>
{% for row in table %}
<tr>
{% for column in row.values() %}
<td>{{ column }}</td>
{% endfor %}
</tr>
{% endfor %}
</table>
"""
tmpl = jinja2.Template(tmpl_src)
tmpl_autoescaped = jinja2.Template(tmpl_src, autoescape=True)
def test_jinja2():
"""Jinja2 template"""
tmpl.render(table=TABLE_DATA)
def test_jinja2_autoescaped():
"""Jinja2 template autoescaped"""
tmpl_autoescaped.render(table=TABLE_DATA)
return [
test_jinja2,
test_jinja2_autoescaped,
]
def get_mako_tests():
if not mako:
return []
tmpl_src = """
<table>
% for row in table:
<tr>
% for column in row.values():
<td>${column}</td>
% endfor
</tr>
% endfor
</table>
"""
tmpl = mako.template.Template(tmpl_src)
tmpl_autoescaped = mako.template.Template(tmpl_src, default_filters=['h'])
def test_mako():
"""Mako template"""
tmpl.render(table=TABLE_DATA)
def test_mako_autoescaped():
"""Mako template autoescaped"""
tmpl_autoescaped.render(table=TABLE_DATA)
return [
test_mako,
test_mako_autoescaped,
]
def time_test(test, number):
# Put the test in the global scope for timeit.
name = 'timeit_%s' % test.__name__
globals()[name] = test
# Time the test.
timer = timeit.Timer(setup='from __main__ import %s;' % name,
stmt='%s()' % name)
time = timer.timeit(number=number) / number
if time < 0.00001:
result = ' (not installed?)'
else:
result = '%16.2f ms' % (1000 * time)
print '%-35s %s' % (test.__doc__, result)
def run_tests(which=None, number=100, compare=False):
if number > 100:
print 'Running benchmarks %d times each...' % number
print
if compare:
groups = ['cheetah', 'django', 'jinja2', 'mako', 'python', 'spitfire']
else:
groups = ['spitfire']
    # Build the full list of eligible tests.
tests = []
missing_engines = []
for g in groups:
test_list_fn = 'get_%s_tests' % g
test = globals()[test_list_fn]()
if test:
tests.extend(test)
else:
missing_engines.append(g)
# Optionally filter by a set of matching test name (sub)strings.
if which:
which_tests = []
for t in tests:
for w in which:
if w.lower() in t.__name__.lower():
which_tests.append(t)
tests = which_tests
# Report any missing template engines.
if missing_engines:
sys.stderr.write(
'The following template engines are not installed and will be '
'skipped in the benchmark: %r\n' % missing_engines)
# Run the tests.
for t in tests:
time_test(t, number)
def profile_tests(which=None):
print 'Profiling...'
print
import hotshot, hotshot.stats
profile_data = 'template.prof'
profile = hotshot.Profile(profile_data)
profile.runcall(run_tests, which=which, number=1, compare=False)
stats = hotshot.stats.load(profile_data)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
print
stats.print_stats()
print 'Profile data written to %s' % profile_data
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('-n', '--number', type='int', default=100)
option_parser.add_option('-c',
'--compare',
action='store_true',
default=False)
option_parser.add_option('-p',
'--profile',
action='store_true',
default=False)
(options, args) = option_parser.parse_args()
if options.profile:
profile_tests(which=args)
else:
run_tests(which=args, number=options.number, compare=options.compare)
if __name__ == '__main__':
main()
|
# Generated by Django 3.2.9 on 2021-12-02 22:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0009_alter_post_hours'),
]
operations = [
migrations.DeleteModel(
name='Foo',
),
migrations.RemoveField(
model_name='post',
name='hours',
),
migrations.AddField(
model_name='post',
name='end_time',
field=models.TimeField(default='3:00', verbose_name='Chamber Ending Time'),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='rating',
field=models.IntegerField(default=0, max_length=20),
),
migrations.AddField(
model_name='post',
name='start_time',
field=models.TimeField(default='12:00', verbose_name='Chamber Beginning Time'),
preserve_default=False,
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Microsoft's pancake sorting problem
@Author: AC
2018-4-8
'''
__author__ = 'AC'
##############################################
#------------------import--------------------#
##############################################
import random
import copy
##############################################
#----------------Class definition------------#
##############################################
class PieSorting(object):
n_search = 0
n_exceed = 0
n_sorted = 0
def __init__(self, PieCnt = None, PieArray = None):
if PieArray:
self.PieCnt = len(PieArray)
self.PieArray = PieArray
elif PieCnt:
self.PieCnt = PieCnt
self.PieArray = list(range(0,PieCnt))
random.shuffle(self.PieArray)
else:
            raise ValueError('Either PieCnt or PieArray must be provided.')
self.MaxSwap = self.PieCnt*2
# self.MinSwap = self._calMinSwap(self.PieArray)
self.PieArraySorted = copy.deepcopy(self.PieArray)
self.PieArraySorted.sort()
self.SwapSteps = []
self.SwapStepsLoc = []
self.SwapStepsTmp = [[] for x in range(self.PieCnt*2)]
self.SwapStepsLocTmp = [[] for x in range(self.PieCnt*2)]
def _calMinSwap(self, NewPieArray):
        '''
        Calculate a lower bound on the remaining flips.
        For every pair of adjacent pies that are already consecutive, the minimum number of flips is reduced by one.
        '''
temp = []
for i in range(1, len(NewPieArray)):
temp.append(NewPieArray[i] - NewPieArray[i-1])
return self.PieCnt - 1 - temp.count(-1) - temp.count(1)
def _isSorted(self, NewPieArray):
return self.PieArraySorted == NewPieArray
def _reverse(self, NewPieArray, start, end):
'''reverse elements in NewPieArray from start to end'''
assert start <= end
temp = NewPieArray[start:end+1]
temp.reverse()
NewPieArray[start:end+1] = temp
def Sort(self, NewPieArray, step):
'''
        Recursively explore every possible sequence of flips.
:param NewPieArray: a copy of PieArray
:param step: swap step
:return:
'''
self.n_search += 1
# step exceed max swap
if step + self._calMinSwap(NewPieArray) >= self.MaxSwap:
# if step + 0 >= self.MaxSwap:
self.n_exceed += 1
return
# sort complete
if self._isSorted(NewPieArray):
if step <= self.MaxSwap:
self.MaxSwap = step
self.SwapSteps = copy.deepcopy(self.SwapStepsTmp[0:step])
self.SwapStepsLoc = copy.deepcopy(self.SwapStepsLocTmp[0:step])
self.n_sorted += 1
return
# recursion
for i in range(1, self.PieCnt):
self._reverse(NewPieArray, 0, i)
self.SwapStepsTmp[step] = copy.deepcopy(NewPieArray)
self.SwapStepsLocTmp[step] = i
self.Sort(NewPieArray, step + 1)
self._reverse(NewPieArray, 0, i)
def OutputStatus(self):
print('Origin Array: %s'%self.PieArray)
print('Sorted Array: %s'%self.PieArraySorted)
for idx in range(len(self.SwapStepsLoc)):
print('%d : %s'%(self.SwapStepsLoc[idx],self.SwapSteps[idx]))
print('Search Times = %d'%self.n_search)
print('Exceed Times = %d'%self.n_exceed)
print('Sorted Times = %d'%self.n_sorted)
def RunSort(self):
NewPieArray = copy.deepcopy(self.PieArray)
self.Sort(NewPieArray, step=0)
self.OutputStatus()
##############################################
#-------------Script entry point-------------#
##############################################
if __name__ == '__main__':
# main thread
psort = PieSorting(PieCnt=8)
psort.RunSort()
|
import logging
from rest_framework.response import Response
from rest_framework.views import exception_handler
logger = logging.getLogger(__name__)
def core_exception_handler(exc, context) -> Response: # type: ignore
logger.error(exc)
logger.error(context)
response = exception_handler(exc, context)
handlers = {
"NotFound": _handle_not_found,
"NotAuthenticated": _handle_not_authenticated,
}
exception_class = exc.__class__.__name__
if exception_class in handlers:
return handlers[exception_class](exc, context, response)
return _handle_generic_error(exc, context, response)
def _handle_generic_error(exc, context, response) -> Response: # type: ignore
if response:
response.data = {"errors": response.data}
else:
response = Response(
data={"errors": {"detail": "An error occurred."}},
status=500,
)
return response
def _handle_not_found(exc, context, response) -> Response: # type: ignore
view = context.get("view", None)
if view and hasattr(view, "queryset") and view.queryset is not None:
response.data = {"errors": {"detail": response.data["detail"]}}
else:
response = _handle_generic_error(exc, context, response)
return response
def _handle_not_authenticated(exc, context, response) -> Response: # type: ignore
response.data = {
"errors": {"detail": "Authentication credentials were not provided."}
}
response.status_code = 401
return response
|
1. CPU COMMUNICATES WITH RAM VIA THE WHAT?
Memory Bus
2. WHY DOES THE PROCESSOR GET A SPEED BOOST WHEN IT ACCESSES NEARBY, SEQUENTIAL MEMORY ADDRESSES?
Because of the cache.
3. GIVEN NUMBER 96, CONVERT INTO BINARY, THEN ADD RESULTING DIGITS IN DECIMAL. WHAT IS THE RESULT:
2
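A quick worked check (added illustration, not part of the original quiz answer): 96 is 1100000 in binary, and summing those digits gives 1 + 1 = 2.
n = 96
binary = bin(n)[2:]                          # '1100000'
print(binary, sum(int(d) for d in binary))   # 1100000 2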
4. *TIME* COMPLEXITY OF PERFORMING MATHEMATICAL OPERATIONS ON FIXED-WIDTH INTEGERS?
*SPACE* COMPLEXITY OF FIXED-WIDTH INTEGERS?
O(1)
5. IF EACH MEMORY SLOT HOLDS 8 BITS AND WE WANT TO STORE AN ARRAY OF 64-BIT INTEGERS,
HOW MANY MEMORY ADDRESSES WILL BE REQUIRED TO STORE AN ARRAY OF 5 INTEGERS?
40
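Worked arithmetic for the answer above (added illustration): each 64-bit integer needs 64 / 8 = 8 single-byte addresses, so 5 integers need 5 * 8 = 40 addresses.
bits_per_int = 64
bits_per_address = 8
num_ints = 5
print(num_ints * (bits_per_int // bits_per_address))   # 40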
6. IN ORDER TO STORE STRINGS IN MEMORY, EACH CHARACTER IN THE STRING MUST BE ENCODED SO THAT IT CAN BE STORED AS BINARY. ASCII IS ONE EXAMPLE OF A CHARACTER SET. EACH CHARACTER IN ASCII CAN BE REPRESENTED BY 7 BITS (ALTHOUGH THEY ARE COMMONLY STORED AS 8 BITS). GIVEN THAT, WHAT IS THE MAXIMUM NUMBER OF CHARACTERS THAT COULD BE IN THE ASCII SET?
128
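A one-line sanity check (added illustration): 7 bits can encode 2**7 distinct values.
print(2 ** 7)   # 128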
7. INT, REVERSE, RETURN INT
Given an integer, write a function that reverses the bits (in binary) and returns the integer result.
def csReverseIntegerBits(n):
    binary = bin(n)[2:]      # e.g. n = 417 -> bin(n) = '0b110100001' -> '110100001'
    reverse = binary[::-1]   # '100001011'
    integer = int(reverse, 2)
    return integer
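Example call (added illustration, using the value 417 = 0b110100001 hinted at in the comments above):
print(csReverseIntegerBits(417))   # 0b110100001 reversed is 0b100001011, which is 267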
8. CS BINARY TO ASCII:
Given a binary string (ASCII encoded), write a function that returns the equivalent decoded text.
def csBinaryToASCII(binary):
if binary == "":
return ""
str_bin = str(binary)
split_binary = [str_bin[i:i+8] for i in range(0, len(str_bin), 8)]
decoded_str = ""
for bin_letter in split_binary:
letter = chr(int(bin_letter, 2))
decoded_str += letter
return decoded_str
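Example call (added illustration with a made-up input, not part of the original answer):
print(csBinaryToASCII("0110100001101001"))   # '01101000' -> 'h', '01101001' -> 'i', giving "hi"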
9. RETURN CERTAIN STRINGS BASED ON WHETHER A NUMBER HAS CERTAIN FACTORS:
Given a number, write a function that converts that number into a string that contains "raindrop sounds" corresponding to certain potential factors. A factor is a number that evenly divides into another number, leaving no remainder. The simplest way to test if one number is a factor of another is to use the modulo operator.
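A minimal modulo illustration (added, not part of the original quiz): 10 % 5 == 0, so 5 is a factor of 10, while 10 % 3 == 1, so 3 is not.
print(10 % 5 == 0, 10 % 3 == 0)   # True False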
def csRaindrops(number):
#INPUT: Number
#OUTPUT: strings based on if it is a factor or not
#Create result array
#Test if factor of 3
# push string if factor of 3
#Test if factor of 5
# push string if factor of 5
#Test if factor of 7
# push string if factor of 7
#If none
#return digits of number as string
#join rain sounds
#return array at 0
raindrop_sounds = []
if number % 3 == 0 or number % 5 == 0 or number % 7 == 0:
if number % 3 == 0:
raindrop_sounds.append("Pling")
if number % 5 == 0:
raindrop_sounds.append("Plang")
if number % 7 == 0:
raindrop_sounds.append("Plong")
joined = "".join(raindrop_sounds)
return joined
else:
return str(number)
print(csRaindrops(105))
|
"""Test metar_parser."""
# 3rd Party
import pytest
from pyiem.util import utc
# Local
import pywwa
from pywwa.workflows import metar_parser
from pywwa.testing import get_example_file
def test_api():
"""Test that we can load things to ignore."""
metar_parser.load_ignorelist()
metar_parser.cleandb()
metar_parser.ready(None)
@pytest.mark.parametrize("database", ["postgis"])
def test_processor(cursor):
"""Test basic parsing."""
data = get_example_file("METAR.txt")
pywwa.CTX.utcnow = utc(2011, 11, 5, 14)
for mtr in metar_parser.process_data(data).metars:
metar_parser.do_db(cursor, mtr)
|
import logging
import azure.functions as func
import psycopg2
import os
from datetime import datetime
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import *
def main(msg: func.ServiceBusMessage):
notification_id = int(msg.get_body().decode('utf-8'))
logging.info(
f"Python ServiceBus queue trigger processed message: {str(notification_id)}")
# Get connection to database
conn = psycopg2.connect(
host=os.environ["POSTGRES_URL"],
database=os.environ["POSTGRES_DB"],
user=os.environ["POSTGRES_USER"],
password=os.environ["POSTGRES_PW"]
)
logging.info(f"Successfully connected to database")
try:
# Get notification message and subject from database using the notification_id
cur = conn.cursor()
cmd = f"SELECT message, subject FROM notification WHERE id={str(notification_id)}"
cur.execute(cmd)
logging.info(
f"Notification ID {str(notification_id)}: Get message and subject")
        message = None
        subject = None
        for row in cur.fetchall():
            message = row[0]
            subject = row[1]
if not message or not subject:
error_message = f"Notification ID {str(notification_id)}: No message or subject"
logging.error(error_message)
raise Exception(error_message)
logging.info(
f"Notification ID {str(notification_id)}: Message '{message}', Subject '{subject}'")
# Get attendees email and name
cmd = f"SELECT first_name, last_name, email FROM attendee"
cur.execute(cmd)
count = 0
# Loop through each attendee and send an email with a personalized subject
for row in cur.fetchall():
first_name = row[0]
last_name = row[1]
email = row[2]
logging.info(
f"Notification ID {str(notification_id)}: First name '{first_name}', last name '{last_name}', email '{email}'")
from_email = Email(os.environ['ADMIN_EMAIL_ADDRESS'])
to_emails = To(email)
personalized_subject = f"Hello, {first_name}! {subject}"
content = Content("text/plain", message)
mail = Mail(from_email, to_emails, personalized_subject, content)
sg = SendGridAPIClient(os.environ['SENDGRID_API_KEY'])
sg.send(mail)
count += 1
# Update the notification table by setting the completed date and
# updating the status with the total number of attendees notified
status = f"Notified {str(count)} attendees"
date = datetime.now()
logging.info(f"Notification ID {str(notification_id)}: {status}@{date}")
cmd = f"UPDATE notification SET status='{status}' WHERE id={str(notification_id)}"
cur.execute(cmd)
cmd = f"UPDATE notification SET completed_date='{str(date)}' WHERE id={str(notification_id)}"
cur.execute(cmd)
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
logging.error(error)
finally:
# Close connection
conn.close()
|
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.models.commondb.status.status_unit_test.py is part of The RAMSTK
# Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for testing Status module algorithms and models."""
# Standard Library Imports
from datetime import date, timedelta
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKStatusRecord
from ramstk.models.dbtables import RAMSTKStatusTable
from tests import MockDAO, UnitTestGetterSetterMethods, UnitTestSelectMethods
@pytest.mark.usefixtures("test_record_model", "unit_test_table_model")
class TestCreateStatusModels:
"""Class for unit testing Status model __init__() methods.
Because each table model contains unique attributes, these methods must be
local to the module being tested.
"""
@pytest.mark.unit
def test_record_model_create(self, test_record_model):
"""Should return a Status record model instance."""
assert isinstance(test_record_model, RAMSTKStatusRecord)
# Verify class attributes are properly initialized.
assert test_record_model.__tablename__ == "ramstk_status"
assert test_record_model.status_id == 1
assert test_record_model.status_type == "action"
assert test_record_model.name == "Initiated"
assert test_record_model.description == "Action has been initiated."
@pytest.mark.unit
def test_table_model_create(self, unit_test_table_model):
"""Should return a Status table model instance."""
assert isinstance(unit_test_table_model, RAMSTKStatusTable)
assert isinstance(unit_test_table_model.tree, Tree)
assert isinstance(unit_test_table_model.dao, MockDAO)
assert unit_test_table_model._lst_id_columns == [
"status_id",
]
assert unit_test_table_model._tag == "status"
assert unit_test_table_model._root == 0
assert pub.isSubscribed(
unit_test_table_model.do_get_attributes, "request_get_status_attributes"
)
assert pub.isSubscribed(
unit_test_table_model.do_get_tree, "request_get_status_tree"
)
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestSelectStatus(UnitTestSelectMethods):
"""Class for unit testing Status table do_select() and do_select_all()."""
__test__ = True
_record = RAMSTKStatusRecord
_tag = "status"
@pytest.mark.usefixtures("test_attributes", "test_record_model")
class TestGetterSetterStatus(UnitTestGetterSetterMethods):
"""Class for unit testing Status table methods that get or set."""
__test__ = True
_id_columns = [
"status_id",
]
_test_attr = "name"
_test_default_value = "Status Name"
@pytest.mark.unit
def test_get_attributes(self, test_record_model):
"""Should return a dict of attribute key:value pairs.
This method must be local because the attributes are different for each
database record model.
"""
_attributes = test_record_model.get_attributes()
assert _attributes["status_id"] == 1
assert _attributes["status_type"] == "action"
assert _attributes["name"] == "Initiated"
assert _attributes["description"] == "Action has been initiated."
|
"""Awox device scanner class"""
import asyncio
import logging
from homeassistant.core import HomeAssistant
from .awoxmeshlight import AwoxMeshLight
# import awoxmeshlight from .awoxmeshlight
from .bluetoothctl import Bluetoothctl
_LOGGER = logging.getLogger(__name__)
START_MAC_ADDRESS = "A4:C1"
class DeviceScanner:
@staticmethod
async def connect_device(address: str, username: str, password: str, mesh_key: str) -> bool:
"""Check if device is available"""
light = DeviceScanner._connect(address, username, password, mesh_key)
if light.session_key:
light.setColor(0, 254, 0)
light.disconnect()
return True
return False
@staticmethod
async def async_find_devices(hass: HomeAssistant):
def init():
return Bluetoothctl()
devices = {}
try:
bl = await hass.async_add_executor_job(init)
_LOGGER.info("Scanning 30 seconds for AwoX bluetooth mesh devices!")
await hass.async_add_executor_job(bl.start_scan)
await asyncio.sleep(30)
for mac, dev in (await hass.async_add_executor_job(bl.get_available_devices)).items():
if mac.startswith(START_MAC_ADDRESS):
devices[mac] = dev
await hass.async_add_executor_job(bl.stop_scan)
await hass.async_add_executor_job(bl.shutdown)
except Exception as e:
_LOGGER.exception('Failed: %s', e)
pass
return devices
@staticmethod
async def async_find_available_devices(hass: HomeAssistant, username: str, password: str):
"""Gather a list of device"""
result = []
devices = await DeviceScanner.async_find_devices(hass)
_LOGGER.debug("Found %d AwoX devices" % (len(devices)))
for mac, dev in devices.items():
_LOGGER.debug("Device %s [%s]" % (dev['name'], dev['mac']))
try:
mylight = DeviceScanner._connect(dev['mac'], username, password)
if mylight.session_key:
result.append({
'mac': dev['mac'],
'name': mylight.getModelNumber()
})
mylight.disconnect()
            except Exception:
                _LOGGER.debug('Failed to connect [%s]' % dev['mac'])
        return result
@staticmethod
def _connect(address, username: str, password: str, mesh_key: str = None) -> AwoxMeshLight:
# Try to connect with factory defaults
light = AwoxMeshLight(address)
light.connect()
# When connected with factory defaults and `mesh_key` is set add device to our mesh
if light.session_key and mesh_key is not None:
_LOGGER.info('Add %s to our mesh', address)
light.setMesh(username, password, mesh_key)
if not light.session_key:
light = AwoxMeshLight(address, username, password)
light.connect()
return light
|
from amfibious.crawler import AmfibiousCrawler
from amfibious.parser import AmfibiousParser
from amfibious.amfibious_store import AmfiMongo
|
import functools
import importlib
import inspect
import json
import logging
import pprint
import sys
import threading
import time
thread_local = None
logger = logging.getLogger(__name__)
data_logger = logging.getLogger('generic_profiler_data')
WRAPPED = {None, __name__}
ENABLE_CALLER_FRAME_LOG = True
FUNC_NAME_BLACK_LIST = set()
def get_context():
global thread_local
if not thread_local:
thread_local = threading.local()
return thread_local
def clear():
ctx = get_context()
if not ctx:
return
if hasattr(ctx, 'performance_tree_root'):
delattr(ctx, 'performance_tree_root')
if hasattr(ctx, 'performance_tree'):
delattr(ctx, 'performance_tree')
if hasattr(ctx, 'parameter'):
delattr(ctx, 'parameter')
def get_caller_info():
frame = inspect.currentframe().f_back.f_back
file_name = frame.f_code.co_filename
line_num = frame.f_lineno
return file_name, line_num
class PerformanceTree(object):
def __init__(self, parent, func, location, start_ts):
self.parent = parent
self.children = []
self.func = func
self.location = location
self.start_ts = start_ts
self.end_ts = 0
if parent:
parent.children.append(self)
def finish(self, end_ts):
self.end_ts = end_ts
def init_performance_tree(func, location, start_ts, parameter={}):
ctx = get_context()
tree = PerformanceTree(None, func, location, start_ts)
ctx.performance_tree_root = tree
ctx.performance_tree = tree
ctx.parameter = parameter
return tree
def get_performance_tree_node(func, location, start_ts, para_args=None, para_kwargs=None):
ctx = get_context()
if hasattr(ctx, 'performance_tree_root'):
node = PerformanceTree(ctx.performance_tree, func, location, start_ts)
ctx.performance_tree = node
return node
return init_performance_tree(func, location, start_ts, parameter={'args': para_args, 'kwargs': para_kwargs})
def get_total_time():
ctx = get_context()
if ctx is not None and hasattr(ctx, 'performance_tree_root'):
node = ctx.performance_tree_root
return node.end_ts - node.start_ts
return -1
PERCENTAGE_THRESHOLD = 2
def log_performance_tree(threshold=3):
ctx = get_context()
if not ctx or not hasattr(ctx, 'performance_tree_root'):
return
root = ctx.performance_tree_root
all_time = root.end_ts - root.start_ts
if all_time < threshold:
return
def get_node_info(func, location, time_elapsed, percentage):
return '[{:5.1f}%,{:5.3f}s] {}<{}>'.format(percentage, time_elapsed, func, location)
def search(node, output_parent):
pre_child = None
output_children = []
last_ts = node.start_ts
for child in node.children:
time_elapsed = child.start_ts - last_ts
percentage = time_elapsed * 100 / all_time
output_pre = get_node_info('<interval>',
'from {} to {}'.format(pre_child.location if pre_child else 'start',
child.location),
time_elapsed, percentage)
output_child = {}
last_ts = search(child, output_child)
pre_child = child
output_children.append(output_pre)
output_children.append(output_child)
if node.children:
last_ts = node.children[-1].end_ts
time_elapsed = node.end_ts - last_ts
percentage = time_elapsed * 100 / all_time if all_time else 0
output_children.append(
get_node_info('<interval>', 'from {} to end'.format(node.children[-1].location),
time_elapsed, percentage))
time_elapsed = node.end_ts - node.start_ts
percentage = time_elapsed * 100 / all_time if all_time else 0
output_node = {get_node_info(node.func, node.location, time_elapsed, percentage): output_children}
output_parent.update(output_node)
return node.end_ts
parameter = ctx.parameter
logger.warn('{} costs {}s, {}'.format(root.func, root.end_ts - root.start_ts, pprint.pformat(parameter)))
output_root = {'parameter': ctx.parameter}
search(root, output_root)
data_logger.info(json.dumps(output_root))
def wraps(func, **kwargs):
f = functools.wraps(func, **kwargs)
if hasattr(func, 'im_class'):
f.im_class = func.im_class
return f
def should_patch(func_name):
return (not func_name.startswith('__')) and (func_name not in FUNC_NAME_BLACK_LIST)
class GenericProfiler(object):
def __init__(self, profiler_cfg):
self.profiler_cfg = profiler_cfg
ctx = get_context()
def wrapper(self, func):
@wraps(func)
def wrap(*args, **kwargs):
ctx = get_context()
if ctx is not None:
# wrap everything except real func call in the try statement
try:
caller_file = ''
caller_line = ''
if ENABLE_CALLER_FRAME_LOG:
caller_file, caller_line = get_caller_info()
func_name = '{}.{}.{}'.format(func.im_class.__module__, func.im_class.__name__, func.__name__) \
if hasattr(func, 'im_class') else '{}.{}'.format(func.__module__, func.__name__)
start_ts = time.time()
node = get_performance_tree_node(func_name, '{}:{}'.format(caller_file, caller_line), start_ts,
para_args=args, para_kwargs=kwargs)
except Exception as e:
logger.error(e)
# call the real func
res = func(*args, **kwargs)
try:
end_ts = time.time()
node.finish(end_ts)
# if the node is root, print performance tree
if node.parent is not None:
ctx.performance_tree = ctx.performance_tree.parent
except Exception as e:
logger.error(e)
return res
else:
logger.warn('ctx is None')
return func(*args, **kwargs)
return wrap
def wrap_class(self, clazz, funcs=None):
if clazz in WRAPPED:
return
WRAPPED.add(clazz)
logger.info('wrap class: ' + clazz.__module__ + '.' + clazz.__name__)
for para_name in (dir(clazz) if not funcs else funcs):
if not should_patch(para_name):
continue
if not hasattr(clazz, para_name):
logger.error('there is no field named `{}` in {}'.format(para_name, clazz))
continue
para = getattr(clazz, para_name)
if inspect.ismethod(para):
if not hasattr(para, 'im_class'):
setattr(para, 'im_class', clazz)
setattr(clazz, para_name, self.wrapper(para))
elif isinstance(para, staticmethod):
if not hasattr(para, 'im_class'):
setattr(para, 'im_class', clazz)
setattr(clazz, para_name, staticmethod(self.wrapper(para.__func__)))
elif isinstance(para, classmethod):
if not hasattr(para, 'im_class'):
setattr(para, 'im_class', clazz)
setattr(clazz, para_name, classmethod(self.wrapper(para.__func__)))
def wrap_class_by_name(self, module_str, class_names, funcs=None):
if not isinstance(class_names, list):
class_names = [class_names]
try:
module = importlib.import_module(module_str)
except ImportError as e:
logger.error(e)
return
for class_name in class_names:
if hasattr(module, class_name):
clazz = getattr(module, class_name)
self.wrap_class(clazz, funcs)
else:
logger.error('module {} has no class named {}'.format(module, class_name))
def wrap_module(self, module_str, funcs=None):
if module_str in WRAPPED:
return
WRAPPED.add(module_str)
logger.info('wrap module: ' + module_str)
        # Ignore the error if the module does not exist
try:
module = importlib.import_module(module_str)
except ImportError as e:
logger.error(e)
return
for para_name in (dir(module) if not funcs else funcs):
if not should_patch(para_name):
continue
if not hasattr(module, para_name):
logger.error('there is no field named `{}` in {}'.format(para_name, module))
continue
para = getattr(module, para_name)
if inspect.isfunction(para) and para.func_name != '<lambda>':
setattr(module, para_name, self.wrapper(para))
def wrap(self):
if hasattr(self.profiler_cfg, 'MODULES_TO_WRAP'):
for module in self.profiler_cfg.MODULES_TO_WRAP:
self.wrap_module(module)
if hasattr(self.profiler_cfg, 'CLASSES_TO_WRAP'):
for module_class_tuple in self.profiler_cfg.CLASSES_TO_WRAP:
self.wrap_class_by_name(*module_class_tuple)
if hasattr(self.profiler_cfg, 'FUNCTIONS_TO_WRAP'):
for module in self.profiler_cfg.FUNCTIONS_TO_WRAP.keys():
self.wrap_module(module, self.profiler_cfg.FUNCTIONS_TO_WRAP.get(module))
if hasattr(self.profiler_cfg, 'CLASS_FUNCTIONS_TO_WRAP'):
for module in self.profiler_cfg.CLASS_FUNCTIONS_TO_WRAP.keys():
for clazz in self.profiler_cfg.CLASS_FUNCTIONS_TO_WRAP.get(module):
self.wrap_class_by_name(module, clazz,
self.profiler_cfg.CLASS_FUNCTIONS_TO_WRAP.get(module).get(clazz))
|
from panther_base_helpers import gsuite_details_lookup as details_lookup
def rule(event):
if event['id'].get('applicationName') != 'groups_enterprise':
return False
return bool(
details_lookup('moderator_action', ['ban_user_with_moderation'], event))
def title(event):
return 'User [{}] banned another user from a group.'.format(
event.get('actor', {}).get('email'))
|
import json
import allure
from faker import Faker
from requests import Response
class BaseCase:
@allure.step('Get answer')
def get_answer(self, response: Response, name):
try:
response_as_dict = response.json()
except json.decoder.JSONDecodeError:
assert False, f"response is not JSON format. Response text is '{response.text}'"
assert name in response_as_dict, f"Response not have '{name}'"
return response_as_dict[name]
@allure.step('Get cookie')
def get_cookie(self, response: Response, cookie_name):
assert cookie_name in response.cookies, f"Cannot find cookie with name {cookie_name} in the last Response"
return response.cookies[cookie_name]
@allure.step('Get header')
def get_header(self, response: Response, headers_name):
assert headers_name in response.headers, f"Cannot find header with name {headers_name} in the last Response"
return response.headers[headers_name]
@allure.step('Get json value')
def get_json_value(self, response: Response, name):
        try:
            response_as_dict = response.json()
        except json.decoder.JSONDecodeError:
            assert False, f"Response is not in JSON format. Response text is '{response.text}'"
        assert name in response_as_dict, f"Response JSON doesn't have key '{name}'"
        return response_as_dict[name]
@allure.step('Prepare test data')
def prepare_register_data(self):
fake = Faker()
return {
'username': fake.user_name(),
'firstName': fake.first_name(),
'lastName': fake.last_name(),
'password': fake.password(),
'email': fake.ascii_company_email()
}
@allure.step('Prepare default login data')
def prepare_default_login_data(self):
return {
'email': 'vinkotov@example.com',
'password': '1234'
}
@allure.step('Prepare login data')
def prepare_login_data(self, email, password):
return {
'email': email,
'password': password
}
|
import cv2
import mediapipe as mp
import os
import time
class poseDetector():
def __init__(self,mode= False, upBody =False,smooth = True,detectionCon = 0.5,trackCon=0.5):
self.mode = mode
self.upBody = upBody
self.smooth = smooth
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpDraw = mp.solutions.drawing_utils
self.mpPose = mp.solutions.pose
        self.pose = self.mpPose.Pose(static_image_mode=self.mode, upper_body_only=self.upBody, smooth_landmarks=self.smooth, min_detection_confidence=self.detectionCon, min_tracking_confidence=self.trackCon)
def findPose(self,img,draw = True):
imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
self.results = self.pose.process(imgRGB)
#print(results.pose_landmarks)
if self.results.pose_landmarks:
if draw:
self.mpDraw.draw_landmarks(img,self.results.pose_landmarks,self.mpPose.POSE_CONNECTIONS)
return img
def getPosition(self,img,draw =True):
lmlist = []
if self.results.pose_landmarks:
for id,lm in enumerate(self.results.pose_landmarks.landmark):
h, w ,c = img.shape
cx,cy = int(lm.x*w) ,int(lm.y*h)
lmlist.append([id,cx,cy])
if draw:
cv2.circle(img,(cx,cy),10,(255,0,0),cv2.FILLED)
return lmlist
def main():
pTime = 0
path = os.path.dirname(os.path.realpath(__file__))+'/videos/'+'squats1.mp4'
cap = cv2.VideoCapture(path)
detector = poseDetector()
while True:
        success, img = cap.read()
        if not success:
            break
img = detector.findPose(img)
lmlist = detector.getPosition(img,draw=False)
if(len(lmlist)!=0):
print(lmlist[14])
cv2.circle(img,(lmlist[14][1],lmlist[14][2]),10,(0,0,255),cv2.FILLED)
cTime = time.time()
fps = 1/(cTime-pTime)
pTime = cTime
cv2.putText(img,str(int(fps)),(70,50),cv2.FONT_HERSHEY_PLAIN,3,
(255,0,0),3)
img = cv2.resize(img, (1100,1100))
cv2.imshow("Image",img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
|