"""Model Zoo"""
from .model_zoo import get_model, get_model_list
|
import csv
import json
import traceback
from ufdl.json.object_detection import Annotation, Polygon
from ufdl.joblauncher.core import load_class
from ufdl.joblauncher.core import AbstractJobExecutor
from ufdl.pythonclient.functional.object_detection.dataset import get_metadata, set_metadata, set_annotations_for_image
def read_rois(csv_file):
"""
Loads the specified ROIs CSV file and generates a list of Annotation objects
and a list of scores from it.
    :param csv_file: the CSV file to read
    :type csv_file: str
:return: the tuple of annotations list and scores list
:rtype: tuple
"""
annotations = []
scores = []
with open(csv_file, "r") as cf:
reader = csv.DictReader(cf)
for row in reader:
if ('x' in row) and ('y' in row) and ('w' in row) and ('h' in row) and ('label_str' in row) and ('score' in row):
polygon = None
if ('poly_x' in row) and ('poly_y' in row):
xs = row['poly_x'].split(",")
ys = row['poly_y'].split(",")
p = []
for x, y in zip(xs, ys):
p.append([int(float(x)), int(float(y))])
polygon = Polygon(points=p)
if polygon is not None:
annotation = Annotation(
x=int(float(row['x'])),
y=int(float(row['y'])),
width=int(float(row['w'])),
height=int(float(row['h'])),
label=row['label_str'],
polygon=polygon)
else:
annotation = Annotation(
x=int(float(row['x'])),
y=int(float(row['y'])),
width=int(float(row['w'])),
height=int(float(row['h'])),
label=row['label_str'])
annotations.append(annotation)
scores.append(float(row['score']))
return annotations, scores
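# Illustrative ROIs CSV layout consumed by read_rois() (column names taken from the
# accesses above; the values are made up):
#
#   x,y,w,h,label_str,score,poly_x,poly_y
#   10,20,30,40,car,0.87,"10,40,40,10","20,20,60,60"
#
# poly_x/poly_y hold comma-separated coordinates; when both columns are present,
# a Polygon is attached to the generated Annotation.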
def store_annotations(executor, job_pk, dataset_pk, img_name, annotations):
"""
Stores the annotations in the backend.
:param executor: the executor class this is done for
:type executor: AbstractJobExecutor
:param job_pk: the PK of the job being executed
:type job_pk: int
:param dataset_pk: the PK of the dataset these scores are calculated for
:type dataset_pk: int
:param img_name: the name of the image the scores were calculated for
:type img_name: str
:param annotations: the list of Annotation objects
:type annotations: list[Annotation]
"""
# set annotations for image
try:
set_annotations_for_image(executor.context, dataset_pk, img_name, annotations)
except:
executor.log_msg("Failed to add annotations generated by job %d to dataset %d!\n%s" % (job_pk, dataset_pk, traceback.format_exc()))
def store_scores(executor, job_pk, dataset_pk, img_name, scores):
"""
    Stores the scores in the backend.
:param executor: the executor class this is done for
:type executor: AbstractJobExecutor
:param job_pk: the PK of the job being executed
:type job_pk: int
:param dataset_pk: the PK of the dataset these scores are calculated for
:type dataset_pk: int
:param img_name: the name of the image the scores were calculated for
:type img_name: str
:param scores: the list of float scores
:type scores: list[float]
"""
try:
metadata = get_metadata(executor.context, dataset_pk, img_name)
if metadata == "":
metadata = dict()
else:
metadata = json.loads(metadata)
metadata['scores'] = scores
set_metadata(executor.context, dataset_pk, img_name, json.dumps(metadata))
except:
executor.log_msg("Failed to add scores of job %d for image %s in dataset %d!\n%s" % (job_pk, img_name, dataset_pk, traceback.format_exc()))
def calculate_confidence_scores(executor, job_pk, dataset_pk, img_name, confidence_score_classes, annotations, scores):
"""
    Calculates and stores confidence scores.
:param executor: the executor class this is done for
:type executor: AbstractJobExecutor
:param job_pk: the PK of the job being executed
:type job_pk: int
:param dataset_pk: the PK of the dataset these scores are calculated for
:type dataset_pk: int
:param img_name: the name of the image the scores were calculated for
:type img_name: str
:param confidence_score_classes: the list of class names
:type confidence_score_classes: list
:param annotations: the list of annotations to use for the score confidence calculation
:type annotations: list[Annotation]
:param scores: the list of scores to use for the confidence score calculation
:type scores: list
"""
# instantiate calculators
conf_score_obj = []
try:
for c in confidence_score_classes:
conf_score_obj.append(load_class(c)())
except:
executor.log_msg("Failed to instantiate confidence score classes: %s\n%s" % (str(confidence_score_classes), traceback.format_exc()))
# calculate the scores
if len(conf_score_obj) > 0:
try:
conf_scores = dict()
for c in conf_score_obj:
current = c.calculate(annotations, scores)
for k in current:
conf_scores[k] = current[k]
metadata = get_metadata(executor.context, dataset_pk, img_name)
if metadata == "":
metadata = dict()
else:
metadata = json.loads(metadata)
metadata['confidence'] = conf_scores
set_metadata(executor.context, dataset_pk, img_name, json.dumps(metadata))
except:
executor.log_msg("Failed to add confidence scores of job %d for image %s in dataset %d!\n%s" % (job_pk, img_name, dataset_pk, traceback.format_exc()))
|
import pyaudio
import wave
import numpy as np
RESPEAKER_RATE = 16000
RESPEAKER_CHANNELS = 2
RESPEAKER_WIDTH = 2
# run getDeviceInfo.py to get index
RESPEAKER_INDEX = 1 # refer to input device id
CHUNK = 1024
RECORD_SECONDS = 3
WAVE_OUTPUT_FILENAME = "output_one_channel.wav"
p = pyaudio.PyAudio()
stream = p.open(
rate=RESPEAKER_RATE,
format=p.get_format_from_width(RESPEAKER_WIDTH),
channels=RESPEAKER_CHANNELS,
input=True,
input_device_index=RESPEAKER_INDEX,)
print("* recording")
frames = []
for i in range(0, int(RESPEAKER_RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
    # extract channel 0 from the 2 interleaved channels; change the slice to [1::2] to extract channel 1
    a = np.frombuffer(data, dtype=np.int16)[0::2]
    frames.append(a.tobytes())
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(p.get_format_from_width(RESPEAKER_WIDTH)))
wf.setframerate(RESPEAKER_RATE)
wf.writeframes(b''.join(frames))
wf.close()
|
import re
import json
import logging
import ptf.testutils as testutils
import pytest
import time
from ipaddress import ip_network, IPv4Network
from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import wait, wait_until
from tests.common.dualtor.mux_simulator_control import *
from tests.common.dualtor.dual_tor_utils import *
logger = logging.getLogger(__name__)
SYSTEM_STABILIZE_MAX_TIME = 300
MONIT_STABILIZE_MAX_TIME = 420
OMEM_THRESHOLD_BYTES = 10485760  # 10MB
__all__ = [
'check_services',
'check_interfaces',
'check_bgp',
'check_dbmemory',
'check_monit',
'check_processes',
'check_mux_simulator']
@pytest.fixture(scope="module")
def check_services(duthosts):
def _check(*args, **kwargs):
check_results = []
for dut in duthosts:
logger.info("Checking services status on %s..." % dut.hostname)
networking_uptime = dut.get_networking_uptime().seconds
timeout = max((SYSTEM_STABILIZE_MAX_TIME - networking_uptime), 0)
interval = 20
logger.info("networking_uptime=%d seconds, timeout=%d seconds, interval=%d seconds" % \
(networking_uptime, timeout, interval))
check_result = {"failed": True, "check_item": "services", "host": dut.hostname}
if timeout == 0: # Check services status, do not retry.
services_status = dut.critical_services_status()
check_result["failed"] = False if all(services_status.values()) else True
check_result["services_status"] = services_status
else: # Retry checking service status
start = time.time()
elapsed = 0
while elapsed < timeout:
services_status = dut.critical_services_status()
check_result["failed"] = False if all(services_status.values()) else True
check_result["services_status"] = services_status
if check_result["failed"]:
wait(interval, msg="Not all services are started, wait %d seconds to retry. Remaining time: %d %s" % \
(interval, int(timeout - elapsed), str(check_result["services_status"])))
elapsed = time.time() - start
else:
break
logger.info("Done checking services status on %s" % dut.hostname)
check_results.append(check_result)
return check_results
return _check
def _find_down_phy_ports(dut, phy_interfaces):
down_phy_ports = []
intf_facts = dut.show_interface(command='status', include_internal_intfs=('201811' not in dut.os_version))['ansible_facts']['int_status']
for intf in phy_interfaces:
try:
if intf_facts[intf]['oper_state'] == 'down':
down_phy_ports.append(intf)
except KeyError:
down_phy_ports.append(intf)
return down_phy_ports
def _find_down_ip_ports(dut, ip_interfaces):
down_ip_ports = []
ip_intf_facts = dut.show_ip_interface()['ansible_facts']['ip_interfaces']
for intf in ip_interfaces:
try:
if ip_intf_facts[intf]['oper_state'] == 'down':
down_ip_ports.append(intf)
except KeyError:
down_ip_ports.append(intf)
return down_ip_ports
def _find_down_ports(dut, phy_interfaces, ip_interfaces):
"""Finds the ports which are operationally down
Args:
dut (object): The sonichost/sonicasic object
        phy_interfaces (list): List of all physical interfaces whose admin status is 'up'
ip_interfaces (list): List of the L3 interfaces
Returns:
[list]: list of the down ports
"""
down_ports = []
down_ports = _find_down_ip_ports(dut, ip_interfaces) + \
_find_down_phy_ports(dut, phy_interfaces)
return down_ports
@pytest.fixture(scope="module")
def check_interfaces(duthosts):
def _check(*args, **kwargs):
check_results = []
for dut in duthosts.frontend_nodes:
logger.info("Checking interfaces status on %s..." % dut.hostname)
networking_uptime = dut.get_networking_uptime().seconds
timeout = max((SYSTEM_STABILIZE_MAX_TIME - networking_uptime), 0)
interval = 20
logger.info("networking_uptime=%d seconds, timeout=%d seconds, interval=%d seconds" % \
(networking_uptime, timeout, interval))
down_ports = []
check_result = {"failed": True, "check_item": "interfaces", "host": dut.hostname}
for asic in dut.asics:
ip_interfaces = []
cfg_facts = asic.config_facts(host=dut.hostname,
source="persistent", verbose=False)['ansible_facts']
phy_interfaces = [k for k, v in cfg_facts["PORT"].items() if "admin_status" in v and v["admin_status"] == "up"]
if "PORTCHANNEL_INTERFACE" in cfg_facts:
ip_interfaces = cfg_facts["PORTCHANNEL_INTERFACE"].keys()
if "VLAN_INTERFACE" in cfg_facts:
ip_interfaces += cfg_facts["VLAN_INTERFACE"].keys()
logger.info(json.dumps(phy_interfaces, indent=4))
logger.info(json.dumps(ip_interfaces, indent=4))
if timeout == 0: # Check interfaces status, do not retry.
down_ports += _find_down_ports(asic, phy_interfaces, ip_interfaces)
check_result["failed"] = True if len(down_ports) > 0 else False
check_result["down_ports"] = down_ports
else: # Retry checking interface status
start = time.time()
elapsed = 0
while elapsed < timeout:
down_ports = _find_down_ports(asic, phy_interfaces, ip_interfaces)
check_result["failed"] = True if len(down_ports) > 0 else False
check_result["down_ports"] = down_ports
if check_result["failed"]:
wait(interval, msg="Found down ports, wait %d seconds to retry. Remaining time: %d, down_ports=%s" % \
(interval, int(timeout - elapsed), str(check_result["down_ports"])))
elapsed = time.time() - start
else:
break
logger.info("Done checking interfaces status on %s" % dut.hostname)
check_result["failed"] = True if len(down_ports) > 0 else False
check_result["down_ports"] = down_ports
check_results.append(check_result)
return check_results
return _check
@pytest.fixture(scope="module")
def check_bgp(duthosts):
def _check(*args, **kwargs):
check_results = []
for dut in duthosts.frontend_nodes:
def _check_bgp_status_helper():
asic_check_results = []
bgp_facts = dut.bgp_facts(asic_index='all')
for asic_index, a_asic_facts in enumerate(bgp_facts):
a_asic_result = False
a_asic_neighbors = a_asic_facts['ansible_facts']['bgp_neighbors']
if a_asic_neighbors:
down_neighbors = [k for k, v in a_asic_neighbors.items()
if v['state'] != 'established']
if down_neighbors:
if dut.facts['num_asic'] == 1:
check_result['bgp'] = {'down_neighbors' : down_neighbors }
else:
check_result['bgp' + str(asic_index)] = {'down_neighbors' : down_neighbors }
a_asic_result = True
else:
a_asic_result = False
if dut.facts['num_asic'] == 1:
if 'bgp' in check_result:
check_result['bgp'].pop('down_neighbors', None)
else:
if 'bgp' + str(asic_index) in check_result:
check_result['bgp' + str(asic_index)].pop('down_neighbors', None)
else:
a_asic_result = True
asic_check_results.append(a_asic_result)
if any(asic_check_results):
check_result['failed'] = True
return not check_result['failed']
logger.info("Checking bgp status on host %s ..." % dut.hostname)
check_result = {"failed": False, "check_item": "bgp", "host": dut.hostname}
networking_uptime = dut.get_networking_uptime().seconds
timeout = max(SYSTEM_STABILIZE_MAX_TIME - networking_uptime, 1)
interval = 20
wait_until(timeout, interval, _check_bgp_status_helper)
if (check_result['failed']):
for a_result in check_result.keys():
if a_result != 'failed':
# Dealing with asic result
if 'down_neighbors' in check_result[a_result]:
logger.info('BGP neighbors down: %s on bgp instance %s on dut %s' % (check_result[a_result]['down_neighbors'], a_result, dut.hostname))
else:
logger.info('No BGP neighbors are down on %s' % dut.hostname)
logger.info("Done checking bgp status on %s" % dut.hostname)
check_results.append(check_result)
return check_results
return _check
def _is_db_omem_over_threshold(command_output):
total_omem = 0
    re_omem = re.compile(r"omem=(\d+)")
result = False
for line in command_output:
m = re_omem.search(line)
if m:
omem = int(m.group(1))
total_omem += omem
logger.debug(json.dumps(command_output, indent=4))
if total_omem > OMEM_THRESHOLD_BYTES:
result = True
return result, total_omem
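# Illustrative redis "client list" output line as parsed above (field values are
# examples only); the omem field reports a client's output buffer memory in bytes:
#   "id=7 addr=127.0.0.1:52555 ... omem=0 events=r cmd=client"
# _is_db_omem_over_threshold sums all omem values and flags the check when the
# total exceeds OMEM_THRESHOLD_BYTES (10MB).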
@pytest.fixture(scope="module")
def check_dbmemory(duthosts):
def _check(*args, **kwargs):
check_results = []
for dut in duthosts:
logger.info("Checking database memory on %s..." % dut.hostname)
redis_cmd = "client list"
check_result = {"failed": False, "check_item": "dbmemory", "host": dut.hostname}
# check the db memory on the redis instance running on each instance
for asic in dut.asics:
res = asic.run_redis_cli_cmd(redis_cmd)['stdout_lines']
result, total_omem = _is_db_omem_over_threshold(res)
if result:
check_result["failed"] = True
check_result["total_omem"] = total_omem
logging.info("{} db memory over the threshold ".format(str(asic.namespace or '')))
break
logger.info("Done checking database memory on %s" % dut.hostname)
check_results.append(check_result)
return check_results
return _check
def _check_monit_services_status(check_result, monit_services_status):
"""
    @summary: Check whether each service monitored by Monit is in the correct status.
              If a service is in the "Not monitored" status, the sanity check skips it since the service
              was temporarily set to not be monitored by Monit.
    @return: A dictionary containing the testing result (failed or not failed) and the status of each service.
"""
check_result["services_status"] = {}
for service_name, service_info in monit_services_status.items():
check_result["services_status"].update({service_name: service_info["service_status"]})
if service_info["service_status"] == "Not monitored":
continue
if ((service_info["service_type"] == "Filesystem" and service_info["service_status"] != "Accessible")
or (service_info["service_type"] == "Process" and service_info["service_status"] != "Running")
or (service_info["service_type"] == "Program" and service_info["service_status"] != "Status ok")):
check_result["failed"] = True
return check_result
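# Illustrative shape of monit_services_status as consumed above (keys inferred from
# the accesses in _check_monit_services_status; service names and values are examples):
#   {
#       "rsyslog": {"service_type": "Process", "service_status": "Running"},
#       "root-overlay": {"service_type": "Filesystem", "service_status": "Accessible"},
#       "routeCheck": {"service_type": "Program", "service_status": "Status ok"},
#   }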
def get_arp_pkt_info(dut):
intf_mac = dut.facts['router_mac']
mgmt_ipv4 = None
mgmt_intf_facts = dut.get_running_config_facts()['MGMT_INTERFACE']
for mgmt_intf in mgmt_intf_facts:
for mgmt_ip in mgmt_intf_facts[mgmt_intf]:
if type(ip_network(mgmt_ip, strict=False)) is IPv4Network:
mgmt_ipv4 = mgmt_ip.split('/')[0]
return intf_mac, mgmt_ipv4
return intf_mac, mgmt_ipv4
@pytest.fixture(scope='module')
def check_mux_simulator(ptf_server_intf, tor_mux_intf, ptfadapter, upper_tor_host, lower_tor_host, \
recover_all_directions, toggle_simulator_port_to_upper_tor, toggle_simulator_port_to_lower_tor, check_simulator_read_side):
def _check(*args, **kwargs):
"""
@summary: Checks if the OVS bridge mux simulator is functioning correctly
@return: A dictionary containing the testing result of the PTF interface tested:
{
'failed': <True/False>,
'failed_reason': <reason string>,
'intf': '<PTF interface name> mux simulator'
}
"""
results = {
'failed': False,
'failed_reason': '',
'check_item': '{} mux simulator'.format(ptf_server_intf)
}
logger.info("Checking mux simulator status for PTF interface {}".format(ptf_server_intf))
ptf_port_index = int(ptf_server_intf.replace('eth', ''))
recover_all_directions(tor_mux_intf)
upper_tor_intf_mac, upper_tor_mgmt_ip = get_arp_pkt_info(upper_tor_host)
lower_tor_intf_mac, lower_tor_mgmt_ip = get_arp_pkt_info(lower_tor_host)
upper_tor_ping_tgt_ip = '10.10.10.1'
lower_tor_ping_tgt_ip = '10.10.10.2'
ptf_arp_tgt_ip = '10.10.10.3'
ping_cmd = 'ping -I {} {} -c 1 -W 1; true'
upper_tor_exp_pkt = testutils.simple_arp_packet(eth_dst='ff:ff:ff:ff:ff:ff',
eth_src=upper_tor_intf_mac,
ip_snd=upper_tor_mgmt_ip,
ip_tgt=upper_tor_ping_tgt_ip,
hw_snd=upper_tor_intf_mac)
lower_tor_exp_pkt = testutils.simple_arp_packet(eth_dst='ff:ff:ff:ff:ff:ff',
eth_src=lower_tor_intf_mac,
ip_snd=lower_tor_mgmt_ip,
ip_tgt=lower_tor_ping_tgt_ip,
hw_snd=lower_tor_intf_mac)
ptf_arp_pkt = testutils.simple_arp_packet(ip_tgt=ptf_arp_tgt_ip,
ip_snd=ptf_arp_tgt_ip,
arp_op=2)
# Clear ARP tables to start in consistent state
upper_tor_host.shell("ip neigh flush all")
lower_tor_host.shell("ip neigh flush all")
# Run tests with upper ToR active
toggle_simulator_port_to_upper_tor(tor_mux_intf)
try:
pytest_assert(check_simulator_read_side(tor_mux_intf) == 1)
except AssertionError:
results['failed'] = True
results['failed_reason'] = 'Unable to switch active link to upper ToR'
return results
# Ping from both ToRs, expect only message from upper ToR to reach PTF
upper_tor_host.shell(ping_cmd.format(tor_mux_intf, upper_tor_ping_tgt_ip))
try:
testutils.verify_packet(ptfadapter, upper_tor_exp_pkt, ptf_port_index)
except AssertionError:
results['failed'] = True
results['failed_reason'] = 'Packet from active upper ToR not received'
return results
lower_tor_host.shell(ping_cmd.format(tor_mux_intf, lower_tor_ping_tgt_ip))
try:
testutils.verify_no_packet(ptfadapter, lower_tor_exp_pkt, ptf_port_index)
except AssertionError:
results['failed'] = True
results['failed_reason'] = 'Packet from standby lower ToR received'
return results
# Send dummy ARP packets from PTF to ToR. Ensure that ARP is learned on both ToRs
upper_tor_host.shell("ip neigh flush all")
lower_tor_host.shell("ip neigh flush all")
testutils.send_packet(ptfadapter, ptf_port_index, ptf_arp_pkt)
upper_tor_arp_table = upper_tor_host.switch_arptable()['ansible_facts']['arptable']['v4']
lower_tor_arp_table = lower_tor_host.switch_arptable()['ansible_facts']['arptable']['v4']
try:
pytest_assert(ptf_arp_tgt_ip in upper_tor_arp_table)
except AssertionError:
results['failed'] = True
results['failed_reason'] = 'Packet from PTF not received on active upper ToR'
return results
try:
pytest_assert(ptf_arp_tgt_ip in lower_tor_arp_table)
except AssertionError:
results['failed'] = True
results['failed_reason'] = 'Packet from PTF not received on standby lower ToR'
return results
# Repeat all tests with lower ToR active
toggle_simulator_port_to_lower_tor(tor_mux_intf)
try:
pytest_assert(check_simulator_read_side(tor_mux_intf) == 2)
except AssertionError:
results['failed'] = True
results['failed_reason'] = 'Unable to switch active link to lower ToR'
return results
lower_tor_host.shell(ping_cmd.format(tor_mux_intf, lower_tor_ping_tgt_ip))
try:
testutils.verify_packet(ptfadapter, lower_tor_exp_pkt, ptf_port_index)
except AssertionError:
results['failed'] = True
results['failed_reason'] = 'Packet from active lower ToR not received'
return results
upper_tor_host.shell(ping_cmd.format(tor_mux_intf, upper_tor_ping_tgt_ip))
try:
testutils.verify_no_packet(ptfadapter, upper_tor_exp_pkt, ptf_port_index)
except AssertionError:
results['failed'] = True
results['failed_reason'] = 'Packet from standby upper ToR received'
return results
upper_tor_host.shell("ip neigh flush all")
lower_tor_host.shell("ip neigh flush all")
testutils.send_packet(ptfadapter, ptf_port_index, ptf_arp_pkt)
upper_tor_arp_table = upper_tor_host.switch_arptable()['ansible_facts']['arptable']['v4']
lower_tor_arp_table = lower_tor_host.switch_arptable()['ansible_facts']['arptable']['v4']
try:
pytest_assert(ptf_arp_tgt_ip in upper_tor_arp_table)
except AssertionError:
results['failed'] = True
results['failed_reason'] = 'Packet from PTF not received on standby upper ToR'
return results
try:
pytest_assert(ptf_arp_tgt_ip in lower_tor_arp_table)
except AssertionError:
results['failed'] = True
results['failed_reason'] = 'Packet from PTF not received on active lower ToR'
return results
logger.info('Finished mux simulator check')
return results
return _check
@pytest.fixture(scope="module")
def check_monit(duthosts):
"""
    @summary: Check whether Monit is running and whether the services monitored by Monit are
              in the correct status.
    @return: A dictionary containing the testing result (failed or not failed) and the status of each service.
"""
def _check(*args, **kwargs):
check_results = []
for dut in duthosts:
logger.info("Checking status of each Monit service...")
networking_uptime = dut.get_networking_uptime().seconds
timeout = max((MONIT_STABILIZE_MAX_TIME - networking_uptime), 0)
interval = 20
logger.info("networking_uptime = {} seconds, timeout = {} seconds, interval = {} seconds" \
.format(networking_uptime, timeout, interval))
check_result = {"failed": False, "check_item": "monit", "host": dut.hostname}
if timeout == 0:
monit_services_status = dut.get_monit_services_status()
if not monit_services_status:
logger.info("Monit was not running.")
check_result["failed"] = True
check_result["failed_reason"] = "Monit was not running"
logger.info("Checking status of each Monit service was done!")
return check_result
check_result = _check_monit_services_status(check_result, monit_services_status)
else:
start = time.time()
elapsed = 0
is_monit_running = False
while elapsed < timeout:
check_result["failed"] = False
monit_services_status = dut.get_monit_services_status()
if not monit_services_status:
wait(interval, msg="Monit was not started and wait {} seconds to retry. Remaining time: {}." \
.format(interval, timeout - elapsed))
elapsed = time.time() - start
continue
is_monit_running = True
check_result = _check_monit_services_status(check_result, monit_services_status)
if check_result["failed"]:
wait(interval, msg="Services were not monitored and wait {} seconds to retry. Remaining time: {}. Services status: {}" \
.format(interval, timeout - elapsed, str(check_result["services_status"])))
elapsed = time.time() - start
else:
break
if not is_monit_running:
logger.info("Monit was not running.")
check_result["failed"] = True
check_result["failed_reason"] = "Monit was not running"
logger.info("Checking status of each Monit service was done on %s" % dut.hostname)
check_results.append(check_result)
return check_results
return _check
@pytest.fixture(scope="module")
def check_processes(duthosts):
def _check(*args, **kwargs):
check_results = []
for dut in duthosts:
logger.info("Checking process status on %s..." % dut.hostname)
networking_uptime = dut.get_networking_uptime().seconds
timeout = max((SYSTEM_STABILIZE_MAX_TIME - networking_uptime), 0)
interval = 20
logger.info("networking_uptime=%d seconds, timeout=%d seconds, interval=%d seconds" % \
(networking_uptime, timeout, interval))
check_result = {"failed": False, "check_item": "processes", "host": dut.hostname}
if timeout == 0: # Check processes status, do not retry.
processes_status = dut.all_critical_process_status()
check_result["processes_status"] = processes_status
check_result["services_status"] = {}
for k, v in processes_status.items():
if v['status'] == False or len(v['exited_critical_process']) > 0:
check_result['failed'] = True
check_result["services_status"].update({k: v['status']})
else: # Retry checking processes status
start = time.time()
elapsed = 0
while elapsed < timeout:
check_result["failed"] = False
processes_status = dut.all_critical_process_status()
check_result["processes_status"] = processes_status
check_result["services_status"] = {}
for k, v in processes_status.items():
if v['status'] == False or len(v['exited_critical_process']) > 0:
check_result['failed'] = True
check_result["services_status"].update({k: v['status']})
if check_result["failed"]:
wait(interval, msg="Not all processes are started, wait %d seconds to retry. Remaining time: %d %s" % \
(interval, int(timeout - elapsed), str(check_result["processes_status"])))
elapsed = time.time() - start
else:
break
logger.info("Done checking processes status on %s" % dut.hostname)
check_results.append(check_result)
return check_results
return _check
|
#!/usr/bin/env python
import wget
import tarfile
import os
import sys
try:
import hostphot
except ImportError:
pass
def download_dustmaps(mapsdir='.'):
""" Downloads dust maps of Schlegel, Fikbeiner & Davis (1998).
mapsdir : str, default '.'
Directory where the directory with dust maps of Schlegel, Fikbeiner & Davis (1998)
is going to be downloaded with the name 'sfddata-master/'. Default uses current
directory.
"""
sfdmaps_url = 'https://github.com/kbarbary/sfddata/archive/master.tar.gz'
master_tar = wget.download(sfdmaps_url)
# extract tar file under mapsdir directory
tar = tarfile.open(master_tar)
tar.extractall(mapsdir)
tar.close()
os.remove(master_tar)
if __name__ == '__main__':
if len(sys.argv)==1:
download_dustmaps()
elif str(sys.argv[1])=='hostphot':
mapsdir = hostphot.__path__[0]
download_dustmaps(mapsdir)
else:
download_dustmaps(str(sys.argv[1]))
|
# Copyright (c) 2018-2021, Texas Instruments
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
import sys
from colorama import Fore
def timer(func, *args, repeats=1, **kwargs):
start_time = time.time()
for i in range(repeats):
results = func(*args, **kwargs)
#
elapsed_time = (time.time() - start_time)/repeats
return results, elapsed_time
# measures time spent on the current process.
# this is more accurate, but this will not measure time spent in os.system() calls
# so use timer() if you have os.system() calls.
def process_timer(func, *args, repeats=1, **kwargs):
    start_time = time.process_time()
for i in range(repeats):
results = func(*args, **kwargs)
#
    elapsed_process_time = (time.process_time() - start_time)/repeats
return results, elapsed_process_time
def delta_time(seconds):
days, seconds = divmod(seconds,(60*60*24))
hours, seconds = divmod(seconds,(60*60))
minutes, seconds = divmod(seconds,60)
return days, hours, minutes, seconds
def delta_time_string(seconds):
days, hours, minutes, seconds = delta_time(seconds)
time_str = f'{minutes:02.0f}:{seconds:02.0f}'
time_str = f'{hours:02.0f}:{time_str}' if hours > 0 else time_str
time_str = f'{days:1.0f}d,{time_str}' if days > 0 else time_str
return time_str
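# Worked example (not in the original): 90061 seconds is 1 day, 1 hour, 1 minute
# and 1 second, so delta_time_string(90061) returns '1d,01:01:01'.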
def display_time_bar(desc, num_completed, total, start_time, end_time, file=None, colors=None):
file = file if file is not None else sys.stdout
time_taken_str = eta_str = it_per_sec = ''
time_delta = end_time - start_time
if num_completed > 0 and time_delta > 0 and total is not None:
time_taken_str = delta_time_string(time_delta)
eta_str = delta_time_string(time_delta*(total-num_completed)/num_completed)
it_per_sec = f'{(time_delta/num_completed):5.2f}s/it' if (time_delta > num_completed) \
else f'{(num_completed/time_delta):5.2f}it/s'
#
num_bars = int(num_completed*10.0/total) if total is not None else 0
percentage = f'{num_completed*100.0/total:5.0f}%' if total is not None else ' '
bar_string = f"{'#'*num_bars + ' '*(10-num_bars)}"
if colors is not None:
        assert len(colors) == 4, 'colors must have length 4'
file.write(f'\r{colors[0]}{desc}|'
f'{colors[1]}{bar_string}| '
f'{colors[2]}{percentage} {num_completed}/{total}| '
f'{colors[3]}[{time_taken_str}<{eta_str} {it_per_sec}]{Fore.RESET}')
else:
file.write(f'\r{desc}|'
f'{bar_string}| '
f'{percentage} {num_completed}/{total}| '
f'[{time_taken_str}<{eta_str} {it_per_sec}]')
#
file.flush()
|
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansiblelint import AnsibleLintRule
def incorrect_task(task, cmd):
if 'shell' not in task:
return False
if 'register' in task:
return False
if task.get('ignore_errors'):
return False
if isinstance(task['shell'], dict):
args = task['shell']['cmd'].split()
else:
args = task['shell'].split()
if not set(args).isdisjoint(cmd) and 'pipefail' not in args:
return True
return False
class ShellPipefail(AnsibleLintRule):
id = 'OOOQ0001'
shortdesc = 'Shell should have a pipefail'
description = 'Shell commands should have "set -o pipefail" if using PIPE'
tags = ['shell']
cmd = ["|", "timestamper_cmd"]
def matchplay(self, file, play):
ret = []
if play.get('block') and not play.get('ignore_errors'):
block = play['block']
for task in block:
if incorrect_task(task, self.cmd):
ret.append((file, self.shortdesc))
else:
if incorrect_task(play, self.cmd):
ret.append((file, self.shortdesc))
return ret
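# Illustrative task (not from the original rule file) that this rule would flag,
# because the shell command pipes through timestamper_cmd without pipefail:
#
#   - shell: long_running_command | {{ timestamper_cmd }}
#
# Registering the result, setting ignore_errors, or prefixing the command with
# "set -o pipefail" makes incorrect_task() return False.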
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Restaurant, Base, MenuItem
engine = create_engine('sqlite:///restaurantmenu.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Menu for UrbanBurger
restaurant1 = Restaurant(name="Urban Burger")
session.add(restaurant1)
session.commit()
menuItem2 = MenuItem(name="Veggie Burger",
description="Juicy grilled veggie patty with tomato "
"mayo and lettuce",
price="$7.50", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem1 = MenuItem(name="French Fries",
description="with garlic and parmesan",
price="$2.99", course="Appetizer", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(name="Chicken Burger",
description="Juicy grilled chicken patty with tomato "
"mayo and lettuce",
price="$5.50", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(name="Chocolate Cake",
description="fresh baked and served with ice cream",
price="$3.99", course="Dessert", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(name="Sirloin Burger",
description="Made with grade A beef",
price="$7.99", course="Entree", restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = MenuItem(name="Root Beer",
description="16oz of refreshing goodness",
price="$1.99", course="Beverage", restaurant=restaurant1)
session.add(menuItem5)
session.commit()
menuItem6 = MenuItem(name="Iced Tea", description="with Lemon",
price="$.99", course="Beverage", restaurant=restaurant1)
session.add(menuItem6)
session.commit()
menuItem7 = MenuItem(name="Grilled Cheese Sandwich",
description="On texas toast with American Cheese",
price="$3.49", course="Entree", restaurant=restaurant1)
session.add(menuItem7)
session.commit()
menuItem8 = MenuItem(name="Veggie Burger",
description="Made with freshest of ingredients and home "
"grown spices",
price="$5.99", course="Entree", restaurant=restaurant1)
session.add(menuItem8)
session.commit()
# Menu for Super Stir Fry
restaurant2 = Restaurant(name="Super Stir Fry")
session.add(restaurant2)
session.commit()
menuItem1 = MenuItem(name="Chicken Stir Fry",
description="With your choice of noodles vegetables and"
" sauces",
price="$7.99", course="Entree", restaurant=restaurant2)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(
name="Peking Duck",
description=" A famous duck dish from Beijing[1] that has been prepared "
"since the imperial era. The meat is prized for its thin, "
"crisp skin, with authentic versions of the dish serving "
"mostly the skin and little meat, sliced in front of the "
"diners by the cook",
price="$25", course="Entree", restaurant=restaurant2)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(name="Spicy Tuna Roll",
description="Seared rare ahi, avocado, edamame, "
"cucumber with wasabi soy sauce ",
price="15", course="Entree", restaurant=restaurant2)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(name="Nepali Momo ",
description="Steamed dumplings made with vegetables, "
"spices and meat. ",
price="12", course="Entree", restaurant=restaurant2)
session.add(menuItem4)
session.commit()
menuItem5 = MenuItem(name="Beef Noodle Soup",
description="A Chinese noodle soup made of stewed or "
"red braised beef, beef broth, vegetables "
"and Chinese noodles.",
price="14", course="Entree", restaurant=restaurant2)
session.add(menuItem5)
session.commit()
menuItem6 = MenuItem(name="Ramen",
description="a Japanese noodle soup dish. It consists "
"of Chinese-style wheat noodles served in a "
"meat- or (occasionally) fish-based broth, "
"often flavored with soy sauce or miso, "
"and uses toppings such as sliced pork, "
"dried seaweed, kamaboko, and green onions.",
price="12", course="Entree", restaurant=restaurant2)
session.add(menuItem6)
session.commit()
# Menu for Panda Garden
restaurant1 = Restaurant(name="Panda Garden")
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(name="Pho",
description="a Vietnamese noodle soup consisting of "
"broth, linguine-shaped rice noodles called "
"banh pho, a few herbs, and meat.",
price="$8.99", course="Entree", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(name="Chinese Dumplings",
description="a common Chinese dumpling which generally "
"consists of minced meat and finely chopped "
"vegetables wrapped into a piece of dough "
"skin. The skin can be either thin and "
"elastic or thicker.",
price="$6.99", course="Appetizer", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(name="Gyoza",
description="The most prominent differences between "
"Japanese-style gyoza and Chinese-style "
"jiaozi are the rich garlic flavor, "
"which is less noticeable in the Chinese "
"version, the light seasoning of Japanese "
"gyoza with salt and soy sauce, and the "
"fact that gyoza wrappers are much thinner",
price="$9.95", course="Entree", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(name="Stinky Tofu",
description="Taiwanese dish, deep fried fermented tofu "
"served with pickled cabbage.",
price="$6.99", course="Entree", restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem2 = MenuItem(name="Veggie Burger",
description="Juicy grilled veggie patty with tomato "
"mayo and lettuce",
price="$9.50", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
# Menu for Thyme for that
restaurant1 = Restaurant(name="Thyme for That Vegetarian Cuisine ")
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(name="Tres Leches Cake",
description="Rich, luscious sponge cake soaked in sweet "
"milk and topped with vanilla bean whipped "
"cream and strawberries.",
price="$2.99", course="Dessert", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(name="Mushroom risotto",
description="Portabello mushrooms in a creamy risotto",
price="$5.99", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(name="Honey Boba Shaved Snow",
description="Milk snow layered with honey boba, jasmine "
"tea jelly, grass jelly, caramel, cream, "
"and freshly made mochi",
price="$4.50", course="Dessert", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(name="Cauliflower Manchurian",
description="Golden fried cauliflower florets in a "
"midly spiced soya,garlic sauce cooked with "
"fresh cilantro, celery, chilies,ginger & "
"green onions",
price="$6.95", course="Appetizer", restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = MenuItem(name="Aloo Gobi Burrito",
description="Vegan goodness. Burrito filled with rice, "
"garbanzo beans, curry sauce, potatoes ("
"aloo), fried cauliflower (gobi) and "
"chutney. Nom Nom",
price="$7.95", course="Entree", restaurant=restaurant1)
session.add(menuItem5)
session.commit()
menuItem2 = MenuItem(name="Veggie Burger",
description="Juicy grilled veggie patty with tomato "
"mayo and lettuce",
price="$6.80", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
# Menu for Tony's Bistro
restaurant1 = Restaurant(name="Tony\'s Bistro ")
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(name="Shellfish Tower",
description="Lobster, shrimp, sea snails, crawfish, "
"stacked into a delicious tower",
price="$13.95", course="Entree", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(name="Chicken and Rice",
description="Chicken... and rice",
price="$4.95", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(name="Mom's Spaghetti",
description="Spaghetti with some incredible tomato "
"sauce made by mom",
price="$6.95", course="Entree", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(
name="Choc Full O\' Mint (Smitten\'s Fresh Mint Chip ice cream)",
description="Milk, cream, salt, ..., Liquid nitrogen magic", price="$3.95",
course="Dessert", restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem5 = MenuItem(name="Tonkatsu Ramen",
description="Noodles in a delicious pork-based broth "
"with a soft-boiled egg",
price="$7.95", course="Entree", restaurant=restaurant1)
session.add(menuItem5)
session.commit()
# Menu for Andala's
restaurant1 = Restaurant(name="Andala\'s")
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(name="Lamb Curry",
description="Slow cook that thang in a pool of "
"tomatoes, onions and alllll those tasty "
"Indian spices. Mmmm.",
price="$9.95", course="Entree", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(name="Chicken Marsala",
description="Chicken cooked in Marsala wine sauce with "
"mushrooms",
price="$7.95", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(name="Potstickers",
description="Delicious chicken and veggies encapsulated "
"in fried dough.",
price="$6.50", course="Appetizer", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(name="Nigiri Sampler",
description="Maguro, Sake, Hamachi, Unagi, Uni, TORO!",
price="$6.75", course="Appetizer", restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem2 = MenuItem(name="Veggie Burger",
description="Juicy grilled veggie patty with tomato "
"mayo and lettuce",
price="$7.00", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
# Menu for Auntie Ann's
restaurant1 = Restaurant(name="Auntie Ann\'s Diner ")
session.add(restaurant1)
session.commit()
menuItem9 = MenuItem(name="Chicken Fried Steak",
description="Fresh battered sirloin steak fried and "
"smothered with cream gravy",
price="$8.99", course="Entree", restaurant=restaurant1)
session.add(menuItem9)
session.commit()
menuItem1 = MenuItem(name="Boysenberry Sorbet",
description="An unsettlingly huge amount of ripe "
"berries turned into frozen (and seedless) "
"awesomeness",
price="$2.99", course="Dessert", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(name="Broiled salmon",
description="Salmon fillet marinated with fresh herbs "
"and broiled hot & fast",
price="$10.95", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem3 = MenuItem(name="Morels on toast (seasonal)",
description="Wild morel mushrooms fried in butter, "
"served on herbed toast slices",
price="$7.50", course="Appetizer", restaurant=restaurant1)
session.add(menuItem3)
session.commit()
menuItem4 = MenuItem(name="Tandoori Chicken",
description="Chicken marinated in yoghurt and seasoned "
"with a spicy mix(chilli, tamarind among "
"others) and slow cooked in a cylindrical "
"clay or metal oven which gets its heat "
"from burning charcoal.",
price="$8.95", course="Entree", restaurant=restaurant1)
session.add(menuItem4)
session.commit()
menuItem2 = MenuItem(name="Veggie Burger",
description="Juicy grilled veggie patty with tomato "
"mayo and lettuce",
price="$9.50", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
menuItem10 = MenuItem(name="Spinach Ice Cream",
description="vanilla ice cream made with organic "
"spinach leaves",
price="$1.99", course="Dessert", restaurant=restaurant1)
session.add(menuItem10)
session.commit()
# Menu for Cocina Y Amor
restaurant1 = Restaurant(name="Cocina Y Amor ")
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(name="Super Burrito Al Pastor",
description="Marinated Pork, Rice, Beans, Avocado, "
"Cilantro, Salsa, Tortilla",
price="$5.95", course="Entree", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem2 = MenuItem(name="Cachapa",
description="Golden brown, corn-based Venezuelan "
"pancake; usually stuffed with queso telita "
"or queso de mano, and possibly lechon. ",
price="$7.99", course="Entree", restaurant=restaurant1)
session.add(menuItem2)
session.commit()
restaurant1 = Restaurant(name="State Bird Provisions")
session.add(restaurant1)
session.commit()
menuItem1 = MenuItem(name="Chantrelle Toast",
description="Crispy Toast with Sesame Seeds slathered with buttery chantrelle mushrooms",
price="$5.95", course="Appetizer", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem1 = MenuItem(name="Guanciale Chawanmushi",
description="Japanese egg custard served hot with spicey Italian Pork Jowl (guanciale)",
price="$6.95", course="Dessert", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
menuItem1 = MenuItem(name="Lemon Curd Ice Cream Sandwich",
description="Lemon Curd Ice Cream Sandwich on a chocolate macaron with cardamom meringue and cashews",
price="$4.25", course="Dessert", restaurant=restaurant1)
session.add(menuItem1)
session.commit()
print("added menu items!")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import io, os, re, shutil, sys
import chardet
import codecs
import datetime
# Note: the chardet module must be installed
today = datetime.datetime.now().strftime("%Y-%m-%d");
# Detect the file encoding and rewrite the file as utf-8 if it is not already
# Convert tabs to spaces
def recode(path):
raw = open(path, 'rb').read();
if raw.startswith(codecs.BOM_UTF8):
encoding = 'utf-8-sig'
else:
result = chardet.detect(raw)
encoding = result['encoding']
lines = io.open(path, "r", encoding=encoding).readlines();
for i in range(0, len(lines)):
lines[i] = lines[i].rstrip().expandtabs(4) + "\n";
io.open(path, "w", encoding="utf-8-sig").writelines(lines);
def find_git_config():
    pwd = os.getcwd();
    while os.path.isdir(pwd):
        path = os.path.join(pwd, ".git", "config");
        if os.path.isfile(path):
            return path;
        parent = os.path.dirname(pwd);
        if parent == pwd:
            break;  # reached the filesystem root without finding a .git config
        pwd = parent;
    return None;
def load_git_config():
git_config = find_git_config();
if git_config == None:
return None;
lines = open(git_config).readlines();
find_section = False;
for line in lines:
line = line.strip(" \t\r\n");
if line == "[remote \"origin\"]":
find_section = True;
elif find_section:
tokens = line.split("=");
return tokens[1].strip();
return None;
rep_name = load_git_config();
if rep_name == None:
sys.exit("没找到.git配置");
sign = list();
sign.append(u"/*");
sign.append(u"** repository: %s" % rep_name);
sign.append(u"** trumanzhao, %s, trumanzhao@foxmail.com" % today);
sign.append(u"*/");
sign.append(u"");
def sign_file(path):
recode(path);
lines = io.open(path, "r", encoding="utf-8-sig").readlines();
if len(lines) > 2 and re.match(".+repository.+github.+", lines[1]):
print("%s 已签名!" % path);
return;
for i in range(0, len(sign)):
lines.insert(i, sign[i] + u"\n");
print("加签名: %s" % path);
io.open(path, "w", encoding="utf-8").writelines(lines);
root = ".";
items = os.listdir(root);
for item in items:
path = os.path.join(root, item);
ext = os.path.splitext(path)[1].lower();
if ext == ".cpp" or ext == ".h":
sign_file(path);
|
from functools import reduce
import sys
def find_sum_to(numbers, amount=1, total=0):
'''
Return an amount of numbers from the given list that add up to the desired total
:param numbers: list of numbers to select from
:param amount: how many numbers to choose
:param total: what is the desired sum
:returns: list with all individual numbers that add up to total
'''
if not amount:
return []
for ii in range(len(numbers)):
num = numbers[ii]
remainder = total - num
if amount == 1:
if remainder == 0:
return [num]
else:
others = find_sum_to(numbers[ii + 1:], amount - 1, remainder)
if others:
return [num] + others
return []
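# Worked example (not in the original): with the list below, the two numbers that
# sum to 2020 are 1721 and 299, so
#   find_sum_to([1721, 979, 366, 299, 675, 1456], amount=2, total=2020)
# returns [1721, 299].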
if __name__ == '__main__':
numbers = []
for line in sys.stdin:
numbers.append(int(line))
AMOUNT = 3
TOTAL = 2020
result = find_sum_to(numbers, AMOUNT, TOTAL)
if not result:
raise ValueError(f'could not find {AMOUNT} numbers that add up to {TOTAL}')
print(result)
print(reduce(lambda a, b: a*b, result, 1))
|
import logging
import sys
import numpy as np
import optuna
import src.models.train_model as train
import yaml
from keras.layers import BatchNormalization, Dense, Dropout
from keras.models import Sequential
from optuna.integration.keras import KerasPruningCallback
from sklearn.metrics import roc_auc_score
from sklearn.utils import class_weight
from src.features.build_features import (load_preprocessed_data,
scale_event_data)
from tensorflow import keras
config = yaml.safe_load(open("src/config.yaml"))
data = load_preprocessed_data()
data["event_X_train"], data["event_X_test"] = scale_event_data(
data["event_X_train"], data["event_X_test"]
)
METRICS = [
keras.metrics.AUC(name="AUC"),
train.f1_score,
]
# stops training early if score doesn't improve
early_stopping = keras.callbacks.EarlyStopping(
monitor=config["RNN_params"]["monitor"],
verbose=1,
patience=5,
mode=config["RNN_params"]["mode"],
restore_best_weights=True,
)
class_weights = class_weight.compute_class_weight(
class_weight="balanced", classes=np.unique(data["y_train"]), y=data["y_train"]
)
class_weights = {
_class: weight for _class, weight in zip(np.unique(data["y_train"]), class_weights)
}
MONITOR = config["RNN_params"]["monitor"]
MODE = config["RNN_params"]["mode"]
def create_model(params: dict):
"""Generates a FNN model for hyperparameter training using Optuna
Args:
params (dict): contains Optuna-defined hyperparameter ranges
Returns:
keras model: a compiled model
"""
ACTIVATION = config["RNN_params"]["activation"]
model = Sequential()
model.add(Dense(units=data["event_X_train"].shape[1], activation=ACTIVATION))
for i in range(params["num_hidden_layers"]):
model.add(BatchNormalization(epsilon=0.01))
model.add(Dropout(params["dropout"]))
model.add(Dense(units=params[f"n_units_1{i}"], activation=ACTIVATION))
model.add(
Dense(
units=1,
activation="sigmoid",
)
)
OPTIMIZER = keras.optimizers.Adam(
learning_rate=params["lr"],
clipnorm=config["RNN_params"]["clipnorm"],
)
model.compile(optimizer=OPTIMIZER, loss="binary_crossentropy", metrics=METRICS)
return model
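# Illustrative params dict accepted by create_model (keys mirror those filled in by
# objective() below; the values are examples only):
#   {"dropout": 0.2, "num_hidden_layers": 2, "lr": 1e-3, "batch_size": 64,
#    "n_units_l0": 128, "n_units_l1": 64}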
def objective(trial):
params = {
"dropout": trial.suggest_uniform("dropout", 0.0, 0.5),
"num_hidden_layers": trial.suggest_int("num_hidden_layers", 1, 6),
"lr": trial.suggest_loguniform("lr", 1e-4, 1e-1),
"batch_size": trial.suggest_categorical("batch_size", [16, 32, 64, 128, 256]),
}
for i in range(params["num_hidden_layers"]):
params[f"n_units_1{i}"] = trial.suggest_int(f"n_units_l{i}", 20, 400)
model = create_model(params)
model.fit(
data["event_X_train"],
data["y_train"],
batch_size=params["batch_size"],
class_weight=class_weights,
epochs=200,
callbacks=[early_stopping, KerasPruningCallback(trial, "val_AUC")],
validation_data=(data["event_X_test"], data["y_test"]),
shuffle=True,
verbose=1,
)
preds = model.predict(data["event_X_test"])
auc_score = roc_auc_score(data["y_test"], preds)
return auc_score
def main():
# Add stream handler of stdout to show the messages
optuna.logging.get_logger("optuna").addHandler(logging.StreamHandler(sys.stdout))
study_name = "bayesian_opt_FNN" # Unique identifier of the study.
storage_name = f"sqlite:///models/hyperparam_dbs/{study_name}.db"
study = optuna.create_study(
direction="maximize",
sampler=optuna.samplers.TPESampler(),
pruner=optuna.pruners.MedianPruner(n_warmup_steps=1, n_min_trials=5),
study_name=study_name,
storage=storage_name,
load_if_exists=True,
)
study.optimize(objective, n_trials=200, n_jobs=1)
if __name__ == "__main__":
main()
|
import asyncio
import itertools
def normalise_environment(key_values):
    ''' Converts a denormalised dict of (string -> string) pairs, where each key
    is treated as a path into a nested list/dictionary structure
{
"FOO__1__BAR": "setting-1",
"FOO__1__BAZ": "setting-2",
"FOO__2__FOO": "setting-3",
"FOO__2__BAR": "setting-4",
"FIZZ": "setting-5",
}
to the nested structure that this represents
{
"FOO": [{
"BAR": "setting-1",
"BAZ": "setting-2",
}, {
"BAR": "setting-3",
"BAZ": "setting-4",
}],
"FIZZ": "setting-5",
}
If all the keys for that level parse as integers, then it's treated as a list
with the actual keys only used for sorting
This function is recursive, but it would be extremely difficult to hit a stack
    limit, and this function would typically be called once at the start of a
program, so efficiency isn't too much of a concern.
'''
# Separator is chosen to
# - show the structure of variables fairly easily;
# - avoid problems, since underscores are usual in environment variables
separator = '__'
def get_first_component(key):
return key.split(separator)[0]
def get_later_components(key):
return separator.join(key.split(separator)[1:])
without_more_components = {
key: value
for key, value in key_values.items()
if not get_later_components(key)
}
with_more_components = {
key: value
for key, value in key_values.items()
if get_later_components(key)
}
def grouped_by_first_component(items):
def by_first_component(item):
return get_first_component(item[0])
# groupby requires the items to be sorted by the grouping key
return itertools.groupby(
sorted(items, key=by_first_component),
by_first_component,
)
def items_with_first_component(items, first_component):
return {
get_later_components(key): value
for key, value in items
if get_first_component(key) == first_component
}
nested_structured_dict = {
**without_more_components, **{
first_component: normalise_environment(
items_with_first_component(items, first_component))
for first_component, items in grouped_by_first_component(with_more_components.items())
}}
def all_keys_are_ints():
def is_int(string_to_test):
try:
int(string_to_test)
return True
except ValueError:
return False
return all([is_int(key) for key, value in nested_structured_dict.items()])
def list_sorted_by_int_key():
return [
value
for key, value in sorted(
nested_structured_dict.items(),
key=lambda key_value: int(key_value[0])
)
]
return \
list_sorted_by_int_key() if all_keys_are_ints() else \
nested_structured_dict
def round_robin(items):
i = 0
while True:
yield items[i % len(items)]
i += 1
async def loop_forever(func):
while True:
try:
await func()
except Exception as exception:
# For Python 3.8 onwards, asyncio.CancelledError is _not_ caught and bubbles up, which
# is desired so we do cancel the task, e.g. on graceful shutdown
print("Error", exception)
await asyncio.sleep(60)
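# Minimal usage sketch (not part of the original module), exercising
# normalise_environment() with the example from its docstring.
if __name__ == "__main__":
    _example_env = {
        "FOO__1__BAR": "setting-1",
        "FOO__1__BAZ": "setting-2",
        "FOO__2__BAR": "setting-3",
        "FOO__2__BAZ": "setting-4",
        "FIZZ": "setting-5",
    }
    # Expected: {'FOO': [{'BAR': 'setting-1', 'BAZ': 'setting-2'},
    #                    {'BAR': 'setting-3', 'BAZ': 'setting-4'}],
    #            'FIZZ': 'setting-5'}
    print(normalise_environment(_example_env))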
|
def cli() -> None:
print("Hello, recv!")
|
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import itertools
import os
import pytest
import gluonnlp as nlp
def test_corpus_stream(stream_identity_wrappers):
EOS = nlp._constants.EOS_TOKEN
path = os.path.join('tests', 'data', 'wikitext-2')
token_path = os.path.join('tests', 'data', 'wikitext-2/*.tokens')
# Make sure train, val and test files exist at given path
train = nlp.data.WikiText2(segment='train', root=path)
val = nlp.data.WikiText2(segment='val', root=path)
test = nlp.data.WikiText2(segment='test', root=path)
stream = nlp.data.SimpleDatasetStream(
nlp.data.CorpusDataset,
token_path,
flatten=True,
skip_empty=True,
eos=EOS)
stream = stream_identity_wrappers(stream)
counter = nlp.data.Counter(itertools.chain.from_iterable(stream))
assert len(counter) == 33278, len(counter)
# examine aggregated vocab
vocab = nlp.vocab.Vocab(counter, bos_token=None, padding_token=None)
assert len(vocab) == 33278, len(vocab)
# examine aggregated stats
assert sum(counter.values()) == 2075677 + 216347 + 244102
counter = nlp.data.Counter(itertools.chain.from_iterable(stream))
assert len(counter) == 33278, len(counter)
def test_lazy_stream(stream_identity_wrappers):
EOS = nlp._constants.EOS_TOKEN
path = os.path.join('tests', 'data', 'wikitext-2')
token_path = os.path.join('tests', 'data', 'wikitext-2/*test*.tokens')
corpus = nlp.data.WikiText2(segment='test', root=path)
stream = nlp.data.SimpleDatasetStream(
nlp.data.CorpusDataset,
token_path,
flatten=True,
skip_empty=True,
eos=EOS)
wrapped_stream = stream_identity_wrappers(stream)
transformed_stream = wrapped_stream.transform(lambda d: [s.lower() for s in d])
wrapped_stream_iter = iter(wrapped_stream)
transformed_stream_iter = iter(transformed_stream)
for dataset in stream:
prefetched_dataset = next(wrapped_stream_iter)
transformed_dataset = next(transformed_stream_iter)
assert all([
w1.lower() == w2.lower() == w3 == w4.lower() for w1, w2, w3, w4 in
zip(dataset, prefetched_dataset, transformed_dataset, corpus)
])
@pytest.mark.parametrize('num_prefetch', [0, 1, 10])
@pytest.mark.parametrize('worker_type', ['thread', 'process'])
def test_prefetch_stream(num_prefetch, worker_type):
EOS = nlp._constants.EOS_TOKEN
path = os.path.join('tests', 'data', 'wikitext-2')
token_path = os.path.join('tests', 'data', 'wikitext-2/*test*.tokens')
test = nlp.data.WikiText2(segment='test', root=path)
corpus = nlp.data.SimpleDatasetStream(
nlp.data.CorpusDataset, token_path, flatten=True, skip_empty=True)
if num_prefetch < 1:
with pytest.raises(ValueError):
prefetch_corpus = nlp.data.PrefetchingStream(
corpus, num_prefetch=num_prefetch, worker_type=worker_type)
else:
prefetch_corpus = nlp.data.PrefetchingStream(
corpus, num_prefetch=num_prefetch, worker_type=worker_type)
prefetch_corpus_iter = iter(prefetch_corpus)
for x in corpus:
y = next(prefetch_corpus_iter)
assert all([sx == sy for sx, sy in zip(x, y)])
|
import numpy as np
from IPython import embed
def make_change(coins, n):
m = np.zeros((len(coins)+1, n+1))
m[0,1:n+1] = np.inf
for c in range(1, len(coins)+1):
for r in range(1, n+1):
if coins[c-1] == r:
m[c,r] = 1
elif coins[c-1] > r:
m[c,r] = m[c-1, r]
else:
m[c,r] = min(m[c-1,r], 1+m[c,r-coins[c-1]])
return m[-1,-1]
def knapsack_problem(values, weights, W):
m = np.zeros((len(values)+1, W+1))
k = np.zeros((len(values)+1, W+1))
for i in range(1,len(values)+1):
for w in range(1,W+1):
wi = weights[i-1] # weight of this item
vi = values[i-1] # value of this item
            if (wi <= w) and (vi + m[i-1, w-wi] > m[i-1, w]):
                m[i, w] = vi + m[i-1, w-wi]  # taking item i improves the value
                k[i, w] = 1
            else:
                m[i, w] = m[i-1, w]  # item i is skipped
picks = []
C = W
for i in range(len(values), 0, -1):
if k[i,C] == 1:
picks.append(i)
C -= weights[i-1]
picks.sort()
picks = [x-1 for x in picks]
print(picks)
print(make_change([1,2,10], 33))
knapsack_problem([2, 3, 4], [1, 2, 3], 3)  # prints the chosen item indices
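# Expected output of the two calls above (hand-checked, given the fixes in this file):
# make_change([1, 2, 10], 33) prints 5.0 (five coins: 10+10+10+2+1), and
# knapsack_problem([2, 3, 4], [1, 2, 3], 3) prints [0, 1], i.e. the items with
# weights 1 and 2 fill the capacity of 3 for a total value of 5.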
|
from CreateTimeGraphs import *
def create_diag(dc):
"""Commands per day"""
if not botStats:
return
botUsers = []
for u in dc.vip:
if len(u.botPlays) == 0 and len(u.botCommands) == 0:
continue
u.commands = [0] * dc.dayCountBot
botUsers.append(u)
for c in u.botPlays:
if type(c[0]) is datetime:
u.commands[(c[0].date() - dc.startDayBot).days] += 1
for c in u.botCommands:
if type(c[0]) is datetime:
u.commands[(c[0].date() - dc.startDayBot).days] += 1
with openTempfile("vipcommandstime") as f:
for u in botUsers:
dc.write_days(f, u.commands, u.name, start = dc.startDayBot)
# Create the diagram
diag = Diagram("vipcommandstime", "Commands per day (vip)", 1500, 600)
diag.xlabel = "Date"
diag.ylabel = "Commands"
diag.appendText = """\
set timefmt "%d.%m.%Y"
set format x "%d.%m.%Y"
set xdata time
set xrange ["{0:%d.%m.%Y}":"{1:%d.%m.%Y}"]
set style data lines
set key autotitle columnhead
set samples 100
""".format(dc.startDayBot, dc.endDayBot)
diag.plots = ["index '{0}' using 1:2 title '{0}'"
.format(u.name) for u in botUsers]
diag.render(dc.diagramTemplate)
dc.botTab.addDiagram(diag)
with openTempfile("vipcommandstimeCumulative") as f:
for u in botUsers:
dc.write_days(f, u.commands, u.name, True, start = dc.startDayBot)
# Create the cumulative diagram
diag = Diagram("vipcommandstimeCumulative", "Cumulative commands per day (vip)", 1500, 600)
diag.xlabel = "Date"
diag.ylabel = "Commands"
diag.appendText = """\
set timefmt "%d.%m.%Y"
set format x "%d.%m.%Y"
set xdata time
set xrange ["{0:%d.%m.%Y}":"{1:%d.%m.%Y}"]
set style data lines
set key autotitle columnhead
set samples 100
""".format(dc.startDayBot, dc.endDayBot)
diag.plots = ["index '{0}' using 1:2 title '{0}'"
.format(u.name) for u in botUsers]
diag.render(dc.diagramTemplate)
dc.botTab.addDiagram(diag)
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Yeolar
#
from datetime import datetime
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.utils.decorators import method_decorator
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.dates import YearArchiveView, MonthArchiveView, DayArchiveView, DateDetailView
from django.db.models import Q
from django.conf import settings
from .models import *
from .forms import *
from ..util.search.constants import STOP_WORDS_RE
from ..util.decorators import superuser_required
@superuser_required
def topic_set(request, id=None, **kwargs):
if id:
topic = Topic.objects.get(id=id)
else:
topic = Topic()
if request.method == 'POST':
form = TopicForm(request.POST, instance=topic)
if form.is_valid():
topic = form.save()
return redirect(topic.get_absolute_url())
else:
form = TopicForm(instance=topic)
return render_to_response(
'note/topic_set.html',
{'form': form, 'id': id},
RequestContext(request)) # use for csrf
@superuser_required
def post_set(request, id=None, **kwargs):
if id:
post = Post.objects.get(id=id)
else:
post = Post()
if request.method == 'POST':
form = PostForm(request.POST, instance=post, author=request.user)
if form.is_valid():
post = form.save()
return redirect(post.get_absolute_url())
else:
form = PostForm(instance=post, author=request.user)
form.fields['topic'].queryset = Topic.objects.all()
return render_to_response(
'note/post_set.html',
{'form': form, 'id': id},
RequestContext(request)) # use for csrf
class TopicListView(ListView):
def get_queryset(self):
if self.request.user.is_superuser:
self.topics = Topic.objects.all()
else:
self.topics = Topic.objects.public()
return self.topics
def get_context_data(self, **kwargs):
context = super(TopicListView, self).get_context_data(**kwargs)
context.update({
'topic_list': self.topics[:5],
})
return context
class TopicDetailView(ListView):
paginate_by = getattr(settings, 'NOTE_PAGESIZE', 20)
template_name = 'note/topic_detail.html'
def get_queryset(self):
self.topic = get_object_or_404(Topic, id=self.kwargs['id'])
if self.request.user.is_superuser:
return self.topic.post_set.published()
else:
return self.topic.post_set.public()
def get_context_data(self, **kwargs):
if self.request.user.is_superuser:
topics = Topic.objects.all()
else:
topics = Topic.objects.public()
context = super(TopicDetailView, self).get_context_data(**kwargs)
context.update({
'topic_list': topics[:5],
'topic': self.topic
})
return context
class PostListView(ListView):
paginate_by = getattr(settings, 'NOTE_PAGESIZE', 20)
def get_queryset(self):
if self.request.user.is_superuser:
return Post.objects.all()
else:
return Post.objects.public()
def get_context_data(self, **kwargs):
if self.request.user.is_superuser:
topics = Topic.objects.all()
else:
topics = Topic.objects.public()
context = super(PostListView, self).get_context_data(**kwargs)
context.update({
'topic_list': topics[:5],
})
return context
class PrivateListView(ListView):
paginate_by = getattr(settings, 'NOTE_PAGESIZE', 20)
template_name = 'note/private_list.html'
@method_decorator(superuser_required)
def dispatch(self, *args, **kwargs):
return super(PrivateListView, self).dispatch(*args, **kwargs)
def get_queryset(self):
return Post.objects.private()
def get_context_data(self, **kwargs):
context = super(PrivateListView, self).get_context_data(**kwargs)
context.update({
'topic_list': Topic.objects.all()[:5],
})
return context
class DraftListView(ListView):
paginate_by = getattr(settings, 'NOTE_PAGESIZE', 20)
template_name = 'note/draft_list.html'
@method_decorator(superuser_required)
def dispatch(self, *args, **kwargs):
return super(DraftListView, self).dispatch(*args, **kwargs)
def get_queryset(self):
return Post.objects.draft()
def get_context_data(self, **kwargs):
context = super(DraftListView, self).get_context_data(**kwargs)
context.update({
'topic_list': Topic.objects.all()[:5],
})
return context
class PostYearArchiveView(YearArchiveView):
date_field = 'publish'
make_object_list = True
def get_queryset(self):
if self.request.user.is_superuser:
return Post.objects.published()
else:
return Post.objects.public()
def get_context_data(self, **kwargs):
if self.request.user.is_superuser:
topics = Topic.objects.all()
else:
topics = Topic.objects.public()
context = super(PostYearArchiveView, self).get_context_data(**kwargs)
context.update({
'topic_list': topics[:5],
})
return context
class PostMonthArchiveView(MonthArchiveView):
date_field = 'publish'
month_format = '%m'
def get_queryset(self):
if self.request.user.is_superuser:
return Post.objects.published()
else:
return Post.objects.public()
def get_context_data(self, **kwargs):
if self.request.user.is_superuser:
topics = Topic.objects.all()
else:
topics = Topic.objects.public()
context = super(PostMonthArchiveView, self).get_context_data(**kwargs)
context.update({
'topic_list': topics[:5],
})
return context
class PostDayArchiveView(DayArchiveView):
date_field = 'publish'
month_format = '%m'
def get_queryset(self):
if self.request.user.is_superuser:
return Post.objects.published()
else:
return Post.objects.public()
def get_context_data(self, **kwargs):
if self.request.user.is_superuser:
topics = Topic.objects.all()
else:
topics = Topic.objects.public()
context = super(PostDayArchiveView, self).get_context_data(**kwargs)
context.update({
'topic_list': topics[:5],
})
return context
class PostDetailView(DateDetailView):
month_format = '%m'
date_field = 'publish'
def get_queryset(self):
if self.request.user.is_superuser:
return Post.objects.all() # show draft for preview
else:
return Post.objects.public()
def get_context_data(self, **kwargs):
if self.request.user.is_superuser:
topics = Topic.objects.all()
else:
topics = Topic.objects.public()
context = super(PostDetailView, self).get_context_data(**kwargs)
context.update({
'topic_list': topics[:5],
})
return context
def search(request, paginate_by=10,
template_name='note/post_search.html', **kwargs):
page_size = getattr(settings,'SEARCH_PAGESIZE', paginate_by)
if request.GET:
if request.user.is_superuser:
posts = Post.objects.published()
topics = Topic.objects.all()
else:
posts = Post.objects.public()
topics = Topic.objects.public()
context = {
'topic_list': topics[:5],
}
vague_terms = True
stop_word_list = STOP_WORDS_RE
search_terms = '%s' % request.GET['q']
search_term_list = search_terms.split()
cleaned_search_term_list = []
for search_term in search_term_list:
cleaned_search_term = stop_word_list.sub('', search_term)
cleaned_search_term = cleaned_search_term.strip()
if len(cleaned_search_term) != 0:
cleaned_search_term_list.append(cleaned_search_term)
posts = posts.filter(
Q(title__icontains=cleaned_search_term) |
Q(author__username__icontains=cleaned_search_term) |
Q(text__icontains=cleaned_search_term))
vague_terms = False
if not vague_terms:
if len(posts) != 0:
paginator = Paginator(posts, page_size)
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
objects = paginator.page(page)
                except (EmptyPage, InvalidPage):
objects = paginator.page(paginator.num_pages)
context.update({
'paged_objects': objects,
'search_terms': search_terms,
'cleaned_search_terms': ' '.join(cleaned_search_term_list)
})
else:
                message = u'No results found.'
context.update({'message': message})
else:
            message = u'The search terms are too simple; please refine them.'
context.update({'message': message})
return render_to_response(template_name, context, RequestContext(request))
|
import numpy as np
import scipy.special as ss
import surface_integration as surf
class VesicleHydro:
def __init__(self, _lattice_parameter, _level_of_tess,
_temperature, _viscosity, _alpha_over_a,
_n_quadrature_points=4):
"""
:type _quadrature_area: str
:type _n_quadrature_points: int
"""
self.d = _lattice_parameter
self.n_angle_div = 100000
self.level_of_tess = _level_of_tess
self.alpha = _alpha_over_a * self.d
self.alpha2 = self.alpha * self.alpha
self.alpha3 = self.alpha2 * self.alpha
self.eta = _viscosity
self.T = _temperature
self.kT = 1.38064852e-2 * self.T
self.R = 0
self.n_particles = 0
self.pos = None
self.normal = None
self.__build_sphere()
self.R2 = self.R * self.R
self.R3 = self.R2 * self.R
self.area_per_particle = 4.0 * np.pi * self.R ** 2 / float(self.n_particles)
self.r_c = np.sqrt(self.area_per_particle / np.pi)
print("Number of particles = ", self.n_particles)
print("Vesicle radius = ", self.R)
print("Area per particle = ", self.area_per_particle)
print("Effective radius of each particle = ", self.r_c)
self.quad_point_x, self.quad_point_y, self.quad_weight =\
surf.build_local_integration_grid_circle(_n_quadrature_points, self.r_c)
self.v_r = None
self.sig_r = None
self.__calc_surface_stress()
self.zeta = None
self.D = None
self.A = None
def __build_sphere(self):
def push_point(v):
v /= np.sqrt(np.sum(v * v))
def tess_tri(p_old, _i, _j, _k, level):
p = np.zeros([3, 3])
p[0, :] = 0.5 * (p_old[_i, :] + p_old[_j, :])
p[1, :] = 0.5 * (p_old[_j, :] + p_old[_k, :])
p[2, :] = 0.5 * (p_old[_k, :] + p_old[_i, :])
push_point(p[0, :])
push_point(p[1, :])
push_point(p[2, :])
n_points = p_old.shape[0]
p_old = np.append(p_old, p, axis=0)
m = n_points
n = n_points + 1
q = n_points + 2
if level > 1:
p_old = tess_tri(p_old, _i, m, q, level - 1)
p_old = tess_tri(p_old, m, _j, n, level - 1)
p_old = tess_tri(p_old, q, n, _k, level - 1)
p_old = tess_tri(p_old, n, m, q, level - 1)
return p_old
self.pos = np.zeros([12, 3])
a = np.arctan(0.5)
b = 2.0 * np.pi / 5.0
c = np.pi / 5.0
th = np.array([0.5 * np.pi, a, a, a, a, a, -a, -a, -a, -a, -a, -0.5 * np.pi])
phi = np.array(
[0.0, 0.0, b, 2.0 * b, 3.0 * b, 4.0 * b, c, c + b, c + 2.0 * b, c + 3.0 * b, c + 4.0 * b, 0.0])
self.pos[:, 0] = np.cos(th[:]) * np.cos(phi[:])
self.pos[:, 1] = np.cos(th[:]) * np.sin(phi[:])
self.pos[:, 2] = np.sin(th[:])
if self.level_of_tess > 1:
self.pos = tess_tri(self.pos, 0, 1, 2, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 0, 2, 3, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 0, 3, 4, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 0, 4, 5, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 0, 5, 1, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 1, 6, 2, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 2, 6, 7, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 2, 7, 3, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 3, 7, 8, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 3, 8, 4, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 4, 8, 9, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 4, 9, 5, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 5, 9, 10, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 5, 10, 1, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 1, 10, 6, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 11, 6, 7, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 11, 7, 8, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 11, 8, 9, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 11, 9, 10, self.level_of_tess - 1)
self.pos = tess_tri(self.pos, 11, 10, 6, self.level_of_tess - 1)
d_min2 = 1.0e5
        dup_ind = np.array([], dtype=int)  # integer indices for np.delete below
for i in range(self.pos.shape[0] - 1):
for j in range(i + 1, self.pos.shape[0]):
dr = self.pos[j, :] - self.pos[i, :]
d2 = np.sum(dr * dr)
if d2 < 1.0e-6:
dup_ind = np.append(dup_ind, j)
elif d2 < d_min2:
d_min2 = d2
d_min = np.sqrt(d_min2)
self.pos = np.delete(self.pos, dup_ind, axis=0)
self.n_particles = self.pos.shape[0]
"""
d_mean = 0.0
n_bonds = 0.0
for i in range(self.pos.shape[0] - 1):
for j in range(i + 1, self.pos.shape[0]):
dr = self.pos[j, :] - self.pos[i, :]
dij = np.sqrt(np.sum(dr * dr))
if dij < 1.5 * d_min:
d_mean += np.sqrt(d2)
n_bonds += 1.0
d_mean /= n_bonds
"""
#self.R = self.d / d_min
#self.R = self.d / d_mean
aa = 0.5 * np.sqrt(3.0) * (self.d * self.d)
self.R = np.sqrt(aa * float(self.n_particles) / (4.0 * np.pi))
self.normal = self.pos.copy()
self.pos *= self.R
def __calc_surface_stress(self):
v_factor = 1.0 / (4.0 * np.pi) # in units of W / alpha2 = area_per_particle * v_p / alpha2
s_factor = 1.0 / (4.0 * np.pi) # in units of F_m / alpha2
n_legendre = 87
th = np.linspace(0.0, np.pi, self.n_angle_div)
th2 = th * th
cos_th = np.cos(th)
sin_th = np.sin(th)
beta = self.R2 / (4.0 * self.alpha2)
v0_r = v_factor * np.exp(-beta * th2)
#v0_r = v_factor * np.cos (th / 2) ** (8.0 * beta)
sig0_r = -s_factor * np.exp(-beta * th2)
leg = []
leg_poly = []
for i in range(n_legendre):
leg_poly.append(ss.legendre(i))
leg.append(leg_poly[i](cos_th))
c_v = np.zeros(n_legendre)
c_sig = np.zeros(n_legendre)
for i in range(n_legendre):
c_v[i] = (2.0 * float(i) + 1.0) / 2.0 * np.trapz(v0_r * leg[i] * sin_th, th)
c_sig[i] = (2.0 * float(i) + 1.0) / 2.0 * np.trapz(sig0_r * leg[i] * sin_th, th)
k_acc = np.load("../hydrodynamics/k_acc.npy")
fc1_v = np.zeros(n_legendre)
fc2_v = np.zeros(n_legendre)
fc1_sig = np.zeros(n_legendre)
fc2_sig = np.zeros(n_legendre)
for m in range(1, n_legendre):
k1 = k_acc[m, 0]
k2 = k_acc[m, 1]
fm = float(m)
fm2 = fm * fm
fac = c_v[m] / ((fm * (fm + 1.0)) * (k1 - k2))
fc1_v[m] = fac * k2 * self.R ** (2.0 - k1)
fc2_v[m] = -fac * k1 * self.R ** (2.0 - k2)
fac = c_sig[m] / ((k1 * k2 * (k1 + k2) - 3.0 * k1 * k2 - 6.0 * fm2 - 6.0 * fm) * (k1 - k2))
fc1_sig[m] = fac * k2 * self.R ** (3.0 - k1)
fc2_sig[m] = -fac * k1 * self.R ** (3.0 - k2)
self.v_r = np.zeros_like(th)
self.sig_r = np.zeros_like(th)
for m in range(n_legendre):
k1 = k_acc[m, 0]
k2 = k_acc[m, 1]
mmp = float(m) * float(m + 1)
r_to_k1 = self.R ** k1
r_to_k2 = self.R ** k2
f_v = fc1_v[m] * r_to_k1 + fc2_v[m] * r_to_k2
rfp_v = fc1_v[m] * k1 * r_to_k1 + fc2_v[m] * k2 * r_to_k2
r3fppp_v = fc1_v[m] * k1 * (k1 - 1.0) * (k1 - 2.0) * r_to_k1 +\
fc2_v[m] * k2 * (k2 - 1.0) * (k2 - 2.0) * r_to_k2
f_sig = fc1_sig[m] * r_to_k1 + fc2_sig[m] * r_to_k2
pm_cos_th = leg_poly[m](cos_th)
self.sig_r += (r3fppp_v - 3.0 * mmp * rfp_v + 6.0 * mmp * f_v) / self.R3 * pm_cos_th
self.v_r -= f_sig * mmp / self.R2 * pm_cos_th
self.sig_r *= self.area_per_particle * self.eta / self.alpha2
self.v_r *= 1.0 / (self.area_per_particle * self.eta * self.alpha2)
def __integrate_over_sphere(self, integrand):
result = np.zeros([self.n_particles, self.n_particles])
for i in range(self.n_particles):
for j in range(i, self.n_particles):
cos_th0 = np.dot(self.normal[i], self.normal[j])
sin_th0 = np.linalg.norm(np.cross(self.normal[i], self.normal[j]))
th0 = np.arctan2(sin_th0, cos_th0)
integral = 0.0
for k in range(self.quad_weight.shape[0]):
if i == j:
d_th = np.sqrt(self.quad_point_x[k] ** 2 + self.quad_point_y[k] ** 2) / self.R
else:
d_th = self.quad_point_x[k] / self.R
#d_phi = (self.quad_point_y[k] / self.R) / sin_th0
th = th0 + d_th
if th > np.pi:
th = 2.0 * np.pi - th
f_ind = th / np.pi * float(self.n_angle_div)
ind = np.floor(f_ind)
frac = f_ind - ind
                    if ind < self.n_angle_div - 1:  # guard the ind + 1 lookup below
value = integrand[int(ind)] * (1.0 - frac) + integrand[int(ind) + 1] * frac
else:
value = 0.0
#det_Jac = 1.0 / (self.R2 * np.sin(th))
integral += value * self.quad_weight[k]
result[i, j] = integral
result[j, i] = result[i, j]
return result
def calc_friction_tensor(self):
# Friction coefficient tensor
# this corresponds to a normalized Gaussian used for point velocities
# it is necessary in the sense that the "momentum" of the distributed velocity be equal
# to the actual momentum of the particle
self.zeta = self.__integrate_over_sphere(-self.sig_r)
def calc_diffusion_tensor_direct(self):
# Diffusion coefficient tensor
# this corresponds to a normalized Gaussian used for point velocities
# it is necessary in the sense that the "momentum" of the distributed velocity be equal
# to the actual momentum of the particle
self.D = self.__integrate_over_sphere(self.kT * self.v_r)
def calc_diffusion_tensor_from_friction(self):
        if self.zeta is None:  # compute the friction tensor first if needed
self.calc_friction_tensor()
self.D = self.kT * np.linalg.inv(self.zeta)
def get_histogram(self, q, _n_bins):
# Histogram of a matrix q based on inter-particle distances
r = np.linspace(0.0, np.pi * self.R, _n_bins)
dr = r[1] - r[0]
n_hist = np.zeros(_n_bins)
q_hist = np.zeros(_n_bins)
q_min_hist = np.ones(_n_bins) * 1.0e10
q_max_hist = np.ones(_n_bins) * -1.0e10
for i in range(self.n_particles):
for j in range(i, self.n_particles):
cos_th = np.dot(self.normal[i], self.normal[j])
sin_th = np.linalg.norm(np.cross(self.normal[i], self.normal[j]))
th0 = np.arctan2(sin_th, cos_th)
r_ij = self.R * th0
ind = np.floor(r_ij / dr).astype('int')
q_hist[ind] += q[i, j]
if q[i, j] < q_min_hist[ind]:
q_min_hist[ind] = q[i, j]
if q[i, j] > q_max_hist[ind]:
q_max_hist[ind] = q[i, j]
n_hist[ind] += 1.0
nz_ind = np.nonzero(n_hist)
q_mean_hist = q_hist[nz_ind] / n_hist[nz_ind]
return r[nz_ind], q_mean_hist, q_min_hist[nz_ind], q_max_hist[nz_ind]
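# Construction sketch (assumption, not part of the original module): the parameter
# values below are placeholders; the surface_integration module and the k_acc.npy
# table must be available for the stress calculation in __init__ to succeed.
#
#     ves = VesicleHydro(_lattice_parameter=1.0, _level_of_tess=3,
#                        _temperature=300.0, _viscosity=1.0, _alpha_over_a=1.0)
#     ves.calc_friction_tensor()
#     ves.calc_diffusion_tensor_from_friction()
#     r, d_mean, d_min, d_max = ves.get_histogram(ves.D, 50)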
|
# coding: utf-8
"""
Harbor API
These APIs provide services for manipulating Harbor project.
OpenAPI spec version: 0.3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class LdapConf(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'ldap_url': 'str',
'ldap_search_dn': 'str',
'ldap_search_password': 'str',
'ldap_base_dn': 'str',
'ldap_filter': 'str',
'ldap_uid': 'str',
'ldap_scope': 'int',
'ldap_connection_timeout': 'int'
}
attribute_map = {
'ldap_url': 'ldap_url',
'ldap_search_dn': 'ldap_search_dn',
'ldap_search_password': 'ldap_search_password',
'ldap_base_dn': 'ldap_base_dn',
'ldap_filter': 'ldap_filter',
'ldap_uid': 'ldap_uid',
'ldap_scope': 'ldap_scope',
'ldap_connection_timeout': 'ldap_connection_timeout'
}
def __init__(self, ldap_url=None, ldap_search_dn=None, ldap_search_password=None, ldap_base_dn=None, ldap_filter=None, ldap_uid=None, ldap_scope=None, ldap_connection_timeout=None):
"""
LdapConf - a model defined in Swagger
"""
self._ldap_url = None
self._ldap_search_dn = None
self._ldap_search_password = None
self._ldap_base_dn = None
self._ldap_filter = None
self._ldap_uid = None
self._ldap_scope = None
self._ldap_connection_timeout = None
if ldap_url is not None:
self.ldap_url = ldap_url
if ldap_search_dn is not None:
self.ldap_search_dn = ldap_search_dn
if ldap_search_password is not None:
self.ldap_search_password = ldap_search_password
if ldap_base_dn is not None:
self.ldap_base_dn = ldap_base_dn
if ldap_filter is not None:
self.ldap_filter = ldap_filter
if ldap_uid is not None:
self.ldap_uid = ldap_uid
if ldap_scope is not None:
self.ldap_scope = ldap_scope
if ldap_connection_timeout is not None:
self.ldap_connection_timeout = ldap_connection_timeout
@property
def ldap_url(self):
"""
Gets the ldap_url of this LdapConf.
The url of ldap service.
:return: The ldap_url of this LdapConf.
:rtype: str
"""
return self._ldap_url
@ldap_url.setter
def ldap_url(self, ldap_url):
"""
Sets the ldap_url of this LdapConf.
The url of ldap service.
:param ldap_url: The ldap_url of this LdapConf.
:type: str
"""
self._ldap_url = ldap_url
@property
def ldap_search_dn(self):
"""
Gets the ldap_search_dn of this LdapConf.
The search dn of ldap service.
:return: The ldap_search_dn of this LdapConf.
:rtype: str
"""
return self._ldap_search_dn
@ldap_search_dn.setter
def ldap_search_dn(self, ldap_search_dn):
"""
Sets the ldap_search_dn of this LdapConf.
The search dn of ldap service.
:param ldap_search_dn: The ldap_search_dn of this LdapConf.
:type: str
"""
self._ldap_search_dn = ldap_search_dn
@property
def ldap_search_password(self):
"""
Gets the ldap_search_password of this LdapConf.
The search password of ldap service.
:return: The ldap_search_password of this LdapConf.
:rtype: str
"""
return self._ldap_search_password
@ldap_search_password.setter
def ldap_search_password(self, ldap_search_password):
"""
Sets the ldap_search_password of this LdapConf.
The search password of ldap service.
:param ldap_search_password: The ldap_search_password of this LdapConf.
:type: str
"""
self._ldap_search_password = ldap_search_password
@property
def ldap_base_dn(self):
"""
Gets the ldap_base_dn of this LdapConf.
The base dn of ldap service.
:return: The ldap_base_dn of this LdapConf.
:rtype: str
"""
return self._ldap_base_dn
@ldap_base_dn.setter
def ldap_base_dn(self, ldap_base_dn):
"""
Sets the ldap_base_dn of this LdapConf.
The base dn of ldap service.
:param ldap_base_dn: The ldap_base_dn of this LdapConf.
:type: str
"""
self._ldap_base_dn = ldap_base_dn
@property
def ldap_filter(self):
"""
Gets the ldap_filter of this LdapConf.
        The search filter of ldap service.
:return: The ldap_filter of this LdapConf.
:rtype: str
"""
return self._ldap_filter
@ldap_filter.setter
def ldap_filter(self, ldap_filter):
"""
Sets the ldap_filter of this LdapConf.
        The search filter of ldap service.
:param ldap_filter: The ldap_filter of this LdapConf.
:type: str
"""
self._ldap_filter = ldap_filter
@property
def ldap_uid(self):
"""
Gets the ldap_uid of this LdapConf.
        The search uid from ldap service attributes.
:return: The ldap_uid of this LdapConf.
:rtype: str
"""
return self._ldap_uid
@ldap_uid.setter
def ldap_uid(self, ldap_uid):
"""
Sets the ldap_uid of this LdapConf.
        The search uid from ldap service attributes.
:param ldap_uid: The ldap_uid of this LdapConf.
:type: str
"""
self._ldap_uid = ldap_uid
@property
def ldap_scope(self):
"""
Gets the ldap_scope of this LdapConf.
        The search scope of ldap service.
:return: The ldap_scope of this LdapConf.
:rtype: int
"""
return self._ldap_scope
@ldap_scope.setter
def ldap_scope(self, ldap_scope):
"""
Sets the ldap_scope of this LdapConf.
        The search scope of ldap service.
:param ldap_scope: The ldap_scope of this LdapConf.
:type: int
"""
self._ldap_scope = ldap_scope
@property
def ldap_connection_timeout(self):
"""
Gets the ldap_connection_timeout of this LdapConf.
        The connection timeout of the ldap service (seconds).
:return: The ldap_connection_timeout of this LdapConf.
:rtype: int
"""
return self._ldap_connection_timeout
@ldap_connection_timeout.setter
def ldap_connection_timeout(self, ldap_connection_timeout):
"""
Sets the ldap_connection_timeout of this LdapConf.
        The connection timeout of the ldap service (seconds).
:param ldap_connection_timeout: The ldap_connection_timeout of this LdapConf.
:type: int
"""
self._ldap_connection_timeout = ldap_connection_timeout
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, LdapConf):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
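# Usage sketch (assumption, not generated by swagger-codegen): the model is a plain
# value object, so it can be built with keyword arguments and serialized via to_dict().
#
#     conf = LdapConf(ldap_url="ldaps://ldap.example.com",
#                     ldap_base_dn="dc=example,dc=com",
#                     ldap_uid="uid",
#                     ldap_scope=2,
#                     ldap_connection_timeout=5)
#     print(conf.to_dict())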
|
"""Spatial Proximity Index."""
__author__ = "Renan X. Cortes <renanc@ucr.edu>, Sergio J. Rey <sergio.rey@ucr.edu> and Elijah Knaap <elijah.knaap@ucr.edu>"
import numpy as np
from ..util import generate_distance_matrix
from .._base import SingleGroupIndex, SpatialExplicitIndex
def _spatial_proximity(data, group_pop_var, total_pop_var, alpha=0.6, beta=0.5):
"""Calculate Spatial Proximity index.
Parameters
----------
data : a geopandas DataFrame with a geometry column.
group_pop_var : string
The name of variable in data that contains the population size of the group of interest
total_pop_var : string
The name of variable in data that contains the total population of the unit
alpha : float
A parameter that estimates the extent of the proximity within the same unit. Default value is 0.6
beta : float
A parameter that estimates the extent of the proximity within the same unit. Default value is 0.5
metric : string. Can be 'euclidean' or 'haversine'. Default is 'euclidean'.
The metric used for the distance between spatial units.
If the projection of the CRS of the geopandas DataFrame field is in degrees, this should be set to 'haversine'.
Returns
----------
statistic : float
Spatial Proximity Index
core_data : a geopandas DataFrame
A geopandas DataFrame that contains the columns used to perform the estimate.
Notes
-----
Based on Massey, Douglas S., and Nancy A. Denton. "The dimensions of residential segregation." Social forces 67.2 (1988): 281-315.
The pairwise distance between unit i and itself is (alpha * area_of_unit_i) ^ beta.
Reference: :cite:`massey1988dimensions`.
"""
    if alpha < 0:
        raise ValueError("alpha must not be negative.")
    if beta < 0:
        raise ValueError("beta must not be negative.")
T = data[total_pop_var].sum()
data = data.assign(
xi=data[group_pop_var],
yi=data[total_pop_var] - data[group_pop_var],
ti=data[total_pop_var],
)
X = data.xi.sum()
Y = data.yi.sum()
dist = generate_distance_matrix(data)
np.fill_diagonal(dist, val=np.exp(-((alpha * data.area.values) ** (beta))))
c = 1 - dist.copy() # proximity matrix
Pxx = ((np.array(data.xi) * c).T * np.array(data.xi)).sum() / X ** 2
Pyy = ((np.array(data.yi) * c).T * np.array(data.yi)).sum() / Y ** 2
Ptt = ((np.array(data.ti) * c).T * np.array(data.ti)).sum() / T ** 2
SP = (X * Pxx + Y * Pyy) / (T * Ptt)
core_data = data[[group_pop_var, total_pop_var, data.geometry.name]]
return SP, core_data
class SpatialProximity(SingleGroupIndex, SpatialExplicitIndex):
"""Spatial Proximity Index.
Parameters
----------
data : pandas.DataFrame or geopandas.GeoDataFrame, required
dataframe or geodataframe if spatial index holding data for location of interest
group_pop_var : str, required
name of column on dataframe holding population totals for focal group
total_pop_var : str, required
name of column on dataframe holding total overall population
alpha : float
A parameter that estimates the extent of the proximity within the same unit. Default value is 0.6
beta : float
A parameter that estimates the extent of the proximity within the same unit. Default value is 0.5
metric : string. Can be 'euclidean' or 'haversine'. Default is 'euclidean'.
The metric used for the distance between spatial units.
If the projection of the CRS of the geopandas DataFrame field is in degrees, this should be set to 'haversine'.
Attributes
----------
statistic : float
Spatial Proximity Index
core_data : a pandas DataFrame
A pandas DataFrame that contains the columns used to perform the estimate.
Notes
-----
Based on Massey, Douglas S., and Nancy A. Denton. "The dimensions of residential segregation." Social forces 67.2 (1988): 281-315.
The pairwise distance between unit i and itself is (alpha * area_of_unit_i) ^ beta.
Reference: :cite:`massey1988dimensions`.
"""
def __init__(
self, data, group_pop_var, total_pop_var, alpha=0.6, beta=0.5, **kwargs,
):
"""Init."""
SingleGroupIndex.__init__(self, data, group_pop_var, total_pop_var)
SpatialExplicitIndex.__init__(self,)
self.alpha = alpha
self.beta = beta
aux = _spatial_proximity(
self.data, self.group_pop_var, self.total_pop_var, self.alpha, self.beta,
)
self.statistic = aux[0]
self.core_data = aux[1]
self._function = _spatial_proximity
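# Usage sketch (assumption, not part of the original module): any geopandas
# GeoDataFrame with a focal-group column and a total-population column should work;
# the file path and column names below are hypothetical.
#
#     import geopandas as gpd
#
#     tracts = gpd.read_file("tracts.shp")
#     sp = SpatialProximity(tracts, "group_pop", "total_pop", alpha=0.6, beta=0.5)
#     print(sp.statistic)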
|
import math
from models import *
from physics import *
import pprint
from good_geo import LOOKUP_TABLE
class Bot:
def handle_join_response(self, game_response):
if not game_response.is_valid or game_response.game_stage == GAME_STAGE_HAS_FINISHED:
print(f"{self} Skipping role detection for {game_response}")
return
self.role = game_response.static_game_info.role
print(f"{self} Detected role as {self.role}")
def handle_start_response(self, game_response):
if not game_response.is_valid or game_response.game_stage == GAME_STAGE_HAS_FINISHED:
print(f"{self} Skipping ship ID detection for {game_response}")
return
self.ship_id = None
for ship in game_response.game_state.ships:
if ship.role == self.role:
self.ship_id = ship.ship_id
print(f"{self} Detected ship ID as {self.ship_id}")
if self.ship_id is None:
raise ValueError(f"Failed to find initial ship IDs in {game_response} with role {self.role}")
def get_start_data(self, game_response: GameResponse):
raise RuntimeError("not implemented")
def get_commands(self, game_response: GameResponse):
raise RuntimeError("not implemented")
def get_other_ship_ids(self, game_response):
return [
ship.ship_id
for ship in game_response.game_state.ships
if ship.ship_id != self.ship_id
]
class DoNothingBot(Bot):
def get_start_data(self, game_response: GameResponse):
# default: not sure what it means
# return [64, 48, 14, 1] # [446, 0, 0, 1]
# return [5, 5, 5, 5]
return [1, 1, 1, 1]
def get_commands(self, game_response: GameResponse):
# default: do nothing
return []
class NaiveBot(Bot):
def get_start_data(self, game_response: GameResponse):
return [1, 1, 1, 1]
def get_commands(self, game_response: GameResponse):
# default: do nothing
return []
class BasicFlyingHelper:
def __init__(self, bot):
self.bot = bot
def get_commands(self, game_response: GameResponse, ship_id: int):
"""Issues commands to keep the given ship flying."""
x, y = game_response.get_ship(ship_id).position
if abs(x) > 47 or abs(y) > 47:
# Just cool down
return []
else:
x = -1 if x > 0 else 1
y = -1 if y > 0 else 1
return [
AccelerateCommand(ship_id=ship_id, vector=(x, y))
]
class FlyingBot(Bot):
def __init__(self):
self.flying_helper = BasicFlyingHelper(self)
def get_start_data(self, game_response: GameResponse):
return [200, 10, 10, 1]
def get_commands(self, game_response: GameResponse):
return self.flying_helper.get_commands(game_response, self.ship_id)
class TrajectoryBot(FlyingBot):
def get_commands(self, game_response: GameResponse):
ship_state = game_response.get_ship(self.ship_id)
x, y = ship_state.position
vx, vy = ship_state.velocity
N_PREDICTION_STEPS = 14
W_CENTER1 = 20
W_CENTER2 = 4
W_CORNER = 2
W_FUEL = 15
W_PLANET_DIRECTION = 10
W_V_DIRECTION = 5
# print(f"params {(N_PREDICTION_STEPS, W_CENTER1, W_CENTER2, W_CORNER, W_FUEL)}")
best_dv = (0, 0)
best_cost = 1000
for dvx in (-1, 0, 1):
for dvy in (-1, 0, 1):
# traj = []
# print(f"\n\ndv: {(dvx, dvy)}")
ks = KinematicState((x,y), (vx-dvx,vy-dvy))
cost = 0
min_linf_center_dist = min_l2_center_dist = min_l2_corner_dist = 1000
for i in range(N_PREDICTION_STEPS):
ks = ks.update()
pos = ks.pos
# print(f"pos {i}: {pos}")
min_linf_center_dist = min(linf_norm(pos), min_linf_center_dist)
min_l2_center_dist = min(l2_norm(pos), min_l2_center_dist)
min_l2_corner_dist = min(min(l2_norm((pos[0]-cx, pos[1]-cy)) \
for cx in (16, -16) for cy in (16, -16) ), min_l2_corner_dist)
# print(f"dists {(min_linf_center_dist, min_l2_center_dist, min_l2_corner_dist)}")
cost = W_CENTER1*hinge(16 - min_linf_center_dist)*(1+sign(int(i<=3))) \
+ W_CENTER2*hinge(23 - min_l2_center_dist) \
+ W_CORNER*hinge(3 - min_l2_corner_dist) \
+ W_FUEL*linf_norm((dvx, dvy)) \
+ W_PLANET_DIRECTION * (int(sign(x) == sign(dvx) and abs(y)<= 18) \
+ int(sign(y) == sign(dvy) and abs(x) <= 18) ) \
+ W_V_DIRECTION * (int(sign(vx) == sign(dvx)) \
+ int(sign(vy) == sign(dvy) )) \
+ (0.8 + int(min(vx, vy)<=2))*hinge(linf_norm(pos)-48)
# print(f"cost {cost}")
if cost < best_cost:
best_dv = (dvx, dvy)
best_cost = cost
# print(f"new best dv: {best_dv}, cost {best_cost}\n")
# print(f"Overall best dv {best_dv}, cost {best_cost} \n \n")
return [AccelerateCommand(ship_id=self.ship_id, vector=best_dv)] if best_dv != (0, 0) else []
class ShootAheadHelper:
def __init__(self, bot):
self.bot = bot
def get_commands(self, game_response: GameResponse, shooter_ship_id: int, target_ship_id: int, power: int):
target_ship = game_response.get_ship(target_ship_id)
p = target_ship.position
v = target_ship.velocity
g = get_g_force(p[0], p[1])
target = (
p[0] + v[0] + g[0],
p[1] + v[1] + g[1],
)
return [
ShootCommand(ship_id=shooter_ship_id, target=target, x3=power)
]
def get_distance(p1, p2):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
return math.sqrt(dx * dx + dy * dy)
def can_detonate(p1, p2):
return abs(p2[0] - p1[0]) < 8 and abs(p2[1] - p1[1]) < 8
class ShooterBot(Bot):
def __init__(self):
self.shoot_ahead_helper = ShootAheadHelper(self)
self.trajectory_bot = TrajectoryBot()
def get_start_data(self, game_response: GameResponse):
if self.role == SHIP_ROLE_ATTACKER:
return [134, 64, 10, 1]
else:
return [100, 48, 8, 1]
def get_commands(self, game_response: GameResponse):
self.trajectory_bot.ship_id = self.ship_id
commands = self.trajectory_bot.get_commands(game_response)
ship = game_response.get_ship(self.ship_id)
target_ship_id = self.get_other_ship_ids(game_response)[0]
other_ship = game_response.get_ship(target_ship_id)
if self.role == SHIP_ROLE_ATTACKER and can_detonate(ship.position, other_ship.position) and len(self.get_other_ship_ids(game_response)) == 1:
print(f"{self} WILL DETONATE")
return [DetonateCommand(ship_id=self.ship_id)]
max_temp = ship.x6
temp = ship.x5
coolant = ship.x4[2]
shot_power = ship.x4[1]
new_temp = temp + shot_power - coolant
distance = get_distance(ship.position, other_ship.position)
print(f"{self} temp={temp} max_temp={max_temp} coolant={coolant} shot_power={shot_power} new_temp={new_temp} distance={distance}")
if new_temp < max_temp and distance < 1.3 * shot_power:
print(f"{self} WILL SHOOT")
commands += self.shoot_ahead_helper.get_commands(
game_response, shooter_ship_id=self.ship_id, target_ship_id=target_ship_id, power=shot_power)
return commands
def get_away_vectors(xy1, xy2):
x1, y1 = xy1
x2, y2 = xy2
if abs(x2 - x1) + abs(y2 - y1) <= 5:
dx = sign(x2 - x1)
dy = sign(y2 - y1)
if dx == 0: dx += 1
if dy == 0: dy += 1
return ((dx, dy), (-dx, -dy))
return None
def apply_point(pt, delta):
return (pt[0] + delta[0], pt[1] + delta[1])
class StationaryBot(Bot):
def __init__(self):
self.moved = False
self.flying_helper = BasicFlyingHelper(self)
def get_start_data(self, game_response: GameResponse):
return [200, 10, 10, 1]
def get_commands(self, game_response: GameResponse):
move = None
if not self.moved:
ship = game_response.get_ship(self.ship_id)
move = LOOKUP_TABLE.get((ship.position, ship.velocity))
res = []
if move:
print (f"We have acceleration to orbit {move}")
res = [AccelerateCommand(ship_id=self.ship_id, vector=(-move[0], -move[1]))]
self.moved = True
elif not self.moved:
res = self.flying_helper.get_commands(game_response, self.ship_id)
return res
class ForkBot(Bot):
def __init__(self):
self.flying_helper = BasicFlyingHelper(self)
self.starting_stats = [64, 64, 10, 4]
self.num_forked = 0
def get_start_data(self, game_response: GameResponse):
return self.starting_stats
def get_commands(self, game_response: GameResponse):
team_ship_ids = []
for ship in game_response.game_state.ships:
if ship.role == self.role:
team_ship_ids.append(ship.ship_id)
if self.num_forked == 0:
self.num_forked += 1
            return [ForkCommand(
                ship_id=self.ship_id,
                # use a plain list (not a lazy map object) so the halved stats serialize safely
                x4=[stat // 2 for stat in self.starting_stats])
            ]
commands = []
if len(team_ship_ids) >= 2:
ship_id1 = team_ship_ids[0]
ship_id2 = team_ship_ids[1]
pos1 = game_response.get_ship(ship_id1).position
pos2 = game_response.get_ship(ship_id2).position
vectors = get_away_vectors(pos1, pos2)
if vectors:
return [
AccelerateCommand(ship_id=ship_id1, vector=vectors[0]),
AccelerateCommand(ship_id=ship_id2, vector=vectors[1])
]
for ship_id in team_ship_ids:
commands.extend(self.flying_helper.get_commands(game_response, ship_id))
return commands
class RoleSmartBot(Bot):
def __init__(self, attacker, defender):
self.attacker = attacker
self.defender = defender
def handle_join_response(self, game_response):
self.role = game_response.static_game_info.role
if self.role == SHIP_ROLE_ATTACKER:
print(f"{self} will use the ATTACKER strategy with bot {self.attacker}.")
self.bot = self.attacker
else:
print(f"{self} will use the DEFENDER strategy with bot {self.defender}.")
self.bot = self.defender
self.bot.handle_join_response(game_response)
def handle_start_response(self, game_response):
self.bot.handle_start_response(game_response)
def get_start_data(self, game_response: GameResponse):
return self.bot.get_start_data(game_response)
def get_commands(self, game_response: GameResponse):
return self.bot.get_commands(game_response)
|
import unittest
from nodes import *
from interpreter import Interpreter
from values import Number
class TestInterpreter(unittest.TestCase):
def test_number(self):
value = Interpreter().visit(NumberNode(23.5))
self.assertEqual(value, Number(23.5))
def test_individual_operations(self):
value = Interpreter().visit(AddNode(NumberNode(27), NumberNode(12)))
self.assertEqual(value, Number(39))
value = Interpreter().visit(SubtractNode(NumberNode(27), NumberNode(12)))
self.assertEqual(value, Number(15))
value = Interpreter().visit(MultiplyNode(NumberNode(7), NumberNode(2)))
self.assertEqual(value, Number(14))
value = Interpreter().visit(DivideNode(NumberNode(8), NumberNode(2)))
        self.assertAlmostEqual(value.value, 4, 5)
with self.assertRaises(Exception):
Interpreter().visit(DivideNode(NumberNode(8), NumberNode(0)))
def test_full_expression(self):
tree = AddNode(
NumberNode(27.0),
MultiplyNode(
SubtractNode(
DivideNode(
NumberNode(43.0),
NumberNode(36.0)
),
NumberNode(48.0)
),
NumberNode(51.0)
)
)
result = Interpreter().visit(tree)
self.assertAlmostEqual(result.value, -2360.08, 2)
if __name__ == '__main__':
unittest.main()
|
from .util import (run_shell, cdo)
from .nco_cdo import (cdo_sellevel,
nc_cal_daily_flux,
nc_detrend,
nc_monmean, nc_daymean, nc_mean, nc_ydrunmean, nc_ydaysub, nc_ydrunanom,
nc_mergetime,
nc_rcat, nc_rename, nc_reunit_time, nc_pack, nc_unpack,
nc_shifttime, nc_splitmon, nc_splitday,
nc_update_units,
nco_remove_attr, nc_change_attr,
nc_set_record_dimension)
|
import os
from contextlib import contextmanager
from django.core.files.temp import NamedTemporaryFile
from django.db import transaction
from django.db.models.signals import post_delete, post_save
from wagtailvideos import ffmpeg, get_video_model
@contextmanager
def get_local_file(file):
"""
Get a local version of the file, downloading it from the remote storage if
required. The returned value should be used as a context manager to
ensure any temporary files are cleaned up afterwards.
"""
try:
with open(file.path):
yield file.path
except NotImplementedError:
_, ext = os.path.splitext(file.name)
with NamedTemporaryFile(prefix='wagtailvideo-', suffix=ext) as tmp:
try:
file.open('rb')
for chunk in file.chunks():
tmp.write(chunk)
finally:
file.close()
tmp.flush()
yield tmp.name
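# Usage sketch (assumption): get_local_file is a context manager, so callers get a
# filesystem path that is only valid inside the `with` block, e.g.
#
#     with get_local_file(video.file) as file_path:
#         thumbnail = ffmpeg.get_thumbnail(file_path)
#
# where `video` is an instance of the configured video model (this mirrors the
# video_post_save handler below).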
def post_delete_file_cleanup(instance, **kwargs):
# Pass false so FileField doesn't save the model.
transaction.on_commit(lambda: instance.file.delete(False))
if hasattr(instance, 'thumbnail'):
# Delete the thumbnail for videos too
transaction.on_commit(lambda: instance.thumbnail.delete(False))
# Fields that need the actual video file to create using ffmpeg
def video_post_save(instance, **kwargs):
if not ffmpeg.installed():
return
if hasattr(instance, '_from_signal'):
# Sender was us, don't run post save
return
has_changed = instance._initial_file is not instance.file
filled_out = instance.thumbnail is not None and instance.duration is not None
if has_changed or not filled_out:
with get_local_file(instance.file) as file_path:
if has_changed or instance.thumbnail is None:
instance.thumbnail = ffmpeg.get_thumbnail(file_path)
if has_changed or instance.duration is None:
instance.duration = ffmpeg.get_duration(file_path)
instance.file_size = instance.file.size
instance._from_signal = True
instance.save()
del instance._from_signal
def register_signal_handlers():
Video = get_video_model()
VideoTranscode = Video.get_transcode_model()
TrackListing = Video.get_track_listing_model()
VideoTrack = TrackListing.get_track_model()
post_save.connect(video_post_save, sender=Video)
post_delete.connect(post_delete_file_cleanup, sender=Video)
post_delete.connect(post_delete_file_cleanup, sender=VideoTranscode)
post_delete.connect(post_delete_file_cleanup, sender=VideoTrack)
|
import tkinter
from src.gui_app import *
def main():
root = tkinter.Tk()
program = Application(root)
root.mainloop()
if __name__ == "__main__":
main()
|
import tkinter as tk
class GUI:
def __init__(self):
self.fenster = tk.Tk()
        self.fenster.configure(background="black")
        self.fenster.attributes("-fullscreen", True)
        # Create widgets before entering the main loop; mainloop() blocks until the window closes.
        self.main = tk.Frame(self.fenster)
        self.fenster.mainloop()
    def set_time(self, hour, minute):
        return
|
import os
from copy import deepcopy
INPUT_FILE = os.path.join(os.path.dirname(__file__), "input")
def part1():
acc = 0
ip = 0
sequence = set()
with open(INPUT_FILE, "r") as fp:
lines = fp.read().splitlines()
while ip not in sequence:
sequence.add(ip)
if lines[ip].startswith("acc"):
acc += int(lines[ip][3:])
ip += 1
elif lines[ip].startswith("jmp"):
ip += int(lines[ip][3:])
elif lines[ip].startswith("nop"):
ip += 1
return acc
def part2():
with open(INPUT_FILE, "r") as fp:
lines = fp.read().splitlines()
nops = [idx for idx, val in enumerate(lines) if val.startswith("nop")]
jmps = [idx for idx, val in enumerate(lines) if val.startswith("jmp")]
while True:
for instr in nops + jmps:
_lines = deepcopy(lines)
ip = 0
acc = 0
sequence = set()
if _lines[instr].startswith("nop"):
_lines[instr] = _lines[instr].replace("nop", "jmp")
else:
_lines[instr] = _lines[instr].replace("jmp", "nop")
while ip not in sequence:
if ip >= len(_lines):
return acc
sequence.add(ip)
if _lines[ip].startswith("acc"):
acc += int(_lines[ip][3:])
ip += 1
elif _lines[ip].startswith("jmp"):
ip += int(_lines[ip][3:])
elif _lines[ip].startswith("nop"):
ip += 1
if __name__ == "__main__":
    print(part1())
    print(part2())
|
class ParamRequest(object):
"""
Represents a set of request parameters.
"""
def to_request_parameters(self):
"""
:return: list[:class:`ingenico.connect.sdk.RequestParam`] representing the HTTP request parameters
"""
raise NotImplementedError
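# Minimal subclass sketch (assumption, not part of the SDK; the import path for
# RequestParam is assumed): a concrete ParamRequest returns one RequestParam per
# query-string parameter it carries.
#
#     from ingenico.connect.sdk.request_param import RequestParam
#
#     class FindParams(ParamRequest):
#         def __init__(self, country_code=None):
#             self.country_code = country_code
#
#         def to_request_parameters(self):
#             result = []
#             if self.country_code is not None:
#                 result.append(RequestParam("countryCode", self.country_code))
#             return result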
|
# <<BEGIN-copyright>>
# Copyright 2019, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: MIT
# <<END-copyright>>
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser
code = 'deposition_multiGroup'
description = """Runs %s on a list of targets for defined options.""" % code
parser = ArgumentParser( description = description )
parser.add_argument( '-v', '--verbose', action = 'count', default = 0, help = 'Enable verbose output.' )
parser.add_argument( 'targets', action = 'append', help = 'List of target IDs to check.' )
args = parser.parse_args( )
def checkOptions( target, options ) :
if( args.verbose > 0 ) : print()
output = '%s.%s' % ( code, target )
for option in options : output += '.%s' % option
benchmarks = os.path.join( 'Benchmarks', output + '.out' ).replace( '..', '.' )
output = os.path.join( 'Outputs', output + '.out' ).replace( '..', '.' )
cmd = './%s %s --tid %s > %s' % ( code, ' '.join( options ), target, output )
if( args.verbose > 0 ) : print( cmd )
os.system( cmd )
cmd = '../Utilities/diff.com %s/%s %s %s' % ( code, code, benchmarks, output )
if( args.verbose > 0 ) : print( cmd )
os.system( cmd )
def checkTarget( target ) :
for options in [ [ '' ], [ '-p' ] ] : checkOptions( target, options )
if( not( os.path.exists( 'Outputs' ) ) ) : os.mkdir( 'Outputs' )
for target in args.targets : checkTarget( target )
|
from cbint.utils.detonation import DetonationDaemon, ConfigurationError
from cbint.utils.detonation.binary_analysis import (BinaryAnalysisProvider, AnalysisPermanentError,
AnalysisTemporaryError, AnalysisResult, AnalysisInProgress)
import cbint.utils.feed
from cbapi.connection import CbAPISessionAdapter
import time
import logging
import os
import sys
from api_request import APISession
from lxml import etree
log = logging.getLogger(__name__)
class WildfireProvider(BinaryAnalysisProvider):
def __init__(self, name, wildfire_url, wildfire_ssl_verify, api_keys, work_directory):
super(WildfireProvider, self).__init__(name)
self.api_keys = api_keys
self.wildfire_url = wildfire_url
self.wildfire_ssl_verify = wildfire_ssl_verify
self.current_api_key_index = 0
self.session = APISession(api_keys=self.api_keys, throttle_per_minute=120)
tls_adapter = CbAPISessionAdapter(force_tls_1_2=True)
self.session.mount("https://", tls_adapter)
self.work_directory = work_directory
def get_api_key(self):
for i in range(len(self.api_keys)):
yield self.api_keys[self.current_api_key_index]
self.current_api_key_index += 1
self.current_api_key_index %= len(self.api_keys)
# if we've gotten here, we have no more keys to give.
def _call_wildfire_api(self, method, path, payload=None, files=None):
url = self.wildfire_url + path
if method == 'GET':
try:
r = self.session.get(url, verify=self.wildfire_ssl_verify)
except Exception as e:
log.exception("Exception when sending WildFire API GET request: %s" % e)
raise
return r.status_code, r.content
elif method == 'POST':
try:
r = self.session.post(url, data=payload, files=files, verify=self.wildfire_ssl_verify)
except Exception as e:
log.exception("Exception when sending WildFire API query: %s" % e)
# bubble this up as necessary
raise
return r.status_code, r.content
def query_wildfire(self, md5sum):
"""
query the wildfire api to get a report on an md5
"""
log.info("Querying wildfire for md5sum %s" % md5sum)
status_code, content = self._call_wildfire_api("POST", "/publicapi/get/verdict",
{'hash': md5sum.lower()})
if status_code == 404:
return None # can't find the binary
elif status_code != 200:
log.info("Received unknown HTTP status code %d from WildFire" % status_code)
log.info("-> response content: %s" % content)
raise AnalysisTemporaryError("Received unknown HTTP status code %d from WildFire" % status_code,
retry_in=120)
response = etree.fromstring(content)
# Return 0 Benign verdict
# 1 Malware verdict
# 2 Grayware verdict
# -100 Verdict is pending
# -101 Indicates a file error
# -102 The file could not be found
# -103 The hash submitted is invalid
if md5sum.lower() == response.findtext("./get-verdict-info/md5").lower():
verdict = response.findtext("./get-verdict-info/verdict").strip()
if verdict == "-100":
return None # waiting for WildFire verdict
elif verdict == "-102":
return None # file not in WildFire yet
elif verdict.startswith("-"):
raise AnalysisPermanentError("WildFire could not process file: error %s" % verdict)
elif verdict == "1":
return self.generate_malware_result(md5sum, 100)
elif verdict == "2":
return self.generate_malware_result(md5sum, 50)
else:
return AnalysisResult(score=0)
def generate_malware_result(self, md5, score):
status_code, content = self._call_wildfire_api("POST", "/publicapi/get/report",
{'hash': md5.lower(), "format": "pdf"})
        if status_code == 200:
            report_path = os.path.join(self.work_directory, md5.upper()) + ".pdf"
            with open(report_path, 'wb') as report_file:
                report_file.write(content)
            return AnalysisResult(score=score, link="/reports/%s.pdf" % md5.upper())
else:
return AnalysisResult(score=score)
def submit_wildfire(self, md5sum, file_stream):
"""
submit a file to the wildfire api
returns a wildfire submission status code
"""
files = {'file': ('CarbonBlack_%s' % md5sum, file_stream)}
try:
status_code, content = self._call_wildfire_api("POST", "/publicapi/submit/file", files=files)
except Exception as e:
log.exception("Exception while submitting MD5 %s to WildFire: %s" % (md5sum, e))
raise AnalysisTemporaryError("Exception while submitting to WildFire: %s" % e)
else:
if status_code == 200:
return True
else:
raise AnalysisTemporaryError("Received HTTP error code %d while submitting to WildFire" % status_code)
def check_result_for(self, md5sum):
return self.query_wildfire(md5sum)
def analyze_binary(self, md5sum, binary_file_stream):
self.submit_wildfire(md5sum, binary_file_stream)
retries = 20
while retries:
time.sleep(30)
result = self.check_result_for(md5sum)
if result:
return result
retries -= 1
raise AnalysisTemporaryError(message="Maximum retries (20) exceeded submitting to WildFire", retry_in=120)
class WildfireConnector(DetonationDaemon):
@property
def filter_spec(self):
filters = []
max_module_len = 10 * 1024 * 1024
filters.append('(os_type:windows) orig_mod_len:[1 TO %d]' % max_module_len)
additional_filter_requirements = self.get_config_string("binary_filter_query", None)
if additional_filter_requirements:
filters.append(additional_filter_requirements)
log.info("Filter spec is %s" % ' '.join(filters))
return ' '.join(filters)
@property
def integration_name(self):
return 'Cb Wildfire Connector 2.5.10'
@property
def num_quick_scan_threads(self):
return 1
@property
def num_deep_scan_threads(self):
return 4
def get_provider(self):
wildfire_provider = WildfireProvider(self.name, self.wildfire_url, self.wildfire_ssl_verify, self.api_keys,
self.work_directory)
return wildfire_provider
def get_metadata(self):
return cbint.utils.feed.generate_feed(self.name, summary="PaloAlto Wildfire cloud binary feed",
tech_data=("There are no requirements to share any data with Carbon Black to use this feed. "
"However, binaries may be shared with Palo Alto."),
provider_url="http://wildfire.paloaltonetworks.com/",
icon_path='/usr/share/cb/integrations/wildfire/wildfire-logo.png',
display_name="Wildfire", category="Connectors")
def validate_config(self):
super(WildfireConnector, self).validate_config()
keys = self.get_config_string("wildfire_api_keys", None)
if not keys:
raise ConfigurationError("WildFire API keys must be specified in the wildfire_api_keys option")
self.api_keys = keys.split(';')
wildfire_url = self.get_config_string("wildfire_url", "https://wildfire.paloaltonetworks.com")
self.wildfire_url = wildfire_url.rstrip("/")
self.wildfire_ssl_verify = self.get_config_boolean("wildfire_verify_ssl", True)
log.info("connecting to WildFire server at %s with API keys %s" % (self.wildfire_url, self.api_keys))
return True
if __name__ == '__main__':
# import yappi
import logging
logging.basicConfig(level=logging.DEBUG)
# yappi.start()
my_path = os.path.dirname(os.path.abspath(__file__))
temp_directory = "/tmp/wildfire"
config_path = os.path.join(my_path, "testing.conf")
daemon = WildfireConnector('wildfiretest', configfile=config_path, work_directory=temp_directory,
logfile=os.path.join(temp_directory, 'test.log'), debug=True)
daemon.start()
# yappi.get_func_stats().print_all()
# yappi.get_thread_stats().print_all()
|
from models import UfsObj, CollectionItem
'''
class UfsObjForm(ModelForm):
class Meta:
model = UfsObj
fields = ('ufs_url')
widgets = {
'name': Textarea(attrs={'cols': 80, 'rows': 20}),
}
'''
|
from django.contrib import admin
from .models import *
class PartyAdmin(admin.ModelAdmin):
pass
admin.site.register(Party, PartyAdmin)
|
import pygame
class MediaPlayer(object):
"""
Play media file
"""
def __init__(self):
self.channel = None
pygame.mixer.init()
def play(self, media_file):
"""
Play media file
"""
self.player = pygame.mixer.Sound(media_file)
if self.channel is None or not self.channel.get_busy():
self.channel = self.player.play()
def close(self):
"""
Close player
"""
        if self.channel is not None:
self.channel.stop()
|
# Reverse the decimal digits of n.
n = 4562
rev = 0
while n > 0:
    a = n % 10
    rev = rev * 10 + a
    n = n // 10
print(rev)
|
import numpy as np
import math
from mgcpy.independence_tests.utils.transform_matrices import \
transform_matrices
import scipy.io
import os
def power(independence_test, sample_generator, num_samples=100, num_dimensions=1, theta=0, noise=0.0, repeats=1000, alpha=.05, simulation_type=''):
'''
Estimate power
:param independence_test: an object whose class inherits from the Independence_Test abstract class
:type: Object(Independence_Test)
:param sample_generator: a function used to generate simulation from simulations.py with parameters given by the following arguments
- num_samples: default to 100
- num_dimensions: default to 1
- noise: default to 0
:type: function
:param num_samples: the number of samples generated by the simulation
:type: int
:param num_dimensions: the number of dimensions of the samples generated by the simulation
:type: int
:param noise: the noise used in simulation
:type: float
:param repeats: the number of times we generate new samples to estimate the null/alternative distribution
:type: int
:param alpha: the type I error level
:type: float
:param simulation_type: specify simulation when necessary (default to empty string)
:type: string
:return empirical_power: the estimated power
:type: float
'''
# test statistics under the null, used to estimate the cutoff value under the null distribution
test_stats_null = np.zeros(repeats)
# test statistic under the alternative
test_stats_alternative = np.zeros(repeats)
theta = math.radians(theta)
a = [[0 for x in range(2)] for y in range(2)]
a[0][0] = math.cos(theta)
a[0][1] = math.sin(theta)*(-1)
a[1][0] = math.sin(theta)
a[1][1] = math.cos(theta)
a = np.asarray(a)
for rep in range(repeats):
# generate new samples for each iteration
# the if-else block below is for simulations that have a different argument list
# than the general case
if simulation_type == 'sine_16pi':
matrix_X, matrix_Y = sample_generator(
num_samples, num_dimensions, noise=noise, period=np.pi*16)
elif simulation_type == 'multi_noise' or simulation_type == 'multi_indept':
matrix_X, matrix_Y = sample_generator(num_samples, num_dimensions)
elif simulation_type == 'ellipse':
matrix_X, matrix_Y = sample_generator(
num_samples, num_dimensions, noise=noise, radius=5)
elif simulation_type == 'diamond':
matrix_X, matrix_Y = sample_generator(
num_samples, num_dimensions, noise=noise, period=-np.pi/8)
else:
matrix_X, matrix_Y = sample_generator(
num_samples, num_dimensions, noise=noise)
        data_matrix_X, data_matrix_Y = transform_matrices(matrix_X, matrix_Y)
data_matrix_Y = data_matrix_Y[:, np.newaxis]
data_matrix_X = data_matrix_X.T
data_matrix_X = np.dot(data_matrix_X, a)
# permutation test
permuted_y = np.random.permutation(matrix_Y)
test_stats_null[rep], _ = independence_test.test_statistic(
matrix_X, permuted_y)
test_stats_alternative[rep], _ = independence_test.test_statistic(
matrix_X, matrix_Y)
'''
# if the test is pearson, use absolute value of the test statistic
# so the more extreme test statistic is still in a one-sided interval
if independence_test.get_name() == 'pearson':
test_stats_null[rep] = abs(test_stats_null[rep])
test_stats_alternative[rep] = abs(test_stats_alternative[rep])
'''
# the cutoff is determined so that 1-alpha of the test statistics under the null distribution
# is less than the cutoff
cutoff = np.sort(test_stats_null)[math.ceil(repeats*(1-alpha))]
# the proportion of test statistics under the alternative which is no less than the cutoff (in which case
# the null is rejected) is the empirical power
empirical_power = np.where(test_stats_alternative >= cutoff)[
0].shape[0] / repeats
return empirical_power
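# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the same permutation
# recipe that power() uses above, written out for a toy linear dependence with
# a plain Pearson correlation, so the cutoff/power bookkeeping is easy to
# follow. The helper below is hypothetical and independent of mgcpy.
# ---------------------------------------------------------------------------
def _power_recipe_sketch(num_samples=50, repeats=200, alpha=0.05, noise=0.5):
    test_stats_null = np.zeros(repeats)
    test_stats_alternative = np.zeros(repeats)
    for rep in range(repeats):
        x = np.random.uniform(-1, 1, num_samples)
        y = x + noise * np.random.randn(num_samples)
        # statistic under the alternative (paired samples) ...
        test_stats_alternative[rep] = abs(np.corrcoef(x, y)[0, 1])
        # ... and under the null (pairing broken by a permutation)
        test_stats_null[rep] = abs(np.corrcoef(x, np.random.permutation(y))[0, 1])
    # cutoff: roughly the (1 - alpha) quantile of the null statistics
    cutoff = np.sort(test_stats_null)[min(math.ceil(repeats * (1 - alpha)), repeats - 1)]
    # empirical power: fraction of alternative statistics at or above the cutoff
    return np.mean(test_stats_alternative >= cutoff)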
def power_given_data(independence_test, simulation_type, data_type='dimension', num_samples=100, num_dimensions=1, repeats=1000, alpha=.05):
# test statistics under the null, used to estimate the cutoff value under the null distribution
test_stats_null = np.zeros(repeats)
# test statistic under the alternative
test_stats_alternative = np.zeros(repeats)
# absolute path to the benchmark directory
dir_name = os.path.dirname(__file__)
if data_type == 'dimension':
file_name_prefix = dir_name + \
'/sample_data_power_dimensions/type_{}_dim_{}'.format(
simulation_type, num_dimensions)
else:
file_name_prefix = dir_name + \
'/sample_data_power_sample_sizes/type_{}_size_{}'.format(
simulation_type, num_samples)
all_matrix_X = scipy.io.loadmat(file_name_prefix + '_X.mat')['X']
all_matrix_Y = scipy.io.loadmat(file_name_prefix + '_Y.mat')['Y']
for rep in range(repeats):
matrix_X = all_matrix_X[:, :, rep]
matrix_Y = all_matrix_Y[:, :, rep]
# permutation test
permuted_y = np.random.permutation(matrix_Y)
test_stats_null[rep], _ = independence_test.test_statistic(
matrix_X, permuted_y)
test_stats_alternative[rep], _ = independence_test.test_statistic(
matrix_X, matrix_Y)
'''
# if the test is pearson, use absolute value of the test statistic
# so the more extreme test statistic is still in a one-sided interval
if independence_test.get_name() == 'pearson':
test_stats_null[rep] = abs(test_stats_null[rep])
test_stats_alternative[rep] = abs(test_stats_alternative[rep])
'''
# the cutoff is determined so that 1-alpha of the test statistics under the null distribution
# is less than the cutoff
cutoff = np.sort(test_stats_null)[math.ceil(repeats*(1-alpha))]
# the proportion of test statistics under the alternative which is no less than the cutoff (in which case
# the null is rejected) is the empirical power
empirical_power = np.where(test_stats_alternative >= cutoff)[
0].shape[0] / repeats
return empirical_power
|
import pytest
from sand import Sand
@pytest.fixture
def app():
return Sand()
@pytest.fixture
def client(app):
return app.test_session()
|
"""Configures a Docker container to run API Gateway Analytics."""
from __future__ import with_statement
import calendar
import datetime
import distutils.dir_util
import optparse
import os
import re
import shlex
import sys
import zipfile
import shutil
import analytics
import configutil
import vutil
from esapi import EntityStoreAPI
from java.lang import Throwable
from com.vordel.version import ProductVersion
from com.vordel.archive.fed import DeploymentArchive
ES_PASSPHRASE = "ES_PASSPHRASE"
ANALYTICS_PASSPHRASE = "ANALYTICS_PASSPHRASE"
METRICS_DB_PASSPHRASE = "METRICS_DB_PASSPHRASE"
SMTP_PASSPHRASE = "SMTP_PASSPHRASE"
DISTDIR = vutil.getVDISTDIR()
fedDir = os.path.join(DISTDIR, "conf", "fed")
def _parseArgs():
parser = optparse.OptionParser()
parser.add_option("--props", dest="propsFile",
help="Properties file containing name=value pairs.")
parser.add_option("--fed", dest="fedFile",
help="The FED to use for this image.")
parser.add_option("--merge-dir", dest="mergeDir",
help="Config directory to merge into analytics directory.")
parser.add_option("--docker-image-id", dest="dockerImageId",
help="Name and version of the Docker image.")
parser.add_option("--analytics-port", dest="analyticsPort",
help="Port number that exposes the API Gateway Analytics API.")
parser.add_option("--analytics-username", dest="analyticsUsername",
help="Username for logging into API Gateway Analytics UI.")
parser.add_option("--metrics-db-url", dest="metricsDbUrl",
help="Metrics database URL.")
parser.add_option("--metrics-db-username", dest="metricsDbUsername",
help="Metrics database username.")
parser.add_option("--reports-dir", dest="reportsDir",
help="Directory for Analytics reports.")
parser.add_option("--email-reports", dest="emailReports",
help="Enable report emails.", )
parser.add_option("--email-to", dest="emailTo",
help="List of destination email addresses, separated by ';'.")
parser.add_option("--email-from", dest="emailFrom",
help="Originating email address.")
parser.add_option("--smtp-conn-type", dest="smtpConnType", choices=["NONE", "TLS/SSL", "SSL"], default="NONE",
help="SMTP connection type.")
parser.add_option("--smtp-host", dest="smtpHost",
help="Hostname of SMTP server.")
parser.add_option("--smtp-port", dest="smtpPort", type="int",
help="Port number of SMTP server.")
parser.add_option("--smtp-username", dest="smtpUsername",
help="SMTP server username.")
parser.add_option("--cleanup-report", dest="cleanupReport",
help="Delete report file after emailing.")
opts = parser.parse_args()[0]
    opts.emailReports = opts.emailReports.lower() in ("1", "true", "yes")
    opts.cleanupReport = opts.cleanupReport.lower() in ("1", "true", "yes")
return opts
def _loadPassphrases():
with open(options.propsFile) as f:
lines = f.readlines()
passDict = dict(line.strip().split('=', 1) for line in lines if '=' in line)
if set(passDict.keys()) != set((ES_PASSPHRASE, ANALYTICS_PASSPHRASE, METRICS_DB_PASSPHRASE, SMTP_PASSPHRASE)):
_fail("Configuration file is missing required properties: %s" % options.propsFile)
return passDict
def _setup():
_checkLicense()
_updateEsPassphrase()
_extractCustomFedFile()
_configureMetrics()
_setAdminUser()
_setEnvVariables()
if options.mergeDir is not None and os.path.exists(options.mergeDir):
print("Merging provided config directory into analytics directory")
distutils.dir_util.copy_tree(options.mergeDir, DISTDIR)
open(os.path.join(DISTDIR, "conf", ".IAgreeToTheTermsAndConditionsOfTheEULA"), 'a').close()
def _checkLicense():
print("Checking license")
with open(os.path.join(DISTDIR, "conf", "licenses", "lic.lic")) as f:
s = f.read()
if not re.search("analytics *= *1", s):
_fail("Supplied license file is not valid for API Gateway Analytics.")
matcher = re.search(r"expires *=.*, ([\d]{2}) ([A-Za-z]{3}) ([\d]{4})", s)
if matcher is not None:
day, monthStr, year = matcher.group(1), matcher.group(2), matcher.group(3)
month = list(calendar.month_abbr).index(monthStr)
expiryDate = datetime.date(int(year), int(month), int(day))
if expiryDate < datetime.date.today():
_fail("Supplied license file has expired.")
def _updateEsPassphrase():
if len(passphrases[ES_PASSPHRASE]) > 0:
print("Using a custom entity store passphrase")
confFile = os.path.join(DISTDIR, "system", "conf", "analytics.xml")
configutil.updateSystemSettings("file:////%s" % confFile, confFile, None, None, None, None,
secret=passphrases[ES_PASSPHRASE])
def _extractCustomFedFile():
if not os.path.exists(options.fedFile):
print("Using factory FED for API Gateway Analytics")
return
print("Using custom FED file")
product, fedVersion = _getProductAndFedVersion()
productLabel = ProductVersion.getLabel()
print(" FED Product: %s, FED Version: %s, Product: %s" % (product, fedVersion, productLabel))
fedPrefix, productPrefix = fedVersion[0:3], productLabel[0:3]
if "Reporter" not in product:
_fail("Provided FED is not an API Gateway Analytics FED.")
if fedPrefix != productPrefix:
_fail("FED version %s does not match the product version %s" % (fedPrefix, productPrefix))
print("Deleting factory configuration")
shutil.rmtree(fedDir)
os.mkdir(fedDir)
print("Extracting FED file")
try:
zipFile = zipfile.ZipFile(options.fedFile)
for member in zipFile.namelist():
fname = os.path.basename(member)
if fname and fname.endswith(".xml"):
print(" - %s" % fname)
with open(os.path.join(DISTDIR, "conf", "fed", fname), 'w') as f:
f.write(zipFile.read(member))
zipFile.close()
except (Exception, Throwable), e:
_fail("Error extracting FED content: %s" % e)
def _getProductAndFedVersion():
try:
if os.path.exists(options.fedFile):
newArchive = DeploymentArchive(options.fedFile)
es = EntityStoreAPI.wrap(newArchive.getEntityStore(), passphrases[ES_PASSPHRASE])
else:
es = _getEntityStore("PrimaryStore")
productKey = es.getProductKey()
fedVersion = es.getVersion()
es.close()
return productKey, fedVersion
except (Exception, Throwable), e:
_fail("Error reading the FED: %s" % e)
def _getEntityStore(storeType):
fedDir = os.path.join(DISTDIR, "conf", "fed")
try:
for fname in os.listdir(fedDir):
if re.match(storeType + r".*\.xml", fname) is not None:
es = EntityStoreAPI.create("file:///%s/%s" % (fedDir, fname),
passphrases[ES_PASSPHRASE], {"strictImportSchema": "false"})
return es
_fail("Failed to locate %s in directory '%s'" % (storeType, fedDir))
except (Exception, Throwable), e:
_fail("Error opening entity store of type %s: %s" % (storeType, e))
def _configureMetrics():
print("Adding analytics settings to entity store")
params = [("passphrase", passphrases[ES_PASSPHRASE]),
("port", options.analyticsPort),
("dburl", options.metricsDbUrl),
("dbuser", options.metricsDbUsername),
("dbpass", passphrases[METRICS_DB_PASSPHRASE]),
("no-dbcheck", None),
("generate", None),
("guser", options.analyticsUsername),
("gpass", passphrases[ANALYTICS_PASSPHRASE]),
("gtemp", options.reportsDir),
("email" if options.emailReports else "no-email", None),
("emailfrom", options.emailFrom),
("emailto", options.emailTo),
("smtptype", options.smtpConnType),
("smtphost", options.smtpHost),
("smtpport", options.smtpPort),
("smtpuser", options.smtpUsername),
("smtppass", passphrases[SMTP_PASSPHRASE]),
("cleanup" if options.cleanupReport else "no-cleanup", None)]
cmd = os.path.join(DISTDIR, "posix", "bin", "configureserver")
for name, value in params:
if value is None:
cmd += ' --%s' % name
elif value != "":
cmd += ' --%s="%s"' % (name, value)
savedArgv = sys.argv
sys.argv = shlex.split(cmd)
analytics.main()
sys.argv = savedArgv
def _setAdminUser():
print("Updating admin user details")
try:
es = _getEntityStore("UserStore")
entity = es.get("/[UserStore]**/[User]name=admin")
entity.setStringField("name", options.analyticsUsername)
entity.setStringField("password", es.encrypt(passphrases[ANALYTICS_PASSPHRASE]))
es.updateEntity(entity)
es.close()
except (Exception, Throwable), e:
_fail("Error updating admin user details: %s" % e)
def _setEnvVariables():
venvPath = os.path.join(DISTDIR, "posix", "lib", "venv")
print("Setting env variables:")
with open(venvPath, 'a') as f:
print(" EMT_ENABLED=true")
f.write("\nexport EMT_ENABLED=true")
print(" EMT_IMAGE_ID=%s" % options.dockerImageId)
f.write("\nexport EMT_IMAGE_ID=%s" % options.dockerImageId)
def _fail(msg, errorCode=1):
"""Prints an error message in red."""
print("""\033[91m
=====================================ERROR=====================================
%s
===============================================================================\n\033[0m""" % msg)
sys.exit(errorCode)
if __name__ == "__main__":
print("\nSetting up API Gateway Analytics...\n")
options = _parseArgs()
passphrases = _loadPassphrases()
_setup()
print("\nAPI Gateway Analytics setup complete.\n")
|
#!/usr/bin/python3
from setuptools import setup
with open('README.rst') as f:
long_description = f.read()
setup(
name='py-iir-filter',
version='1.0.5',
description="Fast realtime IIR filter",
long_description=long_description,
author='Bernd Porr',
author_email='mail@berndporr.me.uk',
py_modules=['iir_filter'],
install_requires=['numpy'],
zip_safe=False,
url='https://github.com/berndporr/py-iir-filter',
license='GPL 3.0',
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
],
)
|
import FWCore.ParameterSet.Config as cms
from SimG4Core.Application.g4SimHits_cfi import *
g4SimHits.Generator.MinEtaCut = cms.double(-13.0)
g4SimHits.Generator.MaxEtaCut = cms.double( 13.0)
g4SimHits.Generator.HepMCProductLabel = 'LHCTransport'
g4SimHits.SteppingAction.MaxTrackTime = cms.double(2000.0)
g4SimHits.StackingAction.MaxTrackTime = cms.double(2000.0)
from IOMC.RandomEngine.IOMC_cff import *
RandomNumberGeneratorService.LHCTransport.engineName = cms.untracked.string('TRandom3')
#
# to avoid higher-level modules importing unneeded objects, import each module as _module
#
from SimTransport.PPSProtonTransport.CommonParameters_cfi import commonParameters as _commonParameters
from SimTransport.PPSProtonTransport.HectorTransport_cfi import hector_2016 as _hector_2016
from SimTransport.PPSProtonTransport.TotemTransport_cfi import totemTransportSetup_2016 as _totemTransportSetup_2016
from SimTransport.PPSProtonTransport.OpticalFunctionsConfig_cfi import opticalfunctionsTransportSetup_2016 as _opticalfunctionsTransportSetup_2016
from SimTransport.PPSProtonTransport.OpticalFunctionsConfig_cfi import opticalfunctionsTransportSetup_2017 as _opticalfunctionsTransportSetup_2017
from SimTransport.PPSProtonTransport.OpticalFunctionsConfig_cfi import opticalfunctionsTransportSetup_2018 as _opticalfunctionsTransportSetup_2018
from SimTransport.PPSProtonTransport.OpticalFunctionsConfig_cfi import opticalfunctionsTransportSetup_2021 as _opticalfunctionsTransportSetup_2021
_LHCTransportPSet = cms.PSet()
# the optics for 2017 and 2018 are not yet defined; if needed, change the config for these years to the 2016 one
from Configuration.Eras.Modifier_ctpps_2016_cff import ctpps_2016
#ctpps_2016.toReplaceWith(LHCTransportPSet, _totemTransportSetup_2016)
ctpps_2016.toReplaceWith(_LHCTransportPSet, _opticalfunctionsTransportSetup_2016.optics_parameters)
from Configuration.Eras.Modifier_ctpps_2017_cff import ctpps_2017
ctpps_2017.toReplaceWith(_LHCTransportPSet, _opticalfunctionsTransportSetup_2017.optics_parameters)
from Configuration.Eras.Modifier_ctpps_2018_cff import ctpps_2018
ctpps_2018.toReplaceWith(_LHCTransportPSet, _opticalfunctionsTransportSetup_2018.optics_parameters)
from Configuration.Eras.Modifier_ctpps_2021_cff import ctpps_2021
ctpps_2021.toReplaceWith(_LHCTransportPSet, _opticalfunctionsTransportSetup_2021.optics_parameters)
LHCTransport = cms.EDProducer("PPSSimTrackProducer",_commonParameters,_LHCTransportPSet)
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# TODO: disabling for now as there is no dummy mode that works without
# phenotype DB, cf. https://github.com/exomiser/Exomiser/issues/347
# def test_local_socket(host):
# assert host.socket("tcp://127.0.0.1:8085").is_listening
def test_external_socket(host):
assert not host.socket("tcp://0.0.0.0:8085").is_listening
assert host.socket("tcp://0.0.0.0:80").is_listening
assert host.socket("tcp://0.0.0.0:443").is_listening
|
import json
from collections import namedtuple
from datetime import datetime
from hashlib import md5
from itertools import groupby
from operator import itemgetter
import sqlalchemy as sa
from flask_bcrypt import Bcrypt
from geoalchemy2 import Geometry
from shapely.geometry import shape
from sqlalchemy import Boolean, Column, Date, DateTime, String, Table, Text, func, select
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
from sqlalchemy.exc import ProgrammingError
from plenario.database import postgres_base, postgres_session
from plenario.utils.helpers import get_size_in_degrees, slugify
bcrypt = Bcrypt()
class MetaTable(postgres_base):
__tablename__ = 'meta_master'
# limited to 50 chars elsewhere
dataset_name = Column(String(100), nullable=False)
human_name = Column(String(255), nullable=False)
description = Column(Text)
source_url = Column(String(255))
source_url_hash = Column(String(32), primary_key=True)
view_url = Column(String(255))
attribution = Column(String(255))
# Spatial and temporal boundaries of observations in this dataset
obs_from = Column(Date)
obs_to = Column(Date)
bbox = Column(Geometry('POLYGON', srid=4326))
# TODO: Add restriction list ['daily' etc.]
update_freq = Column(String(100), nullable=False)
last_update = Column(DateTime)
date_added = Column(DateTime)
# The names of our "special" fields
observed_date = Column(String, nullable=False)
latitude = Column(String)
longitude = Column(String)
location = Column(String)
# if False, then do not display without first getting administrator approval
approved_status = Column(Boolean)
contributor_name = Column(String)
contributor_organization = Column(String)
contributor_email = Column(String)
result_ids = Column(ARRAY(String))
column_names = Column(JSONB) # {'<COLUMN_NAME>': '<COLUMN_TYPE>'}
def __init__(self, url, human_name, observed_date,
approved_status=False, update_freq='yearly',
latitude=None, longitude=None, location=None,
attribution=None, description=None,
column_names=None,
contributor_name=None, contributor_email=None,
contributor_organization=None, **kwargs):
"""
:param url: url where CSV or Socrata dataset with this dataset resides
:param human_name: Nicely formatted name to display to people
        :param observed_date: Name of column with the dataset's timestamp
:param approved_status: Has an admin signed off on this dataset?
:param update_freq: one of ['daily', 'weekly', 'monthly', 'yearly']
:param latitude: Name of col with latitude
:param longitude: Name of col with longitude
:param location: Name of col with location formatted as (lat, lon)
:param attribution: Text describing who maintains the dataset
:param description: Text describing the dataset.
"""
def curried_slug(name):
if name is None:
return None
else:
return slugify(str(name), delimiter='_')
# Some combination of columns from which we can derive a point in space.
assert (location or (latitude and longitude))
# Frontend validation should have slugified column names already,
# but enforcing it here is nice for testing.
self.latitude = curried_slug(latitude)
self.longitude = curried_slug(longitude)
self.location = curried_slug(location)
assert human_name
self.human_name = human_name
# Known issue: slugify fails hard on Non-ASCII
self.dataset_name = kwargs.get('dataset_name',
curried_slug(human_name)[:50])
assert observed_date
self.observed_date = curried_slug(observed_date)
assert url
# Assume a URL has already been slugified,
# and can only contain ASCII characters
self.source_url, self.source_url_hash = url, md5(url.encode('ascii')).hexdigest()
self.view_url = self._get_view_url_val(url)
assert update_freq
self.update_freq = update_freq
# Can be None. In practice,
# frontend validation makes sure these are always passed along.
self.description, self.attribution = description, attribution
# Expect a list of strings
self.column_names = column_names
# Boolean
self.approved_status = approved_status
self.contributor_name = contributor_name
self.contributor_organization = contributor_organization
self.contributor_email = contributor_email
@staticmethod
def _get_view_url_val(url):
trunc_index = url.find('.csv?accessType=DOWNLOAD')
if trunc_index == -1:
return None
else:
return url[:trunc_index]
def __repr__(self):
return '<MetaTable %r (%r)>' % (self.human_name, self.dataset_name)
def meta_tuple(self):
PointDataset = namedtuple('PointDataset', 'name date lat lon loc')
basic_info = PointDataset(name=self.dataset_name,
date=self.observed_date,
lat=self.latitude,
lon=self.longitude,
loc=self.location)
return basic_info
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def column_info(self):
return self.point_table.c
@property
def point_table(self):
try:
return self._point_table
except AttributeError:
self._point_table = Table(self.dataset_name, postgres_base.metadata, autoload=True, extend_existing=True)
return self._point_table
@classmethod
def attach_metadata(cls, rows):
"""Given a list of dicts that include a dataset_name, add metadata about the datasets to each dict.
:param rows: List of dict-likes with a dataset_name attribute
"""
dataset_names = [row['dataset_name'] for row in rows]
# All the metadata attributes that we can pull in unaltered
as_is_attr_names = ['dataset_name', 'human_name', 'date_added',
'obs_from', 'obs_to', 'last_update',
'attribution', 'description', 'update_freq',
'view_url', 'source_url',
'contributor_name', 'contributor_email',
'contributor_organization']
as_is_attrs = [getattr(cls, name) for name in as_is_attr_names]
# Bounding box is the exception. We need to process it a bit.
bbox = func.ST_AsGeoJSON(cls.bbox)
# Put our as-is and processed attributes together
attr_names = as_is_attr_names + ['bbox']
attrs = as_is_attrs + [bbox]
# Make the DB call
result = postgres_session.query(*attrs). \
filter(cls.dataset_name.in_(dataset_names))
meta_list = [dict(list(zip(attr_names, row))) for row in result]
# We need to coerce datetimes to strings
date_attrs = ['date_added', 'obs_from', 'obs_to']
for row in meta_list:
row['bbox'] = json.loads(row['bbox'])
for attr in date_attrs:
row[attr] = str(row[attr])
# Align the original list and metadata list...
meta_list = sorted(meta_list, key=itemgetter('dataset_name'))
to_coalesce = sorted(rows, key=itemgetter('dataset_name'))
# and coalesce them.
for original, meta in zip(to_coalesce, meta_list):
original.update(meta)
return to_coalesce
@classmethod
def timeseries_all(cls, table_names, agg_unit, start, end, geom=None, ctrees=None):
"""Return a list of
[
{
'dataset_name': 'Foo',
'items': [{'datetime': dt, 'count': int}, ...]
}
]
"""
# For each table in table_names, generate a query to be unioned
selects = []
for name in table_names:
# If we have condition trees specified, apply them.
# .get will return None for those datasets who don't have filters
ctree = ctrees.get(name) if ctrees else None
table = cls.get_by_dataset_name(name)
ts_select = table.timeseries(agg_unit, start, end, geom, ctree)
selects.append(ts_select)
# Union the time series selects to get a panel
panel_query = sa.union(*selects) \
.order_by('dataset_name') \
.order_by('time_bucket')
panel_vals = postgres_session.execute(panel_query)
panel = []
for dataset_name, ts in groupby(panel_vals, lambda row: row.dataset_name):
# ts gets closed after it's been iterated over once,
# so we need to store the rows somewhere to iterate over them twice.
rows = [row for row in ts]
# If no records were found, don't include this dataset
if all([row.count == 0 for row in rows]):
continue
ts_dict = {'dataset_name': dataset_name,
'items': []}
for row in rows:
ts_dict['items'].append({
'datetime': row.time_bucket.date().isoformat(),
'count': row.count
})
# Aggregate top-level count across all time slices.
ts_dict['count'] = sum([i['count'] for i in ts_dict['items']])
panel.append(ts_dict)
return panel
# Information about all point datasets
@classmethod
def index(cls):
try:
q = postgres_session.query(cls.dataset_name)
q = q.filter(cls.approved_status == True)
names = [result.dataset_name for result in q.all()]
except ProgrammingError:
# Handles a case that causes init_db to crash.
# Validator calls index when initializing, prevents this call
# from raising an error when the database is empty.
names = []
return names
@classmethod
def narrow_candidates(cls, dataset_names, start, end, geom=None):
"""
:param dataset_names: Names of point datasets to be considered
        :return names: Names of point datasets whose bounding box and date range
            intersect with the given bounds.
"""
        # Filter out datasets that don't intersect the time boundary
q = postgres_session.query(cls.dataset_name) \
.filter(cls.dataset_name.in_(dataset_names), cls.date_added != None,
cls.obs_from < end,
cls.obs_to > start)
# or the geometry boundary
if geom:
intersecting = cls.bbox.ST_Intersects(func.ST_GeomFromGeoJSON(geom))
q = q.filter(intersecting)
return [row.dataset_name for row in q.all()]
@classmethod
def get_by_dataset_name(cls, name):
        return postgres_session.query(cls).filter(cls.dataset_name == name).first()
def get_bbox_center(self):
sel = select([func.ST_AsGeoJSON(func.ST_centroid(self.bbox))])
result = postgres_session.execute(sel)
# returns [lon, lat]
return json.loads(result.first()[0])['coordinates']
def update_date_added(self):
now = datetime.now()
if self.date_added is None:
self.date_added = now
self.last_update = now
    def make_grid(self, resolution, geom=None, conditions=None, obs_dates=None):
"""
:param resolution: length of side of grid square in meters
:type resolution: int
:param geom: string representation of geojson fragment
:type geom: str
:param conditions: conditions on columns to filter on
:type conditions: list of SQLAlchemy binary operations
(e.g. col > value)
:return: grid: result proxy with all result rows
size_x, size_y: the horizontal and vertical size
of the grid squares in degrees
"""
if conditions is None:
conditions = []
# We need to convert resolution (given in meters) to degrees
# - which is the unit of measure for EPSG 4326 -
# - in order to generate our grid.
center = self.get_bbox_center()
        # center[1] is the latitude (get_bbox_center returns [lon, lat])
size_x, size_y = get_size_in_degrees(resolution, center[1])
t = self.point_table
q = postgres_session.query(
func.count(t.c.hash),
func.ST_SnapToGrid(
t.c.geom,
0,
0,
size_x,
size_y
).label('squares')
).filter(*conditions).group_by('squares')
if geom:
q = q.filter(t.c.geom.ST_Within(func.ST_GeomFromGeoJSON(geom)))
if obs_dates:
q = q.filter(t.c.point_date >= obs_dates['lower'])
q = q.filter(t.c.point_date <= obs_dates['upper'])
return postgres_session.execute(q), size_x, size_y
# Return select statement to execute or union
def timeseries(self, agg_unit, start, end, geom=None, column_filters=None):
# Reading this blog post
# http://no0p.github.io/postgresql/2014/05/08/timeseries-tips-pg.html
# inspired this implementation.
t = self.point_table
# Special case for the 'quarter' unit of aggregation.
step = '3 months' if agg_unit == 'quarter' else '1 ' + agg_unit
# Create a CTE to represent every time bucket in the timeseries
# with a default count of 0
day_generator = func.generate_series(func.date_trunc(agg_unit, start),
func.date_trunc(agg_unit, end),
step)
defaults = select([sa.literal_column("0").label('count'),
day_generator.label('time_bucket')]) \
.alias('defaults')
where_filters = [t.c.point_date >= start, t.c.point_date <= end]
if column_filters is not None:
# Column filters has to be iterable here, because the '+' operator
# behaves differently for SQLAlchemy conditions. Instead of
# combining the conditions together, it would try to build
# something like :param1 + <column_filters> as a new condition.
where_filters += [column_filters]
# Create a CTE that grabs the number of records contained in each time
# bucket. Will only have rows for buckets with records.
actuals = select([func.count(t.c.hash).label('count'),
func.date_trunc(agg_unit, t.c.point_date).
label('time_bucket')]) \
.where(sa.and_(*where_filters)) \
.group_by('time_bucket')
# Also filter by geometry if requested
if geom:
contains = func.ST_Within(t.c.geom, func.ST_GeomFromGeoJSON(geom))
actuals = actuals.where(contains)
# Need to alias to make it usable in a subexpression
actuals = actuals.alias('actuals')
# Outer join the default and observed values
# to create the timeseries select statement.
# If no observed value in a bucket, use the default.
name = sa.literal_column("'{}'".format(self.dataset_name)) \
.label('dataset_name')
bucket = defaults.c.time_bucket.label('time_bucket')
count = func.coalesce(actuals.c.count, defaults.c.count).label('count')
ts = select([name, bucket, count]). \
select_from(defaults.outerjoin(actuals, actuals.c.time_bucket == defaults.c.time_bucket))
return ts
def timeseries_one(self, agg_unit, start, end, geom=None, column_filters=None):
ts_select = self.timeseries(agg_unit, start, end, geom, column_filters)
rows = postgres_session.execute(ts_select.order_by('time_bucket'))
header = [['count', 'datetime']]
# Discard the name attribute.
rows = [[count, time_bucket.date()] for _, time_bucket, count in rows]
return header + rows
@classmethod
def get_all_with_etl_status(cls):
"""
        :return: Every row of meta_master joined with celery task status.
"""
query = """
SELECT m.*, c.*
FROM meta_master AS m
LEFT JOIN celery_taskmeta AS c
ON c.id = (
SELECT id FROM celery_taskmeta
WHERE task_id = ANY(m.result_ids)
ORDER BY date_done DESC
LIMIT 1
)
WHERE m.approved_status = 'true'
"""
return list(postgres_session.execute(query))
|
import bleach
from yaa_settings import AppSettings
class Settings(AppSettings):
prefix = "YARR"
#
# To manage the web interface
#
# Use webpack dev server instead of static files
DEV_MODE = False
# Page to open at Yarr root url (resolved using reverse)
INDEX_URL = "yarr:list_unread"
# Pagination limits
PAGE_LENGTH = 25
API_PAGE_LENGTH = 5
# If true, fix the layout elements at the top of the screen when scrolling down
# Disable if using a custom layout
LAYOUT_FIXED = True
# Template string for document title (shown on the browser window and tabs).
# If set, used to update the title when changing feeds in list view.
# Use ``%(feed)s`` as a placeholder for the feed title (case sensitive)
TITLE_TEMPLATE = "%(feed)s"
# jQuery Selector for page title (an element in your page template)
# If set, this element's content will be replaced with the feed title when
# changing feeds in list view.
TITLE_SELECTOR = ""
#
# To control feed updates
#
# Socket timeout, in seconds
# Highly recommended that this is **not** set to ``None``, which would block
# Note: this sets the global socket timeout, which is not thread-safe; it is
# therefore set explicitly when checking feeds, and reset after feeds have been
# updated (see ``yarr.decorators.with_socket_timeout`` for more details).
SOCKET_TIMEOUT = 15
# Minimum and maximum interval for checking a feed, in minutes
# The minimum interval must match the interval that the cron job runs at,
# otherwise some feeds may not get checked on time
MINIMUM_INTERVAL = 60
MAXIMUM_INTERVAL = 24 * 60
# Default frequency to check a feed, in minutes
    # Defaults to 24 hours (24 * 60 minutes)
# Note: this will be removed in a future version
FREQUENCY = 24 * 60
# Number of days to keep a read item which is no longer in the feed
# Set this to 0 to expire immediately, -1 to never expire
ITEM_EXPIRY = 1
#
# Bleach settings for Yarr
#
# HTML whitelist for bleach
# This default list is roughly the same as the WHATWG sanitization rules
# <http://wiki.whatwg.org/wiki/Sanitization_rules>, but without form elements.
# A few common HTML 5 elements have been added as well.
ALLOWED_TAGS = [
"a",
"abbr",
"acronym",
"aside",
"b",
"bdi",
"bdo",
"blockquote",
"br",
"code",
"data",
"dd",
"del",
"dfn",
"div", # Why not?
"dl",
"dt",
"em",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"hr",
"i",
"img",
"ins",
"kbd",
"li",
"ol",
"p",
"pre",
"q",
"s",
"samp",
"small",
"span",
"strike",
"strong",
"sub",
"sup",
"table",
"tbody",
"td",
"tfoot",
"th",
"thead",
"tr",
"time",
"tt", # Obsolete, but docutils likes to generate these.
"u",
"var",
"wbr",
"ul",
]
ALLOWED_ATTRIBUTES = {
"*": ["lang", "dir"], # lang is necessary for hyphentation.
"a": ["href", "title"],
"abbr": ["title"],
"acronym": ["title"],
"data": ["value"],
"dfn": ["title"],
"img": ["src", "alt", "width", "height", "title"],
"li": ["value"],
"ol": ["reversed", "start", "type"],
"td": ["align", "valign", "width", "colspan", "rowspan"],
"th": ["align", "valign", "width", "colspan", "rowspan"],
"time": ["datetime"],
}
ALLOWED_STYLES = bleach.ALLOWED_STYLES
|
# -*- coding: utf-8 -*-
"""
Huffman code
"""
from data_structures import MinHeap
def huffmancode(weight):
    """Return the minimum and maximum codeword depth of a Huffman code
    built for the given symbol weights."""
    heap = MinHeap()
    depth = []
    for i, w in enumerate(weight):
        heap.insert(key=w, node=[i])
        depth.append(0)
    # Repeatedly merge the two lightest subtrees; every symbol contained in a
    # merged subtree ends up one level deeper in the final code tree.
    while len(heap.key) >= 2:
        w1, symbols1 = heap.extract_min()
        heap.delete_min()
        w2, symbols2 = heap.extract_min()
        heap.delete_min()
        w = w1 + w2
        symbols1.extend(symbols2)
        for s in symbols1:
            depth[s] += 1
        heap.insert(key=w, node=symbols1)
    return min(depth), max(depth)
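# Usage sketch (assumes the MinHeap imported above exposes the insert /
# extract_min / delete_min / key attributes used in huffmancode; the weights
# below are arbitrary example values):
if __name__ == "__main__":
    shortest, longest = huffmancode([3, 2, 6, 8, 2, 6])
    print("shortest codeword length:", shortest)
    print("longest codeword length:", longest)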
|
import responses
import status
import uuid
@responses.activate
def test_list_equipes(equipe_client, crm_base_url):
responses.add(
method=responses.GET,
url=f"{crm_base_url}/backend/equipes/",
status=status.HTTP_200_OK,
json={}
)
response = equipe_client.equipes().get()
assert response().status_code == status.HTTP_200_OK
@responses.activate
def test_retrieve_equipe(equipe_client, crm_base_url):
codigo = str(uuid.uuid4())
responses.add(
method=responses.GET,
url=f"{crm_base_url}/backend/equipes/{codigo}/",
status=status.HTTP_200_OK,
json={}
)
response = equipe_client.equipe(codigo=codigo).get()
assert response().status_code == status.HTTP_200_OK
|
# -*- coding: utf-8 -*-
responsablereseau = "responsablereseau"
admin = "admin"
membreca = "membreca"
tresorier = "tresorier"
respsalleinfo = "respsalleinfo"
exemptdecoglobale = "exemptdecoglobale"
|
from typing import List, Tuple
from pynars.NAL import Inference
from pynars.NARS.DataStructures._py.Concept import Concept
from pynars.NARS.DataStructures._py.Link import Link, TaskLink, TermLink
from pynars.NARS.InferenceEngine.GeneralEngine.GeneralEngine import GeneralEngine
from pynars.Narsese import Task
from pynars import Narsese
from pynars.NARS.RuleMap import RuleMap, RuleCallable
from pynars.NARS import Reasoner as Reasoner
from pynars.Narsese._py.Statement import Statement
from pynars.Narsese._py.Task import Belief
from pynars.Narsese._py.Term import Term
from pynars.NAL.MentalOperation import execute
nars = Reasoner(100, 100)
engine: GeneralEngine = nars.inference
def rule_map_two_premises(premise1: str, premise2: str, term_common: str, inverse: bool=False, is_belief_term: bool=False, index_task=None, index_belief=None) -> Tuple[List[RuleCallable], Task, Belief, Concept, TaskLink, TermLink, Tuple[Task, Task, Task, Task]]:
''''''
nars.reset()
premise1: Task = Narsese.parse(premise1)
result1 = nars.memory.accept(premise1)
premise2: Task = Narsese.parse(premise2)
result2 = nars.memory.accept(premise2)
    task, belief = (premise1, premise2) if not inverse else (premise2, premise1)
term_common: Term = Narsese.parse(term_common).term
concept = nars.memory.take_by_key(term_common)
if index_task is None:
if task.term == concept.term: index_task = ()
else:
if task.term.complexity > concept.term.complexity: indices_task = Link.get_index(task.term, concept.term)
else: indices_task = Link.get_index(concept.term, task.term)
if indices_task is not None: index_task = indices_task[0]
if index_belief is None:
        if belief.term == concept.term: index_belief = ()
else:
if belief.term.complexity > concept.term.complexity: indices_belief = Link.get_index(belief.term, concept.term)
else: indices_belief = Link.get_index(concept.term, belief.term)
if indices_belief is not None: index_belief = indices_belief[0]
task_link = concept.task_links.take_by_key(TaskLink(concept, task, None, index=index_task))
term_link = concept.term_links.take_by_key(TermLink(concept, belief, None, index=index_belief))
belief: Belief
_, _, rules = engine.match(task, (belief if not is_belief_term else None), belief.term, task_link, term_link)
return rules, task, belief, concept, task_link, term_link, result1, result2
def rule_map_task_only(premise1: str, concept_term: str, index_concept_task: tuple):
    ''''''
    task = Narsese.parse(premise1)
    result1 = nars.memory.accept(task)
    concept_term = Narsese.parse(concept_term + ".").term
concept = nars.memory.take_by_key(concept_term)
task_link = concept.task_links.take_by_key(TaskLink(concept, task, None, index=index_concept_task))
rules = engine.match(task, None, None, task_link, None)
return rules, task, concept, task_link, result1
def memory_accept_revision(judgement1: str, judgement2: str):
task1 = Narsese.parse(judgement1)
nars.memory.accept(task1)
task2 = Narsese.parse(judgement2)
task_derived, *_ = nars.memory.accept(task2)
return [task_derived]
def execute_one_premise(premise: Task):
''''''
stat: Statement = premise.term
if stat.is_executable:
op = stat.predicate
args = stat.subject.terms
return execute(op, *args)
else:
raise "Invalide case."
def output_contains(outputs: List[Task], target: str):
target: Task = Narsese.parse(target)
for output in outputs:
flag_contain = output.term == target.term
if output.truth is None:
flag_contain &= target.truth is None
else:
flag_contain &= round(output.truth.f, 2) == round(target.truth.f, 2)
flag_contain &= round(output.truth.c, 2) == round(target.truth.c, 2)
flag_contain &= target.sentence.is_eternal == output.sentence.is_eternal
# compare the time stamp
if not target.sentence.is_eternal:
flag_contain &= target.stamp.t_occurrence == output.stamp.t_occurrence
if flag_contain:
return True
return False
|
# -*- coding: utf-8 -*-
"""
IPSec VPN login
"""
import os
import time
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from BasePages.getVerifyCode import get_pictures
class IPSecLogin:
def __init__(self):
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--ignore-certificate-errors')
self.driver = webdriver.Chrome(options=chrome_options)
self.driver.maximize_window()
time.sleep(2)
def setDriver(self, testAPI):
# noinspection PyBroadException
self.driver.get(testAPI)
self.driver.implicitly_wait(5)
self.driver.find_element_by_xpath("//input[@placeholder='请输入管理员名']").send_keys('anquan')
self.driver.find_element_by_xpath("//input[@placeholder='请输入登录密码']").send_keys('test2020+')
self.inputcode()
return self.driver
def inputcode(self):
try:
code = get_pictures(self.driver, 'xpath', "//img[@id='imgVerify']")
times = str(time.time()).replace(".", '')
self.driver.find_element('xpath', "//input[@placeholder='请输入验证码']") \
.send_keys(code)
time.sleep(0.5)
self.driver.find_element_by_id('login').click()
            # Finding the logout link ('退出') raises if login failed, which
            # triggers the retry in the except branch below.
            WebDriverWait(self.driver.find_element('link text', '退出'), 5, 0.5)
            # Save the correctly recognized captcha image for later model training
os.rename('./BasePages/verifyCodePic/poo3.png', f'./BasePages/verifyCodePic2/{code}_{times}.png')
except Exception as e:
# print(e)
os.rename('./BasePages/verifyCodePic/poo3.png', f'./BasePages/verifyCodePic2/{code}_{times}_需修改.png')
self.inputcode()
|
import time
from threading import Thread
import rxbp
from rxbp.acknowledgement.acksubject import AckSubject
from rxbp.acknowledgement.continueack import continue_ack
from rxbp.overflowstrategy import DropOld, ClearBuffer, Drop
from rxbp.schedulers.asyncioscheduler import AsyncIOScheduler
from rxbp.schedulers.threadpoolscheduler import ThreadPoolScheduler
from rxbp.schedulers.timeoutscheduler import TimeoutScheduler
from rxbp.testing.tobserver import TObserver
def demo1():
def counter(sink):
while True:
time.sleep(5)
print(f"[**client**] received: ", sink.received)
publisher = rxbp.interval(0.5).pipe(
# rxbp.op.strategy(DropOld(buffer_size=15))
rxbp.op.strategy(ClearBuffer(buffer_size=15))
)
sink = TObserver(immediate_continue=5)
publisher.subscribe(observer=sink, subscribe_scheduler=TimeoutScheduler())
t1 = Thread(target=counter, args=(sink,))
t1.start()
t1.join()
def demo2():
def counter(sink):
while True:
time.sleep(5)
print(f"[**client**] received: ", sink.received)
def work(o, skd):
for i in range(1_000):
o.on_next([i])
o.on_completed()
source = rxbp.create(work)
source = source.pipe(
rxbp.op.strategy(DropOld(buffer_size=100)),
# rxbp.op.strategy(ClearBuffer(buffer_size=15)),
# rxbp.op.strategy(Drop(buffer_size=15)),
)
sink = TObserver(immediate_continue=5)
source.subscribe(observer=sink, subscribe_scheduler=ThreadPoolScheduler("publisher"))
t1 = Thread(target=counter, args=(sink,))
t1.start()
t1.join()
if __name__ == '__main__':
# demo1()
demo2()
|
import os
import pytest
from leapp.snactor.fixture import current_actor_context
from leapp.models import SELinuxModule, SELinuxModules, SELinuxCustom, SELinuxFacts, SELinuxRequestRPMs
from leapp.libraries.stdlib import api, run, CalledProcessError
from leapp.reporting import Report
TEST_MODULES = [
["400", "mock1"],
["99", "mock1"],
["200", "mock1"],
["400", "mock2"],
["999", "mock3"],
["400", "permissive_abrt_t"]
]
SEMANAGE_COMMANDS = [
['fcontext', '-t', 'ganesha_var_run_t', "'/ganesha(/.*)?'"],
['fcontext', '-t', 'httpd_sys_content_t', "'/web(/.*)?'"],
['port', '-t', 'http_port_t', '-p', 'udp', '81']
]
def _run_cmd(cmd, logmsg="", split=True):
try:
return run(cmd, split=split).get("stdout", "")
except CalledProcessError as e:
if logmsg:
api.current_logger().warning("%s: %s", logmsg, str(e.stderr))
return None
def find_module_semodule(semodule_lfull, name, priority):
return next((line for line in semodule_lfull if (name in line and priority in line)), None)
def find_semanage_rule(rules, rule):
return next((r for r in rules if all(word in r for word in rule)), None)
@pytest.fixture(scope="function")
def destructive_selinux_teardown():
# actor introduces changes to the system, therefore only teardown is needed
yield
for priority, module in TEST_MODULES:
_run_cmd(["semodule", "-X", priority, "-r", module],
"Error removing module {} after testing".format(module))
for command in SEMANAGE_COMMANDS[1:]:
_run_cmd(["semanage", command[0], "-d"] + [x.strip('"\'') for x in command[1:]],
"Failed to remove SELinux customizations after testing")
_run_cmd(["semanage", SEMANAGE_COMMANDS[0][0], "-d"] + SEMANAGE_COMMANDS[0][1:],
"Failed to remove SELinux customizations after testing")
@pytest.mark.skipif(os.getenv("DESTRUCTIVE_TESTING", False) in [False, "0"],
reason='Test disabled by default because it would modify the system')
def test_SELinuxApplyCustom(current_actor_context, destructive_selinux_teardown):
semodule_list = [SELinuxModule(name=module, priority=int(prio),
content="(allow domain proc_type (file (getattr open read)))", removed=[])
for (prio, module) in TEST_MODULES]
commands = [" ".join([c[0], "-a"] + c[1:]) for c in SEMANAGE_COMMANDS[1:]]
semanage_removed = [" ".join([SEMANAGE_COMMANDS[0][0], "-a"] + SEMANAGE_COMMANDS[0][1:])]
current_actor_context.feed(SELinuxModules(modules=semodule_list))
current_actor_context.feed(SELinuxCustom(commands=commands, removed=semanage_removed))
current_actor_context.run()
semodule_lfull = _run_cmd(["semodule", "-lfull"],
"Error listing selinux modules")
semanage_export = _run_cmd(["semanage", "export"],
"Error listing selinux customizations")
    # check that all reported modules were introduced to the system
for priority, name in TEST_MODULES:
if priority not in ('100', '200'):
assert find_module_semodule(semodule_lfull, name, priority)
    # check that all valid commands were introduced to the system
for command in SEMANAGE_COMMANDS[1:-1]:
assert find_semanage_rule(semanage_export, command)
|
# -*- coding: utf-8 -*-
# pylint: disable=no-name-in-module,broad-except,relative-import
from __future__ import unicode_literals
try:
import urllib.request as urllib2
except ImportError:
import urllib2
try:
import xml.etree.cElementTree as ET
except ImportError: # pragma: no cover
import xml.etree.ElementTree as ET
from models.search import SearchResult
from models.series import Series
from . import settings
class TVDB(object):
@staticmethod
def _get(url):
try:
result = urllib2.urlopen(url)
data = result.read()
result.close()
except Exception:
data = ''
return data
@classmethod
def search(cls, query):
query = urllib2.quote(query)
url = '{}/GetSeries.php?seriesname={}'.format(settings.BASE_URL, query)
data = cls._get(url)
root_node = ET.fromstring(data)
return SearchResult(cls, query, root_node)
@classmethod
def series(cls, series_id):
url = settings.SERIES_URL.format(
api_key=settings.API_KEY,
series_id=series_id
)
data = cls._get(url)
root_node = ET.fromstring(data)
return Series(root_node)
|
import copy
import getopt
import math
import networkx as nx
import matplotlib.pyplot as plt
import operator
import os
import random
import sys
from scipy import stats
import time
def randomize_weights(weights):
    """Pick a key from ``weights`` with probability proportional to its value."""
    # Draw a point in [0, total weight) and walk the buckets until the point
    # falls inside one; that bucket's key is the selection.
    number = random.random() * sum(weights.values())
    for k, v in weights.items():
        if number <= v:
            break
        number -= v
    return k
def pad_string(integer, n):
    """Left-pad the decimal representation of ``integer`` with zeros to width ``n``."""
    string = str(integer)
    while len(string) < n:
        string = "0" + string
    return string
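# Usage sketch (arbitrary example weights; roughly 60% of draws should be "a"):
if __name__ == "__main__":
    demo_weights = {"a": 6, "b": 3, "c": 1}
    print([randomize_weights(demo_weights) for _ in range(10)])
    print(pad_string(7, 4))  # -> "0007"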
|
import itertools
import json
import logging
import string
from collections import defaultdict
from typing import Dict, List, Union, Tuple, Any
from overrides import overrides
from word2number.w2n import word_to_num
from allennlp.common.file_utils import cached_path
from allennlp.data.fields import (
Field,
TextField,
MetadataField,
LabelField,
ListField,
SequenceLabelField,
SpanField,
IndexField,
)
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.dataset_readers.reading_comprehension.util import (
IGNORED_TOKENS,
STRIPPED_CHARACTERS,
make_reading_comprehension_instance,
split_tokens_by_hyphen,
)
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
logger = logging.getLogger(__name__)
WORD_NUMBER_MAP = {
"zero": 0,
"one": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
"seven": 7,
"eight": 8,
"nine": 9,
"ten": 10,
"eleven": 11,
"twelve": 12,
"thirteen": 13,
"fourteen": 14,
"fifteen": 15,
"sixteen": 16,
"seventeen": 17,
"eighteen": 18,
"nineteen": 19,
}
@DatasetReader.register("drop")
class DropReader(DatasetReader):
"""
Reads a JSON-formatted DROP dataset file and returns instances in a few different possible
formats. The input format is complicated; see the test fixture for an example of what it looks
like. The output formats all contain a question ``TextField``, a passage ``TextField``, and
some kind of answer representation. Because DROP has instances with several different kinds of
answers, this dataset reader allows you to filter out questions that do not have answers of a
    particular type (e.g., remove questions that have numbers as answers, if your model can only
give passage spans as answers). We typically return all possible ways of arriving at a given
answer string, and expect models to marginalize over these possibilities.
Parameters
----------
tokenizer : ``Tokenizer``, optional (default=``SpacyTokenizer()``)
We use this ``Tokenizer`` for both the question and the passage. See :class:`Tokenizer`.
    Default is ``SpacyTokenizer()``.
token_indexers : ``Dict[str, TokenIndexer]``, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is ``{"tokens": SingleIdTokenIndexer()}``.
lazy : ``bool``, optional (default=False)
If this is true, ``instances()`` will return an object whose ``__iter__`` method
reloads the dataset each time it's called. Otherwise, ``instances()`` returns a list.
passage_length_limit : ``int``, optional (default=None)
If specified, we will cut the passage if the length of passage exceeds this limit.
question_length_limit : ``int``, optional (default=None)
    If specified, we will cut the question if the length of the question exceeds this limit.
skip_when_all_empty: ``List[str]``, optional (default=None)
In some cases such as preparing for training examples, you may want to skip some examples
when there are no gold labels. You can specify on what condition should the examples be
skipped. Currently, you can put "passage_span", "question_span", "addition_subtraction",
or "counting" in this list, to tell the reader skip when there are no such label found.
If not specified, we will keep all the examples.
instance_format: ``str``, optional (default="drop")
We try to be generous in providing a few different formats for the instances in DROP,
in terms of the ``Fields`` that we return for each ``Instance``, to allow for several
different kinds of models. "drop" format will do processing to detect numbers and
various ways those numbers can be arrived at from the passage, and return ``Fields``
related to that. "bert" format only allows passage spans as answers, and provides a
"question_and_passage" field with the two pieces of text joined as BERT expects.
"squad" format provides the same fields that our BiDAF and other SQuAD models expect.
relaxed_span_match_for_finding_labels : ``bool``, optional (default=True)
    The DROP dataset contains multi-span answers, and date-type answers are also usually hard
    to find exact span matches for. In order to use as many examples as possible
to train the model, we may not want a strict match for such cases when finding the gold
span labels. If this argument is true, we will treat every span in the multi-span
answers as correct, and every token in the date answer as correct, too. Because models
trained on DROP typically marginalize over all possible answer positions, this is just
being a little more generous in what is being marginalized. Note that this will not
affect evaluation.
"""
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False,
passage_length_limit: int = None,
question_length_limit: int = None,
skip_when_all_empty: List[str] = None,
instance_format: str = "drop",
relaxed_span_match_for_finding_labels: bool = True,
) -> None:
super().__init__(lazy)
self._tokenizer = tokenizer or SpacyTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.passage_length_limit = passage_length_limit
self.question_length_limit = question_length_limit
self.skip_when_all_empty = skip_when_all_empty if skip_when_all_empty is not None else []
for item in self.skip_when_all_empty:
assert item in [
"passage_span",
"question_span",
"addition_subtraction",
"counting",
], f"Unsupported skip type: {item}"
self.instance_format = instance_format
self.relaxed_span_match_for_finding_labels = relaxed_span_match_for_finding_labels
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset = json.load(dataset_file)
logger.info("Reading the dataset")
kept_count, skip_count = 0, 0
for passage_id, passage_info in dataset.items():
passage_text = passage_info["passage"]
passage_tokens = self._tokenizer.tokenize(passage_text)
passage_tokens = split_tokens_by_hyphen(passage_tokens)
for question_answer in passage_info["qa_pairs"]:
question_id = question_answer["query_id"]
question_text = question_answer["question"].strip()
answer_annotations = []
if "answer" in question_answer:
answer_annotations.append(question_answer["answer"])
if "validated_answers" in question_answer:
answer_annotations += question_answer["validated_answers"]
instance = self.text_to_instance(
question_text,
passage_text,
question_id,
passage_id,
answer_annotations,
passage_tokens,
)
if instance is not None:
kept_count += 1
yield instance
else:
skip_count += 1
logger.info(f"Skipped {skip_count} questions, kept {kept_count} questions.")
@overrides
def text_to_instance(
self, # type: ignore
question_text: str,
passage_text: str,
question_id: str = None,
passage_id: str = None,
answer_annotations: List[Dict] = None,
passage_tokens: List[Token] = None,
) -> Union[Instance, None]:
if not passage_tokens:
passage_tokens = self._tokenizer.tokenize(passage_text)
passage_tokens = split_tokens_by_hyphen(passage_tokens)
question_tokens = self._tokenizer.tokenize(question_text)
question_tokens = split_tokens_by_hyphen(question_tokens)
if self.passage_length_limit is not None:
passage_tokens = passage_tokens[: self.passage_length_limit]
if self.question_length_limit is not None:
question_tokens = question_tokens[: self.question_length_limit]
answer_type: str = None
answer_texts: List[str] = []
if answer_annotations:
# Currently we only use the first annotated answer here, but actually this doesn't affect
# the training, because we only have one annotation for the train set.
answer_type, answer_texts = self.extract_answer_info_from_annotation(
answer_annotations[0]
)
# Tokenize the answer text in order to find the matched span based on token
tokenized_answer_texts = []
for answer_text in answer_texts:
answer_tokens = self._tokenizer.tokenize(answer_text)
answer_tokens = split_tokens_by_hyphen(answer_tokens)
tokenized_answer_texts.append(" ".join(token.text for token in answer_tokens))
if self.instance_format == "squad":
valid_passage_spans = (
self.find_valid_spans(passage_tokens, tokenized_answer_texts)
if tokenized_answer_texts
else []
)
if not valid_passage_spans:
if "passage_span" in self.skip_when_all_empty:
return None
else:
valid_passage_spans.append((len(passage_tokens) - 1, len(passage_tokens) - 1))
return make_reading_comprehension_instance(
question_tokens,
passage_tokens,
self._token_indexers,
passage_text,
valid_passage_spans,
# this `answer_texts` will not be used for evaluation
answer_texts,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"passage_id": passage_id,
"question_id": question_id,
"valid_passage_spans": valid_passage_spans,
"answer_annotations": answer_annotations,
},
)
elif self.instance_format == "bert":
question_concat_passage_tokens = question_tokens + [Token("[SEP]")] + passage_tokens
valid_passage_spans = []
for span in self.find_valid_spans(passage_tokens, tokenized_answer_texts):
# This span is for `question + [SEP] + passage`.
valid_passage_spans.append(
(span[0] + len(question_tokens) + 1, span[1] + len(question_tokens) + 1)
)
if not valid_passage_spans:
if "passage_span" in self.skip_when_all_empty:
return None
else:
valid_passage_spans.append(
(
len(question_concat_passage_tokens) - 1,
len(question_concat_passage_tokens) - 1,
)
)
answer_info = {
"answer_texts": answer_texts, # this `answer_texts` will not be used for evaluation
"answer_passage_spans": valid_passage_spans,
}
return self.make_bert_drop_instance(
question_tokens,
passage_tokens,
question_concat_passage_tokens,
self._token_indexers,
passage_text,
answer_info,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"passage_id": passage_id,
"question_id": question_id,
"answer_annotations": answer_annotations,
},
)
elif self.instance_format == "drop":
numbers_in_passage = []
number_indices = []
for token_index, token in enumerate(passage_tokens):
number = self.convert_word_to_number(token.text)
if number is not None:
numbers_in_passage.append(number)
number_indices.append(token_index)
# hack to guarantee minimal length of padded number
numbers_in_passage.append(0)
number_indices.append(-1)
numbers_as_tokens = [Token(str(number)) for number in numbers_in_passage]
valid_passage_spans = (
self.find_valid_spans(passage_tokens, tokenized_answer_texts)
if tokenized_answer_texts
else []
)
valid_question_spans = (
self.find_valid_spans(question_tokens, tokenized_answer_texts)
if tokenized_answer_texts
else []
)
target_numbers = []
# `answer_texts` is a list of valid answers.
for answer_text in answer_texts:
number = self.convert_word_to_number(answer_text)
if number is not None:
target_numbers.append(number)
valid_signs_for_add_sub_expressions: List[List[int]] = []
valid_counts: List[int] = []
if answer_type in ["number", "date"]:
valid_signs_for_add_sub_expressions = self.find_valid_add_sub_expressions(
numbers_in_passage, target_numbers
)
if answer_type in ["number"]:
# Currently we only support count number 0 ~ 9
numbers_for_count = list(range(10))
valid_counts = self.find_valid_counts(numbers_for_count, target_numbers)
type_to_answer_map = {
"passage_span": valid_passage_spans,
"question_span": valid_question_spans,
"addition_subtraction": valid_signs_for_add_sub_expressions,
"counting": valid_counts,
}
if self.skip_when_all_empty and not any(
type_to_answer_map[skip_type] for skip_type in self.skip_when_all_empty
):
return None
answer_info = {
"answer_texts": answer_texts, # this `answer_texts` will not be used for evaluation
"answer_passage_spans": valid_passage_spans,
"answer_question_spans": valid_question_spans,
"signs_for_add_sub_expressions": valid_signs_for_add_sub_expressions,
"counts": valid_counts,
}
return self.make_marginal_drop_instance(
question_tokens,
passage_tokens,
numbers_as_tokens,
number_indices,
self._token_indexers,
passage_text,
answer_info,
additional_metadata={
"original_passage": passage_text,
"original_question": question_text,
"original_numbers": numbers_in_passage,
"passage_id": passage_id,
"question_id": question_id,
"answer_info": answer_info,
"answer_annotations": answer_annotations,
},
)
else:
raise ValueError(
f'Expect the instance format to be "drop", "squad" or "bert", '
f"but got {self.instance_format}"
)
@staticmethod
def make_marginal_drop_instance(
question_tokens: List[Token],
passage_tokens: List[Token],
number_tokens: List[Token],
number_indices: List[int],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
answer_info: Dict[str, Any] = None,
additional_metadata: Dict[str, Any] = None,
) -> Instance:
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
question_offsets = [(token.idx, token.idx + len(token.text)) for token in question_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
question_field = TextField(question_tokens, token_indexers)
fields["passage"] = passage_field
fields["question"] = question_field
number_index_fields: List[Field] = [
IndexField(index, passage_field) for index in number_indices
]
fields["number_indices"] = ListField(number_index_fields)
# This field is actually not required in the model,
# it is used to create the `answer_as_plus_minus_combinations` field, which is a `SequenceLabelField`.
# We cannot use `number_indices` field for creating that, because the `ListField` will not be empty
# when we want to create a new empty field. That will lead to error.
numbers_in_passage_field = TextField(number_tokens, token_indexers)
metadata = {
"original_passage": passage_text,
"passage_token_offsets": passage_offsets,
"question_token_offsets": question_offsets,
"question_tokens": [token.text for token in question_tokens],
"passage_tokens": [token.text for token in passage_tokens],
"number_tokens": [token.text for token in number_tokens],
"number_indices": number_indices,
}
if answer_info:
metadata["answer_texts"] = answer_info["answer_texts"]
passage_span_fields: List[Field] = [
SpanField(span[0], span[1], passage_field)
for span in answer_info["answer_passage_spans"]
]
if not passage_span_fields:
passage_span_fields.append(SpanField(-1, -1, passage_field))
fields["answer_as_passage_spans"] = ListField(passage_span_fields)
question_span_fields: List[Field] = [
SpanField(span[0], span[1], question_field)
for span in answer_info["answer_question_spans"]
]
if not question_span_fields:
question_span_fields.append(SpanField(-1, -1, question_field))
fields["answer_as_question_spans"] = ListField(question_span_fields)
add_sub_signs_field: List[Field] = []
for signs_for_one_add_sub_expression in answer_info["signs_for_add_sub_expressions"]:
add_sub_signs_field.append(
SequenceLabelField(signs_for_one_add_sub_expression, numbers_in_passage_field)
)
if not add_sub_signs_field:
add_sub_signs_field.append(
SequenceLabelField([0] * len(number_tokens), numbers_in_passage_field)
)
fields["answer_as_add_sub_expressions"] = ListField(add_sub_signs_field)
count_fields: List[Field] = [
LabelField(count_label, skip_indexing=True) for count_label in answer_info["counts"]
]
if not count_fields:
count_fields.append(LabelField(-1, skip_indexing=True))
fields["answer_as_counts"] = ListField(count_fields)
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
@staticmethod
def make_bert_drop_instance(
question_tokens: List[Token],
passage_tokens: List[Token],
question_concat_passage_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
passage_text: str,
answer_info: Dict[str, Any] = None,
additional_metadata: Dict[str, Any] = None,
) -> Instance:
additional_metadata = additional_metadata or {}
fields: Dict[str, Field] = {}
passage_offsets = [(token.idx, token.idx + len(token.text)) for token in passage_tokens]
# This is separate so we can reference it later with a known type.
passage_field = TextField(passage_tokens, token_indexers)
question_field = TextField(question_tokens, token_indexers)
fields["passage"] = passage_field
fields["question"] = question_field
question_and_passage_field = TextField(question_concat_passage_tokens, token_indexers)
fields["question_and_passage"] = question_and_passage_field
metadata = {
"original_passage": passage_text,
"passage_token_offsets": passage_offsets,
"question_tokens": [token.text for token in question_tokens],
"passage_tokens": [token.text for token in passage_tokens],
}
if answer_info:
metadata["answer_texts"] = answer_info["answer_texts"]
passage_span_fields: List[Field] = [
SpanField(span[0], span[1], question_and_passage_field)
for span in answer_info["answer_passage_spans"]
]
if not passage_span_fields:
passage_span_fields.append(SpanField(-1, -1, question_and_passage_field))
fields["answer_as_passage_spans"] = ListField(passage_span_fields)
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
@staticmethod
def extract_answer_info_from_annotation(
answer_annotation: Dict[str, Any]
) -> Tuple[str, List[str]]:
answer_type = None
if answer_annotation["spans"]:
answer_type = "spans"
elif answer_annotation["number"]:
answer_type = "number"
elif any(answer_annotation["date"].values()):
answer_type = "date"
answer_content = answer_annotation[answer_type] if answer_type is not None else None
answer_texts: List[str] = []
if answer_type is None: # No answer
pass
elif answer_type == "spans":
# answer_content is a list of string in this case
answer_texts = answer_content
elif answer_type == "date":
# answer_content is a dict with "month", "day", "year" as the keys
date_tokens = [
answer_content[key]
for key in ["month", "day", "year"]
if key in answer_content and answer_content[key]
]
answer_texts = date_tokens
elif answer_type == "number":
# answer_content is a string of number
answer_texts = [answer_content]
return answer_type, answer_texts
@staticmethod
def convert_word_to_number(word: str, try_to_include_more_numbers=False):
"""
Currently we only support limited types of conversion.
"""
if try_to_include_more_numbers:
            # strip all punctuation from the sides of the word, except for the negative sign
            punctuations = string.punctuation.replace("-", "")
            word = word.strip(punctuations)
            # some words may contain a comma as a delimiter
            word = word.replace(",", "")
            # word_to_num would also convert bare scale words (hundred, thousand, ...), but we skip those here.
            if word in ["hundred", "thousand", "million", "billion", "trillion"]:
return None
try:
number = word_to_num(word)
except ValueError:
try:
number = int(word)
except ValueError:
try:
number = float(word)
except ValueError:
number = None
return number
else:
no_comma_word = word.replace(",", "")
if no_comma_word in WORD_NUMBER_MAP:
number = WORD_NUMBER_MAP[no_comma_word]
else:
try:
number = int(no_comma_word)
except ValueError:
number = None
return number
@staticmethod
def find_valid_spans(
passage_tokens: List[Token], answer_texts: List[str]
) -> List[Tuple[int, int]]:
normalized_tokens = [
token.text.lower().strip(STRIPPED_CHARACTERS) for token in passage_tokens
]
word_positions: Dict[str, List[int]] = defaultdict(list)
for i, token in enumerate(normalized_tokens):
word_positions[token].append(i)
spans = []
for answer_text in answer_texts:
answer_tokens = answer_text.lower().strip(STRIPPED_CHARACTERS).split()
num_answer_tokens = len(answer_tokens)
if answer_tokens[0] not in word_positions:
continue
for span_start in word_positions[answer_tokens[0]]:
span_end = span_start # span_end is _inclusive_
answer_index = 1
while answer_index < num_answer_tokens and span_end + 1 < len(normalized_tokens):
token = normalized_tokens[span_end + 1]
if answer_tokens[answer_index].strip(STRIPPED_CHARACTERS) == token:
answer_index += 1
span_end += 1
elif token in IGNORED_TOKENS:
span_end += 1
else:
break
if num_answer_tokens == answer_index:
spans.append((span_start, span_end))
return spans
@staticmethod
def find_valid_add_sub_expressions(
numbers: List[int], targets: List[int], max_number_of_numbers_to_consider: int = 2
) -> List[List[int]]:
valid_signs_for_add_sub_expressions = []
# TODO: Try smaller numbers?
for number_of_numbers_to_consider in range(2, max_number_of_numbers_to_consider + 1):
possible_signs = list(itertools.product((-1, 1), repeat=number_of_numbers_to_consider))
for number_combination in itertools.combinations(
enumerate(numbers), number_of_numbers_to_consider
):
indices = [it[0] for it in number_combination]
values = [it[1] for it in number_combination]
for signs in possible_signs:
eval_value = sum(sign * value for sign, value in zip(signs, values))
if eval_value in targets:
                        labels_for_numbers = [0] * len(numbers)  # 0 represents "not included"
for index, sign in zip(indices, signs):
labels_for_numbers[index] = (
1 if sign == 1 else 2
) # 1 for positive, 2 for negative
valid_signs_for_add_sub_expressions.append(labels_for_numbers)
return valid_signs_for_add_sub_expressions
@staticmethod
def find_valid_counts(count_numbers: List[int], targets: List[int]) -> List[int]:
valid_indices = []
for index, number in enumerate(count_numbers):
if number in targets:
valid_indices.append(index)
return valid_indices
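# A standalone sketch of the add/sub sign search implemented above (an
# illustrative re-implementation on plain lists, not part of the reader's API):
# for numbers [2, 5, 3] and target 8, only the pair (5, 3) with signs (+1, +1)
# matches, so its positions get label 1 ("plus") and everything else stays 0.
if __name__ == "__main__":
    import itertools as _it
    def _sign_labels(numbers, targets, max_terms=2):
        results = []
        for k in range(2, max_terms + 1):
            for combo in _it.combinations(enumerate(numbers), k):
                indices = [i for i, _ in combo]
                values = [v for _, v in combo]
                for signs in _it.product((-1, 1), repeat=k):
                    if sum(s * v for s, v in zip(signs, values)) in targets:
                        labels = [0] * len(numbers)
                        for i, s in zip(indices, signs):
                            labels[i] = 1 if s == 1 else 2
                        results.append(labels)
        return results
    print(_sign_labels([2, 5, 3], [8]))  # expected: [[0, 1, 1]]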
|
# coding: utf-8
import argparse
import time
from itertools import product
from functools import partial
import gc
import json
import torch
import torch.nn as nn
from apex import amp
from data_utils import get_time_series
from mem_transformer import MemTransformerLM
from utils.initialization import weights_init
from train_ts import parallelize_model, build_optimizer, build_scheduler
from utils.torch_utils import non_emb_param_count, openai_compute
class DotDict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def head_repartition_rule(d_model):
if d_model > 256:
d_head = 64
n_head = d_model // d_head
elif d_model > 128:
n_head = 8
d_head = d_model // n_head
elif d_model > 64:
n_head = 4
d_head = d_model // n_head
elif d_model > 16:
n_head = 2
d_head = d_model // n_head
else:
n_head = 1
d_head = d_model
return n_head, d_head
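# Illustrative mapping implied by the rule above (values derived from the code,
# not additional configuration): d_model=512 -> (8 heads, 64 dims per head),
# d_model=200 -> (8, 25), d_model=100 -> (4, 25), d_model=32 -> (2, 16),
# d_model=16 -> (1, 16).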
def benchmark(model, optimizers, schedulers):
# Turn on training mode which enables dropout.
if isinstance(model, nn.DataParallel):
parent_model = model.module
else:
parent_model = model
optimizer, optimizer_sparse = optimizers
scheduler, scheduler_sparse = schedulers
train_step = 0
train_losses = []
model.train()
if default_args.batch_chunk > 1:
mems = [tuple() for _ in range(default_args.batch_chunk)]
else:
mems = tuple()
train_iter = tr_iter.get_varlen_iter() if default_args.varlen else tr_iter
start_time = time.time()
for batch, (data, target, seq_len) in enumerate(train_iter):
model.zero_grad()
if default_args.batch_chunk > 1:
data_chunks = torch.chunk(data, default_args.batch_chunk, 1)
target_chunks = torch.chunk(target, default_args.batch_chunk, 1)
for i in range(default_args.batch_chunk):
data_i = data_chunks[i].contiguous()
target_i = target_chunks[i].contiguous()
ret = model(data_i, target_i, *mems[i])
loss, mems[i] = ret[0], ret[1:]
loss = loss.float().mean().type_as(loss) / default_args.batch_chunk
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
train_losses.append(loss.float().item())
else:
ret = model(data, target, *mems)
loss, mems = ret[0], ret[1:]
loss = loss.float().mean().type_as(loss)
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
train_losses.append(loss.float().item())
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), default_args.clip
)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), default_args.clip)
optimizer.step()
parent_model.compute += openai_compute(
non_emb_param_count(parent_model, nseries), data.numel(), 1
)
# step-wise learning rate annealing
train_step += 1
parent_model.training_steps += 1
if default_args.scheduler in ["cosine", "constant", "dev_perf"]:
# linear warmup stage
if train_step < default_args.warmup_step:
curr_lr = default_args.lr * train_step / default_args.warmup_step
                # scale the learning rate on every param group during warmup
                for param_group in optimizer.param_groups:
                    param_group["lr"] = curr_lr
else:
if default_args.scheduler == "cosine":
scheduler.step(train_step)
elif default_args.scheduler == "inv_sqrt":
scheduler.step(train_step)
if train_step == default_args.max_step:
return (
parent_model.compute * 24 * 3600,
time.time() - start_time,
train_step * data.numel(),
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="benchmarking script")
parser.add_argument(
"-l", "--n_layers", nargs="+", help="n_layers to test", required=True
)
parser.add_argument(
"-d", "--d_models", nargs="+", help="d_models to test", required=True
)
parser.add_argument(
"-b", "--batch_sizes", nargs="+", help="batch sizes to test", required=True
)
parser.add_argument("--fp16", type=str, default=None, choices=["O1", "O2", "O0"])
parser.add_argument("-t", "--tracking", action="store_true")
parser.add_argument("--reload", action="store_true")
args = parser.parse_args()
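    # Example invocation (the script filename and values are illustrative only):
    #   python benchmark_compute_grid.py -l 4 8 -d 256 512 -b 32 --fp16 O1 --tracking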
start_time = time.time()
if args.reload:
try:
results = json.load(open(f"compute_grid_results_{args.fp16}.json"))
print(f"reloaded from compute_grid_results_{args.fp16}.json")
except FileNotFoundError:
results = {}
else:
results = {}
default_args = DotDict(
{
"data" : "../data/etf",
"dataset" : "allData.pickle",
"batch_chunk" : 1,
"not_tied" : False,
"div_val" : 1,
"pre_lnorm" : False,
"dropout" : 0.0,
"dropatt" : 0.0,
"init" : "normal",
"emb_init" : "normal",
"init_range" : 0.1,
"emb_init_range": 0.01,
"init_std" : 0.02,
"proj_init_std" : 0.01,
"optim" : "adam",
"lr" : 5e-05,
"mom" : 0.0,
"scheduler" : "cosine",
"warmup_step" : 0,
"decay_rate" : 0.5,
"lr_min" : 0.0,
"clip": 0.25,
"clip_nonemb": False,
"eta_min": 0.0,
"tgt_len": 150,
"eval_tgt_len": 150,
"ext_len": 0,
"mem_len": 150,
"varlen": False,
"same_length": False,
"clamp_len": -1,
"seed": 1111,
"max_step": 100,
"cuda": True,
"multi_gpu": False,
"gpu0_bsz": -1,
"debug": False,
"knockknock": True,
"tied": True,
}
)
device = torch.device("cuda" if default_args.cuda else "cpu")
if args.fp16 == "O1":
amp.register_half_function(torch, "einsum")
cutoffs, tie_projs = [], [False]
for n_layer, d_model, batch_size in product(
args.n_layers, args.d_models, args.batch_sizes
):
        n_layer, d_model, batch_size = int(n_layer), int(d_model), int(batch_size)
if args.reload:
if results.get(str((n_layer, d_model, batch_size))) is not None:
print(f"{(n_layer, d_model, batch_size)} already in results")
continue
corpus = get_time_series(default_args.data, default_args.dataset)
nseries = len(corpus.vocab)
default_args.n_token = nseries
if args.tracking:
from experiment_impact_tracker.compute_tracker import ImpactTracker
tracker = ImpactTracker(f"impact/{n_layer}_{d_model}_{batch_size}")
tracker.launch_impact_monitor()
n_head, d_head = head_repartition_rule(d_model)
d_inner = d_model
model = MemTransformerLM(
nseries,
n_layer,
n_head,
d_model,
d_head,
d_inner,
default_args.dropout,
default_args.dropatt,
tie_weight=default_args.tied,
d_embed=d_model,
div_val=default_args.div_val,
tie_projs=tie_projs,
pre_lnorm=default_args.pre_lnorm,
tgt_len=default_args.tgt_len,
ext_len=default_args.ext_len,
mem_len=default_args.mem_len,
cutoffs=cutoffs,
same_length=default_args.same_length,
clamp_len=default_args.clamp_len,
)
initialization_func = partial(
weights_init,
init="normal",
init_range=0.1,
init_std=0.02,
proj_init_std=0.01,
)
model.apply(initialization_func)
try:
tr_iter = corpus.get_iterator(
"train",
batch_size,
default_args.tgt_len,
device=device,
ext_len=default_args.ext_len,
)
para_model = parallelize_model(model, default_args)
optimizers = build_optimizer(para_model, default_args, reload=False)
optimizer, optimizer_sparse = optimizers
schedulers = build_scheduler(optimizers, default_args)
scheduler, scheduler_sparse = schedulers
if default_args.cuda and args.fp16:
para_model, optimizer = amp.initialize(
para_model, optimizer, opt_level=args.fp16, verbosity=0
)
compute, run_time, processed_tokens = benchmark(
para_model, optimizers, schedulers
)
total_time = time.time() - start_time
print("-" * 130)
print(
f"n_layer {n_layer} d_model {d_model} batch_size {batch_size} fp16 {args.fp16}: "
+ "{:.4e} FLOs in {:.4e}s for ".format(compute, run_time)
+ f"{processed_tokens} tokens, "
f"total time {total_time}"
)
results[str((n_layer, d_model, batch_size))] = (
compute,
run_time,
processed_tokens,
compute / run_time,
)
except RuntimeError as e:
print("-" * 100)
total_time = time.time() - start_time
print(
f"n_layer {n_layer} d_model {d_model} batch_size {batch_size} fp16 {args.fp16}: OOM error, "
f"total time {total_time}"
)
results[str((n_layer, d_model, batch_size))] = None
finally:
# Handle CUDA OOM Error Safely
try:
del model
del para_model
del optimizer
del scheduler
gc.collect()
torch.cuda.empty_cache()
except NameError:
pass
with open(f"compute_grid_results_{args.fp16}.json", "w") as f:
json.dump(results, f, indent=2)
|
from tests.test_helper import *
from braintree.test.credit_card_numbers import CreditCardNumbers
class TestDisputeSearch(unittest.TestCase):
def create_sample_disputed_transaction(self):
customer = Customer.create({
"first_name": "Jen",
"last_name": "Smith",
"company": "Braintree",
"email": "jen@example.com",
"phone": "312.555.1234",
"fax": "614.555.5678",
"website": "www.example.com",
}).customer
return Transaction.sale({
"amount": "100.00",
"credit_card": {
"number": CreditCardNumbers.Disputes.Chargeback,
"expiration_date": "12/2019",
},
"customer_id": customer.id,
"merchant_account_id": "14LaddersLLC_instant",
"options": {
"submit_for_settlement": True,
},
}).transaction
def test_advanced_search_no_results(self):
collection = Dispute.search([
DisputeSearch.id == "non_existent_dispute"
])
disputes = [dispute for dispute in collection.disputes.items]
self.assertEquals(0, len(disputes))
def test_advanced_search_returns_single_dispute_by_customer_id(self):
transaction = self.create_sample_disputed_transaction()
collection = Dispute.search([
DisputeSearch.customer_id == transaction.customer_details.id
])
disputes = [dispute for dispute in collection.disputes.items]
self.assertEquals(1, len(disputes))
dispute = disputes[0]
self.assertEquals(dispute.id, transaction.disputes[0].id)
self.assertEquals(dispute.status, Dispute.Status.Open)
def test_advanced_search_returns_single_dispute_by_id(self):
collection = Dispute.search([
DisputeSearch.id == "open_dispute"
])
disputes = [dispute for dispute in collection.disputes.items]
self.assertEquals(1, len(disputes))
dispute = disputes[0]
self.assertEquals(dispute.id, "open_dispute")
self.assertEquals(dispute.status, Dispute.Status.Open)
def test_advanced_search_returns_disputes_by_multiple_reasons(self):
collection = Dispute.search([
DisputeSearch.reason.in_list([
braintree.Dispute.Reason.ProductUnsatisfactory,
braintree.Dispute.Reason.Retrieval
])
])
disputes = [dispute for dispute in collection.disputes.items]
self.assertGreaterEqual(len(disputes), 2)
def test_advanced_search_returns_disputes_by_chargeback_protection_level(self):
collection = Dispute.search([
DisputeSearch.chargeback_protection_level.in_list([
braintree.Dispute.ChargebackProtectionLevel.Effortless,
])
])
disputes = [dispute for dispute in collection.disputes.items]
self.assertEqual(len(disputes), 1)
self.assertEqual(disputes[0].case_number, "CASE-CHARGEBACK-PROTECTED")
self.assertEqual(disputes[0].reason, braintree.Dispute.Reason.Fraud)
self.assertEqual(disputes[0].chargeback_protection_level, braintree.Dispute.ChargebackProtectionLevel.Effortless)
def test_advanced_search_returns_disputes_by_date_range(self):
collection = Dispute.search([
DisputeSearch.received_date.between("03/03/2014", "03/05/2014")
])
disputes = [dispute for dispute in collection.disputes.items]
self.assertGreaterEqual(len(disputes), 1)
self.assertEquals(disputes[0].received_date, date(2014, 3, 4))
def test_advanced_search_returns_disputes_by_disbursement_date_range(self):
transaction = self.create_sample_disputed_transaction()
disbursement_date = transaction.disputes[0].status_history[0].disbursement_date
collection = Dispute.search([
DisputeSearch.disbursement_date.between(disbursement_date, disbursement_date)
])
disputes = [dispute for dispute in collection.disputes.items]
self.assertGreaterEqual(len(disputes), 1)
self.assertEquals(disputes[0].status_history[0].disbursement_date, disbursement_date)
def test_advanced_search_returns_disputes_by_effective_date_range(self):
transaction = self.create_sample_disputed_transaction()
effective_date = transaction.disputes[0].status_history[0].effective_date
collection = Dispute.search([
DisputeSearch.effective_date.between(effective_date, effective_date)
])
disputes = [dispute for dispute in collection.disputes.items]
self.assertGreaterEqual(len(disputes), 1)
self.assertEquals(disputes[0].status_history[0].effective_date, effective_date)
def test_advanced_search_returns_disputes_by_amount_and_status(self):
collection = Dispute.search([
DisputeSearch.amount_disputed.between("1.00", "100.00"),
DisputeSearch.id == "open_dispute"
])
disputes = [dispute for dispute in collection.disputes.items]
self.assertEquals(1, len(disputes))
def test_advanced_search_can_take_one_criteria(self):
collection = Dispute.search(
DisputeSearch.id == "non_existent_dispute"
)
disputes = [dispute for dispute in collection.disputes.items]
self.assertEquals(0, len(disputes))
|
"""Root resources."""
import flask
from dnstwister import app
from dnstwister import cache
@app.route(r'/favicon.ico')
@cache.memoize(86400)
def favicon():
"""Favicon (because some clients don't read the link tag)."""
return flask.send_from_directory(app.static_folder, flask.request.path[1:])
|
import json
import logging
import os
import subprocess
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger()
logger.setLevel(logging.INFO)
repo = os.environ.get('REPOSITORY_URI')
mount_target = os.environ.get('MOUNT_TARGET', '/mnt/efsmount')
sync_path = os.environ.get('SYNC_PATH')
github_secret_id = os.environ.get('GITHUB_SECRET_ID')
github_secret_key = os.environ.get('GITHUB_SECRET_KEY')
def get_secret_value(id, key):
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager'
)
try:
json_secret_value = json.loads(client.get_secret_value(SecretId=id).get('SecretString'))
except ClientError as e:
print(e.response['Error']['Code'])
return None
return json_secret_value.get(key)
if github_secret_id and github_secret_key and repo.startswith('https://'):
github_oauth_token = get_secret_value(github_secret_id, github_secret_key)
repo = 'https://{}@{}'.format(github_oauth_token, repo[8:])
def on_event(event, context):
print(event)
request_type = event['RequestType']
if request_type == 'Create': return on_create(event)
if request_type == 'Update': return on_update(event)
if request_type == 'Delete': return on_delete(event)
raise Exception("Invalid request type: %s" % request_type)
def on_create(event):
props = event["ResourceProperties"]
print("create new resource with props %s" % props)
# add your create code here...
# physical_id = ...
sync(repo, mount_target)
ok_result = {'status': 'ok'}
return { 'Data': ok_result }
# return { 'PhysicalResourceId': physical_id }
def on_update(event):
physical_id = event["PhysicalResourceId"]
props = event["ResourceProperties"]
print("update resource %s with props %s" % (physical_id, props))
# ...
def on_delete(event):
physical_id = event["PhysicalResourceId"]
print("delete resource %s" % physical_id)
# ...
def sync(repo, target_path):
    full_path = '{}{}'.format(target_path, sync_path)
if sync_path == '/':
# delete all contents from root directory, but not root directory itself
os.chdir(full_path)
subprocess.check_call('rm -rf {}*'.format(full_path), shell=True)
else:
subprocess.check_call([ 'rm', '-rf', full_path ])
subprocess.check_call([ 'git', 'clone', repo, full_path ])
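# For local experimentation, `on_event` could be driven with a minimal
# CloudFormation custom-resource payload (field names follow the standard
# custom-resource contract used above; the values here are made up):
#   on_event({'RequestType': 'Create', 'ResourceProperties': {}}, None)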
|
#!/usr/bin/env python3
# coding: utf8
# Author: Lenz Furrer, 2018--2019
'''
Postfilters for removing nested annotations.
'''
from collections import defaultdict
from typing import Sequence, Iterator
from ..doc import document
def remove_overlaps(content: document.Exporter) -> None:
'''
Remove annotations that partially overlap with another one.
Of any cluster of overlapping spans, only the longest
one(s) are kept. Ties are not broken.
Ignores entity type.
'''
_rm_any_overlaps(content, sametype=False, sub=False)
def remove_sametype_overlaps(content: document.Exporter) -> None:
'''
Remove same-type annotations that partially overlap with another one.
Overlapping entities of different type are kept.
'''
_rm_any_overlaps(content, sametype=True, sub=False)
def remove_submatches(content: document.Exporter) -> None:
'''
Remove annotations that are contained in another one.
Unlike `remove_overlaps()`, only true subsequences are removed.
Consider the following annotation spans:
|--------------| (1)
|--------| (2)
|-----| (3)
This function removes only annotation (2), whereas
`remove_overlaps` removes (2) and (3).
'''
_rm_any_overlaps(content, sametype=False, sub=True)
def remove_sametype_submatches(content: document.Exporter) -> None:
'''
Remove same-type annotations that are contained in another one.
'''
_rm_any_overlaps(content, sametype=True, sub=True)
def _rm_any_overlaps(content: document.Exporter, sametype: bool, sub: bool):
if sametype:
filter_ = _rm_sametype_overlaps
else:
filter_ = lambda e, s: list(_rm_overlapping(e, s))
for sentence in content.get_subelements(document.Sentence):
sentence.entities = filter_(sentence.entities, sub)
def _rm_sametype_overlaps(entities, sub):
# Divide the entities into subsets by entity type.
entity_types = defaultdict(list)
for e in entities:
entity_types[e.type].append(e)
# Remove the submatches from each subset.
filtered = []
for e in entity_types.values():
filtered.extend(_rm_overlapping(e, sub))
filtered.sort(key=document.Entity.sort_key)
return filtered
def _rm_overlapping(entities: Sequence[document.Entity],
sub: bool) -> Iterator[document.Entity]:
'''
Filter out annotations that overlap with others.
'''
# Get the indices of all removables.
filter_ = _submatches if sub else _crossmatches
removables = set(filter_(entities))
# Create a new, filtered list.
return (e for i, e in enumerate(entities) if i not in removables)
def _submatches(entities):
'''
Identify all entities that are found within another entity.
'''
# Since the entities are sorted by offsets, only one reference is
# needed for comparison.
# However, runs of equal offsets might need to be excluded together --
# when followed by a later entity which contains them all.
ref_is, ref_entity = [], None
for i, entity in enumerate(entities):
if i: # skip comparison in the first iteration (no reference yet)
if _contains(ref_entity, entity):
yield i
continue # keep the previous reference
elif _contains(entity, ref_entity):
yield from ref_is
elif _equals(entity, ref_entity):
# If the next entity will contain this one, then the previous
# needs to be excluded as well.
ref_is.append(i)
continue # keep the previous reference
# If the current entity was not contained in the reference, then its
# end offset is greater or equal to that of the reference.
# Since the start offset of any future entity will not be lower than
# the current one, we can safely update the reference.
ref_is, ref_entity = [i], entity
def _contains(a, b):
'''
Return True if a contains b, False otherwise.
'''
return ((a.start <= b.start and a.end > b.end)
or
(a.start < b.start and a.end >= b.end))
def _equals(a, b):
'''
Return True if a's and b's offsets are the same.
'''
return a.start == b.start and a.end == b.end
def _crossmatches(entities):
'''
Identify partially overlapping entities to be excluded.
'''
for cluster in _clusters(entities):
longest = max(l for _, l in cluster)
for i, l in cluster:
if l != longest:
yield i
def _clusters(entities):
cluster = []
current_end = 0
for i, e in enumerate(entities):
if e.start >= current_end:
if len(cluster) > 1:
yield cluster
cluster.clear()
current_end = max(current_end, e.end)
cluster.append((i, e.end-e.start))
if len(cluster) > 1:
yield cluster
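# Standalone sketch of the "keep only the longest span(s) of each overlap
# cluster" rule implemented above, using plain (start, end) tuples instead of
# document.Entity objects (illustrative only, not part of this module's API):
if __name__ == "__main__":
    def _keep_longest(spans):
        spans = sorted(spans)
        kept, cluster, current_end = [], [], 0
        for span in spans:
            if span[0] >= current_end and cluster:
                longest = max(e - s for s, e in cluster)
                kept.extend((s, e) for s, e in cluster if e - s == longest)
                cluster = []
            current_end = max(current_end, span[1])
            cluster.append(span)
        if cluster:
            longest = max(e - s for s, e in cluster)
            kept.extend((s, e) for s, e in cluster if e - s == longest)
        return kept
    # (0, 14) overlaps both (3, 11) and (12, 17), so only the longest survives.
    print(_keep_longest([(0, 14), (3, 11), (12, 17)]))  # expected: [(0, 14)]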
|
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
    def __init__(self, game, args):
        super(Model, self).__init__()
        # game params
        self.board_x, self.board_y = game.getBoardSize()
        self.action_size = game.getActionSize()
        self.args = args
        # nnet params
        layers = args.layers
        self.layers = layers
        assert layers > 2
        self.shrink = 2 * (self.layers - 2)
        logging.info(f"Creating NN with {layers} layers 🧮")
        self.conv = []
        self.batchnorm = []
        self.fc = []
        self.fcbn = []
        self._setup()
def _setup(self):
# Create Conv layers
in_channels = 1
        kernel_size = int(float(min(self.board_x, self.board_y)) / self.layers)
        # the board-derived kernel size above is currently overridden by a fixed 3x3 kernel
        kernel_size = 3
paddings = [0] * self.layers
paddings[0] = 1
paddings[1] = 1
for i in range(self.layers):
conv = nn.Conv2d(in_channels, self.args.num_channels, kernel_size, stride=1, padding=paddings[i])
self.add_module(f'conv{i}', conv)
self.conv.append(conv)
in_channels = self.args.num_channels
# Prepare Batch Normalization
for i in range(self.layers):
bn = nn.BatchNorm2d(self.args.num_channels)
self.batchnorm.append(bn)
self.add_module(f'batchnorm{i}', bn)
# Prepare features
in_features = self.args.num_channels * (self.board_x - self.shrink) * (self.board_y - self.shrink)
if in_features <= 0:
logging.error("Too many layers considering the board size.")
raise ValueError
out_features = 256 * 2 ** (self.layers - 2)
for i in range(self.layers - 2):
            # halved on each iteration; the final value matches the width of the
            # last fully connected layer and is reused below for fc_pi and fc_v
            out_features = int(out_features / 2.0)
            max_features = min(out_features, 256)
            logging.info(f"Creating a fully connected layer with {max_features} output features")
linear = nn.Linear(in_features, max_features)
self.fc.append(linear)
self.add_module(f'fc{i}', linear)
bn = nn.BatchNorm1d(max_features)
self.fcbn.append(bn)
self.add_module(f'batchnorm1d{i}', bn)
in_features = max_features
self.fc_pi = nn.Linear(out_features, self.action_size)
self.fc_v = nn.Linear(out_features, 1)
def forward(self, s: torch.Tensor):
s = s.view(-1, 1, self.board_x, self.board_y)
for i in range(self.layers):
s = F.relu(self.batchnorm[i](self.conv[i](s)))
size = self.args.num_channels * (self.board_x - self.shrink) * (self.board_y - self.shrink)
s = s.view(-1, size)
for i in range(self.layers - 2):
s = F.dropout(F.relu(self.fcbn[i](self.fc[i](s))), p=self.args.dropout, training=self.training)
pi = self.fc_pi(s)
v = self.fc_v(s)
return F.log_softmax(pi, dim=1), torch.tanh(v)
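# Minimal smoke test for the Model above (a sketch: the real `game` and `args`
# objects come from the surrounding project and may carry more fields; the
# dummy values below are assumptions for illustration only).
if __name__ == "__main__":
    from types import SimpleNamespace
    class _DummyGame:
        def getBoardSize(self):
            return 8, 8
        def getActionSize(self):
            return 65
    _args = SimpleNamespace(layers=3, num_channels=32, dropout=0.3)
    _model = Model(_DummyGame(), _args)
    _pi, _v = _model(torch.zeros(2, 8, 8))
    print(_pi.shape, _v.shape)  # expected: torch.Size([2, 65]) torch.Size([2, 1])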
|
class MemberDescriptor(object):
""" Represents a class member,such as a property or event. This is an abstract base class. """
def CreateAttributeCollection(self,*args):
"""
CreateAttributeCollection(self: MemberDescriptor) -> AttributeCollection
Creates a collection of attributes using the array of attributes passed to the constructor.
Returns: A new System.ComponentModel.AttributeCollection that contains the
System.ComponentModel.MemberDescriptor.AttributeArray attributes.
"""
pass
def Equals(self,obj):
"""
Equals(self: MemberDescriptor,obj: object) -> bool
Compares this instance to the given object to see if they are equivalent.
obj: The object to compare to the current instance.
Returns: true if equivalent; otherwise,false.
"""
pass
def FillAttributes(self,*args):
"""
FillAttributes(self: MemberDescriptor,attributeList: IList)
When overridden in a derived class,adds the attributes of the inheriting class to the specified
list of attributes in the parent class.
attributeList: An System.Collections.IList that lists the attributes in the parent class. Initially,this is
empty.
"""
pass
def FindMethod(self,*args):
"""
FindMethod(componentClass: Type,name: str,args: Array[Type],returnType: Type,publicOnly: bool) -> MethodInfo
Finds the given method through reflection,with an option to search only public methods.
componentClass: The component that contains the method.
name: The name of the method to find.
args: An array of parameters for the method,used to choose between overloaded methods.
returnType: The type to return for the method.
publicOnly: Whether to restrict search to public methods.
Returns: A System.Reflection.MethodInfo that represents the method,or null if the method is not found.
FindMethod(componentClass: Type,name: str,args: Array[Type],returnType: Type) -> MethodInfo
Finds the given method through reflection,searching only for public methods.
componentClass: The component that contains the method.
name: The name of the method to find.
args: An array of parameters for the method,used to choose between overloaded methods.
returnType: The type to return for the method.
Returns: A System.Reflection.MethodInfo that represents the method,or null if the method is not found.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: MemberDescriptor) -> int
Returns the hash code for this instance.
Returns: A hash code for the current System.ComponentModel.MemberDescriptor.
"""
pass
def GetInvocationTarget(self,*args):
"""
GetInvocationTarget(self: MemberDescriptor,type: Type,instance: object) -> object
Retrieves the object that should be used during invocation of members.
type: The System.Type of the invocation target.
instance: The potential invocation target.
Returns: The object to be used during member invocations.
"""
pass
def GetInvokee(self,*args):
"""
GetInvokee(componentClass: Type,component: object) -> object
Gets the component on which to invoke a method.
componentClass: A System.Type representing the type of component this System.ComponentModel.MemberDescriptor is
bound to. For example,if this System.ComponentModel.MemberDescriptor describes a property,this
parameter should be the class that the property is declared on.
component: An instance of the object to call.
Returns: An instance of the component to invoke. This method returns a visual designer when the property
is attached to a visual designer.
"""
pass
def GetSite(self,*args):
"""
GetSite(component: object) -> ISite
Gets a component site for the given component.
component: The component for which you want to find a site.
Returns: The site of the component,or null if a site does not exist.
"""
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
"""
__new__(cls: type,name: str)
__new__(cls: type,name: str,attributes: Array[Attribute])
__new__(cls: type,descr: MemberDescriptor)
__new__(cls: type,oldMemberDescriptor: MemberDescriptor,newAttributes: Array[Attribute])
"""
pass
def __ne__(self,*args):
pass
AttributeArray=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets an array of attributes.
"""
Attributes=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the collection of attributes for this member.
Get: Attributes(self: MemberDescriptor) -> AttributeCollection
"""
Category=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name of the category to which the member belongs,as specified in the System.ComponentModel.CategoryAttribute.
Get: Category(self: MemberDescriptor) -> str
"""
Description=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the description of the member,as specified in the System.ComponentModel.DescriptionAttribute.
Get: Description(self: MemberDescriptor) -> str
"""
DesignTimeOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets whether this member should be set only at design time,as specified in the System.ComponentModel.DesignOnlyAttribute.
Get: DesignTimeOnly(self: MemberDescriptor) -> bool
"""
DisplayName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name that can be displayed in a window,such as a Properties window.
Get: DisplayName(self: MemberDescriptor) -> str
"""
IsBrowsable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the member is browsable,as specified in the System.ComponentModel.BrowsableAttribute.
Get: IsBrowsable(self: MemberDescriptor) -> bool
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name of the member.
Get: Name(self: MemberDescriptor) -> str
"""
NameHashCode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the hash code for the name of the member,as specified in System.String.GetHashCode.
"""
|
"""Retrieving current Top news stories
This module gets up to date current Top news stories through an API supplied by "newsapi.org".
It formats the information as an news story to be use as a notification and an announcement in the
alarm and its interface
"""
import json
import logging
import requests
from flask import Markup
from CA3_code_package import global_vars
logging.basicConfig(filename='sys.log', format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
def get_news() -> list or None:
"""Retrieves parameters defined in config.json, filters and formats weather data for the alarm.
It retrieves a user-defined JSON file through a URL containing the newsapi link, the
specific country/region for the news data, as well as an API key given by newsapi.org.
This data is applied accordingly to a dictionary type with the keys: title, content and
announcement to notify users clearly of top news stories in the country.
This dictionary is added to a list of current notifications to be displayed and/or voiced.
"""
news_list = []
base_url = "https://newsapi.org/v2/top-headlines?"
# Validating and Retrieving a country and an API key from config.json for the news json file
with open('config.json', 'r') as config_file:
api_config = json.load(config_file)
news_config = api_config['news_api']
api_key = news_config['api_key']
country = news_config['country']
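    # Example of the expected "news_api" section in config.json (the values
    # below are placeholders, not real credentials):
    #   {"news_api": {"api_key": "<newsapi.org key>", "country": "gb"}}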
# forming complete URL containing a news json file from user-defined components
complete_url = base_url + "country=" + country + "&apiKey=" + api_key
    # Retrieve and validate the news JSON file
response = requests.get(complete_url)
if response.status_code == 200:
try:
articles = response.json()["articles"]
except KeyError as excep:
logging.error("JSON file cannot be retrieved as invalid URL or API key %s", excep)
return None
for article in articles:
# filters articles that contain covid or coronavirus in the title
if "covid" in article["title"].lower() or "coronavirus" in article["title"].lower():
check_news_list = [{"title": article["title"],
"content": Markup("<a href=" + article["url"] + ">"
+ article["url"] + "<a>")}]
# Checks if article has not already been a notification
if check_news_list not in global_vars.old_notifs:
# Creates dictionary to be added to notif list with article title and its link
news_list.append({"title": article["title"],
"content": Markup("<a href=" + article["url"] + ">"
+ article["url"] + "<a>")})
break
return news_list
|
import time
import requests
from multiprocessing import Process, Queue
from datetime import timedelta
import mailSender
# Types of indicators
indicatorsTypes = {'MAX': 'maxTime', 'AVG': 'avgTime', 'AVA': 'availability', 'STA' : 'status'}
def startMonitor(user, queueTwoMin, queueTenMin, queueHour, queueAlerts, queueTermination, testDic = None):
"""
    Thin wrapper around the Monitor class, for intelligibility.
    Instantiating it starts all monitoring subprocesses.
:param user:
:param queueTwoMin:
:param queueTenMin:
:param queueHour:
:param queueAlerts:
:param queueTermination:
:param testDic:
:return:
"""
Monitor(user, queueTwoMin, queueTenMin, queueHour, queueAlerts, queueTermination, testDic = testDic)
class Monitor(object):
"""
Class that handles all the monitoring
Subprocess are created for each monitored website, to handle different check intervals
"""
def __init__(self, user, queueTwoMin, queueTenMin, queueHour, queueAlerts, queueTermination, testDic = None):
"""
        Init; at the end it calls monitorAll(), which starts the monitoring processes.
:param user:
:param queueTwoMin:
:param queueTenMin:
:param queueHour:
:param queueAlerts:
:param queueTermination:
:param testDic:
"""
# Start Time, keep track of the elapsed time
self.originalTime = time.time()
# User that uses the monitoring app, must exist !
self.user = user
# Queue to transmit all data
self.queueTwoMin = queueTwoMin
self.queueTenMin = queueTenMin
self.queueHour = queueHour
self.queueAlerts = queueAlerts
# Queue for termination
self.queueTermination = queueTermination
# Alert Storage, to check whether raised alert are to be sent
self.alertsDic = {}
if testDic:
self.alertsDic = testDic
self.mailer = mailSender.MailSender(mailSender.mailrecipient)
# Start monitoring
self.monitorAll()
def monitorAll(self):
"""
Starts all subprocesses for each website
:return:
"""
websites = self.user.mySites.values()
# subprocesses to get the requests logs
self.processes = [Process(target=self.monitorOne, args=(website,)) for website in websites]
for process in self.processes:
process.daemon = True
for process in self.processes:
process.start()
for process in self.processes:
process.join()
return
def _terminateAll(self):
"""
Terminate all processes.
Need exception handling as it can be called several times
:return:
"""
# Termination of all processes
try :
for process in self.processes:
process.terminate()
except AttributeError:
pass
return
def monitorOne(self,website):
"""
Monitoring for each website
:param website:
:return:
"""
checkInterval = website.checkInterval
time.sleep(checkInterval)
        while self.queueTermination.empty():
startSubProcess = time.time()
# todo define timeout for requests
try :
req = requests.get(website.url, timeout=checkInterval)
reqCode = req.status_code
reqTime = req.elapsed
# Generic to handle all kind of http exceptions
# Possible enhancement
except Exception:
continue
# unix epoch time good for comparison
currentTime = time.time()
website.log[currentTime] = {'code': reqCode, 'responseTime': reqTime}
# 2 mins
twoMinsDic = self.getTimeframedData(website, 120, currentTime=currentTime)
self.queueTwoMin.put(twoMinsDic)
# 10 mins
tenMinsDic = self.getTimeframedData(website, 600, currentTime=currentTime)
self.queueTenMin.put(tenMinsDic)
# 1 hour
hourDic = self.getTimeframedData(website, 3600, currentTime=currentTime)
self.queueHour.put(hourDic)
endSubProcess = time.time()
# Wait for the next check
try:
time.sleep(checkInterval-(endSubProcess-startSubProcess))
except ValueError:
pass
# Terminate all processes
self._terminateAll()
return
# todo suppress old logs
    def getTimeframedData(self, website, timeframe, currentTime=None):
        """
        Get all indicator data for a given timeframe.
        If the timeframe is 2 minutes, also check for alerts.
        :param website:
        :param timeframe:
        :param currentTime:
        :return:
        """
        # avoid an early-bound default argument: compute "now" at call time
        if currentTime is None:
            currentTime = time.time()
        timeList = list(website.log.keys())
# inside the dic from most recent to most ancient
# reverse order
# list of time of requests
inFrame = []
# getting the times within the timeframe
for listind in range(len(timeList)):
if (currentTime-timeList[len(timeList)-1-listind] <= timeframe):
inFrame.append(timeList[len(timeList)-1-listind])
# Indicators
# Max
maxTime = self.computeMaxResponseTime(website, inFrame)
# Avg
avgTime = self.computeAvgResponsetime(website, inFrame)
# Availability
availability = self.computeAvailability(website, inFrame)
# Status
status = self.computeStatus(website, currentTime)
# Alert checking with 120 timeframe
if (timeframe == 120):
self.checkForIsDownAlert(website= website, availability= availability)
self.checkForIsUpAlert(website=website, availability=availability)
return {'website': website, 'frame': timeframe,'time': currentTime, 'indicators': {'maxTime': maxTime, 'avgTime': avgTime, 'availability': availability, 'status': status}}
def computeMaxResponseTime(self, website, inFrame):
"""
Indicator n1
:param website:
:param inFrame:
:return:
"""
maxTime = 0
for timeOfReq in inFrame:
if website.log[timeOfReq]['responseTime'] > timedelta(seconds=maxTime):
maxTime = self.timedeltaToFloat(website.log[timeOfReq]['responseTime'])
return maxTime
def computeAvgResponsetime(self,website, inFrame):
"""
Indicator n2
:param website:
:param inFrame:
:return:
"""
avgTime = 0
for timeOfReq in inFrame:
avgTime += self.timedeltaToFloat(website.log[timeOfReq]['responseTime'])
avgTime = avgTime / len(inFrame)
return avgTime
def computeAvailability(self, website, inFrame):
"""
Indicator n3
:param website:
:param inFrame:
:return:
"""
availability = 0
for timeReq in inFrame:
# All 2XX response codes
if website.log[timeReq]['code'] // 100 == 2:
availability += 1
availability = availability / len(inFrame)
return availability
def computeStatus(self, website, time):
"""
Indicator n4, last response status
:param website:
:param time:
:return:
"""
return website.log[time]['code']
def checkForIsDownAlert(self, website, availability):
"""
Check for a isDown Alert.
:param website:
:param availability:
:return:
"""
checkTime = time.time()
# Verify that the system has been running for longer than 2 minutes
if (checkTime-self.originalTime >= 120):
if availability < 0.8:
# website already alerted, check if the alert is gone -> value : None
if website.name in self.alertsDic:
if not self.alertsDic[website.name]:
alert = {'website': website.name, 'time': time.time(), 'availability': availability, 'status': 'DOWN'}
self.alertsDic[website.name] = alert
self.mailer.sendAlert(alert)
self.queueAlerts.put(alert)
# no alert for this website before, no check
else :
alert = {'website': website.name, 'time': time.time(), 'availability': availability, 'status': 'DOWN'}
self.alertsDic[website.name] = alert
self.mailer.sendAlert(alert)
self.queueAlerts.put(alert)
return
def checkForIsUpAlert(self, website, availability):
"""
Check for is up Alert
:param website:
:param availability:
:return:
"""
checkTime = time.time()
# Verify that the system has been running for longer than 2 minutes
if (checkTime - self.originalTime >= 120):
# Verify that the system has been alerted by the site
if website.name in self.alertsDic:
# Verify that the UP alert wasn't already sent
if self.alertsDic[website.name]:
if availability > 0.8 :
alert = {'website': website.name, 'time': time.time(), 'availability': availability,
'status': 'UP'}
self.alertsDic[website.name] = None
self.mailer.sendAlert(alert)
self.queueAlerts.put(alert)
return
    def timedeltaToFloat(self, time_d):
        """
        Transforms a timedelta into a float number of seconds, for readability on the display
        :param time_d:
        :return:
        """
        # dividing a timedelta by a one-second timedelta yields the total duration in
        # fractional seconds; the previous component-wise sum counted it three times over
        return time_d / timedelta(seconds=1)
|
import datetime
from urllib.parse import urljoin
from pathlib import Path
import requests
from twstock import stock
# example fetch url
# https://www.twse.com.tw/exchangeReport/MI_INDEX?response=html&date=20200327&type=ALL
class MyFetcher(stock.BaseFetcher):
REPORT_URL = urljoin(stock.TWSE_BASE_URL, 'exchangeReport/MI_INDEX')
def fetch_daily(self, response_type: str='html', type: str='ALL'):
current_date = datetime.date.today()
params = {'response': response_type, 'date': current_date.strftime('%Y%m%d'), 'type': type}
        # retry up to five times; `data` stays empty if every attempt fails
        data = b''
        for _ in range(5):
            try:
                r = requests.get(self.REPORT_URL, params=params)
            except requests.exceptions.RequestException as e:
                print("Get {url} failed: {error}".format(url=self.REPORT_URL, error=e))
                continue
            else:
                data = r.content
                break
        if data != b'':
try:
save_path = current_date.strftime('%Y%m')
print(save_path)
if not Path(save_path).exists():
Path(save_path).mkdir(parents=True,exist_ok=True)
with open('{}/DailyReport_{}.html'.format(save_path, current_date.strftime('%Y%m%d')), 'wb') as f:
f.write(data)
except EnvironmentError as e:
print(e)
if __name__ == "__main__":
fetcher = MyFetcher()
fetcher.fetch_daily()
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from . import Request
from pygithub3.resources.users import User
class List(Request):
uri = 'repos/{user}/{repo}/collaborators'
resource = User
class Is_collaborator(Request):
uri = 'repos/{user}/{repo}/collaborators/{collaborator}'
class Add(Request):
uri = 'repos/{user}/{repo}/collaborators/{collaborator}'
class Delete(Request):
uri = 'repos/{user}/{repo}/collaborators/{collaborator}'
|
'''
Author: ZHAO Zinan
Created: 26. April 2019
357. Count Numbers with Unique Digits
'''
class Solution:
def countNumbersWithUniqueDigits(self, n: int) -> int:
if n > 10:
return 0
digit = [9, 9, 8, 7, 6, 5, 4, 3, 2, 1]
answer = 1
product = 1
for i in range(n):
product *= digit[i]
answer += product
return answer
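# Quick sanity checks; the expected values follow from the digit-counting
# recurrence implemented above:
if __name__ == "__main__":
    assert Solution().countNumbersWithUniqueDigits(0) == 1
    assert Solution().countNumbersWithUniqueDigits(1) == 10
    assert Solution().countNumbersWithUniqueDigits(2) == 91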
|
from datetime import datetime, timedelta
import pandas as pd
from specusticc.configs_init.model.preprocessor_config import (
PreprocessorConfig,
DateRange,
)
from specusticc.data_loading.loaded_data import LoadedData
from specusticc.data_preprocessing.data_set import DataSet
from specusticc.data_preprocessing.input_data_preprocessor import InputDataPreprocessor
from specusticc.data_preprocessing.output_data_preprocessor import (
OutputDataPreprocessor,
)
from specusticc.data_preprocessing.preprocessed_data import PreprocessedData
class DataPreprocessor:
def __init__(self, config: PreprocessorConfig):
self.config = config
        self.input: dict
        self.output: dict
        self.context: dict
        self.input_df: pd.DataFrame
        self.output_df: pd.DataFrame
        self.context_df: pd.DataFrame
self.preprocessed_data: PreprocessedData = PreprocessedData()
def get_data(self) -> PreprocessedData:
return self.preprocessed_data
def preprocess_data(self, data: LoadedData):
self.input = data.input
self.output = data.output
self.context = data.context
self._limit_columns()
self._merge_data_dicts()
self._filter_by_dates()
self._limit_context_dates_by_input_dates()
self._reshape_data_to_neural_network()
def _limit_columns(self):
for ticker, df in self.input.items():
self.input[ticker] = df[self.config.input_columns]
for ticker, df in self.output.items():
self.output[ticker] = df[self.config.output_columns]
for ticker, df in self.context.items():
self.context[ticker] = df[self.config.context_columns]
def _merge_data_dicts(self):
self.input_df = self._dict_to_merged_dataframe(self.input)
self.output_df = self._dict_to_merged_dataframe(self.output)
self.context_df = self._dict_to_merged_dataframe(self.context)
@staticmethod
def _dict_to_merged_dataframe(data: dict) -> pd.DataFrame:
unified_df = pd.DataFrame(columns=["date"])
for ticker, df in data.items():
df.columns = [
ticker + "_" + str(col) for col in df.columns if col != "date"
] + ["date"]
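            # note: this renaming assumes "date" is the last column of each ticker
            # frame; otherwise the new labels would be misaligned with the data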
unified_df = unified_df.merge(
df, left_on="date", right_on="date", how="outer"
)
unified_df = (
unified_df.sort_values(by=["date"])
.interpolate()
.fillna(1.0)
.reset_index(drop=True)
)
return unified_df
def _filter_by_dates(self):
self.train_ioc = {
"input": _filter_history_by_dates(self.input_df, self.config.train_date),
"output": _filter_history_by_dates(self.output_df, self.config.train_date),
"context": _filter_history_by_dates(
self.context_df, self.config.train_date
),
}
self.test_iocs = []
for date_range in self.config.test_dates:
test_ioc = {
"input": _filter_history_by_dates(self.input_df, date_range),
"output": _filter_history_by_dates(self.output_df, date_range),
"context": _filter_history_by_dates(self.context_df, date_range),
}
self.test_iocs.append(test_ioc)
def _limit_context_dates_by_input_dates(self):
self.train_ioc["context"] = self.train_ioc["context"][
self.train_ioc["context"]["date"].isin(self.train_ioc["input"]["date"])
]
for test_ioc in self.test_iocs:
test_ioc["context"] = test_ioc["context"][
test_ioc["context"]["date"].isin(test_ioc["input"]["date"])
]
def _reshape_data_to_neural_network(self):
ph = self.preprocessed_data
data2i = InputDataPreprocessor(self.config)
data2o = OutputDataPreprocessor(self.config)
data_set = DataSet()
data_set.input = data2i.transform_input(self.train_ioc["input"])
data_set.context = data2i.transform_input(self.train_ioc["context"])
(
data_set.output,
data_set.output_scaler,
data_set.output_columns,
data_set.output_dates,
) = data2o.transform_output(self.train_ioc["output"])
ph.train_set = data_set
for test_ioc in self.test_iocs:
data_set = DataSet()
data_set.input = data2i.transform_input(test_ioc["input"])
data_set.context = data2i.transform_input(test_ioc["context"])
(
data_set.output,
data_set.output_scaler,
data_set.output_columns,
data_set.output_dates,
) = data2o.transform_output(test_ioc["output"])
ph.test_sets.append(data_set)
self.preprocessed_data = ph
def _filter_history_by_dates(df: pd.DataFrame, dates: DateRange) -> pd.DataFrame:
if dates is None:
return df
from_date = dates.from_date
to_date = dates.to_date
first_available_date = df.iloc[0].date
last_available_date = df.iloc[-1].date
if from_date < first_available_date:
from_index = 0
else:
from_index = _get_closest_date_index(df, from_date)
    if to_date > last_available_date:
        # take everything up to and including the last available row
        to_index = len(df)
else:
to_index = _get_closest_date_index(df, to_date)
filtered = df.iloc[from_index:to_index]
return filtered
def _get_closest_date_index(df: pd.DataFrame, date: datetime) -> int:
closest_index = None
while closest_index is None:
        try:
            closest_index = df.index[df["date"] == date][0]
        except IndexError:
            # no row with exactly this date; step one day back and retry
            pass
date = date - timedelta(days=1)
return closest_index
|
"""
A jade extension that embed jade markup in the .html file
{% jade %}
{% endjade %}
"""
import re
import pyjade
from jinja2.ext import Extension
from pyjade.utils import process
from pyjade.ext.jinja import Compiler
from jinja2 import (TemplateSyntaxError, nodes)
begin_tag_rx = r'\{%\-?\s*jade.*?%\}'
end_tag_rx = r'\{%\-?\s*endjade\s*\-?%\}'
begin_tag_m = re.compile(begin_tag_rx)
end_tag_m = re.compile(end_tag_rx)
def convert(text, filename=None):
return process(text, filename=filename, compiler=Compiler)
class TemplateIndentationError(TemplateSyntaxError): pass
class JadeTagExtension(Extension):
tags = set(['jade'])
def _get_lineno(self, source):
matches = re.finditer(r"\n", source)
if matches:
return len(tuple(matches))
return 0
def preprocess(self, source, name, filename=None):
ret_source = ''
start_pos = 0
while True:
tag_match = begin_tag_m.search(source, start_pos)
if tag_match:
end_tag = end_tag_m.search(source, tag_match.end())
if not end_tag:
raise TemplateSyntaxError('Expecting "endjade" tag',
self._get_lineno(source[:start_pos]))
jade_source = source[tag_match.end(): end_tag.start()]
jade_source = convert(jade_source)
try:
ret_source += source[start_pos: tag_match.start()] + jade_source
except TemplateIndentationError as e:
raise TemplateSyntaxError(e.message, e.lineno, name=name, filename=filename)
except TemplateSyntaxError as e:
raise TemplateSyntaxError(e.message, e.lineno, name=name, filename=filename)
start_pos = end_tag.end()
else:
ret_source += source[start_pos:]
break
return ret_source
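# Rough usage sketch (assumes jinja2 and pyjade are installed; the template
# string is made up for illustration):
#   from jinja2 import Environment
#   env = Environment(extensions=[JadeTagExtension])
#   tmpl = env.from_string("{% jade %}\np Hello\n{% endjade %}")
#   print(tmpl.render())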
|
"""
Initial migration
"""
from django.db import migrations, models
class Migration(migrations.Migration):
"""
Initial migration for CCXCon model
"""
dependencies = [
]
operations = [
migrations.CreateModel(
name='CCXCon',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(unique=True, db_index=True)),
('oauth_client_id', models.CharField(max_length=255)),
('oauth_client_secret', models.CharField(max_length=255)),
('title', models.CharField(max_length=255)),
],
),
]
|
# author: kagemeka
# created: 2019-11-07 11:52:43(JST)
# import collections
# import math
# import string
import bisect
import sys
# import re
# import itertools
# import statistics
# import functools
# import operator
def main():
n, m, x, *a = (int(i) for i in sys.stdin.read().split())
th_x = bisect.bisect_left(a, x)
cost = min(th_x, m - th_x)
print(cost)
if __name__ == "__main__":
# execute only if run as a script
main()
|
from pylab import *
xvalues, yvalues = meshgrid(arange(-5, 5.5, 0.05), arange(-5, 5.5, 0.05))
zvalues = sin(sqrt(xvalues**2 + yvalues**2))
imshow(zvalues)
show()
|
"""Here we got commands with custom callback"""
from pathlib import Path
from typing import Tuple
import bcrypt
import typer
from passman import config, db
from passman.utils import TabulateData
def callback() -> bool:
typer.secho("⚠️ This command requires logging in first.", fg="yellow")
name = typer.prompt("Name")
password = typer.prompt("Password", hide_input=True)
check = bcrypt.checkpw(password.encode("utf-8"), config["password"].encode("utf-8"))
if name == config["name"] and check:
return True
typer.secho("❌ Wrong owner credits were entered, exiting...", fg="red")
raise typer.Exit()
# All commands below should require logging in first.
# We achieve this by overriding the default callback.
app = typer.Typer(
name="log",
callback=callback,
no_args_is_help=True,
help="A set of commands that require logging in before work"
)
@app.command(name="save")
def save_password(network: str, email: str, content: str) -> None:
"""saves user data (network, email, password)"""
db.add(network, email, content)
typer.secho("✅ Inserted the data successfully.", fg="green")
@app.command(name="update")
def update_password(
user_id: str,
credentials: Tuple[str, str, str] = typer.Argument(
..., help="New network name, email and password"
)
):
"""update any user profile with the given ID
credentials argument is used to replace the old data.
"""
db.update(user_id, *credentials)
typer.secho(f"✅ Updated password #{user_id} successfully.", fg="green")
@app.command(name="delete")
def delete_password(
user_id: str = typer.Argument(..., help="ID of the user you want to delete")
) -> None:
"""delete a row of user data depending on what ID you provide"""
db.remove(user_id)
typer.secho(f"✅ Deleted password #{user_id} successfully.", fg="green")
def return_data() -> str:
table = TabulateData()
table.set_columns(["id", "network", "email", "content", "saved_at"])
results = db.push("SELECT * FROM passwords;").fetchall()
table.set_rows(results)
return table.render()
@app.command()
def show() -> None:
"""shows user data in a pretty-formatted table"""
typer.echo(return_data())
@app.command(name="export")
def export_data(path: str) -> None:
"""extracts all the user data into `passwords.txt` file"""
try:
with open(f"{Path.home()}/{path}/passwords.txt", "w") as f:
f.write(return_data())
typer.secho("✅ Exported all of your passwords successfully.", fg="green")
except FileNotFoundError as e:
typer.secho(f"❌ Something went wrong: {e}", fg="red")
|
import os
import itertools
import io
class Utils():
def output_file_path(self, srctotarget_file, targettosrc_file):
source_set = set()
source_trans = []
for filename in itertools.chain.from_iterable(
(srctotarget_file, ['..'], targettosrc_file)):
filename_set, filename_trans = os.path.basename(filename).split('.')[:2]
source_set.add(filename_set)
source_trans.append(filename_trans)
source_set.discard('')
if len(source_set) > 1:
raise RuntimeError
output_filename = '.'.join(
itertools.chain.from_iterable(([source_set.pop()], source_trans)))
return output_filename
def cmp_files(self, result, refer, output_object):
refer_file = io.open(refer)
refer_data = refer_file.read()
refer_file.close()
        try:
            result_data = output_object.getvalue()
        except AttributeError:
            # output_object is not an in-memory buffer; fall back to the result file
            with io.open(result) as result_file:
                result_data = result_file.read()
self.assertEqual(result_data, refer_data, result)
|
from pathlib import Path
from datetime import datetime
import json
import tensorflow as tf
import gen
VAL2017_FOLDER_PATH = Path("/home/mbernardi/extra/async/ipcv/sem_3/deep_learning/labs/5/val2017")
class Case:
def __init__(self, model, batch_size, num_batches, num_epochs,
optimizer, loss, metrics, gen_params, notes):
"""
Represents a training trial.
Contains a model, all hyperparameters, a description, etc.
"""
# ID of case
self.id = datetime.now().isoformat(timespec="seconds")
# Samples per batch
self.batch_size = batch_size
# Number of batches per epoch
self.num_batches = num_batches
# Number of epochs
self.num_epochs = num_epochs
# Optimizer and loss, use Keras objects and not strings
self.optimizer = optimizer
self.loss = loss
# Metrics, list of Keras objects
self.metrics = metrics
# History of the training, empty for now
self.history = []
# Results of each metric evaluation, empty for now
self.eval = []
# Parameters of the data generator. Can only contain serializable things
self.gen_params = gen_params
# Model
self.model = model
self.model.compile(optimizer=self.optimizer, loss=self.loss,
metrics=self.metrics)
# Notes
self.notes = notes
def set_history(self, history):
"""
Save in this object the result of the history of the fit() of the model.
Takes the History object returned by fit()
"""
self.history = history.history
def set_eval(self, evaluation):
"""
        Save in this object the result of the evaluation of the model.
Takes the object returned by evaluate()
"""
self.eval = dict(zip(self.model.metrics_names, evaluation))
def save_description(self, json_path):
"""
Saves a JSON with a description of the case.
"""
# Dictionaries with configuration of layers
layers_config = [l.get_config() for l in self.model.layers]
# Save summary as string
summary = []
self.model.summary(print_fn=lambda x: summary.append(x))
summary = "\n".join(summary)
data = {
"id": self.id,
"batch_size": self.batch_size,
"num_batches": self.num_batches,
"num_epochs": self.num_epochs,
"optimizer": str(self.optimizer.get_config()),
"loss": str(self.loss.get_config()),
"metrics": [str(m.get_config()) for m in self.metrics],
"history": self.history,
"eval": self.eval,
"gen_params": self.gen_params,
"notes": self.notes,
"layers_config": layers_config,
"model_summary": summary,
}
with open(json_path, "w") as f:
json.dump(data, f)
def train_case(case, cases_path):
case_path = cases_path / case.id
case_path.mkdir(parents=True)
checkpoint_path = case_path / "cp.ckpt"
# Create a callback that saves the model's weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
save_weights_only=True,
verbose=1
)
train_dataset = tf.data.Dataset.from_generator(
lambda: gen.image_generator(
batch_size=case.batch_size,
num_batches=case.num_batches,
shuffle=True,
params=case.gen_params,
evaluation=False,
),
output_signature=(
tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.float32),
tf.TensorSpec(shape=(None, None, None, 1), dtype=tf.float32),
)
)
val_dataset = tf.data.Dataset.from_generator(
lambda: gen.image_generator(
batch_size=case.batch_size,
num_batches=case.num_batches,
shuffle=False,
params=case.gen_params,
evaluation=False,
),
output_signature=(
tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.float32),
tf.TensorSpec(shape=(None, None, None, 1), dtype=tf.float32),
)
)
history = case.model.fit(
train_dataset,
validation_data=val_dataset,
epochs=case.num_epochs,
verbose=1,
callbacks=[cp_callback],
)
case.set_history(history)
eval_dataset = tf.data.Dataset.from_generator(
lambda: gen.image_generator(
batch_size=16,
num_batches=10,
shuffle=False,
params=case.gen_params,
evaluation=True,
),
output_signature=(
tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.float32),
tf.TensorSpec(shape=(None, None, None, 1), dtype=tf.float32),
)
)
case.set_eval(case.model.evaluate(eval_dataset))
case.model.save(case_path)
case.save_description(case_path / "case.json")
def simple_fcn():
"""
Define a simple FCN model
"""
# Define model
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(
8, (3, 3),
# dilation_rate=2,
activation='relu', padding='same',
))
model.add(tf.keras.layers.MaxPooling2D((2, 2), padding="same"))
model.add(tf.keras.layers.Conv2D(
8, (3, 3),
# dilation_rate=2,
activation='relu', padding='same',
))
model.add(tf.keras.layers.MaxPooling2D((2, 2), padding="same"))
model.add(tf.keras.layers.Conv2D(
8, (3, 3),
# dilation_rate=2,
activation='relu', padding='same',
))
model.add(tf.keras.layers.MaxPooling2D((2, 2), padding="same"))
model.add(tf.keras.layers.Conv2D(
8, (3, 3),
# dilation_rate=2,
activation='relu', padding='same',
))
model.add(tf.keras.layers.MaxPooling2D((2, 2), padding="same"))
model.add(tf.keras.layers.Conv2DTranspose(
8, (3, 3), strides=2,
# dilation_rate=2,
activation='sigmoid', padding='same',
))
model.add(tf.keras.layers.Conv2DTranspose(
8, (3, 3), strides=2,
# dilation_rate=2,
activation='sigmoid', padding='same',
))
model.add(tf.keras.layers.Conv2DTranspose(
8, (3, 3), strides=2,
# dilation_rate=2,
activation='sigmoid', padding='same',
))
model.add(tf.keras.layers.Conv2DTranspose(
1, (3, 3), strides=2,
# dilation_rate=2,
activation='sigmoid', padding='same',
))
return model
def u_net(input_size=(128, 128, 3), n_filters=32, n_classes=3):
"""
Combine both encoder and decoder blocks according to the U-Net research paper
Return the model as output
Code taken from https://github.com/VidushiBhatia/U-Net-Implementation
"""
def encoder_block(inputs, n_filters=32, dropout_prob=0.3, max_pooling=True):
"""
This block uses multiple convolution layers, max pool, relu activation to create an architecture for learning.
Dropout can be added for regularization to prevent overfitting.
The block returns the activation values for next layer along with a skip connection which will be used in the decoder
"""
# Add 2 Conv Layers with relu activation and HeNormal initialization using TensorFlow
# Proper initialization prevents from the problem of exploding and vanishing gradients
# 'Same' padding will pad the input to conv layer such that the output has the same height and width (hence, is not reduced in size)
conv = tf.keras.layers.Conv2D(n_filters,
3, # Kernel size
activation='relu',
padding='same',
kernel_initializer='HeNormal')(inputs)
conv = tf.keras.layers.Conv2D(n_filters,
3, # Kernel size
activation='relu',
padding='same',
kernel_initializer='HeNormal')(conv)
# Batch Normalization will normalize the output of the last layer based on the batch's mean and standard deviation
conv = tf.keras.layers.BatchNormalization()(conv, training=False)
# In case of overfitting, dropout will regularize the loss and gradient computation to shrink the influence of weights on output
if dropout_prob > 0:
conv = tf.keras.layers.Dropout(dropout_prob)(conv)
# Pooling reduces the size of the image while keeping the number of channels same
# Pooling has been kept as optional as the last encoder layer does not use pooling (hence, makes the encoder block flexible to use)
# Below, Max pooling considers the maximum of the input slice for output computation and uses stride of 2 to traverse across input image
if max_pooling:
next_layer = tf.keras.layers.MaxPooling2D(pool_size = (2,2))(conv)
else:
next_layer = conv
# skip connection (without max pooling) will be input to the decoder layer to prevent information loss during transpose convolutions
skip_connection = conv
return next_layer, skip_connection
def decoder_block(prev_layer_input, skip_layer_input, n_filters=32):
"""
Decoder Block first uses transpose convolution to upscale the image to a bigger size and then,
merges the result with skip layer results from encoder block
Adding 2 convolutions with 'same' padding helps further increase the depth of the network for better predictions
The function returns the decoded layer output
"""
# Start with a transpose convolution layer to first increase the size of the image
up = tf.keras.layers.Conv2DTranspose(
n_filters,
(3,3), # Kernel size
strides=(2,2),
padding='same')(prev_layer_input)
# Merge the skip connection from previous block to prevent information loss
merge = tf.keras.layers.concatenate([up, skip_layer_input], axis=3)
# Add 2 Conv Layers with relu activation and HeNormal initialization for further processing
# The parameters for the function are similar to encoder
conv = tf.keras.layers.Conv2D(n_filters,
3, # Kernel size
activation='relu',
padding='same',
kernel_initializer='HeNormal')(merge)
conv = tf.keras.layers.Conv2D(n_filters,
3, # Kernel size
activation='relu',
padding='same',
kernel_initializer='HeNormal')(conv)
return conv
# Input size represent the size of 1 image (the size used for pre-processing)
inputs = tf.keras.layers.Input(input_size)
# Encoder includes multiple convolutional mini blocks with different maxpooling, dropout and filter parameters
    # Observe that the filters are increasing as we go deeper into the network which will increase the number of channels of the image
cblock1 = encoder_block(inputs, n_filters,dropout_prob=0, max_pooling=True)
cblock2 = encoder_block(cblock1[0],n_filters*2,dropout_prob=0, max_pooling=True)
cblock3 = encoder_block(cblock2[0], n_filters*4,dropout_prob=0, max_pooling=True)
cblock4 = encoder_block(cblock3[0], n_filters*8,dropout_prob=0.3, max_pooling=True)
cblock5 = encoder_block(cblock4[0], n_filters*16, dropout_prob=0.3, max_pooling=False)
# Decoder includes multiple mini blocks with decreasing number of filters
# Observe the skip connections from the encoder are given as input to the decoder
# Recall the 2nd output of encoder block was skip connection, hence cblockn[1] is used
ublock6 = decoder_block(cblock5[0], cblock4[1], n_filters * 8)
ublock7 = decoder_block(ublock6, cblock3[1], n_filters * 4)
ublock8 = decoder_block(ublock7, cblock2[1], n_filters * 2)
ublock9 = decoder_block(ublock8, cblock1[1], n_filters)
# Complete the model with 1 3x3 convolution layer (Same as the prev Conv Layers)
# Followed by a 1x1 Conv layer to get the image to the desired size.
# Observe the number of channels will be equal to number of output classes
conv9 = tf.keras.layers.Conv2D(n_filters,
3,
activation='relu',
padding='same',
kernel_initializer='he_normal')(ublock9)
conv10 = tf.keras.layers.Conv2D(n_classes, 1, activation='sigmoid', padding='same')(conv9)
# Define the model
model = tf.keras.Model(inputs=inputs, outputs=conv10)
return model
def train(cases_path):
import tensorflow as tf
metrics = [
tf.keras.metrics.BinaryCrossentropy(from_logits=False),
tf.keras.metrics.BinaryAccuracy(threshold=0.5),
tf.keras.metrics.Precision(thresholds=0.5),
tf.keras.metrics.Recall(thresholds=0.5),
tf.keras.metrics.MeanAbsoluteError(),
tf.keras.metrics.MeanSquaredError(),
]
for size in [128, 256]:
# model = u_net(input_size=(None,None,3), n_filters=16, n_classes=1)
model = simple_fcn()
case = Case(
model=model,
batch_size=16,
num_batches=100,
num_epochs=300,
            optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
metrics=metrics,
gen_params={
# Dataset: Background and ball images
"folder_path": str(VAL2017_FOLDER_PATH),
"obj_path": str(Path("./res/ball_2_transparent.png")),
# Shape of object in ground truth, "rect" or "ellipse"
"ground_truth_shape": "ellipse",
# Needed divisibility of the width and height of images. Depends
# in amount of downsampling
"divisibility": 32,
# Size of images, make divisible by previous parameter or
# otherwise padding will be added.
# Used in training dataset but also in validation dataset during
# training, but not during evaluation.
"train_val_img_size": (size, size),
# Modifications to object
"ball_rotate": True,
"ball_size_range": (20, 70),
"ball_brightness": (0.4, 1),
},
notes="",
)
train_case(case, cases_path)
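# Hypothetical entry point (not in the original script): run all configured
# cases, writing checkpoints and JSON descriptions under ./cases/<case id>/.
if __name__ == "__main__":
    train(Path("cases"))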
|
"""Scraper for Connecticut Appellate Court
CourtID: connctapp
Court Short Name: Connappct.
Author: Asadullah Baig<asadullahbeg@outlook.com>
Date created: 2014-07-11
"""
from datetime import date
from juriscraper.opinions.united_states.state import conn
class Site(conn.Site):
def __init__(self):
super(Site, self).__init__()
self.crawl_date = date.today()
self.url = 'http://www.jud.ct.gov/external/supapp/archiveAROap{year}.htm'.format(
year=self.crawl_date.strftime("%y"))
self.court_id = self.__module__
|
from django.contrib import admin
from .models import (
ConsumptionForm, ConsumptionProduct, UnitEntry, WeightConsumptionProduct,
WeightEntry,
)
class WeightConsumptionProductInline(admin.StackedInline):
model = WeightConsumptionProduct
extra = 0
class UnitEntryInline(admin.TabularInline):
model = UnitEntry
class WeightEntryInline(admin.TabularInline):
model = WeightEntry
@admin.register(ConsumptionProduct)
class ConsumptionProductAdmin(admin.ModelAdmin):
inlines = [
WeightConsumptionProductInline,
]
@admin.register(ConsumptionForm)
class ConsumptionFormAdmin(admin.ModelAdmin):
date_hierarchy = 'completed_at'
list_display = ('__str__', 'completed_at')
raw_id_fields = ('event', 'completed_by')
inlines = [
UnitEntryInline,
WeightEntryInline,
]
|
"""This module defines a HooksManager class for handling (de)installation of decorators."""
import sys
from typing import Optional
from functools import partial
from metapandas.util import vprint, friendly_symbol_name, snake_case, mangle
class HooksManager:
"""A hooks class."""
@classmethod
def _generate_hook_flag_varname(cls):
"""Generate a variable name for keeping track of the installation of the hook decorators."""
return "_{}_INSTALLED".format(snake_case(cls.__name__).upper())
@classmethod
def apply_hooks(
cls,
obj,
decorator_function,
hooks_dict,
flag_var: Optional[str] = None,
mangled_prefix: str = "",
mangled_suffix: str = "_original",
) -> bool:
"""Apply hooks to obj using decorator_function.
Parameters
----------
obj: Any
A mutable python module, class or function to decorate.
decorator_function: Callable
The decorator function to apply.
hooks_dict: Dict[str, Dict[str, Any]]
A dictionary of obj properties to modify with each
entry defining a set of kwargs to use for the decorator_function.
flag_var: str or None
A handle to journal the hook installation/deinstallation.
        mangled_prefix: str
The prefix to use for a new handle to the original decorated property.
mangled_suffix: str
The suffix to use for a new handle to the original decorated property.
Returns
-------
bool
Indicator of whether hooks were successfully installed.
"""
flag_var = flag_var or cls._generate_hook_flag_varname()
# only apply decorators if not already done so
# this prevents clobbering the original methods when called multiple times
applied = False
def not_found(obj_name, method_name, decorator_function, *args, **kwargs):
raise AttributeError(
"Unable to decorate {obj_name}.{method_name} with {decorator_function}"
"".format(**locals())
)
if not getattr(obj, flag_var, None):
for method_name, decorator_kwargs in hooks_dict.items():
obj_name = friendly_symbol_name(obj)
mangled_name = mangle(
prefix=mangled_prefix, name=method_name, suffix=mangled_suffix
)
if not hasattr(obj, method_name):
vprint(
"Unable to decorate {obj_name}.{method_name} with {decorator_function}"
"".format(**locals()),
file=sys.stderr,
)
original_func = getattr(
obj,
method_name,
partial(not_found, obj_name, method_name, decorator_function),
)
setattr(
obj,
method_name,
decorator_function(original_func, **decorator_kwargs),
)
setattr(obj, mangled_name, original_func)
vprint("Applied hook for {obj_name}.{method_name}".format(**locals()))
# mark as installed
setattr(obj, flag_var, True)
applied = True
return applied
@classmethod
def remove_hooks(
cls,
obj,
hooks_dict,
flag_var: Optional[str] = None,
mangled_prefix: str = "",
mangled_suffix: str = "_original",
):
"""Remove hooks from obj.
Parameters
----------
obj: Any
A mutable python module, class or function to decorate.
hooks_dict: Dict[str, Dict[str, Any]]
A dictionary of obj modified properties.
flag_var: str or None
A handle to journal the hook installation/deinstallation.
        mangled_prefix: str
The prefix used for the original decorated property.
mangled_suffix: str
The suffix used for the original decorated property.
Returns
-------
bool
Indicator of whether hooks were successfully uninstalled.
"""
flag_var = flag_var or cls._generate_hook_flag_varname()
# only remove decorators if needed
applied = False
if getattr(obj, flag_var, None):
for method_name in hooks_dict.keys():
mangled_name = mangle(
prefix=mangled_prefix, name=method_name, suffix=mangled_suffix
)
                setattr(obj, method_name, getattr(obj, mangled_name))
                try:
                    delattr(obj, mangled_name)
                except AttributeError:
                    setattr(obj, mangled_name, None)
# mark as uninstalled
try:
delattr(obj, flag_var)
except AttributeError:
# can't delete as a class-level variable, so set to False instead
setattr(obj, flag_var, False)
applied = True
return applied
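# A minimal usage sketch (assumption: not part of metapandas itself). It wraps
# a toy class method with a logging decorator via apply_hooks and then restores
# the original with remove_hooks; all names below are illustrative only.
if __name__ == "__main__":
    def log_calls(func, label=""):
        def wrapper(*args, **kwargs):
            print("calling", label or func.__name__)
            return func(*args, **kwargs)
        return wrapper

    class Target:
        def greet(self):
            return "hello"

    HooksManager.apply_hooks(Target, log_calls, {"greet": {"label": "Target.greet"}})
    print(Target().greet())   # logs the call, then returns "hello"
    HooksManager.remove_hooks(Target, {"greet": {}})
    print(Target().greet())   # original, undecorated method again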
|
# Standard library imports
import os
from tempfile import NamedTemporaryFile
from uuid import uuid4
from itertools import islice
# Third party imports
import pandas as pd
def df_to_table(df,
table,
write_disposition='WRITE_EMPTY',
blocking=True):
"""Upload a Pandas DataFrame to Google BigQuery
Args:
df (DataFrame): The Pandas DataFrame to be uploaded.
table (google.cloud.bigquery.Table): BigQuery table object.
write_disposition (str): Either 'WRITE_EMPTY', 'WRITE_TRUNCATE', or
'WRITE_APPEND'; the default is 'WRITE_EMPTY'.
blocking (bool): Set to False if you don't want to block until the job
is complete.
Returns:
google.cloud.bigquery.Job: The file upload job object. If you have set
blocking=False, this can be used to check for job completion.
"""
# Two annoyances here:
# 1) df.to_csv() requires a non binary mode file handle, whereas
# table.upload_from_file() requires a binary mode file handle, so
# we can't reuse the same file handle in read/write mode.
# 2) Windows won't allow reading from a temporary file whilst it's
# still open (see robfraz/gbq-pandas issue #2), so we can't use
# context handlers to auto-close (and therefore delete) the temporary
# file that we write to.
writebuf = NamedTemporaryFile(mode='w',
encoding='UTF-8',
prefix="df_to_table_",
suffix=".csv",
delete=False) # robfraz/gbq-pandas issue #2
try:
df.to_csv(writebuf, index=False, encoding='UTF-8')
writebuf.flush()
writebuf.close()
with open(writebuf.name, mode='rb') as readbuf:
job = table.upload_from_file(readbuf,
encoding='UTF-8',
source_format='CSV',
skip_leading_rows=1,
create_disposition='CREATE_IF_NEEDED',
write_disposition=write_disposition)
finally:
os.remove(writebuf.name)
if blocking:
job.result()
return job
def query_to_df(sql, client):
"""Run a Google BigQuery query, and return the result in a Pandas Dataframe
The query must be a single SQL statement
Args:
sql (str): A string containing a single SQL statement.
client (google.cloud.bigquery.Client): BigQuery client object.
    Returns:
DataFrame: A Pandas DataFrame containing the result of the query.
"""
job = client.run_async_query(str(uuid4()), sql)
job.use_legacy_sql = False
result = job.result()
return table_to_df(result.destination)
def table_to_df(table, limit=None):
"""Download a table from Google BigQuery into a dataframe, with optional row limit
Args:
table (google.cloud.bigquery.Table): BigQuery table object.
limit (None|int): The default is limit=None (i.e. all rows in table); set to
zero to get an empty DataFrame with the column names set, or a positive
number to limit the maximum number of rows fetched into the DataFrame.
Returns:
DataFrame: A Pandas DataFrame containing the table data.
"""
if limit and limit < 0:
limit = None
table.reload()
return pd.DataFrame(data=list(islice(table.fetch_data(), 0, limit)),
columns=[column.name for column in table.schema])
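# Illustrative usage only (assumes the legacy google-cloud-bigquery client API
# used above and valid credentials; the project name is a placeholder).
if __name__ == "__main__":
    from google.cloud import bigquery
    client = bigquery.Client(project="my-project")
    print(query_to_df("SELECT 1 AS answer", client))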
|
# -*- coding: utf-8 -*-
import requests
import os
def download(url, full_file, overwrite=True):
    """Download url to full_file; skip the download if the file exists and overwrite is False."""
    if not overwrite and os.path.isfile(full_file):
        return 1
    # get request
    response = requests.get(url)
    # write the payload in binary mode
    with open(full_file, "wb") as file:
        file.write(response.content)
    return 0
|
from rest_framework import viewsets, status
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
class NewsViewSetMixin(viewsets.ModelViewSet):
"""Manage objects in the database"""
serializer_class = None
queryset = None
permission_classes = (IsAuthenticatedOrReadOnly,)
def perform_create(self, serializer):
serializer.save(author=self.request.user)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if self.request.user == instance.author:
self.perform_destroy(instance)
return Response(status=status.HTTP_204_NO_CONTENT)
return Response(status=status.HTTP_304_NOT_MODIFIED)
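# A hedged sketch of how a concrete viewset would use the mixin (the model and
# serializer below are hypothetical, not part of this project):
#
#     class ArticleViewSet(NewsViewSetMixin):
#         serializer_class = ArticleSerializer
#         queryset = Article.objects.all()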
|
import dask
import dask.distributed
import datetime
import itertools
import logging
import os
import time
import pandas as pd
from collections import OrderedDict
from dscribe.descriptors import CoulombMatrix as CoulombMatrixDscribe
from ml4chem.data.preprocessing import Preprocessing
from ml4chem.data.serialization import dump, load
from ml4chem.atomistic.features.base import AtomisticFeatures
from ml4chem.utils import get_chunks, convert_elapsed_time
logger = logging.getLogger()
class CoulombMatrix(AtomisticFeatures, CoulombMatrixDscribe):
"""Coulomb Matrix features
Parameters
----------
filename : str
Path to save database. Note that if the filename exists, the features
will be loaded without being recomputed.
preprocessor : str
Use some scaling method to preprocess the data. Default None.
batch_size : int
Number of data points per batch to use for training. Default is None.
scheduler : str
The scheduler to be used with the dask backend.
overwrite : bool
If overwrite is set to True, ml4chem will not try to load existing
databases. Default is True.
save_preprocessor : str
Save preprocessor to file.
Notes
-----
This class computes Coulomb matrix features using the dscribe module. As
mentioned in ML4Chem's paper, we avoid duplication of efforts and this
module serves as a demonstration.
"""
NAME = "CoulombMatrix"
@classmethod
def name(cls):
"""Returns name of class"""
return cls.NAME
def __init__(
self,
preprocessor=None,
batch_size=None,
filename="features.db",
scheduler="distributed",
save_preprocessor="ml4chem",
overwrite=True,
**kwargs
):
super(CoulombMatrix, self).__init__()
CoulombMatrixDscribe.__init__(self, permutation="none", flatten=False, **kwargs)
self.batch_size = batch_size
self.filename = filename
self.preprocessor = preprocessor
self.scheduler = scheduler
self.overwrite = overwrite
self.save_preprocessor = save_preprocessor
# Let's add parameters that are going to be stored in the .params json
# file.
self.params = OrderedDict()
self.params["name"] = self.name()
# This is a very general way of not forgetting to save variables
_params = vars()
# Delete useless variables
delete = [
"self",
"scheduler",
"overwrite",
"k",
"v",
"value",
"keys",
"batch_size",
"__class__",
]
for param in delete:
try:
del _params[param]
except KeyError:
# In case the variable does not exist we just pass.
pass
for k, v in _params.items():
if v is not None:
self.params[k] = v
def calculate(self, images=None, purpose="training", data=None, svm=False):
"""Calculate the features per atom in an atoms objects
Parameters
----------
        images : dict
Hashed images using the Data class.
purpose : str
The supported purposes are: 'training', 'inference'.
data : obj
data object
svm : bool
Whether or not these features are going to be used for kernel
methods.
Returns
-------
feature_space : dict
A dictionary with key hash and value as a list with the following
            structure: {'hash': [('H', [vector])]}
reference_space : dict
A reference space useful for SVM models.
"""
client = dask.distributed.get_client()
logger.info(" ")
logger.info("Featurization")
logger.info("=============")
now = datetime.datetime.now()
logger.info("Module accessed on {}.".format(now.strftime("%Y-%m-%d %H:%M:%S")))
# FIXME the block below should become a function.
if os.path.isfile(self.filename) and self.overwrite is False:
logger.warning("Loading features from {}.".format(self.filename))
logger.info(" ")
svm_keys = [b"feature_space", b"reference_space"]
data = load(self.filename)
data_hashes = list(data.keys())
image_hashes = list(images.keys())
if image_hashes == data_hashes:
# Check if both lists are the same.
return data
elif any(i in image_hashes for i in data_hashes):
                # Check if any of the stored hashes match the requested images.
_data = {}
for hash in image_hashes:
_data[hash] = data[hash]
return _data
if svm_keys == list(data.keys()):
feature_space = data[svm_keys[0]]
reference_space = data[svm_keys[1]]
return feature_space, reference_space
initial_time = time.time()
# Verify that we know the unique element symbols
if data.unique_element_symbols is None:
logger.info("Getting unique element symbols for {}".format(purpose))
unique_element_symbols = data.get_unique_element_symbols(
images, purpose=purpose
)
unique_element_symbols = unique_element_symbols[purpose]
logger.info("Unique chemical elements: {}".format(unique_element_symbols))
elif isinstance(data.unique_element_symbols, dict):
unique_element_symbols = data.unique_element_symbols[purpose]
logger.info("Unique chemical elements: {}".format(unique_element_symbols))
# we make the features
preprocessor = Preprocessing(self.preprocessor, purpose=purpose)
preprocessor.set(purpose=purpose)
# We start populating computations to get atomic features.
logger.info("")
logger.info("Embarrassingly parallel computation of atomic features...")
stacked_features = []
atoms_symbols_map = [] # This list is used to reconstruct images from atoms.
if self.batch_size is None:
self.batch_size = data.get_total_number_atoms()
chunks = get_chunks(images, self.batch_size, svm=svm)
for chunk in chunks:
images_ = OrderedDict(chunk)
intermediate = []
for image in images_.items():
key, image = image
atoms_symbols_map.append(image.get_chemical_symbols())
# Use .create() class method from dscribe.
_features = dask.delayed(self.create)(image)
intermediate.append(_features)
intermediate = client.compute(intermediate, scheduler=self.scheduler)
stacked_features += intermediate
del intermediate
# scheduler_time = time.time() - initial_time
# dask.distributed.wait(stacked_features)
logger.info("")
if self.preprocessor is not None:
raise NotImplementedError
else:
scaled_feature_space = []
atoms_symbols_map = [client.scatter(chunk) for chunk in atoms_symbols_map]
stacked_features = client.scatter(stacked_features, broadcast=True)
for image_index, symbols in enumerate(atoms_symbols_map):
features = client.submit(
self.stack_features, *(symbols, image_index, stacked_features)
)
scaled_feature_space.append(features)
scaled_feature_space = client.gather(scaled_feature_space)
# Clean
del stacked_features
# Restack images
feature_space = []
if svm and purpose == "training":
for i, image in enumerate(images.items()):
restacked = client.submit(
self.restack_image, *(i, image, scaled_feature_space, svm)
)
feature_space.append(restacked)
elif svm is False and purpose == "training":
for i, image in enumerate(images.items()):
restacked = client.submit(
self.restack_image, *(i, image, scaled_feature_space, svm)
)
feature_space.append(restacked)
else:
try:
for i, image in enumerate(images.items()):
restacked = client.submit(
self.restack_image, *(i, image, scaled_feature_space, svm)
)
feature_space.append(restacked)
except UnboundLocalError:
# scaled_feature_space does not exist.
for i, image in enumerate(images.items()):
restacked = client.submit(
self.restack_image, *(i, image, feature_space, svm)
)
feature_space.append(restacked)
feature_space = client.gather(feature_space)
if svm and purpose == "training":
# FIXME This might need to be improved
logger.info("Building array with reference space.")
hashes, reference_space = list(zip(*feature_space))
del hashes
reference_space = list(itertools.chain.from_iterable(reference_space))
logger.info("Finished reference space.")
feature_space = OrderedDict(feature_space)
fp_time = time.time() - initial_time
h, m, s = convert_elapsed_time(fp_time)
logger.info(
"Featurization finished in {} hours {} minutes {:.2f}"
" seconds.".format(h, m, s)
)
if svm and purpose == "training":
client.restart() # Reclaims memory aggressively
preprocessor.save_to_file(preprocessor, self.save_preprocessor)
if self.filename is not None:
logger.info("features saved to {}.".format(self.filename))
data = {"feature_space": feature_space}
data.update({"reference_space": reference_space})
dump(data, filename=self.filename)
self.feature_space = feature_space
self.reference_space = reference_space
return self.feature_space, self.reference_space
elif svm is False and purpose == "training":
client.restart() # Reclaims memory aggressively
preprocessor.save_to_file(preprocessor, self.save_preprocessor)
if self.filename is not None:
logger.info("features saved to {}.".format(self.filename))
dump(feature_space, filename=self.filename)
self.feature_space = feature_space
return self.feature_space
else:
self.feature_space = feature_space
return self.feature_space
def stack_features(self, symbols, image_index, stacked_features):
"""Stack features """
features = list(zip(symbols, stacked_features[image_index].result()))
return features
def to_pandas(self):
"""Convert features to pandas DataFrame"""
return pd.DataFrame.from_dict(self.feature_space, orient="index")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import http.server
import urllib.parse
import socketserver
import rauth
import threading
import requests
import json
VERIFIER = None
def parse_address(address):
"""Create a tuple containing a string giving the address, and an
integer port number.
"""
base, port = address.split(':')
return (base, int(port))
def create_callback_server(server_address):
"""Create the callback server that is used to set the oauth verifier
after the request token is authorized.
"""
class CallbackHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
global VERIFIER
            params = urllib.parse.parse_qs(self.path.split('?', 1)[1],
                                           keep_blank_values=False)
VERIFIER = params['oauth_verifier'][0]
self.send_response(200)
self.end_headers()
def log_request(self, code='-', size='-'):
pass
return socketserver.TCPServer(server_address, CallbackHandler)
def get_khan_session(consumer_key, consumer_secret, username, password,
server_url='http://www.khanacademy.org',
callback_address='127.0.0.1:0'):
"""Create an authenticated Khan Academy API session using rauth OAuth 1.0 flow.
    This session gives you access to the "Login Required" calls described
in the API Explorer.
You need a consumer key and consumer secret:
http://www.khanacademy.org/api-apps/register
    You should also provide a Khan Academy username and password. Only
    coaches can access information about other users.
"""
# Create an OAuth1Service using rauth.
service = rauth.OAuth1Service(
consumer_key,
consumer_secret,
name = 'khan',
request_token_url = server_url + '/api/auth2/request_token',
access_token_url = server_url + '/api/auth2/access_token',
authorize_url = server_url + '/api/auth2/authorize',
base_url = server_url + '/api/auth2')
callback_server = create_callback_server(parse_address(callback_address))
# 1. Get a request token.
request_token, secret_request_token = service.get_request_token(
params={'oauth_callback': 'http://%s:%d/' %
callback_server.server_address})
# 2. Authorize your request token.
params = {'oauth_token': request_token, 'identifier': username,
'password': password}
handle = threading.Thread(target=callback_server.handle_request)
handle.start()
requests.post(service.authorize_url, params)
handle.join()
callback_server.server_close()
# 3. Get an access token.
session = service.get_auth_session(request_token, secret_request_token,
params={'oauth_verifier': VERIFIER})
return session
class KhanSession:
"""Khan Academy API session.
Loose wrapper class around Khan Academy rauth.OAuth1Session. If no
user credentials given, it is a dummy class around request.Session.
"""
def __init__(self, server_url='http://www.khanacademy.org',
consumer_key=None, consumer_secret=None,
username=None, password=None,
callback_address='127.0.0.1:0'):
self.server_url = server_url
if consumer_key and consumer_secret and username and password:
self.session = get_khan_session(consumer_key, consumer_secret,
username, password,
server_url, callback_address)
else:
self.session = requests.Session()
def call_api(self, rel_url, params={}):
"""Make an API call to a relative URL (e. g., `/api/v1/badges`).
Returns a parsed JSON response.
"""
resp = self.session.get(self.server_url + rel_url, params=params)
if resp.text == 'Unauthorized':
raise Exception('You are not authorized to retrieve this info.')
parsed_resp = json.loads(resp.text)
return parsed_resp
def get_user(self, userid=None, username=None, email=None):
"""Retrieve data about a user.
If no argument given, retrieves data about logged-in user. If
you are a coach, you can get data about your students.
userid is preferred over username and email, since it is the
only one of these fields that a user can't change.
Returns a dict containing information about user.
"""
params = {'userId': userid, 'username': username, 'email': email}
userinfo = self.call_api('/api/v1/user', params)
return userinfo
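# Example usage sketch (not part of the original module): an anonymous session
# can still call public endpoints; `/api/v1/badges` is used here purely as an
# example, and API credentials are required for the "Login Required" calls.
if __name__ == "__main__":
    session = KhanSession()
    badges = session.call_api('/api/v1/badges')
    print(len(badges), 'badges retrieved')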
|
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed
STORAGE_CLIENT_VERSION_MAJOR = "0"
STORAGE_CLIENT_VERSION_MINOR = "2"
STORAGE_CLIENT_VERSION_PATCH = "0"
|
#!/usr/bin/env python
""" Simple git pre-commit script for executing some linters. """
import commands
import imp
import re
import pylint.lint
def check_cpp_lint(repo_root, staged_files):
"""Runs Google's cpplint on all C++ files staged for commit,"""
cpplint = imp.load_source('cpplint', repo_root + "/devtools/cpplint.py")
for changed_file in staged_files:
if re.search(r'\.cc$', changed_file):
changed_file = repo_root + "/" + changed_file
cpplint.ProcessFile(changed_file, 0)
elif re.search(r'\.h$', changed_file):
# Change global _root variable for desired behaviour of
# cpplint's header guard. Without this change the header include
# guard would have to look like: INCLUDE_REFILL_SUBPATH_HEADER_H_.
# We want it to look like: REFILL_SUBPATH_HEADER_H_
cpplint._root = "include" #pylint: disable=W0212
changed_file = repo_root + "/" + changed_file
cpplint.ProcessFile(changed_file, 0)
cpplint._root = None #pylint: disable=W0212
if cpplint._cpplint_state.error_count: #pylint: disable=W0212
print 'Aborting commit: cpplint is unhappy.'
exit(cpplint._cpplint_state.error_count) #pylint: disable=W0212
def check_modified_after_staging(staged_files):
"""Checks if one of the staged files was modified after staging."""
_, unstaged_changes = commands.getstatusoutput('git diff --name-only')
files_changed = unstaged_changes.split("\n")
files_changed = filter(None, files_changed)
staged_files_changed = 0
for changed_file in files_changed:
if changed_file in staged_files:
print changed_file + " modified after staging"
staged_files_changed = 1
if staged_files_changed:
print "Aborting commit: Staged files modified after staging."
exit(1)
def check_python_lint(repo_root, staged_files):
"""Runs pylint on all python scripts staged for commit."""
# Dummy class for pylint related IO.
class WritableObject(object):
"dummy output stream for pylint"
def __init__(self):
self.content = []
def write(self, input_str):
"dummy write"
self.content.append(input_str)
def read(self):
"dummy read"
return self.content
    # Parse each pylint output line individually and search
    # for errors in the code.
pylint_errors = []
for changed_file in staged_files:
if re.search(r'\.py$', changed_file):
print "Running pylint on " + repo_root + "/" + changed_file
pylint_output = WritableObject()
pylint_args = ["--rcfile=" + repo_root + "/devtools/pylint.rc",
"-rn",
repo_root + "/" + changed_file]
from pylint.reporters.text import TextReporter
pylint.lint.Run(pylint_args,
reporter=TextReporter(pylint_output),
exit=False)
for output_line in pylint_output.read():
if re.search(r'^(E|C|W):', output_line):
print changed_file + ": " + output_line
pylint_errors.append(output_line)
if len(pylint_errors) > 0:
print "Pylint found errors. Terminating."
exit(len(pylint_errors))
def main():
""" Checks for staged files and executes cpplint on them. """
_, output = commands.getstatusoutput('git diff --staged --name-only')
_, repo_root = commands.getstatusoutput('git rev-parse --show-toplevel')
staged_files = output.split("\n")
    # Do not allow committing files that were modified after staging. This
# avoids problems such as forgetting to stage fixes of cpplint complaints.
check_modified_after_staging(staged_files)
# Use Google's C++ linter to check for compliance with Google style guide.
check_cpp_lint(repo_root, staged_files)
    # Use pylint to check for compliance with the TensorFlow Python style guide.
check_python_lint(repo_root, staged_files)
if __name__ == "__main__":
main()
|
import typing
from app.util import log as logging
from .executor import Executor
from .settings import Settings
from .request import Request
from .response import Response
from ..info import Info
class Plugin:
"""Base Plugin Class.
    This class defines which Executor, Settings, Request and Response classes are used.
    The methods defined here should not be overridden.
"""
Settings = Settings
Executor = Executor
Request = Request
Response = Response
def __init__(self, info: Info, path: str):
with logging.LogCall(__file__, "__init__", self.__class__):
self.info = info
self.path = path
self.logger = logging.PluginLogger(self.info.uid)
self.logger.debug("%s initialized!", self.__class__.__name__)
def execute(self, request: Request) -> Response:
with logging.LogCall(__file__, "execute", self.__class__):
res = self.Response()
try:
                executor = self.Executor(self, request)
                executor.execute()
                res.error = executor.get_error()  # pylint: disable=assignment-from-none
                if res.error:
                    res.error_text = executor.get_error_text()
                res.text = executor.get_text()
                res.points = executor.get_points()
except Exception as e:
res.set_exception(e)
return res
|
# -*- coding: utf-8 -*-
"""
Petri net simulation
------------------------
An example of petri net simulation of a chemical processes based on
code from http://www.azimuthproject.org/azimuth/show/Petri+net+programming.
:copyright: (c) 2015 by A. Kassahun.
:license: BSD.
"""
import random
import copy
# check arcs
# check initial markings
#Run simulations
def get_enabled_transitions():
enabled_list = []
for t in arcs:
enabled = True
for p, m in arcs[t]['inputs'].iteritems():
if markings[p] < m: enabled = False; break
if enabled: enabled_list += [t]
return enabled_list
def get_markings_values(markings, places):
values = []
for p in places:
values += [markings[p]]
return tuple(values)
#run simulation
def run_simulation(places, arcs, markings, steps):
results = []
for i in xrange(steps):
# find enabled transition
fire_list = get_enabled_transitions()
if fire_list:
# select random
to_fire_i = random.randrange(len(fire_list))
to_fire_t = fire_list[to_fire_i]
# fire the event
for p, m in arcs[to_fire_t]['inputs'].iteritems():
markings[p] -= m
for p, m in arcs[to_fire_t]['outputs'].iteritems():
markings[p] += m
result = [get_markings_values(markings, places), to_fire_t]
results += [result]
else:
break
return results
if __name__ == '__main__':
# initialization of the petri net
# define places, transitions and arcs
places = ['H', 'O', 'H2O']
transitions = ['Combine', 'Split']
arcs = {
'Combine': {'inputs' : {'H': 2, 'O': 1},
'outputs': {'H2O': 1}},
'Split' : {'inputs': {'H2O': 1},
'outputs': {'H': 2, 'O': 1}}
}
# set the markings
init_markings = {"H": 5, "O": 3, "H2O": 3}
#decide on the number of steps to simulate
steps = 10
# run simulation
markings = copy.deepcopy(init_markings)
results = run_simulation(places, arcs, markings, steps)
fmt_str = ' %3s '*len(places)
print fmt_str % tuple(places), 'Transitions'
print fmt_str % get_markings_values(init_markings, places), '*Init*'
for markings, trans in results:
print fmt_str % markings, trans
|
import unittest
import transaction
from pyramid import testing
from dbas.database import DBDiscussionSession
from dbas.database.discussion_model import StatementReference, Statement
from dbas.tests.utils import construct_dummy_request
from dbas.views import set_references, get_reference
class AjaxReferencesTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
self.config.include('pyramid_chameleon')
self.config.testing_securitypolicy(userid='Tobias', permissive=True)
# test every ajax method, which is not used in other classes
def test_get_references_empty(self):
request = construct_dummy_request(json_body={
'uids': [14],
'is_argument': False
})
response = get_reference(request)
self.assertIsNotNone(response)
for uid in response['data']:
self.assertTrue(len(response['data'][uid]) == 0)
self.assertTrue(len(response['text'][uid]) != 0)
def test_get_references(self):
request = construct_dummy_request(json_body={
'uids': [15],
'is_argument': False
})
response = get_reference(request)
self.assertIsNotNone(response)
for uid in response['data']:
self.assertTrue(len(response['data'][uid]) != 0)
self.assertTrue(len(response['text'][uid]) != 0)
def test_get_references_failure(self):
request = construct_dummy_request(json_body={
'uids': 'ab',
'is_argument': False
})
response = get_reference(request)
self.assertIsNotNone(response)
self.assertEqual(400, response.status_code)
def test_set_references(self):
self.config.testing_securitypolicy(userid='Tobias', permissive=True)
statement: Statement = DBDiscussionSession.query(Statement).get(17)
request = construct_dummy_request(json_body={
'statement_id': statement.uid,
'issue': statement.issues[0].uid,
'text': 'This is a source',
'ref_source': 'http://www.google.de/some_source',
})
self.assertTrue(set_references(request))
request = construct_dummy_request(json_body={
'uids': [17],
'is_argument': False
})
response = get_reference(request)
self.assertIsNotNone(response)
for uid in response['data']:
self.assertTrue(17, uid)
self.assertTrue(len(response['data'][uid]) != 0)
self.assertTrue(len(response['text'][uid]) != 0)
DBDiscussionSession.query(StatementReference).filter_by(statement_uid=17).delete()
transaction.commit()
|
from .cwr_linear import CWRLinear
from .cwr_knn import CWRkNN
CONTEXTUAL_FUNCTIONS_IMPL = {
'cwr_linear': CWRLinear,
'cwr_knn': CWRkNN,
}
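# Hypothetical helper (not part of the original module) showing how such a
# registry is typically consumed: resolve an implementation class by its key.
def get_contextual_function(name):
    try:
        return CONTEXTUAL_FUNCTIONS_IMPL[name]
    except KeyError:
        raise ValueError(f"Unknown contextual function: {name!r}")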
|
from django.db import models
from django.utils.timezone import now
class Vote(models.Model):
    user = models.ForeignKey(
        'votarkUser.VotarkUser',
        on_delete=models.SET_NULL,
        null=True,
        blank=False
    )
    versus = models.ForeignKey(
        'versus.Versus',
        on_delete=models.SET_NULL,
        null=True,
        blank=False
    )
    date = models.DateTimeField(default=now)
    winner = models.BooleanField(null=False, blank=False)  # True for Post1 - False for Post2

    class Meta:
        # a user may vote at most once per versus
        unique_together = (('user', 'versus'),)
|
from .ttypes import *
from .constants import *
from .Hbase import *
|
import json
name = {
'mr': 'Miller-Rabin',
    'mt': 'Mersenne Twister',
'bbs': 'Blum Blum Shub',
}
with open('test_keys_seed.json') as f:
    keys = json.load(f)
file_out = 'latex_table.txt'
str_out = ""
for combination, numbers in keys.items():
test_name, prng_name = combination.split('|')
test_name = name[test_name]
prng_name = name[prng_name]
for b_size, n_time in numbers.items():
str_out += f"{test_name} & {prng_name} & {b_size} & "
str_out += "\\begin{tabular}[c]{@{}l@{}}"
str_out += "\\\ ".join(
[ str(n_time['key'])[s:s+15]
for s in range(0,len(str(n_time['key'])),15)
]
)
str_out += "\\end{tabular} "
str_out += f"& {n_time['time']} & \\\ "
str_out += "\hline\n"
print(str_out)
|
import numpy
n, m = map(int, input().split())
a = numpy.array([input().split() for _ in range(n)], dtype=int)
numpy.set_printoptions(legacy='1.13')
print(numpy.mean(a, axis=1))
print(numpy.var(a, axis=0))
print(numpy.std(a, axis=None))
|
import decimal
from dataclasses import asdict, dataclass
from math import isclose
from typing import List, Optional, Union
import numpy as np
from lhotse.utils import Seconds
@dataclass
class Array:
"""
The Array manifest describes a numpy array that is stored somewhere: it might be
in an HDF5 file, a compressed numpy file, on disk, in the cloud, etc.
Array helps abstract away from the actual storage mechanism and location by
providing a method called :meth:`.Array.load`.
We don't assume anything specific about the array itself: it might be
a feature matrix, an embedding, network activations, posteriors, alignment, etc.
However, if the array has a temporal component, it is better to use the
:class:`.TemporalArray` manifest instead.
Array manifest can be easily created by calling
:meth:`lhotse.features.io.FeaturesWriter.store_array`, for example::
>>> from lhotse import NumpyHdf5Writer
>>> ivector = np.random.rand(300)
>>> with NumpyHdf5Writer('ivectors.h5') as writer:
... manifest = writer.store_array('ivec-1', ivector)
"""
# Storage type defines which features reader type should be instantiated
# e.g. 'lilcom_files', 'numpy_files', 'lilcom_hdf5'
storage_type: str
# Storage path is either the path to some kind of archive (like HDF5 file) or a path
# to a directory holding files with feature matrices (exact semantics depend on storage_type).
storage_path: str
# Storage key is either the key used to retrieve an array from an archive like HDF5,
# or the name of the file in a directory (exact semantics depend on the storage_type).
storage_key: str
# Shape of the array once loaded into memory.
shape: List[int]
@property
def ndim(self) -> int:
return len(self.shape)
def to_dict(self) -> dict:
return asdict(self)
@classmethod
def from_dict(cls, data: dict) -> "Array":
return cls(**data)
def load(self) -> np.ndarray:
"""
Load the array from the underlying storage.
"""
from lhotse.features.io import get_reader
# noinspection PyArgumentList
storage = get_reader(self.storage_type)(self.storage_path)
# Load and return the array from the storage
return storage.read(self.storage_key)
@dataclass
class TemporalArray:
"""
The :class:`.TemporalArray` manifest describes a numpy array that is stored somewhere:
it might be in an HDF5 file, a compressed numpy file, on disk, in the cloud, etc.
Like :class:`.Array`, it helps abstract away from the actual storage mechanism
and location by providing a method called :meth:`.TemporalArray.load`.
Unlike with :class:`.Array`, we assume that the array has a temporal dimension.
It allows us to perform partial reads for sub-segments of the data if the underlying
``storage_type`` allows that.
:class:`.TemporalArray` manifest can be easily created by calling
:meth:`lhotse.features.io.FeaturesWriter.store_array` and specifying arguments
related to its temporal nature; for example::
>>> from lhotse import NumpyHdf5Writer
>>> alignment = np.random.randint(500, size=131)
>>> assert alignment.shape == (131,)
>>> with NumpyHdf5Writer('alignments.h5') as writer:
... manifest = writer.store_array(
... key='ali-1',
... value=alignment,
... frame_shift=0.04, # e.g., 10ms frames and subsampling_factor=4
... temporal_dim=0,
... start=0
... )
"""
# Manifest describing the base array.
array: Array
# Indicates which dim corresponds to the time dimension:
# e.g., PCM audio samples indexes, feature frame indexes, chunks indexes, etc.
temporal_dim: int
# The time interval (in seconds, or fraction of a second) between the start timestamps
# of consecutive frames.
frame_shift: Seconds
# Information about the time range of the features.
# We only need to specify start, as duration can be computed from
# the shape, temporal_dim, and frame_shift.
start: Seconds
@property
def shape(self) -> List[int]:
return self.array.shape
@property
def ndim(self) -> int:
return len(self.shape)
@property
def duration(self) -> Seconds:
return self.shape[self.temporal_dim] * self.frame_shift
@property
def end(self) -> Seconds:
return self.start + self.duration
def to_dict(self) -> dict:
return asdict(self)
@classmethod
def from_dict(cls, data: dict) -> "TemporalArray":
array = Array.from_dict(data.pop("array"))
return cls(array=array, **data)
def load(
self,
start: Optional[Seconds] = None,
duration: Optional[Seconds] = None,
) -> np.ndarray:
"""
Load the array from the underlying storage.
Optionally perform a partial read along the ``temporal_dim``.
:param start: when specified, we'll offset the read by ``start`` after
converting it to a number of frames based on ``self.frame_shift``.
:param duration: when specified, we'll limit the read to a number of
frames equivalent to ``duration`` under ``self.frame_shift``.
:return: A numpy array or a relevant slice of it.
"""
from lhotse.features.io import get_reader
# noinspection PyArgumentList
storage = get_reader(self.array.storage_type)(self.array.storage_path)
left_offset_frames, right_offset_frames = 0, None
if start is None:
start = self.start
# In case the caller requested only a sub-span of the features, trim them.
# Left trim
if start < self.start - 1e-5:
raise ValueError(
f"Cannot load array starting from {start}s. "
f"The available range is ({self.start}, {self.end}) seconds."
)
if not isclose(start, self.start):
left_offset_frames = seconds_to_frames(
start - self.start,
frame_shift=self.frame_shift,
max_index=self.shape[self.temporal_dim],
)
# Right trim
if duration is not None:
right_offset_frames = left_offset_frames + seconds_to_frames(
duration,
frame_shift=self.frame_shift,
max_index=self.shape[self.temporal_dim],
)
# Load and return the features (subset) from the storage
return storage.read(
self.array.storage_key,
left_offset_frames=left_offset_frames,
right_offset_frames=right_offset_frames,
)
def seconds_to_frames(
duration: Seconds, frame_shift: Seconds, max_index: Optional[int] = None
) -> int:
"""
Convert time quantity in seconds to a frame index.
It takes the shape of the array into account and limits
the possible indices values to be compatible with the shape.
"""
assert duration >= 0
index = int(
decimal.Decimal(
# 8 is a good number because cases like 14.49175 still work correctly,
# while problematic cases like 14.49999999998 are typically breaking much later than 8th decimal
# with double-precision floats.
round(duration / frame_shift, ndigits=8)
).quantize(0, rounding=decimal.ROUND_HALF_UP)
)
if max_index is not None:
return min(index, max_index)
return index
def deserialize_array(raw_data: dict) -> Union[Array, TemporalArray]:
"""
Figures out the right manifest type to use for deserialization.
:param raw_data: The result of calling ``.to_dict`` on :class:`.Array`
or :class:`.TemporalArray`.
:return an :class:`.Array.` or :class:`.TemporalArray` instance.
"""
if "array" in raw_data:
return TemporalArray.from_dict(raw_data)
if "shape" in raw_data:
return Array.from_dict(raw_data)
raise ValueError(f"Cannot deserialize array from: {raw_data}")
def pad_array(
array: np.ndarray,
temporal_dim: int,
frame_shift: Seconds,
offset: Seconds,
padded_duration: Seconds,
pad_value: Union[int, float],
) -> np.ndarray:
"""
Pad a numpy array guided by duration based constraints.
Example::
>>> arr = np.array([1, 2, 3])
>>> pad_array(arr, temporal_dim=0, frame_shift=0.1,
... offset=0.1, padded_duration=0.6, pad_value=0)
array([0, 1, 2, 3, 0, 0])
:param array: array to be padded.
:param temporal_dim: time dimension index.
:param frame_shift: time interval (seconds) between the starts of consecutive frames.
:param offset: how much padding goes before the array (seconds).
:param padded_duration: expected duration of array after padding (seconds).
:param pad_value: value used for padding.
:return: a padded array.
"""
array_frames = array.shape[temporal_dim]
total_frames = seconds_to_frames(padded_duration, frame_shift=frame_shift)
total_padding_frames = total_frames - array_frames
assert total_padding_frames >= 0, (
f"Invalid argument values for pad_array: array with shape {array.shape} cannot be "
f"padded to padded_duration of {padded_duration} as it results in smaller temporal_dim "
f"of {total_frames} frames (under frame_shift={frame_shift})."
)
if total_padding_frames == 0:
return array
left_pad_frames = seconds_to_frames(offset, frame_shift=frame_shift)
right_pad_frames = total_padding_frames - left_pad_frames
# Automatically fix edge cases where we're off by one padding frame.
# This usually happens when duration of padding is a bit more than
# padding_num_frames * frame_shift, but the duration of unpadded cut
# is a bit less than cut_num_frames * frame_shift.
if right_pad_frames == -1:
right_pad_frames = 0
left_pad_frames -= 1
assert right_pad_frames >= 0, "Something went wrong..."
pad_width = [
(left_pad_frames, right_pad_frames) if dim == temporal_dim else (0, 0)
for dim, size in enumerate(array.shape)
]
return np.pad(
array, pad_width=pad_width, mode="constant", constant_values=pad_value
)
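# A small round-trip sketch (not part of lhotse itself): an Array manifest can
# be serialized to a plain dict and recovered with deserialize_array().
if __name__ == "__main__":
    manifest = Array(
        storage_type="numpy_files",
        storage_path="data/arrays",
        storage_key="utt1.npy",
        shape=[300],
    )
    assert deserialize_array(manifest.to_dict()) == manifest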
|
"""
PyZMQ REQ socket client example module.
This is the client script for the zmq_server_rep_pthread program (@ examples/c/zmq_demo).
"""
import time
import zmq
import struct
def main():
    port = 5555
context = zmq.Context()
print("Connecting to server...")
client = context.socket(zmq.REQ)
with client.connect(f"tcp://localhost:{port}"):
for i in range(10):
# Send request
# Assuming little-endian in C side
req_type = 2
req_val = 42 + i
req = struct.pack('<BI', req_type, req_val)
client.send(req)
# Receive response
rep = client.recv()
rep_val_a, rep_val_b = struct.unpack('<QB', rep)
print(f"Received response [val_a: {rep_val_a}, val_b: {rep_val_b}]")
if __name__ == "__main__":
main()
|
# copy from https://github.com/bitsauce/Carla-ppo/blob/master/CarlaEnv/planner.py
import carla
import numpy as np
import random
from numba.typed import List
from agents.navigation.local_planner import RoadOption
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.tools.misc import vector
# cache waypoint for entire lifecycle of application
_route_waypoints = None
_transformed_waypoint_routes = None
def carla_to_vector(obj):
    ''' Turn a Carla object that has coordinate-like attributes into an `np.ndarray` '''
if isinstance(obj, carla.Location) or isinstance(obj, carla.Vector3D):
return np.array([obj.x, obj.y, obj.z])
elif isinstance(obj, carla.Rotation):
return np.array([obj.pitch, obj.yaw, obj.roll])
else:
raise TypeError(f'obj must be `Location`, `Vector3D` or `Rotation`, not {type(obj)}')
class ManualRoutePlanner:
def __init__(self, start_waypoint, end_waypoint, world, resolution=2.0, plan=None,
initial_checkpoint=0, use_section=False, enable=True, debug_route_waypoint_len=None):
        ''' debug_route_waypoint_len is purely for testing purposes '''
global _route_waypoints, _transformed_waypoint_routes
self._vehicle = None
self._world = world
self._map = world.get_map()
self.plan = plan
self._sampling_radius = resolution
self._min_distance = self._sampling_radius - 1 if self._sampling_radius > 1 else 1
self.start_waypoint = start_waypoint
self.end_waypoint = end_waypoint
self.lap_count = 0
self._repeat_count = 0
self._repeat_count_threshold = 5
self._checkpoint_frequency = 25
self._checkpoint_waypoint_index = initial_checkpoint
self._start_waypoint_index = self._checkpoint_waypoint_index
self._current_waypoint_index = self._checkpoint_waypoint_index
self._intermediate_checkpoint_waypoint_index = self._checkpoint_waypoint_index + self._checkpoint_frequency
if enable:
_route_waypoints = self._compute_route_waypoints()
_transformed_waypoint_routes = List(self._transform_waypoints(_route_waypoints))
self.spawn_transform = _route_waypoints[self._checkpoint_waypoint_index][0].transform
self._in_random_spawn_point = False
# for section checkpoint
if use_section:
route_waypoint_len = len(_route_waypoints) if debug_route_waypoint_len is None else debug_route_waypoint_len
# (start, end, checkpoint frequency)
self.sections_indexes = [(0, 140, 35), (143, 173, 30), (176, route_waypoint_len - 1, 35)]
self.sections_start = [s[0] for s in self.sections_indexes]
self.sections_end = [s[1] for s in self.sections_indexes]
self.sections_frequency = [s[2] for s in self.sections_indexes]
self.sections_ends = [140, 141, 142, 173, 174, 175, 591]
if initial_checkpoint < self.sections_end[0]:
frequency = self.sections_indexes[0][2]
elif initial_checkpoint < self.sections_end[1]:
frequency = self.sections_indexes[1][2]
elif initial_checkpoint < self.sections_end[2]:
frequency = self.sections_indexes[2][2]
self._intermediate_checkpoint_waypoint_index = self._checkpoint_waypoint_index + frequency
if self._intermediate_checkpoint_waypoint_index > route_waypoint_len - 1:
self._intermediate_checkpoint_waypoint_index = 0
def set_vehicle(self, vehicle):
        ''' Set internal state for the current vehicle; must be called in `reset` '''
self._vehicle = vehicle
if not self._in_random_spawn_point:
self._start_waypoint_index = self._checkpoint_waypoint_index
self._current_waypoint_index = self._checkpoint_waypoint_index
self.lap_count = 0
def run_step(self):
waypoint_routes_len = len(_route_waypoints)
current_transform = self._vehicle.get_transform()
waypoint_index = self._current_waypoint_index
for _ in range(waypoint_routes_len):
# check if we passed next waypoint along the route
next_waypoint_index = waypoint_index + 1
wp, _ = _route_waypoints[next_waypoint_index % waypoint_routes_len]
dot = np.dot(carla_to_vector(wp.transform.get_forward_vector())[:2],
carla_to_vector(current_transform.location - wp.transform.location)[:2])
# did we pass the waypoint?
if dot > 0.0:
# if passed, go to next waypoint
waypoint_index += 1
else:
break
self._current_waypoint_index = waypoint_index % waypoint_routes_len
# update checkpoint
# self._checkpoint_waypoint_index = (self._current_waypoint_index // self._checkpoint_frequency) * self._checkpoint_frequency
if not self._in_random_spawn_point:
self._update_checkpoint_by_section()
            # update here because the vehicle is spawned before set_vehicle is called, and spawning requires a spawn point
self.spawn_transform = _route_waypoints[self._checkpoint_waypoint_index][0].transform
self.lap_count = (waypoint_index - self._start_waypoint_index) / len(_route_waypoints)
return _transformed_waypoint_routes[self._current_waypoint_index:]
def get_route_waypoints(self):
''' Return list of (waypoint, RoadOption) '''
return _route_waypoints
def get_transformed_route_waypoints(self):
return _transformed_waypoint_routes
def _compute_route_waypoints(self):
"""
Returns a list of (waypoint, RoadOption)-tuples that describes a route
starting at start_waypoint, ending at end_waypoint.
start_waypoint (carla.Waypoint):
Starting waypoint of the route
end_waypoint (carla.Waypoint):
Destination waypoint of the route
resolution (float):
            Resolution, or length, of the steps between waypoints
(in meters)
plan (list(RoadOption) or None):
If plan is not None, generate a route that takes every option as provided
            in the list at every intersection, in the given order.
(E.g. set plan=[RoadOption.STRAIGHT, RoadOption.LEFT, RoadOption.RIGHT]
to make the route go straight, then left, then right.)
If plan is None, we use the GlobalRoutePlanner to find a path between
start_waypoint and end_waypoint.
"""
if self.plan is None:
# Setting up global router
grp = GlobalRoutePlanner(self._map, self._sampling_radius)
# Obtain route plan
route = grp.trace_route(
self.start_waypoint.transform.location,
self.end_waypoint.transform.location)
else:
# Compute route waypoints
route = []
current_waypoint = self.start_waypoint
for i, action in enumerate(self.plan):
# Generate waypoints to next junction
wp_choice = [current_waypoint]
while len(wp_choice) == 1:
current_waypoint = wp_choice[0]
route.append((current_waypoint, RoadOption.LANEFOLLOW))
wp_choice = current_waypoint.next(self._sampling_radius)
# Stop at destination
if i > 0 and current_waypoint.transform.location.distance(self.end_waypoint.transform.location) < self._sampling_radius:
break
if action == RoadOption.VOID:
break
# Make sure that next intersection waypoints are far enough
# from each other so we choose the correct path
step = self._sampling_radius
while len(wp_choice) > 1:
wp_choice = current_waypoint.next(step)
wp0, wp1 = wp_choice[:2]
if wp0.transform.location.distance(wp1.transform.location) < self._sampling_radius:
step += self._sampling_radius
else:
break
# Select appropriate path at the junction
if len(wp_choice) > 1:
# Current heading vector
current_transform = current_waypoint.transform
current_location = current_transform.location
projected_location = current_location + \
carla.Location(
x=np.cos(np.radians(current_transform.rotation.yaw)),
y=np.sin(np.radians(current_transform.rotation.yaw)))
v_current = vector(current_location, projected_location)
direction = 0
if action == RoadOption.LEFT:
direction = 1
elif action == RoadOption.RIGHT:
direction = -1
elif action == RoadOption.STRAIGHT:
direction = 0
select_criteria = float("inf")
# Choose correct path
for wp_select in wp_choice:
v_select = vector(
current_location, wp_select.transform.location)
cross = float("inf")
if direction == 0:
cross = abs(np.cross(v_current, v_select)[-1])
else:
cross = direction * np.cross(v_current, v_select)[-1]
if cross < select_criteria:
select_criteria = cross
current_waypoint = wp_select
# Generate all waypoints within the junction
# along selected path
route.append((current_waypoint, action))
current_waypoint = current_waypoint.next(self._sampling_radius)[0]
while current_waypoint.is_intersection:
route.append((current_waypoint, action))
current_waypoint = current_waypoint.next(self._sampling_radius)[0]
assert route
# Change action 5 wp before intersection
num_wp_to_extend_actions_with = 5
action = route[0][1]
for i in range(1, len(route)):
next_action = route[i][1]
if next_action != action:
if next_action != RoadOption.LANEFOLLOW:
for j in range(num_wp_to_extend_actions_with):
route[i-j-1] = (route[i-j-1][0], route[i][1])
action = next_action
return route
def _transform_waypoints(self, waypoints):
        ''' Transform waypoints into a list of (x, y, yaw) tuples '''
return list(map(lambda wp: (wp[0].transform.location.x, wp[0].transform.location.y, wp[0].transform.rotation.yaw), waypoints))
def _update_checkpoint(self):
        ''' Checkpoint logic that encourages the agent to revisit more of the past road before trying the next portion of the route '''
idx = (self._current_waypoint_index // self._checkpoint_frequency) * self._checkpoint_frequency
if idx > self._intermediate_checkpoint_waypoint_index:
self._repeat_count += 1
if self._repeat_count >= self._repeat_count_threshold:
if self._checkpoint_waypoint_index == 0:
self._checkpoint_waypoint_index = self._intermediate_checkpoint_waypoint_index
self._intermediate_checkpoint_waypoint_index += self._checkpoint_frequency
else:
self._checkpoint_waypoint_index = 0
self._repeat_count = 0
def _update_checkpoint_by_section(self):
s1, s2, s3 = self.sections_indexes
# s[0] is start and s[1] is end of section. s[2] is checkpoint frequency
if s1[0] <= self._start_waypoint_index <= s1[1]:
start = s1[0]
end = s1[1]
frequency = s1[2]
elif s2[0] <= self._start_waypoint_index <= s2[1]:
start = s2[0]
end = s2[1]
frequency = s2[2]
else:
# s3
start = s3[0]
end = s3[1]
frequency = s3[2]
idx = (((self._current_waypoint_index - start) // frequency) * frequency) + start
if idx >= self._intermediate_checkpoint_waypoint_index:
self._repeat_count += 1
if self._repeat_count >= self._repeat_count_threshold:
if self._checkpoint_waypoint_index == start:
if self._intermediate_checkpoint_waypoint_index >= end:
self._checkpoint_waypoint_index, frequency = self._get_next_section_start_and_frequency(end)
self._intermediate_checkpoint_waypoint_index = self._checkpoint_waypoint_index + frequency
else:
self._checkpoint_waypoint_index = self._intermediate_checkpoint_waypoint_index
self._intermediate_checkpoint_waypoint_index += frequency
else:
self._checkpoint_waypoint_index = start
self._repeat_count = 0
def _get_next_section_start_and_frequency(self, end_of_section):
end_idx = self.sections_end.index(end_of_section)
next_start = self.sections_start[(end_idx + 1) % len(self.sections_start)]
next_frequency = self.sections_frequency[(end_idx + 1) % len(self.sections_frequency)]
return next_start, next_frequency
def get_random_spawn_point(self):
start_original = random.random() >= 0.4
if start_original:
self._in_random_spawn_point = False
return self._checkpoint_waypoint_index, self.spawn_transform
self._in_random_spawn_point = True
if random.random() >= 0.3 or self._checkpoint_waypoint_index in self.sections_start:
# random start in the same section
random_idx = self._checkpoint_waypoint_index + (random.randint(5, 20) // 2 * 2)
else:
# random start at any point before current checkpoint
lower_bound = 0
for start, end in zip(self.sections_start, self.sections_ends):
if start <= self._checkpoint_waypoint_index < end:
lower_bound = start
break
random_idx = random.randint(lower_bound, self._checkpoint_waypoint_index)
self._start_waypoint_index = random_idx
self._current_waypoint_index = random_idx
self.spawn_transform = _route_waypoints[random_idx][0].transform
return random_idx, self.spawn_transform
@property
def next_waypoint(self):
return _route_waypoints[(self._current_waypoint_index + 1) % len(_route_waypoints)][0]
@property
def current_waypoint(self):
return _route_waypoints[self._current_waypoint_index % len(_route_waypoints)][0]
@property
def is_end_of_section(self):
return self._current_waypoint_index in self.sections_ends
TOWN7_PLAN = [RoadOption.STRAIGHT] + [RoadOption.RIGHT] * 2 + [RoadOption.STRAIGHT] * 5
TOWN7_REVERSE_PLAN = [RoadOption.STRAIGHT] * 4 + [RoadOption.LEFT] * 2 + [RoadOption.STRAIGHT]
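# Hedged usage sketch: how this planner might be constructed for a Town07 lap.
# The spawn-point choice and the start==end (circuit) waypoints are assumptions
# for illustration, not part of the original Carla-ppo setup.
def _example_build_planner(world):
    town_map = world.get_map()
    spawn = town_map.get_spawn_points()[0]
    start_wp = town_map.get_waypoint(spawn.location)
    return ManualRoutePlanner(start_wp, start_wp, world, resolution=2.0,
                              plan=TOWN7_PLAN, use_section=True)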
|
import re
def range_parser(s):
    """Parse a string like '1,3-5,8' into a flat list of ints; ranges are inclusive."""
    result = []
    for a in s.split(','):
        nums = [int(b) for b in re.split(r'\D', a) if b]
        if len(nums) == 1:
            result.extend(nums)
        else:
            nums[1] += 1  # make the upper bound inclusive
            result.extend(range(*nums))
    return result
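# Quick sanity check (illustrative input only):
if __name__ == '__main__':
    assert range_parser('1,3-5,8') == [1, 3, 4, 5, 8]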
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021 shmilee
'''
Contains ipynb UI class.
'''
import ipywidgets
from IPython.display import display, HTML
from ..__about__ import __data_path__
from ..processors import get_processor, Processor_Names
from ..processors.lib import Processor_Lib
__all__ = ['IpynbUI', 'ScrollTool']
class IpynbUI(object):
'''
UI(User Interface) used in Jupyter notebook.
'''
__slots__ = ['path', 'parallel', 'processor', 'widgets', 'panel_widgets',
'grouplabels', 'figlabel']
def __init__(self, path, parallel='off'):
self.path = path
self.parallel = parallel
self.processor = None
names = ['%s%s' % (Processor_Lib[n][1][0], n) for n in Processor_Names]
self.widgets = dict(
processor=ipywidgets.Dropdown(
options=names,
value=names[0],
description='Processor:'),
pick=ipywidgets.Button(
description='', disabled=False,
button_style='primary', # 'success', 'info', or ''
tooltip='Pick', icon='arrow-circle-right',
layout=ipywidgets.Layout(width='5%')),
group=ipywidgets.Dropdown(
options=[None],
value=None,
description='Group:'),
figlabel=ipywidgets.Dropdown(
options=[None],
value=None,
description='Figure:',
layout=ipywidgets.Layout(left='-15px')),
plot=ipywidgets.Button(
description='', disabled=False,
button_style='primary',
tooltip='Plot', icon='paint-brush',
layout=ipywidgets.Layout(width='5%')),
terminal=ipywidgets.Output(),
panel=ipywidgets.Output(),
canvas=ipywidgets.Output(),
)
self.panel_widgets = {}
self.grouplabels = {}
self.figlabel = None
self.widgets['processor'].observe(self.init_group, 'value')
self.widgets['pick'].on_click(self.update_group)
self.widgets['group'].observe(self.update_figlabel, 'value')
self.widgets['figlabel'].observe(self.update_panel, 'value')
self.widgets['plot'].on_click(self.update_canvas)
def init_group(self, *args):
self.widgets['group'].options = [None]
self.widgets['group'].value = None
self.widgets['pick'].button_style = 'primary'
def update_group(self, *args):
with self.widgets['terminal']:
gdp = get_processor(path=self.path,
name=self.widgets['processor'].value[1:],
parallel=self.parallel)
if gdp.pckloader:
self.processor = gdp
self.grouplabels = {}
for l in gdp.availablelabels:
g = l[:l.find('/')]
if g in self.grouplabels:
self.grouplabels[g].append(l[l.find('/')+1:])
else:
self.grouplabels[g] = [l[l.find('/')+1:]]
options = sorted(self.grouplabels.keys())
self.widgets['group'].options = options
self.widgets['group'].value = options[0]
self.widgets['pick'].button_style = 'success'
def update_figlabel(self, *args):
options = self.grouplabels.get(
self.widgets['group'].value, [None])
self.widgets['figlabel'].options = options
self.widgets['figlabel'].value = options[0]
self.widgets['plot'].button_style = 'primary'
def update_panel(self, *args):
self.widgets['plot'].button_style = 'primary'
if self.widgets['figlabel'].value:
self.figlabel = '%s/%s' % (
self.widgets['group'].value, self.widgets['figlabel'].value)
with self.widgets['terminal']:
result = self.processor.export(self.figlabel, what='options')
else:
self.figlabel, result = None, None
if result:
options = dict(**result['digoptions'], **result['visoptions'])
self.panel_widgets = self.get_panel_widgets(options)
self.widgets['panel'].clear_output(wait=True)
with self.widgets['panel']:
w = list(self.panel_widgets.values())
def observer(change):
self.widgets['plot'].button_style = 'primary'
for wi in w:
wi.observe(observer, 'value')
w = [ipywidgets.HBox(w[i:i+2]) for i in range(0, len(w), 2)]
display(ipywidgets.VBox(w))
def get_panel_widgets(self, options):
controls = {}
common_kw = dict(
style={'description_width': 'initial'},
layout=ipywidgets.Layout(width='40%', margin='1% 2% auto 2%'),
disabled=False)
for k, v in options.items():
if v['widget'] in (
'IntSlider', 'FloatSlider',
'IntRangeSlider', 'FloatRangeSlider'):
controls[k] = getattr(ipywidgets, v['widget'])(
value=v['value'],
min=v['rangee'][0],
max=v['rangee'][1],
step=v['rangee'][2],
description=v['description'],
continuous_update=False,
orientation='horizontal', readout=True,
**common_kw)
elif v['widget'] in ('Dropdown', 'SelectMultiple'):
controls[k] = getattr(ipywidgets, v['widget'])(
options=v['options'],
value=v['value'],
description=v['description'],
**common_kw)
elif v['widget'] in ('Checkbox',):
controls[k] = getattr(ipywidgets, v['widget'])(
value=v['value'],
description=v['description'],
**common_kw)
else:
pass
return controls
def update_canvas(self, *args):
if self.figlabel:
figkwargs = {k: v.value for k, v in self.panel_widgets.items()}
with self.widgets['terminal']:
accfiglabel = self.processor.visplt(
self.figlabel, show=False, **figkwargs)
if accfiglabel:
self.widgets['canvas'].clear_output(wait=True)
with self.widgets['canvas']:
# print(figkwargs)
display(self.processor.visplter.get_figure(accfiglabel))
self.widgets['plot'].button_style = 'success'
else:
self.widgets['canvas'].clear_output(wait=True)
with self.widgets['canvas']:
print("No figure to plot!")
@property
def UI(self):
return display(ipywidgets.VBox([
ipywidgets.HBox([
self.widgets['processor'],
self.widgets['pick'],
self.widgets['group'],
self.widgets['figlabel'],
self.widgets['plot'],
]),
self.widgets['panel'],
self.widgets['canvas'],
]))
def clear_log(self, wait=False):
self.widgets['terminal'].clear_output(wait=wait)
@property
def log(self):
return display(self.widgets['terminal'])
class ScrollTool(object):
'''
ScrollTool.bar: scroll-head, scroll-hidecode, scroll-bottom
'''
__slots__ = ['html']
_replace_keys_ = ['scroll_head_title', 'scroll_bottom_title',
'scroll_showcode_title', 'scroll_hidecode_title']
def __init__(self):
import os
import locale
import configparser
lang = locale.getlocale()[0]
config = configparser.ConfigParser(default_section='en_US')
datapath = os.path.join(__data_path__, 'ipynb_scrollbar')
configfile = config.read(os.path.join(datapath, 'locale'))
with open(os.path.join(datapath, 'scroll_bar.css')) as fcss, \
open(os.path.join(datapath, 'scroll_bar.html')) as fhtml, \
open(os.path.join(datapath, 'scroll_bar.js')) as fjs:
css, html, js = fcss.read(), fhtml.read(), fjs.read()
for k in self._replace_keys_:
if lang in config and k in config[lang]:
v = config[lang][k]
else:
v = config['en_US'][k]
css = css.replace('{{ %s }}' % k, v)
html = html.replace('{{ %s }}' % k, v)
js = js.replace('{{ %s }}' % k, v)
self.html = HTML('%s\n%s\n%s' % (css, html, js))
@property
def bar(self):
return display(self.html)
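# Hedged usage sketch for a notebook cell (the case path below is an assumption):
#   ui = IpynbUI('/path/to/case', parallel='off')
#   ui.UI             # render the processor/group/figure widget row
#   ui.log            # display the captured terminal output
#   ScrollTool().bar  # attach the scroll helper bar to the page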
|
version = '2.0.749'
|
import os
import unittest
from tests import TestClient
class TestClientProduct(TestClient):
# for testing
test_product_group = None
the_first_productId = None
def __init__(self, *args, **kwargs):
super(TestClientProduct, self).__init__(*args, **kwargs)
self.__class__.test_product_group = os.getenv('PROCOUNTOR_PRODUCT_PRODUCT_GROUP', None)
def test_001_get_products(self):
""" get all products from API """
response = self.client.get_products()
self.assertEqual(response['status'], 200)
self.assertIsInstance(response['content']['products'], list)
self.__class__.the_first_productId = response['content']['products'][0]['id']
def test_002_get_product(self):
""" get info of one product """
response = self.client.get_product(self.__class__.the_first_productId)
self.assertEqual(response['status'], 200)
self.assertIsNotNone(response['content'])
def test_003_get_product_groups(self):
""" get product groups (by product type) """
if self.__class__.test_product_group:
response = self.client.get_product_groups(productType=self.__class__.test_product_group)
self.assertEqual(response['status'], 200)
self.assertIsInstance(response['content'], list)
if __name__ == '__main__':
unittest.main()
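# To run only this module (the dotted module path is an assumption about the
# repository layout; a valid Procountor API configuration is required):
#   PROCOUNTOR_PRODUCT_PRODUCT_GROUP=<group> python -m unittest tests.test_product -v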
|
import os
import requests
import glob
import json
import http.client
from redashAPI import RedashAPIClient
from dotenv import load_dotenv
http.client._MAXLINE = 655360 * 4
load_dotenv()
redashUrl = os.getenv('REDASH_URL', 'http://localhost:5000')
setupPayload = {
'name': os.getenv('USER_NAME', 'admin'), 'email': os.getenv('USER_EMAIL', 'admin@example.org'),
'password': os.getenv('USER_PASS', 'supersecret123'), 'security_notifications': 'y',
'org_name': os.getenv('ORG_NAME', 'organization')
}
setupHeaders = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,'
'application/signed-exchange;v=b3;q=0.9'
}
setupResp = requests.post(url=redashUrl + "/setup", data=setupPayload,
headers=setupHeaders, allow_redirects=False)
print('User created')
ctJson = {'Content-Type': 'application/json;charset=UTF-8'}
datasourceQuery = {
'options': {
'host': os.getenv('DB_HOST', 'localhost'), 'port': int(os.getenv('DB_PORT', '5432')),
'user': os.getenv('DB_USER', 'postgres'), 'password': os.getenv('DB_PASS'),
'dbname': os.getenv('DB_NAME', 'public')
},
'type': os.getenv('DB_TYPE', 'pg'), 'name': os.getenv('DATASOURCE_NAME', 'default')
}
if os.getenv('DB_SSL_MODE') is not None:
datasourceQuery['options']['sslmode'] = os.getenv('DB_SSL_MODE')
datasourceResp = requests.post(url=redashUrl + "/api/data_sources", cookies=setupResp.cookies, json=datasourceQuery,
headers=ctJson)
datasourceId = datasourceResp.json()['id']
print('Datasource created')
usersResp = requests.get(url=redashUrl + "/api/users/1",
cookies=setupResp.cookies)
apiKey = usersResp.json()['api_key']
print('Api key:', apiKey)
redash = RedashAPIClient(apiKey, redashUrl)
dashboardName = os.getenv('DASHBOARD_NAME')
dashboardResp = redash.create_dashboard(dashboardName)
dashboardId = dashboardResp.json()['id']
print('Created dashboard', dashboardName)
queriesDir = os.getenv('QRY_DIR', './')
if not queriesDir.endswith('/'):
queriesDir += '/'
for fileName in glob.iglob(queriesDir + '*.json', recursive=True):
    with open(fileName, "r") as f:
        widgetJson = f.read()
if len(widgetJson) > 0:
widget = json.loads(widgetJson)
widget['dashboard_id'] = dashboardId
widgetResp = redash.post('widgets', widget)
print('Created widget from', fileName)
for fileName in glob.iglob(queriesDir + '*.sql', recursive=True):
    with open(fileName, "r") as f:
        queryName = f.readline()[2:].strip()
        queryDescription = f.readline()[2:].strip()
        visualization = json.loads(f.readline()[2:].strip())
        widgetJson = f.readline()[2:].strip()
        query = f.read()
    widget = {}
    if len(widgetJson) > 3:
        widget = json.loads(widgetJson)
queryResp = redash.create_query(
ds_id=datasourceId, name=queryName, qry=query, desc=queryDescription)
queryId = queryResp.json()['id']
print('Created query', queryName, 'id:', queryId)
if len(visualization) > 3:
visualization['query_id'] = queryId
visResp = redash.post('visualizations', visualization)
visId = visResp.json()['id']
print('Created visualisation for', queryName,
'query. Visualization id:', visId)
redash.generate_query_results(
ds_id=datasourceId, qry=query, qry_id=queryId)
print('Generated query results for', queryName, 'query.')
    publishResp = requests.post(url="{}/queries/{}/source".format(redashUrl, queryId),
                                cookies=setupResp.cookies, headers=ctJson,
                                data={'id': queryId, 'version': queryResp.json()['version'], 'is_draft': False})
if len(widgetJson) > 3:
widget['dashboard_id'] = dashboardId
widget['visualization_id'] = visId
widgetResp = redash.post('widgets', widget)
print('Created widget for', queryName, 'query')
redash.publish_dashboard(dashboardId)
print('Published dashboard', dashboardName)
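# Assumed layout of each *.sql file, inferred from the readline() calls above
# (names and JSON fields are illustrative, not taken from a real dashboard):
#
#   -- Daily signups
#   -- Count of users created per day
#   -- {"type": "CHART", "name": "Signups", "options": {}}
#   -- {"width": 1, "options": {}, "text": ""}
#   SELECT created_at::date AS day, count(*) FROM users GROUP BY 1;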
|
from .gp_tuner import GPTuner, GPClassArgsValidator
|
from pylibpd import *
import array
import pygame
import numpy
from os import environ
BUFFERSIZE = 1024
BLOCKSIZE = 64
SCREENSIZE = (640, 480)
environ['SDL_VIDEO_CENTERED'] = '1'
pygame.init()
screen = pygame.display.set_mode(SCREENSIZE)
m = PdManager(1, 2, pygame.mixer.get_init()[0], 1)
patch = libpd_open_patch('funtest.pd', '.')
print "$0: ", patch
# this is basically a dummy since we are not actually going to read from the mic
inbuf = array.array('h', range(BLOCKSIZE))
# the pygame channel that we will use to queue up buffers coming from pd
ch = pygame.mixer.Channel(0)
# python writeable sound buffers
sounds = [pygame.mixer.Sound(numpy.zeros((BUFFERSIZE, 2), numpy.int16)) for s in range(2)]
samples = [pygame.sndarray.samples(s) for s in sounds]
rectangles = []
rectcolor = (255, 0, 0)
bg = (255, 255, 255)
rectsize = 200
def updatexy(event):
libpd_float('x', float(event.pos[1]) / SCREENSIZE[1])
libpd_float('y', float(event.pos[0]) / SCREENSIZE[0])
libpd_bang('trigger')
rectangles.append([event.pos, 0])
# we go into an infinite loop selecting alternate buffers and queueing them up
# to be played each time we run short of a buffer
selector = 0
quit = False
while not quit:
# we have run out of things to play, so queue up another buffer of data from Pd
if not ch.get_queue():
# make sure we fill the whole buffer
for x in range(BUFFERSIZE):
# let's grab a new block from Pd each time we're out of BLOCKSIZE data
if x % BLOCKSIZE == 0:
outbuf = m.process(inbuf)
# de-interlace the data coming from libpd
samples[selector][x][0] = outbuf[(x % BLOCKSIZE) * 2]
samples[selector][x][1] = outbuf[(x % BLOCKSIZE) * 2 + 1]
# queue up the buffer we just filled to be played by pygame
ch.queue(sounds[selector])
# next time we'll do the other buffer
selector = int(not selector)
for event in pygame.event.get():
        if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
quit = True
if event.type == pygame.MOUSEBUTTONDOWN:
updatexy(event)
screen.fill(bg)
delrects = []
for r in rectangles:
dr = pygame.Rect(r[0][0], r[0][1], r[1], r[1])
dr.center = r[0]
        cv = 255 * (rectsize - r[1]) // rectsize
pygame.draw.rect(screen, (255, 255 - cv, 255 - cv), dr, 2)
r[1] += 1
if r[1] >= rectsize:
delrects.append(r)
for r in delrects:
rectangles.remove(r)
pygame.display.flip()
libpd_release()
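# Note: 'funtest.pd' is assumed to expose [r x], [r y] and [r trigger] receivers
# (matching the libpd_float/libpd_bang sends in updatexy) and a stereo [dac~];
# this script only feeds control data and pulls audio blocks via PdManager.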
|