# Generated by Django 3.1 on 2020-12-02 19:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('extras', '0052_customfield_cleanup'),
]
operations = [
migrations.RenameField(
model_name='webhook',
old_name='obj_type',
new_name='content_types',
),
]
|
from copy import deepcopy
from time import time
import numpy as np
from Agents.randomAgent import RandomInvestor
from LightningSimulator import LightningSimulator, transfer_money_in_graph
from utils.common import human_format
from utils.graph_helpers import create_sub_graph_by_node_capacity
from routing.LND_routing import get_route
SIMULATOR_NUM_NODES = 100
GRAPH_DENSITY_OFFSET=50
SIMULATOR_NUM_TRANSACTIONS = 100000
INITIAL_FUNDS = 10 ** 9
SIMULATOR_TRANSFERS_MAX_AMOUNT = 5*10 ** 6
LN_DEFAULT_CHANNEL_COST = 4 * 10 ** 4
REPS=5
from tqdm import tqdm
RANDOM_MATRICES = [np.random.rand(10,10) for _ in range(1000)]
def dummy_computation_sync(num_tasks=1000):
results = dict()
for i in range(len(RANDOM_MATRICES)):
results[i] = np.random.rand(10,10).mean()
def dummy_computation_async():
pass
def get_all_routes_sync(graph):
results = dict()
for src in tqdm(graph.nodes()):
for dest in graph.nodes():
if dest != src:
results[(src, dest)] = get_route(graph, src, dest, SIMULATOR_TRANSFERS_MAX_AMOUNT)
return results
def get_all_routes_async(graph):
    results = dict()
    for src in tqdm(graph.nodes()):
        for dest in graph.nodes():
            if dest != src:
                results[(src, dest)] = get_route(graph, src, dest, SIMULATOR_TRANSFERS_MAX_AMOUNT)
    return results
def transfer_in_all_routes_sync(graph, routes):
success = 0
for key, route in routes.items():
length = transfer_money_in_graph(graph, SIMULATOR_TRANSFERS_MAX_AMOUNT,route)
if length == len(route):
success += 1
return success/float(len(routes))
def main():
graph = create_sub_graph_by_node_capacity(dump_path="../LightningGraph/old_dumps/LN_2020.05.13-08.00.01.json",
k=SIMULATOR_NUM_NODES,
highest_capacity_offset=GRAPH_DENSITY_OFFSET)
simulator = LightningSimulator(graph, num_transactions=SIMULATOR_NUM_TRANSACTIONS,
transfer_amount=SIMULATOR_TRANSFERS_MAX_AMOUNT,
other_balance_proportion=1.0)
new_node_pub_key = simulator.create_agent_node()
agent = RandomInvestor(public_key=new_node_pub_key,
initial_funds=INITIAL_FUNDS,
channel_cost=LN_DEFAULT_CHANNEL_COST)
new_edges = agent.get_channels(simulator.graph)
simulator.add_edges(new_edges)
find_routes_time = []
use_routes_time = []
print("Startint measuring time")
for rep in range(REPS):
graph_copy = deepcopy(simulator.graph)
start = time()
all_routes = get_all_routes_sync(graph_copy)
find_routes_time.append(time() - start)
start = time()
success_rate = transfer_in_all_routes_sync(graph_copy, all_routes)
use_routes_time.append(time() - start)
# print(f"Sucess_rate: {success_rate:.2f}")
find_routes_time = np.array(find_routes_time)
use_routes_time = np.array(use_routes_time)
print(f"Find all routes: avg ({find_routes_time.mean():.2f}), std ({find_routes_time.std():.2f})")
print(f"Use all routes: avg ({use_routes_time.mean():.2f}), std ({use_routes_time.std():.2f})")
if __name__ == '__main__':
main()
|
class FitError(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "FitError: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
class FitNotExist(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "FitNotExist: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
class AlphaError(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "AlphaError: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
class ProbabilityError(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "ProbabilityError: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
class ValueError(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "ValueError: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
class ValueNotExist(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "ValueNotExist: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
class DataError(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "DataError: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
class DataNotExist(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "DataNotExist: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
class ParameterNotExist(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "ParameterNotExist: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
class ParameterError(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "ParameterError: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
class DistributionNotExist(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "ParameterError: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
class DistributionError(Exception):
def __init__(self, message, line=0):
self.message = message
self.line = line
def __str__(self):
return "ParameterError: {}".format(self.message) + \
(" the line {}!".format(self.line) if self.line > 0 else "!")
|
import threading
import pyttsx3
import queue
class TTSThread(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
self.daemon = True
self.start()
def run(self):
global tts_engine
tts_engine = pyttsx3.init()
tts_engine.startLoop(False)
t_running = True
while t_running:
if self.queue.empty():
tts_engine.iterate()
else:
data = self.queue.get()
if data == "exit":
t_running = False
else:
tts_engine.say(data)
tts_engine.endLoop()
q = queue.Queue()
tts_thread = TTSThread(q)
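# Usage sketch (illustrative, not part of the original module): text pushed onto
# the queue is spoken by the background thread, and the literal string "exit"
# stops the loop.
#
#   q.put("Hello there")
#   q.put("exit")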
|
from exopy.tasks.api import (InstrumentTask)
class ResumeProgramTask(InstrumentTask):
""" Resumes a paused program.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def perform(self):
self.driver.resume()
|
#!python3
"""
Article name : Optimal Envy-Free Cake Cutting
Authors : Yuga J. Cohler, John K. Lai, David C. Parkes and Ariel D. Procaccia
Algorithm #1 : opt_piecewise_constant
Algorithm #2 : opt_piecewise_linear
Programmer: Tom Goldenberg
Since: 2020-05
"""
from fairpy import Allocation
from fairpy.agents import *
import operator
from logging import Logger
import logging
import cvxpy
import numpy as np
logger: Logger = logging.getLogger(__name__)
def opt_piecewise_constant(agents: List[Agent]) -> Allocation:
"""
algorithm for finding an optimal EF allocation when agents have piecewise constant valuations.
:param agents: a list of PiecewiseConstantAgent agents
:return: an optimal envy-free allocation
>>> alice = PiecewiseConstantAgent([15,15,0,30,30], name='alice')
>>> bob = PiecewiseConstantAgent([0,30,30,30,0], name='bob')
>>> gin = PiecewiseConstantAgent([10,0,30,0,60], name='gin')
>>> print(str(opt_piecewise_constant([alice,gin])))
alice gets {(0.0, 1.0),(1.0, 2.0),(3.0, 4.0)} with value 60.
gin gets {(2.0, 3.0),(4.0, 5.0)} with value 90.
<BLANKLINE>
>>> alice = PiecewiseConstantAgent([5], name='alice')
>>> bob = PiecewiseConstantAgent([5], name='bob')
>>> print(str(opt_piecewise_constant([alice,bob])))
alice gets {(0.0, 0.5)} with value 2.5.
bob gets {(0.5, 1.0)} with value 2.5.
<BLANKLINE>
>>> alice = PiecewiseConstantAgent([3], name='alice')
>>> bob = PiecewiseConstantAgent([5], name='bob')
>>> print(str(opt_piecewise_constant([alice,bob])))
alice gets {(0.0, 0.5)} with value 1.5.
bob gets {(0.5, 1.0)} with value 2.5.
<BLANKLINE>
>>> alice = PiecewiseConstantAgent([0,1,0,2,0,3], name='alice')
>>> bob = PiecewiseConstantAgent([1,0,2,0,3,0], name='bob')
>>> print(str(opt_piecewise_constant([alice,bob])))
alice gets {(1.0, 2.0),(3.0, 4.0),(5.0, 6.0)} with value 6.
bob gets {(0.0, 1.0),(2.0, 3.0),(4.0, 5.0)} with value 6.
<BLANKLINE>
"""
value_matrix = [list(agent.valuation.values) for agent in agents]
num_of_agents = len(value_matrix)
num_of_pieces = len(value_matrix[0])
# Check for correct number of agents
if num_of_agents < 2:
raise ValueError(f'Optimal EF Cake Cutting works only for two agents or more')
logger.info(f'Received {num_of_agents} agents')
if not all([agent.cake_length() == agents[0].cake_length() for agent in agents]):
raise ValueError(f'Agents cake lengths are not equal')
logger.info(f'Each agent cake length is {agents[0].cake_length()}')
# XiI[i][I] represents the fraction of interval I given to agent i. Should be in {0,1}.
XiI = [[cvxpy.Variable(name=f'{agents[agent_index].name()} interval {piece_index+1} fraction', integer=False)
for piece_index in range(num_of_pieces)]
for agent_index in range(num_of_agents)]
logger.info(f'Fraction matrix has {len(XiI)} rows (agents) and {len(XiI[0])} columns (intervals)')
constraints = feasibility_constraints(XiI)
agents_w = []
for i in range(num_of_agents):
value_of_i = sum([XiI[i][g] * value_matrix[i][g] for g in range(num_of_pieces)])
agents_w.append(value_of_i)
for j in range(num_of_agents):
if j != i:
value_of_j = sum([XiI[j][g] * value_matrix[i][g] for g in range(num_of_pieces)])
logger.info(f'Adding Envy-Free constraint for agent: {agents[i].name()},\n{value_of_j} <= {value_of_i}')
constraints.append(value_of_j <= value_of_i)
objective = sum(agents_w)
logger.info(f'Objective function to maximize is {objective}')
prob = cvxpy.Problem(cvxpy.Maximize(objective), constraints)
prob.solve()
logger.info(f'Problem status: {prob.status}')
pieces_allocation = get_pieces_allocations(num_of_pieces, XiI)
return Allocation(agents, pieces_allocation)
def feasibility_constraints(XiI: list) -> list:
"""
Generate the feasibility constraints of the given matrix, namely:
* Each XiI is between 0 and 1;
* For each g, the sum of XiI is 1.
:param XiI: a list of lists of variables: XiI[i,g] is the amount of interval g given to agent i.
:return: a list of constraints.
"""
constraints = []
num_of_agents = len(XiI)
num_of_items = len(XiI[0])
for g in range(num_of_items):
sum_of_fractions = 1 == sum([XiI[i][g] for i in range(num_of_agents)])
logger.info(f'Adding interval {g+1} "sum of fractions == 1" constraint: {sum_of_fractions}')
constraints.append(sum_of_fractions)
for i in range(num_of_agents):
bound_fraction = [XiI[i][g] >= 0, XiI[i][g] <= 1]
logger.info(f'Adding agent {i + 1} fraction constraint for piece {g + 1} {bound_fraction[0]}, {bound_fraction[1]}')
constraints += bound_fraction
return constraints
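# For example (illustrative), with 2 agents and a single interval g=0 the
# constraints produced above are:
#   XiI[0][0] + XiI[1][0] == 1,   0 <= XiI[0][0] <= 1,   0 <= XiI[1][0] <= 1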
def get_pieces_allocations(num_of_pieces: int, XiI: list) -> list:
"""
Generate a list of interval allocation per agent
:param num_of_pieces: number of intervals
:param XiI: a list of lists of variables: XiI[i,g] is the amount of interval g given to agent i.
:return: list of interval allocation per agent
"""
piece_help = [0.0 for _ in range(num_of_pieces)]
piece_alloc = []
for fraction_list in XiI:
agent_alloc = []
for i in range(len(fraction_list)):
fraction = np.round(fraction_list[i].value, 2)
if fraction > 0:
int_start = piece_help[i] + i
agent_alloc.append((int_start, int_start + fraction))
piece_help[i] += fraction
piece_alloc.append(agent_alloc)
logger.info(f'Agent {len(piece_alloc)} pieces are {agent_alloc}')
return piece_alloc
def opt_piecewise_linear(agents: List[Agent]) -> Allocation:
"""
algorithm for finding an optimal EF allocation when agents have piecewise linear valuations.
:param agents: a list of agents
:return: an optimal envy-free allocation
>>> alice = PiecewiseLinearAgent([11,22,33,44],[1,0,3,-2],name="alice")
>>> bob = PiecewiseLinearAgent([11,22,33,44],[-1,0,-3,2],name="bob")
>>> print(str(opt_piecewise_linear([alice,bob])))
alice gets {(0.5, 1),(1, 1.466),(2.5, 3),(3, 3.5)} with value 55.
bob gets {(0, 0.5),(1.466, 2),(2, 2.5),(3.5, 4)} with value 55.
<BLANKLINE>
>>> alice = PiecewiseLinearAgent([5], [0], name='alice')
>>> bob = PiecewiseLinearAgent([5], [0], name='bob')
>>> print(str(opt_piecewise_linear([alice,bob])))
alice gets {(0, 0.5)} with value 2.5.
bob gets {(0.5, 1)} with value 2.5.
<BLANKLINE>
>>> alice = PiecewiseLinearAgent([5], [-1], name='alice')
>>> bob = PiecewiseLinearAgent([5], [0], name='bob')
>>> print(str(opt_piecewise_linear([alice,bob])))
alice gets {(0, 0.5)} with value 2.62.
bob gets {(0.5, 1)} with value 2.5.
<BLANKLINE>
>>> alice = PiecewiseLinearAgent([5], [-1], name='alice')
>>> bob = PiecewiseLinearAgent([5], [-1], name='bob')
>>> print(str(opt_piecewise_linear([alice,bob])))
alice gets {(0, 0.475)} with value 2.5.
bob gets {(0.475, 1)} with value 2.25.
<BLANKLINE>
>>> alice = PiecewiseLinearAgent([0,1,0,2,0,3], [0,0,0,0,0,0], name='alice')
>>> bob = PiecewiseLinearAgent([1,0,2,0,3,0], [0,0,0,0,0,0],name='bob')
>>> print(str(opt_piecewise_linear([alice,bob])))
alice gets {(1, 2),(3, 4),(5, 6)} with value 6.
bob gets {(0, 1),(2, 3),(4, 5)} with value 6.
<BLANKLINE>
"""
def Y(i, op, j, intervals) -> list:
"""
        returns all intervals x such that vi(x) op vj(x), i.e. Y(i op j) = {x ∈ [0, 1] : vi(x) op vj(x)}
        :param i: agent index
        :param op: operator to apply
        :param j: agent index
        :param intervals: intervals (the x's) on which the condition is evaluated
        :return: list of intervals satisfying the condition
"""
return [(start, end) for start, end in intervals if op(agents[i].eval(start, end), agents[j].eval(start, end))]
def isIntersect(poly_1: np.poly1d, poly_2: np.poly1d) -> float:
"""
checks for polynomials intersection
:param poly_1: np.poly1d
:param poly_2: np.poly1d
:return: corresponding x or 0 if none exist
"""
logger.debug(f'isIntersect: {poly_1}=poly_1,{poly_2}=poly_2')
m_1, c_1 = poly_1.c if len(poly_1.c) > 1 else [0, poly_1.c[0]]
m_2, c_2 = poly_2.c if len(poly_2.c) > 1 else [0, poly_2.c[0]]
logger.debug(f'isIntersect: m_1={m_1} c_1={c_1}, m_2={m_2} c_2={c_2}')
return ((c_2 - c_1) / (m_1 - m_2)) if (m_1 - m_2) != 0 else 0.0
def R(x: tuple) -> float:
"""
R(x) = v1(x)/v2(x)
:param x: interval
:return: ratio
"""
if agents[1].eval(x[0], x[1]) > 0:
return agents[0].eval(x[0], x[1]) / agents[1].eval(x[0], x[1])
return 0
def V_l(agent_index, inter_list):
"""
sums agent's value along intervals from inter_list
:param agent_index: agent index 0 or 1
:param inter_list: list of intervals
:return: sum of intervals for agent
"""
logger.info(f'V_list(agent_index={agent_index}, inter_list={inter_list})')
return sum([V(agent_index, start, end) for start, end in inter_list])
def V(agent_index: int, start: float, end: float):
"""
agent value of interval from start to end
:param agent_index: agent index 0 or 1
:param start: interval starting point
:param end: interval ending point
:return: value of interval for agent
"""
logger.info(f'V(agent_index={agent_index},start={start},end={end})')
return agents[agent_index].eval(start, end)
def get_optimal_allocation():
"""
Creates maximum total value allocation
:return: optimal allocation for 2 agents list[list[tuple], list[tuple]] and list of new intervals
"""
logger.debug(f'length: {agents[0].cake_length()}')
intervals = [(start, start + 1) for start in range(agents[0].cake_length())]
logger.info(f'getting optimal allocation for initial intervals: {intervals}')
new_intervals = []
allocs = [[], []]
for piece, (start, end) in enumerate(intervals):
logger.debug(f'get_optimal_allocation: piece={piece}, start={start}, end={end}')
mid = isIntersect(agents[0].valuation.piece_poly[piece], agents[1].valuation.piece_poly[piece])
if 0 < mid < 1:
logger.debug(f'mid={mid}')
new_intervals.append((start, start + mid))
if V(0, start, start + mid) > V(1, start, start + mid):
allocs[0].append((start, start + mid))
else:
allocs[1].append((start, start + mid))
start += mid
if V(0, start, end) > V(1, start, end):
allocs[0].append((start, end))
else:
allocs[1].append((start, end))
new_intervals.append((start, end))
return allocs, new_intervals
def Y_op_r(intervals, op, r):
"""
Y op r = {x : (v1(x) < v2(x)) ∧ (R(x) op r)}
:param intervals: intervals to test condition
:param op: operator.le, operator.lt, operator.gt, operator.ge etc.
:param r: ratio
:return: list of valid intervals
"""
result = []
for start, end in intervals:
if agents[0].eval(start, end) < agents[1].eval(start, end) and op(R((start, end)), r):
result.append((start, end))
return result
allocs, new_intervals = get_optimal_allocation()
logger.info(f'get_optimal_allocation returned:\nallocation: {allocs}\npieces: {new_intervals}')
y_0_gt_1 = Y(0, operator.gt, 1, new_intervals)
y_1_gt_0 = Y(1, operator.gt, 0, new_intervals)
y_0_eq_1 = Y(0, operator.eq, 1, new_intervals)
y_0_ge_1 = Y(0, operator.ge, 1, new_intervals)
y_1_ge_0 = Y(1, operator.ge, 0, new_intervals)
y_0_lt_1 = Y(0, operator.lt, 1, new_intervals)
logger.debug(f'y_0_gt_1 {y_0_gt_1}')
logger.debug(f'y_1_gt_0 {y_1_gt_0}')
logger.debug(f'y_0_eq_1 {y_0_eq_1}')
logger.debug(f'y_0_ge_1 {y_0_ge_1}')
logger.debug(f'y_1_ge_0 {y_1_ge_0}')
if (V_l(0, y_0_ge_1) >= (agents[0].total_value() / 2) and
V_l(1, y_1_ge_0) >= (agents[1].total_value() / 2)):
if V_l(0, y_0_gt_1) >= (agents[0].total_value() / 2):
allocs = [y_0_gt_1, y_1_ge_0]
else:
missing_value = (agents[0].total_value() / 2) - V_l(0, y_0_gt_1)
interval_options = []
for start, end in y_0_eq_1:
mid = agents[0].mark(start, missing_value)
logger.debug(f'start {start}, end {end}, mid {mid}, missing value {missing_value}')
if mid:
interval_options.append([(start, mid), (mid, end)])
logger.debug(f'int_opt {interval_options}')
agent_0_inter, agent_1_inter = interval_options.pop()
y_0_gt_1.append(agent_0_inter)
y_1_gt_0.append(agent_1_inter)
logger.info(f'agent 0 pieces {y_0_gt_1}')
logger.info(f'agent 1 pieces {y_1_gt_0}')
allocs = [y_0_gt_1, y_1_gt_0]
return Allocation(agents, allocs)
if V_l(0, y_0_ge_1) < (agents[0].total_value() / 2):
# Create V1(Y(1≥2) ∪ Y(≥r)) ≥ 1/2
ratio_dict = {x: R(x) for x in y_0_lt_1}
y_le_r_dict = {r: Y_op_r(y_0_lt_1, operator.ge, r) for inter, r in ratio_dict.items()}
valid_r_dict = {}
r_star = {0: None}
for r, val in y_le_r_dict.items():
            if V_l(0, y_0_gt_1 + val) >= (agents[0].total_value() / 2):
highest_value, interval_dict = r_star.popitem()
temp_dict = {r: val}
if V_l(0, y_0_gt_1 + val) > highest_value:
highest_value = V_l(0, y_0_gt_1 + val)
interval_dict = temp_dict
valid_r_dict[r] = val
r_star[highest_value] = interval_dict
logger.info(f'Valid Y(≥r) s.t. V1(Y(1≥2 U Y(≥r))) is {valid_r_dict}')
logger.info(f'Y(≥r*) is {r_star}')
# Give Y>r∗ to agent 1
_, r_max_dict = r_star.popitem()
if not r_max_dict:
logger.info(f'Y > r* returned empty, returning')
return Allocation(agents, allocs)
r_max, inter_r_max = r_max_dict.popitem()
agent_0_allocation = y_0_gt_1 + Y_op_r(inter_r_max, operator.gt, r_max)
agent_1_allocation = y_0_lt_1
# divide Y=r∗ so that agent 1 receives exactly value 1
missing_value = (agents[0].total_value() / 2) - V_l(0, agent_0_allocation)
y_eq_r = Y_op_r(inter_r_max, operator.eq, r_max)
logger.info(f'Y(=r*) is {y_eq_r}')
for start, end in y_eq_r:
agent_1_allocation.remove((start, end))
mid = agents[0].mark(start, missing_value)
logger.debug(f'start {start}, end {end}, mid {mid}, missing value {missing_value}')
if mid <= end:
agent_0_allocation.append((start, mid))
agent_1_allocation.append((mid, end))
else:
agent_1_allocation.append((start, end))
logger.info(f'agent 0 pieces {agent_0_allocation}')
logger.info(f'agent 1 pieces {agent_1_allocation}')
allocs = [agent_0_allocation, agent_1_allocation]
        logger.info(f'Allocation is {agent_0_allocation, agent_1_allocation}')
        return Allocation(agents, allocs)
if __name__ == "__main__":
import doctest
(failures, tests) = doctest.testmod(report=True)
print("{} failures, {} tests".format(failures, tests))
|
#/*
# * Copyright (c) 2019,2020 Xilinx Inc. All rights reserved.
# *
# * Author:
# * Bruce Ashfield <bruce.ashfield@xilinx.com>
# *
# * SPDX-License-Identifier: BSD-3-Clause
# */
import copy
import struct
import sys
import types
import unittest
import os
import getopt
import re
import subprocess
import shutil
from pathlib import Path
from pathlib import PurePath
from io import StringIO
import contextlib
import importlib
from lopper import Lopper
from lopper import LopperFmt
import lopper
from lopper_tree import *
from re import *
sys.path.append(os.path.dirname(__file__))
from openamp_xlnx_common import *
RPU_PATH = "/rpu@ff9a0000"
def trim_ipis(sdt):
unneeded_props = ["compatible", "xlnx,ipi-bitmask","interrupts", "xlnx,ipi-id", "xlnx,ipi-target-count", "xlnx,cpu-name", "xlnx,buffer-base", "xlnx,buffer-index", "xlnx,int-id", "xlnx,bit-position"]
amba_sub_nodes = sdt.tree['/amba'].subnodes()
for node in amba_sub_nodes:
node_compat = node.propval("compatible")
if node_compat != [""]:
if 'xlnx,zynqmp-ipi-mailbox' in node_compat:
for i in unneeded_props:
node[i].value = ""
node.sync(sdt.FDT)
def is_compat( node, compat_string_to_test ):
if re.search( "openamp,xlnx-rpu", compat_string_to_test):
return xlnx_openamp_rpu
return ""
def update_mbox_cntr_intr_parent(sdt):
# find phandle of a72 gic for mailbox controller
a72_gic_node = sdt.tree["/amba_apu/interrupt-controller@f9000000"]
# set mailbox controller interrupt-parent to this phandle
mailbox_cntr_node = sdt.tree["/zynqmp_ipi1"]
mailbox_cntr_node["interrupt-parent"].value = a72_gic_node.phandle
sdt.tree.sync()
sdt.tree.resolve()
# 1 for master, 0 for slave
# for each openamp channel, return mapping of role to resource group
def determine_role(sdt, domain_node):
include_prop = domain_node["include"]
rsc_groups = []
current_rsc_group = None
if len(list(include_prop.value)) % 2 == 1:
print("list of include not valid. expected even number of elements. got ", len(list(include_prop.value)), include_prop.value)
return -1
for index,value in enumerate(include_prop.value):
if index % 2 == 0:
current_rsc_group = sdt.tree.pnode(value)
else:
if value == 1: # only for openamp master
if current_rsc_group == None:
print("invalid resource group phandle: ", value)
return -1
rsc_groups.append(current_rsc_group)
else:
print("only do processing in host openamp channel domain ", value)
return -1
return rsc_groups
# in this case remote is rpu
# find node that is other end of openamp channel
def find_remote(sdt, domain_node, rsc_group_node):
domains = sdt.tree["/domains"]
# find other domain including the same resource group
remote_domain = None
for node in domains.subnodes():
# look for other domains with include
if node.propval("include") != [''] and node != domain_node:
# if node includes same rsc group, then this is remote
for i in node.propval("include"):
included_node = sdt.tree.pnode(i)
if included_node != None and included_node == rsc_group_node:
return node
return -1
# tests for a bit that is set, going from 31 -> 0 (MSB to LSB)
def check_bit_set(n, k):
if n & (1 << (k)):
return True
return False
# return rpu cluster configuration
# rpu cpus property fields: Cluster | cpus-mask | execution-mode
#
#execution mode ARM-R CPUs:
#bit 30: lockstep (lockstep enabled == 1)
#bit 31: secure mode / normal mode (secure mode == 1)
# e.g. &cpus_r5 0x2 0x80000000>
# this maps to arg1 as rpu_cluster node
# arg2: cpus-mask: 0x2 is r5-1, 0x1 is r5-0, 0x3 is both nodes
# if 0x3/both nodes and in split then need to openamp channels provided,
# otherwise return error
# if lockstep valid cpus-mask is 0x3 needed to denote both being used
#
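# Illustrative decoding of the example above (an assumption, not part of the
# original lop), using check_bit_set() defined earlier:
#
#   exec_mode = 0x80000000
#   check_bit_set(exec_mode, 30)  # False -> split mode (not lockstep)
#   check_bit_set(exec_mode, 31)  # True  -> secure mode
#   cpus_mask = 0x2               # r5-1 only (0x1 = r5-0, 0x3 = both cores)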
def construct_carveouts(sdt, rsc_group_node, core, openamp_app_inputs):
# static var that persists beyond lifetime of first function call
# this is needed as there may be more than 1 openamp channel
# so multiple carveouts' phandles are required
if not hasattr(construct_carveouts,"carveout_phandle"):
# it doesn't exist yet, so initialize it
construct_carveouts.carveout_phandle = 0x5ed0
# carveouts each have addr,range
mem_regions = [[0 for x in range(2)] for y in range(4)]
mem_region_names = {
0 : "elfload",
1 : "vdev0vring0",
2 : "vdev0vring1",
3 : "vdev0buffer",
}
for index,value in enumerate(rsc_group_node["memory"].value):
if index % 4 == 1:
mem_regions[index//4][0] = value
elif index % 4 == 3:
mem_regions[index//4][1] = value
carveout_phandle_list = []
for i in range(4):
name = "rpu"+str(core)+mem_region_names[i]
addr = mem_regions[i][0]
openamp_app_inputs[rsc_group_node.name + mem_region_names[i] + '_base'] = hex(mem_regions[i][0])
length = mem_regions[i][1]
openamp_app_inputs[rsc_group_node.name + mem_region_names[i] + '_size'] = hex(mem_regions[i][1])
new_node = LopperNode(-1, "/reserved-memory/"+name)
new_node + LopperProp(name="no-map", value=[])
new_node + LopperProp(name="reg",value=[0,addr,0,length])
new_node + LopperProp(name="phandle",value=construct_carveouts.carveout_phandle)
        new_node.phandle = construct_carveouts.carveout_phandle
sdt.tree.add(new_node)
print("added node: ",new_node)
carveout_phandle_list.append(construct_carveouts.carveout_phandle)
construct_carveouts.carveout_phandle += 1
return carveout_phandle_list
def construct_mem_region(sdt, domain_node, rsc_group_node, core, openamp_app_inputs):
# add reserved mem if not present
res_mem_node = None
carveout_phandle_list = None
try:
res_mem_node = sdt.tree["/reserved-memory"]
print("found pre-existing reserved mem node")
except:
res_mem_node = LopperNode(-1, "/reserved-memory")
res_mem_node + LopperProp(name="#address-cells",value=2)
res_mem_node + LopperProp(name="#size-cells",value=2)
res_mem_node + LopperProp(name="ranges",value=[])
sdt.tree.add(res_mem_node)
print("added reserved mem node ", res_mem_node)
return construct_carveouts(sdt, rsc_group_node, core, openamp_app_inputs)
# set pnode id for current rpu node
def set_rpu_pnode(sdt, r5_node, rpu_config, core, platform, remote_domain):
if r5_node.propval("pnode-id") != ['']:
print("pnode id already exists for node ", r5_node)
return -1
rpu_pnodes = {}
if platform == SOC_TYPE.VERSAL:
rpu_pnodes = {0 : 0x18110005, 1: 0x18110006}
else:
print("only versal supported for openamp domains")
return -1
rpu_pnode = None
# rpu config : true is split
if rpu_config == "lockstep":
rpu_pnode = rpu_pnodes[0]
else:
rpu_pnode = rpu_pnodes[core]
r5_node + LopperProp(name="pnode-id", value = rpu_pnodes[core])
r5_node.sync(sdt.FDT)
return
def setup_mbox_info(sdt, domain_node, r5_node, mbox_ctr):
if mbox_ctr.propval("reg-names") == [''] or mbox_ctr.propval("xlnx,ipi-id") == ['']:
print("invalid mbox ctr")
return -1
r5_node + LopperProp(name="mboxes",value=[mbox_ctr.phandle,0,mbox_ctr.phandle,1])
r5_node + LopperProp(name="mbox-names", value = ["tx", "rx"]);
sdt.tree.sync()
r5_node.sync(sdt.FDT)
return
# based on rpu_cluster_config + cores determine which tcm nodes to use
# add tcm nodes to device tree
def setup_tcm_nodes(sdt, r5_node, platform, rsc_group_node):
tcm_nodes = {}
if platform == SOC_TYPE.VERSAL:
tcm_pnodes = {
"ffe00000" : 0x1831800b,
"ffe20000" : 0x1831800c,
"ffe90000" : 0x1831800d,
"ffeb0000" : 0x1831800e,
}
tcm_to_hex = {
"ffe00000" : 0xffe00000,
"ffe20000" : 0xffe20000,
"ffe90000" : 0xffe90000,
"ffeb0000" : 0xffeb0000,
}
else:
print("only versal supported for openamp domains")
return -1
# determine which tcm nodes to use based on access list in rsc group
bank = 0
for phandle_val in rsc_group_node["access"].value:
tcm = sdt.tree.pnode(phandle_val)
if tcm != None:
key = tcm.abs_path.split("@")[1]
node_name = r5_node.abs_path+"/tcm_remoteproc"+str(bank)+"@"+key
tcm_node = LopperNode(-1, node_name)
tcm_node + LopperProp(name="pnode-id",value=tcm_pnodes[key])
tcm_node + LopperProp(name="reg",value=[0,tcm_to_hex[key],0,0x10000])
sdt.tree.add(tcm_node)
bank +=1
print('added ',tcm_node.abs_path)
return 0
def setup_r5_core_node(rpu_config, sdt, domain_node, rsc_group_node, core, remoteproc_node, platform, remote_domain, mbox_ctr, openamp_app_inputs):
carveout_phandle_list = None
r5_node = None
# add r5 node if not present
try:
r5_node = sdt.tree["/rpu@ff9a0000/r5_"+str(core)]
print("node already exists: ", r5_node)
except:
r5_node = LopperNode(-1, "/rpu@ff9a0000/r5_"+str(core))
r5_node + LopperProp(name="#address-cells",value=2)
r5_node + LopperProp(name="#size-cells",value=2)
r5_node + LopperProp(name="ranges",value=[])
sdt.tree.add(r5_node)
print("added r5 node ", r5_node)
print("add props for ",str(r5_node))
# props
ret = set_rpu_pnode(sdt, r5_node, rpu_config, core, platform, remote_domain)
if ret == -1:
print("set_rpu_pnode failed")
return ret
ret = setup_mbox_info(sdt, domain_node, r5_node, mbox_ctr)
if ret == -1:
print("setup_mbox_info failed")
return ret
carveout_phandle_list = construct_mem_region(sdt, domain_node, rsc_group_node, core, openamp_app_inputs)
if carveout_phandle_list == -1:
print("construct_mem_region failed")
return ret
if carveout_phandle_list != None:
print("adding prop memory-region to ",r5_node)
r5_node + LopperProp(name="memory-region",value=carveout_phandle_list)
#tcm nodes
for i in r5_node.subnodes():
if "tcm" in i.abs_path:
"tcm nodes exist"
return -1
# tcm nodes do not exist. set them up
setup_tcm_nodes(sdt, r5_node, platform, rsc_group_node)
# add props to remoteproc node
def set_remoteproc_node(remoteproc_node, sdt, rpu_config):
props = []
props.append(LopperProp(name="reg", value = [0x0, 0xff9a0000, 0x0, 0x10000]))
props.append(LopperProp(name="#address-cells",value=2))
props.append(LopperProp(name="ranges",value=[]))
props.append(LopperProp(name="#size-cells",value=2))
props.append(LopperProp(name="core_conf",value=rpu_config))
props.append(LopperProp(name="compatible",value="xlnx,zynqmp-r5-remoteproc-1.0"))
for i in props:
remoteproc_node + i
#
core = []
# this should only add nodes to tree
# openamp_app_inputs: dictionary to fill with openamp header info for openamp code base later on
def construct_remoteproc_node(remote_domain, rsc_group_node, sdt, domain_node, platform, mbox_ctr, openamp_app_inputs):
rpu_cluster_node = remote_domain.parent
rpu_config = None # split or lockstep
cpus_prop_val = rpu_cluster_node.propval("cpus")
if cpus_prop_val != ['']:
if len(cpus_prop_val) != 3:
print("rpu cluster cpu prop invalid len")
return -1
rpu_config = "lockstep" if check_bit_set(cpus_prop_val[2], 30)==True else "split"
if rpu_config == "lockstep":
core = 0
else:
if cpus_prop_val[1] == 3:
# if here this means that cluster is in split mode. look at which core from remote domain
core_prop_val = remote_domain.propval("cpus")
if core_prop_val == ['']:
print("no cpus val for core ", remote_domain)
else:
if core_prop_val[1] == 2:
core = 1
elif core_prop_val[1] == 1:
core = 0
else:
print("invalid cpu prop for core ", remote_domain, core_prop_val[1])
return -1
else:
print("invalid cpu prop for rpu: ",remote_domain, cpus_prop_val[1])
return -1
# only add remoteproc node if mbox is present in access list of domain node
# check domain's access list for mbox
has_corresponding_mbox = False
if domain_node.propval("access") != ['']:
for i in domain_node.propval("access"):
possible_mbox = sdt.tree.pnode(i)
if possible_mbox != None:
if possible_mbox.propval("reg-names") != ['']:
has_corresponding_mbox = True
# setup remoteproc node if not already present
remoteproc_node = None
try:
remoteproc_node = sdt.tree["/rpu@ff9a0000"]
except:
print("remoteproc node not present. now add it to tree")
remoteproc_node = LopperNode(-1, "/rpu@ff9a0000")
set_remoteproc_node(remoteproc_node, sdt, rpu_config)
sdt.tree.add(remoteproc_node, dont_sync = True)
remoteproc_node.sync(sdt.FDT)
remoteproc_node.resolve_all_refs()
sdt.tree.sync()
return setup_r5_core_node(rpu_config, sdt, domain_node, rsc_group_node, core, remoteproc_node, platform, remote_domain, mbox_ctr, openamp_app_inputs)
def find_mbox_cntr(remote_domain, sdt, domain_node, rsc_group):
# if there are multiple openamp channels
# then there can be multiple mbox controllers
# with this in mind, there can be pairs of rsc groups and mbox cntr's
# per channel
# if there are i channels, then determine 'i' here by
# associating a index for the resource group, then find i'th
# mbox cntr from domain node's access list
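    # Example (illustrative): with include = [<rsc_group_0> 1, <rsc_group_1> 1]
    # and access = [<mbox_0>, <mbox_1>, ...], resource group 1 is paired with the
    # second qualifying mailbox found in the access list (index 1).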
include_list = domain_node.propval("include")
if include_list == ['']:
print("no include prop for domain node")
return -1
rsc_group_index = 0
for val in include_list:
# found corresponding mbox
if sdt.tree.pnode(val) != None:
if "resource_group" in sdt.tree.pnode(val).abs_path:
print("find_mbox_cntr: getting index for rsc group: ", sdt.tree.pnode(val).abs_path, rsc_group_index, sdt.tree.pnode(val).phandle)
if sdt.tree.pnode(val).phandle == rsc_group.phandle:
break
rsc_group_index += 1
access_list = domain_node.propval("access")
if access_list == ['']:
print("no access prop for domain node")
return -1
mbox_index = 0
for val in access_list:
mbox = sdt.tree.pnode(val)
if mbox != None and mbox.propval("reg-names") != [''] and mbox.propval("xlnx,ipi-id") != ['']:
if mbox_index == rsc_group_index:
return mbox
mbox_index += 1
print("did not find corresponding mbox")
return -1
def parse_openamp_domain(sdt, options, tgt_node):
print("parse_openamp_domain")
domain_node = sdt.tree[tgt_node]
root_node = sdt.tree["/"]
platform = SOC_TYPE.UNINITIALIZED
openamp_app_inputs = {}
if 'versal' in str(root_node['compatible']):
platform = SOC_TYPE.VERSAL
elif 'zynqmp' in str(root_node['compatible']):
platform = SOC_TYPE.ZYNQMP
else:
print("invalid input system DT")
return False
rsc_groups = determine_role(sdt, domain_node)
if rsc_groups == -1:
print("failed to find rsc_groups")
return rsc_groups
remote_ipi_to_irq_vect_id = {
0xFF340000 : 63,
0xFF350000 : 64,
0xFF360000 : 65,
}
ipi_to_agent = {
0xff330000 : 0x400,
0xff340000 : 0x600,
0xff350000 : 0x800,
0xff360000 : 0xa00,
0xff370000 : 0xc00,
0xff380000 : 0xe00,
}
source_agent_to_ipi = {
0x000: 'psm', 0x100: 'psm',
0x200: 'pmc', 0x300: 'pmc',
0x400: 'ipi0', 0x500: 'ipi0',
0x600: 'ipi1', 0x700: 'ipi1',
0x800: 'ipi2', 0x900: 'ipi2',
0xa00: 'ipi3', 0xb00: 'ipi3',
0xc00: 'ipi4', 0xd00: 'ipi4',
0xe00: 'ipi5', 0xf00: 'ipi5',
}
agent_to_ipi_bitmask = {
0x000: 0x1 ,
0x200: 0x2 ,
0x400: 0x4,
0x600: 0x8,
0x800: 0x10,
0xa00: 0x20,
0xc00: 0x40,
0xe00: 0x80,
0x100: 0x1 ,
0x300: 0x2 ,
0x500: 0x4,
0x700: 0x8,
0x900: 0x10,
0xb00: 0x20,
0xd00: 0x40,
0xf00: 0x80,
}
# if master, find corresponding slave
# if none report error
channel_idx = 0
for current_rsc_group in rsc_groups:
# each openamp channel's remote/slave should be different domain
# the domain can be identified by its unique combination of domain that includes the same resource group as the
# openamp remote domain in question
remote_domain = find_remote(sdt, domain_node, current_rsc_group)
if remote_domain == -1:
print("failed to find_remote")
return remote_domain
mbox_ctr = find_mbox_cntr(remote_domain, sdt, domain_node, current_rsc_group)
if mbox_ctr == -1:
# if here then check for userspace case
host_ipi_node = None
remote_ipi_node = None
domains_to_process = {
'host': domain_node,
'remote' : remote_domain,
}
for role in domains_to_process.keys():
domain = domains_to_process[role]
access_pval = domain.propval("access")
if len(access_pval) == 0:
print("userspace case: invalid "+role+" IPI - no access property")
return mbox_ctr
ipi_node = sdt.tree.pnode(access_pval[0])
if ipi_node == None:
print("userspace case: invalid "+role+" IPI - invalid phandle from access property.")
return mbox_ctr
if 'xlnx,zynqmp-ipi-mailbox' not in ipi_node.propval("compatible"):
print("userspace case: invalid "+role+" IPI - wrong compatible string")
return mbox_ctr
ipi_base_addr = ipi_node.propval("reg")
if len(ipi_base_addr) != 4:
print("userspace case: invalid "+role+" IPI - incorrect reg property of ipi", ipi_node)
return mbox_ctr
ipi_base_addr = ipi_base_addr[1]
agent = ipi_to_agent[ipi_base_addr]
bitmask = agent_to_ipi_bitmask[agent]
print('userspace case: ',domain, hex(ipi_base_addr), hex(bitmask ), role )
# if so also parse out remote IPI
print("find_mbox_cntr failed")
return mbox_ctr
#openamp_app_inputs[current_rsc_group.name+'host-bitmask'] = hex(agent_to_ipi_bitmask[source_agent])
#openamp_app_inputs[current_rsc_group.name+'remote-bitmask'] = hex(agent_to_ipi_bitmask[remote_agent])
#openamp_app_inputs['ring_tx'] = 'FW_RSC_U32_ADDR_ANY'
#openamp_app_inputs['ring_rx'] = 'FW_RSC_U32_ADDR_ANY'
#openamp_app_inputs[current_rsc_group.name+'-remote-ipi'] = hex(remote_ipi_node.propval('reg')[1])
#openamp_app_inputs[current_rsc_group.name+'-remote-ipi-irq-vect-id'] = remote_ipi_to_irq_vect_id[remote_ipi_node.propval('reg')[1]]
else:
print('mbox_ctr: ', mbox_ctr)
local_request_region_idx = -1
remote_request_region_idx = -1
for index,value in enumerate(mbox_ctr.propval('reg-names')):
if 'local_request_region' == value:
local_request_region_idx = index
if 'remote_request_region' == value:
remote_request_region_idx = index
if local_request_region_idx == -1:
print("could not find local_request_region in mailbox controller")
if remote_request_region_idx == -1:
print("could not find remote_request_region in mailbox controller")
mbox_ctr_local_request_region = mbox_ctr.propval('reg')[local_request_region_idx*2]
source_agent = mbox_ctr_local_request_region & 0xF00
openamp_app_inputs[current_rsc_group.name+'host-bitmask'] = hex(agent_to_ipi_bitmask[source_agent])
print("source agent for mbox ctr: ", hex(source_agent), source_agent_to_ipi[source_agent], agent_to_ipi_bitmask[source_agent] )
mbox_ctr_remote_request_region = mbox_ctr.propval('reg')[remote_request_region_idx*2]
remote_agent = mbox_ctr_remote_request_region & 0xF00
openamp_app_inputs[current_rsc_group.name+'remote-bitmask'] = hex(agent_to_ipi_bitmask[remote_agent])
print("remote agent for mbox ctr: ", hex(remote_agent), source_agent_to_ipi[remote_agent], agent_to_ipi_bitmask[remote_agent] )
# if mailbox controller exists, this means this is kernel mode. provide tx/rx vrings for this
openamp_app_inputs['ring_tx'] = 'FW_RSC_U32_ADDR_ANY'
openamp_app_inputs['ring_rx'] = 'FW_RSC_U32_ADDR_ANY'
print('remote_domain: ',remote_domain, sdt.tree.pnode(remote_domain.propval('access')[0]))
remote_ipi_node = sdt.tree.pnode(remote_domain.propval('access')[0])
print(remote_ipi_node, hex(remote_ipi_node.propval('reg')[1]), ' ipi irq vect id: ', remote_ipi_to_irq_vect_id[remote_ipi_node.propval('reg')[1]])
openamp_app_inputs[current_rsc_group.name+'-remote-ipi'] = hex(remote_ipi_node.propval('reg')[1])
openamp_app_inputs[current_rsc_group.name+'-remote-ipi-irq-vect-id'] = remote_ipi_to_irq_vect_id[remote_ipi_node.propval('reg')[1]]
# should only add nodes to tree
ret = construct_remoteproc_node(remote_domain, current_rsc_group, sdt, domain_node, platform, mbox_ctr, openamp_app_inputs)
if ret == -1:
print("construct_remoteproc_node failed")
return ret
openamp_app_inputs['channel'+ str(channel_idx)+ '_to_group'] = str(channel_idx) + '-to-' + current_rsc_group.name
channel_idx += 1
print("openamp_app_inputs: ")
for i in openamp_app_inputs.keys():
print(' ', i, openamp_app_inputs[i])
# ensure interrupt parent for openamp-related ipi message buffers is set
update_mbox_cntr_intr_parent(sdt)
# ensure that extra ipi mboxes do not have props that interfere with linux boot
trim_ipis(sdt)
print("ret true")
return True
# this is what it needs to account for:
#
# identify ipis, shared pages (have defaults but allow them to be overwritten
# by the system architect)
#
#
# kernel space case
# linux
# - update memory-region
# - mboxes
# - zynqmp_ipi1::interrupt-parent
# rpu
# - header
# user space case
# linux
# - header
# rpu
# - header
def xlnx_openamp_rpu( tgt_node, sdt, options ):
print("xlnx_openamp_rpu")
try:
verbose = options['verbose']
except:
verbose = 0
if verbose:
print( "[INFO]: cb: xlnx_openamp_rpu( %s, %s, %s )" % (tgt_node, sdt, verbose))
root_node = sdt.tree["/"]
platform = SOC_TYPE.UNINITIALIZED
if 'versal' in str(root_node['compatible']):
platform = SOC_TYPE.VERSAL
elif 'zynqmp' in str(root_node['compatible']):
platform = SOC_TYPE.ZYNQMP
else:
print("invalid input system DT")
return False
try:
domains_node = sdt.tree["/domains"]
for node in domains_node.subnodes():
if "openamp,domain-v1" in node.propval("compatible"):
include_pval = node.propval("include")
if len(include_pval) % 2 == 0 and len(include_pval) > 1:
if include_pval[1] == 0x1: # host
return parse_openamp_domain(sdt, options, node)
except:
print("ERR: openamp-xlnx rpu: no domains found")
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
# %load imports_default.py
# In[ ]:
import numpy as np
import math
import random as rn
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import json
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('pylab', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'svg'")
# In[3]:
start = [
[1,2,3,4,5,6,7,8,9],
[4,5,6,7,8,9,1,2,3],
[7,8,9,1,2,3,4,5,6],
[2,3,1,5,6,4,8,9,7],
[5,6,4,8,9,7,2,3,1],
[8,9,7,2,3,1,5,6,4],
[3,1,2,6,4,5,9,7,8],
[6,4,5,9,7,8,3,1,2],
[9,7,8,3,1,2,6,4,5]
]
def col_change(b, col1, col2):
for i in range(len(b)):
buff = b[i][col1]
b[i][col1] = b[i][col2]
b[i][col2] = buff
return b
import copy
def shuffle_board(seed):
    rn.seed()
    row_shuffle = rn.randint(1, 100)
    col_shuffle = rn.randint(1, 100)
    for i in range(row_shuffle):
        # pick one of the three row bands and swap two rows inside it
        roll = rn.randint(0, 2)
        changes = rn.sample(range(3), 2)
        buff = seed[roll*3 + changes[0]]
        seed[roll*3 + changes[0]] = seed[roll*3 + changes[1]]
        seed[roll*3 + changes[1]] = buff
    for i in range(col_shuffle):
        # pick one of the three column stacks and swap two columns inside it
        roll = rn.randint(0, 2)
        changes = rn.sample(range(3), 2)
        seed = col_change(seed, roll*3 + changes[0], roll*3 + changes[1])
    return seed
def slots(s):
if s=='easy':
return (81-28)
elif s =='medium':
return (81-26)
elif s =='hard':
return (81-23)
elif s=='v_hard':
return (81-20)
def generate(seed,lvl):
""" With levels 'easy' 'medium', 'hard', 'v_hard' levels."""
seed = shuffle_board(seed)
num = slots(lvl)
a = rn.sample(range(0,9),9)
b = rn.sample(range(0,9),9)
removals = rn.sample([(x,y) for x in a for y in b],num)
seed1 = copy.deepcopy(seed)
for i in range(num):
# print(removals[i])
seed1[removals[i][0]][removals[i][1]] = 0
return seed1, seed
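# Usage sketch (illustrative): build a puzzle and keep its solution.
#
#   puzzle, solution = generate(copy.deepcopy(start), 'easy')
#   # `puzzle` has 0s in the removed cells; `solution` is the shuffled full board.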
|
# -*- coding: utf-8 -*-
"""
@date: 2020/8/22 4:20 PM
@file: demo.py
@author: zj
@description:
"""
import cv2
import glob
import os
import time
import torch
import argparse
import numpy as np
from PIL import Image
from zcls.config import cfg
from zcls.config.key_word import KEY_OUTPUT
from rotnet.rotnet import rotnet
from rotnet.data.transforms.build import build_transform
from rotnet.data.transforms.rotate import rotate
def parse_args():
parser = argparse.ArgumentParser(description="RotNet Demo.")
parser.add_argument(
"-cfg",
"--config_file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument("--ckpt", type=str, default=None, help="Trained weights.")
parser.add_argument("--images_dir", default='demo/src', type=str,
help='Specify a image dir to do prediction.')
parser.add_argument("--rotate_dir", default='demo/rotate', type=str,
help='Specify a image dir to save rotate images.')
parser.add_argument("--output_dir", default='demo/res', type=str,
help='Specify a image dir to save demo images.')
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
return args
@torch.no_grad()
def run_demo(cfg, images_dir, rotate_dir, output_dir):
image_paths = sorted(glob.glob(os.path.join(images_dir, '*.jpg')))
if not os.path.exists(rotate_dir):
os.makedirs(rotate_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = rotnet(pretrained=True).to(device)
model.eval()
cpu_device = torch.device("cpu")
transform, target_transform = build_transform(cfg, is_train=False)
for i, image_path in enumerate(image_paths):
# First rotate the image, then correct it
t0 = time.time()
# Input images are converted to gray scale
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
t1 = time.time()
rotate_img, angle = target_transform(image)
t2 = time.time()
transform_img = transform(Image.fromarray(rotate_img, mode='L'))
t3 = time.time()
outputs = model(transform_img.unsqueeze(0).to(device))[KEY_OUTPUT][0].to(cpu_device).numpy()
pred_angle = np.argmax(outputs)
t4 = time.time()
meters = ' | '.join(
[
'load {:03d}ms'.format(round((t1 - t0) * 1000)),
'rotate {:03d}ms'.format(round((t2 - t1) * 1000)),
'transform {:03d}ms'.format(round((t3 - t2) * 1000)),
'inference {:03d}ms'.format(round((t4 - t3) * 1000)),
f'rotate_angle: {angle}, predicted angle: {pred_angle}'
]
)
file_name = os.path.basename(image_path)
print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths), file_name, meters))
img_name = os.path.splitext(file_name)[0]
rotate_img_path = os.path.join(rotate_dir, '%s-%d.jpg' % (img_name, angle))
cv2.imwrite(rotate_img_path, rotate_img)
res_img = rotate(rotate_img, -1 * pred_angle, 255)
res_img_path = os.path.join(output_dir, f'{img_name}-{pred_angle}.jpg')
cv2.imwrite(res_img_path, res_img)
def main():
args = parse_args()
print(args)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
if args.ckpt is not None:
cfg.MODEL.RECOGNIZER.PRELOADED = args.ckpt
cfg.freeze()
if args.config_file:
print("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, "r") as cf:
config_str = "\n" + cf.read()
print(config_str)
# print("Running with config:\n{}".format(cfg))
run_demo(cfg=cfg,
images_dir=args.images_dir,
rotate_dir=args.rotate_dir,
output_dir=args.output_dir)
if __name__ == '__main__':
main()
|
# Generated by Django 3.1.2 on 2020-10-15 03:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('survey', '0002_auto_20201010_0524'),
]
operations = [
migrations.RenameField(
model_name='survey',
old_name='survey_type_parameters',
new_name='survey_type_parameter',
),
]
|
from decouple import config
import time
import json
from cross_traffic import run
conf = config("TASK_PARAMS")
def f(data):
return json.dumps(run(data))
with open("/file.txt", "w") as writer:
writer.write(f(json.loads(conf)))
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
from astwro.exttools import Runner
class WCSUtilsRunner(Runner):
def __init__(self, translator=None):
# base implementation of __init__ calls `_reset` also
self.translator = translator
super(WCSUtilsRunner, self).__init__()
pass
|
"""
entradas
temperatura-->float-->a
Salidas
deporte-->str-->d
"""
a = float(input("Digite su temperatura :"))
if(a > 85):
print("Natacion")
elif(a > 70 and a <= 85):
print("Tenis")
elif(a > 32 and a <= 70):
print("Golf")
elif(a > 10 and a <= 32):
print("Esqui")
elif(a <= 10):
print("Marcha")
else:
print("No se indentifico ningun deporte")
|
from LaplaceWSAPIClient import *
from MLQueries import *
import pandas as pd
from io import StringIO
from ClientConfig import client_config
ut = UserTokenSerde(client_config["user"],client_config["token"])
target_column = "variety"
test = pd.read_csv('test.csv')
train = pd.read_csv('train.csv')
uri = client_config["wss"]
def listIt(x):
for n in x:
print(n)
if n == "result":
res = pd.read_csv(StringIO(x[n]))
print(res.info())
buildup = AutoEncoderQueryBuildup(30, "append", 100, 200)
query = AutoEncoderQuery(ut, "eforest_python", test, train, ["T"], buildup)
perforQueryAny(uri, prepareAutoEncoderQuery(query), "MLWrapper", lambda x: listIt(x))
|
import sys
sys.path.append("..")
from connections import connections
from aux_functions import aux_functions
from datetime import datetime
import psycopg2
################################################################# INSERT FACT ACESSO #################################################################
def insertFactAcesso(id_usuario, data_login, data_logoff, origem):
    connection = None
    cursor = None
    try:
        connection = connections.conexaoBanco()
        cursor = connection.cursor()
comando_insert = """ INSERT INTO fact_acesso (id_usuario, data_login, data_logoff, origem) VALUES (%s,%s,%s,%s)"""
values = (id_usuario, data_login, data_logoff, origem)
cursor.execute(comando_insert, values)
connection.commit()
print("INSERT FACT ACESSO OK: ", values)
return "Acesso inserido com sucesso!"
except (Exception, psycopg2.Error) as error:
print("Failed to insert record into mobile table", error)
return error
    finally:
        if cursor:
            cursor.close()
        if connection:
            connection.close()
            print("PostgreSQL connection is closed")
##########################################################################################################################################################
# CREATE A FILTER TO FEED THE FACT_ACESSO TABLE
# Fields: ID_USUARIO; ORIGEM; DATA_LOGIN; DATA_LOGOFF;
def dataHoraLogout(datetimeLogin, listLogoutUser):
vetor = []
for logoutUser in listLogoutUser:
if logoutUser['DateTime'] > datetimeLogin:
linha = logoutUser['DateTime']
vetor.append(linha)
if vetor != []:
menorDatetimeLogout = min(vetor)
return menorDatetimeLogout
else:
menorDatetimeLogout = 0
return menorDatetimeLogout
def filtroFactAcesso():
collection_login = connections.mongoConnection()['Logs']['Login']
for loginLogout in collection_login.find({"etl": {'$exists': False}}):
all_login_user = list(collection_login.find({'iduser': loginLogout['iduser'], 'funcao': 'Login'}))
all_logout_user = list(collection_login.find({'iduser': loginLogout['iduser'], 'funcao': 'Logout'}))
for login_user in all_login_user:
data_login = login_user['DateTime']
data_logoff = dataHoraLogout(data_login, all_logout_user)
result = insertFactAcesso(loginLogout['iduser'], data_login, data_logoff, 'P')
insertFactAcesso(loginLogout['iduser'], data_login, data_logoff, 'C')
            # FLAG 1 FOR DATA INSERTED CORRECTLY, 0 FOR FAILURE
_id = loginLogout['_id']
if result == "Acesso inserido com sucesso!":
aux_functions.atualizaFlag(_id, 'Logs', 'Login', 1)
elif "duplicate key value violates unique constraint" in str(result):
aux_functions.atualizaFlag(_id, 'Logs', 'Login', 1)
else:
aux_functions.atualizaFlag(_id, 'Logs', 'Login', 0)
|
from api.helper import is_me_query
from db.models import Student, ProfileState, ProfileType
def privacy_protection(match_only=False):
def decorator(func):
def wrapper(self: Student, info):
if is_me_query(info):
return func(self, info)
# check for possible matches
user = info.context.user
if not user.is_anonymous and user.type in ProfileType.valid_company_types():
if self.has_match(user.company):
return func(self, info)
# get out, if a match is required for this value
if match_only:
return None
if self.state == ProfileState.PUBLIC:
return func(self, info)
return None
return wrapper
return decorator
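# Minimal usage sketch (illustrative; resolver names are assumptions, not from the
# original code): guard graphene-style resolvers on a Student node so a field is
# only visible to the student themself, to matched companies, or (unless
# match_only) to anyone once the profile is public.
#
#   class StudentNode:
#       @privacy_protection()
#       def resolve_email(self: Student, info):
#           return self.email
#
#       @privacy_protection(match_only=True)
#       def resolve_phone(self: Student, info):
#           return self.phone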
|
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
from moredata.utils.osm_downloader import OSM_util
import csv
import pandas as pd
if __name__ == "__main__":
osm = OSM_util()
place_id = osm.get_place_ID("Brasil")
print(place_id)
key = "tourism"
value = "museum"
query = """
[out:json];
area(%s)->.searchArea;
(
node[%s=%s](area.searchArea);
way[%s=%s](area.searchArea);
relation[%s=%s](area.searchArea);
);
out geom;
>;
out skel qt;
""" % (place_id, key, value, key, value, key, value)
print(key, value)
df = osm.get_places("Brasil", key, value, query = query)
df.to_csv("../../data/output/osm/{}-{}.csv".format(key, value))
|
# internal
from src.translation import _
from src.ui.components import BaseDialog, SMButton, SMEdit, SaveSMButton, CancelSMButton
# pyqt
from PyQt5.QtCore import pyqtSignal, QObject
from PyQt5.QtWidgets import QHBoxLayout, QFormLayout, QLabel
class RegisterFormSignals(QObject):
"""Register Form Signals"""
showOptions = pyqtSignal(int)
class RegisterForm(BaseDialog):
"""Register Form"""
ID = 0
WCID = 1
def __init__(self, parent):
self.signals = RegisterFormSignals()
super().__init__(parent)
def setupDialog(self):
self.form = QFormLayout()
self.dialogLayout.addLayout(self.form)
# id
self.id = SMEdit()
self.idOptions = SMButton('...')
self.idOptions.setObjectName('Options')
idLayout = QHBoxLayout()
idLayout.addWidget(self.id)
idLayout.addWidget(self.idOptions)
self.form.addRow(QLabel(_('ID')), idLayout)
# wcid
self.wcid = SMEdit()
self.wcidOptions = SMButton('...')
self.wcidOptions.setObjectName('Options')
wcidLayout = QHBoxLayout()
wcidLayout.addWidget(self.wcid)
wcidLayout.addWidget(self.wcidOptions)
self.form.addRow(QLabel(_('WCID')), wcidLayout)
def setupControl(self):
self.btnSave = SaveSMButton(_('Save'))
self.btnCancel = CancelSMButton(_('Cancel'))
self.controlLayout.addWidget(self.btnSave)
self.controlLayout.addWidget(self.btnCancel)
def connectSignals(self):
self.btnCancel.clicked.connect(self.close)
self.idOptions.clicked.connect(lambda: self.signals.showOptions.emit(self.ID))
self.wcidOptions.clicked.connect(lambda: self.signals.showOptions.emit(self.WCID))
def getId(self):
return self.id.text()
def setId(self, value):
self.id.setText(value)
def getWcid(self):
return self.wcid.text()
def setWcid(self, value):
self.wcid.setText(value)
def clear(self):
self.id.clear()
self.wcid.clear()
|
from pathlib import Path
import yaml
__all__ = (
'mask_match', 'msg_join', 'msg_split',
'userhost_split', 'validate_hostname',
)
DATA_DIR = Path(__file__).resolve().parent / 'data'
def load_data(name):
with (DATA_DIR / '{}.yaml'.format(name)).open(encoding='utf-8') as f:
data = yaml.safe_load(f)
return data
mask_match = load_data('mask-match')
msg_join = load_data('msg-join')
msg_split = load_data('msg-split')
userhost_split = load_data('userhost-split')
validate_hostname = load_data('validate-hostname')
|
"""
Exception handler hook for RQ (http://python-rq.org/)
How to use:
1. Instead of using the default "rqworker" script to run the worker, write your own short script
as shown in this example:
https://github.com/nvie/rq/blob/master/examples/run_worker.py
2. In this script, initialize rollbar with `handler='blocking'`, for example:
rollbar.init('your access token', 'production', handler='blocking')
3. After constructing the worker but before calling `.work()`, add
`rollbar.contrib.rq.exception_handler` as an exception handler.
Full example:
```
import rollbar
from rq import Connection, Queue, Worker
if __name__ == '__main__':
rollbar.init('your_access_token', 'production', handler='blocking')
with Connection():
q = Queue()
worker = Worker(q)
worker.push_exc_handler(rollbar.contrib.rq.exception_handler)
worker.work()
```
"""
import rollbar
def exception_handler(job, *exc_info):
"""
Called by RQ when there is a failure in a worker.
NOTE: Make sure that in your RQ worker process, rollbar.init() has been called with
handler='blocking'. The default handler, 'thread', does not work from inside an RQ worker.
"""
# Report data about the job with the exception.
job_info = job.to_dict()
# job_info['data'] is the pickled representation of the job, and doesn't json-serialize well.
# repr() works nicely.
job_info['data'] = repr(job_info['data'])
extra_data = {'job': job_info}
payload_data = {'framework': 'rq'}
rollbar.report_exc_info(exc_info, extra_data=extra_data, payload_data=payload_data)
# continue to the next handler
return True
|
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import argparse
import cv2
import numpy as np
def get_args():
parser = argparse.ArgumentParser("Image to ASCII")
parser.add_argument("--input", type=str, default="data/input.jpg", help="Path to input image")
parser.add_argument("--output", type=str, default="data/output.txt", help="Path to output text file")
parser.add_argument("--mode", type=str, default="complex", choices=["simple", "complex"],
help="10 or 70 different characters")
parser.add_argument("--num_cols", type=int, default=150, help="number of character for output's width")
args = parser.parse_args()
return args
def main(opt):
if opt.mode == "simple":
CHAR_LIST = '@%#*+=-:. '
else:
CHAR_LIST = "$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\|()1{}[]?-_+~<>i!lI;:,\"^`'. "
num_chars = len(CHAR_LIST)
num_cols = opt.num_cols
image = cv2.imread(opt.input)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
height, width = image.shape
cell_width = width / opt.num_cols
cell_height = 2 * cell_width
num_rows = int(height / cell_height)
if num_cols > width or num_rows > height:
print("Too many columns or rows. Use default setting")
cell_width = 6
cell_height = 12
num_cols = int(width / cell_width)
num_rows = int(height / cell_height)
output_file = open(opt.output, 'w')
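    # Each output character stands for a cell_width x cell_height block of pixels:
    # the block's mean gray level (0-255) is scaled to an index into CHAR_LIST,
    # so darker blocks map to characters earlier in the list (denser glyphs).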
for i in range(num_rows):
for j in range(num_cols):
output_file.write(
CHAR_LIST[min(int(np.mean(image[int(i * cell_height):min(int((i + 1) * cell_height), height),
int(j * cell_width):min(int((j + 1) * cell_width),
width)]) * num_chars / 255), num_chars - 1)])
output_file.write("\n")
output_file.close()
if __name__ == '__main__':
opt = get_args()
main(opt)
|
"""
For example, consider just the first seven characters of FBFBBFFRLR:
Start by considering the whole range, rows 0 through 127.
F means to take the lower half, keeping rows 0 through 63.
B means to take the upper half, keeping rows 32 through 63.
F means to take the lower half, keeping rows 32 through 47.
B means to take the upper half, keeping rows 40 through 47.
B keeps rows 44 through 47.
F keeps rows 44 through 45.
The final F keeps the lower of the two, row 44.
The last three characters will be either L or R; these specify exactly
one of the 8 columns of seats on the plane (numbered 0 through 7).
The same process as above proceeds again, this time with only three steps.
L means to keep the lower half, while R means to keep the upper half.
"""
ROWS = list(range(128))
COLUMNS = list(range(8))
test_inp = [
'BFFFBBFRRR', # row 70, column 7, seat ID 567
'FFFBBBFRRR', # row 14, column 7, seat ID 119
'BBFFBBFRLL', # row 102, column 4, seat ID 820
]
def get_input():
with open("input_5", "r", encoding="utf-8") as f:
return [x.strip() for x in f.readlines()]
def narrow_choice(my_list, direction):
"""
Find the middle list index and return either the first or the second half of the list.
Middle index is respectively start or end point of the new list.
"""
index = len(my_list) // 2
if direction == "F" or direction == "L":
new_list = my_list[:index]
elif direction == "B" or direction == "R":
new_list = my_list[index:]
return new_list
def main():
contents = get_input()
max_seat_id = 0
for seat in contents:
rows = ROWS
cols = COLUMNS
for dir in seat[:7]:
rows = narrow_choice(rows, dir)
for dir in seat[7:]:
cols = narrow_choice(cols, dir)
seat_id = rows[0] * 8 + cols[0]
if seat_id > max_seat_id:
max_seat_id = seat_id
print(f"The maximal seat ID value is {max_seat_id}")
if __name__ == "__main__":
main()
|
import os
import time
import requests
import telebot
from dotenv import load_dotenv
load_dotenv()
TELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')
OPENWEATHER_TOKEN = os.getenv('OPENWEATHER_TOKEN')
bot = telebot.TeleBot(TELEGRAM_TOKEN)
@bot.message_handler(commands=['start'])
def start(message):
username = message.from_user.username
    greeting = (f'Привет, {username}! '
                'Введи название города и я расскажу тебе о погоде в нем.')
bot.send_message(message.from_user.id, greeting)
def weather_forcast(api_response):
condition = api_response['weather'][0]['description']
temp = round(api_response['main']['temp'])
feel_temp = round(api_response['main']['feels_like'])
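    # OpenWeather reports pressure in hPa; multiplying by 100 gives Pa and
    # dividing by 133.3224 (Pa per mmHg) converts it to millimetres of mercury.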
pressure = round(api_response['main']['pressure']*100//133.3224)
humidity = api_response['main']['humidity']
wind = api_response['wind']['speed']
return (f"Сейчас {temp}°С, ощущается как {feel_temp}°С, {condition}.\n"
f"Давление {pressure} мм. ртутного столба. "
f"Влажность воздуха {humidity}%.\n"
f"Скорость ветра {wind} м/с.")
def weather_recomendations(api_response):
if api_response['weather'][0]['id'] in [731, 751, 761, 762, 771, 781]:
recomendation = "На улице ужасная погода, сиди дома."
elif api_response['main']['feels_like'] < -20:
recomendation = "На улице очень холодно. Может лучше остаться дома?"
elif api_response['weather'][0]['id'] in [602, 613, 621, 622]:
recomendation = "На улице метель. Может лучше остаться дома?."
elif api_response['main']['feels_like'] < 0:
recomendation = "На улице холодно, одевайся теплее."
elif api_response['weather'][0]['id'] in range(200, 233):
recomendation = "На улице гроза. Может лучше остаться дома?"
elif api_response['weather'][0]['id'] in [502, 503, 504, 522, 531]:
recomendation = "На улице ливень. Может лучше остаться дома?"
elif api_response['weather'][0]['id'] in [500, 501, 520, 521]:
recomendation = "На улице дождь, не забудь взять зонт."
elif api_response['main']['feels_like'] < 15:
recomendation = "На улице прохладно, лучше надень куртку."
elif (api_response['main']['feels_like'] < 25 and
api_response['weather'][0]['id'] in range(800, 805)):
recomendation = "На улице хорошая погода, самое время для прогулки."
elif (api_response['main']['feels_like'] < 35 and
api_response['weather'][0]['id'] == 800):
recomendation = "На улице жарко и солнечно, старайся держаться тени."
elif api_response['main']['feels_like'] < 35:
recomendation = "На улице жарко, можно надеть шорты."
else:
recomendation = "На улице очень жарко, старайся пить больше воды."
return recomendation
@bot.message_handler(content_types=['text'])
def get_weather(message):
city = message.text
params = {
"appid": OPENWEATHER_TOKEN, "q": city, "lang": "ru", "units": "metric"}
api_result = requests.get(
'http://api.openweathermap.org/data/2.5/weather', params)
api_response = api_result.json()
if api_response['cod'] == "404":
bot.send_message(message.from_user.id,
'Извини, я не нашел такого города. '
'Проверь, правильно ли введено название.')
elif api_response['cod'] == "429":
bot.send_message(message.from_user.id,
                         'Превышено максимальное количество запросов к API. '
'Попробуйте позже.')
else:
forcast = weather_forcast(api_response)
recomendation = weather_recomendations(api_response)
bot.send_message(message.from_user.id, forcast)
bot.send_message(message.from_user.id, recomendation)
while True:
try:
bot.polling(none_stop=True, interval=0)
except ConnectionError as e:
print(f"Бот упал с ошибкой: {e}.")
time.sleep(5)
|
#!/usr/bin/env python
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from time import time
import structlog
log = structlog.get_logger()
class Session:
def __init__(self, session_id, user):
self.session_id = session_id
self.user = user
self.started_at = time()
self.session_opened = False
|
import mock
import pytest
from framework.auth import Auth
from osf.models import RegistrationSchema, Registration
from osf_tests.factories import (
AuthUserFactory,
ProjectFactory,
RegionFactory
)
@pytest.mark.django_db
class TestRegion:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def project_with_different_regions(self, user):
"""
A complex project configuration with many regions.
:param user:
:return:
"""
parent_node = root_node = ProjectFactory(creator=user)
# components have nested children
for _ in range(0, 1):
parent_node = ProjectFactory(creator=user, parent=parent_node)
addon = parent_node.get_addon('osfstorage')
addon.region = RegionFactory()
addon.save()
# root project has two direct children
for _ in range(0, 1):
parent_node = ProjectFactory(creator=user, parent=root_node)
addon = parent_node.get_addon('osfstorage')
addon.region = RegionFactory()
addon.save()
addon = root_node.get_addon('osfstorage')
addon.region = RegionFactory()
addon.save()
return root_node
@mock.patch('website.settings.ENABLE_ARCHIVER', False)
def test_regions_stay_after_registration(self, user, project_with_different_regions):
"""
Registering a project with components of different regions should keep those regions after registration.
:param user:
:param project_with_different_regions:
:return:
"""
schema = RegistrationSchema.objects.first()
project_with_different_regions.register_node(schema, Auth(user=user), '41-33')
regs = Registration.objects.all()
# Sanity check all regions are different from each other
assert regs.count() == len({reg.get_addon('osfstorage').region._id for reg in regs})
# All registrations should have the same region as the node they are registered from.
assert all(reg.registered_from.get_addon('osfstorage').region ==
reg.get_addon('osfstorage').region for reg in regs)
|
from sardana.tango.macroserver import main
if __name__ == "__main__":
main()
|
# https://app.codesignal.com/challenge/Cg7JueyPBNPfycb7x
def find_value(string, i, end, k):
if k < 0:
return False
if i == end:
return True
while string[i] == string[end]:
i += 1
end -= 1
if abs(end - i) <= 1:
return True
if find_value(string, i + 1, end, k - 1):
return True
if find_value(string, i, end - 1, k - 1):
return True
return False
def kpalindrome(s, k):
return find_value(s, 0, len(s) - 1, k)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Define the *Z order* of drawn objects
See http://matplotlib.org/examples/pylab_examples/zorder_demo.html
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as mlines
# Plot data #################
fig, ax = plt.subplots(figsize=(5, 5))
ax.axis('equal')
plt.axis([-10, 10, -10, 10])
# Example background object with plot
ax.plot((-7, 7), (7, -7), lw=2, color="r", zorder=1)
# Example background object with patch
line = mlines.Line2D((-7, 7), (-7, 7), lw=2, color="g")
line.set_zorder(1) # <- PUT THE LINE BELOW THE CIRCLE
ax.add_line(line)
# Foreground object
circle = patches.Circle((0, 0), 3, fill=True, edgecolor="b", facecolor="r")
circle.set_zorder(2) # <- PUT THE CIRCLE ON TOP
ax.add_patch(circle)
# Save file #################
plt.savefig("zorder.png")
# Plot ######################
plt.show()
|
from copy import copy, deepcopy
import pytest
from returns.maybe import Nothing, Some, _Nothing
from returns.primitives.exceptions import ImmutableStateError
from returns.primitives.interfaces import (
Bindable,
Instanceable,
Mappable,
Unwrapable,
)
@pytest.mark.parametrize('container', [
Nothing,
Some(1),
])
@pytest.mark.parametrize('protocol', [
Bindable,
Mappable,
Unwrapable,
Instanceable,
])
def test_protocols(container, protocol):
"""Ensures that Maybe has all the right protocols."""
assert isinstance(container, protocol)
def test_equality():
"""Ensures that containers can be compared."""
assert Nothing is Nothing # noqa: WPS312
assert Nothing == _Nothing() == _Nothing(None)
assert Some(5) == Some(5)
assert hash(Some(1))
assert hash(Nothing)
def test_nonequality():
"""Ensures that containers are not compared to regular values."""
assert Nothing is not None
assert Nothing != None # noqa: E711
assert _Nothing(None) != None # noqa: E711
assert Some(5) != 5
assert Some(3) is not Some(3)
def test_is_compare():
"""Ensures that `is` operator works correctly."""
some_container = Some(1)
assert Nothing.bind(lambda state: state) is Nothing
assert some_container is not Some(1)
def test_immutability_failure():
"""Ensures that Failure container is immutable."""
with pytest.raises(ImmutableStateError):
Nothing._inner_state = 1 # noqa: WPS437
with pytest.raises(ImmutableStateError):
Nothing.missing = 2
with pytest.raises(ImmutableStateError):
del Nothing._inner_state # type: ignore # noqa: WPS420, WPS437
with pytest.raises(AttributeError):
Nothing.missing # type: ignore # noqa: WPS428
def test_immutability_success():
"""Ensures that Success container is immutable."""
with pytest.raises(ImmutableStateError):
Some(0)._inner_state = 1 # noqa: WPS437
with pytest.raises(ImmutableStateError):
Some(1).missing = 2
with pytest.raises(ImmutableStateError):
del Some(0)._inner_state # type: ignore # noqa: WPS420, WPS437
with pytest.raises(AttributeError):
Some(1).missing # type: ignore # noqa: WPS428
def test_success_immutable_copy():
"""Ensures that Success returns it self when passed to copy function."""
some = Some(1) # noqa: WPS110
assert some is copy(some)
def test_success_immutable_deepcopy():
"""Ensures that Success returns it self when passed to deepcopy function."""
some = Some(1) # noqa: WPS110
assert some is deepcopy(some)
def test_failure_immutable_copy():
"""Ensures that Failure returns it self when passed to copy function."""
nothing = _Nothing()
assert nothing is copy(nothing)
def test_failure_immutable_deepcopy():
"""Ensures that Failure returns it self when passed to deepcopy function."""
nothing = _Nothing()
assert nothing is deepcopy(nothing)
|
import numpy as np
import rospy
import gym # https://github.com/openai/gym/blob/master/gym/core.py
from gym.utils import seeding
from .gazebo_connection import GazeboConnection
class GymGazeboEnv(gym.Env):
def __init__(self, start_init_physics_parameters=True, reset_world_or_sim="WORLD"):
# To reset Simulations
rospy.logdebug("START init RobotGazeboEnv")
self.gazebo = GazeboConnection(start_init_physics_parameters,reset_world_or_sim)
self.seed()
rospy.logdebug("END init RobotGazeboEnv")
# Env methods
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""
obs, reward, done, info = env.step(action)
"""
# Convert the action num to movement action
self.gazebo.unpauseSim()
self._take_action(action)
self.gazebo.pauseSim()
obs = self._observe()
reward = self._compute_reward()
done = self._is_done(obs, self.goal_position)
info = self._get_info() # {"goal": self.goal_position}
return obs, reward, done, info
def reset(self):
"""
obs, info = env.reset()
"""
# self.gazebo.pauseSim()
rospy.logdebug("Reseting RobotGazeboEnvironment")
self._reset_sim()
self.gazebo.unpauseSim()
self.init_position, self.goal_position = self._set_init()
self.gazebo.pauseSim()
obs = self._observe()
info = self._get_info()
rospy.logdebug("END Reseting RobotGazeboEnvironment")
return obs, info
def close(self):
"""
Function executed when closing the environment.
Use it for closing GUIS and other systems that need closing.
:return:
"""
rospy.logwarn("Closing RobotGazeboEnvironment")
rospy.signal_shutdown("Closing RobotGazeboEnvironment")
def _reset_sim(self):
"""Resets a simulation
"""
rospy.logdebug("START robot gazebo _reset_sim")
self.gazebo.pauseSim()
self.gazebo.resetSim()
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self.gazebo.pauseSim()
rospy.logdebug("END robot gazebo _reset_sim")
return True
def _set_init(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
raise NotImplementedError()
def _observe(self):
"""Returns the observation.
"""
raise NotImplementedError()
def _take_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _is_done(self, observations, goal_position):
"""Indicates whether or not the episode is done ( the robot has fallen for example).
"""
raise NotImplementedError()
def _compute_reward(self):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _env_setup(self, initial_qpos):
"""Initial configuration of the environment. Can be used to configure initial state
and extract information from the simulation.
"""
raise NotImplementedError()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import SimpleTestCase, TestCase
from social_hashtags.api import (
parse_hashtags_from_text, parse_hashtags
)
from social_hashtags.tests.models import Post
from social_hashtags.settings import TAGS_MANAGER_NAME
text_with_hashtags = """
Hashtags
#hashtag text
#hashtag
text #hashtag
text #1tag
text.#hashtag
text #hashtag
text #hashtag!
text #hashtag1 #hashtag2
text #hash_tagüäö
text #hash0tag
text #hash_tag
Hashtags non-ACSII
#хэш_тег
#中英字典
#الأبجدية
Not hashtags
text #1234
&#nbsp;
text#hashtag
"""
class ApiParseHashtagsFromTextTestCase(SimpleTestCase):
def test_if_correct_parse_hashtags_from_text(self):
self.assertEqual(
list(parse_hashtags_from_text(text_with_hashtags)),
['hashtag', 'hashtag', 'hashtag', '1tag', 'hashtag', 'hashtag',
'hashtag', 'hashtag1', 'hashtag2', 'hash_tagüäö', 'hash0tag',
'hash_tag', 'хэш_тег', '中英字典', 'الأبجدية']
)
class ApiParseHashtagsTestCase(TestCase):
def test_if_saves_hashtags(self):
post = Post.objects.create(message='Hello')
parse_hashtags(post, '#hashtag #хэш_тэг')
post = Post.objects.get(message='Hello')
hashtag_manager = getattr(post, TAGS_MANAGER_NAME)
self.assertIn('hashtag', hashtag_manager.names())
self.assertIn('хэш_тэг', hashtag_manager.names())
|
## A standard exception to indicate a type error in a function call.
class IntTypeError(Exception):
def __init__(self, desc):
self.desc = desc
def __str__(self):
return self.desc
## A standard Descriptor class to simplify and encapsulate class attributes.
class Descriptor(object):
def __init__(self, get_func, set_func):
self.get_func = get_func
self.set_func = set_func
def __get__(self, instance, owner):
return self.get_func(instance)
def __set__(self, instance, value):
return self.set_func(instance, value)
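# Illustrative usage only (hypothetical class, not part of the original module):
# the `value` attribute below is mediated by Descriptor, and its setter uses
# IntTypeError (defined above) to reject non-integer assignments.
class _ExampleWithDescriptor(object):
    def _get_value(self):
        return self._value
    def _set_value(self, value):
        if not isinstance(value, int):
            raise IntTypeError("value must be an int, got {}".format(type(value).__name__))
        self._value = value
    value = Descriptor(_get_value, _set_value)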
|
import pandas as pd
import os
from dateutil import parser
def get_match_history(gr_df, date, team1, team2):
    previous_games = gr_df[
        (((gr_df['home'] == team1) | (gr_df['home'] == team2)) &
         ((gr_df['away'] == team1) | (gr_df['away'] == team2))) &
        (gr_df['date'] < date)
    ]
team1_wins = 0
team1_win_differential = 0
team1_lose_differential = 0
team2_wins = 0
for index, game in previous_games.iterrows():
if game["home_score"] > game["away_score"]:
if game["home"] == team1:
team1_wins += 1
team1_win_differential += game["home_score"] - game["away_score"]
else:
team2_wins += 1
team1_lose_differential += game["away_score"] - game["home_score"]
else:
if game["away"] == team1:
team1_wins += 1
team1_win_differential += game["away_score"] - game["home_score"]
else:
team2_wins += 1
team1_lose_differential += game["home_score"] - game["away_score"]
if len(previous_games) == 0:
return team1_wins, team2_wins, team1_win_differential, team1_lose_differential, 0, 0, 0, 0, 0
else:
last_game = previous_games.iloc[-1]
last_game_date = parser.parse(last_game["date"])
curr_date = parser.parse(date)
num_days = (curr_date - last_game_date).days
if last_game["home"] == team1:
team1_last_differential = last_game["home_score"] - last_game["away_score"]
if last_game["home_score"] > last_game["away_score"]:
return team1_wins, team2_wins, team1_win_differential, team1_lose_differential, team1_win_differential / (team1_wins + team2_wins), team1_lose_differential / (team1_wins + team2_wins), team1_last_differential, 1, num_days
else:
return team1_wins, team2_wins, team1_win_differential, team1_lose_differential, team1_win_differential / (team1_wins + team2_wins), team1_lose_differential / (team1_wins + team2_wins), team1_last_differential, -1, num_days
else:
team1_last_differential = last_game["away_score"] - last_game["home_score"]
if last_game["away_score"] > last_game["home_score"]:
return team1_wins, team2_wins, team1_win_differential, team1_lose_differential, team1_win_differential / (team1_wins + team2_wins), team1_lose_differential / (team1_wins + team2_wins), team1_last_differential, 1, num_days
else:
return team1_wins, team2_wins, team1_win_differential, team1_lose_differential, team1_win_differential / (team1_wins + team2_wins), team1_lose_differential / (team1_wins + team2_wins), team1_last_differential, -1, num_days
def main():
pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_columns', 500)
seasons = []
for year in os.listdir("../dataframes"):
if not year.startswith('.'):
seasons.append(year)
# seasons = ["2010"]
for season in seasons:
matchup_dict = {"date": [], "home": [], "away": [], "home_wins": [], "away_wins": [], "home_win_t_diff": [], "home_lose_t_diff": [], "home_win_avg_diff": [], "home_lose_avg_diff": [], "home_prev_diff": [], "home_prev_win": [], "last_match": []}
gr_df = pd.read_pickle("../dataframes/" + season + "/" + season + "_game_results")
for index, game in gr_df.iterrows():
date = game["date"]
team1 = game["home"]
team2 = game["away"]
home_wins, away_wins, home_win_diff, home_lose_diff, home_win_diff_avg, home_lose_diff_avg, home_prev_diff, home_prev_win, num_days = get_match_history(gr_df, date, team1, team2)
matchup_dict["date"].append(date)
matchup_dict["home"].append(team1)
matchup_dict["away"].append(team2)
matchup_dict["home_wins"].append(home_wins)
matchup_dict["away_wins"].append(away_wins)
matchup_dict["home_win_t_diff"].append(home_win_diff)
matchup_dict["home_lose_t_diff"].append(home_lose_diff)
matchup_dict["home_win_avg_diff"].append(home_win_diff_avg)
matchup_dict["home_lose_avg_diff"].append(home_lose_diff_avg)
matchup_dict["home_prev_diff"].append(home_prev_diff)
matchup_dict["home_prev_win"].append(home_prev_win)
matchup_dict["last_match"].append(num_days)
matchup_df = pd.DataFrame(matchup_dict)
# print(matchup_df)
matchup_df.to_pickle("../dataframes/" + season + "/" + season + "_matchup_history")
if __name__ == "__main__":
main()
|
import os
from libqtile import widget
def test_thermal_zone_getting_value():
# Create temporary zone file
tmp = "/var/tmp/qtile/test/widgets/thermal_zone"
zone_file = tmp + "/sys/class/thermal/thermal_zone0/temp"
os.makedirs(os.path.dirname(zone_file), exist_ok=True)
class FakeLayout:
pass
with open(zone_file, "w") as f:
f.write("22000")
thermal_zone = widget.ThermalZone(zone=zone_file)
thermal_zone.layout = FakeLayout()
output = thermal_zone.poll()
assert output == "22°C"
|
###################################################################################################
# Copyright © 2021 Neal Meswania
# Lisence: MIT
###################################################################################################
import json
import logging
import queue
from jsonschema import validate, RefResolver
from pathlib import PurePath
from multiprocessing import Queue
from dnd_app.core.config import Config
from dnd_app.failure_handler.log_failure import LogFailure
from dnd_app.request_handler.request import Request
from dnd_app.request_handler.response import Response
from dnd_app.request_handler.exceptions import RequestHandlerException, \
FailedToProcessRequest, \
FailedToValidateRequestedData
###################################################################################################
###################################################################################################
###################################################################################################
class RequestHandler:
def __init__(self, config: Config, request_queue: Queue, failure_queue: Queue):
self._config = config
self._request_queue = request_queue
self._failure_queue = failure_queue
###################################################################################################
def __call__(self):
while True:
try:
request_dispatch = self._request_queue.get(
block=True, timeout=self._config.get_common("queue_get_timeout"))
request_id = request_dispatch.request.id()
response = self._ProcessNewRequest(request_dispatch.request)
request_dispatch.pipe_connection.send(response)
logging.info(f"Processed request: {request_id}")
except queue.Empty:
pass
except RequestHandlerException as err:
LogFailure(type="Request Handler", message=err, queue=self._failure_queue)
###################################################################################################
def _ProcessNewRequest(self, request: Request) -> Response:
try:
response_data = self._ParseJSONMatchingGlob(f"{request.type()}/{request.value()}.json")
self._MaybeValidateResponseData(response_data, request)
except Exception as err:
logging.error(f"Failed to process request: {request.type()}::{request.value()}, "
f"{request.id()}")
raise FailedToProcessRequest(err)
else:
return Response(request, response_data)
###################################################################################################
def _MaybeValidateResponseData(self, response_data: dict, request: Request) -> dict:
schema = self._GetSchemaForRequestTypeValue(request.type(), request.value())
resolver = self._GetSchemaRefResolver(schema)
try:
validate(instance=response_data, schema=schema, resolver=resolver)
except Exception as err:
logging.critical(f"Validation failed, got exception:\n{err}")
raise FailedToValidateRequestedData(err)
###################################################################################################
def _GetSchemaForRequestTypeValue(self, request_type: str, request_value: str):
schema_type = request_type
if request_type == "character":
schema_type = f"{request_value.split('/')[-1]}"
return self._ParseJSONMatchingGlob(f"schemas/{schema_type}_schema.json")
###################################################################################################
def _GetSchemaRefResolver(self, schema: dict) -> RefResolver:
data_dir = self._config.get_data_dir() / "schemas"
data_dir = PurePath(data_dir)
data_dir = data_dir.relative_to(data_dir.drive)
return RefResolver(referrer=schema, base_uri="file://" + str(data_dir.as_posix()) + "/")
###################################################################################################
def _ParseJSONMatchingGlob(self, pattern: str) -> dict:
found_file = self._LoadFileMatchingGlob(pattern)
with open(found_file, 'r') as reader:
return json.load(reader)
###################################################################################################
def _LoadFileMatchingGlob(self, pattern: str):
data_dir = self._config.get_data_dir()
found_files = sorted(data_dir.glob(pattern))
if len(found_files) == 0:
logging.critical(f"No matching pattern '{pattern}'")
raise FailedToProcessRequest
elif len(found_files) > 1:
logging.warning(f"Found {len(found_files)} matching pattern '{pattern}', using first match")
return found_files[0]
###################################################################################################
###################################################################################################
###################################################################################################
|
from django.db import models
from rest_framework import authentication, permissions, filters
class ManagerMainMixin(models.Manager):
def get_queryset(self):
return super(ManagerMainMixin, self).get_queryset().filter(deleted_at__isnull=True)
class TimeStampedMixin(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
deleted_at = models.DateTimeField(blank=True, null=True)
objects = ManagerMainMixin()
class Meta:
abstract = True
class DefaultViewSetMixin(object):
# authentication_classes= (
# authentication.BaseAuthentication,
# authentication.TokenAuthentication,
# )
# permissions_classes = (
# permissions.IsAuthenticated,
# )
filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter, filters.SearchFilter,)
paginate_by = 25
paginate_by_param = 'page_size'
max_paginate_by = 100
|
"""
Tests for BASIL module
FIXME need more multi-step tests
FIXME need to test more error conditions
"""
import pytest
import numpy as np
from fsl.data.image import Image
from oxasl import AslImage, Workspace
import oxasl.basil as basil
pytest.skip("skipping basil tests as neeed rewrite", allow_module_level=True)
DEFAULTS = {
"method" : "vb",
"noise" : "white",
"allow-bad-voxels" : True,
"convergence" : "trialmode",
"max-iterations" : 20,
"max-trials" : 10,
"model" : "aslrest",
"disp" : "none",
"exch" : "mix",
"inctiss" : True,
"incbat" : True,
"infertiss" : True,
"inferbat" : True,
}
def _get_defaults(img):
options = dict(DEFAULTS)
for idx, ti in enumerate(img.tis):
options["ti%i" % (idx+1)] = ti
options["rpt%i" % (idx+1)] = img.rpts[idx]
return options
def _check_step(step, desc_text=None, options=None):
print(step.options)
if desc_text:
assert(desc_text.lower().strip() in step.desc.lower())
if options:
for k, v in options.items():
assert(step.options[k] == v)
def test_nodata():
"""
Check we get an error if there is no data
"""
wsp = Workspace()
with pytest.raises(ValueError):
steps = basil.basil_steps(wsp, None)
def test_infer_nothing():
"""
Check we get an error if there is nothing to infer
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace()
with pytest.raises(ValueError):
steps = basil.basil_steps(wsp, img)
def test_defaults():
"""
Check the basic defaults (infer tissue perfusion and bolus arrival time)
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace()
wsp.infertiss = True
wsp.inferbat = True
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 1)
options = _get_defaults(img)
_check_step(steps[0], desc_text="tissue", options=options)
def test_fix_bat():
"""
Check fixing the arrival time, which is normally inferred
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=False)
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 1)
options = _get_defaults(img)
options.pop("incbat")
options.pop("inferbat")
_check_step(steps[0], desc_text="tissue", options=options)
def test_inferart():
"""
Check inference of arterial component
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True, inferart=True)
options = _get_defaults(img)
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 2)
_check_step(steps[0], desc_text="tissue",
options=dict(options, **{
"incart" : True,
}))
_check_step(steps[1], desc_text="arterial",
options=dict(options, **{
"incart" : True,
"inferart" : True,
}))
def test_infertau():
"""
Check inference of bolus duration (tau)
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True, infertau=True)
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 2)
options = _get_defaults(img)
_check_step(steps[0], desc_text="tissue",
options=dict(options, **{
"inctau" : True,
}))
_check_step(steps[1], desc_text="bolus",
options=dict(options, **{
"inctau" : True,
"infertau" : True,
}))
def test_inferarttau():
"""
Check inference of bolus duration (tau) and arterial component
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True, infertau=True, inferart=True)
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 3)
options = _get_defaults(img)
_check_step(steps[0], desc_text="tissue",
options=dict(options, **{
"inctau" : True,
"incart" : True,
}))
_check_step(steps[1], desc_text="arterial",
options=dict(options, **{
"inctau" : True,
"incart" : True,
"inferart" : True,
}))
_check_step(steps[2], desc_text="bolus",
options=dict(options, **{
"inctau" : True,
"incart" : True,
"infertau" : True,
"inferart" : True,
}))
def test_infert1():
"""
Check inference of T1
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True, infert1=True)
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 2)
options = _get_defaults(img)
_check_step(steps[0], desc_text="tissue",
options=dict(options, **{
"inct1" : True,
}))
_check_step(steps[1], desc_text="T1",
options=dict(options, **{
"inct1" : True,
"infert1" : True,
}))
def test_t1im():
"""
Check T1 image priors are correctly handled
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True, infert1=True)
wsp.t1im = Image(np.random.rand(5, 5, 5))
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 2)
options = _get_defaults(img)
_check_step(steps[0], desc_text="tissue",
options=dict(options, **{
"inct1" : True,
}))
_check_step(steps[1], desc_text="T1",
options=dict(options, **{
"inct1" : True,
"infert1" : True,
"PSP_byname1" : "T_1",
"PSP_byname1_type" : "I",
#"PSP_byname1_image" : "t1file",
}))
def test_inferpc():
"""
    Check the pre-capillary component
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True, inferpc=True)
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 2)
options = _get_defaults(img)
_check_step(steps[0], desc_text="tissue",
options=dict(options, **{
"incpc" : True,
}))
_check_step(steps[1], desc_text="pre-capiliary",
options=dict(options, **{
"incpc" : True,
"inferpc" : True,
}))
def test_artonly():
"""
Check we can infer arterial component without tissue step
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=False, inferbat=True, inferart=True)
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 1)
options = _get_defaults(img)
options.update({
"incart" : True,
"inferart" : True,
})
options.pop("inctiss")
options.pop("infertiss")
_check_step(steps[0], desc_text="arterial", options=options)
def test_initmvn():
"""
Check the supply of an initialization MVN
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True)
wsp.initmvn = Image(np.random.rand(5, 5, 5, 6))
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 1)
options = _get_defaults(img)
options.update({
"continue-from-mvn" : wsp.initmvn
})
_check_step(steps[0], desc_text="tissue", options=options)
def test_spatial():
"""
Check final spatial step
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True, spatial=True)
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 2)
options = _get_defaults(img)
_check_step(steps[0], desc_text="tissue", options=options)
options.update({
"method" : "spatialvb",
"param-spatial-priors" : "N+",
"PSP_byname1" : "ftiss",
"PSP_byname1_type" : "M",
"convergence" : "maxits",
})
options.pop("max-trials")
_check_step(steps[1], desc_text="spatial", options=options)
def test_onestep():
"""
Check that single step mode works when you would normally get multiple steps
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True, infertau=True, inferart=True, spatial=True, onestep=True)
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 1)
options = _get_defaults(img)
options.update({
"method" : "spatialvb",
"param-spatial-priors" : "N+",
"PSP_byname1" : "ftiss",
"PSP_byname1_type" : "M",
"inctau" : True,
"incart" : True,
"inferart" : True,
"infertau" : True,
"convergence" : "maxits",
})
options.pop("max-trials")
_check_step(steps[0], desc_text="spatial", options=options)
def test_max_iterations():
"""
Check that max iterations can be overridden
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True)
kwargs = {
"max-iterations" : 123,
}
steps = basil.basil_steps(wsp, img, **kwargs)
assert(len(steps) == 1)
options = _get_defaults(img)
options.update({
"max-iterations" : 123,
})
_check_step(steps[0], desc_text="tissue", options=options)
def test_random_extra_options():
"""
Check that any additional keyword arguments are passed to Fabber
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True)
kwargs = {
"phase-of-moon-correction-factor" : 7,
"random-output-proportion-percent" : 36,
}
steps = basil.basil_steps(wsp, img, **kwargs)
assert(len(steps) == 1)
options = _get_defaults(img)
options.update(kwargs)
_check_step(steps[0], desc_text="tissue", options=options)
def test_pvc_only_one_map_given1():
"""
Check that PVC correction fails if you only give the GM map
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True)
wsp.pgm = Image(np.random.rand(5, 5, 5))
with pytest.raises(ValueError):
basil.basil_steps(wsp, img)
def test_pvc_only_one_map_given2():
"""
Check that PVC correction fails if you only give the WM map
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True)
wsp.pwm = Image(np.random.rand(5, 5, 5))
with pytest.raises(ValueError):
basil.basil_steps(wsp, img)
def test_pvc_no_tissue():
"""
Check that PVC correction fails if you do not infer the tissue component
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=False, inferbat=True)
wsp.pgm = Image(np.random.rand(5, 5, 5))
wsp.pwm = Image(np.random.rand(5, 5, 5))
with pytest.raises(ValueError):
basil.basil_steps(wsp, img)
def test_pvc():
"""
FIXME we need to test the PVC initialization step
and how to do this is not finalized
"""
d = np.random.rand(5, 5, 5, 6)
img = AslImage(name="asldata", image=d, tis=[1.5], order="prt")
wsp = Workspace(infertiss=True, inferbat=True)
wsp.pgm = Image(np.random.rand(5, 5, 5))
wsp.pwm = Image(np.random.rand(5, 5, 5))
steps = basil.basil_steps(wsp, img)
assert(len(steps) == 3)
options = _get_defaults(img)
# options.update({
# "incpve" : True,
# })
_check_step(steps[0], desc_text="tissue", options=options)
# _check_step(steps[1], desc_text="PVE", options=options)
options.update({
"method" : "spatialvb",
"param-spatial-priors" : "N+",
"PSP_byname1" : "ftiss",
"PSP_byname1_type" : "M",
"max-iterations" : 200,
"convergence" : "maxits",
})
options.pop("max-trials")
_check_step(steps[2], desc_text="spatial")
|
from django.contrib.auth.models import User
from django.db import models
import binascii
import os
class Media(models.Model):
name = models.CharField(
max_length=10,
verbose_name='이름'
)
rss_list = models.TextField(
verbose_name='RSS 리스트'
)
political_view = models.FloatField(
verbose_name='성향'
)
icon = models.URLField(
verbose_name='아이콘'
)
mid = models.IntegerField(
verbose_name='식별자'
)
def __str__(self):
return self.name
class Cluster(models.Model):
cluster_id = models.IntegerField(
verbose_name='클러스터 식별자'
)
def __str__(self):
return str(self.cluster_id)
class Article(models.Model):
title = models.TextField(
verbose_name='제목'
)
content = models.TextField(
verbose_name='본문'
)
morphemed_content = models.TextField(
verbose_name='형태소 본문'
)
media = models.ForeignKey(
Media,
verbose_name='신문사',
on_delete=models.CASCADE
)
writer = models.CharField(
max_length=10,
verbose_name='작성자',
null=True
)
published_at = models.DateField(
auto_now_add=True,
verbose_name='발행일'
)
article_url = models.URLField(
verbose_name='URL 링크',
unique=True
)
category = models.CharField(
max_length=10,
verbose_name='분류',
default='정치',
null=True
)
cluster = models.ForeignKey(
Cluster,
verbose_name='클러스터',
null=True,
on_delete=models.SET_NULL
)
def __str__(self):
return str(self.media) + '-' + self.title
class UserBlackList(models.Model):
user = models.ForeignKey(
User,
verbose_name='사용자',
on_delete=models.CASCADE
)
media = models.ForeignKey(
Media,
verbose_name='언론사',
on_delete=models.CASCADE
)
def __str__(self):
return str(self.user) + '-' + str(self.media)
def get_media(self):
return str(self.media)
class Report(models.Model):
article_a = models.ForeignKey(
Article,
related_name='reported_article_a',
on_delete=models.CASCADE,
verbose_name='기사A'
)
article_b = models.ForeignKey(
Article,
related_name='reported_article_b',
on_delete=models.CASCADE,
verbose_name='기사B'
)
user = models.ForeignKey(
User,
verbose_name='사용자',
null=True,
on_delete=models.SET_NULL
)
content = models.TextField(
verbose_name='제보내용',
null=True
)
def __str__(self):
return str(self.user) + '-' + str(self.id)
class UserProfile(models.Model):
user = models.ForeignKey(
User,
verbose_name='사용자',
on_delete=models.CASCADE
)
shown_news = models.IntegerField(
default=0,
verbose_name='뉴스피드 등장한 뉴스'
)
clicked_news = models.IntegerField(
default=0,
verbose_name='클릭한 뉴스'
)
token = models.CharField(
max_length=10,
default='',
verbose_name='토큰',
)
def save_token(self):
self.token = binascii.hexlify(os.urandom(10)).decode("utf-8")
self.save()
return self.token
|
import ustruct
from eoslib import *
import db
g_scope = N('cache')
g_code = N('cache')
payer = N('cache')
class CList(object):
def __init__(self,table_id):
self._list = []
self._dirty_keys = {}
self.table_id = N('list'+str(table_id))
def load(self):
itr = db.end_i64(g_code, g_scope, self.table_id)
if itr == -1: #no value in table
return
while True:
itr, key = db.previous_i64(itr)
if itr < 0:
break
value = db.get_i64(itr)
            value_type, = ustruct.unpack('B', value[:1])
            _value = None
            if value_type == 0: #int
                _value = int.from_bytes(value[1:], 'little')
            elif value_type == 1: #str
                _value = value[1:]
            else:
                raise TypeError('unknown value type')
            # records are iterated from the end of the table backwards,
            # so insert at the front to restore list order
            self._list.insert(0, _value)
def update(self, key, val):
id = key
value_type, raw_value_length, raw_value_data = self.get_raw_data(val)
_value = ustruct.pack('B', value_type)
_value += raw_value_data
itr = db.find_i64(g_code, g_scope, self.table_id, id)
if itr < 0:
db.store_i64(g_scope, self.table_id, payer, id, _value)
else:
db.update_i64(itr, payer, _value)
def store(self):
for key in self._dirty_keys:
self.update(key, self._list[key])
def get_hash(self, v):
if type(v) is int:
return v
elif type(v) in (str, bytes):
return hash64(v)
elif type(v) in (storage_dict, storage_list):
return v.table_id
else:
raise TypeError('unsupported value type')
def get_type(self,val):
if type(val) == int:
return 0
elif type(val) == str:
return 1
elif type(val) == storage_list:
return 2
elif type(val) == storage_dict:
return 3
else:
raise TypeError('unsupported type')
def get_raw_data(self, data):
data_type = self.get_type(data)
raw_data = 0
raw_length = 0
if data_type == 0: #int
raw_length = 8
raw_data = int.to_bytes(data, 8, 'little')
elif data_type == 1: #str
raw_length = len(data)
raw_data = data
return (data_type, raw_length, raw_data)
def __getitem__(self, index):
return self._list[index]
def __setitem__(self, index, val):
if index < len(self._list) and self._list[index] == val:
return
else:
self._list[index] = val
self._dirty_keys[index] = True
def __iter__(self):
return iter(self._list)
def __len__(self):
return len(self._list)
def __delitem__(self, index):
id = index
        del self._list[index]
        self._dirty_keys.pop(index, None)
itr = db.find_i64(g_code, g_scope, self.table_id, id)
if itr >= 0:
db.remove_i64(itr)
def __repr__(self):
return '%s(%s)' % (type(self).__name__, str(self._list))
# key_type key_length value_type value_length key_data value_data
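# Illustrative example of the layout above: storing the pair 100 -> 'hello'
# (int key, str value) writes ustruct.pack('BHBH', 0, 8, 1, 5) as the header,
# followed by the 8-byte little-endian key bytes and then the 5 value bytes.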
class cache_dict(object):
def __init__(self,table_id):
self._dict = {}
self._dirty_keys = {}
self.table_id = N('cache'+str(table_id))
def load(self):
        itr = db.end_i64(g_code, g_scope, self.table_id)
if itr == -1: #no value in table
return
while True:
itr, key = db.previous_i64(itr)
if itr < 0:
break
value = db.get_i64(itr)
key_type, key_length, value_type, value_length = ustruct.unpack('BHBH', value)
_value = None
if key_type == 0: #int
_key = int.from_bytes(value[8:8+key_length], 'little')
elif key_type == 1: #str
_key = value[8:8+key_length:]
elif key_type == 2: #list
table_id = int.from_bytes(value[8:8+key_length], 'little')
_key = storage_list(table_id)
elif key_type == 3:#dict
table_id = int.from_bytes(value[8:8+key_length], 'little')
_key = storage_dict(table_id)
else:
raise TypeError('unknown key type')
if value_type == 0: #int
_value = int.from_bytes(value[8+key_length:], 'little')
elif value_type == 1: #str
_value = value[8+key_length:]
elif value_type == 2: #list
table_id = int.from_bytes(value[8+key_length:], 'little')
_value = storage_list(table_id)
elif value_type == 3:#dict
table_id = int.from_bytes(value[8+key_length:], 'little')
_value = storage_dict(table_id)
else:
                raise TypeError('unknown value type')
self._dict[_key] = _value
def update(self, key, val):
id = self.get_hash(key)
key_type, raw_key_length, raw_key_data = self.get_raw_data(key)
value_type, raw_value_length, raw_value_data = self.get_raw_data(val)
_value = ustruct.pack('BHBH', key_type, raw_key_length, value_type, raw_value_length)
_value += raw_key_data
_value += raw_value_data
itr = db.find_i64(g_code, g_scope, self.table_id, id)
if itr < 0:
db.store_i64(g_scope, self.table_id, payer, id, _value)
else:
db.update_i64(itr, payer, _value)
def store(self):
for key in self._dirty_keys:
self.update(key, self._dict[key])
def get_hash(self, v):
if type(v) is int:
return v
elif type(v) in (str, bytes):
return hash64(v)
elif type(v) in (storage_dict, storage_list):
return v.table_id
else:
raise TypeError('unsupported value type')
def get_type(self,val):
if type(val) == int:
return 0
elif type(val) == str:
return 1
elif type(val) == storage_list:
return 2
elif type(val) == storage_dict:
return 3
else:
raise TypeError('unsupported type')
def get_raw_data(self, data):
data_type = self.get_type(data)
raw_data = 0
raw_length = 0
if data_type == 0: #int
raw_length = 8
raw_data = int.to_bytes(data, 8, 'little')
elif data_type == 1: #str
raw_length = len(data)
raw_data = data
elif data_type == 2: #list
raw_length = 8
raw_data = int.to_bytes(data.table_id, 8, 'little')
elif data_type == 3: #dict
raw_length = 8
raw_data = int.to_bytes(data.table_id, 8, 'little')
return (data_type, raw_length, raw_data)
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, val):
if key in self._dict and self._dict[key] == val:
return
else:
self._dict[key] = val
self._dirty_keys[key] = True
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __delitem__(self, key):
id = self.get_hash(key)
del self._dict[key]
        self._dirty_keys.pop(key, None)
itr = db.find_i64(g_code, g_scope, self.table_id, id)
if itr >= 0:
db.remove_i64(itr)
def __repr__(self):
return '%s(%s)' % (type(self).__name__, str(self._dict))
def apply(receiver, name, type):
require_auth(g_code)
    a = cache_dict(123)
a.load()
print('-----')
for key in a:
print(key, a[key])
a[100] = 'hello'
a[101] = 'world'
a['name'] = 'mike'
if 101 in a:
del a[101]
msg = read_action()
a[msg] = msg
a.store()
print('+++++++++')
for key in a:
print(key, a[key])
|
#!/usr/bin/env python3
import shutil
import os
os.chdir('/home/student/2022-01-04-Python/')
shutil.move('raynor.obj', 'Lab_21/')
xname = input('What is the new name for kerrigan.obj? ')
shutil.move('Lab_21/kerrigan.obj', 'Lab_21/' + xname)
|
import sys
import os
import time
from sklearn import metrics
import numpy as np
import pickle
from ensemble import Stacking
import warnings
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
warnings.filterwarnings(module='sklearn*', action='ignore', category=DeprecationWarning)
def read_data(data_file):
import gzip
f = gzip.open(data_file, "rb")
train, val, test = pickle.load(f, encoding='iso-8859-1')
f.close()
train_x = train[0]
train_y = train[1]
test_x = test[0]
test_y = test[1]
return train_x, train_y, test_x, test_y
data_file = "mnist.pkl.gz"
X_train, y_train, X_test, y_test = read_data(data_file)
models = [
ExtraTreesClassifier(random_state=0, n_jobs=-1,
n_estimators=100, max_depth=3),
RandomForestClassifier(random_state=0, n_jobs=-1,
n_estimators=100, max_depth=3),
XGBClassifier(random_state=0, n_jobs=-1, learning_rate=0.1,
n_estimators=100, max_depth=3)
]
meta_model = XGBClassifier(random_state=0, n_jobs=5, learning_rate=0.1,
n_estimators=100, max_depth=3)
start_time = time.time()
ens = Stacking(X_train, y_train, X_test, regression=False, bagged_pred=True,
needs_proba=False, save_dir=None, metric=accuracy_score,
n_folds=4, stratified=True, shuffle=True,
random_state=0, verbose=0)
start_time = time.time()
ens.add(models)
print('process(add) took %fs!' % (time.time() - start_time))
start_time = time.time()
y_pred = ens.add_meta(meta_model)
print('process(add_meta) took %fs!' % (time.time() - start_time))
print('Final prediction score: [%.8f]' % accuracy_score(y_test, y_pred))
|
from django.contrib import admin
from authority_list.models import OrganisationForm
class OrganisationFormAdmin(admin.ModelAdmin):
pass
admin.site.register(OrganisationForm, OrganisationFormAdmin)
|
import time
from sqlalchemy import String, Integer, Column, Text, UnicodeText, Unicode
from models.base_model import SQLMixin, db
from models.user import User
from models.reply import Reply
class Topic(SQLMixin, db.Model):
views = Column(Integer, nullable=False, default=0)
title = Column(Unicode(50), nullable=False)
content = Column(UnicodeText, nullable=False)
user_id = Column(Integer, nullable=False)
board_id = Column(Integer, nullable=False)
@classmethod
def new(cls, form, user_id):
form['user_id'] = user_id
m = super().new(form)
return m
@classmethod
def get(cls, id):
m = cls.one(id=id)
m.views += 1
m.save()
return m
def user(self):
u = User.one(id=self.user_id)
return u
def replies(self):
ms = Reply.all(topic_id=self.id)
return ms
def reply_count(self):
count = len(self.replies())
return count
|
import twitter
import speedtest
text_file = input("Put the name of your text file here")
codes = open(text_file, "r")
ACCESS_TOKEN = codes.readline().rstrip()
ACCESS_SECRET = codes.readline().rstrip()
KEY = codes.readline().rstrip()
KEY_SECRET = codes.readline().rstrip()
UPLOAD_SPEED = float(codes.readline().rstrip())
DOWNLOAD_SPEED = float(codes.readline().rstrip())
ISP_HANDLE = codes.readline().rstrip()
api = twitter.Api(KEY, KEY_SECRET, ACCESS_TOKEN, ACCESS_SECRET)
st = speedtest.Speedtest()
dl = st.download()/1000000
ul = st.upload()/1000000
if ul < UPLOAD_SPEED * 0.75 or dl < DOWNLOAD_SPEED * 0.75:
    tweet_str = ISP_HANDLE + " I pay for " + str(DOWNLOAD_SPEED) + " download and " + str(UPLOAD_SPEED)\
                + " upload but I'm getting " + str(dl) + " down and " + str(ul) + " up mbps"
api.PostUpdate(tweet_str)
print(tweet_str)
else:
print("The internet speed is currently good.")
|
#!/usr/bin/python3
from pyrob.api import *
@task(delay=0.05)
def task_6_6():
i = 0
while (not wall_is_on_the_right()):
move_right()
i += 1
while (not wall_is_above()):
move_up()
fill_cell()
while (not wall_is_beneath()):
move_down()
move_left(i)
return
if __name__ == '__main__':
run_tasks()
|
import sys
import urllib.request
import color
from color import *
def sql():
fullurl = input(''+T+'' + color.UNDERLINE + 'Full URL> ' + color.END)
errormsg = "You have an error in your SQL syntax"
payloads = ["'admin'or 1=1 or ''='", "'=1\' or \'1\' = \'1\'", "'or 1=1", "'1 'or' 1 '=' 1", "'or 1=1#", "'0 'or' 0 '=' 0", "'admin'or 1=1 or ''='", "'admin' or 1=1", "'admin' or '1'='1", "'or 1=1/*", "'or 1=1--"] #whatever payloads you want here ## YOU CAN ADD YOUR OWN
errorr = "yes"
for payload in payloads:
        try:
            resp = urllib.request.urlopen(fullurl + payload)
            body = resp.read()
            fullbody = body.decode('utf-8')
        except Exception:
            print(R + "[-] Error! Manually check this payload: " + W + payload)
            errorr = "no"
            #sys.exit()
            continue
if errormsg in fullbody:
if errorr == "no":
print(R + "[-] That payload might not work!")
errorr = "yes"
else:
print(G + "[+] The website IS SQL injection vulnerable! Payload: " + W + payload)
else:
print(R + "[-] The website is NOT SQL injection vulnerable!" + W)
|
def isPalindrome(n):
    # Builds and returns the number formed by reversing the digits of n.
    reverse = 0
    remainder = 0
while(n != 0):
remainder = n % 10
reverse = reverse * 10 + remainder
n = int(n / 10)
return reverse
num = int(input('Enter The Number'))
reverse = isPalindrome(num)
if(num == reverse):
print(num,'is a Palindrome')
else:
print(num,'is not a Palindrome')
|
from .formats import ssh_certificate_formats, ssh_pubkey_formats
from .cert import SSHCertificate
from .pubkey import SSHPublicKeyFile
from .request import SSHCSR
|
import torch
from collections.abc import Iterable
def _get_layers(model, all_layers=None, all_names=None, top_name=None, fn=None, sep='_'):
"""Auxiliar function. Recursive method for getting all in the model for which `fn(layer)=True`."""
if all_names is None:
all_names = []
if all_layers is None:
all_layers = []
if top_name is None:
top_name = ''
if fn is None:
fn = lambda l: True
for name, layer in model.named_children():
if list(layer.children()):
all_names, all_layers = _get_layers(layer, all_layers, all_names, top_name+name+sep, fn)
else:
if fn(layer):
all_names.append(top_name + name)
all_layers.append(layer)
return all_names, all_layers
def get_layers(model, fn=None, sep='_'):
"""Get all layers of torch.nn.Module for which `fn(layer)=True` using a depth-first search.
Given the module `model` and the function `fn(layer: torch.nn.Module) -> bool` return all layers
for which the function returns true. Return a list of tuples: ('name', Module). For nested blocks
    the name is a single string, with sub-block names separated by `sep` (by default `sep='_'`). For instance,
`layer1_0_conv1` for 3 nested blocks `layer1`, `0`, `conv1`."""
all_names, all_layers = _get_layers(model, fn=fn, sep=sep)
return list(zip(all_names, all_layers))
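# Illustrative usage (hypothetical model, not part of this module):
#
#   import torch.nn as nn
#   model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Sequential(nn.ReLU(), nn.Conv2d(8, 8, 3)))
#   convs = get_layers(model, fn=lambda l: isinstance(l, nn.Conv2d))
#   # -> [('0', Conv2d(...)), ('1_1', Conv2d(...))]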
def replace_layer(model, layer_name, replace_fn):
"""Replace single layer in a (possibly nested) torch.nn.Module using `replace_fn`.
Given a module `model` and a layer specified by `layer_name` replace the layer using
`new_layer = replace_fn(old_layer)`. Here `layer_name` is a list of strings, each string
indexing a level of the nested model."""
if layer_name:
nm = layer_name.pop()
model._modules[nm] = replace_layer(model._modules[nm], layer_name, replace_fn)
else:
model = replace_fn(model)
return model
def replace_all_layers(model, layers, replace_fn, sep='_'):
"""Replace layers in a (possibly nested) torch.nn.Module using `replace_fn`.
Given a module `model` and a layer specified by `layer_name` replace the layer using
`new_layer = replace_fn(old_layer)`. Here `layer_name` is a list of strings, each string
indexing a level of the nested model."""
for l in layers:
model = replace_layer(model, l.split(sep)[::-1], replace_fn)
return model
class SaveIntermediaryValues(object):
"""Module for saving intermediary values."""
def __init__(self, collapsing_fn, is_layer_fn, n_samples):
self.collapsing_fn = collapsing_fn
self.is_layer_fn = is_layer_fn
self.batch_dim = 0
self.n_samples = n_samples
self.counter = None
self.is_first_execution = None
self.storage = None
self.layer_names = None
def save_forward_hooks(self, model):
all_layers = get_layers(model, fn=self.is_layer_fn)
self.layer_names = list(list(zip(*all_layers))[0])
self.storage = {name: None for name in self.layer_names}
self.counter = {name: 0 for name in self.layer_names}
self.is_first_execution = {name: True for name in self.layer_names}
for name in self.layer_names:
model = replace_all_layers(model, [name], replace_fn=self.hook(name))
return model
def hook(self, name):
def register_forward_hook(layer):
def forward_hook(_self, inp, _out):
x = self.collapsing_fn(inp[0], _self)
if self.is_first_execution[name]:
self.is_first_execution[name] = False
self.storage[name] = self.init_storage(x)
delta = self.update_storage(x, self.storage[name], self.counter[name])
self.counter[name] += delta
layer.register_forward_hook(forward_hook)
return layer
return register_forward_hook
def init_storage(self, x):
if type(x) == torch.Tensor:
shape = list(x.shape)
shape[self.batch_dim] = self.n_samples
return torch.zeros(shape, dtype=x.dtype)
elif type(x) == dict:
aux = {}
for key, value in x.items():
aux[key] = self.init_storage(value)
return aux
elif isinstance(x, Iterable):
aux = []
for xx in x:
aux.append(self.init_storage(xx))
return tuple(aux)
else:
raise NotImplementedError()
def update_storage(self, x, storage, counter):
if type(x) == torch.Tensor:
delta = x.shape[self.batch_dim]
storage[counter:counter + delta, ...] = x
return delta
elif type(x) == dict:
delta = 0
for key, value in x.items():
delta = self.update_storage(value, storage[key], counter)
return delta
elif isinstance(x, Iterable):
delta = 0
iter_storage = iter(storage)
for xx in x:
delta = self.update_storage(xx, next(iter_storage), counter)
return delta
else:
raise NotImplementedError()
def reset_storage(self, storage=None):
if storage is None:
storage = self.storage
if type(storage) == torch.Tensor:
storage[...] = 0
elif type(storage) == dict:
for key, value in storage.items():
self.reset_storage(storage[key])
elif isinstance(storage, Iterable):
            for item in storage:
                self.reset_storage(item)
else:
raise NotImplementedError()
def reset(self):
self.counter = {name: 0 for name in self.layer_names}
self.is_first_execution = {name: True for name in self.layer_names}
self.reset_storage()
|
import numpy as np
import tensorflow as tf
import gpflow
import tensorflow_probability as tfp
from src.models.initialization import *
from src.models.base_kernel import BaseKernel
class ARD(BaseKernel, gpflow.kernels.Kernel):
"""
    Own implementation of the squared exponential kernel with the ARD
    property. Should work the same way as
    gpflow.kernels.SquaredExponential(ARD = True). Lengthscales and
variance can be randomized. This should be handled when initializing
the kernel.
Args:
variance (float) : kernel variance which scales the whole kernel
lengthscales (numpy array) : list of lengthscales
(should match the dimension of the input)
"""
def __init__(self, **kwargs):
super().__init__()
randomized = kwargs["randomized"]
dim = kwargs["dim"]
if not randomized:
lengthscales = np.ones(dim)
variance = 1.0
else:
lengthscales = np.random.uniform(0.5,2,dim)
variance = 1.0
self.variance = gpflow.Parameter(
variance, transform = gpflow.utilities.positive())
self.lengthscales = gpflow.Parameter(
lengthscales, transform = gpflow.utilities.positive())
self.dim = dim
def K_diag(self, X) -> tf.Tensor:
"""
Returns the diagonal vector when X1 == X2
(used in the background of gpflow)
"""
return self.variance * tf.ones_like(X[:,0])
def K(self, X1, X2=None) -> tf.Tensor:
"""
Returns the squared exponential ard kernel.
Args:
X1 (numpy array) : shaped N x D
X2 (numpy array) : shaped M x D
(D denotes the number of dimensions of the input)
"""
if X2 is None:
X2 = X1
# Precision is the inverse squared of the lengthscales
P = tf.linalg.diag(self.lengthscales**2)
X11 = tf.squeeze(
tf.expand_dims(X1,axis = 1) @ P @ tf.expand_dims(X1,axis = -1),-1)
X22 = tf.transpose(
tf.squeeze(
tf.expand_dims(X2,axis = 1) @ P @ tf.expand_dims(X2,axis = -1),
-1))
X12 = X1 @ P @ tf.transpose(X2)
K = self.variance * tf.exp(-0.5 * (X11 - 2*X12 + X22))
return K
def precision(self) -> tf.Tensor:
return tf.linalg.diag(self.lengthscales**(2))
class ARD_gpflow(BaseKernel, gpflow.kernels.SquaredExponential):
def __init__(self, **kwargs):
randomized = kwargs["randomized"]
dim = kwargs["dim"]
if not randomized:
lengthscales = np.ones(dim)
variance = 1.0
else:
lengthscales = np.random.uniform(0.5,3,dim)
variance = 1.0
super().__init__(variance, lengthscales)
def precision(self) -> tf.Tensor:
return tf.linalg.diag(self.lengthscales**(-2))
class FullGaussianKernel(BaseKernel, gpflow.kernels.Kernel):
"""
    Implementation of the full Gaussian kernel, which also introduces the
    off-diagonal entries of the precision matrix. Randomizing the
initialization should be handled outside of this class.
Args:
variance (float) : signal variance which scales the whole kernel
L (numpy array) : vector representation of L, where LL^T = P :
precision
"""
def __init__(self, **kwargs):
super().__init__()
randomized = kwargs["randomized"]
dim = kwargs["dim"]
if not randomized:
L = np.ones((dim*(dim+1))//2)
variance = 1.0
else:
L = init_precision(dim)
variance = 1.0
self.variance = gpflow.Parameter(
variance, transform = gpflow.utilities.positive())
self.L = gpflow.Parameter(L)
self.dim = dim
def K_diag(self, X) -> tf.Tensor:
"""
Returns the diagonal vector when X1 == X2
(used in the background of gpflow)
"""
return self.variance * tf.ones_like(X[:,0])
def K(self, X1, X2=None) -> tf.Tensor:
"""
Returns the full Gaussian kernel.
Args:
X1 (numpy array) : shaped N x D
X2 (numpy array) : shaped M x D
(D denotes the number of dimensions of the input)
"""
if X2 is None:
X2 = X1
#L = tfp.math.fill_triangular(self.L) # matrix representation of L
#A = X1 @ L
#B = X2 @ L
P = self.precision()
X11 = tf.squeeze(
tf.expand_dims(X1,axis = 1) @ P @ tf.expand_dims(X1,axis = -1),-1)
X22 = tf.transpose(
tf.squeeze(
tf.expand_dims(X2,axis = 1) @ P @ tf.expand_dims(X2,axis = -1),
-1))
X12 = X1 @ P @ tf.transpose(X2)
# kernel (N,1) - (N,M) + (1,M)
K = self.variance*tf.exp(-0.5 * (X11 - 2*X12 + X22))
return K
def precision(self) -> tf.Tensor:
L = tfp.math.fill_triangular(self.L)
return L@tf.transpose(L)
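# --- Illustrative check (sketch, not used by the kernel classes) -------------------
# Every K() in this module relies on expanding the squared Mahalanobis distance
#     (x1 - x2)^T P (x1 - x2) = x1^T P x1 - 2 x1^T P x2 + x2^T P x2,
# which is exactly what the X11, X12 and X22 terms compute in vectorized form.
# The helper below verifies that identity with plain numpy for a random P = L L^T.
def _check_quadratic_expansion(n=4, m=3, d=2, seed=0):
    rng = np.random.default_rng(seed)
    X1, X2 = rng.normal(size=(n, d)), rng.normal(size=(m, d))
    L_mat = np.tril(rng.normal(size=(d, d)))
    P = L_mat @ L_mat.T
    X11 = np.einsum('nd,de,ne->n', X1, P, X1)[:, None]  # (n, 1)
    X22 = np.einsum('md,de,me->m', X2, P, X2)[None, :]  # (1, m)
    X12 = X1 @ P @ X2.T                                 # (n, m)
    direct = np.array([[(x1 - x2) @ P @ (x1 - x2) for x2 in X2] for x1 in X1])
    assert np.allclose(X11 - 2 * X12 + X22, direct)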
class LowRankFullGaussianKernel(BaseKernel, gpflow.kernels.Kernel):
"""
    Low-rank variant of the full Gaussian kernel, which also introduces
    off-diagonal entries of the precision matrix through a rank-limited
    factor. Randomizing the initialization should be handled outside of
    this class.
    Args:
        variance (float) : signal variance which scales the whole kernel
        L (numpy array) : vector representation of the factor L, where
                          L^T L = P : precision
        rank (int) : rank of the precision factor
"""
def __init__(self, **kwargs):
super().__init__()
randomized = kwargs["randomized"]
dim = kwargs["dim"]
rank = kwargs["rank"]
if not randomized:
L = np.ones((dim*(dim+1))//2)
variance = 1.0
else:
L = init_lowrank_precision(dim, rank)
variance = 1.0
self.length = L.shape[0]
self.variance = gpflow.Parameter(
variance, transform = gpflow.utilities.positive())
self.L = gpflow.Parameter(L)
self.rank = rank
def K_diag(self, X) -> tf.Tensor:
"""
Returns the diagonal vector when X1 == X2
(used in the background of gpflow)
"""
return self.variance * tf.ones_like(X[:,0])
def K(self, X1, X2=None) -> tf.Tensor:
"""
Returns the full Gaussian kernel.
Args:
X1 (numpy array) : shaped N x D
X2 (numpy array) : shaped M x D
(D denotes the number of dimensions of the input)
"""
if X2 is None:
X2 = X1
P = self.precision()
X11 = tf.squeeze(
tf.expand_dims(X1,axis = 1) @ P @ tf.expand_dims(X1,axis = -1),-1)
X22 = tf.transpose(
tf.squeeze(
tf.expand_dims(X2,axis = 1) @ P @ tf.expand_dims(X2,axis = -1),
-1))
X12 = X1 @ P @ tf.transpose(X2)
K = self.variance * tf.exp(-0.5 * (X11 - 2*X12 + X22))
return K
def precision(self) -> tf.Tensor:
L = fill_lowrank_triangular(self.L, self.rank, self.length)
return tf.transpose(L)@L
class SGHMC_Full(BaseKernel, gpflow.kernels.Kernel):
"""
    SGHMC variant of the full Gaussian kernel, which also introduces the
    off-diagonal entries of the precision matrix. Randomizing the
initialization should be handled outside of this class.
Args:
variance (float) : signal variance which scales the whole kernel
L (numpy array) : vector representation of L, where LL^T = P :
precision
"""
def __init__(self, **kwargs):
super().__init__()
randomized = kwargs["randomized"]
dim = kwargs["dim"]
if not randomized:
L = np.ones((dim*(dim+1))//2)
variance = 0.0
else:
L = init_precision(dim, "wishart")
variance = np.random.randn()
self.variance = tf.Variable(variance, dtype = tf.float64,
trainable = True)
self.L = tf.Variable(L, dtype = tf.float64, trainable = False)
self.dim = dim
def K_diag(self, X) -> tf.Tensor:
"""
Returns the diagonal vector when X1 == X2 (used in the background of gpflow)
"""
return tf.exp(self.variance) * tf.ones_like(X[:,0])
def K(self, X1, X2=None) -> tf.Tensor:
"""
Returns the full Gaussian kernel.
Args:
X1 (numpy array) : shaped N x D
X2 (numpy array) : shaped M x D (D denotes the number of dimensions of the input)
"""
if X2 is None:
X2 = X1
L = tfp.math.fill_triangular(self.L) # matrix representation of L
A = X1 @ L
B = X2 @ L
X11 = tf.squeeze(
tf.expand_dims(A, axis = 1) @ tf.expand_dims(A, axis = -1),
axis = -1) # (N, 1)
X22 = tf.transpose(
tf.squeeze(
tf.expand_dims(B, axis = 1) @ tf.expand_dims(B, axis = -1),
axis = -1)) # (1,M)
X12 = A @ tf.transpose(B) # (N,M)
K = tf.exp(self.variance)*tf.exp(-0.5 * (X11 - 2*X12 + X22))
return K
def precision(self) -> tf.Tensor:
L = tfp.math.fill_triangular(self.L)
return L@tf.transpose(L)
class SGHMC_ARD(BaseKernel, gpflow.kernels.Kernel):
"""
Own implementation of the squared exponential kernel with ard
property. Should work the same way as
gpflow.kernels.SquaredExponential(ARD = True). Lengthscales and
variance can be randomized. This should be handled when initializing
the kernel.
Args:
variance (float) : kernel variance which scales the whole kernel
lengthscales (numpy array) : list of lengthscales
(should match the dimension of the input)
"""
def __init__(self, **kwargs):
super().__init__()
randomized = kwargs["randomized"]
dim = kwargs["dim"]
if not randomized:
L = np.ones(dim)
variance = 0.0
else:
L = np.random.randn(dim)
variance = np.random.randn()
self.variance = tf.Variable(variance, dtype = tf.float64,
trainable = True)
self.L = tf.Variable(L, dtype = tf.float64, trainable = False)
self.dim = dim
def K_diag(self, X) -> tf.Tensor:
"""
Returns the diagonal vector when X1 == X2
(used in the background of gpflow)
"""
return tf.exp(self.variance) * tf.ones_like(X[:,0])
def K(self, X1, X2=None) -> tf.Tensor:
"""
Returns the squared exponential ard kernel.
Args:
X1 (numpy array) : shaped N x D
X2 (numpy array) : shaped M x D
(D denotes the number of dimensions of the input)
"""
if X2 is None:
X2 = X1
# Precision is the inverse squared of the lengthscales
P = tf.linalg.diag(self.L**(2))
X11 = tf.squeeze(
tf.expand_dims(X1,axis = 1) @ P @ tf.expand_dims(X1,axis = -1),-1)
X22 = tf.transpose(
tf.squeeze(
tf.expand_dims(X2,axis = 1) @ P @ tf.expand_dims(X2,axis = -1),
-1)) # (1,M)
X12 = X1 @ P @ tf.transpose(X2) # (N,M)
# kernel (N,1) - (N,M) + (1,M)
K = tf.exp(self.variance) * tf.exp(-0.5 * (X11 - 2*X12 + X22))
return K
def precision(self) -> tf.Tensor:
return tf.linalg.diag(self.L**(2))
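# --- Minimal construction sketch (illustrative only) --------------------------------
# The kernels above are built purely from keyword arguments, e.g. a deterministic ARD
# kernel in 3 dimensions evaluated on random inputs. Whether this runs standalone
# depends on BaseKernel (imported above) not requiring extra constructor arguments.
if __name__ == "__main__":
    X_demo = np.random.rand(5, 3)
    demo_kernel = ARD(randomized=False, dim=3)
    print(demo_kernel.K(X_demo).shape)       # (5, 5) Gram matrix
    print(demo_kernel.K_diag(X_demo).shape)  # (5,) diagonal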
|
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
""" Test/example for the BindingEnergyWorkChain"""
import os
import click
import ase.io
from aiida.engine import run
from aiida.orm import Code, Dict, StructureData, Str
from aiida.plugins import WorkflowFactory
BindingEnergyWorkChain = WorkflowFactory('lsmo.cp2k_binding_energy')
@click.command('cli')
@click.argument('cp2k_code_label')
def main(cp2k_code_label):
"""Example usage: verdi run thistest.py cp2k@localhost"""
cp2k_code = Code.get_from_string(cp2k_code_label)
print("Testing CP2K BindingEnergy work chain for CO2 in Zn-MOF-74 ...")
print("[NOTE: this test will run on 4 cpus and take ca. 10 minutes]")
thisdir = os.path.dirname(os.path.abspath(__file__))
# Construct process builder
builder = BindingEnergyWorkChain.get_builder()
builder.structure = StructureData(ase=ase.io.read(os.path.join(thisdir, 'data/Zn-MOF-74.cif')))
builder.molecule = StructureData(ase=ase.io.read(os.path.join(thisdir, 'data/CO2_in_Zn-MOF-74.cif')))
builder.protocol_tag = Str('test')
builder.cp2k_base.cp2k.parameters = Dict(dict={ # Lowering CP2K default setting for a faster test calculation
'FORCE_EVAL': {
'DFT': {
'SCF': {
'EPS_SCF': 1.0E-4,
'OUTER_SCF': {
'EPS_SCF': 1.0E-4,
},
},
},
},
'MOTION': {
'GEO_OPT': {
'MAX_ITER': 5
}
},
})
builder.cp2k_base.cp2k.code = cp2k_code
builder.cp2k_base.cp2k.metadata.options.resources = {
"num_machines": 1,
"num_mpiprocs_per_machine": 4,
}
builder.cp2k_base.cp2k.metadata.options.max_wallclock_seconds = 1 * 5 * 60
run(builder)
if __name__ == '__main__':
main() # pylint: disable=no-value-for-parameter
# EOF
|
import unittest
from rankum.models import Doc
from rankum.exceptions import MissingDocIdException, EmptyDocListException
from rankum import JsonDocReader
class JsonDocReaderTest(unittest.TestCase):
    def test_should_parse_json_list(self):
input_json = '[{"id": 1, "category": 1}, {"id": "2", "category": 2}]'
expect = [Doc(id="1", category=1), Doc(id="2", category=2)]
actual = list(JsonDocReader(input_json).to_doc_list())
self.assertEqual(expect, actual)
def test_raise_an_exception_when_id_is_missing(self):
input_json = '[{"id": 1, "category": 1}, {"category": 2}]'
with self.assertRaises(MissingDocIdException):
list(JsonDocReader(input_json).to_doc_list())
def test_raise_an_exception_when_doc_list_is_empty(self):
input_json = '[]'
with self.assertRaises(EmptyDocListException):
list(JsonDocReader(input_json).to_doc_list())
def test_raise_an_exception_when_doc_list_is_none(self):
input_json = None
with self.assertRaises(EmptyDocListException):
list(JsonDocReader(input_json).to_doc_list())
if __name__ == '__main__':
unittest.main()
|
# Create a program that reads the name and two notes of several students and stores everything in a compound list.
# At the end, show a bulletin containing the average of each one and allow the user to show the grades of each student
# individually.
sheet = list()
while True:
name = str(input('Name: '))
note1 = float(input('Note 1: '))
note2 = float(input('Note 2: '))
average = (note1 + note2) / 2
sheet.append([name, [note1, note2], average])
    choice = ''
    while choice not in ('Y', 'N'):
        choice = str(input('Do you want to continue? [Y/N] ')).strip().upper()[:1]
if choice in 'N':
break
print('-=' * 25)
print(f'{"No.":<4}{"NAME":<10}{"AVERAGE":>8}')
print('-' * 22)
for index, student in enumerate(sheet):
print(f'{index:<4}{student[0]:<10}{student[2]:>8.1f}')
print('-' * 22)
print('-=' * 25)
while True:
    choice = int(input("Show which student's grades? (999 interrupts): "))
if choice == 999:
print('FINISHING...')
break
if choice <= len(sheet) - 1:
        print(f"{sheet[choice][0]}'s grades are {sheet[choice][1]}")
print('-' * 50)
print('<<< WELCOME BACK >>>')
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import struct_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.home.graph.v1',
manifest={
'Device',
'DeviceNames',
'DeviceInfo',
'AgentOtherDeviceId',
},
)
class Device(proto.Message):
r"""Third-party device definition.
Attributes:
id (str):
Third-party device ID.
type_ (str):
Hardware type of the device. See `device
types <https://developers.google.com/assistant/smarthome/guides>`__.
traits (Sequence[str]):
Traits supported by the device. See `device
traits <https://developers.google.com/assistant/smarthome/traits>`__.
name (google.home.graph_v1.types.DeviceNames):
Names given to this device by your smart home
Action.
will_report_state (bool):
Indicates whether your smart home Action will report state
of this device to Google via
[ReportStateAndNotification][google.home.graph.v1.HomeGraphApiService.ReportStateAndNotification].
room_hint (str):
Suggested name for the room where this device
is installed. Google attempts to use this value
during user setup.
structure_hint (str):
Suggested name for the structure where this
device is installed. Google attempts to use this
value during user setup.
device_info (google.home.graph_v1.types.DeviceInfo):
Device manufacturer, model, hardware version,
and software version.
attributes (google.protobuf.struct_pb2.Struct):
Attributes for the traits supported by the
device.
custom_data (google.protobuf.struct_pb2.Struct):
Custom device attributes stored in Home Graph and provided
to your smart home Action in each
`QUERY <https://developers.google.com/assistant/smarthome/reference/intent/query>`__
and
`EXECUTE <https://developers.google.com/assistant/smarthome/reference/intent/execute>`__
intent.
other_device_ids (Sequence[google.home.graph_v1.types.AgentOtherDeviceId]):
Alternate IDs associated with this device. This is used to
identify cloud synced devices enabled for `local
fulfillment <https://developers.google.com/assistant/smarthome/concepts/local>`__.
notification_supported_by_agent (bool):
Indicates whether your smart home Action will report
notifications to Google for this device via
[ReportStateAndNotification][google.home.graph.v1.HomeGraphApiService.ReportStateAndNotification].
If your smart home Action enables users to control device
notifications, you should update this field and call
[RequestSyncDevices][google.home.graph.v1.HomeGraphApiService.RequestSyncDevices].
"""
id = proto.Field(
proto.STRING,
number=1,
)
type_ = proto.Field(
proto.STRING,
number=2,
)
traits = proto.RepeatedField(
proto.STRING,
number=3,
)
name = proto.Field(
proto.MESSAGE,
number=4,
message='DeviceNames',
)
will_report_state = proto.Field(
proto.BOOL,
number=5,
)
room_hint = proto.Field(
proto.STRING,
number=6,
)
structure_hint = proto.Field(
proto.STRING,
number=7,
)
device_info = proto.Field(
proto.MESSAGE,
number=8,
message='DeviceInfo',
)
attributes = proto.Field(
proto.MESSAGE,
number=9,
message=struct_pb2.Struct,
)
custom_data = proto.Field(
proto.MESSAGE,
number=10,
message=struct_pb2.Struct,
)
other_device_ids = proto.RepeatedField(
proto.MESSAGE,
number=11,
message='AgentOtherDeviceId',
)
notification_supported_by_agent = proto.Field(
proto.BOOL,
number=12,
)
class DeviceNames(proto.Message):
r"""Identifiers used to describe the device.
Attributes:
name (str):
Primary name of the device, generally
provided by the user.
nicknames (Sequence[str]):
Additional names provided by the user for the
device.
default_names (Sequence[str]):
List of names provided by the manufacturer
rather than the user, such as serial numbers,
SKUs, etc.
"""
name = proto.Field(
proto.STRING,
number=1,
)
nicknames = proto.RepeatedField(
proto.STRING,
number=2,
)
default_names = proto.RepeatedField(
proto.STRING,
number=3,
)
class DeviceInfo(proto.Message):
r"""Device information.
Attributes:
manufacturer (str):
Device manufacturer.
model (str):
Device model.
hw_version (str):
Device hardware version.
sw_version (str):
Device software version.
"""
manufacturer = proto.Field(
proto.STRING,
number=1,
)
model = proto.Field(
proto.STRING,
number=2,
)
hw_version = proto.Field(
proto.STRING,
number=3,
)
sw_version = proto.Field(
proto.STRING,
number=4,
)
class AgentOtherDeviceId(proto.Message):
r"""Alternate third-party device ID.
Attributes:
agent_id (str):
Project ID for your smart home Action.
device_id (str):
Unique third-party device ID.
"""
agent_id = proto.Field(
proto.STRING,
number=1,
)
device_id = proto.Field(
proto.STRING,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
fname = input("Enter file name: ")
if len(fname) < 1 :
fname = "mbox-short.txt"
fh = open(fname)
counts = dict()
for line in fh:
line = line.rstrip()
lst = line.split()
if len(lst)>1 and lst[0] == "From":
counts[lst[1]] = counts.get(lst[1],0)+1
bigWord = None
bigCount = None
for word, count in counts.items():
if bigCount is None or bigCount < count:
bigWord = word
bigCount = count
print(bigWord, bigCount)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-22 08:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ticket',
fields=[
('ticketid', models.AutoField(primary_key=True, serialize=False, verbose_name='TicketID')),
('status', models.CharField(max_length=20, verbose_name='Status')),
('sector', models.CharField(max_length=30, verbose_name='Bereich')),
('category', models.CharField(max_length=30, verbose_name='Art')),
('subject', models.CharField(max_length=50, verbose_name='Betreff')),
('description', models.CharField(max_length=400, verbose_name='Beschreibung')),
('creationdatetime', models.DateTimeField(verbose_name='Erstellungsdatum')),
('creator', models.CharField(max_length=40, verbose_name='Ersteller')),
('responsible_person', models.CharField(blank=True, max_length=40, verbose_name='Verantwortlicher')),
('comment', models.CharField(max_length=100, verbose_name='Kommentar')),
('solution', models.CharField(max_length=400, verbose_name='Lösung')),
('keywords', models.CharField(max_length=100, verbose_name='Keywords')),
('closingdatetime', models.DateTimeField(blank=True, null=True, verbose_name='Abschlussdatum')),
('workinghours', models.FloatField(default=0.0, verbose_name='Bearbeitungszeit')),
('image', models.FileField(blank=True, null=True, upload_to='uploads/')),
],
),
]
|
import import_watch._cyclic_import_package.b # noqa: F401
|
# TASK:
# Input: a string consisting of opening and closing round brackets, passed as a
# command-line argument. To pass it as a string, the sequence is enclosed in
# double quotes.
# Output: the string "YES" if the input contains a correct bracket sequence,
# or the string "NO" if the sequence is incorrect.
import sys
string = sys.argv[1]
# Initializing variables
count = 0
flag = True
# Checking count of brackets
for char in string:
if char == "(":
count = count + 1
elif char == ")":
count = count - 1
if count < 0:
flag = False
if flag and count == 0: # Assure that brackets order is correct
print("YES")
else:
print("NO")
|
import re
import toml
def toml_break_line(line):
m = re.match(r"^other = \"(.+?)\"$", line)
if not m:
return [line]
splits = m.group(1).replace(r"\n", "\n").splitlines()
if len(splits) == 1:
return [line]
lines = []
lines.append('other = """' + splits[0])
lines.extend(splits[1:])
lines[-1] = lines[-1] + '"""'
return lines
def toml_dump(obj, stream):
for line in toml.dumps(obj).splitlines():
for l in toml_break_line(line):
stream.write(l)
stream.write("\n")
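# --- Usage sketch (illustrative only) ------------------------------------------------
# toml_dump rewrites the single-line `other = "...\n..."` entries produced by
# toml.dumps as TOML multi-line basic strings. For the hypothetical table below the
# expected output is:
#   [greeting]
#   other = """hello
#   world"""
if __name__ == "__main__":
    import io
    import sys
    buf = io.StringIO()
    toml_dump({"greeting": {"other": "hello\nworld"}}, buf)
    sys.stdout.write(buf.getvalue())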
|
import cv2
lena = cv2.imread(r"..\lena.jpg")
rgb = cv2.cvtColor(lena, cv2.COLOR_BGR2RGB)
cv2.imshow("lena", lena)
cv2.imshow("rgb", rgb)
cv2.waitKey()
cv2.destroyAllWindows()
|
import logging
import subprocess
import sys
import time
from tempfile import TemporaryDirectory
from typing import Dict, List
import requests
from common.models import BoundingBox as BoundingBoxModel
from common.models import setup_database_connections
from common.types import (BoundingBox, CitationData, CitationLocation, Match,
Matches, SerializableReference, Symbol)
from entities.citations.utils import (extract_ngrams, ngram_sim,
upload_citations)
from entities.symbols.types import SymbolData, SymbolId, SymbolWithId
from entities.symbols.utils import upload_symbols
import pdf.grobid_client
class PdfStructureParser:
def __init__(self, pdf_hash, structure_map):
self.pdf_hash = pdf_hash
self.structure_map = structure_map
self.pages = structure_map['tokens']['pages']
self.page_sizes = dict((p['page']['pageNumber'], p['page']) for p in self.pages)
self.page_indices = dict((p['page']['pageNumber'], i) for i,p in enumerate(self.pages))
self.elements = structure_map['elements']['elementTypes']
self.references = None
def find_token(self, index):
for p in self.pages:
if (index < len(p['tokens'])):
return p['page'], p['tokens'][index]
else:
index -= len(p['tokens'])
return None
def get_text(self,spans):
text = []
for s in spans:
direct = s.get('dehyphenizedText')
if direct:
text += direct
else:
for i in range(s['left'], s['right']):
page, token = self.find_token(i)
text.append(token['text'])
return ' '.join(text)
def union(self, bbox1: BoundingBoxModel, bbox2: BoundingBoxModel):
if not bbox1:
return bbox2
if not bbox2:
return bbox1
x1 = min(bbox1.left,bbox2.left)
y1 = min(bbox1.top,bbox2.top)
x2 = max(bbox1.left+bbox1.width, bbox2.left+bbox2.width)
y2 = max(bbox1.top+bbox1.height, bbox2.top+bbox2.height)
return BoundingBoxModel(page = bbox1.page,
left = x1, top = y1, width = x2-x1, height=y2-y1)
@staticmethod
def should_combine(bbox1: BoundingBoxModel, bbox2: BoundingBoxModel):
return bbox1.page == bbox2.page and (
abs(bbox1.top - bbox2.top) < 4 # Same y-coordinate
) and (
abs(bbox2.left - bbox1.left - bbox1.width) < 15 # To the right
)
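    # Merging sketch (illustrative): two tokens on the same text line, e.g.
    #   BoundingBoxModel(page=0, left=100, top=50, width=20, height=10) and
    #   BoundingBoxModel(page=0, left=122, top=51, width=30, height=10),
    # satisfy should_combine (same page, vertical offset < 4, horizontal gap < 15),
    # and union() merges them into a single box spanning left=100..152.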
def get_bounding_boxes(self, spans):
bboxes = []
for s in spans:
bbox = None
for i in range(s['left'], s['right']):
page, token = self.find_token(i)
page_index = self.page_indices[page['pageNumber']]
token_bbox = BoundingBoxModel(
page = page_index,
left = token['x'],
top = token['y'],
width = token['width'],
height=token['height']
)
if not bbox:
bbox = token_bbox
elif self.should_combine(bbox, token_bbox):
bbox = self.union(bbox, token_bbox)
else:
bboxes.append(bbox)
bbox = token_bbox
if bbox:
bboxes.append(bbox)
return bboxes
def find_cited_paper(self, bib_item_title):
if self.references is None:
resp = requests.get(f"https://api.semanticscholar.org/v1/paper/{self.pdf_hash}")
if resp.ok:
self.references = resp.json()['references']
else:
                logging.warning('Got status %s for paper %s', resp.status_code, self.pdf_hash)
self.references = []
max_similarity = 0.0
best_matching_paper = None
for reference_data in self.references:
reference_title = reference_data['title']
similarity = ngram_sim(reference_title, bib_item_title)
if similarity > 0.5 and similarity > max_similarity:
max_similarity = similarity
best_matching_paper = reference_data
return best_matching_paper
def get_symbols(self) -> SymbolData:
symbols_with_ids: List[SymbolWithId] = []
boxes: Dict[SymbolId, BoundingBox] = {}
matches: Matches = {}
symbol_index = 0
for sym in self.elements['<symbol>']:
id = sym['tags'].get('id')
if id:
spans = sym['spans']
bboxes = self.get_bounding_boxes(spans)
mock_math_ml = '<pdf_symbol>{}</pdf_symbol>'.format(id)
symbol = Symbol([], mock_math_ml, [])
symbol_id = SymbolId(id, None, symbol_index)
symbols_with_ids.append(SymbolWithId(symbol_id, symbol))
if bboxes:
box = bboxes[0]
page = self.page_sizes[box.page]
box.page = self.page_indices[box.page]
box.left /= page['width']
box.top /= page['height']
box.width /= page['width']
box.height /= page['height']
boxes[symbol_id] = box
match = Match(mock_math_ml, mock_math_ml, 1)
matches.setdefault(mock_math_ml,[]).append(match)
symbol_index += 1
return SymbolData(
arxiv_id=None,
            s2_id=self.pdf_hash,
symbols_with_ids=symbols_with_ids,
boxes = boxes,
symbol_sentence_model_ids={},
matches=matches
)
def get_citations(self) -> CitationData:
locations = {}
s2_ids_of_citations = {}
s2_data = {}
bib_item_titles = {}
for ref in self.elements.get('<bibItem_title>', []):
id = ref['tags'].get('id')
if id:
bib_item_titles[id] = self.get_text(ref['spans'])
citation_index = 0
for cit in self.elements.get('<citation_marker>', []):
ref = cit['tags'].get('ref')
spans = cit['spans']
if ref:
bib_item_title = bib_item_titles.get(ref)
if bib_item_title:
cited_paper = self.find_cited_paper(bib_item_title)
if cited_paper:
cited_paper_id = cited_paper['paperId']
s2_ids_of_citations[ref] = cited_paper_id
if cited_paper_id not in s2_data:
authors = ','.join(a['name'] for a in cited_paper['authors'])
s2_data[cited_paper_id] = SerializableReference(
s2_id = cited_paper_id,
arxivId=cited_paper.get('arxivId'),
doi = cited_paper.get('doi'),
title = cited_paper.get('title'),
authors=authors,
venue = cited_paper.get('venue'),
year = cited_paper.get('year')
)
citation_locations = set()
for box in self.get_bounding_boxes(spans):
page = self.page_sizes[box.page]
loc = CitationLocation(key=ref,
cluster_index=citation_index,
page=self.page_indices[box.page],
left=box.left / page['width'],
top=box.top / page['height'],
width=box.width / page['width'],
height=box.height / page['height'])
citation_locations.add(loc)
locations.setdefault(ref, {})[citation_index] = citation_locations
citation_index += 1
return CitationData(
arxiv_id = None,
            s2_id = self.pdf_hash,
citation_locations=locations,
key_s2_ids=s2_ids_of_citations,
s2_data = s2_data
)
def upload(self):
citations = self.get_citations()
upload_citations(citations, 'pdf-pipeline')
symbols = self.get_symbols()
upload_symbols(symbols,'pdf-pipeline')
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
pdf_hashes = ['8c53cd64e46cf382bf81ce2c4e0087ef31351919']
if sys.argv[1:]:
pdf_hashes = [s.strip() for s in open(sys.argv[1]).readlines()]
setup_database_connections('public')
with TemporaryDirectory() as tempdir:
for pdf_hash in pdf_hashes:
start_time = time.time()
logging.info("Processing PDF %s", pdf_hash)
try:
pdf_file = "{}/{}.pdf".format(tempdir, pdf_hash)
try:
subprocess.check_call([
"aws", "s3", "cp", "s3://ai2-s2-pdfs/{}/{}.pdf".format(pdf_hash[:4], pdf_hash[4:]), pdf_file
])
                except subprocess.CalledProcessError:
subprocess.check_call([
"aws", "s3", "cp", "s3://ai2-s2-pdfs-private/{}/{}.pdf".format(pdf_hash[:4], pdf_hash[4:]), pdf_file
])
s = pdf.grobid_client.get_pdf_structure(pdf_file)
PdfStructureParser(pdf_hash, s).upload()
logging.info("Finished in %s second", time.time() - start_time)
except Exception:
logging.exception('Error processing {}'.format(pdf_hash))
|
import math
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Tuple, Union, cast
from cache import volatile_cache
from constants import SQUAD_DISMANTLE_RANGED, rmem_key_dismantler_squad_opts, role_squad_dismantle, role_squad_heal, \
role_squad_ranged
from creeps.roles.squads import SquadDrone
from creeps.squads.base import BasicOffenseSquad, squadmemkey_origin
from empire import honey, portals, stored_data
from jstools.screeps import *
from position_management import flags, locations
from utilities import movement, positions, robjs
if TYPE_CHECKING:
from position_management.locations import Location
__pragma__('noalias', 'name')
__pragma__('noalias', 'undefined')
__pragma__('noalias', 'Infinity')
__pragma__('noalias', 'keys')
__pragma__('noalias', 'get')
__pragma__('noalias', 'set')
__pragma__('noalias', 'type')
__pragma__('noalias', 'update')
__pragma__('noalias', 'values')
specialty_order = [ATTACK, WORK, HEAL, RANGED_ATTACK]
def drone_role(specialty):
# type: (str) -> str
if specialty == WORK:
return role_squad_dismantle
elif specialty == RANGED_ATTACK:
return role_squad_ranged
elif specialty == HEAL:
return role_squad_heal
dismemkey_gathered = "g"
dismemkey_ordered = "d"
dismemkey_regrouping = "t"
class DismantleSquad(BasicOffenseSquad):
def calculate_movement_order(self):
return _.sortByAll(self.members, lambda x: specialty_order.index(x.findSpecialty()), 'name')
def move_to_stage_0(self, target):
# type: (RoomPosition) -> None
self.new_move(target)
def move_to_stage_1(self, target, any_hostiles):
# type: (RoomPosition, bool) -> None
self.new_move(target)
def move_to_stage_2(self, target):
# type: (RoomPosition) -> None
self.new_move(target)
def move_to(self, target):
# type: (RoomPosition) -> None
self.new_move(target)
def new_move(self, target):
# type: (RoomPosition) -> None
if not self.mem[dismemkey_gathered]:
self.initial_gathering(target)
return
self.move_together(target)
def initial_gathering(self, target):
# type: (RoomPosition) -> None
origin = self.find_home()
if not self.mem[squadmemkey_origin]:
self.set_origin(origin)
serialized_obj = self.home.hive.honey.get_serialized_path_obj(origin, target, self.new_movement_opts())
ordered_rooms_in_path = honey.get_room_list_from_serialized_obj(serialized_obj)
second_from_home = ordered_rooms_in_path[1]
path = Room.deserializePath(serialized_obj[second_from_home])
halfway = path[int(len(path) / 2)]
meet_at = __new__(RoomPosition(halfway.x, halfway.y, second_from_home))
anyone_near = []
for member in self.members:
if member.pos.inRangeTo(meet_at, 3):
anyone_near.append(member)
if len(anyone_near) != len(self.members):
for creep in self.members:
creep.move_to(meet_at)
else:
self.mem[dismemkey_gathered] = True
def target_exit_positions(self, current_room, next_room):
# type: (str, str) -> str
saved = self.mem['_texit']
enemies_this_room = None
if saved and Game.time < saved['e'] and None:
pass
return ''
def move_to_exit(self, dismantle, attack, heal, exit_positions):
# type: (List[SquadDrone], List[SquadDrone], List[SquadDrone], str) -> List[SquadDrone]
robjs.get_str_codepoint(exit_positions, 0)
return []
def move_together(self, target):
# type: (RoomPosition) -> None
origin = self.find_home()
serialized_obj = self.home.hive.honey.get_serialized_path_obj(origin, target, self.new_movement_opts())
ordered_rooms_in_path = honey.get_room_list_from_serialized_obj(serialized_obj)
dismantle = []
attack = []
heal = []
for creep in self.members:
specialty = creep.findSpecialty()
if specialty == WORK:
dismantle.append(creep)
elif specialty == RANGED_ATTACK:
attack.append(creep)
elif specialty == HEAL:
heal.append(creep)
else:
self.log("unknown specialty creep in dismantling squad: {} ({}). treating as ranged.",
specialty, creep)
attack.append(creep)
if not len(dismantle):
self.log("dismantle squad has no dismantle creeps!")
elif not len(heal):
self.log("dismantle squad has no healers!")
dismantle = _.sortBy(dismantle, 'name')
heal = _.sortBy(heal, 'name')
attack = _.sortBy(attack, 'name')
groups = []
if len(dismantle):
groups.append(dismantle)
if len(heal):
groups.append(heal)
if len(attack):
groups.append(attack)
self.log("dismantler squad moving: {}",
"<-".join(["[{}]".format(_.pluck(g, 'name')) for g in groups]))
memory = self.mem
if not memory[dismemkey_ordered]:
self.log("ordering")
ordered_now, repath = self.get_ordered(target, serialized_obj, dismantle, heal, attack)
if not ordered_now:
return
current_room = groups[0][0].pos.roomName
if self.mem[dismemkey_regrouping]:
self.log("regrouping")
if self.regroup(target, groups, ordered_rooms_in_path):
return
else:
# TODO: don't require this! we shouldn't be relying on the main path except for the rooms,
# and for the first reordering outside of base.
del self.mem[dismemkey_ordered]
elif _.any(self.members, lambda c: c.pos.roomName != current_room):
self.log("enabling regrouping - not in same room.")
self.mem[dismemkey_regrouping] = True
self.regroup(target, groups, ordered_rooms_in_path)
return
grouped = [groups[0][0]]
ungrouped = _.clone(self.members)
iterations = len(ungrouped) ** 2
for _i in range(0, iterations):
index = 0
while index < len(ungrouped):
this_creep = ungrouped[index]
any_matched = False
for creep in grouped:
if this_creep.pos.isNearTo(creep):
any_matched = True
break
if any_matched:
grouped.append(this_creep)
                    ungrouped.splice(index, 1)
else:
index += 1
if len(ungrouped):
self.log("enabling regrouping - in same room, but not together.")
self.mem[dismemkey_regrouping] = True
if not self.regroup(target, groups, ordered_rooms_in_path):
self.log("warning: tried to stop regrouping immediately after choosing to regroup.")
return
if _.any(self.members, 'fatigue'):
self.log('fatigue')
return
next_room = None # type: Optional[str] # TODO: this
exit_positions = self.target_exit_positions(current_room, next_room)
return BasicOffenseSquad.move_to_stage_2(self, target)
def regroup(self, target, groups, ordered_rooms_in_path):
# type: (RoomPosition, List[List[SquadDrone]], List[str]) -> bool
current_room = groups[0][0].pos.roomName
room_index = robjs.rindex_list(ordered_rooms_in_path, current_room)
last_room = ordered_rooms_in_path[room_index - 1]
next_room = ordered_rooms_in_path[room_index + 1]
gather_at_x = groups[0][0].pos.x
gather_at_y = groups[0][0].pos.y
extra_condition = None
if next_room:
if last_room:
min_x = 1
min_y = 1
max_x = 48
max_y = 48
room_x_diff, room_y_diff = movement.room_diff(last_room, current_room)
if abs(room_x_diff) > 1 or abs(room_y_diff) > 1:
portal_list = Game.rooms[current_room].find(FIND_STRUCTURES, {
'filter': {'structureType': STRUCTURE_PORTAL}
})
def portal_condition(x, y):
return not _.any(portal_list, lambda p: (abs(p.pos.x - x) < 5
or abs(p.pos.y - y) < 5))
extra_condition = portal_condition
self.log(".. through a portal")
else:
if room_x_diff > 0:
min_x = 6
elif room_x_diff < 0:
max_x = 44
if room_y_diff > 0:
min_y = 6
elif room_y_diff < 0:
max_y = 44
else:
min_x = 6
max_x = 44
min_y = 6
max_y = 44
if gather_at_x < min_x or gather_at_x > max_x or gather_at_y < min_y or gather_at_y > max_y \
or (extra_condition and not extra_condition(gather_at_x, gather_at_y)):
open_space = movement.find_an_open_space_around(current_room, gather_at_x, gather_at_y,
min_x, min_y, max_x, max_y, extra_condition)
gather_at_x = open_space.x
gather_at_y = open_space.y
target_itself = False
else:
gather_at_x = target.x
gather_at_y = target.y
current_room = target.roomName
min_x = 0
max_x = 50
min_y = 0
max_y = 50
target_itself = True
pos = __new__(RoomPosition(gather_at_x, gather_at_y, current_room))
last_group_gathered = True
self.log('.. at {} (conditions: [{}-{},{}-{}])', pos, min_x, max_x, min_y, max_y)
def move_to_closest_of(c, targets):
# type: (SquadDrone, List[Union[RoomPosition, RoomObject, SquadDrone]]) -> bool
target = None
distance = Infinity
for test_target in targets:
test_distance = movement.chebyshev_distance_room_pos(c.pos, robjs.pos(test_target))
if test_distance < distance:
distance = test_distance
target = test_target
target = robjs.pos(target)
if c.pos.roomName == target.roomName:
if c.pos.isNearTo(target):
return False
else:
c.move_to(target)
return True
elif movement.chebyshev_distance_room_pos(c.pos, target) < 100:
c.move_to(target)
return True
else:
# TODO: this should accommodate reroutes _faster_?
reroute = portals.recommended_reroute(c.pos, target)
if reroute is not None:
target = reroute[0]
c.move_to(target)
return True
for creep in groups[0]:
if move_to_closest_of(creep, [pos]):
self.log(".. breaking in group 0")
last_group_gathered = False
for index in range(1, len(groups)):
last_group = groups[index - 1]
this_group = groups[index]
if last_group_gathered:
for in_group_index in range(0, len(this_group)):
to_test = last_group
this_creep = this_group[in_group_index]
# if in_group_index == 0:
# to_test = last_group
# else:
# last_creep = this_group[in_group_index - 1]
# to_test = [last_creep]
# to_test.extend(last_group)
if move_to_closest_of(this_creep, to_test):
last_group_gathered = False
self.log(".. breaking in group {}", index)
else:
for this_creep in groups[index]:
if target_itself:
move_to_closest_of(this_creep, last_group)
else:
move_to_closest_of(this_creep, [pos])
if last_group_gathered:
del self.mem[dismemkey_regrouping]
return False
else:
return True
    def get_ordered(self, target, serialized_obj, dismantle, heal, attack, already_repathed=False):
        # type: (RoomPosition, Dict[str, str], List[SquadDismantle], List[SquadDrone], List[SquadDrone], bool) -> Tuple[bool, bool]
rebuilt = robjs.concat_lists(dismantle, heal, attack)
first_creep = rebuilt[0]
serialized_path_this_room = serialized_obj[first_creep.pos.roomName]
if serialized_path_this_room:
path_this_room = Room.deserializePath(serialized_path_this_room)
total_positions_this_room = len(path_this_room)
else:
total_positions_this_room = 0
path_this_room = None
if path_this_room is not None and total_positions_this_room >= len(self.members) + 4:
if total_positions_this_room >= len(self.members) * 2 + 4:
first_index = int(len(path_this_room) / 2)
else:
first_index = len(path_this_room) - 2
any_off = False
for index in range(0, len(rebuilt)):
pos = path_this_room[first_index - index]
creep = rebuilt[index]
pos = __new__(RoomPosition(pos.x, pos.y, first_creep.pos.roomName))
if creep.pos.isEqualTo(pos):
continue
else:
any_off = True
creep.move_to(pos)
if not any_off:
self.mem[dismemkey_ordered] = True
return True, already_repathed
else:
next_intermediate_goal = target
origin = _.max(self.members,
lambda m: movement.chebyshev_distance_room_pos(m.pos, next_intermediate_goal)).pos
reroute = portals.recommended_reroute(origin, target)
if reroute is not None:
next_intermediate_goal = reroute[0]
            origin = _.max(self.members, lambda m: movement.chebyshev_distance_room_pos(m.pos, next_intermediate_goal)).pos
self.set_origin(origin)
serialized_obj = self.home.hive.honey.get_serialized_path_obj(origin, target, self.new_movement_opts())
if not serialized_obj[first_creep.pos.roomName]:
self.log("Uh-oh - path from furthest creep to target did not include the room the first creep is in."
" Setting origin to first creep's pos.")
self.set_origin(first_creep.pos)
serialized_obj = self.home.hive.honey.get_serialized_path_obj(origin, target, self.new_movement_opts())
if not serialized_obj[first_creep.pos.roomName]:
self.log("Path from first creep {} to {} did not include room {}! ...",
first_creep.pos, target, first_creep.pos.roomName)
return False, False
return self.get_ordered(target, serialized_obj, dismantle, heal, attack, True)
return False, already_repathed
def is_heavily_armed(self):
return True
def cost_of_wall_hits(hits):
# type: (int) -> int
return int(math.ceil(20 * math.log(hits / (DISMANTLE_POWER * MAX_CREEP_SIZE / 2 * 40), 50)))
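# Rough scale of the formula above (sketch, using the standard Screeps constants
# DISMANTLE_POWER = 50 and MAX_CREEP_SIZE = 50): the reference value is
# 50 * 50 / 2 * 40 = 50,000 hits, which costs 0, and each further factor of 50 in
# wall hits adds 20 to the path cost (e.g. 2,500,000 hits -> cost 20).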
def is_saveable_amount(amount, resource):
# type: (int, str) -> bool
return amount > 5000 and (resource != RESOURCE_ENERGY or amount > 100 * 1000)
def can_target_struct(structure, opts):
# type: (Structure, Dict[str, bool]) -> bool
if '__valid_dismantle_target' not in cast(Any, structure):
structure_type = structure.structureType
invalid = (
cast(OwnedStructure, structure).my
or structure_type == STRUCTURE_CONTROLLER
or structure_type == STRUCTURE_PORTAL
or (
cast(StructureContainer, structure).store
and _.findKey(cast(StructureContainer, structure).store, is_saveable_amount)
)
or (
opts['just_vitals']
and structure_type != STRUCTURE_SPAWN
and structure_type != STRUCTURE_NUKER
and structure_type != STRUCTURE_TOWER
and structure_type != STRUCTURE_RAMPART
and structure_type != STRUCTURE_POWER_SPAWN
and structure_type != STRUCTURE_OBSERVER
)
)
cast(Any, structure)['__valid_dismantle_target'] = not invalid
return cast(Any, structure)['__valid_dismantle_target']
def get_opts(room_name):
# type: (str) -> Dict[str, bool]
if room_name in Memory.rooms:
room_mem = Memory.rooms[room_name]
if rmem_key_dismantler_squad_opts in room_mem:
return cast(Dict[str, bool], room_mem[rmem_key_dismantler_squad_opts])
return {'just_vitals': True}
def dismantle_pathfinder_callback(room_name):
# type: (str) -> Union[PathFinder.CostMatrix, bool]
room = Game.rooms[room_name]
if room:
opts = get_opts(room_name)
plain_cost = 1
matrix = honey.create_custom_cost_matrix(room_name, plain_cost, plain_cost * 5, 1, False)
any_lairs = False
for structure in cast(List[Structure], room.find(FIND_STRUCTURES)):
structure_type = structure.structureType
if structure_type == STRUCTURE_RAMPART and (cast(StructureRampart, structure).my
or cast(StructureRampart, structure).isPublic):
continue
elif structure_type == STRUCTURE_ROAD:
continue
elif not can_target_struct(structure, opts):
matrix.set(structure.pos.x, structure.pos.y, 255)
elif structure_type == STRUCTURE_ROAD:
if matrix.get(structure.pos.x, structure.pos.y) == 0:
matrix.set(structure.pos.x, structure.pos.y, plain_cost)
continue
elif structure_type == STRUCTURE_CONTAINER:
continue
elif structure_type == STRUCTURE_KEEPER_LAIR:
any_lairs = True
matrix.set(structure.pos.x, structure.pos.y, 255)
elif structure_type == STRUCTURE_SPAWN or structure_type == STRUCTURE_EXTENSION:
for x in range(structure.pos.x - 1, structure.pos.x + 2):
for y in range(structure.pos.y - 1, structure.pos.y + 2):
existing = matrix.get_existing(x, y)
matrix.set(x, y, existing - 1)
elif structure_type == STRUCTURE_TOWER and cast(StructureTower, structure).energy:
initial_x = structure.pos.x
initial_y = structure.pos.y
for x in range(initial_x - 10, initial_x + 10):
for y in range(initial_y - 10, initial_y + 10):
distance = movement.chebyshev_distance_xy(initial_x, initial_y, x, y)
if distance <= 5:
matrix.increase_at(x, y, None, 20 * plain_cost)
else:
matrix.increase_at(x, y, None, (25 - distance) * plain_cost)
elif structure.hits:
matrix.increase_at(structure.pos.x, structure.pos.y, None,
cost_of_wall_hits(structure.hits) * plain_cost)
else:
matrix.set(structure.pos.x, structure.pos.y, 255)
for site in cast(List[ConstructionSite], room.find(FIND_MY_CONSTRUCTION_SITES)):
if site.structureType == STRUCTURE_RAMPART or site.structureType == STRUCTURE_ROAD \
or site.structureType == STRUCTURE_CONTAINER:
continue
matrix.set(site.pos.x, site.pos.y, 255)
# Note: this depends on room being a regular Room, not a RoomMind, since RoomMind.find(FIND_HOSTILE_CREEPS)
# excludes allies!
if not room.controller or not room.controller.my or not room.controller.safeMode:
for creep in room.find(FIND_HOSTILE_CREEPS):
matrix.set(creep.pos.x, creep.pos.y, 255)
if any_lairs:
for source in room.find(FIND_SOURCES):
for x in range(source.pos.x - 4, source.pos.x + 5):
for y in range(source.pos.y - 4, source.pos.y + 5):
matrix.set(x, y, 200)
for mineral in room.find(FIND_MINERALS):
for x in range(mineral.pos.x - 4, mineral.pos.x + 5):
for y in range(mineral.pos.y - 4, mineral.pos.y + 5):
matrix.set(x, y, 200)
print('Dismantler cost matrix for {}:\nStart.\n{}\nEnd.'.format(room_name, matrix.visual()))
else:
matrix = __new__(PathFinder.CostMatrix())
data = stored_data.get_data(room_name)
if not data:
return matrix
for obstacle in data.obstacles:
if obstacle.type == StoredObstacleType.ROAD:
if matrix.get(obstacle.x, obstacle.y) == 0:
matrix.set(obstacle.x, obstacle.y, 1)
else:
if obstacle.type == StoredObstacleType.SOURCE_KEEPER_SOURCE \
or obstacle.type == StoredObstacleType.SOURCE_KEEPER_MINERAL:
for x in range(obstacle.x - 4, obstacle.x + 5):
for y in range(obstacle.y - 4, obstacle.y + 5):
matrix.set(x, y, 200)
matrix.set(obstacle.x, obstacle.y, 255)
return matrix.cost_matrix
_dismantle_move_to_opts = {
"maxRooms": 1,
"maxOps": 4000,
"reusePath": 100,
"plainCost": 1,
"swampCost": 5,
"roomCallback": dismantle_pathfinder_callback,
}
def get_dismantle_condition_not_a_road(opts):
# type: (Dict[str, bool]) -> Callable[[Structure], bool]
return lambda structure: structure.structureType != STRUCTURE_ROAD and can_target_struct(structure, opts)
def creep_condition_enemy(creep):
# type: (Creep) -> bool
return not creep.my and not Memory.meta.friends.includes(creep.owner.username.lower())
class SquadDismantle(SquadDrone):
def run_squad(self, members, target):
# type: (List[SquadDrone], Location) -> None
if movement.chebyshev_distance_room_pos(self.pos, target) > 150:
return
opts = get_opts(self.pos.roomName)
self.log("running with opts {}", JSON.stringify(opts))
owner = stored_data._find_room_owner(self.room.room)
if (owner and (Memory.meta.friends.includes(owner.name.lower())
or owner.name == self.creep.owner.username)):
return
next_pos = None
path = _.get(self.memory, ['_move', 'path'])
if path:
next_pos = self.creep.findNextPathPos(path)
if not _.isObject(next_pos) or not self.pos.isNearTo(next_pos):
next_pos = None
if next_pos is None and self.creep.__direction_moved:
next_pos = movement.apply_direction(self.pos, self.creep.__direction_moved)
if next_pos is not None:
best_structure = cast(Structure, _.find(self.room.look_at(LOOK_STRUCTURES, next_pos),
get_dismantle_condition_not_a_road(opts)))
if best_structure:
result = self.creep.dismantle(best_structure)
if result != OK:
self.log("Unknown result from {}.dismantle({}): {}", self.creep, best_structure, result)
if result != ERR_NOT_IN_RANGE:
return
elif next_pos == ERR_NOT_FOUND:
del self.memory['_move']
structures_around = cast(List[Dict[str, Structure]],
self.room.look_for_in_area_around(LOOK_STRUCTURES, self.pos, 1))
best_structure = None
our_dismantle_power = DISMANTLE_POWER * self.creep.getActiveBodypartsBoostEquivalent(WORK, 'dismantle')
if len(structures_around) > 1:
ramparts_at = None
for structure_obj in structures_around:
if structure_obj[LOOK_STRUCTURES].structureType == STRUCTURE_RAMPART:
if ramparts_at is None:
ramparts_at = {}
ramparts_at[positions.serialize_pos_xy(structure_obj[LOOK_STRUCTURES].pos)] \
= structure_obj[LOOK_STRUCTURES].hits
best_rank = -Infinity
for structure_obj in cast(List[Dict[str, Structure]],
self.room.look_for_in_area_around(LOOK_STRUCTURES, self.pos, 1)):
structure = structure_obj[LOOK_STRUCTURES]
if not can_target_struct(structure, opts):
continue
structure_type = structure.structureType
if structure_type == STRUCTURE_TOWER:
rank = 55
elif structure_type == STRUCTURE_SPAWN:
rank = 50
elif structure_type == STRUCTURE_LAB:
rank = 45
elif structure_type == STRUCTURE_EXTENSION:
rank = 40
elif structure_type == STRUCTURE_LINK:
rank = 30
else:
rank = 10
hits = structure.hits
                if structure_type != STRUCTURE_RAMPART and ramparts_at:
                    # ramparts_at maps serialized positions to rampart hit counts.
                    rampart_hits = ramparts_at.get(positions.serialize_pos_xy(structure.pos))
                    if rampart_hits:
                        hits += rampart_hits
if hits < our_dismantle_power:
rank -= hits / our_dismantle_power
if rank > best_rank:
best_rank = rank
best_structure = structure
elif len(structures_around):
best_structure = structures_around[0][LOOK_STRUCTURES]
if not can_target_struct(best_structure, opts):
return
else:
return
if best_structure:
result = self.creep.dismantle(best_structure)
volatile_cache.mem('dismantle_squad_dismantling').set(target.name, best_structure)
if result == OK:
if best_structure.hits < our_dismantle_power \
or best_structure.hits < our_dismantle_power + _.sum(
members, lambda x: x.creep.getActiveBodypartsBoostEquivalent(RANGED_ATTACK, 'rangedAttack')
* RANGED_ATTACK_POWER):
del self.memory._move
else:
self.log("Unknown result from {}.dismantle({}): {}"
.format(self.creep, best_structure, result))
def find_target_here(self, target):
# type: (Location) -> Optional[RoomPosition]
opts = get_opts(self.pos.roomName)
if self.memory.tloctimeout > Game.time:
pos = positions.deserialize_xy_to_pos(self.memory.tloc, target.roomName)
if pos:
if _.some(self.room.look_at(LOOK_STRUCTURES, pos), get_dismantle_condition_not_a_road(opts)):
return pos
structure_target = _.find(self.room.look_at(LOOK_STRUCTURES, target), get_dismantle_condition_not_a_road(opts))
if structure_target:
self.memory.tloc = positions.serialize_pos_xy(structure_target.pos)
self.memory.tloctimeout = Game.time + 50
return structure_target.pos
if self.pos.roomName != target.roomName:
return None
best_target = None
best_rank = -Infinity
enemy_structures = cast(List[OwnedStructure], self.room.find(FIND_HOSTILE_STRUCTURES))
opts = get_opts(self.pos.roomName)
for struct in enemy_structures:
structure_type = struct.structureType
if not can_target_struct(struct, opts):
continue
if structure_type == STRUCTURE_SPAWN:
rank = 50
elif structure_type == STRUCTURE_LAB:
rank = 40
elif structure_type == STRUCTURE_TOWER:
rank = 30
elif structure_type == STRUCTURE_EXTENSION:
rank = 20
elif structure_type != STRUCTURE_RAMPART:
rank = 10
else:
rank = 0
rank -= movement.chebyshev_distance_room_pos(self.pos, struct.pos) / 20
if structure_type != STRUCTURE_RAMPART:
rampart = cast(StructureRampart, _.find(self.room.look_at(LOOK_STRUCTURES, struct.pos),
{'structureType': STRUCTURE_RAMPART}))
if rampart:
rank -= 10 * rampart.hits / (DISMANTLE_POWER * MAX_CREEP_SIZE / 2 * CREEP_LIFE_TIME * 0.9)
if rank > best_rank:
best_target = struct
best_rank = rank
if best_target:
self.memory.tloc = positions.serialize_pos_xy(best_target.pos)
self.memory.tloctimeout = Game.time + 100
return best_target.pos
else:
if self.pos.isNearTo(target):
flag = flags.look_for(self.room, target, SQUAD_DISMANTLE_RANGED)
if flag:
msg = "[dismantle squad][{}][{}] Dismantle job in {} completed at {}! Removing flag {} ({})." \
.format(self.home.name, self.name, self.pos.roomName, Game.time, flag, flag.pos)
self.log(msg)
Game.notify(msg)
flag.remove()
self.memory.tloc = positions.serialize_pos_xy(target)
self.memory.tloctimeout = Game.time + 20
return target
def _move_options(self, target_room, opts):
# type: (str, Dict[str, Any]) -> Dict[str, Any]
target = locations.get(self.memory.squad)
if target and target.roomName == self.pos.roomName and target.roomName == target_room:
self.log("using dismantler callback for {}", target_room)
return _dismantle_move_to_opts
else:
return SquadDrone._move_options(self, target_room, opts)
def findSpecialty(self):
return WORK
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import numpy as np
import oneflow as flow
from test_util import GenArgList, type_name_to_flow_type
import oneflow.typing as oft
def test_naive(test_case):
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
@flow.global_function(func_config)
def ModJob(a: oft.Numpy.Placeholder((5, 2)), b: oft.Numpy.Placeholder((5, 2))):
return a == b
x = np.random.rand(5, 2).astype(np.float32)
y = np.random.rand(5, 2).astype(np.float32)
z = ModJob(x, y).get().numpy()
r = func_equal(x, y)
test_case.assertTrue(np.array_equal(z, x == y))
flow.clear_default_session()
def test_broadcast(test_case):
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
@flow.global_function(func_config)
def ModJob(a: oft.Numpy.Placeholder((5, 2)), b: oft.Numpy.Placeholder((1, 2))):
return a == b
x = np.random.rand(5, 2).astype(np.float32)
y = np.random.rand(1, 2).astype(np.float32)
z = None
z = ModJob(x, y).get().numpy()
test_case.assertTrue(np.array_equal(z, x == y))
flow.clear_default_session()
def func_equal(a, b):
return a == b
def func_not_equal(a, b):
return a != b
def func_greater_than(a, b):
return a > b
def func_greater_equal(a, b):
return a >= b
def func_less_than(a, b):
return a < b
def func_less_equal(a, b):
return a <= b
# def func_logical_and(a, b):
# return a & b
def np_array(dtype, shape):
if dtype == flow.int8:
return np.random.randint(0, 127, shape).astype(np.int8)
elif dtype == flow.int32:
return np.random.randint(0, 10000, shape).astype(np.int32)
elif dtype == flow.int64:
return np.random.randint(0, 10000, shape).astype(np.int64)
elif dtype == flow.float:
return np.random.rand(*shape).astype(np.float32)
elif dtype == flow.double:
return np.random.rand(*shape).astype(np.double)
else:
assert False
def GenerateTest(
test_case, func, a_shape, b_shape, dtype=flow.int32, device_type="cpu"
):
func_config = flow.FunctionConfig()
func_config.default_data_type(dtype)
@flow.global_function(func_config)
def ModJob1(a: oft.Numpy.Placeholder(a_shape, dtype=dtype)):
with flow.scope.placement(device_type, "0:0"):
return func(a, a)
@flow.global_function(func_config)
def ModJob2(
a: oft.Numpy.Placeholder(a_shape, dtype=dtype),
b: oft.Numpy.Placeholder(b_shape, dtype=dtype),
):
with flow.scope.placement(device_type, "0:0"):
return func(a, b)
a = np_array(dtype, a_shape)
b = np_array(dtype, b_shape)
y = ModJob1(a).get().numpy()
test_case.assertTrue(np.array_equal(y, func(a, a)))
y = ModJob2(a, b).get().numpy()
test_case.assertTrue(np.array_equal(y, func(a, b)))
flow.clear_default_session()
def test_broadcast_logical(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["func"] = [
func_equal,
func_not_equal,
func_greater_than,
func_greater_equal,
func_less_than,
        func_less_equal,
]
arg_dict["a_shape"] = [(64, 64), (64, 64, 64)]
arg_dict["b_shape"] = [(1, 64), (64, 1), (64, 1, 64), (1, 64, 1)]
arg_dict["data_type"] = [flow.int8, flow.int32, flow.int64, flow.float, flow.double]
arg_dict["device_type"] = ["cpu", "gpu"]
for arg in GenArgList(arg_dict):
if arg[5] == "cpu" and arg[4] == "float16":
continue
if len(arg[2]) < len(arg[3]):
continue
GenerateTest(*arg)
def test_xy_mod_x1(test_case):
GenerateTest(test_case, func_less_than, (64, 64), (64, 1), flow.int8)
def test_xy_mod_1y(test_case):
GenerateTest(test_case, func_greater_than, (64, 64), (1, 64))
def test_xyz_mod_x1z(test_case):
GenerateTest(test_case, func_equal, (64, 64, 64), (64, 1, 64))
def test_xyz_mod_1y1(test_case):
GenerateTest(test_case, func_not_equal, (64, 64, 64), (1, 64, 1))
|
from __future__ import annotations
from datetime import datetime
class Issue:
cursor: str
closed: bool
    participantTotalCount: int
def __init__(self, data: dict) -> None:
self.cursor = data.get('cursor')
self.closed = data['closed']
        self.participantTotalCount = data.get('participantTotalCount')
@staticmethod
def from_github(data: dict) -> Issue:
node = data.get('node')
return Issue({
'cursor': data.get('cursor'),
'closed': node['closed'],
'participantTotalCount': node['participants']['totalCount'],
})
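# --- Usage sketch (illustrative only) ------------------------------------------------
# The dict below mirrors the shape of a GitHub GraphQL `issues` edge as consumed by
# from_github(); the concrete values are made up for demonstration.
if __name__ == '__main__':
    edge = {
        'cursor': 'Y3Vyc29yOjE=',
        'node': {'closed': True, 'participants': {'totalCount': 3}},
    }
    issue = Issue.from_github(edge)
    print(issue.cursor, issue.closed, issue.participantTotalCount)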
|
##################
##Script for the AI Financial Forecaster's Main Code and Trading Strategy Component from Team 1(CE903)
##################
#!pip install nasdaq-data-link
import Core_Infrastructure_1stHalf ##Also forms the Project Directory
from Core_Infrastructure_1stHalf import historical_data_recorder
from Core_Infrastructure_1stHalf import stock_market_dataset_preprocessor
import Core_Infrastructure_2ndHalf
from Core_Infrastructure_2ndHalf import getROC
from Core_Infrastructure_2ndHalf import willR
from Core_Infrastructure_2ndHalf import midPrice
from Core_Infrastructure_2ndHalf import TAA_Dataset_Transformer
from Core_Infrastructure_2ndHalf import TAI_Dataset_Preprocessor
import Trading_Strategy_1stHalf
from Trading_Strategy_1stHalf import MLP_model
import Trading_Strategy_2ndHalf
from Trading_Strategy_2ndHalf import modelLSTM
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
'''
###############################################
###############################################
MAIN CODE
'''
if __name__ == '__main__':
while 1:
'''
Implementation of Stock Market Data Storage
'''
dataset_source = input("Choose source of Stock Market Dataset (Device Storage/ Nasdaq API/ Yahoo Finance): ")##Needs Error Handling due to User Input
if dataset_source == "Device Storage":
company_ticker = input("\nProvide the Company Ticker (Use Project Directory if needed): ")
            Stock_Market_Dataset = pd.read_csv(r"C:\Group Project\Stock Market Datasets\%s.csv" % company_ticker)
start = Stock_Market_Dataset['Date'].iloc[0]
end = Stock_Market_Dataset['Date'].iloc[-1]
Stock_Market_Dataset['Date'] = pd.to_datetime(Stock_Market_Dataset['Date'])
Stock_Market_Dataset = Stock_Market_Dataset.set_index('Date')
print("\nStock Market Dataset successfully accessed from Device Storage.\n" )
#Note: The Index/Date Column now contains "datetime Objects (datetime64[ns])"
if dataset_source == "Nasdaq API" or dataset_source == "Yahoo Finance":
company_ticker, start, end, Stock_Market_Dataset = historical_data_recorder(dataset_source)
#Note: The Index/Date Column contains "datetime Objects (datetime64[ns])"
'''
Implementations of Stock Market Data Preprocessor and TAA Dataset Transformer
'''
        S_M_Dataset_copy = Stock_Market_Dataset.copy()
S_M_Dataset_preprocessed = stock_market_dataset_preprocessor(S_M_Dataset_copy)
Price_History, Prices_Dataframe, TA_Indicators_Dataset = TAA_Dataset_Transformer(S_M_Dataset_preprocessed, 'Close')
'''
        Implementations of Line Plots for the Time Series of:
Historical Close Price and Technical Analysis Indicators
'''
        plot_cmd = input("Are Plots of Close Price and Technical Analysis Indicators Required (Yes/No): ")
if plot_cmd == "Yes":
#Hist_fig = plt.figure()
#Prices_Dataframe.plot(title = ("Close Price VS Timesteps for Stocks of %s" % company_ticker))
#TA_Indicators_Dataset.set_index('Date')
TA_Indicators_Dataset.set_index('Date').plot(subplots = True, layout=(4, 2), figsize = (12, 12), title = ("Technical Analysis Indicators for Stocks of %s" % company_ticker))
'''
Implementation of the Preprocessor of the Transformed Dataset/TAI Dataset
'''
print("\nUnivariate LSTM is the Baseline ML Model.\n")
ml_model = input("\nName the ML Model (e.g. MLP/uni_LSTM): ")
MOA_model = input("\nName the Mode of Operation of the ML Model (e.g. Train/Predict): ")
TAIinput_train_scaled, TAIinput_test_scaled, TAIoutput_train_scaled, TAIoutput_test_scaled, ip_scalar, op_scalar = TAI_Dataset_Preprocessor(TA_Indicators_Dataset, ml_model)
'''
Implementation of the Trading Strategy Component
        MUST TRAIN AND PREDICT IN THE SAME RUN AS MODELS ARE NOT SAVED OR LOADED
'''
if ml_model == "MLP":
MLP_model(MOA_model, TAIinput_train_scaled, TAIoutput_train_scaled, TAIinput_test_scaled, op_scalar, TA_Indicators_Dataset)
#if ml_model == "uni_LSTM":#Univariate LSTM is the Baseline
#Use Code from Trading_Strategy_2ndHalf here
##To END the Program:
end_cmd = input("Do you wish to Finish Now (Yes/No): ")
if end_cmd == "Yes":
break
|
import argparse
import collections
import os
from collections import OrderedDict
from collections import defaultdict
from PIL import ImageOps, Image
from pandas import DataFrame
def get_image(input_file):
im = Image.open(input_file).convert('RGB') # Can be many different formats.
im = ImageOps.mirror(im)
im = im.transpose(Image.ROTATE_90)
return im
def convert(input_file):
file_name, ext = os.path.splitext(input_file)
im = get_image(input_file)
pix = im.load()
color_dict = OrderedDict()
color_counter = defaultdict(int)
dataframe = DataFrame(index=range(im.size[0]), columns=range(im.size[1]))
counter = 0
print('printing image : ')
for i in range(im.size[0]):
for j in range(im.size[1]):
if pix[i, j] not in color_dict.keys():
counter += 1
color_dict[pix[i, j]] = counter
color_counter[pix[i, j]] += 1
            print(color_dict[pix[i, j]], end=' ')
dataframe.loc[i, j] = color_dict[pix[i, j]]
print('')
print('legende')
print('---------')
print('dimensions : {}'.format(im.size))
print('')
print('{: >20} {: >20} {: >20} {: >20} {: >20}'.format('R', 'G', 'B', 'number', 'count'))
for k, v in color_dict.items():
print('{: >20} {: >20} {: >20} {: >20} {: >20}'.format(k[0], k[1], k[2], v, color_counter[k]))
# color_dict = sorted(color_dict)
sorted_color_dict = collections.OrderedDict(sorted(color_dict.items(), reverse=True))
dataframe = dataframe.applymap(lambda x: x * -1)
color_dict = {}
    # the index of the color defines the "luminosity" *ahum*.
# so if the first element was nr 5 -> it will become now nr 1 in the df and the color dict
for i, colour in enumerate(sorted_color_dict.items(), start=1):
dataframe = dataframe.applymap(lambda x: i if x * -1 == colour[1] else x)
# create a new color dict with the sorted colour ranging from 1 to x
color_dict[colour[0]] = i
color_dict = collections.OrderedDict(sorted(color_dict.items(), reverse=True))
file_name_csv = file_name + '.csv'
dataframe.to_csv(file_name_csv, index=False, header=False,
sep=';')
fd = open(file_name_csv, 'a')
fd.write('legende \n')
fd.write('--------- \n')
fd.write('dimensions : {} \n'.format(im.size))
    fd.write('\n')
fd.write('{: >20} {: >20} {: >20} {: >20} {: >20} \n'.format('R', 'G', 'B', 'number', 'count'))
for k, v in color_dict.items():
fd.write('{: >20} {: >20} {: >20} {: >20} {: >20} \n'.format(k[0], k[1], k[2], v, color_counter[k]))
fd.close()
number_of_colours = len(color_dict)
LENGTH_SQUARE_SIDE = 50
TOTAL_WIDTH = number_of_colours * LENGTH_SQUARE_SIDE
TOTAL_HEIGHT = LENGTH_SQUARE_SIDE
x_y = (TOTAL_WIDTH, TOTAL_HEIGHT)
im = Image.new("RGB", (x_y[0], x_y[1]))
pix = im.load()
for y in range(x_y[1]):
counter = 0
for i, x in enumerate(range(x_y[0])):
pix[x, y] = list(color_dict.keys())[counter]
if i != 0 and i % LENGTH_SQUARE_SIDE == 0:
counter += 1
file_name_png = file_name + '_sorted_colours' + '.png'
im.save(file_name_png, "PNG")
return file_name_csv, file_name_png
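# A minimal CLI sketch (an assumption about intended use, not taken from the original
# file): run the converter on a single image path given on the command line. The
# argument name "input_file" is chosen here for illustration.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Convert an image into a numbered colour grid (CSV) plus a sorted-colour strip (PNG).')
    parser.add_argument('input_file', help='Path to the image to convert.')
    args = parser.parse_args()
    csv_path, png_path = convert(args.input_file)
    print('wrote {} and {}'.format(csv_path, png_path))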
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def migrate_profile_pics(apps, schema_editor):
from events.tasks import firebase_get, firebase_put
from events.handlers.events.profile import UpdateProfilePicHandler
User = apps.get_model("gatekeeper", "User")
for u in User.objects.all():
try:
if u.buid:
print("Migrating profile picture for %s" % u.email,)
public = firebase_get("/users/%s/public" % u.buid)
if public:
firebase_put("/users/%s/pictures" % u.buid, {"original": public["profile_pic"]})
h = UpdateProfilePicHandler({
"creator": u.buid,
})
h.handle()
public = firebase_get("/users/%s/public" % u.buid)
firebase_put("/users/%s/public/profile_pic" % u.buid, {
".value": public["profile_pic_url_medium"]
})
print(".. pictures migrated.")
else:
print(".. no profile found.")
        except Exception:
import traceback
traceback.print_exc()
class Migration(migrations.Migration):
dependencies = [
('gatekeeper', '0022_auto_20150710_1914'),
]
operations = [
migrations.RunPython(migrate_profile_pics),
]
|
# proxy module
from pyface.workbench.debug.debug_view import *
|
"""
Tested with Python 3.4
This program uses Tsorter by Terrill Dent, licensed under the MIT licence:
http://terrill.ca/sorting/tsorter/LICENSE
This is a very early draft of a program. Use it more as food for thought than
as a production tool. Please excuse the lengthy code; I am only a beginner
programmer :)
"""
import pandas as pd
from bokeh.plotting import figure, output_file, save
from bokeh.embed import file_html
from bokeh.resources import CDN
import collections
from sys import argv
from gexf import Gexf
from itertools import combinations
import numpy as np
# import matplotlib.pyplot as plt # Not used right now
# from wordcloud import WordCloud, STOPWORDS # Not used right now
# import image # Not used right now.
# Takes filename from command prompt as input
script, filename = argv
"""
This script makes use of a number of dictionaries and lists
for creating the html output files. The counter variables
are mostly used as control data but also in the html file.
"""
plotyears = []
cr = []
authors = []
journals = []
keywords = []
keywordset = []
records = 0
citrefcounter = 0
authorcounter = 0
journalcounter = 0
keywordcounter = 0
#indexbody = [] Use to write everything to index. Memory overload, see below
authorbody = []
journalbody = []
keywordbody = []
wclist = []
indexbodydict= {}
citationnetworkdict = {}
# Open up the Web of Science source data as a tsv file.
with open(filename, 'r') as tsv:  # the filename is taken from the command line (argv) above
next(tsv) #Skips the first line in the file, containing the TSV headers
WoSdata = [line.strip().split('\t') for line in tsv] #everything as a list
# This begins the creation of a .gexf graph for keyword co-occurrence analysis.
# It comes already here because nodes are added in the loop below.
gexf = Gexf("Keyword Co-occurrence Network", "File:" + filename + ".")
graph = gexf.addGraph("directed", "static", "Web of Science Keyword network")
"""
This loop parses, redefines and writes the selected values to 'index.html'.
However, it also contains surplus variables for future uses, following the
non-existent documentation of the ISI format. Use however you feel like.
"""
for W in WoSdata:
PT = W[0] # Publication Type
AU = W[1] # Authors
BA = W[2] # ?
BE = W[3] # Editors of Proceedings
GP = W[4] # ?
AF = W[5] # Authors Full
BF = W[6] # ?
CA = W[7] # Group Authors
TI = W[8] # Title
SO = W[9] # Source (Journal title, full)
SE = W[10] # Book Series title
BS = W[11] # ?
LA = W[12] # Language
DT = W[13] # Document Type
CT = W[14] # Conference Title
CY = W[15] # Conference Date
CL = W[16] # Conference Location
SP = W[17] # Conference Sponsors
HO = W[18] # Conference Host
DE = W[19] # Original Keywords
ID = W[20] # New Keywords by ISI (keywords plus)
AB = W[21] # Abstract
C1 = W[22] # Research Addresses: Note [] in fields.
RP = W[23] # Reprint Address
EM = W[24] # E-mail (Semi-colon separated)
RI = W[25] # Researcher ID
OI = W[26] # ?
FU = W[27] # Funding agency and grant number
FX = W[28] # Funding text
CR = W[29] # Cited references (Semi-colon separated)
NR = int(W[30]) # Cited reference count (Numerical value)
TC = int(W[31]) # Times cited (Numerical value)
Z9 = int(W[32]) # Total times Cited
U1 = int(W[33]) # ?
U2 = int(W[34]) # ?
PU = W[35] # Publisher
PI = W[36] # Publisher city
PA = W[37] # Publisher Address
SN = W[38] # ISSN (String value)
EI = W[39] # ?
BN = W[40] # ISBN
J9 = W[41] # 29 Character Journal Abbreviation
JI = W[42] # ISO Journal Title Abbreviation
PD = W[43] # Publication date (mixed string and possible integer value)
PY = int(W[44]) # Publication Year (Could also be parsed with date module)
VL = W[45] # Volume (could also be parsed as integer, but not really useful)
IS = W[46] # Issue (contains both numerical values and hyphenations)
PN = W[47] # Part Number
SU = W[48] # Supplement (number)
SI = W[49] # Special Issue
MA = W[50] # ?
BP = W[51] # Beginning page
EP = W[52] # End page
AR = W[53] # Article number of APS journals
DI = W[54] # DOI Number
D2 = W[55] # ?
PG = int(W[56]) # Number of Pages
WC = W[57] # Research Field
SC = W[58] # Science Categories?
GA = W[59] # IDS number, ISI original
UT = W[60] # WOS ISI unique artile identifier
# Count records, good for verification of your original data.
if PT:
records += 1
else:
continue
# This appends out the Web of Science categories to list.
for category in WC.split('; '):
wclist.append(category)
"""
Now, this adds the content of the index.html by creating a dictionary
with the Author, Title, Journal and Year as keys and the Times Cited
as values. In a later stage, this dictionary will be limited to 500
records sorted by Times Cited to prevent creating browser hangups.
"""
indexbodydict.update({'<tr>\n<td>' + AU + '<br><a href="http://dx.doi.org/'
+ DI + '" target="_blank"><div title="' + AB + '">' + TI +
'</div>\n</a></td>' + '\n<td>' + SO + '</td>\n <td>' + str(PY) + '</td><td>'
+ str(TC) + '</td>\n</tr>\n': TC})
# Create list of years
plotyears.append(PY)
# and a list of journals
journals.append(SO)
# and a list of keywords (splitted with semicolons)
keywordset.append(DE.split('; '))
# This dictionary is later used to create a citation network file.
citationnetworkdict.update({AU: CR.split('; ')})
    # This loop splits and adds cited references to create the cr.html page
for citref in CR.split('; '):
cr.append(citref)
citrefcounter += 1
# This loop splits and adds authors for the authors.html page
for au in AU.split('; '):
authors.append(au)
# This loop splits and adds keywords to the keywords.html page
for keyw in DE.split('; '):
if len(keyw) < 1: # removes empty keywords
continue
else:
keywords.append(keyw.lower()) # make everything lower case
graph.addNode(keyw.lower(), keyw.lower()) # add nodes to graph
"""
Below, the body data for the static pages is created. Increase the values
after 'most_common' to include more data. But keep in mind that this will
slow down the browser.
"""
# Create author list
authorcount = collections.Counter(authors)
for a in authorcount: # count unique authors, not duplicates
authorcounter += 1
for author, count in authorcount.most_common(500):
authorbody.append("<tr>\n<td>" + author + "</td>\n<td>"
+ str(count) + "</td>\n</tr>\n")
# Create journal list
journalcount = collections.Counter(journals)
for j in journalcount:
journalcounter +=1
for journal, count in journalcount.most_common(500):
journalbody.append("<tr>\n<td>" + journal + "</td>\n<td>"
+ str(count) + "</td>\n</tr>\n")
# Create keyword list
keywordcount = collections.Counter(keywords)
for k in keywordcount:
keywordcounter +=1
for keyword, count in keywordcount.most_common(1000):
keywordbody.append("<tr>\n<td>" + keyword + "</td>\n<td>"
+ str(count) + "</td>\n</tr>\n")
# This is just for printing the top 20 wc categories to the terminal.
# !!! Make a proper page out of this in future version
wclistcount = collections.Counter(wclist)
for wc, count in wclistcount.most_common(20):
print(wc + "\t" + str(count))
# Create edges for a keyword co-occurrence network as defined already on line 60.
edgelist = []
for k in keywordset:
cooccurrence = list(combinations(k, 2))
for c in cooccurrence:
edgelist.append(c)
for enumer, edge in enumerate(edgelist):
# print(enumer, edge[0].lower(), edge[1].lower())
graph.addEdge(enumer, edge[0].lower(), edge[1].lower())
# Write file
gexf_file = open(filename + "Keywords.gexf", "wb")
gexf.write(gexf_file)
"""
# Create graphviz visualization (experimental) Not used right now
from graphviz import Digraph
u = Digraph('unix', filename='testargraphviz.gv')
u.body.append('size="6,6"')
u.node_attr.update(color='lightblue2', style='filled')
testlist = [('katt', 'fax'), ('kille', 'tjej')]
edgetoplist = collections.Counter(edgelist)
#print(edgetoplist)
for n in edgetoplist.most_common(100):
print(n)
for edge, value in edgetoplist.most_common(100):
edge1 = edge[0].lower()
edge2 = edge[1].lower()
u.edge(edge1, edge2)
u.view()
"""
# Create nodes and edges for Citation network.
gexf = Gexf("Citation Network", "File:" + filename + ".")
graph = gexf.addGraph("directed", "static", "Web of Science Citation network")
numberofedges = 0
for key, value in citationnetworkdict.items():
graph.addNode(key, key)
for v in value:
graph.addNode(v,v)
#print(str(numberofedges) + "***" + key + "***" + v)
graph.addEdge(str(numberofedges), key, v)
numberofedges += 1
# Write file
gexf_file = open(filename + "Citations.gexf", "wb")
gexf.write(gexf_file)
# Create the header graph with Bokeh
counter = collections.Counter(plotyears) #count them
output_file("years.html", title="Citepy - Yearly distribution of records")
years = []
val = []
yearvaldict = {}
for number in sorted(counter): #This puts years and values
years.append(number)
value = counter[number]
val.append(value)
yearvaldict[number] = [value]
for key, value in yearvaldict.items():
print(key, value)
# Convert data into a panda DataFrame format
data=pd.DataFrame({'year':years, 'value':val})
# Create new column (yearDate) equal to the year Column but with datetime format
data['yearDate']=pd.to_datetime(data['year'],format='%Y')
# Create a line graph with datetime x axis and use datetime column(yearDate)
# for this axis
p = figure(width=600, height=150, x_axis_type="datetime")
p.logo = None
p.toolbar_location = None #"right"
p.line(x=data['yearDate'],y=data['value'], color="#B7ADCF", line_width=2)
#show(p) # for debugging
bokehhtml = file_html(p, CDN, "Yearly Distribution of Records")
save(p)
"""
#Create wordcloud of keywords. Not used in the current version. Just a suggestion.
# Stopwords, just add more if needed.
stopwords = STOPWORDS
stopwords.add("et")
stopwords.add("will")
stopwords.add("al")
stopwords.add("also")
# Generating wordcloud (change size)
wordcloud = WordCloud(
background_color="white",
max_words=500,
stopwords=STOPWORDS,
width=3000,
height=1500
)
wordcloud.generate(str(keywords))
# Generating the image and showing it.
plt.imshow(wordcloud)
plt.axis("off")
print('Saving Wordcloud as: ' + filename + '.png')
plt.savefig(filename + '.png') # change to .svg, .pdf etc. for other outputs.
##plt.show()
"""
"""
Below begins the html rendering to files.
"""
# Open the files.
htmlfile = open('index.html','w')
crfile = open('cr.html','w')
authorfile = open('authors.html','w')
journalfile = open('journals.html', 'w')
keywordfile = open('keywords.html', 'w')
# This header is shared for all pages.
header = '''
<!DOCTYPE html>
<html>
<head>
<link href="style.css" rel="stylesheet">
</head>
<script>
function init() {
var sorter = tsorter.create('result_table');
}
window.onload = init;
</script>
<h1>Results for <em>''' + filename + ''' </em></h1>
<p> ''' + bokehhtml + '''
<a href="index.html">Records</a>: ''' + str(records) + '''
<a href="authors.html">Authors</a>: ''' + str(authorcounter) + '''
<a href="journals.html">Journals</a>: ''' + str(journalcounter) + '''
<a href="cr.html">Cited References</a>: ''' + str(citrefcounter) + '''
<a href="keywords.html">Original Keywords</a>: ''' + str(keywordcounter) + '''
<br><a href="years.html">Yearly output (graph)</a> |
<a href="''' + filename + '''
Keywords.gexf">Keyword Co-occurrence Network (.gexf)</a> |
<a href="''' + filename + '''
Citations.gexf">Citation Network (Authors, Cited references) (.gexf)</a> |
</p>
'''
# This top is specific to index.html
indexbodytop = """
<table id="result_table" class="sortable">
<thead>
<tr>
<th>Author / Title</th>
<th>Journal</th>
<th data-tsorter="numeric">Year</th>
<th data-tsorter="numeric">Citations</th>
</tr>
</thead>
<tbody>
"""
# This top is specific to cr.html
crbodytop = """
<table id="result_table" class="sortable">
<thead>
<tr>
<th>Author</th>
<th data-tsorter="numeric">Year</th>
<th>Journal</th>
<th>Volume</th>
<th>Start page</th>
<th>DOI</th>
<th data-tsorter="numeric">Cited in dataset</th>
</tr>
</thead>
<tbody>
"""
# This top is specific to authors.html
authorbodytop = """
<table id="result_table" class="sortable">
<thead>
<tr>
<th>Author</th>
<th data-tsorter="numeric">Authorship in dataset</th>
</tr>
</thead>
<tbody>
"""
# This top is specific to journals.html
journalbodytop = """
<table id="result_table" class="sortable">
<thead>
<tr>
<th>Journals</th>
<th data-tsorter="numeric">Records in dataset</th>
</tr>
</thead>
<tbody>
"""
# This top is specific to keywords.html
keywordbodytop = """
<table id="result_table" class="sortable">
<thead>
<tr>
<th>Keyword</th>
<th data-tsorter="numeric">Occurrences in dataset</th>
</tr>
</thead>
<tbody>
"""
# Write to files but keep them open.
htmlfile.write(header + indexbodytop)
crfile.write(header + crbodytop)
authorfile.write(header + authorbodytop)
journalfile.write(header + journalbodytop)
keywordfile.write(header + keywordbodytop)
"""
Write the content of the main table
Note, arranged by most cited with the 'most_common' variable
Change to increase/decrease the value.
This should be rewritten in a future version.
"""
for record in dict(collections.Counter(indexbodydict).most_common(500)):
htmlfile.write(record)
"""
This one could be used to write everything as index (without the
limitation above). This makes the computer run out of memory when using large data sets.
for i in indexbody:
htmlfile.write(i)
"""
#Write html code to file
for a in authorbody:
authorfile.write(a)
for j in journalbody:
journalfile.write(j)
for k in keywordbody:
keywordfile.write(k)
"""
Create the Cited References (CR) table content for cr.html. This is a bit complicated because
the CR format is not very consistent, and DOI numbers are not very standardized.
The loop reads from the 'cr' list, which is filled with cited references. It
counts them and creates the list from 500 most frequent occurrences. To parse
the CRs, they are split up and accessed as lists (within the list) and then
tried (try:) for content. If data is lacking, it writes "N/A".
"""
crcount = collections.Counter(cr) #Count them
for citedreference, count in crcount.most_common(500):
crfile.write("<tr>\n")
crsplit = (citedreference.split(', '))
try:
if len(crsplit[0]) > 1:
crfile.write("<td>" + crsplit[0] + "</td>")
else:
crfile.write("<td>Unknown</td>")
except IndexError:
crfile.write("<td>" + "N/A" + "</td>")
try:
crfile.write("<td>" + crsplit[1] + "</td>")
except IndexError:
crfile.write("<td>" + "N/A" + "</td>")
try:
crfile.write("<td>" + crsplit[2] + "</td>")
except IndexError:
crfile.write("<td>" + "N/A" + "</td>")
try:
"""Very rarely the data starts with "DOI DOI".
Another elif-statement may prevent this."""
if crsplit[3].startswith("DOI"):
crfile.write('<td>N/A</td><td>N/A</td><td>'
+ '<a href="http://dx.doi.org/'
+ crsplit[3][4:] + '">' + crsplit[3][4:] + '</td>\n<td>'
+ str(count) + '</td></tr>\n')
continue
else:
crfile.write("<td>" + crsplit[3] + "</td>")
except IndexError:
crfile.write("<td>" + "N/A" + "</td>")
try:
if crsplit[4].startswith("DOI"):
crfile.write('<td>N/A</td><td><a href="http://dx.doi.org/'
+ crsplit[4][4:] + '">' + crsplit[4][4:] + '</td>\n<td>'
+ str(count) + '</td></tr>\n')
continue
else:
crfile.write("<td>" + crsplit[4] + "</td>")
except IndexError:
crfile.write("<td>" + "N/A" + "</td>")
try:
crfile.write('<td><a href="http://dx.doi.org/'
+ crsplit[5][4:] + '">' + crsplit[5][4:] + '</td>')
except IndexError:
crfile.write("<td>" + "N/A" + "</td>")
crfile.write("<td>" + str(count) + "</td>")
crfile.write("</tr>\n")
#Define a footer
footer = """
</tbody>
</table>
<script src="tsorter.min.js" type="text/javascript"></script>
</body>
</html>
"""
#Write footer and save files
htmlfile.write(footer)
htmlfile.close()
crfile.write(footer)
crfile.close()
authorfile.write(footer)
authorfile.close()
journalfile.write(footer)
journalfile.close()
keywordfile.write(footer)
keywordfile.close()
# Boot up web server
import http.server
import socketserver
PORT = 8000
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("Serving at port", PORT)
httpd.serve_forever()
|
from ircclient.const import NOTICE, PRIVMSG
crontable = []
outputs = []
relay_outs = []
def catch_all(data):
ignore = ['CONNECTED', 'PING', 'MODE', 'JOIN', 'PART', 'QUIT', 'INVITE', 'KICK', 'BAN']
if data.type == PRIVMSG or data.type == NOTICE:
channel = data.args[1]
if channel.startswith('#'):
channel = channel[1:]
message = data.args[2]
if data.type == NOTICE:
message = 'NOTICE:' + message
relay_outs.append({'type': 'message', 'channel': channel, 'user': data.nick, 'text': message})
else:
relay_outs.append({'debug': True, 'type': data.type, 'description': 'pm from {}'.format(data.user)})
else:
relay_outs.append({'debug': True, 'type': data.type, 'description': unicode(data)})
def process_001(data):
relay_outs.append({'type': 'connected'})
autojoin = data.config['irc'].get('autojoin', [])
if isinstance(autojoin, str):
autojoin = [autojoin]
for channel in autojoin:
outputs.append('join :#{}'.format(channel))
def process_join(data):
pass
#relay_outs.append({'type': 'join', 'user': data.nick, 'channel': data.args[1][1:]})
|
#
# @lc app=leetcode id=297 lang=python3
#
# [297] Serialize and Deserialize Binary Tree
#
# @lc code=start
# Definition for a binary tree node.
import collections
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if not root:
return ''
def levorder(root):
if root is None:
return []
re = [str(root.val)]
q = [root]
while True:
temp = []
for node in q:
if node.left:
temp.append(node.left)
re.append(str(node.left.val))
if not node.left:
re.append('None')
if node.right:
temp.append(node.right)
re.append(str(node.right.val))
if not node.right:
re.append('None')
if not temp:
return re
q = temp
ls = levorder(root)
return '~'.join(ls)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
if not data:
return None
ls = data.split('~')
tree = []
for item in ls:
if item == 'None':
tree.append(None)
else:
tree.append(int(item))
def generator(ls):
if not ls:
return
r = TreeNode(ls[0])
q = collections.deque([r])
tree_len = len(ls)
cnt = 1
while cnt < tree_len:
if not q:
break
node = q.popleft()
if node:
node.left = TreeNode(ls[cnt]) if ls[cnt] is not None else None
q.append(node.left)
if cnt + 1 < tree_len:
node.right = TreeNode(ls[cnt + 1]) if ls[cnt + 1] is not None else None
q.append(node.right)
cnt += 1
cnt += 1
return r
ans = generator(tree)
return ans
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# re = codec.deserialize("1~2~3~None~None~4~5~None~None~None~None")
# print(re)
# @lc code=end
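# A minimal local round-trip check (assumption: outside of LeetCode the TreeNode class is
# not provided, so a small stand-in is defined here; the tree values are arbitrary).
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.right.left, root.right.right = TreeNode(4), TreeNode(5)
    codec = Codec()
    data = codec.serialize(root)
    print(data)  # expected: 1~2~3~None~None~4~5~None~None~None~None
    rebuilt = codec.deserialize(data)
    print(rebuilt.val, rebuilt.right.left.val)  # expected: 1 4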
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.MangaListView.as_view(), name='index'),
path('manga/<int:id>/', views.MangaDetailView.as_view(), name='manga-details-page'),
path('readlists/<int:userid>/', views.ReadlistView.as_view(), name='user-readlist-page'),
]
|
# Generated by Django 2.1.5 on 2019-04-13 22:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('server', '0003_auto_20190413_1553'),
]
operations = [
migrations.AlterField(
model_name='company',
name='company_address',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='company',
name='company_city',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='company',
name='company_name',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='visitors',
name='first_name',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='visitors',
name='last_name',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='visitreason',
name='description',
field=models.CharField(max_length=255),
),
]
|
from copy import deepcopy
import logging
from typing import Any, Callable, Coroutine, List, Union, Dict
log = logging.getLogger(__name__)
Comparable = Any
SortedList = List
DictSortedByKeys = Dict
def producer(
inp: Any,
trash_transformation: Coroutine,
transformations: DictSortedByKeys[Comparable, Coroutine],
elements: List[Comparable],
) -> None:
if inp is None:
trash_transformation.close()
list(transformations.values())[0].close()
else:
start = start_key(inp, elements)
if start is not None:
transformations[start].send(inp)
else:
trash_transformation.send(inp)
def trash_transformation(
start: Comparable,
end: Comparable,
sink: Coroutine,
initial_state: Any,
update_state: Callable[[Any, Any, Comparable, Comparable], Any],
) -> None:
state = deepcopy(initial_state)
try:
while True:
inp = yield
state = update_state(state, inp, start, end)
except GeneratorExit:
if sink is not None:
sink.send(state)
else:
log.info(f"trash_transformation state at closing: {state}")
def transformation(
start: Comparable,
end: Comparable,
next_transformation: Union[Coroutine, None],
sink: Coroutine,
initial_state: Any,
update_state: Callable[[Any, Any, Comparable, Comparable], Any],
) -> None:
state = deepcopy(initial_state)
try:
while True:
inp = yield
state = update_state(state, inp, start, end)
if (inp.end > end) and (next_transformation is not None):
next_transformation.send(inp)
except GeneratorExit:
sink.send(state)
if next_transformation is not None:
next_transformation.close()
else:
sink.close()
def sink(collector: Callable) -> None:
try:
while True:
inp = yield
collector(inp)
except GeneratorExit:
pass
def start_key(inp: Any, elements: SortedList[Comparable]) -> Comparable:
if (inp.end < elements[0]) or (inp.start > elements[-1]):
return None
if inp.start < elements[0]:
return elements[0]
for i, next_elem in enumerate(elements[1:]):
if inp.start < next_elem:
return elements[i]
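# A minimal wiring sketch (an assumption about intended use, not taken from the original
# module): inputs are simple objects with .start/.end attributes, `elements` holds the
# bucket boundaries, and the update function just counts how many inputs touched a bucket.
if __name__ == "__main__":

    class Interval:
        def __init__(self, start, end):
            self.start = start
            self.end = end

    def count_state(state, inp, start, end):
        return state + 1

    collected = []
    out = sink(collected.append)
    next(out)  # prime the sink coroutine

    elements = [0, 10, 20]
    # one transformation per bucket start, chained so that overflowing inputs move on
    t2 = transformation(10, 20, None, out, 0, count_state)
    next(t2)
    t1 = transformation(0, 10, t2, out, 0, count_state)
    next(t1)
    trash = trash_transformation(None, None, None, 0, count_state)
    next(trash)
    transformations = {0: t1, 10: t2}

    for item in [Interval(1, 5), Interval(3, 15), Interval(-5, -1)]:
        producer(item, trash, transformations, elements)
    producer(None, trash, transformations, elements)  # flush and close the chain
    print(collected)  # expected [2, 1]: two inputs hit bucket 0-10, one spilled into 10-20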
|
import collections
import numpy as np
import os
import time
from tqdm import tqdm
from apex import amp
import torch
import torch.nn.functional as F
from pycocotools.cocoeval import COCOeval
from simpleAICV.classification.common import ClassificationDataPrefetcher, AverageMeter, accuracy
from simpleAICV.detection.common import DetectionDataPrefetcher
from simpleAICV.segmentation.common import SegmentationDataPrefetcher
def validate_classification(val_loader, model, criterion, config):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
model_on_cuda = next(model.parameters()).is_cuda
for images, targets in tqdm(val_loader):
if model_on_cuda:
images, targets = images.cuda(), targets.cuda()
data_time.update(time.time() - end)
end = time.time()
outputs = model(images)
batch_time.update(time.time() - end)
loss = criterion(outputs, targets)
acc1, acc5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
end = time.time()
# per image data load time(ms) and inference time(ms)
per_image_load_time = data_time.avg / config.batch_size * 1000
per_image_inference_time = batch_time.avg / config.batch_size * 1000
return top1.avg, top5.avg, losses.avg, per_image_load_time, per_image_inference_time
def train_classification(train_loader, model, criterion, optimizer, scheduler,
epoch, logger, config):
'''
train classification model for one epoch
'''
top1 = AverageMeter()
top5 = AverageMeter()
losses = AverageMeter()
# switch to train mode
model.train()
local_rank = torch.distributed.get_rank() if config.distributed else None
    if config.distributed:
        gpus_num = torch.cuda.device_count()
        iters = len(train_loader.dataset) // (config.batch_size * gpus_num)
    else:
        iters = len(train_loader.dataset) // config.batch_size
prefetcher = ClassificationDataPrefetcher(train_loader)
images, targets = prefetcher.next()
iter_index = 1
while images is not None:
images, targets = images.cuda(), targets.cuda()
outputs = model(images)
loss = criterion(outputs, targets)
loss = loss / config.accumulation_steps
if config.apex:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if iter_index % config.accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
# measure accuracy and record loss
acc1, acc5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
images, targets = prefetcher.next()
if iter_index % config.print_interval == 0:
log_info = f'train: epoch {epoch:0>4d}, iter [{iter_index:0>5d}, {iters:0>5d}], lr: {scheduler.get_lr()[0]:.6f}, top1: {acc1.item():.2f}%, top5: {acc5.item():.2f}%, loss: {loss.item():.4f}'
logger.info(log_info) if (config.distributed and local_rank
== 0) or not config.distributed else None
iter_index += 1
scheduler.step()
return top1.avg, top5.avg, losses.avg
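# Note on the KD helpers below: `criterion` is expected to be a dict of loss modules in
# which any key containing the substring 'KD' is treated as a distillation loss fed
# (student, teacher) outputs, while every other entry is fed (student, targets).
# A hypothetical example (names chosen here, not from this repo):
#     criterion = {'CELoss': nn.CrossEntropyLoss(), 'KDLoss': SomeKDLoss(T=4.0)}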
def validate_KD(val_loader, model, criterion):
top1 = AverageMeter()
top5 = AverageMeter()
total_losses = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
model_on_cuda = next(model.parameters()).is_cuda
for images, targets in tqdm(val_loader):
if model_on_cuda:
images, targets = images.cuda(), targets.cuda()
tea_outputs, stu_outputs = model(images)
total_loss = 0
for loss_name in criterion.keys():
if 'KD' in loss_name:
temp_loss = criterion[loss_name](stu_outputs, tea_outputs)
else:
temp_loss = criterion[loss_name](stu_outputs, targets)
total_loss += temp_loss
acc1, acc5 = accuracy(stu_outputs, targets, topk=(1, 5))
total_losses.update(total_loss.item(), images.size(0))
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
return top1.avg, top5.avg, total_losses.avg
def train_KD(train_loader, model, criterion, optimizer, scheduler, epoch,
logger, config):
'''
    train knowledge distillation model for one epoch
'''
top1 = AverageMeter()
top5 = AverageMeter()
total_losses = AverageMeter()
# switch to train mode
model.train()
local_rank = torch.distributed.get_rank() if config.distributed else None
    if config.distributed:
        gpus_num = torch.cuda.device_count()
        iters = len(train_loader.dataset) // (config.batch_size * gpus_num)
    else:
        iters = len(train_loader.dataset) // config.batch_size
prefetcher = ClassificationDataPrefetcher(train_loader)
images, targets = prefetcher.next()
iter_index = 1
while images is not None:
images, targets = images.cuda(), targets.cuda()
tea_outputs, stu_outputs = model(images)
loss = 0
loss_value = {}
for loss_name in criterion.keys():
if 'KD' in loss_name:
temp_loss = criterion[loss_name](stu_outputs, tea_outputs)
else:
temp_loss = criterion[loss_name](stu_outputs, targets)
loss_value[loss_name] = temp_loss
loss += temp_loss
total_losses.update(loss.item(), images.size(0))
loss = loss / config.accumulation_steps
if config.apex:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if iter_index % config.accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
# measure accuracy and record loss
acc1, acc5 = accuracy(stu_outputs, targets, topk=(1, 5))
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
images, targets = prefetcher.next()
log_info = ''
if iter_index % config.print_interval == 0:
log_info += f'train: epoch {epoch:0>4d}, iter [{iter_index:0>5d}, {iters:0>5d}], lr: {scheduler.get_lr()[0]:.6f}, top1: {acc1.item():.2f}%, top5: {acc5.item():.2f}%, total_loss: {loss.item():.4f} '
for loss_name in criterion.keys():
log_info += f'{loss_name}: {loss_value[loss_name].item():.4f} '
logger.info(log_info) if (config.distributed and local_rank
== 0) or not config.distributed else None
iter_index += 1
scheduler.step()
return top1.avg, top5.avg, total_losses.avg
def compute_voc_ap(recall, precision, use_07_metric=True):
if use_07_metric:
# use voc 2007 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(recall >= t) == 0:
p = 0
else:
# get max precision for recall >= t
p = np.max(precision[recall >= t])
# average 11 recall point precision
ap = ap + p / 11.
else:
# use voc>=2010 metric,average all different recall precision as ap
# recall add first value 0. and last value 1.
mrecall = np.concatenate(([0.], recall, [1.]))
# precision add first value 0. and last value 0.
mprecision = np.concatenate(([0.], precision, [0.]))
# compute the precision envelope
for i in range(mprecision.size - 1, 0, -1):
mprecision[i - 1] = np.maximum(mprecision[i - 1], mprecision[i])
# to calculate area under PR curve, look for points where X axis (recall) changes value
i = np.where(mrecall[1:] != mrecall[:-1])[0]
# sum (\Delta recall) * prec
ap = np.sum((mrecall[i + 1] - mrecall[i]) * mprecision[i + 1])
return ap
def compute_ious(a, b):
'''
:param a: [N,(x1,y1,x2,y2)]
:param b: [M,(x1,y1,x2,y2)]
:return: IoU [N,M]
'''
a = np.expand_dims(a, axis=1) # [N,1,4]
b = np.expand_dims(b, axis=0) # [1,M,4]
overlap = np.maximum(0.0,
np.minimum(a[..., 2:], b[..., 2:]) -
np.maximum(a[..., :2], b[..., :2])) # [N,M,(w,h)]
overlap = np.prod(overlap, axis=-1) # [N,M]
area_a = np.prod(a[..., 2:] - a[..., :2], axis=-1)
area_b = np.prod(b[..., 2:] - b[..., :2], axis=-1)
iou = overlap / (area_a + area_b - overlap)
return iou
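def _example_voc_metrics():
    '''
    A tiny, self-contained sanity check (added for illustration; not called by the
    training/eval code in this module): shows the [N,4]/[M,4] box layout compute_ious
    expects and how compute_voc_ap turns a recall/precision curve into an AP value.
    '''
    gt_boxes = np.array([[0., 0., 10., 10.], [20., 20., 30., 30.]])
    pred_boxes = np.array([[1., 1., 9., 9.], [100., 100., 110., 110.]])
    print(compute_ious(gt_boxes, pred_boxes))  # [2, 2] IoU matrix
    recall = np.array([0.5, 1.0])
    precision = np.array([1.0, 0.5])
    print(compute_voc_ap(recall, precision))  # 11-point interpolated AP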
def evaluate_voc_detection(val_dataset, val_loader, model, decoder, config):
batch_time = AverageMeter()
data_time = AverageMeter()
preds, gts = [], []
model_on_cuda = next(model.parameters()).is_cuda
end = time.time()
for i, data in tqdm(enumerate(val_loader)):
if model_on_cuda:
images, gt_annots, scales, origin_hws = data['image'].cuda(
), data['annots'], data['scale'], data['origin_hw']
else:
images, gt_annots, scales, origin_hws = data['image'], data[
'annots'], data['scale'], data['origin_hw']
data_time.update(time.time() - end, images.size(0))
end = time.time()
outs_tuple = model(images)
pred_scores, pred_classes, pred_boxes = decoder(*outs_tuple)
pred_scores, pred_classes, pred_boxes = pred_scores.cpu(
), pred_classes.cpu(), pred_boxes.cpu()
scales = scales.unsqueeze(-1).unsqueeze(-1)
pred_boxes /= scales
batch_time.update(time.time() - end, images.size(0))
gt_bboxes, gt_classes = gt_annots[:, :, 0:4], gt_annots[:, :, 4]
gt_bboxes /= scales
for per_image_pred_scores, per_image_pred_classes, per_image_pred_boxes, per_image_gt_bboxes, per_image_gt_classes, per_image_origin_hw in zip(
pred_scores, pred_classes, pred_boxes, gt_bboxes, gt_classes,
origin_hws):
per_image_pred_scores = per_image_pred_scores[
per_image_pred_classes > -1]
per_image_pred_boxes = per_image_pred_boxes[
per_image_pred_classes > -1]
per_image_pred_classes = per_image_pred_classes[
per_image_pred_classes > -1]
# clip boxes
per_image_pred_boxes[:, 0] = torch.clamp(per_image_pred_boxes[:,
0],
min=0)
per_image_pred_boxes[:, 1] = torch.clamp(per_image_pred_boxes[:,
1],
min=0)
per_image_pred_boxes[:,
2] = torch.clamp(per_image_pred_boxes[:, 2],
max=per_image_origin_hw[1])
per_image_pred_boxes[:,
3] = torch.clamp(per_image_pred_boxes[:, 3],
max=per_image_origin_hw[0])
preds.append([
per_image_pred_boxes, per_image_pred_classes,
per_image_pred_scores
])
per_image_gt_bboxes = per_image_gt_bboxes[
per_image_gt_classes > -1]
per_image_gt_classes = per_image_gt_classes[
per_image_gt_classes > -1]
gts.append([per_image_gt_bboxes, per_image_gt_classes])
end = time.time()
all_ap = {}
for class_index in tqdm(range(config.num_classes)):
per_class_gt_boxes = [
image[0][image[1] == class_index] for image in gts
]
per_class_pred_boxes = [
image[0][image[1] == class_index] for image in preds
]
per_class_pred_scores = [
image[2][image[1] == class_index] for image in preds
]
fp = np.zeros((0, ))
tp = np.zeros((0, ))
scores = np.zeros((0, ))
total_gts = 0
# loop for each sample
for per_image_gt_boxes, per_image_pred_boxes, per_image_pred_scores in zip(
per_class_gt_boxes, per_class_pred_boxes,
per_class_pred_scores):
total_gts = total_gts + len(per_image_gt_boxes)
# one gt can only be assigned to one predicted bbox
assigned_gt = []
# loop for each predicted bbox
for index in range(len(per_image_pred_boxes)):
scores = np.append(scores, per_image_pred_scores[index])
if per_image_gt_boxes.shape[0] == 0:
# if no gts found for the predicted bbox, assign the bbox to fp
fp = np.append(fp, 1)
tp = np.append(tp, 0)
continue
pred_box = np.expand_dims(per_image_pred_boxes[index], axis=0)
iou = compute_ious(per_image_gt_boxes, pred_box)
gt_for_box = np.argmax(iou, axis=0)
max_overlap = iou[gt_for_box, 0]
if max_overlap >= config.eval_iou_threshold and gt_for_box not in assigned_gt:
fp = np.append(fp, 0)
tp = np.append(tp, 1)
assigned_gt.append(gt_for_box)
else:
fp = np.append(fp, 1)
tp = np.append(tp, 0)
# sort by score
indices = np.argsort(-scores)
fp = fp[indices]
tp = tp[indices]
# compute cumulative false positives and true positives
fp = np.cumsum(fp)
tp = np.cumsum(tp)
# compute recall and precision
recall = tp / total_gts
precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = compute_voc_ap(recall, precision)
all_ap[class_index] = ap
mAP = 0.
for _, class_mAP in all_ap.items():
mAP += float(class_mAP)
mAP /= config.num_classes
result_dict = collections.OrderedDict()
result_dict['mAP'] = mAP
result_dict['all_ap'] = all_ap
# per image data load time(ms) and inference time(ms)
per_image_load_time = data_time.avg / config.batch_size * 1000
per_image_inference_time = batch_time.avg / config.batch_size * 1000
result_dict['per_image_load_time'] = f'{per_image_load_time:.3f}ms'
result_dict[
'per_image_inference_time'] = f'{per_image_inference_time:.3f}ms'
return result_dict
def evaluate_coco_detection(val_dataset, val_loader, model, decoder, config):
batch_time = AverageMeter()
data_time = AverageMeter()
ids = [idx for idx in range(len(val_dataset))]
results, image_ids = [], []
model_on_cuda = next(model.parameters()).is_cuda
end = time.time()
for i, data in tqdm(enumerate(val_loader)):
if model_on_cuda:
images, scales, origin_hws = data['image'].cuda(
), data['scale'], data['origin_hw']
else:
images, scales, origin_hws = data['image'], data['scale'], data[
'origin_hw']
per_batch_ids = ids[i * config.batch_size:(i + 1) * config.batch_size]
data_time.update(time.time() - end, images.size(0))
end = time.time()
outs_tuple = model(images)
scores, classes, boxes = decoder(*outs_tuple)
scores, classes, boxes = scores.cpu(), classes.cpu(), boxes.cpu()
scales = scales.unsqueeze(-1).unsqueeze(-1)
boxes /= scales
batch_time.update(time.time() - end, images.size(0))
for per_image_scores, per_image_classes, per_image_boxes, index, per_image_origin_hw in zip(
scores, classes, boxes, per_batch_ids, origin_hws):
# clip boxes
per_image_boxes[:, 0] = torch.clamp(per_image_boxes[:, 0], min=0)
per_image_boxes[:, 1] = torch.clamp(per_image_boxes[:, 1], min=0)
per_image_boxes[:, 2] = torch.clamp(per_image_boxes[:, 2],
max=per_image_origin_hw[1])
per_image_boxes[:, 3] = torch.clamp(per_image_boxes[:, 3],
max=per_image_origin_hw[0])
# for coco_eval,we need [x_min,y_min,w,h] format pred boxes
per_image_boxes[:, 2:] -= per_image_boxes[:, :2]
for object_score, object_class, object_box in zip(
per_image_scores, per_image_classes, per_image_boxes):
object_score = float(object_score)
object_class = int(object_class)
object_box = object_box.tolist()
if object_class == -1:
break
image_result = {
'image_id': val_dataset.image_ids[index],
'category_id':
val_dataset.coco_label_to_cat_id[object_class],
'score': object_score,
'bbox': object_box,
}
results.append(image_result)
image_ids.append(val_dataset.image_ids[index])
print('{}/{}'.format(index, len(val_dataset)), end='\r')
end = time.time()
if len(results) == 0:
return None
# load results in COCO evaluation tool
coco_true = val_dataset.coco
coco_pred = coco_true.loadRes(results)
coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
coco_eval.params.imgIds = image_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
eval_result = coco_eval.stats
variable_definitions = {
0: 'IoU=0.5:0.95,area=all,maxDets=100,mAP',
1: 'IoU=0.5,area=all,maxDets=100,mAP',
2: 'IoU=0.75,area=all,maxDets=100,mAP',
3: 'IoU=0.5:0.95,area=small,maxDets=100,mAP',
4: 'IoU=0.5:0.95,area=medium,maxDets=100,mAP',
5: 'IoU=0.5:0.95,area=large,maxDets=100,mAP',
6: 'IoU=0.5:0.95,area=all,maxDets=1,mAR',
7: 'IoU=0.5:0.95,area=all,maxDets=10,mAR',
8: 'IoU=0.5:0.95,area=all,maxDets=100,mAR',
9: 'IoU=0.5:0.95,area=small,maxDets=100,mAR',
10: 'IoU=0.5:0.95,area=medium,maxDets=100,mAR',
11: 'IoU=0.5:0.95,area=large,maxDets=100,mAR',
}
result_dict = collections.OrderedDict()
for i, var in enumerate(eval_result):
result_dict[variable_definitions[i]] = var
# per image data load time(ms) and inference time(ms)
per_image_load_time = data_time.avg / config.batch_size * 1000
per_image_inference_time = batch_time.avg / config.batch_size * 1000
result_dict['per_image_load_time'] = f'{per_image_load_time:.3f}ms'
result_dict[
'per_image_inference_time'] = f'{per_image_inference_time:.3f}ms'
return result_dict
def validate_detection(val_dataset, val_loader, model, decoder, config):
# switch to evaluate mode
model.eval()
assert config.dataset_name in ['COCO', 'VOC']
func_dict = {
'COCO': evaluate_coco_detection,
'VOC': evaluate_voc_detection,
}
with torch.no_grad():
result_dict = func_dict[config.dataset_name](val_dataset, val_loader,
model, decoder, config)
return result_dict
def train_detection(train_loader, model, criterion, optimizer, scheduler,
epoch, logger, config):
'''
    train detection model for one epoch
'''
losses = AverageMeter()
# switch to train mode
model.train()
local_rank = torch.distributed.get_rank() if config.distributed else None
    if config.distributed:
        gpus_num = torch.cuda.device_count()
        iters = len(train_loader.dataset) // (config.batch_size * gpus_num)
    else:
        iters = len(train_loader.dataset) // config.batch_size
prefetcher = DetectionDataPrefetcher(train_loader)
images, targets = prefetcher.next()
iter_index = 1
while images is not None:
images, targets = images.cuda(), targets.cuda()
outs_tuple = model(images)
loss_dict = criterion(targets, *outs_tuple)
loss = sum(loss_dict.values())
        if loss == 0.:
            optimizer.zero_grad()
            images, targets = prefetcher.next()
            iter_index += 1
            continue
if config.apex:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
optimizer.zero_grad()
losses.update(loss.item(), images.size(0))
images, targets = prefetcher.next()
if iter_index % config.print_interval == 0:
log_info = f'train: epoch {epoch:0>4d}, iter [{iter_index:0>5d}, {iters:0>5d}], lr: {scheduler.get_lr()[0]:.6f}, total_loss: {loss.item():.4f}'
for key, value in loss_dict.items():
log_info += f', {key}: {value.item():.4f}'
logger.info(log_info) if (config.distributed and local_rank
== 0) or not config.distributed else None
iter_index += 1
scheduler.step()
return losses.avg
def evaluate_coco_segmentation(val_dataset,
val_loader,
model,
decoder,
config,
mask_threshold=0.5):
batch_time = AverageMeter()
data_time = AverageMeter()
ids = [idx for idx in range(len(val_dataset))]
results, image_ids = [], []
model_on_cuda = next(model.parameters()).is_cuda
end = time.time()
for i, data in tqdm(enumerate(val_loader)):
if model_on_cuda:
images, scales, origin_hws = data['image'].cuda(
), data['scale'], data['origin_hw']
else:
images, scales, origin_hws = data['image'], data['scale'], data[
'origin_hw']
per_batch_ids = ids[i * config.batch_size:(i + 1) * config.batch_size]
data_time.update(time.time() - end, images.size(0))
end = time.time()
outs_tuple = model(images)
scores, classes, masks, boxes = decoder(*outs_tuple)
scores, classes, masks, boxes = scores.cpu(), classes.cpu(), masks.cpu(
), boxes.cpu()
scales = scales.unsqueeze(-1).unsqueeze(-1)
boxes /= scales
batch_time.update(time.time() - end, images.size(0))
for per_image_scores, per_image_classes, per_image_boxes, index, per_image_masks, per_image_scale, per_image_origin_hw in zip(
scores, classes, boxes, per_batch_ids, masks, scales,
origin_hws):
# clip boxes
per_image_boxes[:, 0] = torch.clamp(per_image_boxes[:, 0], min=0)
per_image_boxes[:, 1] = torch.clamp(per_image_boxes[:, 1], min=0)
per_image_boxes[:, 2] = torch.clamp(per_image_boxes[:, 2],
max=per_image_origin_hw[1])
per_image_boxes[:, 3] = torch.clamp(per_image_boxes[:, 3],
max=per_image_origin_hw[0])
# for coco_eval,we need [x_min,y_min,w,h] format pred boxes
per_image_boxes[:, 2:] -= per_image_boxes[:, :2]
input_h, input_w = int(
per_image_masks.shape[-2] / per_image_scale), int(
per_image_masks.shape[-1] / per_image_scale)
per_image_masks = F.interpolate(
per_image_masks.float().unsqueeze(0),
size=(input_h, input_w),
mode='nearest').squeeze(0)
per_image_origin_hw = per_image_origin_hw.int()
per_image_masks = per_image_masks[:, 0:per_image_origin_hw[0],
0:per_image_origin_hw[1]]
per_image_masks = (per_image_masks > mask_threshold).int()
for object_score, object_class, object_mask, object_box in zip(
per_image_scores, per_image_classes, per_image_masks,
per_image_boxes):
object_score = float(object_score)
object_class = int(object_class)
object_box = object_box.tolist()
object_mask = np.asfortranarray(object_mask).astype(np.uint8)
if object_class == -1:
break
image_result = {
'image_id':
val_dataset.image_ids[index],
'category_id':
val_dataset.coco_label_to_cat_id[object_class],
'score':
object_score,
'bbox':
object_box,
'segmentation':
val_dataset.transform_mask_to_rle_mask(object_mask),
}
results.append(image_result)
image_ids.append(val_dataset.image_ids[index])
print('{}/{}'.format(index, len(val_dataset)), end='\r')
end = time.time()
if len(results) == 0:
return None
# load results in COCO evaluation tool
coco_true = val_dataset.coco
coco_pred = coco_true.loadRes(results)
variable_definitions = {
0: 'IoU=0.5:0.95,area=all,maxDets=100,mAP',
1: 'IoU=0.5,area=all,maxDets=100,mAP',
2: 'IoU=0.75,area=all,maxDets=100,mAP',
3: 'IoU=0.5:0.95,area=small,maxDets=100,mAP',
4: 'IoU=0.5:0.95,area=medium,maxDets=100,mAP',
5: 'IoU=0.5:0.95,area=large,maxDets=100,mAP',
6: 'IoU=0.5:0.95,area=all,maxDets=1,mAR',
7: 'IoU=0.5:0.95,area=all,maxDets=10,mAR',
8: 'IoU=0.5:0.95,area=all,maxDets=100,mAR',
9: 'IoU=0.5:0.95,area=small,maxDets=100,mAR',
10: 'IoU=0.5:0.95,area=medium,maxDets=100,mAR',
11: 'IoU=0.5:0.95,area=large,maxDets=100,mAR',
}
result_dict = collections.OrderedDict()
coco_eval_segm = COCOeval(coco_true, coco_pred, 'segm')
coco_eval_segm.params.imgIds = image_ids
coco_eval_segm.evaluate()
coco_eval_segm.accumulate()
coco_eval_segm.summarize()
segm_eval_result = coco_eval_segm.stats
    result_dict['segm_eval_result'] = {}
for i, var in enumerate(segm_eval_result):
        result_dict['segm_eval_result'][variable_definitions[i]] = var
coco_eval_box = COCOeval(coco_true, coco_pred, 'bbox')
coco_eval_box.params.imgIds = image_ids
coco_eval_box.evaluate()
coco_eval_box.accumulate()
coco_eval_box.summarize()
box_eval_result = coco_eval_box.stats
result_dict['box_eval_result'] = {}
for i, var in enumerate(box_eval_result):
result_dict['box_eval_result'][variable_definitions[i]] = var
# per image data load time(ms) and inference time(ms)
per_image_load_time = data_time.avg / config.batch_size * 1000
per_image_inference_time = batch_time.avg / config.batch_size * 1000
result_dict['per_image_load_time'] = f'{per_image_load_time:.3f}ms'
result_dict[
'per_image_inference_time'] = f'{per_image_inference_time:.3f}ms'
return result_dict
def validate_segmentation(val_dataset, val_loader, model, decoder, config):
# switch to evaluate mode
model.eval()
assert config.dataset_name in ['COCO']
func_dict = {
'COCO': evaluate_coco_segmentation,
}
with torch.no_grad():
result_dict = func_dict[config.dataset_name](val_dataset, val_loader,
model, decoder, config)
return result_dict
def compute_segmentation_test_loss(val_loader, model, criterion):
losses = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
model_on_cuda = next(model.parameters()).is_cuda
for data in tqdm(val_loader):
images, gt_annots = data['image'], data['annots']
boxes, masks, classes = gt_annots['box'], gt_annots[
'mask'], gt_annots['class']
if model_on_cuda:
images, boxes, masks, classes = images.cuda(), boxes.cuda(
), masks.cuda(), classes.cuda()
targets = {
'box': boxes,
'mask': masks,
'class': classes,
}
outs_tuple = model(images)
loss_dict = criterion(targets, *outs_tuple)
loss = sum(loss_dict.values())
losses.update(loss.item(), images.size(0))
return losses.avg
def train_segmentation(train_loader, model, criterion, optimizer, scheduler,
epoch, logger, config):
'''
    train segmentation model for one epoch
'''
losses = AverageMeter()
# switch to train mode
model.train()
local_rank = torch.distributed.get_rank() if config.distributed else None
    if config.distributed:
        gpus_num = torch.cuda.device_count()
        iters = len(train_loader.dataset) // (config.batch_size * gpus_num)
    else:
        iters = len(train_loader.dataset) // config.batch_size
prefetcher = SegmentationDataPrefetcher(train_loader)
images, boxes, masks, classes = prefetcher.next()
iter_index = 1
while images is not None:
images, boxes, masks, classes = images.cuda(), boxes.cuda(
), masks.cuda(), classes.cuda()
targets = {
'box': boxes,
'mask': masks,
'class': classes,
}
outs_tuple = model(images)
loss_dict = criterion(targets, *outs_tuple)
loss = sum(loss_dict.values())
        if loss == 0.:
            optimizer.zero_grad()
            images, boxes, masks, classes = prefetcher.next()
            iter_index += 1
            continue
if config.apex:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
optimizer.zero_grad()
losses.update(loss.item(), images.size(0))
images, boxes, masks, classes = prefetcher.next()
if iter_index % config.print_interval == 0:
log_info = f'train: epoch {epoch:0>4d}, iter [{iter_index:0>5d}, {iters:0>5d}], lr: {scheduler.get_lr()[0]:.6f}, total_loss: {loss.item():.4f}'
for key, value in loss_dict.items():
log_info += f', {key}: {value.item():.4f}'
logger.info(log_info) if (config.distributed and local_rank
== 0) or not config.distributed else None
iter_index += 1
scheduler.step()
return losses.avg
|
import matplotlib.pyplot as plt
import numpy as np
from environment.corridor_gridworld import ShortCorridor
class LogisticalFunc:
def __call__(self, w):
return np.exp(w) / (1. + np.exp(w)) * 0.9 + 0.05
def derivative(self, w):
return np.exp(w) / ((1. + np.exp(w)) ** 2) * 0.9
class MyPolicy:
"""
p = (1-l(w))^x l(w)^{1-x}
"""
def __init__(self):
self.weight = 2.
self.l = LogisticalFunc()
def __call__(self, state, action):
x = action
return np.power((1 - self.l(self.weight)), x) * np.power(self.l(self.weight), 1 - x)
def derivative_ln(self, state, action):
x = action
delta_p = -x * np.power((1 - self.l(self.weight)), x - 1) * self.l.derivative(self.weight) * np.power(
self.l(self.weight), 1 - x) + np.power((1 - self.l(self.weight)), x) * (1 - x) * self.l.derivative(
self.weight) * np.power(self.l(self.weight), - x)
return delta_p / (self.__call__(state, action))
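def _check_policy_gradient(eps=1e-6):
    """
    A small numerical sanity check added for illustration (not used by Agent below):
    compares MyPolicy.derivative_ln against a central finite-difference estimate of
    d/dw log pi(a|s) for both actions. The state argument is unused by MyPolicy.
    """
    policy = MyPolicy()
    for action in (0, 1):
        analytic = policy.derivative_ln(None, action)
        w = policy.weight
        policy.weight = w + eps
        log_plus = np.log(policy(None, action))
        policy.weight = w - eps
        log_minus = np.log(policy(None, action))
        policy.weight = w
        numeric = (log_plus - log_minus) / (2 * eps)
        print(action, analytic, numeric)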
class LinearStateValue:
def __init__(self):
self.weight = np.zeros(2)
def __call__(self, x):
return x * self.weight[0] + self.weight[1]
def derivative(self, x):
return np.array([x, 1.])
class Agent:
def __init__(self, env):
self.env = env
self.policy = MyPolicy()
self.state_value = LinearStateValue()
def select_action(self, state):
probability_distribution = []
for action_iter in self.env.action_space:
probability_distribution.append(self.policy(state, action_iter))
        action = np.random.choice(self.env.action_space.n, 1, p=probability_distribution)
return action[0]
def play(self, number_of_episodes, alpha_theta, alpha_w, lambda_w, lambda_theta, gamma):
left_policy_prob = []
for eps_iter in range(number_of_episodes):
reward_sum = 0
state = self.env.reset()
value_i = 1.
eligibility_trace_theta = 0
eligibility_trace_w = np.zeros(2)
while True:
action = self.select_action(state)
new_state, reward, is_done, _ = self.env.step(action)
if not is_done:
delta = reward + gamma * self.state_value(new_state) - self.state_value(state)
else:
delta = reward - self.state_value(state)
delta_ln_theta = self.policy.derivative_ln(state, action)
delta_state_value = self.state_value.derivative(state)
eligibility_trace_w = gamma * lambda_w * eligibility_trace_w + delta_state_value
eligibility_trace_theta = gamma * lambda_theta * eligibility_trace_theta + value_i * delta_ln_theta
self.state_value.weight += alpha_w * delta * eligibility_trace_w
self.policy.weight += alpha_theta * delta * eligibility_trace_theta
value_i *= gamma
reward_sum += reward
if is_done:
break
state = new_state
if eps_iter % 100 == 0:
np.set_printoptions(precision=11)
print(eps_iter, self.policy(0, 0), self.policy(0, 1), self.state_value.weight, reward_sum)
left_policy_prob.append(self.state_value.weight[1])
return np.array(left_policy_prob)
if __name__ == '__main__':
# for i in range(0, 1):
episode_len = 50000
repeat_time = 1
steps = np.zeros(episode_len)
for i in range(repeat_time):
print('repeat time ' + str(i))
env = ShortCorridor()
agent = Agent(env)
step = agent.play(episode_len, 1e-3, 1e-2, 1e-3, 1e-3, 0.9)
# steps += step
plt.plot(step, alpha=0.7, label='$\\alpha_{\\theta}=1e-3,\\alpha_w=1e-2$')
agent = Agent(env)
step = agent.play(episode_len, 1e-3, 1e-4, 1e-3, 1e-3, 0.9)
# steps += step
plt.plot(step, alpha=0.7, label='$\\alpha_{\\theta}=1e-3,\\alpha_w=1e-4$')
agent = Agent(env)
step = agent.play(episode_len, 1e-2, 1e-4, 1e-3, 1e-3, 0.9)
# steps += step
plt.plot(step, alpha=0.7, label='$\\alpha_{\\theta}=1e-2,\\alpha_w=1e-4$')
plt.legend()
plt.show()
# plt.plot(steps / repeat_time, alpha=0.7, c='r', label='$\\alpha_{\\theta}=1e-3,\\alpha_w=1e-3$')
# plt.show()
|
"""
在终端中录入一个内容,循环打印每个文字的编码值。
效果:
请输入文字:qtx
113
116
120
"""
#
# num=input("请输入文字:")
# for item in num:
# print(ord(item))
"""
循环录入编码值打印文字,直到输入空字符串停止。
效果:
请输入数字:113
q
请输入数字:116
t
请输入数字:
Process finished with exit code 0
"""
while True:
    num = input("Enter a number: ")
if num=="":
break
print(chr(int(num)))
|
n = int(input())
m = int(input())
s = float(0)
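# Computes S = sum_{i=1}^{n} sum_{j=1}^{m} i^2 * j / (3^i * (j*3^i + i*3^j))
# and prints the result to four decimal places.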
for i in range(1,n+1):
for j in range (1,m+1):
s = s + (i**2) * j/((3**i) * (j * (3**i) + i * (3**j)))
print("%.4f"%s)
|
#!/bin/env/python
'''
test boss_sbi.galaxies
'''
import os, time
import numpy as np
from boss_sbi.halos import Quijote_LHC_HR
from boss_sbi import galaxies as Galaxies
from boss_sbi import forwardmodel as FM
# --- plotting ---
import matplotlib as mpl
mpl.use('pdf')
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
# read in halo catalog
t0 = time.time()
halos = Quijote_LHC_HR(1, z=0.5)
print('halo readin takes %f sec' % ((time.time() - t0)))
# get LOWZ HOD parameters
theta_hod = Galaxies.thetahod_lowz_sgc()
# apply HOD
t0 = time.time()
hod = Galaxies.hodGalaxies(halos, theta_hod, seed=0)
print('HOD takes %f sec' % ((time.time() - t0)))
# apply forward model
t0 = time.time()
gals = FM.BOSS(hod, sample='lowz-south', seed=0, silent=False)
print('forward model takes %f sec' % ((time.time() - t0)))
# read BOSS sample for comparison
boss = Galaxies.BOSSGalaxies(sample='lowz-south')
zlim = (np.array(boss['Z']) > 0.2) & (np.array(boss['Z']) < 0.37)
# compare footprint
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
sub.scatter(np.array(gals['RA']), np.array(gals['DEC']), c='C0', s=1, rasterized=True, label='Forward Model')
sub.scatter(np.array(gals['RA'])-360, np.array(gals['DEC']), c='C0', s=1, rasterized=True)
sub.scatter(np.array(boss['RA']), np.array(boss['DEC']), c='k', s=1, rasterized=True, label='LOWZ')
sub.scatter(np.array(boss['RA'])-360., np.array(boss['DEC']), c='k', s=1, rasterized=True)
sub.legend(loc='upper right', fontsize=15, handletextpad=0, markerscale=10)
sub.set_xlabel('RA', fontsize=25)
sub.set_xlim(-50, 70)
sub.set_ylabel('Dec', fontsize=25)
fig.savefig(os.path.join(os.path.dirname(os.path.realpath(__file__)), '_fm_footprint.png'), bbox_inches='tight')
# compare n(z)
fig = plt.figure(figsize=(5,5))
sub = fig.add_subplot(111)
_ = sub.hist(np.array(boss['Z'])[zlim], color='k', histtype='step', density=True)
_ = sub.hist(np.array(gals['Z']), color='C0', histtype='step', density=True)
#sub.legend(loc='upper right', fontsize=15, handletextpad=0, markerscale=10)
sub.set_xlabel('redshift', fontsize=25)
sub.set_xlim(0.15, 0.4)
sub.set_ylabel('normalized $n(z)$', fontsize=25)
fig.savefig(os.path.join(os.path.dirname(os.path.realpath(__file__)), '_fm_nz.png'), bbox_inches='tight')
|
#!/usr/bin/env python3
import argparse
import sys
from glob import glob
from matplotlib import pyplot as plt
from spectra import ConvSpectrum
from spectra.plot import plotter
def main(argv=None) -> None:
parser = argparse.ArgumentParser(description="Plot the spectra from csv file(s).")
parser.add_argument("-i", "--input", help="The file(s) to be read (accepts *).", type=str, nargs="+", default=[])
parser.add_argument("-l", "--limits", help="The limits for the graph, x1, x2.", type=float, nargs="+", default=[])
parser.add_argument(
"-p", "--peaks", help="Label the most prominent peaks with their location.", default=False, action="store_true"
)
parser.add_argument(
"-n", "--name", help="The name(s) of the files to be read.", type=str, nargs="+", default="{autogenerate}"
)
parser.add_argument("-t", "--spectra_type", help="Type of spectra to plot.", type=str, default="IR")
parser.add_argument("-s", "--save", help="Where to save the figure.", type=str, default=False)
parser.add_argument(
"-b", "--baseline", help="Subtract the baseline.", type=int, nargs="?", const=True, default=False
)
parser.add_argument("--smooth", help="Smooth the plots.", type=int, nargs="?", const=True, default=False)
parser.add_argument(
"--subtract",
help="Subtract two Spectra from each other.",
nargs="?",
const=True,
default=False,
)
parser.add_argument("--title", help="Figure Title", type=str, default=None)
parser.add_argument(
"-z",
"--normalize",
help="Normalize all plots based on the highest (or selected) peak.",
type=float,
nargs="?",
const=True,
default=False,
)
args = parser.parse_args()
inps = [i for inp in args.input for i in glob(inp)]
if not inps:
print("You must specify file(s) to be read from.")
sys.exit(1)
names = list(range(len(inps))) if args.name == "{autogenerate}" else args.name
spectra = ConvSpectrum.from_csvs(*inps, names=names)
assert not (len(args.limits) % 2)
xlim = args.limits[:2] if args.limits else None
ylim = args.limits[2:4] if len(args.limits) > 2 else None
if args.subtract:
if len(spectra) != 2:
raise ValueError(f"Can only subtract two spectra from each other, got: {len(spectra)}")
if args.subtract == "all":
spectra.append(spectra[0] - spectra[1])
else:
spectra = [spectra[0] - spectra[1]]
fig, ax = plotter(
spectra,
title=args.title,
style=args.spectra_type,
baseline_subtracted=args.baseline,
normalized=args.normalize,
smoothed=args.smooth,
plot=None,
xlim=xlim,
ylim=ylim,
xticks=None,
legend=True,
colors=None,
markers=None,
peaks=args.peaks,
savefig=args.save,
)
plt.show()
if __name__ == "__main__":
main()
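# Example invocation (script and file names are illustrative):
#   python plot_spectra.py -i "*.csv" -t IR -l 4000 400 --normalize -s out.png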
|
from support.c import *
class CppSupport(CSupport):
def generateotransform(self, info, index):
"""Generates the code to transform values"""
k = "override/#" + str(index) + "/transform/cpp"
if k in info:
f = info.get(k)
            return f
return "return value"
def generateftransform(self, info, index):
"""Generates the code to transform values"""
k = "fallback/#" + str(index) + "/transform/cpp"
if k in info:
f = info.get(k)
            return f
return "return value"
def funcpretty(self, key):
"""Return pretty printed key name for functions"""
return key.title().replace('_','').replace('/','').replace('#','')
def getfuncname(self, key):
"""CamelCase"""
return "get"+self.funcname(key)
def setfuncname(self, key):
"""CamelCase"""
return "set"+self.funcname(key)
def valof(self, info):
"""Return the default value for given parameter"""
val = info["default"]
type = info["type"]
if self.isenum(info):
return " = "+self.enumname(info)+"::"+val+";"
elif type == "string" and val == "":
return ' = "";'
return " = "+val+";"
def typeof(self, info):
"""Return the type for given parameter"""
type = info["type"]
if type == "string":
return "std::string"
elif self.isenum(info):
return self.enumname(info)
else:
return "kdb::"+type+"_t"
if __name__ == "__main__":
import doctest
doctest.testmod()
|
#importing modules
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.tree import DecisionTreeClassifier
from sklearn import svm
from sklearn.svm import LinearSVC
from yellowbrick.classifier import ClassificationReport
from yellowbrick.classifier import ClassPredictionError
#getting data
import psycopg2  # required for the Postgres connection below
# host, user, password and dbname are assumed to be defined elsewhere (e.g. a config file)
myConnection = psycopg2.connect( host=host, user=user, password=password, dbname=dbname )
import pandas as pd
data = pd.read_sql("Select * FROM final_data_thayn;", con=myConnection)
data.dropna(inplace=True)
cols=['price', 'rating', 'review_count', 'is_african', 'is_asian_fusion', 'is_bakeries', 'is_bars',
'is_breakfast_brunch', 'is_buffets', 'is_cafes', 'is_caribbean',
'is_chinese', 'is_deli', 'is_eastern_european', 'is_european',
'is_fast_food', 'is_hawaiian', 'is_health_food', 'is_icecream',
'is_indian', 'is_italian', 'is_japanese', 'is_korean', 'is_latin',
'is_mediterranean', 'is_mexican', 'is_middleasten', 'is_new_american',
'is_piza', 'is_seafood', 'is_south_east_asian', 'is_southern',
'is_street_food', 'is_sweets', 'is_thai', 'is_other_category',
'is_pickup', 'is_delivery', 'is_restaurant_reservation', 'Canvass',
'Complaint', 'reinspection', 'License', 'FoodPoison', 'high_risk_1',
'medium_risk_2', 'low_risk_2', 'grocery', 'Bakery', 'Mobile']
X = data[cols]
y = data['pass']
#function for printing mean squared error, mean absolute error and accuracy for each classifier
def model_performance(X, y,test_size=0.10, random_state = 42, penalty="l1"):
models = [GaussianNB(),KNeighborsClassifier(),SGDClassifier(), BaggingClassifier(KNeighborsClassifier()),
DecisionTreeClassifier(), LinearSVC(penalty=penalty, dual=False)]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state = random_state)
Reg_len = len(models)
i=0
while i < Reg_len:
model = models[i]
model.fit(X_train, y_train)
print(models[i])
print ('')
expected = y_test
predicted = model.predict(X_test)
# Evaluate fit of the model
print("Mean Squared Error: %0.6f" % mse(expected, predicted))
print("Mean Absolute Error: %0.6f" % mae(expected, predicted))
print("Coefficient of Determination: %0.6f" % model.score(X_test, y_test))
print ('')
i = i + 1
#calling function
model_performance(X, y)
#function for classification_report visualization
def classification_report(X, y, test_size=0.10, random_state = 42):
models = [GaussianNB(),KNeighborsClassifier(),SGDClassifier(), BaggingClassifier(KNeighborsClassifier()),
DecisionTreeClassifier(), LinearSVC(penalty="l1", dual=False)]
classes = ["not_passed", "passed"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state = random_state)
Reg_len = len(models)
i=0
while i < Reg_len:
model = models[i]
model.fit(X_train, y_train)
visualizer = ClassificationReport(model, classes=classes)
visualizer.fit(X_train, y_train) # Fit the visualizer and the model
visualizer.score(X_test, y_test) # Evaluate the model on the test data
print("Coefficient of Determination: %0.6f" % model.score(X_test, y_test))
g = visualizer.poof()
print ('')
i = i + 1
#calling function
classification_report(X, y)
# function for predicting error visualization
def pred_error(X, y, test_size=0.10, random_state = 42):
models = [GaussianNB(),KNeighborsClassifier(),SGDClassifier(), BaggingClassifier(KNeighborsClassifier()),
DecisionTreeClassifier(), LinearSVC(penalty="l1", dual=False)]
classes = ["not_passed", "passed"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state = random_state)
Reg_len = len(models)
i=0
while i < Reg_len:
model = models[i]
model.fit(X_train, y_train)
visualizer = ClassPredictionError(model, classes=classes)
visualizer.fit(X_train, y_train) # Fit the visualizer and the model
visualizer.score(X_test, y_test) # Evaluate the model on the test data
print("Coefficient of Determination: %0.6f" % model.score(X_test, y_test))
g = visualizer.poof()
print ('')
i = i + 1
#calling function
pred_error(X, y)
|
from enum import Enum
from common import mace_check, ModuleName
class SkelMode(Enum):
NON_DOMAINS = 0
DOMAINS = 1
class Skel(Enum):
V60 = 'V60'
V65 = 'V65'
V66 = 'V66'
class DspType(Enum):
ADSP = 'ADSP'
CDSP = 'CDSP'
class SocSkelTable:
def __init__(self, soc_id, mode, skel, dsp_type):
self.soc_id = soc_id
self.mode = mode
self.skel = skel
self.dsp_type = dsp_type
SocSkelInfo = [
SocSkelTable(246, SkelMode.NON_DOMAINS, None, None),
SocSkelTable(291, SkelMode.NON_DOMAINS, None, None),
SocSkelTable(292, SkelMode.DOMAINS, Skel.V60, DspType.ADSP),
SocSkelTable(305, SkelMode.NON_DOMAINS, None, None),
SocSkelTable(310, SkelMode.NON_DOMAINS, None, None),
SocSkelTable(311, SkelMode.NON_DOMAINS, None, None),
SocSkelTable(312, SkelMode.NON_DOMAINS, None, None),
SocSkelTable(317, SkelMode.DOMAINS, Skel.V60, DspType.CDSP),
SocSkelTable(318, SkelMode.DOMAINS, Skel.V60, DspType.CDSP),
SocSkelTable(319, SkelMode.DOMAINS, Skel.V60, DspType.ADSP),
SocSkelTable(321, SkelMode.DOMAINS, Skel.V65, DspType.CDSP),
SocSkelTable(324, SkelMode.DOMAINS, Skel.V60, DspType.CDSP),
SocSkelTable(325, SkelMode.DOMAINS, Skel.V60, DspType.CDSP),
SocSkelTable(326, SkelMode.DOMAINS, Skel.V60, DspType.CDSP),
SocSkelTable(327, SkelMode.DOMAINS, Skel.V60, DspType.CDSP),
SocSkelTable(336, SkelMode.DOMAINS, Skel.V65, DspType.CDSP),
SocSkelTable(337, SkelMode.DOMAINS, Skel.V65, DspType.CDSP),
SocSkelTable(339, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(341, SkelMode.DOMAINS, Skel.V65, DspType.CDSP),
SocSkelTable(347, SkelMode.DOMAINS, Skel.V65, DspType.CDSP),
SocSkelTable(352, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(355, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(356, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(360, SkelMode.DOMAINS, Skel.V65, DspType.CDSP),
SocSkelTable(362, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(365, SkelMode.DOMAINS, Skel.V65, DspType.CDSP),
SocSkelTable(366, SkelMode.DOMAINS, Skel.V65, DspType.CDSP),
SocSkelTable(367, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(373, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(377, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(384, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(393, SkelMode.DOMAINS, Skel.V65, DspType.CDSP),
SocSkelTable(394, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(400, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(407, SkelMode.DOMAINS, Skel.V66, DspType.CDSP),
SocSkelTable(415, SkelMode.DOMAINS, Skel.V66, DspType.CDSP), # Custom
SocSkelTable(450, SkelMode.DOMAINS, Skel.V66, DspType.CDSP), # Custom
SocSkelTable(0, None, None, None)
]
def get_soc_skel_info(soc_id):
for info in SocSkelInfo:
if info.soc_id == soc_id:
return info
mace_check(False, ModuleName.RUN, "Unsupported dsp soc")
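# Example: get_soc_skel_info(339) returns the table entry
# SocSkelTable(339, SkelMode.DOMAINS, Skel.V66, DspType.CDSP).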
|
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import sys
import re
import json
def read_json_file(file_path):
data = None
with open(file_path, "r") as f:
data = json.load(f)
return data[0:10]
def write_json_file(terms, file_path):
feed_list = [
"/search/?query=text:"+term+"\n"
for term in terms
]
with open(file_path, "w") as f:
f.writelines(feed_list)
def read_common_words(file_path):
common_words = set()
with open(file_path, "r") as f:
lines = f.readlines()
for line in lines:
common_words.add(line.strip())
return common_words
def clean_text(text):
return re.split(r"[^a-z0-9]+", text.lower())
def count_terms(doc, term_length=1, common_words=set()):
terms_set = set()
win_len = term_length
for i in range(len(doc) + 1 - win_len):
if not any(
map(lambda word: word in common_words, doc[i : i + win_len])
):
term = "+text:".join(doc[i : i + win_len])
terms_set.add(term)
return terms_set
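# Worked example: with term_length=2 and common_words={"the"},
#   count_terms(["the", "quick", "brown", "fox"], 2, {"the"})
# skips every window containing "the" and returns
#   {"quick+text:brown", "brown+text:fox"},
# i.e. each term is a "+text:"-joined sliding window of words.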
def process_doc(doc_fields, term_length=1, common_words=set()):
if doc_fields["text"]:
terms_set = count_terms(
clean_text(doc_fields["text"]), term_length, common_words
)
return terms_set
else:
return set()
def process_docs(obj, term_length=1, common_words=set()):
doc_counts = [
process_doc(doc["fields"], term_length, common_words) for doc in obj
]
terms_set = set()
# all unique terms
for counts in doc_counts:
for term in counts:
terms_set.add(term)
return terms_set
def main():
if len(sys.argv) == 3:
write_json_file(process_docs(read_json_file(sys.argv[1])), sys.argv[2])
elif len(sys.argv) == 4:
write_json_file(
process_docs(read_json_file(sys.argv[1]), int(sys.argv[3])),
sys.argv[2],
)
elif len(sys.argv) == 5:
write_json_file(
process_docs(
read_json_file(sys.argv[1]),
int(sys.argv[3]),
read_common_words(sys.argv[4]),
),
sys.argv[2],
)
else:
print(
"Wrong number of arguments:",
"python3",
"count_terms.py",
"infile.json",
"outfile.json",
"[number_of_words_in_term [common_words.txt]]",
)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Test Restaurant Module
"""
import unittest
from app import db
from app.modules.restaurant.models import Restaurant, Table, Booking
class TestRestaurantModule(unittest.TestCase):
""" Test restaurant module and methods """
def test_create_restaurant(self):
""" Create Restaurant """
restaurant = Restaurant.create(name="restaurant Create", \
address="address 1", food="seafood", user_id=1)
self.assertEqual(restaurant.name, "restaurant Create")
self.assertEqual(restaurant.address, "address 1")
self.assertEqual(restaurant.user_id, 1)
def test_edit_restaurant(self):
""" Update Restaurant """
current_restaurant = Restaurant.query.order_by('-id').first()
current_restaurant.name = "restaurant Edit"
current_restaurant.address = "address 2"
Restaurant.update(current_restaurant)
updated_restaurant = Restaurant.query.order_by('-id').first()
self.assertEqual(updated_restaurant.name, "restaurant Edit")
self.assertEqual(updated_restaurant.address, "address 2")
Restaurant.delete(updated_restaurant)
def test_create_table(self):
""" Create Table """
restaurant = Restaurant.create(name="restaurant Table", \
user_id=1)
table = Table.create(name="table 1", capacity=4, minimum=2, \
restaurant_id=restaurant.id)
self.assertEqual(table.restaurant_id, restaurant.id)
def test_edit_table(self):
""" Update Table """
current_table = Table.query.order_by('-id').first()
current_table.name = "table test"
current_table.capacity = 3
current_table.minimum = 3
Table.update(current_table)
updated_table = Table.query.order_by('-id').first()
self.assertEqual(updated_table.name, "table test")
self.assertEqual(updated_table.capacity, 3)
self.assertEqual(updated_table.minimum, 3)
Table.delete(updated_table)
def test_create_booking(self):
""" Create Booking """
table = Table.query.order_by('-id').first()
new_booking = Booking.create(email="test@test.com", \
user_id=1, table_id=table.id)
self.assertEqual(new_booking.table_id, table.id)
Booking.delete(new_booking)
def test_delete_restaurant(self):
""" Delete Restaurant """
last_restaurant = Restaurant.query.order_by('-id').first()
self.assertEqual(last_restaurant.name, "restaurant Table")
Restaurant.delete(last_restaurant)
|
import os
import json
from multiprocessing import cpu_count
import numpy as np
import pandas as pd
from linora.utils._logger import Logger
from linora.sample_splits import kfold, train_test_split
from linora.param_search._HyperParameters import HyperParametersRandom
from linora.param_search._config import __xgboost_version__
class RandomSearch():
def __init__(self):
hp = HyperParametersRandom()
hp.Float('learning_rate', 0.01, 0.1)
hp.Int('n_estimators', 100, 850)
hp.Choice('max_depth', [3, 4, 5, 6, 7])
hp.Choice('min_child_weight', [1, 2, 3, 4, 5, 6, 7])
hp.Choice('max_delta_step', [0])
hp.Choice('reg_alpha', np.concatenate([np.linspace(0, 1, 101), np.linspace(2, 100, 99)]).round(2))
hp.Choice('reg_lambda', np.concatenate([np.linspace(0, 1, 101), np.linspace(2, 100, 99)]).round(2))
hp.Choice('subsample', [0.5, 0.6, 0.7, 0.8, 0.9, 1. ])
hp.Choice('colsample_bytree', [0.5, 0.6, 0.7, 0.8, 0.9, 1. ])
hp.Choice('colsample_bylevel', [0.5, 0.6, 0.7, 0.8, 0.9, 1. ])
hp.Choice('colsample_bynode', [0.5, 0.6, 0.7, 0.8, 0.9, 1. ])
hp.Choice('gamma', np.concatenate([np.linspace(0, 1, 101), np.linspace(2, 100, 99)]).round(2))
hp.Choice('scale_pos_weight', [1])
hp.Choice('random_state', [27])
hp.Choice('booster', ['gbtree'])
hp.Choice('importance_type', ["gain", "weight", "cover", "total_gain", "total_cover"])
hp.Choice('verbosity', [0])
self.HyperParameter = hp
def search(self, feature, label, loss, metrics, iter_num=1000, scoring=0.5, cv=5, cv_num=3,
metrics_min=True, speedy=True, speedy_param=(20000, 0.3), gpu=False,
save_model_dir=None):
"""XGBRegressor model params search use RandomSearch method.
Args:
feature: pandas dataframe, model's feature.
label: pandas series, model's label.
loss: XGBRegressor param 'objective'.
metrics: model metrics function.
            iter_num: number of random search iterations.
            scoring: baseline metric value; a parameter set is only kept when it beats it.
            cv: number of cross validation folds.
            cv_num: number of cross-validation evaluations actually run per parameter set.
            metrics_min: whether a smaller metric value is better.
            speedy: whether to use the speedy (subsampled) method.
speedy_param: if use speedy method, test_size will be set,
test_size = 1-round(min(speedy_param[0], feature.shape[0]*speedy_param[1])/feature.shape[0], 2).
gpu: whether use gpu.
save_model_dir: save model folder.
Returns:
a best XGBRegressor model params dict.
Raises:
params error.
"""
logger = Logger(name='xgb')
import warnings
warnings.filterwarnings("ignore")
import xgboost as xgb
assert xgb.__version__>=__xgboost_version__, f'xgboost version should be >={__xgboost_version__}.'
best_params={}
if speedy:
test_size = 1-round(min(speedy_param[0], feature.shape[0]*speedy_param[1])/feature.shape[0], 2)
tree_method = ['gpu_hist'] if gpu else ['auto', 'exact', 'approx', 'hist']
n_job = 1 if gpu else int(np.ceil(cpu_count()*0.8))
gpu_id = 0 if gpu else None
self.HyperParameter.Choice('n_jobs', [n_job])
self.HyperParameter.Choice('objective', [loss])
self.HyperParameter.Choice('tree_method', tree_method)
self.HyperParameter.Choice('gpu_id', [gpu_id])
logger.info(f"Start XGBRegressor hyperparameter random search.")
for i in range(1, iter_num+1):
self.HyperParameter.update()
model = xgb.XGBRegressor(**self.HyperParameter.params)
score = []
if speedy:
for _ in range(cv_num):
index_list = train_test_split(feature, test_size=test_size, shuffle=True, random_state=np.random.choice(range(100), 1)[0])
model.fit(feature.loc[index_list[0]], label[index_list[0]])
cv_pred = pd.Series(model.predict(feature.loc[index_list[1]]), index=label[index_list[1]].index)
score.append(metrics(label[index_list[1]], cv_pred))
else:
index_list = kfold(feature, n_splits=cv, shuffle=True, random_state=np.random.choice(range(100), 1)[0])
for n, index in enumerate(index_list):
if n == cv_num:
break
model.fit(feature.loc[index[0]], label[index[0]])
cv_pred = pd.Series(model.predict(feature.loc[index[1]]), index=label[index[1]].index)
score.append(metrics(label[index[1]], cv_pred))
cv_score = np.mean(score)
if metrics_min:
if cv_score<scoring:
scoring = cv_score
best_params = self.HyperParameter.params.copy()
if save_model_dir is not None:
model.save_model(os.path.join(save_model_dir, "xgb_model.json"))
with open(os.path.join(save_model_dir, "xgb_params.json"),'w') as f:
json.dump(best_params, f)
else:
if cv_score>scoring:
scoring = cv_score
best_params = self.HyperParameter.params.copy()
if save_model_dir is not None:
model.save_model(os.path.join(save_model_dir, "xgb_model.json"))
with open(os.path.join(save_model_dir, "xgb_params.json"),'w') as f:
json.dump(best_params, f)
logger.info(f"random search progress: {round(i/iter_num*100,1)}%, best score: {scoring:.4}", enter=False if i<iter_num else True)
logger.info(f"XGBRegressor random search best score: {scoring:.4}", close=True)
return best_params
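# A minimal usage sketch (illustrative only, not part of the original module):
# the synthetic DataFrame/Series, objective and metric below are assumptions;
# any regression feature/label pair and sklearn-style metric works the same way.
if __name__ == '__main__':
    from sklearn.metrics import mean_squared_error
    rng = np.random.RandomState(0)
    demo_feature = pd.DataFrame(rng.rand(500, 10))
    demo_label = pd.Series(rng.rand(500))
    searcher = RandomSearch()
    best = searcher.search(demo_feature, demo_label,
                           loss='reg:squarederror',
                           metrics=mean_squared_error,
                           iter_num=10, scoring=1.0,
                           cv=3, cv_num=2,
                           metrics_min=True, speedy=False)
    print(best)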
|
import os
ROOTDIR = "./after_modify/no_stop_noun/"
TRAIN_DIR = "./after_modify/train/"
TEST_DIR = "./after_modify/test/"
change_path = {"07", "08", "10", "13", "14", "16", "20", "22", "23", "24"}
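# Files numbered below 1500 are moved into the flat train directory (prefixed
# with their class id); the remaining files go to the per-class test directory.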
for i in change_path:
for parent, dirnames, filenames in os.walk(ROOTDIR + i):
for filename in filenames:
if int(filename[:-4]) < 1500:
os.system("mv " + ROOTDIR+i+"/"+filename +" "+ TRAIN_DIR+i+"_"+filename)
else :
os.system("mv " + ROOTDIR+i+"/"+filename +" "+ TEST_DIR+i+"/"+filename)
|
from abc import ABCMeta, abstractmethod
from tempfile import NamedTemporaryFile
from unittest import TestCase
from mock import Mock, patch
class BaseTestCase(TestCase):
def patch(self, target, mock_=None):
if not mock_:
mock_ = Mock()
patcher = patch(target, mock_)
self.addCleanup(patcher.stop)
return patcher.start()
class HasTempfileTestCase(BaseTestCase):
_temp_file_mode = 'w+b'
_temp_file_prefix = 'tmp'
_temp_file_dir = None
def _create_temp_file(self, auto_close=True):
temp_file = NamedTemporaryFile(
mode=self._temp_file_mode,
prefix=self._temp_file_prefix,
dir=self._temp_file_dir
)
if auto_close:
self.addCleanup(temp_file.close)
return temp_file
def setUp(self):
self._tempfile = self._create_temp_file()
self._tempfile_name = self._tempfile.name
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 25 20:48:39 2017
@author: ratnadeepb
@License: MIT
"""
# System Import
import numpy as np
import sys
# Local Import
from InnerProductSpaces.norm import unit_vec
from InnerProductSpaces.projection import proj
'''
Gram Schmidt process of creating orthonormal basis from an arbitrary basis
'''
def gram_schmidt(B):
try:
B = np.array(B, dtype=np.float16)
except:
sys.exit("Not a vector")
W = np.zeros_like(B)
    # Create an orthogonal basis (classical Gram-Schmidt)
W[0] = B[0]
i = 1
while i < len(B):
p = 0
        j = 0
while j < i:
            p += proj(B[i], W[j])  # assumes proj(v, w) returns the projection of v onto w
j += 1
W[i] = B[i] - p
i += 1
# Normalise
U = np.zeros_like(W)
for ind, w in enumerate(W):
temp = unit_vec(w)
for s, u in enumerate(temp):
U[ind][s] = u
return U
if __name__ == "__main__":
B = [[0, 1, 1],
[2, 1, 0],
[-1, 0, -1]]
U = gram_schmidt(B)
print(U)
'''
U = [[0, 1/np.sqrt(2), 1/np.sqrt(2)],
[2/np.sqrt(6), 1/np.sqrt(6), -1/np.sqrt(6)],
[-1/np.sqrt(3), 1/np.sqrt(3), -1/np.sqrt(3)]]
'''
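# A minimal self-check (illustrative, not part of the original script): the rows
# of a Gram-Schmidt result should be approximately orthonormal, so U @ U.T should
# be close to the identity (only loosely here, since B is cast to float16).
if __name__ == "__main__":
    U64 = np.array(U, dtype=np.float64)
    print(np.allclose(U64 @ U64.T, np.eye(len(U64)), atol=1e-2))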
|
"""Auth0 OAuth 2.0 Provider.
Implementation of an Auth0 OAuth 2.0 Provider.
"""
import requests
import json
from jose import jwt
from functools import wraps, partial
from flask import request
from brighthive_authlib.providers import OAuth2Provider, OAuth2ProviderError
class AuthZeroProvider(OAuth2Provider):
"""Auth0 OAuth 2.0 Provider."""
def __init__(self):
super().__init__()
def validate_token(self, token=None, scopes=[]):
if not token:
token = self.get_token()
try:
headers = {'content-type': 'application/json'}
jwks_keys = requests.get(self.jwks_url, headers=headers).json()
unverified_header = jwt.get_unverified_header(token)
rsa_key = {}
for key in jwks_keys['keys']:
if key['kid'] == unverified_header['kid']:
rsa_key = {
'kty': key['kty'],
'kid': key['kid'],
'use': key['use'],
'n': key['n'],
'e': key['e']
}
if rsa_key:
payload = jwt.decode(
token,
rsa_key,
algorithms=self.algorithms,
audience=self.audience,
issuer='{}/'.format(self.base_url))
if len(scopes) == 0:
return True
else:
unverified_claims = jwt.get_unverified_claims(token)
if unverified_claims.get('scope'):
token_scopes = unverified_claims['scope'].split()
for scope in scopes:
if scope not in token_scopes:
raise OAuth2ProviderError(
'Required scope ({}) is not present for this client'.format(scope))
return True
except Exception:
raise OAuth2ProviderError('Access Denied')
|
"""
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: Jun Zhu <jun.zhu@xfel.eu>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
import unittest
from unittest.mock import patch, MagicMock
from extra_foam.pipeline.tests import _TestDataMixin
from extra_foam.pipeline.processors.digitizer import DigitizerProcessor
from extra_foam.database import SourceItem
from extra_foam.pipeline.exceptions import UnknownParameterError
class TestDigitizer(_TestDataMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._channels = DigitizerProcessor._pulse_integral_channels.keys()
def testGeneral(self):
data, processed = self.simple_data(1234, (2, 2))
meta = data['meta']
raw = data['raw']
catalog = data['catalog']
proc = DigitizerProcessor()
proc._meta.hdel = MagicMock()
category = 'Digitizer'
# empty source
self.assertNotIn(category, catalog)
proc.process(data)
# pipeline source with unknown property
item = SourceItem(category, 'digitizer1:network', [], 'data.intensityTD',
slice(None, None), (0, 1000))
catalog.add_item(item)
src = f"{item.name} {item.property}"
meta[src] = {'tid': 12346}
raw[src] = [100, 200, 300]
with self.assertRaises(UnknownParameterError):
proc.process(data)
catalog.remove_item(src)
# pipeline source with valid property
for ch in self._channels:
item = SourceItem(category, 'digitizer1:network', [],
f'digitizers.channel_1_{ch}.apd.pulseIntegral',
slice(None, None), (0, 1000))
catalog.add_item(item)
src = f"{item.name} {item.property}"
meta[src] = {'tid': 12346}
raw[src] = [100, 200, 300]
proc.process(data)
self.assertListEqual([100, 200, 300],
processed.pulse.digitizer[ch].pulse_integral.tolist())
self.assertEqual(ch, processed.pulse.digitizer.ch_normalizer)
self._reset_processed(processed)
# test moving average
# first reset
with patch.object(proc._meta, "hdel") as patched:
proc._update_moving_average({
'reset_ma_digitizer': 1,
'ma_window': 5
})
patched.assert_called_once()
# 1st train
raw[src] = [10, 20, 30]
proc.process(data)
self.assertListEqual([10, 20, 30], processed.pulse.digitizer[ch].pulse_integral.tolist())
# 2nd train
raw[src] = [30, 60, 90]
proc.process(data)
self.assertListEqual([20, 40, 60], processed.pulse.digitizer[ch].pulse_integral.tolist())
self._reset_processed(processed)
def _reset_processed(self, processed):
for ch in self._channels:
processed.pulse.digitizer[ch].pulse_integral = None
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pytsp` package."""
import unittest
from pytsp import pytsp
class TestPytsp(unittest.TestCase):
"""Tests for `pytsp` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_something(self):
"""Test something."""
|
#!env/bin/python
import sys
import os
import json
from werkzeug import generate_password_hash
from app.controller import Controller
from app.models import DB
basedir = os.getcwd()
db = DB()
def main():
print '\n'
print 'STACK'
print '----------'
print '\n'
print 'Welcome to the STACK setup tool. Follow the instructions below to\nsetup your first project account and initialize the configuration\nfiles for your STACK toolkit.'
print '\n'
project_name = raw_input('Enter a project account name: ')
password = raw_input('Enter a project account password: ')
description = raw_input('Enter a project account description: ')
hashed_password = generate_password_hash(password)
resp = db.create(project_name=project_name, password=password, hashed_password=hashed_password,
description=description)
if resp['status']:
print '\n'
print 'SUCCESS! You can now login to your account %s from the\n STACK front-end. Happy researching.' % project_name
else:
print '\n'
print 'Oops. Something went wrong. Please try again and make sure\n the account name you entered does not already exist.'
if __name__ == "__main__":
main()
|
import sys
if sys.version_info < (3,):
sys.exit("triku requires Python >= 3.7")
from pathlib import Path
from setuptools import setup, find_packages
__author__ = ", ".join(["Alex M. Ascensión"])
__email__ = ", ".join(
[
"alexmascension@gmail.com",
# We don’t need all, the main authors are sufficient.
]
)
__version__ = "2.1.3"
setup(
name="triku",
version=__version__,
description="Feature selection method for Single Cell data.",
long_description=Path("README.md").read_text("utf-8"),
url="https://gitlab.com/alexmascension/triku",
author=__author__,
author_email=__email__,
license="BSD",
python_requires=">=3.7",
install_requires=[
module.strip()
for module in Path("requirements.txt").read_text("utf-8").splitlines()
],
packages=find_packages(),
# `package_data` does NOT work for source distributions!!!
    # you also need MANIFEST.in
# https://stackoverflow.com/questions/7522250/how-to-include-package-data-with-setuptools-distribute
# package_data={'': '*.txt'},
# include_package_data=True,
entry_points=dict(),
zip_safe=False,
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
)
|
"""
Ledger output
Supports both `Ledger <https://www.ledger-cli.org/>`_ and
`hledger <https://hledger.org/>`_ plain text accounting formats.
By default the output should be valid for Ledger, but can be customized for
hledger or other variants via formatting options. Invalid variants are
possible, so the user should be familiar with the requirements of the target
format.
Relevant sections of the Ledger manual:
* `Commodities and Currencies
<https://www.ledger-cli.org/3.0/doc/ledger3.html#Commodities-and-Currencies>`_
* `Commoditized Amounts
<https://www.ledger-cli.org/3.0/doc/ledger3.html#Commoditized-Amounts>`_
Relevant sections of the hledger manual:
* `Declaring market prices <https://hledger.org/hledger.html#declaring-market-prices>`_:
* `Declaring commodities <https://hledger.org/hledger.html#declaring-commodities>`_:
Classes:
Ledger
"""
from pricehist.format import Format
from .baseoutput import BaseOutput
class Ledger(BaseOutput):
def format(self, series, source=None, fmt=Format()):
output = ""
for price in series.prices:
date = fmt.format_date(price.date)
base = fmt.base or series.base
quote = fmt.quote or series.quote
quote_amount = fmt.format_quote_amount(quote, price.amount)
timesep = " " if fmt.time else ""
output += f"P {date}{timesep}{fmt.time} {base} {quote_amount}\n"
return output
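# Each line produced above is a Ledger price directive of the general shape
#   P <date>[ <time>] <base> <quote amount>
# e.g. "P 2021-01-01 BTC 24000.00 USD" (values purely illustrative).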
|
# coding=utf-8
import time
from selenium import webdriver
browser = webdriver.Chrome()
browser.maximize_window()  # maximize the window
browser.get('https://www.baidu.com')  # open Baidu in the current browser
browser.find_element_by_id("kw").send_keys("selenium")
browser.find_element_by_id("su").click()
time.sleep(3)
browser.find_element_by_xpath("/html/body/div[1]/div[5]/div[1]/div[3]/div[4]/h3/a").click()
time.sleep(3)
print(browser.current_window_handle)  # print the current window handle
handles = browser.window_handles  # get all current window handles (a list)
browser.switch_to.window(handles[1])  # switch to the new window
print(browser.find_element_by_xpath("/html/head/title").get_attribute("innerHTML"))
# Open a new window by executing JavaScript
# js = 'window.open("https://www.sogou.com");'
# browser.execute_script(js)
# for handle in handles:  # switch windows (to Sogou)
# if handle != browser.current_window_handle:
# print('switch to ', handle)
# browser.switch_to.window(handle)
# print(browser.current_window_handle) # print the current window handle (Sogou)
# break
# browser.close() # close the current window (Sogou)
# browser.switch_to.window(handles[0]) # switch back to the Baidu window
# time.sleep(10)
|
'''
Author: liziwei01
Date: 2022-02-18 18:35:53
LastEditors: liziwei01
LastEditTime: 2022-03-17 15:47:49
Description: load data
'''
from array import array
from sklearn.cluster import KMeans
from collections import defaultdict
import scipy.io as sio
import numpy as np
import os
class MatLoader:
__filenames = ['testX', 'trainX', 'trainY', 'testY']
__dataTypes = ['train', 'test']
__suffix = '.mat'
__preData = defaultdict(np.ndarray)
__data = defaultdict(dict)
__cenrtoidData = defaultdict(dict)
def __init__(self, p_foldernames):
self.__foldernames = p_foldernames
def Do(self):
return self.__load().__squeeze().__transpose().__link().__kmeans()
def __load(self):
# save data to dict
# eg: {'testX': [], 'trainX': [], 'trainY': [], 'testY': []}
for foldername in self.__foldernames:
for filename in self.__filenames:
filepath = self.__getFileAbsPath(foldername, filename)
matfile = sio.loadmat(filepath)
matrix = matfile[filename]
self.__preData[foldername+filename] = matrix
return self
def __getFileAbsPath(self, p_foldername, p_filename):
cwd = os.getcwd()
return os.path.join(cwd, p_foldername, p_filename + self.__suffix)
def __squeeze(self):
for k, v in self.__preData.items():
self.__preData[k] = v.squeeze()
return self
def __transpose(self):
        # transpose the data
        # so that each row is one sample vector
for k, v in self.__preData.items():
self.__preData[k] = v.T
return self
def __link(self):
# eg: {'ATNT face/': {
# 'train': {
# '1': [vec1, vec2, ...],
# ...,
# },
# 'test': {
# '1': [vec1, vec2, ...],
# ...,
# },
# }
# ...
# }
for foldername in self.__foldernames:
self.__data[foldername] = {'train': {}, 'test': {}}
for dataType in self.__dataTypes:
mid_dict = defaultdict(list)
size = self.__labelFile(foldername, dataType).shape[0]
for idx in range(size):
classification = self.__labelFile(foldername, dataType)[idx]
vec = self.__vectorFile(foldername, dataType)[idx]
mid_dict[classification].append(vec)
self.__data[foldername][dataType] = mid_dict
return self
def __kmeans(self):
# eg: {'ATNT face/': {
# 'train': {
# '1': vec1,
# ...,
# },
# 'test': {
# '1': vec1,
# ...,
# },
# }
# ...
# }
for foldername in self.__foldernames:
self.__cenrtoidData[foldername] = {'train': {}, 'test': {}}
for dataType in self.__dataTypes:
mid_dict = defaultdict(array)
for idx in range(1, len(self.GetData(foldername, dataType))):
arr = self.__data[foldername][dataType][idx]
KMeans_model = KMeans(n_clusters=1).fit(arr)
mid_dict[idx] = KMeans_model.cluster_centers_[0]
self.__cenrtoidData[foldername][dataType] = mid_dict
return self
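    # Note: KMeans with n_clusters=1 simply computes the mean of each class's
    # vectors, so cluster_centers_[0] above is that class's centroid.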
def __vectorFile(self, p_foldername, dataType):
return self.__preData[p_foldername+dataType+'X']
def __labelFile(self, p_foldername, dataType):
return self.__preData[p_foldername+dataType+'Y']
def GetData(self, p_foldername, p_dataType):
return self.__data[p_foldername][p_dataType]
def GetCentroidData(self, p_foldername, p_dataType):
return self.__cenrtoidData[p_foldername][p_dataType]
def GetTestData(self, p_foldername):
return self.__data[p_foldername]['test']
def GetCentroidTestData(self, p_foldername):
return self.__cenrtoidData[p_foldername]['test']
def GetTestDataLen(self, p_foldername):
length = 0
for k, v in self.__data[p_foldername]['test'].items():
length += len(v)
return length
def GetTrainData(self, p_foldername):
return self.__data[p_foldername]['train']
def GetCentroidTrainData(self, p_foldername):
return self.__cenrtoidData[p_foldername]['train']
def GetTrainDataLen(self, p_foldername):
length = 0
for k, v in self.__data[p_foldername]['train'].items():
length += len(v)
return length
def GetVectorSet(self, p_foldername, p_dataType):
return self.__vectorFile(p_foldername, p_dataType)
def GetTrainVectorSet(self, p_foldername):
return self.__vectorFile(p_foldername, 'train')
def GetTestVectorSet(self, p_foldername):
return self.__vectorFile(p_foldername, 'test')
def GetFoldernames(self):
return self.__foldernames
|
##################################################################
## (c) Copyright 2015- by Jaron T. Krogel ##
##################################################################
#====================================================================#
# hdfreader.py #
# Support for reading HDF5 files into local structured format #
# containing numpy arrays. #
# #
# Content summary: #
# HDFreader #
# Main class to read HDF files and convert to object format. #
# #
# HDFgroup #
# Class representing an HDF group. #
# Contains other HDFgroup's or named data as numpy arrays #
# #
#====================================================================#
from numpy import array,ndarray,minimum,abs,ix_,resize
import sys
import keyword
from inspect import getmembers
from superstring import valid_variable_name
from generic import obj
from developer import DevBase,unavailable
try:
import h5py
except ImportError:
h5py = unavailable('h5py')
#end try
from debug import *
class HDFglobals(DevBase):
view = False
#end class HDFglobals
class HDFgroup(DevBase):
def _escape_name(self,name):
if name in self._escape_names:
name=name+'_'
#end if
return name
#end def escape_name
def _set_parent(self,parent):
self._parent=parent
return
#end def set_parent
def _add_dataset(self,name,dataset):
self._datasets[name]=dataset
return
#end def add_dataset
def _add_group(self,name,group):
group._name=name
self._groups[name]=group
return
#end def add_group
def _contains_group(self,name):
return name in self._groups.keys()
#end def _contains_group
def _contains_dataset(self,name):
return name in self._datasets.keys()
#end def _contains_dataset
def _to_string(self):
s=''
if len(self._datasets)>0:
s+=' datasets:\n'
for k,v in self._datasets.iteritems():
s+= ' '+k+'\n'
#end for
#end if
if len(self._groups)>0:
s+= ' groups:\n'
for k,v in self._groups.iteritems():
s+= ' '+k+'\n'
#end for
#end if
return s
#end def list
# def __str__(self):
# return self._to_string()
# #end def __str__
#
# def __repr__(self):
# return self._to_string()
# #end def __repr__
def __init__(self):
self._name=''
self._parent=None
self._groups={};
self._datasets={};
self._group_counts={}
self._escape_names=None
self._escape_names=set(dict(getmembers(self)).keys()) | set(keyword.kwlist)
return
#end def __init__
def _remove_hidden(self,deep=True):
if '_parent' in self:
del self._parent
#end if
if deep:
for name,value in self.iteritems():
if isinstance(value,HDFgroup):
value._remove_hidden()
#end if
#end for
#end if
for name in list(self.keys()):
if name[0]=='_':
del self[name]
#end if
#end for
#end def _remove_hidden
# read in all data views (h5py datasets) into arrays
# useful for converting a single group read in view form to full arrays
def read_arrays(self):
self._remove_hidden()
for k,v in self.iteritems():
if isinstance(v,HDFgroup):
v.read_arrays()
else:
self[k] = array(v)
#end if
#end for
#end def read_arrays
def get_keys(self):
if '_groups' in self:
keys = list(self._groups.keys())
else:
keys = list(self.keys())
#end if
return keys
#end def get_keys
#project interface methods
def zero(self,*names):
for name in names:
if name in self and isinstance(self[name],ndarray):
self[name][:] = 0
#end if
#end for
for name in self.get_keys():
value = self[name]
if isinstance(value,HDFgroup):
value.zero(*names)
#end if
#end for
#self.sum(*names)
#end def zero
def minsize(self,other,*names):
name_set = set(names)
snames = set(self.keys()) & name_set
onames = set(other.keys()) & name_set
if snames==onames:
for name in snames:
svalue = self[name]
ovalue = other[name]
if not isinstance(svalue,ndarray) or not isinstance(ovalue,ndarray):
self.error(name+' is not an array')
#end if
shape = minimum(svalue.shape,ovalue.shape)
self[name] = resize(svalue,shape)
#end for
#end if
for name in self.get_keys():
value = self[name]
if isinstance(value,HDFgroup):
if name in other and isinstance(other[name],HDFgroup):
value.minsize(other[name])
else:
self.error(name+' not found in minsize partner')
#end if
#end if
#end for
#self.sum(*names)
#end def minsize
def accumulate(self,other,*names):
name_set = set(names)
snames = set(self.keys()) & name_set
onames = set(other.keys()) & name_set
if snames==onames:
for name in snames:
svalue = self[name]
ovalue = other[name]
if not isinstance(svalue,ndarray) or not isinstance(ovalue,ndarray):
self.error(name+' is not an array')
#end if
shape = minimum(svalue.shape,ovalue.shape)
if abs(shape-array(svalue.shape)).sum() > 0:
self.error(name+' in partner is too large')
#end if
ranges = []
for s in shape:
ranges.append(range(s))
#end for
#add the part of the other data that fits into own data
svalue += ovalue[ix_(*ranges)]
#end for
#end if
for name in self.get_keys():
value = self[name]
if isinstance(value,HDFgroup):
if name in other and isinstance(other[name],HDFgroup):
value.accumulate(other[name])
else:
self.error(name+' not found in accumulate partner')
#end if
#end if
#end for
#self.sum(*names)
#end def accumulate
def normalize(self,normalization,*names):
for name in names:
if name in self and isinstance(self[name],ndarray):
self[name] /= normalization
#end if
#end for
for name in self.get_keys():
value = self[name]
if isinstance(value,HDFgroup):
value.normalize(normalization,*names)
#end if
#end for
#self.sum(*names)
#end def normalize
def sum(self,*names):
for name in names:
if name in self and isinstance(self[name],ndarray) and name=='value':
s = self[name].mean(0).sum()
print ' sum = {0}'.format(s)
#end if
#end for
#end def sum
#end class HDFgroup
class HDFreader(DevBase):
datasets = set(["<class 'h5py.highlevel.Dataset'>","<class 'h5py._hl.dataset.Dataset'>"])
groups = set(["<class 'h5py.highlevel.Group'>","<class 'h5py._hl.group.Group'>"])
def __init__(self,fpath,verbose=False,view=False):
HDFglobals.view = view
if verbose:
print ' Initializing HDFreader'
self.fpath=fpath
if verbose:
print ' loading h5 file'
try:
self.hdf = h5py.File(fpath,'r')
except IOError:
self._success = False
self.hdf = obj(obj=obj())
else:
self._success = True
#end if
if verbose:
print ' converting h5 file to dynamic object'
#convert the hdf 'dict' into a dynamic object
self.nlevels=1
self.ilevel=0
# Set the current hdf group
self.obj = HDFgroup()
self.cur=[self.obj]
self.hcur=[self.hdf]
if self._success:
cur = self.cur[self.ilevel]
hcur = self.hcur[self.ilevel]
for kr,v in hcur.iteritems():
k=cur._escape_name(kr)
if valid_variable_name(k):
vtype = str(type(v))
if vtype in HDFreader.datasets:
self.add_dataset(cur,k,v)
elif vtype in HDFreader.groups:
self.add_group(hcur,cur,k,v)
else:
print 'hdfreader error: encountered invalid type: '+vtype
sys.exit()
#end if
else:
print 'hdfreader warning: attribute '+k+' is not a valid variable name and has been ignored'
#end if
#end for
#end if
if verbose:
print ' end HDFreader Initialization'
return
#end def __init__
def increment_level(self):
self.ilevel+=1
self.nlevels = max(self.ilevel+1,self.nlevels)
if self.ilevel+1==self.nlevels:
self.cur.append(None)
self.hcur.append(None)
#end if
self.pad = self.ilevel*' '
return
#end def increment_level
def decrement_level(self):
self.ilevel-=1
self.pad = self.ilevel*' '
return
#end def decrement_level
def add_dataset(self,cur,k,v):
if not HDFglobals.view:
cur[k]=array(v)
else:
cur[k] = v
#end if
cur._add_dataset(k,cur[k])
return
#end def add_dataset
def add_group(self,hcur,cur,k,v):
cur[k] = HDFgroup()
cur._add_group(k,cur[k])
cur._groups[k]._parent = cur
self.increment_level()
self.cur[self.ilevel] = cur._groups[k]
self.hcur[self.ilevel] = hcur[k]
cur = self.cur[self.ilevel]
hcur = self.hcur[self.ilevel]
for kr,v in hcur.iteritems():
k=cur._escape_name(kr)
if valid_variable_name(k):
vtype = str(type(v))
if vtype in HDFreader.datasets:
self.add_dataset(cur,k,v)
elif vtype in HDFreader.groups:
self.add_group(hcur,cur,k,v)
#end if
else:
print 'hdfreader warning: attribute '+k+' is not a valid variable name and has been ignored'
#end if
#end for
return
#end def add_group
#end class HDFreader
def read_hdf(fpath,verbose=False,view=False):
return HDFreader(fpath=fpath,verbose=verbose,view=view).obj
#end def read_hdf
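# Example usage (path illustrative): data = read_hdf('stat.h5') returns an
# HDFgroup whose attributes mirror the HDF5 group/dataset hierarchy, with
# datasets exposed as numpy arrays (or h5py views when view=True).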
|
from Tree import *
# select a random subset of features, of size sqrt(number of original features)
def generate_features(features_list):
new_random_features = features_list.to_numpy()
np.random.shuffle(new_random_features)
return new_random_features[0:int(np.sqrt(len(features_list)))]
# create a new sub dataframe of random samples
# size defined by ratio of original dataframe
def create_subsample(full_df, split_ratio):
return full_df.sample(frac=split_ratio, random_state=np.random.randint(1, 50, 1))
# generate forest from given data and features list in requested size and depth
# returns arrays of trees (Root type)
def generate_forest(forest_df: pd.DataFrame, features_list, forest_size, trees_depth, bagging_ratio):
new_forest = []
forest_df_bootstrapped = pd.DataFrame(forest_df['label'])
# bootstrapping the df
for value in features_list:
forest_df_bootstrapped[value] = forest_df[value]
new_features_list = np.arange(-1, len(features_list), 1)
forest_df_bootstrapped.columns = new_features_list
forest_df_bootstrapped = forest_df_bootstrapped.rename(columns={-1: 'label'})
for i in range(forest_size):
# create a fresh forest_df_bootstrapped copy and get subsample of it (bagging)
forest_df_subsample = forest_df_bootstrapped.copy()
forest_df_subsample = create_subsample(forest_df_subsample, bagging_ratio)
forest_df_subsample = forest_df_subsample.reset_index(drop=True)
new_forest.append(build_tree(forest_df_subsample.drop('label', axis=1),
pd.DataFrame(forest_df_subsample['label']), trees_depth))
return new_forest
# generate predictions from all tree per row in test dataframe
def predict_forest_labels(random_forest, x_test):
predictions = []
for tree in random_forest:
predictions.append(forest_predict_labels(tree, x_test))
return predictions
# compute the label of each data row in a given test dataframe
# input: decision tree (root object) and test dataframe
# returns: an arrays of labels in length of test dataframe
def forest_predict_labels(decision_tree, x_test):
predictions = []
for index, row in x_test.iterrows():
predictions.append(forest_find_value_and_get_label(decision_tree, list(zip(row, row.index))))
return predictions
# recursively scan the branches of the tree. decide to take the left or right branch by existence of data or by
# appropriate values (current data in range of feature). find the optimum leaf by reaching the end of the line or by
# irrelevant branching.
def forest_find_value_and_get_label(node, row):
if node.current_node.leaf == 1 or (node.current_node.left_node.empty and node.current_node.right_node.empty):
return np.bincount(node.current_node.current_df['label']).argmax()
elif not node.current_node.left_node.empty and node.current_node.right_node.empty:
if row[node.current_node.feature][0] < node.current_node.left_node[node.current_node.feature].iloc[
len(node.current_node.left_node) - 1]:
return forest_find_value_and_get_label(node.left_node, row)
else:
return np.bincount(node.current_node.current_df['label']).argmax()
elif node.current_node.left_node.empty and not node.current_node.right_node.empty:
if row[node.current_node.feature][0] >= node.current_node.right_node[node.current_node.feature].iloc[0]:
return forest_find_value_and_get_label(node.right_node, row)
else:
return np.bincount(node.current_node.current_df['label']).argmax()
else:
if row[node.current_node.feature][0] < node.current_node.left_node[node.current_node.feature].iloc[
len(node.current_node.left_node) - 1]:
return forest_find_value_and_get_label(node.left_node, row)
if row[node.current_node.feature][0] >= node.current_node.right_node[node.current_node.feature].iloc[0]:
return forest_find_value_and_get_label(node.right_node, row)
return np.bincount(node.current_node.current_df['label']).argmax()
def main():
# import and organizing the data
bc_df = pd.read_csv('wdbc.data', names=np.arange(-2, 30, 1))
bc_df = bc_df.rename(columns={-2: 'index', -1: 'label'})
bc_df = bc_df.drop('index', axis=1)
new_label = []
for x in bc_df['label']:
if x == 'M':
new_label.append(1)
else:
new_label.append(0)
bc_df['label'] = new_label
x_train, x_test, y_train, y_test = split_df(bc_df, 'label', 0.8, 42)
x_all = x_train.copy()
x_all['label'] = y_train
x_all = x_all.reset_index(drop=True)
# amount of trees, their depth bagging ratio
# forest_size = 5
trees_depth = 2
bagging_ratio = 0.5
for i in range(0, 3, 1):
# randomize features in root size of original quantity of features
random_features = generate_features(x_train.columns)
print('Features list:', random_features)
for forest_size in range(1, 10, 1):
# create a new forest_df_bootstrapped and subsample forest
new_random_forest = generate_forest(x_all, random_features, forest_size, trees_depth, bagging_ratio)
# update x_test to contain only relevant features
x_test_reduced_features = pd.DataFrame()
for value in random_features:
x_test_reduced_features[value] = x_test[value]
new_features_list = np.arange(0, len(random_features), 1)
x_test_reduced_features.columns = new_features_list
# received matrix of predictions in length of tree VS amount of tested samples.
predictions_matrix = predict_forest_labels(new_random_forest, x_test_reduced_features)
# find most common prediction per sample
argmax_predictions_per_sample = [np.bincount(prediction_array).argmax()
for prediction_array in zip(*predictions_matrix)]
success_prediction_rate = np.equal(argmax_predictions_per_sample, y_test['label'])
count_true = np.count_nonzero(success_prediction_rate)
print('forest_size = {} max_depth = {}: correct {} times out of {}, success rate of {}%'.format(
forest_size, trees_depth, count_true, len(y_test), round(100 * count_true / len(y_test), 2)))
if __name__ == '__main__':
main()
|
from kivy.app import App
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.config import Config
from components.LayoutManager import LayoutManager
Config.set('kivy', 'log_level', 'info')
# Config.set('kivy', 'log_level', 'critical')
Config.set('graphics', 'borderless', 0)
Config.set('graphics', 'width', 1080)
Config.set('graphics', 'height', 720)
# Config.set('graphics', 'window_state', 'minimized')
Config.set('graphics', 'window_state', "visible")
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
Config.write()
kv_lay = Builder.load_file('../data/chill_layout.kv')
Clock.max_iteration = 5000  # sets the maximum number of nested clock iterations
Window.restore()
layout_manager = LayoutManager()
class ChillApp(App):
def build(self):
self.icon = '../data/graphics/CMDownloader_logo.png'
self.title = 'Chill Music Downloader'
return layout_manager.window_manager
if __name__ == '__main__':
ChillApp().run()
print('App started')
|