| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
tests/python_venv/test_env.py
|
jmknoble/python-venv
| 1
|
12778251
|
"""Provide unit tests for `~python_venv.env`:py:mod:."""
import unittest
import parameterized # https://pypi.org/project/parameterized/
from python_venv import const, env, reqs
class TestEnv_000_General(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_PV_ENV_000_symbols_exist(self):
_ = const.PYTHON
_ = const.CONDA
_ = const.PYENV
_ = const.VENV_DIR
_ = const.DEV_SUFFIX
_ = const.DIST_DIR_PLACEHOLDER
_ = const.ENV_DIR_PLACEHOLDER
_ = const.ENV_TYPES_NAMED
class TestEnv_010_BaseVirtualEnvironment(unittest.TestCase):
def setUp(self):
self.saved_requirements = reqs.REQUIREMENTS
def tearDown(self):
reqs.REQUIREMENTS = self.saved_requirements
def test_PV_ENV_BAS_000_instantiate_empty(self):
with self.assertRaises(TypeError) as raised:
env.BaseVirtualEnvironment()
msg = raised.exception.args[0]
self.assertTrue(
msg.startswith("__init__() missing 1 required positional argument")
)
def test_PV_ENV_BAS_001_instantiate(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
self.assertEqual(x.req_scheme, "dummy_req_scheme")
@parameterized.parameterized.expand(
[
("dry_run", {"dry_run": True}, "dry_run", True),
("force", {"force": True}, "force", True),
(
"message_prefix",
{"message_prefix": "dummy_message_prefix"},
"message_prefix",
"dummy_message_prefix",
),
("python", {"python": "dummy_python"}, "python", "dummy_python"),
("basename", {"basename": "dummy_basename"}, "_basename", "dummy_basename"),
("env_name", {"env_name": "dummy_env_name"}, "_env_name", "dummy_env_name"),
(
"env_prefix",
{"env_prefix": "dummy_env_prefix"},
"_env_prefix",
"dummy_env_prefix",
),
]
)
def test_PV_ENV_BAS_002_instantiate_kwargs(self, name, kwargs, attr, value):
x = env.BaseVirtualEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(getattr(x, attr), value)
def test_PV_ENV_BAS_010_requirements(self):
dummy_requirements = {"dummy_req_source": ["dummy_value"]}
reqs.REQUIREMENTS = {"dummy_req_scheme": [dummy_requirements]}
x = env.BaseVirtualEnvironment("dummy_req_scheme")
self.assertListEqual(x.requirements.requirements, [dummy_requirements])
def test_PV_ENV_BAS_020_package_name(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
self.assertEqual(x.package_name, "python_venv")
@parameterized.parameterized.expand(
[
("default", None, "python-venv"),
("specified", "dummy-package", "dummy-package"),
]
)
def test_PV_ENV_BAS_030_basename(self, name, basename, expected):
kwargs = {} if basename is None else {"basename": basename}
x = env.BaseVirtualEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(x.basename, expected)
def test_PV_ENV_BAS_040_abstract_env_name(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.env_name
@parameterized.parameterized.expand(
[
("default", None, ""),
("specified", "dummy-prefix", "dummy-prefix"),
]
)
def test_PV_ENV_BAS_045_env_prefix(self, name, env_prefix, expected):
kwargs = {} if env_prefix is None else {"env_prefix": env_prefix}
x = env.BaseVirtualEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(x.env_prefix, expected)
def test_PV_ENV_BAS_050_abstract_env_dir(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.env_dir
def test_PV_ENV_BAS_051_abstract_env_bin_dir(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.env_bin_dir
def test_PV_ENV_BAS_052_abstract_env_python(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.env_python
def test_PV_ENV_BAS_055_abstract_abs_env_dir(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.abs_env_dir
def test_PV_ENV_BAS_060_abstract_env_description(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.env_description
def test_PV_ENV_BAS_100_abstract_create(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.create()
def test_PV_ENV_BAS_200_abstract_remove(self):
x = env.BaseVirtualEnvironment("dummy_req_scheme")
with self.assertRaises(NotImplementedError):
x.remove()
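# Note (sketch): with the test packages importable, these tests can be run via the
# standard runner, e.g. python -m unittest tests.python_venv.test_env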
| 2.828125
| 3
|
online/cflib/utils/multiranger.py
|
jmslagmay/apoleid
| 0
|
12778252
|
<reponame>jmslagmay/apoleid<gh_stars>0
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2018 Bitcraze AB
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from cflib.crazyflie.log import LogConfig
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
class Multiranger:
FRONT = 'range.front'
BACK = 'range.back'
LEFT = 'range.left'
RIGHT = 'range.right'
UP = 'range.up'
DOWN = 'range.zrange'
def __init__(self, crazyflie, rate_ms=100, zranger=False):
if isinstance(crazyflie, SyncCrazyflie):
self._cf = crazyflie.cf
else:
self._cf = crazyflie
self._log_config = self._create_log_config(rate_ms)
self._up_distance = None
self._front_distance = None
self._back_distance = None
self._left_distance = None
self._right_distance = None
self._down_distance = None
def _create_log_config(self, rate_ms):
log_config = LogConfig('multiranger', rate_ms)
log_config.add_variable(self.FRONT)
log_config.add_variable(self.BACK)
log_config.add_variable(self.LEFT)
log_config.add_variable(self.RIGHT)
log_config.add_variable(self.UP)
log_config.add_variable(self.DOWN)
log_config.data_received_cb.add_callback(self._data_received)
return log_config
def start(self):
self._cf.log.add_config(self._log_config)
self._log_config.start()
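    # The log values below are reported in millimetres; values of 8000 mm or more
    # are treated as out-of-range and mapped to None, otherwise converted to metres.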
def _convert_log_to_distance(self, data):
if data >= 8000:
return None
else:
return data / 1000.0
def _data_received(self, timestamp, data, logconf):
self._up_distance = self._convert_log_to_distance(data[self.UP])
self._front_distance = self._convert_log_to_distance(data[self.FRONT])
self._back_distance = self._convert_log_to_distance(data[self.BACK])
self._left_distance = self._convert_log_to_distance(data[self.LEFT])
self._right_distance = self._convert_log_to_distance(data[self.RIGHT])
if self.DOWN in data:
            self._down_distance = self._convert_log_to_distance(data[self.DOWN])
def stop(self):
self._log_config.delete()
@property
def up(self):
return self._up_distance
@property
def left(self):
return self._left_distance
@property
def right(self):
return self._right_distance
@property
def front(self):
return self._front_distance
@property
def back(self):
return self._back_distance
@property
def down(self):
return self._down_distance
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
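# Minimal usage sketch (hypothetical URI; assumes a connected Crazyflie with a
# Multi-ranger deck attached):
#
#     with SyncCrazyflie('radio://0/80/2M') as scf:
#         with Multiranger(scf) as multiranger:
#             print(multiranger.front, multiranger.up)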
| 1.960938
| 2
|
ryu/app/network_awareness/shortest_forwarding.py
|
lzppp/mylearning
| 0
|
12778253
|
<reponame>lzppp/mylearning
# Copyright (C) 2016 <NAME> at Beijing University of Posts
# and Telecommunications. www.muzixing.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
import logging
import struct
import networkx as nx
import netaddr
from operator import attrgetter
from ryu import cfg
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import hub
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import arp
from ryu.topology import event, switches
from ryu.topology.api import get_switch, get_link
import network_awareness
import network_monitor
import network_delay_detector
import httphd
import sql
import sa
import copy
import random
import setting
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import StreamRequestHandler
import io,shutil
import urllib,time
import getopt,string
CONF = cfg.CONF
GPATH = '/home/mini/tempmessage/switchinfo.db'
FPATH = '/home/mini/tempmessage/flow.db'
TABLESWITCH = '''CREATE TABLE `switch` (
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
`sw1` varchar(20) NOT NULL,
`po1` int(11) NOT NULL,
`sw2` varchar(20) NOT NULL,
`po2` int(11) NOT NULL,
`delay` varchar(20) DEFAULT NULL,
`bw` varchar(20) DEFAULT NULL,
`qoe` varchar(20) DEFAULT NULL
)'''
TABLEFLOW = '''CREATE TABLE `flow` (
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
`ip_src` varchar(20) NOT NULL,
`ip_dst` varchar(20) DEFAULT NULL,
`flowspeed` varchar(20) DEFAULT NULL,
`routepath` varchar(40) DEFAULT NULL
)'''
class ShortestForwarding(app_manager.RyuApp):
"""
    ShortestForwarding is a Ryu app for forwarding packets along the
    shortest path.
    This app does not define the path computation method itself.
    To get the shortest path, this module depends on the network awareness,
    network monitor and network delay detector modules.
"""
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
"network_awareness": network_awareness.NetworkAwareness,
"network_monitor": network_monitor.NetworkMonitor,
"network_delay_detector": network_delay_detector.NetworkDelayDetector}
WEIGHT_MODEL = {'hop': 'weight', 'delay': "delay", "bw": "bw"}
def __init__(self, *args, **kwargs):
super(ShortestForwarding, self).__init__(*args, **kwargs)
self.name = 'shortest_forwarding'
self.awareness = kwargs["network_awareness"]
self.monitor = kwargs["network_monitor"]
self.delay_detector = kwargs["network_delay_detector"]
self.datapaths = {}
self.weight = self.WEIGHT_MODEL[CONF.weight]
self.vip = {}
self.conn = sql.get_conn(GPATH)
self.flowconn = sql.get_conn(FPATH)
self.busy = False
self.doing_list = set()
self.flow_infome = {}
self.dotime = 0
self.fltime = 0
sql.drop_table(self.conn , 'switch')
sql.create_table(self.conn , TABLESWITCH)
sql.drop_table(self.conn , 'flow')
sql.create_table(self.conn , TABLEFLOW)
self.vip_thread = hub.spawn(self._vip)
def qoe(self):
"""
todo!!!!!
"""
if self.busy == True:
'''
do QoE APP AWARE
print ("----------------------------start qoe-------------------------------")
self.flow_infome[(ip_src , ip_dst)]['in_port'] = in_port
self.flow_infome[(ip_src , ip_dst)]['eth_type'] = eth_type
self.flow_infome[(ip_src , ip_dst)]['buffer_id'] = msg.buffer_id
self.flow_infome[(ip_src , ip_dst)]['datapath']=datapath
'''
graph = copy.deepcopy(self.awareness.graph)
flow_in_road = copy.deepcopy(self.monitor.flow_in_road)
'''
to finish this part
'''
'''
for ip in self.vip:
result = self.get_sw(self.flow_infome[(ip_src , ip)]['datapath'].id, self.flow_infome[(ip_src , ip)]['in_port'], ip_src, ip)
src_sw, dst_sw = result[0] , result[1]
self.flow_infome[(ip_src , ip_dst)]['src'] = src_sw
self.flow_infome[(ip_src , ip_dst)]['dst'] = dst_sw
if dst_sw:
path = self.get_path(src_sw, dst_sw , 'delay')
self.flow_infome[(ip_src , ip)]['path'] = path
flow_info = (self.flow_infome[(ip_src , ip)]['eth_type'], ip_src, ip, self.flow_infome[(ip_src , ip)]['in_port'])
self.install_flow( self.datapaths,
self.awareness.link_to_port,
self.awareness.access_table, path,
flow_info, msg.buffer_id, msg.data)
graphchange(graph , path)
del flow_in_road[(ip_src , ip)]
if result:
src_sw, dst_sw = result[0], result[1]
if dst_sw:
# Path has already calculated, just get it.
path = self.get_path(src_sw, dst_sw, weight=self.weight)
self.logger.info("[PATH]%s<-->%s: %s" % (ip_src, ip_dst, path))
flow_info = (eth_type, ip_src, ip_dst, in_port)
# install flow entries to datapath along side the path.
self.install_flow(self.datapaths,
self.awareness.link_to_port,
self.awareness.access_table, path,
flow_info, msg.buffer_id, msg.data)
self.flow_infome[(ip_src , ip_dst)]['path'] = path
self.flow_infome[(ip_src , ip_dst)]['in_port'] = in_port
self.flow_infome[(ip_src , ip_dst)]['eth_type'] = eth_type
self.flow_infome[(ip_src , ip_dst)]['buffer_id'] = msg.buffer_id
self.flow_infome[(ip_src , ip_dst)]['datapath'] = datapath
self.flow_infome[(ip_src , ip_dst)]['src'] = src_sw
self.flow_infome[(ip_src , ip_dst)]['dst'] = dst_sw
'''
for key in flow_in_road.keys():
flow_in_road[key]['src'] = self.flow_infome[key]['src']
flow_in_road[key]['dst'] = self.flow_infome[key]['dst']
pathset = {}
selectpath = {}
for flowkey in flow_in_road.keys():
pathset[flowkey] = []
if flowkey[0] == "192.168.127.12" or flowkey[1] == "202.116.7.106" or flowkey[0] == "10.0.0.10" or flowkey[1] == "10.0.0.10":
pathset[flowkey].append(nx.shortest_path(self.awareness.graph, source=flow_in_road[flowkey]['src'],
target=flow_in_road[flowkey]['dst'], weight='hop'))
continue
#only get sp for qoe
for a in nx.shortest_simple_paths(self.awareness.graph, source=flow_in_road[flowkey]['src'],
target=flow_in_road[flowkey]['dst'], weight='delay'):
pathset[flowkey].append(a)
for flowkey in flow_in_road:
selectpath[flowkey] = self.flow_infome[flowkey]['path']
safunction = sa.recalculatebySA(selectpath , self.awareness.graph)
safunction.path = pathset
safunction.flow = flow_in_road
safunction.copy_strategy = "method"
print ("----------------------------start sa-------------------------------")
safunction.TMax = 350000
safunction.Tmin = 100
safunction.steps = 2400
state , e = safunction.anneal()
if e > 50000:
self.fltime = self.fltime + 1
else :
for key in state.keys():
if state[key] != self.flow_infome[key]['path']:
#resend flow table
self.logger.info("[PATH]%s<-->%s: %s" % (key[0], key[1], state[key]))
self.flow_infome[key]['path'] = state[key]
flow_info = (self.flow_infome[key]['eth_type'],
key[0], key[1], self.flow_infome[key]['in_port'])
self.install_flow(self.datapaths,
self.awareness.link_to_port,
self.awareness.access_table, state[key],
flow_info, self.flow_infome[key]['buffer_id'], None)
self.busy = False
def _vip(self):
"""
        Read the flow table in flow.db, which serves as a VIP list.
"""
while self.busy != True:
print ("flrate = %d/%d" % (self.fltime , self.dotime))
breakflag = False
fetchall_sql = '''SELECT * FROM flow'''
result = sql.fetchall(self.flowconn , fetchall_sql)
if result == None:
pass
else:
self.busy = True
flow_in_road = copy.deepcopy(self.monitor.flow_in_road)
self.dotime = self.dotime + 1
self.qoe()
hub.sleep(10)
# for src in self.awareness.graph:
# if breakflag == True:
# break
# for dst in self.awareness.graph[src]:
# if delay == self.awareness.graph[src][dst]['delay'] > 0.5:
# breakflag = True
# break
# if breakflag == True:
# self.qoe()
hub.sleep(setting.DELAY_DETECTING_PERIOD)
def set_weight_mode(self, weight):
"""
        Set the weight mode used for path calculation.
"""
self.weight = weight
if self.weight == self.WEIGHT_MODEL['hop']:
self.awareness.get_shortest_paths(weight=self.weight)
return True
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
"""
Collect datapath information.
"""
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
def add_flow(self, dp, p, match, actions, idle_timeout=0, hard_timeout=0):
"""
Send a flow entry to datapath.
"""
ofproto = dp.ofproto
parser = dp.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=dp, priority=p,
idle_timeout=idle_timeout,
hard_timeout=hard_timeout,
match=match, instructions=inst)
dp.send_msg(mod)
def send_flow_mod(self, datapath, flow_info, src_port, dst_port):
"""
Build flow entry, and send it to datapath.
"""
parser = datapath.ofproto_parser
actions = []
actions.append(parser.OFPActionOutput(dst_port))
match = parser.OFPMatch(
in_port=src_port, eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2])
self.add_flow(datapath, 1, match, actions,
idle_timeout=15, hard_timeout=60)
def _build_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
"""
Build packet out object.
"""
actions = []
if dst_port:
actions.append(datapath.ofproto_parser.OFPActionOutput(dst_port))
msg_data = None
if buffer_id == datapath.ofproto.OFP_NO_BUFFER:
if data is None:
return None
msg_data = data
out = datapath.ofproto_parser.OFPPacketOut(
datapath=datapath, buffer_id=buffer_id,
data=msg_data, in_port=src_port, actions=actions)
return out
def send_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
"""
Send packet out packet to assigned datapath.
"""
out = self._build_packet_out(datapath, buffer_id,
src_port, dst_port, data)
if out:
datapath.send_msg(out)
def get_port(self, dst_ip, access_table):
"""
        Get the access port of the destination host.
        access_table: {(sw, port): (ip, mac)}
"""
'''
???
'''
if dst_ip != None and netaddr.IPAddress(dst_ip) not in netaddr.IPNetwork("10.0.0.0/8"):
dst_ip = "10.0.0.10"
'''
/\
|
|
NAT
'''
if access_table:
if isinstance(access_table.values()[0], tuple):
for key in access_table.keys():
if dst_ip == access_table[key][0]:
dst_port = key[1]
return dst_port
return None
def get_port_pair_from_link(self, link_to_port, src_dpid, dst_dpid):
"""
Get port pair of link, so that controller can install flow entry.
"""
if (src_dpid, dst_dpid) in link_to_port:
return link_to_port[(src_dpid, dst_dpid)]
else:
self.logger.info("dpid:%s->dpid:%s is not in links" % (
src_dpid, dst_dpid))
return None
def flood(self, msg):
"""
        Flood the ARP packet to every access port
        that has no host record yet.
"""
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
for dpid in self.awareness.access_ports:
for port in self.awareness.access_ports[dpid]:
if (dpid, port) not in self.awareness.access_table.keys():
datapath = self.datapaths[dpid]
out = self._build_packet_out(
datapath, ofproto.OFP_NO_BUFFER,
ofproto.OFPP_CONTROLLER, port, msg.data)
datapath.send_msg(out)
self.logger.debug("Flooding msg")
def arp_forwarding(self, msg, src_ip, dst_ip):
""" Send ARP packet to the destination host,
        if the destination host record exists;
        otherwise, flood it to the unknown access ports.
"""
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
result = self.awareness.get_host_location(dst_ip)
if result: # host record in access table.
datapath_dst, out_port = result[0], result[1]
datapath = self.datapaths[datapath_dst]
out = self._build_packet_out(datapath, ofproto.OFP_NO_BUFFER,
ofproto.OFPP_CONTROLLER,
out_port, msg.data)
datapath.send_msg(out)
            self.logger.debug("Reply ARP to known host")
else:
self.flood(msg)
def get_path(self, src, dst, weight):
"""
Get shortest path from network awareness module.
"""
shortest_paths = self.awareness.shortest_paths
graph = self.awareness.graph
if weight == self.WEIGHT_MODEL['hop']:
try:
paths = shortest_paths.get(src).get(dst)
return paths[0]
except:
paths = self.awareness.k_shortest_paths(graph, src, dst,
weight=weight)
shortest_paths.setdefault(src, {})
shortest_paths[src].setdefault(dst, paths)
return paths[0]
elif weight == self.WEIGHT_MODEL['delay']:
# If paths existed, return it, else calculate it and save it.
try:
paths = shortest_paths.get(src).get(dst)
return paths[0]
except:
paths = self.awareness.k_shortest_paths(graph, src, dst,
weight=weight)
shortest_paths.setdefault(src, {})
shortest_paths[src].setdefault(dst, paths)
return paths[0]
elif weight == self.WEIGHT_MODEL['bw']:
            # Because all paths are calculated when
            # self.monitor.get_best_path_by_bw is called,
            # we only need to call it once per period
            # and can then get the path directly.
try:
# if path is existed, return it.
path = self.monitor.best_paths.get(src).get(dst)
return path
except:
# else, calculate it, and return.
result = self.monitor.get_best_path_by_bw(graph,
shortest_paths)
paths = result[1]
best_path = paths.get(src).get(dst)
return best_path
def get_sw(self, dpid, in_port, src, dst):
"""
Get pair of source and destination switches.
"""
src_sw = dpid
dst_sw = None
src_location = self.awareness.get_host_location(src)
if in_port in self.awareness.access_ports[dpid]:
if (dpid, in_port) == src_location:
src_sw = src_location[0]
else:
return None
dst_location = self.awareness.get_host_location(dst)
if dst_location:
dst_sw = dst_location[0]
return src_sw, dst_sw
def install_flow(self, datapaths, link_to_port, access_table, path,
flow_info, buffer_id, data=None):
'''
        Install flow entries for the round trip: forward and back.
        @parameter: path=[dpid1, dpid2...]
                    flow_info=(eth_type, src_ip, dst_ip, in_port)
'''
if path is None or len(path) == 0:
self.logger.info("Path error!")
return
in_port = flow_info[3]
first_dp = datapaths[path[0]]
out_port = first_dp.ofproto.OFPP_LOCAL
back_info = (flow_info[0], flow_info[2], flow_info[1])
# inter_link
if len(path) > 2:
for i in xrange(1, len(path) - 1):
port = self.get_port_pair_from_link(link_to_port,
path[i - 1], path[i])
port_next = self.get_port_pair_from_link(link_to_port,
path[i], path[i + 1])
if port and port_next:
src_port, dst_port = port[1], port_next[0]
datapath = datapaths[path[i]]
self.send_flow_mod(datapath, flow_info, src_port, dst_port)
#self.send_flow_mod(datapath, back_info, dst_port, src_port)#back path
self.logger.debug("inter_link flow install")
if len(path) > 1:
# the last flow entry: tor -> host
port_pair = self.get_port_pair_from_link(link_to_port,
path[-2], path[-1])
if port_pair is None:
self.logger.info("Port is not found")
return
src_port = port_pair[1]
dst_port = self.get_port(flow_info[2], access_table)
if dst_port is None:
self.logger.info("Last port is not found.")
return
last_dp = datapaths[path[-1]]
self.send_flow_mod(last_dp, flow_info, src_port, dst_port)
#self.send_flow_mod(last_dp, back_info, dst_port, src_port)#back path
# the first flow entry
port_pair = self.get_port_pair_from_link(link_to_port,
path[0], path[1])
if port_pair is None:
self.logger.info("Port not found in first hop.")
return
out_port = port_pair[0]
self.send_flow_mod(first_dp, flow_info, in_port, out_port)
#self.send_flow_mod(first_dp, back_info, out_port, in_port)#back path
self.send_packet_out(first_dp, buffer_id, in_port, out_port, data)
# src and dst on the same datapath
else:
out_port = self.get_port(flow_info[2], access_table)
if out_port is None:
self.logger.info("Out_port is None in same dp")
return
self.send_flow_mod(first_dp, flow_info, in_port, out_port)
#self.send_flow_mod(first_dp, back_info, out_port, in_port)#back path
self.send_packet_out(first_dp, buffer_id, in_port, out_port, data)
def shortest_forwarding(self, msg, eth_type, ip_src, ip_dst):
"""
        Calculate the shortest forwarding path and install flow entries into the datapaths along it.
"""
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
if self.flow_infome.has_key((ip_src , ip_dst)) == False:
self.flow_infome[(ip_src , ip_dst)]={}
result = self.get_sw(datapath.id, in_port, ip_src, ip_dst)
#quick get path
if self.flow_infome[(ip_src , ip_dst)].setdefault('path')!=None:
path = self.flow_infome[(ip_src , ip_dst)]['path']
#self.logger.info("have path:[PATH]%s<-->%s: %s" % (ip_src, ip_dst, path))
flow_info = (eth_type, ip_src, ip_dst, in_port)
# install flow entries to datapath along side the path.
self.install_flow( self.datapaths,
self.awareness.link_to_port,
self.awareness.access_table, path,
flow_info, msg.buffer_id, msg.data)
else:
if result:
src_sw, dst_sw = result[0], result[1]
if dst_sw:
# Path has already calculated, just get it.
path = self.get_path(src_sw, dst_sw, weight=self.weight)
self.logger.info("[PATH]%s<-->%s: %s" % (ip_src, ip_dst, path))
flow_info = (eth_type, ip_src, ip_dst, in_port)
# install flow entries to datapath along side the path.
self.install_flow(self.datapaths,
self.awareness.link_to_port,
self.awareness.access_table, path,
flow_info, msg.buffer_id, msg.data)
self.flow_infome[(ip_src , ip_dst)]['path'] = path
self.flow_infome[(ip_src , ip_dst)]['in_port'] = in_port
self.flow_infome[(ip_src , ip_dst)]['eth_type'] = eth_type
self.flow_infome[(ip_src , ip_dst)]['buffer_id'] = msg.buffer_id
self.flow_infome[(ip_src , ip_dst)]['datapath'] = datapath
self.flow_infome[(ip_src , ip_dst)]['src'] = src_sw
self.flow_infome[(ip_src , ip_dst)]['dst'] = dst_sw
return
def saving_path(self , src , dst , path):
"""
TBD:saving the topo path
path is a list
"""
# try:
# fetchall_sql = 'SELECT * FROM flow WHERE ip_src = ? AND ip_dst = ?'
# data = [(str(src) , str(dst))]
# if fetchone(self.flowconn, fetchall_sql , data) :
# #update
# _sql = 'UPDATE flow SET path = ? WHERE ip_src = ? AND ip_dst = ? '
# data =[(str(path),str(src) , str(dst))]
# sql.update(self.flowconn , _sql , data)
# else:
# _sql = '''INSERT INTO flow (ip_src ,ip_dst , path) values (?, ? , ?)'''
# data = (str(src) ,str (dst) , str(path))
# sql.save(self.flowconn , _sql , data)
# except:
# print "try false"
print "TBD"
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
'''
        In the packet_in handler, we need to learn the access_table from ARP.
        Therefore, the first packet from an UNKNOWN host MUST be ARP.
'''
msg = ev.msg
datapath = msg.datapath
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
arp_pkt = pkt.get_protocol(arp.arp)
ip_pkt = pkt.get_protocol(ipv4.ipv4)
if isinstance(arp_pkt, arp.arp):
self.logger.debug("ARP processing")
self.arp_forwarding(msg, arp_pkt.src_ip, arp_pkt.dst_ip)
if isinstance(ip_pkt, ipv4.ipv4):
self.logger.debug("IPV4 processing")
if len(pkt.get_protocols(ethernet.ethernet)):
eth_type = pkt.get_protocols(ethernet.ethernet)[0].ethertype
self.shortest_forwarding(msg, eth_type, ip_pkt.src, ip_pkt.dst)
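# Rough packet-in pipeline implemented above: ARP packets populate the access
# table (or are flooded to unknown access ports); the first IPv4 packet of a
# flow triggers shortest_forwarding(), which resolves the source/destination
# switches, picks a path by the configured weight (hop/delay/bw), installs flow
# entries along it with install_flow(), and caches the path in flow_infome so
# the periodic qoe() re-optimisation can later re-route active flows.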
| 1.6875
| 2
|
tests/features/steps/database.py
|
manoadamro/jason
| 0
|
12778254
|
<reponame>manoadamro/jason<gh_stars>0
import shlex
import subprocess
import time
from datetime import datetime
from behave import given, then, when
from jason import JSONEncoder, jsonify, make_config, service
from jason.ext.sqlalchemy import SQLAlchemy
EXPOSED_FIELDS = ["created", "name"]
db = SQLAlchemy()
@JSONEncoder.encode_fields(*EXPOSED_FIELDS)
class MyModel(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
name = db.Column(db.String, nullable=False)
def create_postgres_container(context):
if "postgres" in context.containers:
return context.containers["postgres"]
container = context.docker.containers.run(
"sameersbn/postgresql:10-1",
name="postgresql",
hostname="localhost",
ports={"5432/tcp": 5432},
environment=["PG_PASSWORD=<PASSWORD>"],
detach=True,
)
context.host = "localhost"
if not context.is_circle:
command = shlex.split(f"sh scripts/wait_for_port.sh {context.host} 5432 10")
subprocess.Popen(command)
else:
time.sleep(5)
context.containers["postgres"] = container
return container
@given("we have postgres running")
def step_impl(context):
create_postgres_container(context)
@given("we have a postgres service")
def step_impl(context):
config = make_config("postgres")
config.DB_HOST = context.host
@service(config)
def my_simple_api(app):
db.init_app(app=app, migrate=None) # optional instance of flask_migrate.Migrate
context.app = my_simple_api.test_app()
with context.app.app_context():
db.create_all()
context.service = my_simple_api
@when("we create a row")
def step_impl(context):
with context.app.app_context():
instance = MyModel(name="something")
db.session.add(instance)
db.session.commit()
@then("we can select the row again")
def step_impl(context):
with context.app.app_context():
obj = MyModel.query.filter_by(name="something").first()
assert obj is not None
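# These step implementations correspond to a feature file roughly like the
# following (hypothetical wording/order):
#
#     Given we have postgres running
#     And we have a postgres service
#     When we create a row
#     Then we can select the row again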
| 2.375
| 2
|
translation.py
|
ogg17/fb2translate
| 0
|
12778255
|
<reponame>ogg17/fb2translate
import enum
import translators as ts
class BookType(enum.Enum):
fb2 = 0
epub = 1
class TranslateType(enum.Enum):
google = 0
yandex = 1
def text_translate(translate_type, text):
trans_text = ''
if translate_type == TranslateType.google:
trans_text = ts.google(text, from_language='en', to_language='ru', if_use_cn_host=False)
elif translate_type == TranslateType.yandex:
trans_text = ts.yandex(text, from_language='en', to_language='ru', if_use_cn_host=False)
return trans_text
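# Example sketch: translate a snippet with the Google backend.
#
#     print(text_translate(TranslateType.google, "Hello, world!"))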
| 2.90625
| 3
|
ncharts/management/commands/clear_clients.py
|
ncareol/ncharts
| 0
|
12778256
|
from django.core.management.base import BaseCommand
from ncharts.models import VariableTimes
from ncharts.models import ClientState
from ncharts import views as nc_views
from django.contrib.sessions.models import Session
class Command(BaseCommand):
def handle(self, **options):
sessions = Session.objects.all()
print("#sessions=%d" % len(sessions))
clnts = ClientState.objects.all()
print("#clients=%d" % len(clnts))
clnts_active = set()
ndeleted = 0
for sess in sessions:
sess_dict = sess.get_decoded()
for sess_key in sess_dict:
for clnt in clnts:
dset = clnt.dataset
project = dset.project
cid_name = nc_views.client_id_name(
project.name, dset.name)
if cid_name == sess_key and sess_dict[cid_name] == clnt.pk:
clnts_active.add(clnt.pk)
break
dtimes_active = set()
for clnt in clnts:
if clnt.pk in clnts_active:
print("client found in session: pk=%d, dataset=%s" % \
(clnt.pk, clnt.dataset))
# dtimes = clnt.data_times.all()
# for dt in dtimes:
# print("client pk=%d, active data_time, type(dt)=%s, dt.pk=%d" % \
# (clnt.pk, type(dt), dt.pk))
dtimes_active.update(clnt.data_times.all())
else:
print("client not found in session: pk=%d, dataset=%s, deleting" % \
(clnt.pk, clnt.dataset))
clnt.delete()
ndeleted += 1
print("#clients deleted=%d" % (ndeleted))
vtimes = VariableTimes.objects.all()
print("#vtimes=%d" % len(vtimes))
ndeleted = 0
for vt in vtimes:
# print("type vt=%s" % type(vt))
if vt not in dtimes_active:
print("VariableTime not found in a client: pk=%d, deleting" % \
vt.pk)
vt.delete()
ndeleted += 1
print("#vtimes deleted=%d" % (ndeleted))
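# This is a Django management command (ncharts/management/commands/clear_clients.py),
# so it would typically be invoked as: python manage.py clear_clients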
| 2.109375
| 2
|
pyth/plugins/plaintext/__init__.py
|
eriol/pyth
| 47
|
12778257
|
"""
Plaintext
"""
| 0.96875
| 1
|
bayes_implicit_solvent/rjmc_experiments/tree_rjmc_w_elements.py
|
openforcefield/bayes-implicit-solvent
| 4
|
12778258
|
<reponame>openforcefield/bayes-implicit-solvent
from jax.config import config
config.update("jax_enable_x64", True)
from numpy import load, random
from simtk import unit
from bayes_implicit_solvent.molecule import Molecule
import sys
valid_lls = ['student-t']
try:
job_id = int(sys.argv[1])
ll = sys.argv[2]
assert(ll in valid_lls)
except:
print("Didn't parse input, selecting job parameters at random")
job_id = random.randint(10000)
ll = valid_lls[random.randint(len(valid_lls))]
def sample_path_to_unitted_snapshots(path_to_npy_samples):
xyz = load(path_to_npy_samples)
traj = [snapshot * unit.nanometer for snapshot in xyz]
return traj
from glob import glob
from pkg_resources import resource_filename
path_to_vacuum_samples = resource_filename('bayes_implicit_solvent',
'vacuum_samples/vacuum_samples_*.npy')
paths_to_samples = glob(path_to_vacuum_samples)
from numpy import random
#random.seed(job_id)
#random.shuffle(paths_to_samples)
# 90:10 split
# TODO: Consider running on a 90:10 train/test split again...
paths_to_samples = paths_to_samples#[:int(0.9*len(paths_to_samples))]
print('number of molecules being considered: {}'.format(len(paths_to_samples)))
def extract_cid_key(path):
i = path.find('mobley_')
j = path.find('.npy')
return path[i:j]
cids = list(map(extract_cid_key, paths_to_samples))
print('first few CIDs', cids[:5])
mols = []
# TODO: potentially adjust this -- using n_configuration_samples=25 for faster debugging...
n_configuration_samples = 25
from bayes_implicit_solvent.freesolv import cid_to_smiles
from bayes_implicit_solvent.constants import beta
def unreduce(value):
"""Input value is in units of kB T, turn it into units of kilocalorie_per_mole"""
return value / (beta * unit.kilocalorie_per_mole)
for path in paths_to_samples:
cid = extract_cid_key(path)
smiles = cid_to_smiles[cid]
vacuum_samples = sample_path_to_unitted_snapshots(path)
thinning = int(len(vacuum_samples) / n_configuration_samples)
mol = Molecule(smiles, vacuum_samples=vacuum_samples[::thinning])
if (unreduce(mol.experimental_value) > -15) and (unreduce(mol.experimental_value) < 5):
mols.append(mol)
else:
print('discarding {} ({}) because its free energy was outside of the range [-15, +5] kcal/mol'.format(smiles, cid))
oemols = [mol.mol for mol in mols]
from jax import numpy as np
from jax import jit, vmap
from bayes_implicit_solvent.gb_models.jax_gb_models import compute_OBC_energy_vectorized
from bayes_implicit_solvent.solvation_free_energy import kj_mol_to_kT, one_sided_exp
def unpack(theta):
n_types = int(len(theta) / 2)
radii_, scaling_factors_ = theta[:n_types], theta[n_types:]
return 0.1 * radii_, scaling_factors_
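# Note: theta packs the per-type radii followed by the per-type scale factors;
# radii are stored 10x larger than the tree's working units (see get_theta
# below, which multiplies by 10), so unpack rescales them by 0.1.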
@jit
def predict_solvation_free_energy_jax(radii, scaling_factors, distance_matrices, charges):
def compute_component(distance_matrix):
return compute_OBC_energy_vectorized(distance_matrix, radii, scaling_factors, charges)
W_F = vmap(compute_component)(distance_matrices)
w_F = W_F * kj_mol_to_kT
return one_sided_exp(w_F)
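# The prediction above evaluates the vectorized OBC energy for every stored
# snapshot, converts from kJ/mol to kT, and estimates the solvation free energy
# with a one-sided exponential (EXP) average.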
distance_matrices = [mol.distance_matrices for mol in mols]
charges = [mol.charges for mol in mols]
expt_means = np.array([mol.experimental_value for mol in mols])
expt_uncertainties = np.array([mol.experimental_uncertainty for mol in mols])
from bayes_implicit_solvent.typers import RADIUS_UNIT
from bayes_implicit_solvent.typers import AtomSpecificationProposal
from bayes_implicit_solvent.gb_models.obc2_parameters import mbondi_model
initial_tree = mbondi_model
initial_tree.remove_node('[#14]') # otherwise everything is -inf, because this type will be empty
initial_tree.proposal_sigmas['radius'] = 1e-2 * RADIUS_UNIT
initial_tree.proposal_sigmas['scale_factor'] = 1e-2
# add one more parameter per element appearing in FreeSolv but not specified in obc2 parameter set to initial tree
for i in [17, 35, 53]:
smirks = '[#{}]'.format(i)
initial_tree.add_child(smirks, '*')
initial_tree.un_delete_able_types.add(smirks)
ring_specifiers = ['r0', 'r3', 'r4', 'r5', 'r6', 'r7', 'a', 'A']
charge_specifiers = ['-1', '+0', '+1', '+2']
hydrogen_count_specifiers = ['H0', 'H1', 'H2', 'H3', 'H4']
connectivity_specifiers = ['X1', 'X2', 'X3', 'X4']
specifiers = ring_specifiers + charge_specifiers + hydrogen_count_specifiers + connectivity_specifiers
atom_specification_proposal = AtomSpecificationProposal(atomic_specifiers=specifiers)
smirks_elaboration_proposal = atom_specification_proposal
print('initial tree:')
print(initial_tree)
name = 'tree_rjmc_n_config={}_{}_ll'.format(n_configuration_samples, ll)
from bayes_implicit_solvent.prior_checking import check_no_empty_types
error_y_trees = []
# handle the RJ moves differently from the random-walk moves
from jax.scipy.stats import norm
radius_prior = 1.5
scale_prior = 0.8
@jit
def log_prior(theta):
radii, scales = unpack(theta)
return np.sum(norm.logpdf(radii - radius_prior)) + np.sum(norm.logpdf(scales - scale_prior))
# todo: save predictions on all train and test datapoints?
def get_predictions(theta, types):
radii_, scaling_factors_ = unpack(theta)
radii = [radii_[types[i]] for i in range(len(types))]
scaling_factors = [scaling_factors_[types[i]] for i in range(len(types))]
return np.array([predict_solvation_free_energy_jax(radii[i], scaling_factors[i], distance_matrices[i], charges[i]) for i in range(len(charges))])
def get_radii_scales(theta, type_slices):
radii_, scaling_factors_ = unpack(theta)
radii = [radii_[type_slice] for type_slice in type_slices]
scaling_factors = [scaling_factors_[type_slice] for type_slice in type_slices]
return radii, scaling_factors
if ll == 'student-t':
def log_likelihood_as_fxn_of_prediction(prediction, expt_mean, expt_uncertainty):
return student_t.logpdf(prediction, loc=expt_mean, scale=expt_uncertainty, df=7)
elif ll == 'gaussian':
def log_likelihood_as_fxn_of_prediction(prediction, expt_mean, expt_uncertainty):
return norm.logpdf(prediction, loc=expt_mean, scale=expt_uncertainty)
def compute_log_likelihood_component(theta, type_slice_array, distance_matrix_array, charge_array, expt_mean, expt_uncertainty):
r, s = unpack(theta)
radius_array, scale_array = r[type_slice_array], s[type_slice_array] # TODO: replace with index_update...
prediction = predict_solvation_free_energy_jax(radius_array, scale_array, distance_matrix_array, charge_array)
return log_likelihood_as_fxn_of_prediction(prediction, expt_mean, expt_uncertainty)
from jax import jit, grad
g_component = jit(grad(compute_log_likelihood_component))
all_inds = np.arange(len(distance_matrices))
def compute_grad_log_likelihood_w_loop(theta, type_slices):
g = np.zeros(len(theta))
for i in range(len(distance_matrices)):
g += g_component(theta, type_slices[i], distance_matrices[i], charges[i], expt_means[i], expt_uncertainties[i])
return g
from jax.lax import fori_loop
# TODO: complete and jit this...
def compute_grad_log_likelihood_w_fori_loop(theta, type_slices):
g = np.zeros(len(theta))
def g_component_update(i, g):
return g + g_component(theta, type_slices[i], distance_matrices[i], charges[i], expt_means[i],
expt_uncertainties[i])
g = fori_loop(0, len(distance_matrices), g_component_update, g)
return g
# TODO: gradcheck
def grad_log_likelihood(theta, type_slices):
return compute_grad_log_likelihood_w_loop(theta, type_slices)
# posterior
def log_posterior(theta, type_slices):
log_prior_val = log_prior(theta)
if np.isfinite(log_prior_val):
preds = get_predictions(theta, type_slices)
log_lik_val = np.sum(log_likelihood_as_fxn_of_prediction(preds, expt_means, expt_uncertainties))
return log_prior_val + log_lik_val
else:
return - np.inf
#@jit # do NOT jit this!
def grad_log_posterior(theta, type_slices):
log_prior_val = log_prior(theta)
if np.isfinite(log_prior_val):
return grad(log_prior)(theta) + grad_log_likelihood(theta, type_slices)
else:
return np.nan * theta
def get_theta(tree):
# TODO: don't be so gross about unit conversions...
return np.hstack([10 * tree.get_radii(), tree.get_scale_factors()])
from jax.scipy.stats import t as student_t
def log_prob(tree):
log_prior_value = check_no_empty_types(tree)
theta = get_theta(tree)
log_prior_value += log_prior(theta)
if log_prior_value > -np.inf:
types = tree.apply_to_molecule_list(oemols)
predictions = get_predictions(theta, types)
log_likelihood_value = np.sum(log_likelihood_as_fxn_of_prediction(predictions, expt_means, expt_uncertainties))
return log_prior_value + log_likelihood_value
else:
return log_prior_value
from bayes_implicit_solvent.samplers import tree_rjmc
n_within_model_steps_per_cross_model_proposal = 10
n_cross_model_proposals = 100
n_iterations = n_within_model_steps_per_cross_model_proposal * n_cross_model_proposals
trajs = []
from tqdm import tqdm
trange = tqdm(range(n_cross_model_proposals))
def make_one_rjmc_proposal(tree):
result = tree_rjmc(tree, log_prob, smirks_elaboration_proposal, n_iterations=1,
fraction_cross_model_proposals=1.0, progress_bar=False)
return result['traj'][-1]
tree_traj = [initial_tree]
n_types_traj = [initial_tree.number_of_nodes]
within_model_trajs = []
prediction_traj = []
from bayes_implicit_solvent.samplers import langevin
train_smiles = [mol.smiles for mol in mols]
def save():
name = 'tree_rjmc_from_elemental_n_iter={}_ll={}_job_id={}'.format(
n_iterations,
ll,
job_id
)
onp.savez(name + '.npz',
ll=ll,
job_id=job_id,
train_smiles=onp.array(train_smiles),
n_types_traj=n_types_traj,
within_model_trajs=within_model_trajs,
expt_means=expt_means,
expt_uncs=expt_uncertainties,
)
from pickle import dump
with open(name + '.pkl', 'wb') as f:
dump(tree_traj, f)
from copy import deepcopy
from jax import grad
import numpy as onp
kj_mol_to_kT = 0.40339497740718383
kj_mol_to_kcal_mol = 0.2390057361376673
kT_to_kcal_mol = (1.0 / kj_mol_to_kT) * kj_mol_to_kcal_mol
def get_rmse_in_kcal_per_mol(preds):
expt_means_in_kcal_per_mol = expt_means * kT_to_kcal_mol
preds_in_kcal_per_mol = preds * kT_to_kcal_mol
return float(np.sqrt(np.mean((preds_in_kcal_per_mol - expt_means_in_kcal_per_mol) ** 2)))
for chunk in trange:
tree = make_one_rjmc_proposal(tree_traj[-1])
types = tree.apply_to_molecule_list(oemols)
theta0 = get_theta(tree)
# should just change to using consistent units throughout!!
N = int(len(theta0) / 2)
stepsize = 0.001
if ll == 'gaussian':
stepsize *= 0.25
def within_model_log_prob(theta):
return log_posterior(theta, types)
def within_model_grad_log_prob(theta):
return grad_log_posterior(theta, types)
def run_langevin(theta0, stepsize=stepsize):
v0 = onp.random.randn(*theta0.shape)
within_model_traj = langevin(theta0, v0, within_model_log_prob, within_model_grad_log_prob,
n_steps=n_within_model_steps_per_cross_model_proposal,
stepsize=stepsize,
collision_rate=0.001/stepsize)
current_log_prob = within_model_log_prob(within_model_traj[-1])
return within_model_traj, current_log_prob
within_model_traj, current_log_prob = run_langevin(theta0, stepsize)
while not np.isfinite(current_log_prob):
print("that didn't go well! trying again with smaller stepsize...")
print("\told stepsize: ", stepsize)
stepsize *= 0.5
print("\tnew stepsize ", stepsize)
within_model_traj, current_log_prob = run_langevin(theta0, stepsize)
theta = within_model_traj[-1]
r, s = unpack(theta)
tree.set_radii(r)
tree.set_scale_factors(s)
tree_traj.append(deepcopy(tree))
predictions = get_predictions(within_model_traj[-1], types)
prediction_traj.append(predictions)
train_rmse = get_rmse_in_kcal_per_mol(predictions)
trange.set_postfix(
current_log_prob=current_log_prob,
current_train_rmse=train_rmse,
max_n_types=max(n_types_traj),
min_n_types=min(n_types_traj),
)
n_types_traj.append(N)
for t in within_model_traj:
within_model_trajs.append(t)
if (chunk + 1) % 100 == 0:
save()
save()
| 2.03125
| 2
|
analysis-and-complexity-of-algorithms/big-o-notation/time-complexity/linear/examples/example-02.py
|
DKSecurity99/academic-programming
| 2
|
12778259
|
# O(n)
from typing import List
Vector = List[int]
n = [1, 20, 30, 40, 50, 60]
total = 0
def sumArrayElements(array: Vector) -> int:
total = 0
    for v in array:
total += v
return total
print(sumArrayElements(n))
| 3.484375
| 3
|
dataparser/queue/finder.py
|
idxn/sublime-robot-framework-assistant
| 103
|
12778260
|
import os
import fnmatch
def finder(path, ext):
"""Returns files from path by extension"""
l = []
if not ext.startswith('*.'):
ext = '*.{0}'.format(ext)
for path, dirs, files in os.walk(os.path.abspath(path)):
for f in fnmatch.filter(files, ext):
l.append(os.path.join(path, f))
return l
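# Example (hypothetical path): collect all robot files under a test tree.
#
#     robot_files = finder('/path/to/tests', 'robot')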
| 3.265625
| 3
|
650/main.py
|
JanaSabuj/Leetcode-solutions
| 13
|
12778261
|
from math import sqrt
class Solution:
def minSteps(self, n: int) -> int:
sum = 0
# prime factorise
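        # Each prime factor p of n costs p operations (one "Copy All" plus
        # p - 1 "Paste"s), so the minimum number of steps is the sum of n's
        # prime factors.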
for i in range(2, int(sqrt(n)) + 1):
while n % i == 0:
                n //= i
sum += i
if n > 1:
sum += n
return sum
| 3.328125
| 3
|
sgn.py
|
102/sign
| 0
|
12778262
|
<reponame>102/sign<gh_stars>0
#!/usr/bin/python3
import hash
import rsa
import argparse
from collections import deque
md5 = hash.MD5()
def int_to_bytearray(a):
x = deque()
while a:
x.appendleft(a & 0xff)
a >>= 8
return bytearray(x)
def generate(args):
public, private = rsa.get_key_pair(256)
with open(args.file + '_public', 'w+') as f:
f.write(str(public))
with open(args.file + '_private', 'w+') as f:
f.write(str(private))
def sign(args):
with open(args.file, 'rb') as f:
message = bytearray(f.read())
_hash = int_to_bytearray(md5.md5_digest(message))
print(_hash)
with open(args.private_key, 'r') as f:
private = rsa.PrivateKey.fromstring(f.readline().replace('\n', ''))
with open(args.file, 'ab') as f:
f.write(private.decrypt(_hash))
def validate(args):
with open(args.file, 'rb') as f:
_file = f.read()
probably_sign = bytearray(_file)[-32:]
message_body = bytearray(_file)[:-32]
with open(args.public_key, 'r') as f:
public = rsa.PublicKey.fromstring(f.readline().replace('\n', ''))
required_hash = public.encrypt(probably_sign)
hashed_body = int_to_bytearray(md5.md5_digest(message_body))
print('Sign is valid' if hashed_body == required_hash else 'Sign is invalid')
def unsign(args):
with open(args.file, 'rb') as f:
_file = bytearray(f.read())[:-32]
with open(args.file, 'wb') as f:
f.write(_file)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
generate_keys = subparsers.add_parser('generate')
generate_keys.add_argument('-f', '--file', required=True, type=str)
generate_keys.set_defaults(func=generate)
sign_file = subparsers.add_parser('sign')
sign_file.add_argument('-f', '--file', required=True, type=str)
sign_file.add_argument('-k', '--private-key', required=True, type=str)
sign_file.set_defaults(func=sign)
validate_sign = subparsers.add_parser('validate')
validate_sign.add_argument('-f', '--file', required=True, type=str)
validate_sign.add_argument('-k', '--public-key', required=True, type=str)
validate_sign.set_defaults(func=validate)
remove_sign = subparsers.add_parser('unsign')
remove_sign.add_argument('-f', '--file', required=True, type=str)
remove_sign.add_argument('-k', '--public-key', type=str)
remove_sign.set_defaults(func=unsign)
args = parser.parse_args()
args.func(args)
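# Example invocations, based on the subcommands defined above:
#
#     python3 sgn.py generate -f mykey
#     python3 sgn.py sign -f document.bin -k mykey_private
#     python3 sgn.py validate -f document.bin -k mykey_public
#     python3 sgn.py unsign -f document.bin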
| 2.875
| 3
|
tests/formats/mysql/definitions/test_database.py
|
cmancone/mygrations
| 10
|
12778263
|
<filename>tests/formats/mysql/definitions/test_database.py<gh_stars>1-10
import unittest
from mygrations.formats.mysql.file_reader.database import database as database_reader
from mygrations.formats.mysql.file_reader.create_parser import create_parser
class test_database(unittest.TestCase):
def _get_sample_db(self):
strings = [
"""
CREATE TABLE `logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`message` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
""", """
CREATE TABLE `more_logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`more_messages` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
]
return database_reader(strings)
def test_simple(self):
db1 = self._get_sample_db()
strings = [
"""
CREATE TABLE `logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`message` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
""", """
CREATE TABLE `less_logs` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`more_messages` TEXT NOT NULL,
`traceback` text,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
"""
]
db2 = database_reader(strings)
#differences = db2 - db1
#self.assertEquals( [], differences )
def test_add_table(self):
db = self._get_sample_db()
new_table = create_parser()
new_table.parse(
"""CREATE TABLE `log_changes` (
`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT,
`log_id` INT(10) UNSIGNED NOT NULL,
`type_id` INT(10) UNSIGNED NOT NULL,
`change` VARCHAR(255),
PRIMARY KEY (id),
KEY `log_changes_log_id` (`log_id`),
KEY `log_changes_type_id` (`type_id`)
);
"""
)
db.add_table(new_table)
self.assertEquals(3, len(db.tables))
self.assertTrue('log_changes' in db.tables)
self.assertEquals(new_table, db.tables['log_changes'])
def test_remove_table(self):
db1 = self._get_sample_db()
db1.remove_table(db1.tables['more_logs'])
self.assertEquals(1, len(db1.tables))
self.assertTrue('logs' in db1.tables)
self.assertFalse('more_logs' in db1.tables)
def test_exception_on_remove_invalid_table(self):
db1 = self._get_sample_db()
new_table = create_parser()
new_table.parse(
"""CREATE TABLE `log_changes` (
`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT,
`log_id` INT(10) UNSIGNED NOT NULL,
`type_id` INT(10) UNSIGNED NOT NULL,
`change` VARCHAR(255),
PRIMARY KEY (id),
KEY `log_changes_log_id` (`log_id`),
KEY `log_changes_type_id` (`type_id`)
);
"""
)
with self.assertRaises(ValueError):
db1.remove_table(new_table)
| 2.859375
| 3
|
Subproblem/parameters_subproblem.py
|
oyvorha/Master_Heuristic
| 1
|
12778264
|
class ParameterSub:
def __init__(self, route, vehicle, pattern, customer_arrivals, L_CS, L_FS, base_violations, V_0, D_O, base_deviations,
weights, hour):
# Sets
self.stations = [i for i in range(len(route.stations))]
self.charging_stations = list()
self.non_charging_stations = list()
self.depot_index = None
for i in range(1, len(route.stations)): # Don't include start station in subsets
if route.stations[i].charging_station:
self.charging_stations.append(i)
else:
self.non_charging_stations.append(i)
if route.stations[i].depot:
self.depot_index = i
self.stations += [len(route.stations)]
# Pattern
# Q_B, Q_CCL, Q_FCL, Q_CCU, Q_FCU
self.Q_B = pattern[0]
self.Q_CCL = pattern[1]
self.Q_FCL = pattern[2]
self.Q_CCU = pattern[3]
self.Q_FCU = pattern[4]
# Station specific
self.Q_S = [station.station_cap for station in route.stations] + [0]
self.L_CS = L_CS + [0]
self.L_FS = L_FS + [0]
self.I_IC = [customer_arrivals[i][0] for i in range(len(customer_arrivals))] + [0]
self.I_IF = [customer_arrivals[i][1] for i in range(len(customer_arrivals))] + [0]
self.I_OC = [customer_arrivals[i][2] for i in range(len(customer_arrivals))] + [0]
self.O = [station.get_ideal_state(hour) for station in route.stations] + [0]
# Vehicle specific
self.Q_BV = vehicle.battery_capacity
self.Q_CV = vehicle.bike_capacity + self.Q_CCL + self.Q_FCL - max(0, self.Q_CCU + self.Q_FCU)
if route.stations[0].depot:
self.depot_index = 0
self.L_BV = vehicle.battery_capacity
else:
self.L_BV = vehicle.current_batteries - self.Q_B
self.L_CV = vehicle.current_charged_bikes + self.Q_CCL - self.Q_CCU
self.L_FV = vehicle.current_flat_bikes + self.Q_FCL - self.Q_FCU
# Base Violations
self.V = base_violations + [0]
self.V_O = V_0
self.R_O = 0
self.D = base_deviations + [0]
self.D_O = D_O
if route.stations[0].charging_station:
self.R_O = max(0, self.Q_FCU - self.Q_FCL)
# Weights
self.W_V, self.W_R, self.W_D, self.W_N, self.W_L = weights
# self.print_all_params(pattern)
def print_all_params(self, pattern):
print("Stations: ", self.stations)
print("Charging Stations: ", self.charging_stations)
print("Non Charging Stations: ", self.non_charging_stations)
print("Depot index: ", self.depot_index)
print("Pattern: ", pattern)
print("Q_S: ", self.Q_S)
print("Ideal state: ", self.O)
print("L_CS: ", self.L_CS)
print("L_FS: ", self.L_FS)
print("I_IC: ", self.I_IC)
print("I_IF: ", self.I_IF)
print("I_OC: ", self.I_OC)
print("Q_BV: ", self.Q_BV)
print("Q_CV: ", self.Q_CV)
print("L_BV: ", self.L_BV)
print("L_CV: ", self.L_CV)
print("L_FV: ", self.L_FV)
print("Base Violations: ", self.V)
print("V_O: ", self.V_O)
print("Base Deviations: ", self.D)
print("D_O: ", self.D_O)
print("R_O: ", self.R_O)
| 2.671875
| 3
|
apps/exercises/views.py
|
ospreyelm/HarmonyLab
| 4
|
12778265
|
<filename>apps/exercises/views.py
import json
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django_tables2 import Column
from apps.exercises.models import Playlist, PerformanceData, User as Performers
from apps.exercises.tables import PlaylistActivityTable
User = get_user_model()
@staff_member_required
def playlist_performance_view(request, playlist_id):
data = []
performances = PerformanceData.objects.filter(
playlist__id=playlist_id
).select_related('user', 'playlist')
playlist = Playlist.objects.filter(id=playlist_id).first()
exercises = [exercise for exercise in playlist.exercise_list]
users = list(set(list(performances.values_list('user__email', flat=True))))
for user in users:
name = [n for n in list(Performers.objects.filter(email=user).values_list('first_name', 'last_name'))[0]]
user_data = {
'email': user,
'performer': " ".join([n for n in [
name[0],
name[1].upper(),
'<' + user + '>',
] if n != '']),
'performance_data': performances.filter(user__email=user).first().data
}
user_data.update({'exercise_count': len(user_data['performance_data'])})
data.append(user_data)
for d in data:
exercises_data = d['performance_data']
[d.update(**{exercise['id']:
f'{"Error(s) " if (isinstance(exercise["exercise_error_tally"], int) and exercise["exercise_error_tally"] > 0) else "Pass "}'
f'{"" if ((isinstance(exercise["exercise_error_tally"], int) and exercise["exercise_error_tally"] > 0) or not exercise["exercise_mean_tempo"]) else exercise["exercise_mean_tempo"]}'
f'{"" if (isinstance(exercise["exercise_error_tally"], int) and exercise["exercise_error_tally"] > 0) else "*" * exercise["exercise_tempo_rating"]} '
}) for exercise in exercises_data]
table = PlaylistActivityTable(
data=data,
extra_columns=[(exercise, Column()) for exercise in exercises]
)
playlist_name = Playlist.objects.filter(id=playlist_id).first().name
return render(request, "admin/performances.html", {
"table": table,
"playlist_name": playlist_name
})
@login_required
@method_decorator(csrf_exempt)
def submit_exercise_performance(request):
performance_data = json.loads(request.POST.get('data'))
user = request.user if request.user.is_authenticated else User.get_guest_user()
playlist_name, exercise_num = performance_data['exercise_ID'].split('/')
performance_data.pop('exercise_ID')
# performance_data.pop('performer')
playlist = Playlist.objects.filter(name=playlist_name).first()
exercise = playlist.get_exercise_obj_by_num(int(exercise_num))
PerformanceData.submit(
playlist_id=playlist._id,
exercise_id=exercise.id,
user_id=user.id,
data=performance_data
)
return HttpResponse(status=201)
@method_decorator(csrf_exempt)
def submit_playlist_performance(request):
performance_data = json.loads(request.POST.get('data'))
user = request.user if request.user.is_authenticated else User.get_guest_user()
playlist_name, _ = performance_data['exercise_ID'].split('/')
performance_data.pop('exercise_ID')
playlist = Playlist.objects.filter(name=playlist_name).first()
PerformanceData.submit_playlist_performance(
playlist_id=playlist._id,
user_id=user.id,
data=performance_data
)
return HttpResponse(status=201)
| 2.1875
| 2
|
neural_network.py
|
will-cromar/needy
| 4
|
12778266
|
<reponame>will-cromar/needy
# Luke
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import TanhLayer
from normalizer import normalize
from normalizer import denormalize
from price_parsing import *
from neural_network_functions import trainNetwork
from neural_network_functions import graphOutput
def graphNN(ticker, date, epochs, verbose):
"""
The function builds a data set of stock prices, normalizes that data set, builds a linked data set to
train the neural network, generates a neural network, trains the network, makes predictions, analyzes the
predictions against testing data to generate statistics for comparison, and uses the statistics to
generate graphs as a png file.
:param ticker: the stock sticker to train and predict on
:param date: the date to split the data on to create training and testing
:param epochs: the number of times to train the network
:param verbose: boolean value for verbose output
:return tomorrowPrice: the price prediction for tomorrow
:return totalTime: the total time in seconds it took to train the network on the data set
:return averageTimePerEpoch: the average time per training run
:return averagePercentError:the average percent error of the predictions and the testing data
:return minPercentError:the minimum percent error of the predictions and the testing data
"""
# request stock prices and split by the specified date to create training and testing data sets
if verbose: print 'Requesting data...'
data = getStockPrices(ticker, frequency="daily", update=True)
trainData, testData = splitByDate(data, date)
xTrain, yTrain = preprocessStocks(trainData)
xTest, yTest = preprocessStocks(testData)
# allocate space for predictions and error values
fucturePredictions = []
trainingPredictions = []
percentError = []
if verbose: print 'complete.'
if verbose: print 'Normalizing data...'
# normalize the values to a percentage of their max values to increase network training speed
xTrain, yTrain, xTest, yTest, priceScaleFactor, timeScaleFactor = normalize(xTrain, yTrain, xTest, yTest)
if verbose: print 'complete.'
if verbose: print 'Building dataset...'
# build a linked data set to allow for training and error calculation
ds = SupervisedDataSet(1,1)
for i in range(0, len(xTrain)):
ds.appendLinked(xTrain[i], yTrain[i])
if verbose: print 'complete.'
    if verbose: print 'Building network...'
rnn = buildNetwork(1, 3, 3, 3, 3, 3, 3, 3, 3, 1, bias=True, recurrent=True, hiddenclass=TanhLayer)
if verbose: print 'complete'
if verbose: print 'Training network...'
trainer = BackpropTrainer(rnn, ds, learningrate=0.01)
totalTime, averageTimePerEpoch, trainerErrorValues, epochTimes = trainNetwork(trainer, epochs, verbose)
if verbose: print 'Training network 100.0% complete.'
if verbose: print 'Predicting...'
# prime the network
for i in xTrain:
rnn.activate(i)
# make predictions with network on the training data to show general shape of approximated function
for i in xTrain:
trainingPredictions.append(rnn.activate(i))
# make predictions with the network on the testing data to validate the accuracy of the network
for i in xTest:
fucturePredictions.append(rnn.activate(i))
# predict tomorrow's price
tomorrowPrice = rnn.activate(xTest[len(xTest) - 1] + 1) * priceScaleFactor
if verbose: print 'complete.'
if verbose: print 'Generating graphs...'
# denormalize
xTrain, yTrain, xTest, yTest, fucturePredictions, trainingPredictions = denormalize(xTrain, yTrain, xTest, yTest, fucturePredictions, trainingPredictions, priceScaleFactor, timeScaleFactor)
# calculate percent error
for i in range(0, len(yTest)):
percentError.append((abs((yTest[i] - fucturePredictions[i])/yTest[i]) *100))
# calculates statistics on the analysis of the network
sumPercentError = sum(percentError)
averagePercentError = sumPercentError / len(percentError)
numDataPoints = len(xTrain) + len(xTest)
minPercentError = min(percentError)
# generate the graphs and save them to the working directory
graphOutput(xTrain, yTrain, xTest, yTest, fucturePredictions, trainingPredictions, ticker)
if verbose: print 'complete.'
# returns
return tomorrowPrice, numDataPoints, totalTime, averageTimePerEpoch, averagePercentError, minPercentError
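# Illustrative call sketch (not from the original repo): the ticker, split date
# and epoch count below are hypothetical examples, and the pybrain/normalizer/
# price_parsing dependencies imported above must be available.
# price, points, total, per_epoch, avg_err, min_err = graphNN('AAPL', '2015-01-01', epochs=50, verbose=True)
# print 'Predicted price for tomorrow:', price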
| 3.0625
| 3
|
ote_sdk/ote_sdk/entities/coordinate.py
|
ntyukaev/training_extensions
| 775
|
12778267
|
<reponame>ntyukaev/training_extensions
"""This module implements the Coordinate entity"""
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from typing import Tuple
class Coordinate:
"""
Represents a 2D-coordinate with an x-position and a y-position.
NB most coordinates are normalized (between 0.0 and 1.0)
:param x: x-coordinate
:param y: y-coordinate
"""
__slots__ = ["x", "y"]
def __init__(self, x: float, y: float):
self.x = x
self.y = y
def __repr__(self):
return f"Coordinate(x={self.x}, y={self.y})"
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __hash__(self):
return hash(str(self))
def as_tuple(self) -> Tuple[float, float]:
"""
Convert the coordinates to a pair (x,y)
"""
return self.x, self.y
def as_int_tuple(self) -> Tuple[int, int]:
"""
Convert the coordinates to a pair of integer coordinates (x,y)
"""
return int(self.x), int(self.y)
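# Minimal usage sketch (illustrative, not part of the original module): shows
# construction, equality, and tuple conversion of normalized coordinates.
if __name__ == "__main__":
    center = Coordinate(x=0.5, y=0.5)
    assert center == Coordinate(0.5, 0.5)
    assert center.as_tuple() == (0.5, 0.5)
    assert Coordinate(10.2, 3.7).as_int_tuple() == (10, 3)
    print(center)  # Coordinate(x=0.5, y=0.5)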
| 3.328125
| 3
|
bot/twitter.py
|
dandelea/twitter-naive-communities
| 0
|
12778268
|
<filename>bot/twitter.py
import datetime
import time
import tweepy
class Connection:
"""
Manages the connection with the Twitter API.
"""
def __init__(self, accounts, user_fields, tweet_fields):
self.accounts = accounts
self.index = 0
self.user_fields = user_fields
self.tweet_fields = tweet_fields
self.api = None
def connect(self):
"""
Activates a connection.
"""
auth = tweepy.OAuthHandler(
self.accounts[self.index]['consumer_key'],
self.accounts[self.index]['consumer_secret']
)
auth.set_access_token(
self.accounts[self.index]['access_token'],
self.accounts[self.index]['access_token_secret']
)
self.api = tweepy.API(auth)
def next(self):
"""
        Switches to the next Twitter account.
        If we got to the end, restart from the beginning.
"""
if self.index == len(self.accounts)-1:
self.index = 0
else:
self.index += 1
print("Switch to account {0}".format(self.index))
self.connect()
def wait(self, api_family, api_url):
"""
        Check whether we have to wait for the given endpoint.
If the result is True, sleep.
"""
copy_index = self.index
while True:
rate_limit_status = self.api.rate_limit_status()
if rate_limit_status['resources'][api_family][api_url]['remaining'] == 0:
self.next()
if self.index == copy_index:
# Full circle
rate = self.api.rate_limit_status()['resources']
sleep_time = rate[api_family][api_url]['reset'] - int(time.time()) + 5
wakeup_date = datetime.datetime.now() + datetime.timedelta(seconds=sleep_time)
print("Sleeping until {0}".format(wakeup_date.strftime("%c")))
time.sleep(sleep_time)
break
else:
break
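# Illustrative usage sketch (the credential dict below is hypothetical; real
# consumer/access tokens are required before tweepy calls will succeed):
# accounts = [{'consumer_key': '...', 'consumer_secret': '...',
#              'access_token': '...', 'access_token_secret': '...'}]
# conn = Connection(accounts, user_fields=[], tweet_fields=[])
# conn.connect()
# conn.wait('followers', '/followers/ids')  # rotate accounts / sleep if rate limited
# ... then issue requests through conn.api, a tweepy.API instance.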
| 3.28125
| 3
|
tests/util/test_message_loop.py
|
natduca/ndbg
| 5
|
12778269
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from util.message_loop import *
class TestMessageLoop_NoMessages(unittest.TestCase):
def test_add_message(self):
res = []
MessageLoop.add_message(lambda: res.append(True))
MessageLoop.run_while(lambda: len(res) == 0)
  def test_add_delayed_message(self):
res = []
MessageLoop.add_delayed_message(lambda: res.append(True), 200)
MessageLoop.run_while(lambda: len(res) == 0)
def test_add_delayed_recurring_message(self):
res = []
i = [3]
def tick():
i[0] -= 1
res.append(True)
return i[0] > 0
MessageLoop.add_delayed_message(tick, 200)
MessageLoop.run_until(lambda: len(res) == 2)
| 2.359375
| 2
|
tests/contracts/test_bundle.py
|
devopshq/crosspm2
| 3
|
12778270
|
<reponame>devopshq/crosspm2
import pytest
from crosspm.contracts.bundle import Bundle, validate_trigger_package_doesnt_hide_higher_version
from crosspm.contracts.package import Package
from crosspm.helpers.exceptions import CrosspmBundleNoValidContractsGraph, CrosspmException, \
CrosspmBundleTriggerPackageHidesHigherVersion
def create_packages_repo(packages):
packages_repo = [Package.create_package_from_tuple(p) for p in packages]
packages_repo.sort(key=lambda p: str(p), reverse=True)
return packages_repo
def packages_repo_contract_single_value():
    packages = [
('db', '1', 'contracts.db=a1'),
('db', '2', 'contracts.db=A2'),
('db', '3', 'contracts.db=B3'),
('db', '4', 'contracts.db=b4'),
('be', '1', 'contracts.db=a1;contracts.rest=1'),
('be', '2', 'contracts.db=A2;contracts.rest=1'),
('be', '3', 'contracts.db=A1;contracts.rest=2'),
('be', '4', 'contracts.db=b4;contracts.rest=2'),
('ui', '1', 'contracts.rest=1'),
('ui', '2', 'contracts.rest=2'),
('ui', '3', 'contracts.rest=3'),
('ui', '4', 'contracts.rest=4'),
('ncp', '1', ''),
('ncp', '2', ''),
('ncp', '3', ''),
]
return create_packages_repo(packages)
def create_bundle(deps_txt, packages_repo, trigger_packages, enable_tp_hides_higher_version):
return Bundle(deps_txt, packages_repo, trigger_packages, enable_tp_hides_higher_version)
class TestBundle:
@pytest.mark.parametrize(
"test_case",
[
{
'trigger_packages': [('db', 4)],
'packages': [('db', 4), ('ui', 2), ('be', 4), ('ncp', 3)]
},
{
'trigger_packages': [('be', 4)],
'packages': [('db', 4), ('ui', 2), ('be', 4), ('ncp', 3)]
},
{
'trigger_packages': [('ui', 2)],
'packages': [('db', 4), ('ui', 2), ('be', 4), ('ncp', 3)]
},
{
'trigger_packages': [('be', 1)],
'packages': [('db', 1), ('ui', 1), ('be', 1), ('ncp', 3)]
},
{
'trigger_packages': [],
'packages': [('db', 4), ('ui', 2), ('be', 4), ('ncp', 3)]
},
{
'trigger_packages': [('be', 2), ('db', 2)],
'packages': [('db', 2), ('ui', 1), ('be', 2), ('ncp', 3)]
},
]
)
def test_calculate_success(self, test_case):
deps_txt = ['db', 'be', 'ui', 'ncp']
self.do_test_calculate_case(deps_txt, packages_repo_contract_single_value(), test_case)
@pytest.mark.parametrize(
"test_case",
[
{
'trigger_packages': [('ui', 4)]
},
{
'trigger_packages': [('be', 1), ('db', 2)],
},
]
)
def test_calculate_failure(self, test_case):
with pytest.raises(CrosspmException) as exc_info:
deps_txt = ['db', 'be', 'ui', 'ncp']
self.do_test_calculate_case(deps_txt, packages_repo_contract_single_value(), test_case)
def do_test_calculate_case(self, deps_txt, packages_repo, test_case) -> None:
tp = []
if test_case['trigger_packages']:
            tp = [Package.create_package_from_tuple(t) for t in test_case['trigger_packages']]
bundle = create_bundle(deps_txt, packages_repo, tp, True)
packages = set(bundle.calculate().values())
expected_packages = set()
for p in Package.create_packages(*test_case['packages']):
expected_packages.add(packages_repo[packages_repo.index(p)])
assert packages == expected_packages
def packages_repo_contract_multiple_values(self):
        packages = [
('db', '1', 'contracts.db=90fe'),
('db', '2', 'contracts.db=10be'),
('db', '3', 'contracts.db=81ab'),
('db', '4', 'contracts.db=00fe'),
('db', '5', 'contracts.db=00be'),
('be', '1', 'contracts.db=81ab;contracts.rest=1'),
('be', '2', 'contracts.db=00fe,10be;contracts.rest=1'),
('be', '11', 'contracts.db=10be;contracts.rest=1'),
('ui', '1', 'contracts.rest=1'),
('ui', '2', 'contracts.rest=1'),
('ui', '3', 'contracts.rest=3'),
('ui', '4', 'contracts.rest=4'),
('ncp', '1', ''),
('ncp', '2', ''),
('ncp', '3', ''),
]
return create_packages_repo(packages)
@pytest.mark.parametrize(
"test_case",
[
# {
# 'trigger_packages': [('be', 2)],
# 'packages': [('db', 2), ('be', 2), ('ui', 2), ('ncp', 3)]
# },
{
'trigger_packages': [],
'packages': [('db', 4), ('be', 2), ('ui', 2), ('ncp', 3)]
},
]
)
def test_contract_multiple_values_calculate_success(self, test_case):
deps_txt = ['db', 'be', 'ui', 'ncp']
self.do_test_calculate_case(deps_txt, self.packages_repo_contract_multiple_values(), test_case)
def test_trigger_packages_doesnt_hide_higher_version(self):
repo = self.packages_repo_contract_multiple_values()
assert validate_trigger_package_doesnt_hide_higher_version(Package.create_package_from_tuple(('db', 6)), repo)
assert validate_trigger_package_doesnt_hide_higher_version(Package.create_package_from_tuple(('ui', 5)), repo)
with pytest.raises(CrosspmBundleTriggerPackageHidesHigherVersion):
validate_trigger_package_doesnt_hide_higher_version(Package.create_package_from_tuple(('ui', 1)), repo)
def test_validate_trigger_package_doesnt_hide_higher_version():
packages = ["tad-db_1.2.3_all.deb", "tad-db_1.3.3_all.deb"]
repo = [Package.create_package_debian(p) for p in packages]
assert validate_trigger_package_doesnt_hide_higher_version(Package.create_package_debian("tad-db_1.3.4_all.deb"), repo)
assert validate_trigger_package_doesnt_hide_higher_version(Package.create_package_debian("tad-db_2.1.1_all.deb"),
repo)
with pytest.raises(CrosspmBundleTriggerPackageHidesHigherVersion):
validate_trigger_package_doesnt_hide_higher_version(Package.create_package_debian("tad-db_1.2.4_all.deb"), repo)
with pytest.raises(CrosspmBundleTriggerPackageHidesHigherVersion):
validate_trigger_package_doesnt_hide_higher_version(Package.create_package_debian("tad-db_1.1.4_all.deb"), repo)
| 2.046875
| 2
|
fonty/models/font/font_format.py
|
jamesssooi/font-cli
| 12
|
12778271
|
<reponame>jamesssooi/font-cli
'''font_format.py'''
from enum import Enum
class FontFormat(Enum):
'''Represents a font format.'''
WOFF = 'woff'
WOFF2 = 'woff2'
TTF = 'ttf'
OTF = 'otf'
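# Minimal usage sketch (illustrative, not part of the original module):
# members can be looked up by attribute or by their string value.
if __name__ == '__main__':
    assert FontFormat.WOFF2.value == 'woff2'
    assert FontFormat('ttf') is FontFormat.TTF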
| 2.40625
| 2
|
kashgari/layers/__init__.py
|
SharpKoi/Kashgari
| 2,422
|
12778272
|
# encoding: utf-8
# author: BrikerMan
# contact: <EMAIL>
# blog: https://eliyar.biz
# file: __init__.py
# time: 7:39 下午
from typing import Dict, Any
from tensorflow import keras
from .conditional_random_field import KConditionalRandomField
from .behdanau_attention import BahdanauAttention # type: ignore
L = keras.layers
L.BahdanauAttention = BahdanauAttention
L.KConditionalRandomField = KConditionalRandomField
def resigter_custom_layers(custom_objects: Dict[str, Any]) -> Dict[str, Any]:
custom_objects['KConditionalRandomField'] = KConditionalRandomField
custom_objects['BahdanauAttention'] = BahdanauAttention
return custom_objects
if __name__ == "__main__":
pass
| 2.125
| 2
|
e2xgrader/preprocessors/authoring/__init__.py
|
divindevaiah/e2xgrader
| 2
|
12778273
|
from .preprocessor import Preprocessor
from .removeexercise import RemoveExercise
from .copynotebooks import CopyNotebooks
from .copyfiles import CopyFiles
from .generatetaskids import GenerateTaskIDs
from .makeexercise import MakeExercise
from .filltemplate import FillTemplate
from .addtaskheader import AddTaskHeader
__all__ = [
"Preprocessor",
"RemoveExercise",
"CopyNotebooks",
"CopyFiles",
"GenerateTaskIDs",
"MakeExercise",
"FillTemplate",
"AddTaskHeader",
]
| 1.132813
| 1
|
src/tav/tmux/model.py
|
mudox/pytav
| 0
|
12778274
|
# -*- coding: utf-8 -*-
from typing import NamedTuple
class Server(NamedTuple):
sessions: list
class Window(NamedTuple):
id: str
name: str
index: int
def __eq__(self, rhs):
if not isinstance(rhs, Window):
return NotImplemented
if self is rhs:
return True
return \
self.id == rhs.id and \
self.name == rhs.name and \
self.index == rhs.index
class Session(NamedTuple):
name: str
loaded: bool
id: str
windows: list
def __eq__(self, rhs):
if not isinstance(rhs, Session):
return NotImplemented
if self is rhs:
return True
for a in ('id', 'loaded', 'name'):
if getattr(self, a) != getattr(rhs, a):
return False
if len(self.windows) != len(rhs.windows):
return False
return self.windows == rhs.windows
class Pane(NamedTuple):
tty: str
| 2.90625
| 3
|
tests/integration/airflow/dags/demo_dag.py
|
frankcash/marquez-airflow
| 1
|
12778275
|
from datetime import datetime
from airflow.operators.dummy_operator import DummyOperator
from marquez_airflow import DAG
DAG_NAME = 'test_dag'
default_args = {
'depends_on_past': False,
'start_date': datetime(2019, 2, 1),
}
dag = DAG(DAG_NAME, schedule_interval='0 0 * * *',
catchup=False,
default_args=default_args, description="My awesome DAG")
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag)
run_this_2.set_upstream(run_this_1)
| 2.203125
| 2
|
src/my_tfg_pkg/my_tfg_pkg/controller_node.py
|
pparrilla/ROS2_TFG
| 0
|
12778276
|
<reponame>pparrilla/ROS2_TFG
#!/usr/bin/env python3
from functools import partial
import json
import datetime
import rclpy
import os
from rclpy.logging import get_logger
from rclpy.node import Node
from my_tfg_interfaces.msg import FloatDataNode, StatusNode
from my_tfg_interfaces.srv import UploadFile
class ControllerNode(Node):
def __init__(self):
super().__init__("controller_1") # Node name
self.declare_parameter("device_id")
self.declare_parameter("pos_x", 0.0)
self.declare_parameter("pos_y", 0.0)
self.status_controller_ = StatusNode()
self.status_controller_.device_id = self.get_parameter(
"device_id").value
self.status_controller_.work_status = 1
self.status_controller_.position.x = self.get_parameter("pos_x").value
self.status_controller_.position.y = self.get_parameter("pos_y").value
self.data_nodes_ = {}
self.data_nodes_firebase_ = {}
self.sensors_info_ = {}
self.actuators_info_ = {}
self.json_filename_ = "data_" + \
self.get_name() + ".json"
self.json_filename_firebase_ = "data_" + \
self.get_name() + "_firebase.json"
self.json_nodes_info_filename_ = "nodes_info_" + \
self.get_name() + ".json"
self.init_data()
# Create all subscribers
self.temperature_subscriber_ = self.create_subscription(
FloatDataNode, "temperature", self.callback_temperature, 10)
self.humidity_subscriber_ = self.create_subscription(
FloatDataNode, "humidity", self.callback_humidity, 10)
self.irradiance_subscriber_ = self.create_subscription(
FloatDataNode, "irradiance", self.callback_irradiance, 10)
self.status_subscriber_ = self.create_subscription(
StatusNode, "status_actuator", self.callback_status, 10)
self.save_timer_ = self.create_timer(300, self.save_data)
self.save_timer_nodes_ = self.create_timer(300, self.save_nodes_info)
self.get_logger().info(
"Controller_" + str(self.status_controller_.device_id) + " has been started.")
def callback_temperature(self, msg):
self.add_data_to_list("temperature", msg)
def callback_humidity(self, msg):
self.add_data_to_list("humidity", msg)
def callback_irradiance(self, msg):
self.add_data_to_list("irradiance", msg)
def callback_status(self, msg):
actuator_info = {
msg.device_id:{
"type" : msg.device_type,
"pos_x" : msg.position.x,
"pos_y" : msg.position.y
}
}
if msg.device_id in self.actuators_info_:
if not msg.device_type in self.actuators_info_[msg.device_id]["type"]:
self.actuators_info_[msg.device_id]["type"].append(msg.device_type)
else:
self.actuators_info_.update(actuator_info)
actuator_data = {
"device_id": msg.device_id,
"value": msg.work_status,
"timestamp": datetime.datetime.now().strftime("%x, %X")
}
self.data_nodes_[msg.device_type].append(actuator_data)
def add_data_to_list(self, type, msg):
sensor_info = {
msg.device_id:{
"type" : [type],
"pos_x": msg.position.x,
"pos_y": msg.position.y
}
}
if msg.device_id in self.sensors_info_:
if not type in self.sensors_info_[msg.device_id]["type"]:
self.sensors_info_[msg.device_id]["type"].append(type)
else:
self.sensors_info_.update(sensor_info)
sensor_data = {
"device_id": msg.device_id,
"value": msg.data,
"timestamp": datetime.datetime.now().strftime("%x, %X")
}
self.data_nodes_[type].append(sensor_data)
def save_data(self):
self.save_data_firebase()
self.get_logger().info("Saving in " + self.json_filename_ + " for sqlite3")
json_dir_path = os.getenv('JSON_ROS_DIR')
json_file_path = json_dir_path + self.json_filename_
with open(json_file_path, 'w') as outfile:
json.dump(self.data_nodes_, outfile)
self.call_upload_file("upload_sqlite", self.json_filename_)
self.init_data()
def save_data_firebase(self):
self.get_logger().info("Saving in " + self.json_filename_firebase_ + " for firebase")
json_dir_path = os.getenv('JSON_ROS_DIR')
json_file_path = json_dir_path + self.json_filename_firebase_
# Add timestamp from last data
timestamp = datetime.datetime.now().strftime("%x, %X")
for type_data in self.data_nodes_:
devices_id = []
for data in self.data_nodes_[type_data]:
if not data["device_id"] in devices_id:
devices_id.append(data["device_id"])
changed_data_time = data
changed_data_time["timestamp"] = timestamp
self.data_nodes_firebase_[
type_data].append(changed_data_time)
with open(json_file_path, 'w') as outfile:
json.dump(self.data_nodes_firebase_, outfile)
self.call_upload_file("upload_firebase", self.json_filename_firebase_)
# self.init_data()
def save_nodes_info(self):
self.get_logger().info("Saving Nodes info in " + self.json_nodes_info_filename_ + " for firebase")
json_dir_path = os.getenv('JSON_ROS_DIR')
json_file_path = json_dir_path + self.json_nodes_info_filename_
nodes_info = {
"sensors" : self.sensors_info_,
"actuators" : self.actuators_info_
}
with open(json_file_path, 'w') as outfile:
json.dump(nodes_info, outfile)
self.call_upload_file("upload_nodes_info", self.json_nodes_info_filename_)
def call_upload_file(self, db, json_name):
client = self.create_client(UploadFile, db)
while not client.wait_for_service(1.0):
self.get_logger().warn("Waiting for " + db + " Service...")
request = UploadFile.Request()
request.device_id = self.status_controller_.device_id
request.filename = json_name
future = client.call_async(request)
future.add_done_callback(
partial(self.callback_upload_file,
device_id=self.status_controller_.device_id,
filename=json_name))
def callback_upload_file(self, future, device_id, filename):
try:
response = future.result()
if response.success:
self.get_logger().info("Upload to Database done.")
except Exception as e:
self.get_logger().error("Service call failed %r" % (e,))
def init_data(self):
self.data_nodes_ = {"temperature": [],
"humidity": [], "irradiance": [], "heater": [], "window": []}
self.data_nodes_firebase_ = {"temperature": [],
"humidity": [], "irradiance": [], "heater": [], "window": []}
def main(args=None):
rclpy.init(args=args)
node = ControllerNode()
rclpy.spin(node)
rclpy.shutdown()
if __name__ == "__main__":
main()
| 2.34375
| 2
|
kafka/metrics/stats/percentiles.py
|
tcpcloud/debian-python-kafka
| 0
|
12778277
|
<gh_stars>0
from kafka.metrics import AnonMeasurable, NamedMeasurable
from kafka.metrics.compound_stat import AbstractCompoundStat
from kafka.metrics.stats import Histogram
from kafka.metrics.stats.sampled_stat import AbstractSampledStat
class BucketSizing(object):
CONSTANT = 0
LINEAR = 1
class Percentiles(AbstractSampledStat, AbstractCompoundStat):
"""A compound stat that reports one or more percentiles"""
def __init__(self, size_in_bytes, bucketing, max_val, min_val=0.0,
percentiles=None):
super(Percentiles, self).__init__(0.0)
self._percentiles = percentiles or []
self._buckets = int(size_in_bytes / 4)
if bucketing == BucketSizing.CONSTANT:
self._bin_scheme = Histogram.ConstantBinScheme(self._buckets,
min_val, max_val)
elif bucketing == BucketSizing.LINEAR:
if min_val != 0.0:
raise ValueError('Linear bucket sizing requires min_val'
' to be 0.0.')
            self._bin_scheme = Histogram.LinearBinScheme(self._buckets, max_val)
else:
            raise ValueError('Unknown bucket type: %s' % bucketing)
def stats(self):
measurables = []
def make_measure_fn(pct):
return lambda config, now: self.value(config, now,
pct / 100.0)
for percentile in self._percentiles:
measure_fn = make_measure_fn(percentile.percentile)
stat = NamedMeasurable(percentile.name, AnonMeasurable(measure_fn))
measurables.append(stat)
return measurables
def value(self, config, now, quantile):
self.purge_obsolete_samples(config, now)
count = sum(sample.event_count for sample in self._samples)
if count == 0.0:
return float('NaN')
sum_val = 0.0
quant = float(quantile)
for b in range(self._buckets):
for sample in self._samples:
assert type(sample) is self.HistogramSample
hist = sample.histogram.counts
sum_val += hist[b]
if sum_val / count > quant:
return self._bin_scheme.from_bin(b)
return float('inf')
def combine(self, samples, config, now):
return self.value(config, now, 0.5)
def new_sample(self, time_ms):
return Percentiles.HistogramSample(self._bin_scheme, time_ms)
def update(self, sample, config, value, time_ms):
assert type(sample) is self.HistogramSample
sample.histogram.record(value)
class HistogramSample(AbstractSampledStat.Sample):
def __init__(self, scheme, now):
super(Percentiles.HistogramSample, self).__init__(0.0, now)
self.histogram = Histogram(scheme)
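# Illustrative construction sketch (not from kafka-python itself): the byte
# budget and value range below are arbitrary example numbers.
# percs = Percentiles(size_in_bytes=400,              # ~100 histogram buckets
#                     bucketing=BucketSizing.CONSTANT,
#                     max_val=1000.0)
# Samples are fed in through the sensor/metrics machinery; value(config, now, 0.99)
# then reads back the estimated 99th percentile.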
| 2.28125
| 2
|
niobium/webelement_wait.py
|
cle-b/niobium
| 1
|
12778278
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import time
from selenium.common.exceptions import (
ElementNotVisibleException,
ElementNotInteractableException,
NoSuchElementException,
)
from .timeout import ImplicitWait, ExplicitWait
def wait(self, displayed=True, enabled=True, timeout=None):
"""
Wait until expected conditions.
:Args:
- displayed (bool - default: True): Wait until the element is displayed.
- enabled (bool - default: True): Wait until the element is enabled.
    - timeout (float - default: None): Timeout in seconds; the implicit timeout is used if None.
:Returns:
The element itself.
:Raises:
- ElementNotVisibleException - if element is not visible.
- ElementNotInteractableException - if element is not enabled.
- NoSuchElementException - if expected conditions not satisfied.
"""
wait_timer = ExplicitWait(timeout) if timeout is not None else ImplicitWait()
wait_timer.start()
once = True
while (not wait_timer.max_time_exceeded) or once:
once = False
if ((displayed is None) or (displayed == self.is_displayed())) and (
(enabled is None) or (enabled == self.is_enabled())
):
return self
time.sleep(0.2)
if displayed:
raise ElementNotVisibleException()
elif enabled:
raise ElementNotInteractableException()
else:
raise NoSuchElementException()
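# Illustrative usage sketch (assumption: niobium attaches this `wait` method to
# selenium WebElement instances; the locator below is hypothetical):
# element = driver.find_element_by_id("submit")
# element.wait(displayed=True, enabled=True, timeout=5).click()
# element.wait(enabled=None)  # only wait until the element is displayed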
| 3.4375
| 3
|
agilent33220a.py
|
Quik-e/Agilent-33220a-Remote-Control
| 0
|
12778279
|
<filename>agilent33220a.py<gh_stars>0
# By <NAME>
import time
import numpy as np
import visa
class instrument:
def __init__(self, visa_instrument_handle):
self.instrument_handle = visa_instrument_handle
self.memory=65536
self.namelen=12
self.vmax=10.0 # High Z
self.freqmax=1000000.0 #[Hz] Arbs
self.connected=0
self.lentext=40
self.noerror="ERROR"
self.nospace="+0\n"
def write(self, command_string): # Implements visa write command
self.instrument_handle.write(command_string)
def query(self, command_string): # Implements visa query command
return self.instrument_handle.query(command_string)
def read(self): # Implements visa read command
return self.instrument_handle.read()
def load_signal(self,V=0,name="VOLATILE"): # Instrument, Samples (floats) [-1;1], Name of the function
if self.connected:
if len(V)>self.memory:
print("ERROR: Too many samples. (MAXIMUM MEMORY SAMPLES: {}.)".format(self.memory))
print("Use V=V[:(-len(V)+self.memory)] to shorten the amount of samples.")
self.write("DISPlay:TEXT \"ERROR: Too many samples.\"")
time.sleep(2.5)
self.write("DISPlay:TEXT:CLEAR")
return
name=name.upper()
            if not (np.isscalar(V) and V == 0): # Only proceed if actual samples were passed in (the default is a bare 0)
                if np.amax(np.absolute(V)) != 1:
print("Normalizing Arbitrary Function...")
self.write("DISPlay:TEXT \"Normalizing Arbitrary Function...\"")
V=V/np.amax(np.absolute(V))
time.sleep(2)
self.write("DISPlay:TEXT:CLEAR")
if name=="VOLATILE":
print("Loading AF in volatile memory...")
self.write("DISPlay:TEXT \"Loading AF in volatile memory...\"")
charray=[]
for samples in V:
charray.append((samples)) # Adds a single element to the end of the list. Common error: does not return the new list, just modifies the original.
                V=str(charray) # Convert the list of values into a string
V=V.replace(V[0],"") # Take out '['
V=V.replace(V[-1],"") # Take out ']'
error="error"
while error != self.noerror:
error=self.query("SYSTem:ERRor?")
self.write("DATA VOLATILE,{}".format(V))
                # TODO: figure out how to verify that the load actually succeeded
if self.query("SYSTem:ERRor?") == self.noerror:
print("***LOAD SUCCESSFUL***")
self.write("DISPlay:TEXT \"***LOAD SUCCESSFUL***\"")
time.sleep(2.5)
self.write("DISPlay:TEXT:CLEAR")
else:
print("***LOAD FAILED***")
self.write("DISPlay:TEXT \"***LOAD FAILED***\"")
time.sleep(2.5)
self.write("DISPlay:TEXT:CLEAR")
return
else:
if self.query("DATA:NVOLatile:FREE?") != self.nospace:
if name[0].isalpha() != 1:
print("ERROR: Name must start with a letter.")
self.write("DISPlay:TEXT \"ERROR: Name must start with a letter.\"")
time.sleep(2.5)
self.write("DISPlay:TEXT:CLEAR")
return
                    if len(name)>self.namelen:
                        print("ERROR: Maximum name length is {} characters.".format(self.namelen))
                        self.write("DISPlay:TEXT \"ERROR: Maximum name length is {} characters.\"".format(self.namelen))
                        time.sleep(2.5)
                        print("Shortening name...")
                        self.write("DISPlay:TEXT \"Shortening name...\"")
                        name=name[:self.namelen]
time.sleep(0.2)
print("Name shortened.")
self.write("DISPlay:TEXT \"Name shortened.\"")
for i in range(0,len(name)):
if name[i] not in "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_":
name.replace(name[i],"")
catalog=self.query("DATA:CATalog?")
if name in catalog:
print("Another AF with the same name already exists in the catalog.")
print("Would you like to overwrite it? [Y/N]")
answer=input()
while answer.upper() not in "YN":
print("Please insert one of the options. [Y/N].")
answer=input()
if answer.upper()=="N":
print("***LOAD ABORTED***")
return;
print("Loading AF in volatile memory...")
self.write("DISPlay:TEXT \"Loading AF in volatile memory...\"")
charray=[]
for samples in V:
charray.append((samples)) # Adds a single element to the end of the list. Common error: does not return the new list, just modifies the original.
V=str(charray) # From floats to chars
V=V.replace(V[0],"") # Take out '['
V=V.replace(V[-1],"") # Take out ']'
error="error"
while error != self.noerror:
error=self.query("SYSTem:ERRor?")
self.write("DATA VOLATILE,{}".format(V))
if self.query("SYSTem:ERRor?") == self.noerror:
print("***LOAD SUCCESSFUL***")
self.write("DISPlay:TEXT \"***LOAD SUCCESSFUL***\"")
time.sleep(2.5)
self.write("DISPlay:TEXT:CLEAR")
else:
print("***LOAD FAILED***")
self.write("DISPlay:TEXT \"***LOAD FAILED***\"")
time.sleep(2.5)
self.write("DISPlay:TEXT:CLEAR")
return
name=name.replace(" ","_")
print("Copying AF to non-volatile memory...")
self.write("DISPlay:TEXT \"Copying AF to non-volatile memory...\"")
while error != self.noerror:
error=self.query("SYSTem:ERRor?")
self.write("DATA:COPY {}, VOLATILE".format(name))
time.sleep(0.5)
if self.query("SYSTem:ERRor?") == self.noerror:
print("***AF COPIED***")
self.write("DISPlay:TEXT \"***AF COPIED***\"")
time.sleep(2.5)
print("AF saved as \"{}\"".format(name))
time.sleep(2.5)
self.write("DISPlay:TEXT:CLEAR")
else:
print("***COPY FAILED***")
self.write("DISPlay:TEXT \"***COPY FAILED***\"")
time.sleep(2.5)
self.write("DISPlay:TEXT:CLEAR")
return
else:
print("Non-volatile memory is full. Need to erase one arbitrary function.")
self.write("DISPlay:TEXT \"Non-volatile memory is full. Need to erase one arbitrary function.\"")
time.sleep(2.5)
self.write("DISPlay:TEXT:CLEAR")
return
else:
print("Please connect the device using connect_device().")
def output_af(self,name="nonamewritten",amp=1,freq=1,Voffset=0): # Name of the Arbitrary Function, Amplitude in Vpp, Frequency in Hz, Voffset in V
if self.connected:
name=name.upper()
amp=np.absolute(amp)
freq=np.absolute(freq)
if name=="nonamewritten":
print("Must enter the name of the arbitrary function.")
print("Catalog:",self.query("DATA:CATalog?"))
return
catalog=self.query("DATA:CATalog?")
if name in catalog:
self.write("OUTPUT:LOAD INFinity") # High Z
if amp/2>self.vmax:
print("ERROR: VPP surpasses {}V.".format(self.vmax))
amp=self.vmax*2
print("New VPP: {}V".format(amp))
if np.absolute(Voffset)>self.vmax-amp/2:
print("ERROR: |VPP/2|+|Voffset| surpasses {}V.".format(self.vmax))
Voffset=(self.vmax-amp/2)*Voffset/np.absolute(Voffset)
print("New Voffset: {}V".format(Voffset))
if freq>self.freqmax:
print("ERROR: Frequency too high.")
freq=self.freqmax
print("New FREQ: {}kHz".format(freq/1000))
self.write("FUNCtion:USER {}".format(name)) # Selects which arbitrary function I want
self.write("FUNCtion USER") # Selects to use the USER function prevously chosen
self.write("FREQuency {}".format(freq)) # Sets frequency
self.write("VOLTage {}".format(amp)) # Sets Amplitude (in Vpp)
self.write("VOLTage:OFFSet {}".format(Voffset)) # Sets offset
self.write("DATA:TEXT \"{} selected.\"".format(name))
else:
print("Name isn't in catalog.")
print("Catalog:",self.query("DATA:CATalog?"))
return
else:
print("Please connect the device using connect_device().")
def af_attributes(self,name="nonamewritten"):
if self.connected:
if name=="nonamewritten":
print("Must enter the name of the arbitrary function.")
print("Catalog:",self.query("DATA:CATalog?"))
return
name=name.upper()
catalog=self.query("DATA:CATalog?")
if name in catalog:
print(f"{name} points:",self.query("DATA:ATTRibute:POINts? {}".format(name)))
print(f"{name} average:",self.query("DATA:ATTRibute:AVERage? {}".format(name)))
print(f"{name} cfactor:",self.query("DATA:ATTRibute:CFACtor? {}".format(name)))
print(f"{name} ptpeak:",self.query("DATA:ATTRibute:PTPeak? {}".format(name)))
else:
print("Please connect the device using connect_device().")
def erase_af(self,name):
if self.connected:
catalog=self.query("DATA:CATalog?")
name=name.upper()
if name in catalog:
print("Are you sure yoou want to erase {}? [Y/N]".format(name))
answer=input()
while answer.upper() != "Y" and answer.upper() != "N":
print("Please insert one of the options. [Y/N].")
answer=input()
if answer.upper()=="N":
print("***ERASE ABORTED***")
return
self.write("OUTPUT OFF")
self.write("FUNCtion SIN") # Selects to use the USER function prevously chosen
error="error"
while error != self.noerror:
error=self.query("SYSTem:ERRor?")
self.write("DATA:DELete {}".format(name))
if self.query("SYSTem:ERRor?") == self.noerror:
print("***ERASE SUCCESSFUL***")
self.write("DISPlay:TEXT \"***ERASE SUCCESSFUL***\"")
time.sleep(2.5)
self.write("DISPlay:TEXT:CLEAR")
else:
print("***ERASE FAILED***")
self.write("DISPlay:TEXT \"***ERASE FAILED***\"")
time.sleep(2.5)
self.write("DISPlay:TEXT:CLEAR")
return
else:
print("Please connect the device using connect_device().")
def display_text(self,text, tim=0):
if self.connected:
if len(text)>self.lentext:
print("ERROR: Maximum char quantity ({}) exceeded.".format(self.lentext))
self.write("DISPlay:TEXT \"ERROR: Maximum char quantity ({}) exceeded.\"")
else:
self.write("DISPlay:TEXT \"{}\"".format(text))
if tim > 0:
time.sleep(tim)
self.write("DISPlay:TEXT:CLEAR")
else:
print("Please connect the device using connect_device().")
def output_on(self):
if self.connected:
self.write("OUTPUT ON")
else:
print("Please connect the device using connect_device().")
def output_off(self):
if self.connected:
self.write("OUTPUT OFF")
else:
print("Please connect the device using connect_device().")
def catalog(self):
if self.connected:
catalog=self.query("DATA:CATalog?")
print("Catalog:",catalog)
else:
print("Please connect the device using connect_device().")
def connect_device(): # Not sure yet whether this works reliably
rm = visa.ResourceManager('@ni') # @ni loads national instruments, @py loads pyvisa
print(rm.list_resources())
inst_handle = rm.open_resource(rm.list_resources()[0])
inst_handle.timeout=40000
connect=instrument(inst_handle)
connect.connected=1
connect.write("*CLS")
connect.noerror=connect.query("SYSTem:ERRor?")
print("***INSTRUMENT CONNECTED***")
connect.write("DISPlay:TEXT \"***INSTRUMENT CONNECTED***\"")
time.sleep(1.5)
return connect
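# Illustrative usage sketch (requires a VISA-visible Agilent 33220A; the
# waveform, name and output settings below are arbitrary examples):
# import numpy as np
# gen = connect_device()
# samples = np.sin(np.linspace(0, 2 * np.pi, 1000))
# gen.load_signal(samples, name="MYSINE")
# gen.output_af(name="MYSINE", amp=2.0, freq=1000, Voffset=0)
# gen.output_on()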
| 2.71875
| 3
|
AnoutherFile.py
|
batmansdu/Git_learn
| 0
|
12778280
|
print("Another!")
"This is the develop branch for developing some new features."
| 1.367188
| 1
|
miniworld/model/spatial/Node/DefaultNode.py
|
miniworld-project/miniworld_core
| 5
|
12778281
|
# encoding: utf-8
from miniworld.model.spatial.MovementPattern.RandomWalk import RandomWalk
from .AbstractNode import AbstractNode
__author__ = "<NAME>"
__email__ = "uni at lamp<EMAIL>"
class DefaultNode(AbstractNode):
"""
Attributes
----------
crnt_movement_pattern : AbstractMovementPattern
dict_of_movement_pattern : dict<String, AbstractMovementPattern>
"""
def __init__(self, node_id):
super(DefaultNode, self).__init__(node_id)
self.crnt_movement_pattern = RandomWalk()
self.dict_of_movement_pattern["RandomWalk"] = self.crnt_movement_pattern
def __check_conditions(self):
pass
| 2.453125
| 2
|
exercises/CursoemVideo/ex075.py
|
arthurguerra/cursoemvideo-python
| 0
|
12778282
|
a = (int(input('Enter a number: ')),
     int(input('Enter another number: ')),
     int(input('Enter one more number: ')),
     int(input('Enter the last number: ')))
print(f'You entered the values {a}')
print(f'The number 9 appeared {a.count(9)} times')
if a.count(3) != 0:
    print(f'The value 3 appeared at position {a.index(3)+1}')
else:
    print('The value 3 was not entered at any position')
print('The even values entered were: ', end='')
for count in a:
if count % 2 == 0:
print(count, end=' ')
| 3.984375
| 4
|
Python3/576.out-of-boundary-paths.py
|
610yilingliu/leetcode
| 0
|
12778283
|
<filename>Python3/576.out-of-boundary-paths.py
#
# @lc app=leetcode id=576 lang=python3
#
# [576] Out of Boundary Paths
#
# @lc code=start
class Solution:
def findPaths(self, m: int, n: int, N: int, i: int, j: int):
dp = [[[0] * n for _ in range(m)] for _ in range(N + 1)]
for s in range(1, N + 1):
for x in range(m):
for y in range(n):
v1 = 1 if x == 0 else dp[s - 1][x - 1][y]
v2 = 1 if x == m - 1 else dp[s - 1][x + 1][y]
v3 = 1 if y == 0 else dp[s - 1][x][y - 1]
v4 = 1 if y == n - 1 else dp[s - 1][x][y + 1]
dp[s][x][y] = (v1 + v2 + v3 + v4) % (10**9 + 7)
return dp[N][i][j]
# @lc code=end
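# Quick self-check (illustrative, not part of the LeetCode submission): the
# official example with m=2, n=2, N=2 starting at (0, 0) has 6 paths out of bounds.
if __name__ == "__main__":
    assert Solution().findPaths(2, 2, 2, 0, 0) == 6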
| 3.3125
| 3
|
devday/event/models.py
|
ronnyfriedland/devday_website
| 0
|
12778284
|
from django.apps import apps
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
class EventManager(models.Manager):
def current_event(self):
return self.filter(published=True).order_by("-start_time").first()
def current_event_id(self):
e = self.current_event()
if e:
return e.id
return None
def current_registration_open(self):
e = self.current_event()
if e:
return e.registration_open
return False
def current_submission_open(self):
e = self.current_event()
if e:
return e.submission_open
return False
def all_but_current(self):
return self.exclude(id=self.current_event_id())
def create_event(self, title, **kwargs):
event = self.model(title=title, **kwargs)
event.save()
return event
@python_2_unicode_compatible
class Event(models.Model):
title = models.CharField(verbose_name=_("Event title"), max_length=256, unique=True)
slug = models.SlugField(verbose_name=_("Short name for URLs"), unique=True)
description = models.TextField(verbose_name=_("Description"))
location = models.TextField(verbose_name=_("Location"), blank=True)
full_day = models.BooleanField(verbose_name=_("Full day event"), default=False)
start_time = models.DateTimeField(verbose_name=_("Start time"))
end_time = models.DateTimeField(verbose_name=_("End time"))
published = models.BooleanField(verbose_name=_("Published"), default=True)
registration_open = models.BooleanField(
verbose_name=_("Registration Open"), default=False
)
submission_open = models.BooleanField(
verbose_name=_("Submission Open"), default=False
)
voting_open = models.BooleanField(
verbose_name=_("Voting Open"),
default=False,
help_text=_("Attendees can vote for their preferred sessions"),
)
sessions_published = models.BooleanField(
verbose_name=_("Grid Published"), default=False
)
talkformat = models.ManyToManyField(
"talk.TalkFormat", verbose_name=_("Talk Formats")
)
objects = EventManager()
class Meta:
verbose_name = _("Event")
verbose_name_plural = _("Events")
def registration_count(self):
"""Returns the count of registered attendees."""
a = apps.get_app_config("attendee").get_model("Attendee")
return a.objects.filter(event=self).count()
registration_count.short_description = _("Registration Count")
@property
def feedback_open(self):
"""
:return: True if the event has started and feedback is allowed
"""
return (
self.published
and self.start_time is not None
and self.start_time <= timezone.now()
)
def is_started(self):
return self.start_time < timezone.now()
def is_running(self):
return self.start_time < timezone.now() < self.end_time
def has_ended(self):
return self.end_time < timezone.now()
def is_raffle_available(self):
return not self.has_ended()
def get_absolute_url(self):
return reverse("session_list", kwargs={"event": self.slug})
def save(
self, force_insert=False, force_update=False, using=None, update_fields=None
):
if not self.slug:
self.slug = slugify(self.title)
super().save(force_insert, force_update, using, update_fields)
def __str__(self):
return self.title
| 2.203125
| 2
|
codegenandtransformerapi/models/template.py
|
farhan-apimatic/2bb956fd-e451-4e82-beb3-5cae661a01be
| 0
|
12778285
|
# -*- coding: utf-8 -*-
"""
codegenandtransformerapi.models.template
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io )
"""
class Template(object):
"""Implementation of the 'template' enum.
TODO: type enum description here.
Attributes:
CS_PORTABLE_NET_LIB: TODO: type description here.
JAVA_ECLIPSE_JRE_LIB: TODO: type description here.
JAVA_GRADLE_ANDROID_LIB: TODO: type description here.
OBJC_COCOA_TOUCH_IOS_LIB: TODO: type description here.
ANGULAR_JAVASCRIPT_LIB: TODO: type description here.
RUBY_GENERIC_LIB: TODO: type description here.
ENUM_PYTHON_GENERIC_LIB: TODO: type description here.
ENUM_PHP_GENERIC_LIB: TODO: type description here.
ENUM_NODE_JAVASCRIPT_LIB: TODO: type description here.
ENUM_GO_GENERIC_LIB: TODO: type description here.
"""
CS_PORTABLE_NET_LIB = 'cs_portable_net_lib'
JAVA_ECLIPSE_JRE_LIB = 'java_eclipse_jre_lib'
JAVA_GRADLE_ANDROID_LIB = 'java_gradle_android_lib'
OBJC_COCOA_TOUCH_IOS_LIB = 'objc_cocoa_touch_ios_lib'
ANGULAR_JAVASCRIPT_LIB = 'angular_javascript_lib'
RUBY_GENERIC_LIB = 'ruby_generic_lib'
ENUM_PYTHON_GENERIC_LIB = 'python_generic_lib'
ENUM_PHP_GENERIC_LIB = 'php_generic_lib'
ENUM_NODE_JAVASCRIPT_LIB = 'node_javascript_lib'
ENUM_GO_GENERIC_LIB = 'go_generic_lib'
| 1.351563
| 1
|
tensorkit/settings_.py
|
lizeyan/tensorkit
| 0
|
12778286
|
<reponame>lizeyan/tensorkit<gh_stars>0
from enum import Enum
from typing import *
from mltk import Config, ConfigField, field_checker
__all__ = ['JitMode', 'Settings', 'settings']
KNOWN_BACKENDS = ('PyTorch', 'TensorFlow')
def auto_choose_backend() -> Optional[str]:
"""
Choose the backend automatically.
If the dependencies of one backend has been imported, then it will be
chosen as the preferred backend. If the dependencies of multiple backends
have been imported, then the backend will be chosen according to the
following priority order:
PyTorch, TensorFlow
Returns:
The backend name, or `None` if no backend can be automatically chosen.
"""
import sys
activated_backends = []
for name, module in sys.modules.items():
for backend in KNOWN_BACKENDS:
if backend.lower() in name.lower() and backend not in activated_backends:
activated_backends.append(backend)
for backend in KNOWN_BACKENDS:
if backend in activated_backends:
return backend
class JitMode(str, Enum):
"""Enum of the JIT mode."""
ALL = 'all'
"""Enable JIT on both functions and modules (layers)."""
FUNCTION_ONLY = 'function_only'
"""Enable JIT only on functions."""
NONE = 'none'
"""Disable JIT."""
class Settings(Config):
backend: str = ConfigField(
default=auto_choose_backend() or KNOWN_BACKENDS[0],
choices=KNOWN_BACKENDS,
envvar='TENSORKIT_BACKEND',
        description='The backend to use. '
'Changing the value of this configuration at runtime '
'will not take effect.'
)
float_x: str = ConfigField(
default='float32',
choices=['float32', 'float64', 'float16'],
envvar='TENSORKIT_FLOATX',
description='The default dtype for floating-point numbers. '
'Changing the value of this configuration at runtime may '
'not take effect.'
)
validate_tensors: bool = ConfigField(
default=False,
envvar='TENSORKIT_VALIDATE_TENSORS',
description='Whether or not to perform time-consuming validation on '
'tensors for input arguments of functions or classes, '
'and on intermediate computation results, to ensure there '
'are no numerical issues (i.e., no NaN or Infinity values), '
'and no semantic or logical errors (e.g., `low` > `high`)?'
)
jit_mode: Optional[JitMode] = ConfigField(
default=None,
envvar='TENSORKIT_JIT_MODE',
description='The mode of JIT engine. If not specified, determined by '
'the backend. '
'Changing the value of this configuration at runtime will '
'not take effect.'
)
sparse_enable_jit: Optional[bool] = ConfigField(
default=None,
envvar='TENSORKIT_SPARSE_ENABLE_JIT',
description='Whether or not to enable JIT engine on sparse functions '
'and modules? If not specified, determined by the backend. '
'Changing the value of this configuration at runtime will '
'not take effect.'
)
settings = Settings()
"""The global configuration for TensorKit."""
| 2.4375
| 2
|
leet/array/maxSubArrayLen.py
|
peterlamar/python-cp-cheatsheet
| 140
|
12778287
|
from typing import List
class Solution:
# Maximum Size Subarray Sum Equals k
def maxSubArrayLen(self, nums: List[int], k: int) -> int:
hm = {0:-1}
ps = 0
rtn = 0
for i in range(len(nums)):
ps += nums[i]
if ps not in hm:
hm[ps] = i
if ps - k in hm:
rtn = max(rtn, i-hm[ps-k])
return rtn
def maxSubArrayLen(self, nums: List[int], k: int) -> int:
hm = {0:-1}
ps = rtn = 0
for i, n in enumerate(nums):
ps += n
if ps not in hm:
hm[ps] = i
if ps-k in hm:
rtn = max(rtn, i-hm[ps-k])
return rtn
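# Quick self-check (illustrative, not part of the cheatsheet): for the classic
# example nums = [1, -1, 5, -2, 3] and k = 3, the longest qualifying subarray has length 4.
if __name__ == "__main__":
    assert Solution().maxSubArrayLen([1, -1, 5, -2, 3], 3) == 4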
| 2.890625
| 3
|
cotede/qctests/cars_normbias.py
|
BillMills/CoTeDe
| 0
|
12778288
|
<filename>cotede/qctests/cars_normbias.py
# -*- coding: utf-8 -*-
"""
"""
from datetime import timedelta
import logging
import numpy as np
from numpy import ma
from oceansdb import CARS
from .qctests import QCCheckVar
module_logger = logging.getLogger(__name__)
class CARS_NormBias(QCCheckVar):
"""Compares measuremnts with CARS climatology
Notes
-----
* Although using standard error is a good idea, the default is to not use
      standard error to estimate the bias, following the traditional approach.
      This can have a significant impact in the deep oceans and regions lacking
extensive sampling.
"""
flag_bad = 3
use_standard_error = False
def __init__(self, data, varname, cfg=None, autoflag=True):
try:
self.use_standard_error = cfg["use_standard_error"]
except (KeyError, TypeError):
module_logger.debug("use_standard_error undefined. Using default value")
super().__init__(data, varname, cfg, autoflag)
def keys(self):
return self.features.keys() + ["flag_%s" % f for f in self.flags.keys()]
def set_features(self):
if ("LATITUDE" in self.data.attributes.keys()) and (
"LONGITUDE" in self.data.attributes.keys()
):
kwargs = {
"lat": self.data.attributes["LATITUDE"],
"lon": self.data.attributes["LONGITUDE"],
}
if ("LATITUDE" in self.data.keys()) and ("LONGITUDE" in self.data.keys()):
dLmax = max(
data["LATITUDE"].max() - data["LATITUDE"].min(),
data["LONGITUDE"].max() - data["LONGITUDE"].min(),
)
# Only use each measurement coordinate if it is spread.
if dLmax >= 0.01:
kwargs = {
"lat": self.data["LATITUDE"],
"lon": self.data["LONGITUDE"],
"alongtrack_axis": ["lat", "lon"],
}
if "DEPTH" in self.data.keys():
depth = self.data["DEPTH"]
elif "PRES" in self.data.keys():
depth = self.data["PRES"]
try:
doy = int(self.data.attributes["date"].strftime("%j"))
except:
doy = int(self.data.attributes["datetime"].strftime("%j"))
db = CARS()
if self.varname[-1] == "2":
vtype = self.varname[:-1]
else:
vtype = self.varname
idx = ~ma.getmaskarray(depth) & np.array(depth >= 0)
cars = db[vtype].extract(
var=["mn", "std_dev"], doy=doy, depth=depth[idx], **kwargs
)
        if not idx.all():
for v in cars.keys():
tmp = ma.masked_all(depth.shape, dtype=cars[v].dtype)
tmp[idx] = cars[v]
cars[v] = tmp
self.features = {"cars_mean": cars["mn"], "cars_std": cars["std_dev"]}
self.features["cars_bias"] = (
self.data[self.varname] - self.features["cars_mean"]
)
# if use_standard_error = True, the comparison with the climatology
# considers the standard error, i.e. the bias will be only the
# ammount above the standard error range.
assert not self.use_standard_error, "Sorry, I'm not ready for that"
if self.use_standard_error is True:
standard_error = (
self.features["cars_std"] / self.features["cars_nsamples"] ** 0.5
)
idx = np.absolute(self.features["cars_bias"]) <= standard_error
self.features["cars_bias"][idx] = 0
idx = np.absolute(self.features["cars_bias"]) > standard_error
self.features["cars_bias"][idx] -= (
np.sign(self.features["cars_bias"][idx]) * standard_error[idx]
)
self.features["cars_normbias"] = (
self.features["cars_bias"] / self.features["cars_std"]
)
def test(self):
        # 3 is the minimum possible to estimate the std, but a higher value should be used.
try:
min_samples = self.cfg["min_samples"]
except KeyError:
min_samples = 3
self.flags = {}
threshold = self.cfg["threshold"]
assert (np.size(threshold) == 1) and (threshold is not None)
flag = np.zeros(self.data[self.varname].shape, dtype="i1")
normbias_abs = np.absolute(self.features["cars_normbias"])
ind = np.nonzero(normbias_abs <= threshold)
flag[ind] = self.flag_good
ind = np.nonzero(normbias_abs > threshold)
flag[ind] = self.flag_bad
# Flag as 9 any masked input value
flag[ma.getmaskarray(self.data[self.varname])] = 9
self.flags["cars_normbias"] = flag
| 2.5625
| 3
|
cli/tests/__init__.py
|
2O4/keyt
| 0
|
12778289
|
<reponame>2O4/keyt<filename>cli/tests/__init__.py
"""Keyt tests."""
| 1.039063
| 1
|
projects/webptspy/apps/tcase/views/api.py
|
codelieche/testing
| 2
|
12778290
|
<reponame>codelieche/testing
# -*- coding:utf-8 -*-
"""
This module mainly provides:
1. Getting the most recent execute id from a case id
2. Creating a new execute if the most recent execute id is empty
"""
from datetime import datetime
from django.shortcuts import get_object_or_404
from django.http import JsonResponse
from django.views.generic import View
from django.core.exceptions import PermissionDenied
from utils.mixins import CsrfExemptMixin
from ..models import Case, Execute
class CaseExecute(CsrfExemptMixin, View):
"""
    View that fetches an Execute by Case id
    # For POST requests: if no execute exists yet, create one and return the new execute_id
"""
def get(self, request, pk):
case = get_object_or_404(Case, pk=pk)
        # Check whether an execute id already exists
if case.execute_id:
return JsonResponse({'execute_id': case.execute_id})
else:
return JsonResponse({'execute_id': None})
def post(self, request, pk):
case = get_object_or_404(Case, pk=pk)
        # Check whether an execute id already exists
need_create_execute = False
if case.execute_id:
            # Use the execute status to decide whether this execute is still usable
execute = Execute.objects.filter(pk=case.execute_id).first()
# if execute and execute.status in ['failure', 'success', 'stoped']:
if execute and execute.status in ['created', 'ready', 'running']:
need_create_execute = False
else:
need_create_execute = True
else:
need_create_execute = True
if need_create_execute:
            # There is no usable execute_id yet
            # Create one and return it
execute_name = case.name + datetime.now().strftime("%Y%m%d%H%M%S")
            # Since this request comes in via POST without user authentication, the executor is not set for now
execute = Execute.objects.create(case=case, name=execute_name)
case.execute_id = execute.id
            # A new execute was created, so change the Case status to ready
case.status = 'ready'
case.save()
return JsonResponse({'execute_id': case.execute_id})
class ExecuteUpdateStatus(CsrfExemptMixin, View):
"""
    Update the status of a Case:
    Note: this view is called from tresult.urls.api_execute
    # When a test case has just been saved, its status is `created`
    # Once the case_id.py file has been generated, the status changes to `ready`
    # When the execute starts, the Case status is also changed to `running`
    # When the execute finishes, the Case status is also changed to success / failure / stoped
    # A Case can also be deleted; this still needs improvement
"""
def post(self, request, pk):
execute = get_object_or_404(Execute, pk=pk)
        # Get the Case that corresponds to this execute
case = execute.case
status = request.POST.get('status', '')
status_tuple = ('created', 'ready', 'running',
'failure', 'success', 'stoped')
if status and status in status_tuple:
            # TODO: add some security validation here without requiring login, e.g. pass a secret key with every POST
execute.status = status
if status in ('running', 'stoped', 'failure', 'success'):
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if status == 'running':
execute.time_start = now
else:
execute.time_end = now
execute.save()
if status in ('running', 'failure', 'success', 'stoped'):
                # When the status is running, failure or success, the case status also needs to be updated
case.status = status
case.save(update_fields=('status',))
return JsonResponse({"sucess": True})
class CaseDelete(CsrfExemptMixin, View):
"""
    View for deleting a Case
"""
    # Only superusers are allowed to delete
def post(self, request):
if request.user.is_superuser:
case_id = request.POST.get('case', '')
case = get_object_or_404(Case, pk=case_id)
case.status = 'delete'
case.save()
return JsonResponse({'status': "success"})
else:
raise PermissionDenied()
| 2.265625
| 2
|
scripts/setup_scenes_example.py
|
mrudorfer/burg-toolkit
| 0
|
12778291
|
"""
===================
Setup Scene Example
===================
This script provides examples for loading, using and saving an object library based on a YAML file.
We will step through the individual commands, generate thumbnails, VHACD meshes, URDF files for all objects.
After that, we will compute the stable poses of each object.
We randomly sample scenes exploiting the stable poses of the objects.
We can interact with the object instances and move them, and put them into a simulator so they attain a resting
pose again.
"""
import argparse
import os
import burg_toolkit as burg
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--lib', type=str,
default='/home/rudorfem/datasets/object_libraries/test_library/test_library.yaml',
help='path to object library file')
parser.add_argument('--skip', action='store_true', default=False,
help='use this option to skip through the user interactions')
parser.add_argument('--override', action='store_true', default=False,
help='if activated, all object data will be regenerated even if it already exists')
return parser.parse_args()
def wait_for_user(skip=False):
if skip:
return
print('press any key to continue')
input()
def main(object_library_fn, skip, override):
object_library = burg.ObjectLibrary.from_yaml(object_library_fn)
library_dir = os.path.dirname(object_library_fn)
print(object_library) # prints short version
object_library.print_details() # gives more detailed output about contained objects
print('*************************')
print('next action: generate thumbnail files for all objects')
wait_for_user(skip)
thumbnail_dir = os.path.join(library_dir, 'thumbnails')
object_library.generate_thumbnails(thumbnail_dir, override=override)
print(f'thumbnails created in {thumbnail_dir}')
print('*************************')
print('next action: generate vhacd meshes')
wait_for_user(skip)
vhacd_dir = os.path.join(library_dir, 'vhacd')
object_library.generate_vhacd_files(vhacd_dir, override=override)
print(f'vhacd files created in {vhacd_dir}')
print('*************************')
print('next action: generate urdf files')
wait_for_user(skip)
urdf_dir = os.path.join(library_dir, 'urdf')
object_library.generate_urdf_files(urdf_dir, use_vhacd=True, override=override)
print(f'urdf files created in {urdf_dir}')
print('*************************')
print('next action: compute stable poses for objects and verify with vhacd in simulation')
wait_for_user(skip)
object_library.compute_stable_poses(verify_in_sim=True, override=override)
print('stable poses computed.')
print('*************************')
print('all information in object library should be completed now:')
object_library.print_details()
print('*************************')
new_lib_fn = f'{object_library_fn[:-5]}_roundtrip.yaml'
print(f'next action: save object library to {new_lib_fn}')
wait_for_user(skip)
object_library.to_yaml(new_lib_fn)
print('object library saved.')
print('*************************')
print('next action: sampling scenes with object instances in stable poses, and visualise.')
print('note: you need to close the open3d window to continue. (not the simulation window later on, though!)')
dim = (1, 0.5)
n_instances = 5
print(f'{n_instances} instances will be placed in ground area of {dim}')
wait_for_user(skip)
scene = burg.sampling.sample_scene(
object_library,
ground_area=dim,
instances_per_scene=n_instances,
instances_per_object=1
)
burg.visualization.show_geometries([scene])
print('*************************')
print('next action: simulate this scene to make sure it is at rest, then visualise again.')
wait_for_user(skip)
sim = burg.scene_sim.SceneSimulator(verbose=True) # verbose shows the simulator GUI, slower than real-time
sim.simulate_scene(scene) # the poses of all instances in the scene are automatically updated by the simulator
sim.dismiss() # can also reuse, then the window stays open
burg.visualization.show_geometries([scene])
print('*************************')
print('next action: manually change the pose of an object instance, visualise, simulate, visualise.')
wait_for_user(skip)
instance = scene.objects[0]
# we lift it up a bit to avoid any collisions with other objects
instance.pose[2, 3] = instance.pose[2, 3] + 0.2
burg.visualization.show_geometries([scene])
sim = burg.scene_sim.SceneSimulator(verbose=True)
sim.simulate_scene(scene)
burg.visualization.show_geometries([scene])
sim.dismiss()
print('*************************')
print('that was all, thank you and good bye.')
if __name__ == "__main__":
args = parse_args()
main(args.lib, args.skip, args.override)
| 2.78125
| 3
|
leetcode/python/easy/p777_canTransform.py
|
kefirzhang/algorithms
| 0
|
12778292
|
class Solution:
def canTransform(self, start: str, end: str) -> bool:
len1 = len(start)
len2 = len(end)
if len1 != len2:
return False
i = 0
j = 0
while i < len1 and j < len2:
while start[i] == 'X' and i < len1 - 1:
i += 1
while end[j] == 'X' and j < len2 - 1:
j += 1
if start[i] != end[j]:
return False
elif start[i] == 'R':
if i > j:
return False
elif start[i] == 'L':
if i < j:
return False
i += 1
j += 1
return True
slu = Solution()
print(slu.canTransform("XXXXXRXXXX", "RXXXXXXXXX"))
"XXXXXLXXXX"
"LXXXXXXXXX"
"XXRXXLXXXX"
"XXXXRXXLXX"
"XXXXXLXXXX"
"LXXXXXXXXX"
'''
In a string composed of the three characters 'L', 'R' and 'X' (for example "RXXLRXRXL"), we can perform move operations.
A move consists of replacing one "XL" with "LX", or replacing one "RX" with "XR". Given a start string start and an
end string end, return True if and only if there exists a sequence of moves that transforms start into end.
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/swap-adjacent-in-lr-string
Copyright belongs to LeetCode China (领扣网络). For commercial reprints contact the official authorization; for non-commercial reprints cite the source.
'''
| 3.1875
| 3
|
lzproduction/sql/tables/Requests.py
|
alexanderrichards/LZProduction
| 2
|
12778293
|
"""Requests Table."""
import json
import logging
from datetime import datetime
import cherrypy
from sqlalchemy import Column, Integer, String, TIMESTAMP, ForeignKey, Enum
from sqlalchemy.orm import relationship
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from lzproduction.utils.collections import subdict
from ..utils import db_session
from ..statuses import LOCALSTATUS
from .SQLTableBase import SQLTableBase
from .JSONTableEncoder import JSONTableEncoder
from .Users import Users
from .ParametricJobs import ParametricJobs
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@cherrypy.expose
class Requests(SQLTableBase):
"""Requests SQL Table."""
__tablename__ = 'requests'
id = Column(Integer, primary_key=True) # pylint: disable=invalid-name
requester_id = Column(Integer, ForeignKey('users.id'), nullable=False)
request_date = Column(String(250), nullable=False)
source = Column(String(250), nullable=False)
detector = Column(String(250), nullable=False)
sim_lead = Column(String(250), nullable=False)
status = Column(Enum(LOCALSTATUS), nullable=False, default=LOCALSTATUS.Requested)
description = Column(String(250), nullable=False)
timestamp = Column(TIMESTAMP, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow)
parametricjobs = relationship("ParametricJobs", back_populates="request")
def submit(self):
"""Submit Request."""
with db_session() as session:
parametricjobs = session.query(ParametricJobs).filter_by(request_id=self.id).all()
session.expunge_all()
session.merge(self).status = LOCALSTATUS.Submitting
logger.info("Submitting request %s", self.id)
submitted_jobs = []
try:
for job in parametricjobs:
job.submit()
submitted_jobs.append(job)
        except Exception:
logger.exception("Exception while submitting request %s", self.id)
logger.info("Resetting associated ParametricJobs")
for job in submitted_jobs:
job.reset()
def delete_parametric_jobs(self, session):
"""Delete associated ParametricJob jobs."""
logger.info("Deleting ParametricJobs for Request id: %s", self.id)
parametric_jobs = session.query(ParametricJobs)\
.filter_by(request_id=self.id)
for job in parametric_jobs.all():
job.delete_dirac_jobs(session)
parametric_jobs.delete(synchronize_session=False)
def update_status(self):
"""Update request status."""
with db_session() as session:
parametricjobs = session.query(ParametricJobs).filter_by(request_id=self.id).all()
session.expunge_all()
statuses = []
for job in parametricjobs:
try:
statuses.append(job.update_status())
            except Exception:
logger.exception("Exception updating ParametricJob %s", job.id)
status = max(statuses or [self.status])
if status != self.status:
with db_session(reraise=False) as session:
session.merge(self).status = status
logger.info("Request %s moved to state %s", self.id, status.name)
@staticmethod
def GET(reqid=None): # pylint: disable=invalid-name
"""REST Get method."""
logger.debug("In GET: reqid = %s", reqid)
requester = cherrypy.request.verified_user
with db_session() as session:
user_requests = session.query(Requests).filter_by(requester_id=requester.id)
# Get all requests.
if reqid is None:
if requester.admin:
all_requests = session.query(Requests, Users)\
.join(Users, Requests.requester_id == Users.id)\
.all()
# could make a specialised encoder for this.
return json.dumps({'data': [dict(request, requester=user.name, status=request.status.name)
for request, user in all_requests]},
cls=JSONTableEncoder)
return json.dumps({'data': user_requests.all()}, cls=JSONTableEncoder)
# Get specific request.
if requester.admin:
user_requests = session.query(Requests)
request = user_requests.filter_by(id=reqid).first()
return json.dumps({'data': request}, cls=JSONTableEncoder)
@staticmethod
def DELETE(reqid): # pylint: disable=invalid-name
"""REST Delete method."""
logger.debug("In DELETE: reqid = %s", reqid)
if cherrypy.request.verified_user.admin:
with db_session() as session:
logger.info("Deleting Request id: %s", reqid)
try:
request = session.query(Requests).filter_by(id=reqid).one()
except NoResultFound:
logger.warning("No Request found with id: %s", reqid)
except MultipleResultsFound:
logger.error("Multiple Requests found with id: %s!", reqid)
else:
request.delete_parametric_jobs(session)
session.delete(request)
return Requests.GET()
@staticmethod
def PUT(reqid, **kwargs): # pylint: disable=invalid-name
"""REST Put method."""
logger.debug("In PUT: reqid = %s, kwargs = %s", reqid, kwargs)
requester = cherrypy.request.verified_user
status_update = kwargs.pop('status', None)
with db_session() as session:
query = session.query(Requests).filter_by(id=reqid)
if requester.admin and status_update == 'Approved':
query.update(subdict(kwargs, ('description',
'sim_lead',
'detector',
'source'), status=LOCALSTATUS.Approved))
return Requests.GET()
if not requester.admin:
query = query.filter_by(requester_id=requester.id)
query.update(subdict(kwargs, ('description', 'sim_lead', 'detector', 'source')))
return Requests.GET()
@staticmethod
def POST(**kwargs): # pylint: disable=invalid-name
"""REST Post method."""
logger.debug("In POST: kwargs = %s", kwargs)
selected_macros = kwargs.pop('selected_macros', [])
if not isinstance(selected_macros, list):
selected_macros = [selected_macros]
with db_session() as session:
request = Requests(**subdict(kwargs, Requests.columns,
requester_id=cherrypy.request.verified_user.id,
request_date=datetime.now().strftime('%d/%m/%Y'),
status=LOCALSTATUS.Requested))
session.add(request)
session.flush()
session.refresh(request)
parametricjobs = []
if 'app' in kwargs:
for macro in selected_macros:
path, njobs, nevents, seed = macro.split()
parametricjobs.append(subdict(kwargs, ParametricJobs.columns,
request_id=request.id,
status=LOCALSTATUS.Requested,
macro=path,
njobs=njobs,
nevents=nevents,
seed=seed))
elif kwargs.viewkeys() & {'reduction_lfn_inputdir',
'der_lfn_inputdir',
'lzap_lfn_inputdir'}:
parametricjobs.append(subdict(kwargs, ParametricJobs.columns,
request_id=request.id,
status=LOCALSTATUS.Requested))
if parametricjobs:
session.bulk_insert_mappings(ParametricJobs, parametricjobs)
else:
logger.warning("No ParametricJobs added to the DB.")
return Requests.GET()
| 2.34375
| 2
|
tunelo/common/wsgi.py
|
ChameleonCloud/tunelo
| 0
|
12778294
|
from oslo_concurrency import processutils
from oslo_service import service
from oslo_service import wsgi
from tunelo.flask import create_app
from tunelo.common import exception
from tunelo.conf import CONF
_MAX_DEFAULT_WORKERS = 4
class WSGIService(service.ServiceBase):
"""Provides ability to launch tunelo API from wsgi app."""
def __init__(self, name, use_ssl=False):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param use_ssl: Wraps the socket in an SSL context if True.
:returns: None
"""
self.name = name
self.app = create_app()
self.workers = (
CONF.api.api_workers
# NOTE(dtantsur): each worker takes a substantial amount of memory,
# so we don't want to end up with dozens of them.
or min(processutils.get_worker_count(), _MAX_DEFAULT_WORKERS)
)
if self.workers and self.workers < 1:
raise exception.ConfigInvalid(
f"api_workers value of {self.workers} is invalid, "
f"must be greater than 0."
)
self.server = wsgi.Server(
CONF,
name,
self.app,
host=CONF.api.host_ip,
port=CONF.api.port,
use_ssl=use_ssl,
)
def start(self):
"""Start serving this service using loaded configuration.
:returns: None
"""
self.server.start()
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self.server.reset()
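# Hypothetical launch sketch (not part of the original module): shows how an
# entry point would typically run the WSGIService above with the oslo.service
# process launcher. The service name 'tunelo_api' is an assumed example.
def _example_launch():
    server = WSGIService('tunelo_api')
    launcher = service.launch(CONF, server, workers=server.workers)
    launcher.wait()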
| 2.21875
| 2
|
servers/flashx.py
|
sodicarus/channels
| 0
|
12778295
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa-PureITA - XBMC Plugin
# Connector for flashx
# http://www.mimediacenter.info/foro/viewtopic.php?f=36&t=7808
# Alfa-Addon / Alfa-PureITA
# ------------------------------------------------------------
import base64
import os
import re
import time
import urllib
import xbmc
from core import config
from core import httptools
from core import logger
from core import scrapertools
from lib import jsunpack
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
pfxfx = ""
data = httptools.downloadpage(page_url, cookies=False).data
data = data.replace("\n", "")
cgi_counter = scrapertools.find_single_match(data,
"""(?is)src=.(https://www.flashx.../counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = cgi_counter.replace("%0A", "").replace("%22", "")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.../dl[^"]+')
    # Obtain the f and fxfx parameters
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//", "/"),
"""(?is)(flashx.../js\w+/c\w+.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'", "").replace(" ", "")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
for f, v in matches:
pfxfx += f + "=" + v + "&"
logger.info("mfxfxfx1= %s" % js_fxfx)
logger.info("mfxfxfx2= %s" % pfxfx)
if pfxfx == "":
pfxfx = "f=fail&fxfx=6"
coding_url = 'https://www.flashx.co/flashx.php?%s' % pfxfx
# {f: 'y', fxfx: '6'}
bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)span')
flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(bloque, 'name="fname" value="([^"]+)"')
hash_f = scrapertools.find_single_match(bloque, 'name="hash" value="([^"]+)"')
imhuman = scrapertools.find_single_match(bloque, "value='([^']+)' name='imhuman'")
post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=%s' % (
flashx_id, urllib.quote(fname), hash_f, imhuman)
wait_time = scrapertools.find_single_match(data, "<span id='xxc2'>(\d+)")
    # These two files must be downloaded, otherwise an error is shown
httptools.downloadpage(coding_url, cookies=False)
httptools.downloadpage(cgi_counter, cookies=False)
try:
time.sleep(int(wait_time) + 1)
    except Exception:
time.sleep(6)
data = httptools.downloadpage(playnow, post).data
    # If a warning pops up, load the verification page and then the initial one again
    # LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ THESE LINES MUST BE KEPT
if "You try to access this video with Kodi" in data:
url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
try:
data = httptools.downloadpage(url_reload, cookies=False).data
data = httptools.downloadpage(playnow, post, cookies=False).data
            # LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ THESE LINES MUST BE KEPT
        except Exception:
pass
matches = scrapertools.find_multiple_matches(data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
video_urls = []
for match in matches:
try:
match = jsunpack.unpack(match)
match = match.replace("\\'", "'")
media_urls = scrapertools.find_multiple_matches(match, "{src:'([^']+)'.*?,label:'([^']+)'")
subtitle = ""
for media_url, label in media_urls:
if media_url.endswith(".srt") and label == "Spanish":
try:
from core import filetools
data = httptools.downloadpage(media_url)
subtitle = os.path.join(config.get_data_path(), 'sub_flashx.srt')
filetools.write(subtitle, data)
                    except Exception:
import traceback
logger.info("Error al descargar el subtítulo: " + traceback.format_exc())
for media_url, label in media_urls:
if not media_url.endswith("png") and not media_url.endswith(".srt"):
video_urls.append(["." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0, subtitle])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
        except Exception:
pass
return video_urls
# Find videos from this server in the given text
def find_videos(data):
    # Manually add some known-bad entries to avoid them
encontrados = set()
devuelve = []
# http://flashx.tv/z3nnqbspjyne
# http://www.flashx.tv/embed-li5ydvxhg514.html
patronvideos = 'flashx.(?:tv|pw|ws|sx|to)/(?:embed.php\\?c=|embed-|playvid-|)([A-z0-9]+)'
logger.info("#" + patronvideos + "#")
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match in matches:
titulo = "[flashx]"
url = "https://www.flashx.tv/%s.html" % match
if url not in encontrados:
logger.info(" url=" + url)
devuelve.append([titulo, url, 'flashx'])
encontrados.add(url)
else:
logger.info(" url duplicada=" + url)
patronvideos = 'flashx.co/([A-z0-9]+)\\.jsp'
logger.info("#" + patronvideos + "#")
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match in matches:
titulo = "[flashx]"
url = "https://www.flashx.co/\\%s.jsp" % match
if url not in encontrados:
logger.info(" url=" + url)
devuelve.append([titulo, url, 'flashx'])
encontrados.add(url)
else:
logger.info(" url duplicada=" + url)
return devuelve
| 2.109375
| 2
|
plugins/pkts_utils/pkts_utils.py
|
sooualil/atlas-feature-extraction-extension
| 0
|
12778296
|
import binascii
import numpy as np
import copy
from scapy.all import TCP, UDP, IP, IPv6, ARP, raw
def get_packet_matrix(packet):
"""
Transform a packet content into 1D array of bytes
Parameters
----------
packet : an IP packet
Returns
-------
    1D ndarray of packet bytes
"""
hexst = binascii.hexlify(raw(packet))
fh = np.array([int(hexst[i:i+2],16) for i in range(0, len(hexst), 2)])
fh = np.uint8(fh)
return fh.reshape(-1)
def santize_packet_zeros(packet):
"""
    This method sanitizes a packet by anonymizing IP and MAC addresses and zeroing TCP/UDP ports
Parameters
----------
packet : a packet
Returns
-------
sanitized packet
"""
pkt = copy.deepcopy(packet)
ipv4='0.0.0.0'
ipv6='0000:00::00'
mac='00:00:00:00:00:00'
if pkt.haslayer(IPv6):
pkt[IPv6].src = ipv6
pkt[IPv6].dst = ipv6
if pkt.haslayer(TCP):
pkt[TCP].sport = 0
pkt[TCP].dport = 0
elif pkt.haslayer(UDP):
pkt[UDP].sport = 0
pkt[UDP].dport = 0
elif pkt.haslayer(IP) :
pkt[IP].src = ipv4
pkt[IP].dst = ipv4
if pkt.haslayer(TCP):
pkt[TCP].sport = 0
pkt[TCP].dport = 0
elif pkt.haslayer(UDP):
pkt[UDP].sport = 0
pkt[UDP].dport = 0
elif pkt.haslayer(ARP):
pkt[ARP].hwsrc = mac
pkt[ARP].hwdst = mac
pkt[ARP].psrc = ipv4
pkt[ARP].pdst = ipv4
else:
pass
return pkt
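# Hypothetical usage sketch (not part of the original module): build a small
# TCP/IP packet with scapy, sanitize it and turn it into a byte vector. The
# addresses and ports below are made-up example values.
if __name__ == "__main__":
    demo_pkt = IP(src="192.168.1.10", dst="10.0.0.1") / TCP(sport=12345, dport=80)
    sanitized = santize_packet_zeros(demo_pkt)
    byte_vector = get_packet_matrix(sanitized)
    print(byte_vector.shape, byte_vector.dtype)  # e.g. (40,) uint8 for bare IP/TCP headers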
| 3.21875
| 3
|
raspa/raspa_join_movies.py
|
kbsezginel/tutorials
| 11
|
12778297
|
"""
Join RASPA movie output pdb files for the adsorbent and adsorbate.
Creates a pdb file (not a trajectory!).
Usage:
>>> python raspa_join_movies.py framework.pdb adsorbate.pdb
Typical RASPA movie output names:
- framework: Framework_0_final.pdb
- adsorbate: Movie_IRMOF-1_2.2.2_298.000000_10.000000_component_CH4_0.pdb
"""
import os
import sys
filename = 'movies_joined.pdb'
framework = os.path.abspath(sys.argv[1])
component = os.path.abspath(sys.argv[2])
with open(framework, 'r') as fil:
framework_lines = fil.readlines()
# Assuming there are only two extra lines other than atomic coordinates (remarks and crystal cell)
n_framework_atoms = len(framework_lines) - 2
with open(component, 'r') as fil:
component_lines = fil.readlines()
i = 0
for line in component_lines:
if line[:4] == 'ATOM':
i += 1
atom_idx = str(n_framework_atoms + i)
new_line = line[:11 - len(atom_idx)] + atom_idx + line[11:]
framework_lines.append(new_line)
with open(filename, 'w') as fil:
for line in framework_lines:
fil.write(line)
print('Saved as -> %s' % filename)
| 2.953125
| 3
|
ex_discrete_convolution.py
|
ZEXINLIU/Univariate_ttr_examples
| 0
|
12778298
|
import numpy as np
from UncertainSCI.ttr import predict_correct_discrete, stieltjes_discrete, \
aPC, hankel_deter, mod_cheb, lanczos_stable
from UncertainSCI.utils.compute_moment import compute_moment_discrete
from UncertainSCI.families import JacobiPolynomials
from UncertainSCI.utils.verify_orthonormal import verify_orthonormal
import time
from tqdm import tqdm
"""
We use six methods and use Lanczos as the true solution
1. pc (Predictor-corrector method)
2. sp (Stieltjes procedure)
3. apc (Arbitrary polynomial chaos expansion method)
4. hd (Hankel determinants)
5. mc (Modified Chebyshev algorithm)
6. lz (Stabilized Lanczos algorithm)
to compute the recurrence coefficients for
the discrete probability density function.
"""
def preprocess_a(a):
"""
If a_i = 0 for some i, then the corresponding x_i has no influence
on the model output and we can remove this variable.
"""
a = a[np.abs(a) > 0.]
return a
def compute_u(a, N):
"""
    Given a vector a in R^m (excluding the zero vector),
compute the equally spaced points {u_i}_{i=0}^N-1
along the one-dimensional interval
Return
(N,) numpy.array, u = [u_0, ..., u_N-1]
"""
# assert N % 2 == 1
a = preprocess_a(a=a)
u_l = np.dot(a, np.sign(-a))
u_r = np.dot(a, np.sign(a))
u = np.linspace(u_l, u_r, N)
return u
def compute_q(a, N):
"""
    Given: a vector a in R^m (excluding the zero vector),
compute the discrete approximation to the convolution
q(u) = (p_0 * p_1 * ...)(u) = int p_0(t) p_1(u-t) ... dt
where x_i ~ UNIF[-1,1], i.e. p_i = 1/2 if |x_i|<=1 or 0 o.w.
Returns
(N,) numpy.array, q = [q_0, ..., q_N-1]
"""
u = compute_u(a=a, N=N)
q = np.zeros(u.shape)
q[np.abs(u) <= np.abs(a[0])] = 1 / (2 * np.abs(a[0]))
if len(a) == 1:
return q
for i in range(1, len(a)):
disc_q = np.zeros(u.shape)
for j in range(N):
p = np.zeros(u.shape)
p[np.abs(u[j] - u) <= np.abs(a[i])] = 1 / (2 * np.abs(a[i]))
disc_q[j] = np.trapz(y=q*p, x=u)
q = disc_q
return q
def compute_q_01(a, N):
"""
    Given: a vector a in R^m (excluding the zero vector),
compute the discrete approximation to the convolution
q(u) = (p_0 * p_1 * ...)(u) = int p_0(t) p_1(u-t) ... dt
where x_i ~ UNIF[0,1], i.e. p_i = 1 if 0<=x_i<=1 or 0 o.w.
Returns
(N,) numpy.array, q = [q_0, ..., q_N-1]
"""
u = compute_u(a=a, N=N)
q = np.zeros(u.shape)
q[(0 <= u) & (u <= a[0])] = 1 / a[0]
if len(a) == 1:
return q
for i in range(1, len(a)):
disc_q = np.zeros(u.shape)
for j in range(N):
p = np.zeros(u.shape)
p[(0 <= u[j] - u) & (u[j] - u <= a[i])] = 1 / a[i]
disc_q[j] = np.trapz(y=q*p, x=u)
q = disc_q
return q
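def _density_sanity_check(m_dim=5, n_grid=201, seed=0):
    """Illustrative self-check sketch (not part of the original experiment).
    The discretized convolution q approximates a probability density, so its
    trapezoidal integral over u should be close to 1. The arguments are
    arbitrary example values; this helper is not called by the script below.
    """
    rng = np.random.RandomState(seed)
    a_demo = rng.rand(m_dim) * 2 - 1.
    u_demo = compute_u(a=a_demo, N=n_grid)
    q_demo = compute_q(a=a_demo, N=n_grid)
    return np.trapz(q_demo, u_demo)  # expected to be roughly 1.0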
m = 25
np.random.seed(1)
a = np.random.rand(m,) * 2 - 1.
a = a / np.linalg.norm(a, None)
M = 100
u = compute_u(a=a, N=M)
du = (u[-1] - u[0]) / (M - 1)
q = compute_q(a=a, N=M)
w = du*q
N_array = [20, 40, 60, 80, 100]
t_pc = np.zeros(len(N_array))
t_sp = np.zeros(len(N_array))
t_apc = np.zeros(len(N_array))
t_hd = np.zeros(len(N_array))
t_mc = np.zeros(len(N_array))
t_lz = np.zeros(len(N_array))
e_pc = np.zeros(len(N_array))
e_sp = np.zeros(len(N_array))
e_apc = np.zeros(len(N_array))
e_hd = np.zeros(len(N_array))
e_mc = np.zeros(len(N_array))
e_lz = np.zeros(len(N_array))
iter_n = np.arange(100)
for k in tqdm(iter_n):
for ind, N in enumerate(N_array):
m = compute_moment_discrete(u, w, N)
# Predict-correct
start = time.time()
ab_pc = predict_correct_discrete(u, w, N)
end = time.time()
t_pc[ind] += (end - start) / len(iter_n)
e_pc[ind] = np.linalg.norm(verify_orthonormal(ab_pc,
np.arange(N), u, w) - np.eye(N), None)
# Stieltjes
start = time.time()
ab_sp = stieltjes_discrete(u, w, N)
end = time.time()
t_sp[ind] += (end - start) / len(iter_n)
e_sp[ind] = np.linalg.norm(verify_orthonormal(ab_sp,
np.arange(N), u, w) - np.eye(N), None)
# Arbitrary Polynomial Chaos Expansion
start = time.time()
ab_apc = aPC(m, N)
end = time.time()
t_apc[ind] += (end - start) / len(iter_n)
e_apc[ind] = np.linalg.norm(verify_orthonormal(ab_apc,
np.arange(N), u, w) - np.eye(N), None)
# Hankel Determinant
start = time.time()
ab_hd = hankel_deter(N, m)
end = time.time()
t_hd[ind] += (end - start) / len(iter_n)
e_hd[ind] = np.linalg.norm(verify_orthonormal(ab_hd,
np.arange(N), u, w) - np.eye(N), None)
# Modified Chebyshev
J = JacobiPolynomials(probability_measure=False)
def peval(x, n):
return J.eval(x, n)
def integrand(x):
return peval(x, i).flatten()
mod_m = np.zeros(2*N - 1)
for i in range(2*N - 1):
mod_m[i] = np.sum(integrand(u) * w)
start = time.time()
ab_mc = mod_cheb(N, mod_m, J)
end = time.time()
t_mc[ind] += (end - start) / len(iter_n)
e_mc[ind] = np.linalg.norm(verify_orthonormal(ab_mc,
np.arange(N), u, w) - np.eye(N), None)
# Stabilized Lanczos
start = time.time()
ab_lz = lanczos_stable(u, w, N)
end = time.time()
t_lz[ind] += (end - start) / len(iter_n)
e_lz[ind] = np.linalg.norm(verify_orthonormal(ab_lz,
np.arange(N), u, w) - np.eye(N), None)
"""
N_array = [20, 40, 60, 80, 100] with tol = 1e-12, M = 100
--- Frobenius norm error ---
e_pc
array([3.95937126e-15, 1.17465968e-14, 1.03274678e-09, 4.00000000e+00,
7.48331477e+00])
e_sp
array([2.73718732e-15, 9.26234216e-15, 5.94278038e-10, 4.00000000e+00,
7.48331477e+00])
e_apc
array([7.71740426e-08, 2.02884310e+05, 1.34663120e+27, 5.84702012e+47,
4.75461302e+67])
e_hd
array([1.69002753e-07, nan, nan, nan,
nan])
e_mc
array([3.02198881e-09, nan, nan, nan,
nan])
e_lz
array([4.75363731e-15, 2.95344383e-14, 3.45264757e-09, 1.86341337e+68,
2.49535465e+68])
--- elapsed time ---
t_pc
array([0.006624 , 0.02360435, 0.04991234, 0.08732417, 0.13431059])
t_sp
array([0.00610083, 0.0226834 , 0.04878942, 0.08529013, 0.13456016])
t_apc
array([0.0012863 , 0.00377234, 0.00772418, 0.01319587, 0.02014426])
t_hd
array([0.00285909, 0.00948635, 0.02003272, 0.03506948, 0.05553345])
t_mc
array([0.001672 , 0.00677977, 0.01486643, 0.02690948, 0.04083029])
t_lz
array([0.00102357, 0.00202671, 0.00326769, 0.00459507, 0.00605136])
N_array = [20, 40, 60, 80, 100] with tol = 1e-12, M = 200
e_pc
array([3.11798041e-15, 7.39256700e-15, 1.29863095e-14, 9.88664797e-14,
3.61285380e-10])
e_sp
array([3.72126779e-15, 7.53592850e-15, 1.26781374e-14, 2.20923974e-13,
9.87486694e-10])
e_apc
array([7.78482843e-09, nan, nan, nan,
nan])
e_hd
array([1.32862308e-07, nan, nan, nan,
nan])
e_mc
array([1.35810897e-08, nan, nan, nan,
nan])
e_lz
array([3.41140713e-15, 1.14879579e-14, 2.11319394e-14, 1.59865492e-13,
6.02815932e-10])
t_pc
array([0.00723969, 0.02657084, 0.05732713, 0.10112422, 0.15605735])
t_sp
array([0.00674671, 0.02566374, 0.05539571, 0.0994516 , 0.15175851])
t_apc
array([0.00124123, 0.00373606, 0.00757654, 0.01280226, 0.01998155])
t_hd
array([0.00280116, 0.00920861, 0.01995641, 0.03663265, 0.05622729])
t_mc
array([0.0016241 , 0.00665073, 0.01525463, 0.02681506, 0.04088025])
t_lz
array([0.00116924, 0.00255765, 0.00427727, 0.00629655, 0.00895607])
N_array = [20, 40, 60, 80, 100] with tol = 1e-12, M = 300
e_pc
array([4.53766132e-15, 9.56792320e-15, 1.49645839e-14, 2.46683586e-14,
1.40823522e-13])
e_sp
array([3.38625397e-15, 8.53221733e-15, 2.61509605e-14, 5.20440256e-14,
9.37261060e-14])
e_apc
array([3.79788608e-08, 7.66674623e+05, 3.90076936e+25, 2.70711113e+55,
9.69092414e+72])
e_hd
array([1.23521956e-07, nan, nan, nan,
nan])
e_mc
array([3.59601296e-09, nan, nan, nan,
nan])
e_lz
array([3.86864436e-15, 1.09695006e-14, 1.73436385e-14, 3.37621255e-14,
9.29458751e-14])
t_pc
array([0.0078077 , 0.02890715, 0.06318885, 0.11134079, 0.17054021])
t_sp
array([0.00751934, 0.02812571, 0.06108396, 0.10756618, 0.17055254])
t_apc
array([0.00134934, 0.00386411, 0.0075922 , 0.01308334, 0.02020775])
t_hd
array([0.00286912, 0.00990186, 0.0207794 , 0.03607668, 0.05730121])
t_mc
array([0.00165269, 0.00695888, 0.01558213, 0.02764288, 0.04210448])
t_lz
array([0.00145459, 0.00330329, 0.00638815, 0.01078056, 0.01479507])
"""
| 2.25
| 2
|
PyRAI2MD/Dynamics/Ensembles/thermostat.py
|
lopez-lab/PyRAI2MD
| 12
|
12778299
|
######################################################
#
# PyRAI2MD 2 module for thermostat in NVT ensemble
#
# Author <NAME>
# Sep 7 2021
#
######################################################
import numpy as np
def NoseHoover(traj):
""" Velocity scaling function in NVT ensemble (Nose Hoover thermostat)
Parameters: Type:
traj class trajectory class
Attribute: Type:
natom int number of atoms
temp float temperature
kinetic float kinetic energy
Vs list additional velocity information
kb float Boltzmann's constant
fs_to_au float unit conversion fs to au of time
"""
natom = traj.natom
kinetic = traj.kinetic
temp = traj.temp
size = traj.size
Vs = traj.Vs
kb = 3.16881 * 10**-6
fs_to_au = 2.4188843265857 * 10**-2
if len(Vs) == 0:
freq = 1 / (22 / fs_to_au) ## 22 fs to au Hz
Q1 = 3 * natom * temp * kb / freq**2
Q2 = temp * kb / freq**2
traj.Vs = [Q1, Q2, 0, 0]
else:
Q1, Q2, V1, V2 = Vs
G2 = (Q1 * V1**2 - temp * kb) / Q2
V2 += G2 * size / 4
V1 *= np.exp(-V2 * size / 8)
G1 = (2 * kinetic - 3 * natom * temp * kb) / Q1
V1 += G1 * size / 4
V1 *= np.exp(-V2 * size / 8)
s = np.exp(-V1 * size / 2)
traj.kinetic *= s**2
traj.velo *= s
V1 *= np.exp(-V2 * size / 8)
G1 = (2 * kinetic - 3 * natom * temp * kb) / Q1
V1 += G1 * size / 4
V1 *= np.exp(-V2 * size / 8)
G2 = (Q1 * V1**2 - temp * kb) / Q2
V2 += G2 * size / 4
traj.Vs = [Q1, Q2, V1, V2]
return traj
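# Minimal usage sketch (not part of the original module): PyRAI2MD passes a
# real trajectory object, so the attribute values below are invented
# placeholders that only illustrate the expected fields (in atomic units).
class _MockTraj:
    def __init__(self):
        self.natom = 3                       # number of atoms
        self.temp = 300.0                    # target temperature in K
        self.size = 20.0                     # time step in au
        self.kinetic = 0.005                 # current kinetic energy in au
        self.velo = np.full((3, 3), 1.0e-3)  # velocities, shape (natom, 3)
        self.Vs = []                         # thermostat state, filled on first call
if __name__ == '__main__':
    traj = _MockTraj()
    traj = NoseHoover(traj)  # first call only initializes Q1 and Q2
    traj = NoseHoover(traj)  # later calls rescale kinetic energy and velocities
    print(traj.Vs)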
| 2.53125
| 3
|
setup.py
|
daiyizheng/liyi-cute
| 0
|
12778300
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/4/5 15:18
# @Author : <NAME>
# @Email : <EMAIL>
# @File : setup.py
import setuptools
__version__ = None
exec(open('liyi_cute/__init__.py').read())
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
tests_requires = [
"pytest"
]
install_requires = [
"marshmallow >= 3.15.0",
]
extras_requires = {
'test': tests_requires
}
package_data = {
"liyi_cute": ["*.py","*.so"]
}
setuptools.setup(
name="liyi-cute",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
license='Apache 2.0',
description="A text processing tools",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/daiyizheng/liyi-cute",
keywords="Text processing tools, including named entity recognition, "
"relation extraction, event extraction, and some statistical "
"and visualization functions",
packages=setuptools.find_packages(),
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development",
],
install_requires= install_requires,
tests_require=tests_requires,
python_requires='>=3.8',
package_data=package_data,
project_urls={
'Bug Reports': 'https://github.com/daiyizheng/liyi-cute/issues',
'Source': 'https://github.com/daiyizheng/liyi-cute',
}
)
| 1.328125
| 1
|
faker/providers/lorem/tl_PH/__init__.py
|
jacksmith15/faker
| 12,077
|
12778301
|
from ..fil_PH import Provider as FilPhProvider
class Provider(FilPhProvider):
"""Implement lorem provider for ``tl_PH`` locale.
There is no difference from the |FilPhLoremProvider|.
.. |FilPhLoremProvider| replace::
:meth:`FilPhLoremProvider <faker.providers.lorem.fil_PH.Provider>`
"""
pass
| 1.523438
| 2
|
sampling_free/modeling/generalized_rcnn/rpn/retinanet/__init__.py
|
ChenJoya/sampling-free
| 266
|
12778302
|
from .retinanet import build_retinanet
| 0.8125
| 1
|
scripts/python/check_issue.py
|
zxkane/aws-cloudfront-extensions
| 87
|
12778303
|
import sys
import logging
from optparse import OptionParser
log = logging.getLogger('aws-cloudfront-extension.check_issue')
log_formatter = logging.Formatter(
'[%(asctime)s %(name)s][%(levelname)s] %(message)s')
log_stream_handler = logging.StreamHandler(sys.stdout)
log_stream_handler.setFormatter(log_formatter)
log.addHandler(log_stream_handler)
log.setLevel(logging.INFO)
REPRO_STEP = "### Reproduction Steps"
EXPECTED_RESULT = "### What did you expect to happen?"
ACTUAL_RESULT = "### What actually happened?"
def parse_opt():
parser = OptionParser(
usage="Usage: python check_issue.py [options]\n\t Check compliance for the content")
parser.add_option("-b", "--body",
dest="body",
help="The issue content body")
option, args = parser.parse_args()
return parser, option, args
def check_issue():
parser, option, args = parse_opt()
if not option.body:
log.error('Missing arguments: -b or --body')
parser.print_help()
sys.exit(1)
issue_content = get_issue_from_file(option.body)
log.info('Issue content: ' + issue_content)
index_repro = issue_content.find(REPRO_STEP)
index_expected = issue_content.find(EXPECTED_RESULT)
index_actual = issue_content.find(ACTUAL_RESULT)
if index_repro == -1 or index_expected == -1 or index_actual == -1:
log.error('Please fill in the information by using the template')
sys.exit(1)
repro_content = issue_content[index_repro + len(REPRO_STEP): index_expected]
expected_content = issue_content[
index_expected + len(EXPECTED_RESULT): index_actual]
log.info('Reproduce steps: ' + repro_content)
log.info('Expected result: ' + expected_content)
if len(repro_content.strip()) == 0 or len(expected_content.strip()) == 0:
log.error(
'Empty reproduce steps or expected result, please fill in these fields')
sys.exit(1)
    log.info('Check issue compliance succeeded')
def get_issue_from_file(file_name):
    with open(file_name, "r") as f:
        return f.read()
if __name__ == '__main__':
check_issue()
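# Example invocation (the file name below is only an illustration):
#   python check_issue.py --body issue_body.txt
# where issue_body.txt holds the raw issue text, including the
# "### Reproduction Steps", "### What did you expect to happen?" and
# "### What actually happened?" headings checked above.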
| 2.5
| 2
|
bootstrap/src/nodelabels.py
|
mapr/mapr-operators
| 2
|
12778304
|
import json
from common.mapr_logger.log import Log
class NodeLabels(object):
MAPR_LABEL = "mapr.com/usenode"
EXCLUSIVE_LABEL = "mapr.com/exclusivecluster"
def __init__(self, k8s):
self.k8s = k8s
self._node_count = 0
self._items = None
self._json = None
def _get_json(self):
Log.info("Retrieving node information...", stdout=True)
result, status = self.k8s.run_get("nodes -o=json")
if status != 0:
return None
self._json = json.loads(result)
if self._json is None:
Log.error("No JSON was returned from get nodes command")
return
self._items = self._json.get("items")
if self._items is None:
Log.error("No items dictonary in get nodes JSON")
return
self._node_count = len(self._items)
def get_mapr_use_node_labels(self, label):
nodes_set = 0
nodes_not_set = set()
for node in self._items:
node_name = node["metadata"]["name"]
mapr_usenode = node["metadata"]["labels"].get(label)
if mapr_usenode is not None:
nodes_set += 1
Log.info("Node: {0} has {1} label set to: {2}".format(node_name, label, mapr_usenode))
else:
nodes_not_set.add(node_name)
Log.info("Node: {0} does not have {1} label set".format(node_name, label))
Log.info("{0} node(s) found, {1} node(s) tagged with the MapR usage tag {2} while {3} node(s) not"
.format(self._node_count, nodes_set, label, len(nodes_not_set)), stdout=True)
return nodes_not_set
def process_labels(self):
self._get_json()
nodes_not_set = self.get_mapr_use_node_labels(NodeLabels.MAPR_LABEL)
if nodes_not_set is not None and len(nodes_not_set) > 0:
Log.info("Setting MapR usage tag {0} for {1} nodes...".format(NodeLabels.MAPR_LABEL, len(nodes_not_set)),
stdout=True)
for node_not_set in nodes_not_set:
self.k8s.run_label_mapr_node(node_not_set, NodeLabels.MAPR_LABEL, True)
nodes_not_set = self.get_mapr_use_node_labels(NodeLabels.EXCLUSIVE_LABEL)
if nodes_not_set is not None and len(nodes_not_set) > 0:
Log.info("Setting MapR usage tag {0} for {1} nodes...".format(NodeLabels.EXCLUSIVE_LABEL, len(nodes_not_set)),
stdout=True)
for node_not_set in nodes_not_set:
self.k8s.run_label_mapr_node(node_not_set, NodeLabels.EXCLUSIVE_LABEL, "None")
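# Minimal usage sketch (not part of the original module): NodeLabels only
# needs an object exposing run_get() and run_label_mapr_node(), so a small
# stub is enough to exercise process_labels() outside a real cluster. The
# single-node JSON below is a made-up example.
class StubK8S(object):
    def run_get(self, args):
        nodes = {"items": [{"metadata": {"name": "node-1", "labels": {}}}]}
        return json.dumps(nodes), 0
    def run_label_mapr_node(self, node, label, value):
        Log.info("Would label {0} with {1}={2}".format(node, label, value))
if __name__ == "__main__":
    NodeLabels(StubK8S()).process_labels()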
| 2.484375
| 2
|
{{cookiecutter.gh_repo_name}}/setup.py
|
filwaitman/cookiecutter-lib
| 0
|
12778305
|
{%- if cookiecutter.py2_support.lower() == 'n' -%}
import sys
{% endif -%}
from setuptools import setup
{%- if cookiecutter.py2_support.lower() == 'n' %}
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 6)
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write('This project does not support your Python version.')
sys.exit(1)
{%- endif %}
setup()
| 1.84375
| 2
|
tests/ref_jobs/fit_curve_odc_ref.py
|
Open-EO/openeo-odc
| 1
|
12778306
|
from dask.distributed import Client
import datacube
import openeo_processes as oeop
# Initialize ODC instance
cube = datacube.Datacube(app='collection', env='default')
cube_user_gen = datacube.Datacube(app='user_gen', env='user_generated')
# Connect to Dask Scheduler
client = Client('tcp://xx.yyy.zz.kk:8786')
def extra_func_18_0(x, *parameters):
_32frj455b_1 = oeop.pi(**{})
_2sjyaa699_11 = oeop.pi(**{})
_lyjcuq5vd_15 = oeop.multiply(**{'x': 2, 'y': _32frj455b_1})
_9k6vt7qcn_2 = oeop.multiply(**{'x': 2, 'y': _2sjyaa699_11})
_1ipvki94n_4 = oeop.divide(**{'x': _lyjcuq5vd_15, 'y': 31557600})
_p42lrxmbq_16 = oeop.divide(**{'x': _9k6vt7qcn_2, 'y': 31557600})
_wz26aglyi_5 = oeop.multiply(**{'x': _p42lrxmbq_16, 'y': x})
_kryhimf6r_6 = oeop.array_element(**{'data': parameters, 'index': 0})
_jxs4umqsh_10 = oeop.array_element(**{'data': parameters, 'index': 1})
_8jjjztmya_12 = oeop.array_element(**{'data': parameters, 'index': 2})
_ya3hbxpot_17 = oeop.multiply(**{'x': _1ipvki94n_4, 'y': x})
_v81bsalku_7 = oeop.cos(**{'x': _wz26aglyi_5})
_0p7xlqeyo_8 = oeop.sin(**{'x': _ya3hbxpot_17})
_jhus2gz74_13 = oeop.multiply(**{'x': _jxs4umqsh_10, 'y': _v81bsalku_7})
_0v09jn699_14 = oeop.multiply(**{'x': _8jjjztmya_12, 'y': _0p7xlqeyo_8})
_xb4c1hk1f_9 = oeop.add(**{'x': _kryhimf6r_6, 'y': _jhus2gz74_13})
_b4mf181yp_3 = oeop.add(**{'x': _xb4c1hk1f_9, 'y': _0v09jn699_14})
return _b4mf181yp_3
_23_20 = oeop.load_collection(odc_cube=cube, **{'product': 'boa_sentinel_2', 'dask_chunks': {'time': 'auto', 'x': 1000, 'y': 1000}, 'x': (11.5381, 11.5381), 'y': (46.4868, 46.4868), 'time': ['2016-01-01T00:00:00Z', '2016-05-31T00:00:00Z'], 'measurements': []})
_1_19 = oeop.clip(**{'x': _23_20, 'min': 0, 'max': 4000})
_22_18 = oeop.apply(**{'process': _1_19, 'data': _1_19, 'context': ''})
_18_0 = oeop.fit_curve(**{'data': _22_18, 'function': extra_func_18_0, 'parameters': [1, 1, 1], 'dimension': 't'})
_saveresult1_21 = oeop.save_result(**{'data': _18_0, 'format': 'NETCDF'})
| 2.046875
| 2
|
drivers/verdictdb.py
|
xty0505/crossfilter-benchmark-public
| 2
|
12778307
|
import json
import datetime, time
import itertools
import pyverdict
import decimal
import os
import multiprocessing
from multiprocessing import Queue
from common import util
import pandas as pd
import numpy as np
import queue
import threading
from threading import Thread
#logger = logging.getLogger("idebench")
class IDEBenchDriver:
# def init(self, options, schema, driver_arg):
# pass
#
# def workflow_start(self):
# print("workflow start")
# pass
#
# def workflow_end(self):
# #os.system("/usr/local/pgsql/bin/pg_ctl stop -D ~/xdb_data")
# #os.system('sudo -b bash -c "echo 1 > /proc/sys/vm/drop_caches"')
# #os.system("/usr/local/pgsql/bin/pg_ctl start -D ~/xdb_data")
# pass
#def can_execute_online(self, sql_statement):
# return (not " or " in sql_statement.lower()) and (not " AVG(" in sql_statement)
def verdictdbedit(self, sql_statement):
sql_statement=sql_statement.replace('FROM movies','FROM public.movies_scrambled_'+str(self.verdictdbconfig["scramblePercent"])+'_percent')
sql_statement=sql_statement.replace('FROM flights','FROM public.flights_scrambled_'+str(self.verdictdbconfig["scramblePercent"])+'_percent')
sql_statement=sql_statement.replace('FROM weather','FROM public.weather_scrambled_'+str(self.verdictdbconfig["scramblePercent"])+'_percent')
#print("SQL:",sql_statement)
return sql_statement.lower()
def create_connection(self):
connection = pyverdict.postgres(host=self.config['host'], user='crossfilter', password=self.config['password'], port=self.config['port'], dbname='crossfilter-eval-db')
connection.set_loglevel("ERROR")
return connection
def init(self, options, schema, driver_arg):
self.time_of_latest_request = 0
self.isRunning = False
self.requests = queue.LifoQueue()
with open("verdictdb.config.json","r") as f:
self.verdictdbconfig = json.load(f)
self.config = json.load(open(os.path.join(os.path.dirname(__file__),'..','verdictdb.config.json')))
def execute_vizrequest(self, viz_request, options, schema, result_queue):
viz = viz_request.viz
sql_statement = viz.get_computed_filter_as_sql(schema)
#calculate connection time
# get a connection from the pool - block if non is available
# connection = self.pool.get()
connection=self.conn
viz_request.start_time = util.get_current_ms_time()
try:
editedSqlStatement = self.verdictdbedit(sql_statement)
#print(editedSqlStatement)
data = connection.sql(editedSqlStatement)
except Exception as e:
print(e, flush=True)
viz_request.result = {}
viz_request.margins = {}
viz_request.end_time = util.get_current_ms_time()
result_queue.put(viz_request)
return
viz_request.end_time = util.get_current_ms_time()
# put connection back in the queue so the next thread can use it.
#cursor.close()
#connection.close()
#connection=self.create_connection()
#self.pool.put(connection)
results = {}
for i, row in data.iterrows():
keys = []
if row[0] is None:
continue
for i, bin_desc in enumerate(viz_request.viz.binning):
if "width" in bin_desc:
bin_width = bin_desc["width"]
keys.append(str(int(row[0])))
else:
keys.append(str(row[0]).strip())
key = ",".join(keys)
row = list(row)
for i, r in enumerate(row):
if isinstance(r, decimal.Decimal):
row[i] = float(r)
results[key] = row[1]
viz_request.result = results
#viz_request.margins = margins
viz_request.margins = {}
result_queue.put(viz_request)
print("delivering...")
def process_request(self, viz_request, options, schema, result_queue):
self.requests.put((viz_request, options, schema, result_queue))
def process(self):
# while the workflow is running, pop the latest request from the stack and execute it
while self.isRunning:
try:
request = self.requests.get(timeout=1)
viz_request = request[0]
options = request[1]
schema = request[2]
result_queue = request[3]
# only execute requests that are newer than the last one we processed (drops old/no longer needed queries)
if viz_request.expected_start_time < self.time_of_latest_request:
viz_request.dropped = True
result_queue.put(viz_request)
continue
self.time_of_latest_request = viz_request.expected_start_time
self.execute_vizrequest(viz_request, options, schema, result_queue)
except Exception as e:
# ignore queue-empty exceptions
print(e, flush=True)
pass
self.conn.close()
def workflow_start(self):
# pool a number of db connections
self.isRunning = True
#self.pool = queue.Queue()
#for i in range(1):
# conn = self.create_connection()
# self.pool.put(conn)
self.conn=self.create_connection()
thread = Thread(target = self.process)
thread.start()
def workflow_end(self):
self.isRunning = False
# close all db connections at the end of a workflow
#for i in range(self.pool.qsize()):
# conn = self.pool.get(timeout=1)
# conn.close()
# def process_request(self, viz_request, options, schema, out_q):
# print("processsing..." + str(viz_request.operation_id))
# if viz_request.viz.binning:
# sql_statement = viz_request.viz.get_computed_filter_as_sql(schema)
# sql_statement = sql_statement.replace(schema.get_fact_table_name(), "%s_%s%s" % (
# schema.get_fact_table_name(), options.settings_size, "n" if options.settings_normalized else ""))
# #if self.can_execute_online(sql_statement):
# # sql_statement = sql_statement.replace("SELECT ", "SELECT ONLINE ")
# # sql_statement += " WITHTIME %s CONFIDENCE 95" % options.settings_time_requirement
# # sql_statement += " REPORTINTERVAL %s;" % options.settings_time_requirement
# # connection, cursor = self.create_connection(options.settings_time_requirement + 20)
#
# #connection, cursor = self.create_connection(options.settings_time_requirement)
# #calculate connection time
# t1=util.get_current_ms_time()
# connection, cursor = self.create_connection()
# t2=util.get_current_ms_time()
# viz_request.connection_time=t2-t1
# viz_request.start_time = util.get_current_ms_time()
# try:
# data = connection.sql(self.verdictdbedit(sql_statement))
# except:
# viz_request.result = {}
# viz_request.margins = {}
# viz_request.timedout = True
# viz_request.end_time = util.get_current_ms_time()
# out_q.put(viz_request)
# return
# #data = connection.sql(self.verdictdbedit(sql_statement))
# #data=connection.sql(sql_statement)
#
# viz_request.end_time = util.get_current_ms_time()
# connection.close()
#
# results = {}
# margins = {}
| 2.375
| 2
|
minicms/urls.py
|
optik/minicms
| 0
|
12778308
|
from django.conf.urls import url
from . import views
app_name = 'minicms'
urlpatterns = [
url(r'^$', views.homepage, name='homepage'),
url(r'^(?P<path>.+)/$', views.page, name='page'),
]
| 1.617188
| 2
|
src/agent/base_agent.py
|
Wisteria30/GIM-RL
| 3
|
12778309
|
# -*- coding: utf-8 -*-
import torch
class Agent:
def __init__(self, env, cfg):
# if gpu is to be used
self.device = torch.device(
f"cuda:{cfg.gpu}" if torch.cuda.is_available() else "cpu"
)
self.env = env
self.cfg = cfg
self.n_states = self.env.observation_space.shape[0]
self.n_actions = self.env.action_space.n
def select_action(self, state):
pass
def optimize_agent(self, memory):
pass
def update_agent(self):
pass
def get_model(self):
return None
def reset_agent4test(self, **kwargs):
pass
| 2.515625
| 3
|
wgetgui/wget_gui_main.py
|
tadiclazar/tkintering
| 0
|
12778310
|
import tkinter as tk
from tkinter import ttk
import urllib.request
import os
from download_funcs import get_content, get_content_from_file
def main():
root = tk.Tk()
root.wm_title("Content Downloader")
style = ttk.Style()
if os.name == "nt":
style.theme_use("vista")
else:
self.style.theme_use("clam")
frame = ttk.Frame(root)
help_str = tk.StringVar(frame)
help_str.set("Enter URL to the content below."
"\nEnter optional path where the content will be stored."
"\nIf omitted, content is stored in the current folder.")
url_entry_var = tk.StringVar(frame)
path_var = tk.StringVar(frame)
filen_var = tk.StringVar(frame)
help_lbl = ttk.Label(frame, textvariable=help_str, justify=tk.CENTER)
url_lbl = ttk.Label(frame, text="URL: ")
path_lbl = ttk.Label(frame, text="Path: ")
url_entry = ttk.Entry(frame, textvariable=url_entry_var, width=30)
path_entry = ttk.Entry(frame, textvariable=path_var, width=30)
download_btn = ttk.Button(frame, text="Download",
command=lambda pa_var=path_var, url_evar=url_entry_var: get_content(pa_var, url_evar), width=20)
file_lbl = ttk.Label(frame, text="From File: ")
file_entry = ttk.Entry(frame, textvariable=filen_var, width=40)
from_file_btn = ttk.Button(frame, text="Get From File",
command=lambda fn=filen_var, pvar=path_var: get_content_from_file(fn, pvar), width=15)
frame.grid(row=0, column=0, padx=5, pady=5, sticky="nesw")
help_lbl.grid(row=0, column=1, padx=5, pady=5, sticky="nesw")
url_lbl.grid(row=1, column=0, padx=5, pady=5, sticky="e")
path_lbl.grid(row=2, column=0, padx=5, pady=5, sticky="e")
url_entry.grid(row=1, column=1, padx=5, pady=5, sticky="nesw")
path_entry.grid(row=2, column=1, padx=5, pady=5, sticky="news")
download_btn.grid(row=3, column=1, padx=5, pady=5, sticky="sw")
file_lbl.grid(row=4, column=0, padx=5, pady=5, sticky=tk.E)
file_entry.grid(row=4, column=1, padx=5, pady=5, sticky=tk.E)
from_file_btn.grid(row=5, column=1, padx=5, pady=5, sticky=tk.S)
root.mainloop()
if __name__ == '__main__':
main()
urllib.request.urlcleanup()
| 3.5
| 4
|
eventvec/server/model/torch_models/eventvec/event_torch_model.py
|
vedmathai/event-vec
| 0
|
12778311
|
import torch.nn as nn
import torch
class EventModel(nn.Module):
def __init__(self, input_size, hidden_size, output_size, device):
super(EventModel, self).__init__()
self.i2o = nn.Linear(input_size * 4, output_size, device=device)
self.dropout = nn.Dropout(0.1)
self.relu = nn.ReLU()
def forward(self, verb_vector, subject_vector, object_vector, date_vector):
input_combined = torch.cat((verb_vector, subject_vector, object_vector, date_vector), 1)
output = self.i2o(input_combined)
output = self.relu(output)
output = self.dropout(output)
return output
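# Minimal instantiation sketch (not part of the original module): the four
# input vectors are concatenated along dim 1, so each needs `input_size`
# features. The sizes below are arbitrary example values.
if __name__ == "__main__":
    batch, input_size, output_size = 2, 100, 16
    model = EventModel(input_size, hidden_size=64, output_size=output_size, device="cpu")
    verb = torch.rand(batch, input_size)
    subj = torch.rand(batch, input_size)
    obj = torch.rand(batch, input_size)
    date = torch.rand(batch, input_size)
    print(model(verb, subj, obj, date).shape)  # torch.Size([2, 16])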
| 2.703125
| 3
|
pacman-arch/test/pacman/tests/upgrade074.py
|
Maxython/pacman-for-termux
| 23
|
12778312
|
self.description = "pkg2<2.0 dependency (satisfy)"
p = pmpkg("pkg1")
p.depends = ["pkg2<2.0"]
self.addpkg(p)
lp = pmpkg("pkg2", "1.9b-3")
self.addpkg2db("local", lp)
self.args = "-U %s" % p.filename()
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=pkg1")
self.addrule("PKG_EXIST=pkg2")
| 1.960938
| 2
|
cli/gardener_ci/checkmarx_cli.py
|
zkdev/cc-utils
| 15
|
12778313
|
import concourse.steps.scan_sources
def upload_and_scan_from_component_descriptor(
checkmarx_cfg_name: str,
team_id: str,
component_descriptor_path: str
):
concourse.steps.scan_sources.scan_sources_and_notify(
checkmarx_cfg_name=checkmarx_cfg_name,
team_id=team_id,
component_descriptor_path=component_descriptor_path,
email_recipients=['<EMAIL>']
)
| 1.648438
| 2
|
Assignments/Dictionaries/Lab/01. Bakery.py
|
KaloyankerR/python-fundamentals-repository
| 0
|
12778314
|
items = input().split(" ")
bakery = {}
for item in range(0, len(items), 2):
key = items[item]
value = int(items[item + 1])
bakery[key] = value
print(bakery)
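# Example run (for illustration): given the input line
#   bread 3 cake 2 muffin 5
# the loop above builds {'bread': 3, 'cake': 2, 'muffin': 5}.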
| 3.71875
| 4
|
trackml/score.py
|
victor-estrade/trackml-library
| 166
|
12778315
|
"""TrackML scoring metric"""
__authors__ = ['<NAME>', '<NAME>', '<NAME>',
'<NAME>']
import numpy
import pandas
def _analyze_tracks(truth, submission):
"""Compute the majority particle, hit counts, and weight for each track.
Parameters
----------
truth : pandas.DataFrame
Truth information. Must have hit_id, particle_id, and weight columns.
submission : pandas.DataFrame
Proposed hit/track association. Must have hit_id and track_id columns.
Returns
-------
pandas.DataFrame
Contains track_id, nhits, major_particle_id, major_particle_nhits,
major_nhits, and major_weight columns.
"""
# true number of hits for each particle_id
particles_nhits = truth['particle_id'].value_counts(sort=False)
total_weight = truth['weight'].sum()
# combined event with minimal reconstructed and truth information
event = pandas.merge(truth[['hit_id', 'particle_id', 'weight']],
submission[['hit_id', 'track_id']],
on=['hit_id'], how='left', validate='one_to_one')
event.drop('hit_id', axis=1, inplace=True)
event.sort_values(by=['track_id', 'particle_id'], inplace=True)
# ASSUMPTIONs: 0 <= track_id, 0 <= particle_id
tracks = []
# running sum for the reconstructed track we are currently in
rec_track_id = -1
rec_nhits = 0
# running sum for the particle we are currently in (in this track_id)
cur_particle_id = -1
cur_nhits = 0
cur_weight = 0
# majority particle with most hits up to now (in this track_id)
maj_particle_id = -1
maj_nhits = 0
maj_weight = 0
for hit in event.itertuples(index=False):
# we reached the next track so we need to finish the current one
if (rec_track_id != -1) and (rec_track_id != hit.track_id):
# could be that the current particle is the majority one
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for this track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits,
maj_weight / total_weight))
# setup running values for next track (or first)
if rec_track_id != hit.track_id:
rec_track_id = hit.track_id
rec_nhits = 1
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
maj_particle_id = -1
maj_nhits = 0
            maj_weight = 0
continue
# hit is part of the current reconstructed track
rec_nhits += 1
# reached new particle within the same reconstructed track
if cur_particle_id != hit.particle_id:
# check if last particle has more hits than the majority one
# if yes, set the last particle as the new majority particle
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
            # reset running values for the current particle
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
# hit belongs to the same particle within the same reconstructed track
else:
cur_nhits += 1
cur_weight += hit.weight
# last track is not handled inside the loop
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for the last track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits, maj_weight / total_weight))
cols = ['track_id', 'nhits',
'major_particle_id', 'major_particle_nhits',
'major_nhits', 'major_weight']
return pandas.DataFrame.from_records(tracks, columns=cols)
def score_event(truth, submission):
"""Compute the TrackML event score for a single event.
Parameters
----------
truth : pandas.DataFrame
Truth information. Must have hit_id, particle_id, and weight columns.
submission : pandas.DataFrame
Proposed hit/track association. Must have hit_id and track_id columns.
"""
tracks = _analyze_tracks(truth, submission)
purity_rec = numpy.true_divide(tracks['major_nhits'], tracks['nhits'])
purity_maj = numpy.true_divide(tracks['major_nhits'], tracks['major_particle_nhits'])
good_track = (0.5 < purity_rec) & (0.5 < purity_maj)
return tracks['major_weight'][good_track].sum()
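if __name__ == '__main__':
    # Tiny synthetic example (not part of the original library): four hits
    # from two true particles, reconstructed into one mixed track and one
    # single-hit track. Only the mixed track passes both 50% purity cuts;
    # the single-hit track has a majority purity of exactly 0.5.
    truth = pandas.DataFrame({
        'hit_id': [1, 2, 3, 4],
        'particle_id': [100, 100, 200, 200],
        'weight': [0.25, 0.25, 0.25, 0.25],
    })
    submission = pandas.DataFrame({
        'hit_id': [1, 2, 3, 4],
        'track_id': [7, 7, 7, 8],
    })
    print(score_event(truth, submission))  # 0.5: only track 7 counts as good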
| 2.9375
| 3
|
openedx_export_plugins/exporters/base.py
|
appsembler/openedx-export-plugins
| 1
|
12778316
|
"""
Define an Exporter Plugin class providing
additional options to xmodule lib ExportManager
"""
import datetime
from lxml import etree
from xmodule.modulestore import xml_exporter
from .. import app_settings
from . import resolvers
class PluggableCourseExportManager(xml_exporter.CourseExportManager):
"""
Export format-agnostic block/module course export manager.
Course export plugins should register themselves in the namespace
`openedx.exporters.course` and inherit from this class.
"""
@property
def name(self):
raise NotImplementedError
@property
def http_content_type(self):
raise NotImplementedError
@property
def filename_extension(self):
raise NotImplementedError
def process_root(self, root, export_fs):
"""
Perform any additional tasks to the root node.
"""
super(PluggableCourseExportManager, self).process_root(root, export_fs)
def process_extra(self, root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs):
"""
Process additional content, like static assets.
"""
super(PluggableCourseExportManager, self).process_extra(root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs)
def post_process(self, root, export_fs):
"""
Perform any final processing after the other export tasks are done.
This is where most plugin export managers will do their work, after
export to XML. XModules and XBlocks provide XML serialization directly
or via mixin, and it's much more work to directly serialize to some other
format than to post-process XML output to another format
"""
def export(self):
"""
Perform the export given the parameters handed to this class at init.
"""
super(PluggableCourseExportManager, self).export()
def _load_export_xsl(self):
"""
Get the XSL stylesheet for post_processing.
"""
try:
return self.DEFAULT_XSL_STYLESHEET
except AttributeError:
raise # do something more intelligent here, not all exporter plugins may use XSL
def _do_xsl_transform(self, root, export_fs):
"""
Perform XSLT transform of export output using XSL stylesheet.
"""
parser = etree.XMLParser(recover=True) # use a forgiving parser, OLX is messy
parser.resolvers.add(resolvers.ExportFSResolver(export_fs))
parser.resolvers.add(resolvers.PyLocalXSLResolver())
parser.resolvers.add(resolvers.AssetURLResolver(export_fs))
xsl_sheet = bytes(self._load_export_xsl(), 'utf-8')
xslt_root = etree.XML(xsl_sheet, parser)
transform = etree.XSLT(xslt_root)
dt = datetime.datetime.now()
result_tree = transform(root, baseURL="'{}'".format(app_settings.LMS_ROOT_URL), curDateTime="'{}'".format(dt))
        print(str(result_tree))
return result_tree
| 2.515625
| 3
|
test/unit-tests/common/test_translator.py
|
jaredcurtis/confluencebuilder
| 0
|
12778317
|
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2016-2019 by the contributors (see AUTHORS file).
:license: BSD-2-Clause, see LICENSE for details.
"""
from collections import namedtuple
from sphinxcontrib.confluencebuilder.translator import ConfluenceTranslator
from sphinxcontrib_confluencebuilder_util import ConfluenceTestUtil as _
from sphinxcontrib_confluencebuilder_util import EXT_NAME
import os
import unittest
Reporter = namedtuple('Reporter', 'warning')
class DummyDocument(dict):
def __init__(self, source, warn=False):
self['source'] = source
self.reporter = Reporter(warn)
class TestConfluenceTranslator(unittest.TestCase):
@classmethod
def setUpClass(self):
self.config = _.prepareConfiguration()
self.test_dir = os.path.dirname(os.path.realpath(__file__))
def test_docname_and_docparent(self):
mock_ds = os.path.join(self.test_dir, 'dataset-common')
doc_dir, doctree_dir = _.prepareDirectories('config-dummy')
mock_docpath = os.path.join(mock_ds, 'foo', 'bar' , 'baz.rst')
doc = DummyDocument(mock_docpath)
# prepare a dummy application; no need to actually build
with _.prepareSphinx(mock_ds, doc_dir, doctree_dir, self.config) as app:
translator = ConfluenceTranslator(doc, app.builder)
self.assertEqual(translator.docname, 'foo/bar/baz')
self.assertEqual(translator.docparent, 'foo/bar/')
| 2.109375
| 2
|
modmon/db/create.py
|
alan-turing-institute/ModMon
| 1
|
12778318
|
"""
Functions for creating and deleting the ModMon database.
"""
import argparse
import sys
from sqlalchemy import create_engine
from sqlalchemy.exc import ProgrammingError
from .schema import Base
from .connect import get_database_config, DATABASE_NAME, ENGINE
from ..config import config
from ..utils.utils import ask_for_confirmation
ADMIN_CONNECTION_STRING, _ = get_database_config(config["database-admin"])
def create_database(db_name=DATABASE_NAME, force=False):
"""Create the ModMon database.
Parameters
----------
db_name : str, optional
        Name of the database to create, by default modmon.db.connect.DATABASE_NAME
force : bool, optional
If True delete any pre-existing database and create a new one, by default False
"""
engine = create_engine(ADMIN_CONNECTION_STRING)
conn = engine.connect()
conn.execute("commit")
try:
conn.execute(f'CREATE DATABASE "{db_name}"')
except ProgrammingError as e:
if f'database "{db_name}" already exists' in str(e):
if force:
print("Deleting pre-existing database.")
delete_database(db_name=db_name, force=force)
print("Creating new database.")
create_database(db_name=db_name, force=force)
else:
print(f'Database "{db_name}" already exists.')
else:
raise
def delete_database(db_name=DATABASE_NAME, force=False):
"""Delete the ModMon database.
Parameters
----------
db_name : str, optional
        Name of the database to delete, by default modmon.db.connect.DATABASE_NAME
force : bool, optional
Unless True ask the user for confirmation before deleting, by default False
"""
if not force:
confirmed = ask_for_confirmation(
"WARNING: This will delete all data currently in the database."
)
if not confirmed:
print("Aborting create.")
return
engine = create_engine(ADMIN_CONNECTION_STRING)
conn = engine.connect()
conn.execute("commit")
try:
conn.execute(f'DROP DATABASE "{db_name}"')
except ProgrammingError as e:
if f'database "{db_name}" does not exist' in str(e):
print(f'There is no database called "{db_name}".')
else:
raise
def create_schema(force=False, checkfirst=True):
"""Create the tables and schema on the ModMon database.
Parameters
----------
force : bool, optional
Unless True ask for confirmation before taking potentially destructive action if
checkfirst is False, by default False
checkfirst : bool, optional
If True don't recreate tables already present in the database, by default True
"""
if not checkfirst and not force:
confirmed = ask_for_confirmation(
"WARNING: This will delete all data currently in the database."
)
if not confirmed:
print("Aborting create.")
return
Base.metadata.create_all(ENGINE, checkfirst=checkfirst)
def delete_schema(force=False, checkfirst=True):
"""Delete all tables and data stored in the ModMon database.
Parameters
----------
force : bool, optional
Unless True ask the user for confirmation before proceeding, by default False
checkfirst : bool, optional
If True only issue DROPs for tables confirmed to be present, by default True
"""
if not force:
confirmed = ask_for_confirmation(
"WARNING: This will delete ALL tables and data in the database."
)
if not confirmed:
print("Aborting delete.")
return
Base.metadata.drop_all(ENGINE, checkfirst=checkfirst)
def main():
"""Delete and re-create the model monitoring database.
To be used from command-line as modmon_db_create
"""
parser = argparse.ArgumentParser(
description="Create the model monitoring database (ModMon)."
)
parser.add_argument(
"--force",
help="Delete and recreate the database without asking for confirmation if set",
action="store_true",
)
args = parser.parse_args()
if not args.force:
confirmed = ask_for_confirmation(
"WARNING: This will delete all data in any pre-existing ModMon database."
)
if not confirmed:
print("Aborting create.")
sys.exit(0)
create_database(force=True)
create_schema(force=True, checkfirst=False)
| 3.484375
| 3
|
packages/w3af/w3af/tests/vuln_sites/utils/scan_vulnerable_site.py
|
ZooAtmosphereGroup/HelloPackages
| 3
|
12778319
|
<reponame>ZooAtmosphereGroup/HelloPackages
"""
test_scan_vulnerable_site.py
Copyright 2014 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from nose.plugins.attrib import attr
from w3af.plugins.tests.helper import PluginConfig
@attr('functional')
@attr('internet')
@attr('slow')
@attr('ci_fails')
class TestScanVulnerableSite(object):
target_url = None
_run_configs = {
'cfg': {
'plugins': {
'crawl': (PluginConfig('web_spider',),),
'audit': (PluginConfig('all'),),
'grep': (PluginConfig('all'),),
}
}
}
EXPECTED_URLS = {}
EXPECTED_VULNS = {()}
def test_scan_vulnerable_site(self):
if self.target_url is None:
return
cfg = self._run_configs['cfg']
self._scan(self.target_url, cfg['plugins'])
#self.assertAllURLsFound(self.EXPECTED_URLS)
self.assertMostExpectedVulnsFound(self.EXPECTED_VULNS)
| 1.804688
| 2
|
balsam/job_sources/JobInterface.py
|
hep-cce/hpc-edge-service
| 0
|
12778320
|
from common_core.MessageInterface import MessageInterface
from balsam_core.BalsamJobMessage import BalsamJobMessage
from balsam_core.job_sources.StatusMessage import StatusMessage
import logging,time,sys
logger = logging.getLogger(__name__)
class NoMoreJobs(Exception): pass
class JobListener:
''' opens message interface and receives jobs '''
def __init__(self,site_name):
self.site_name = site_name
self.msgInt = MessageInterface()
# open connection that just stays open until class is destroyed
self.msgInt.open_blocking_connection()
# make sure machine queue exists
self.msgInt.create_queue(self.site_name,self.site_name)
def get_job_to_submit(self):
# create empty message
msg = BalsamJobMessage()
# request message
method,properties,body = self.msgInt.receive_msg(self.site_name)
if method is not None:
# acknowledge receipt of the message
self.msgInt.send_ack(method.delivery_tag)
logger.debug('BalsamMsgHandler.recv_new_job: received message')
try:
msg.load(body)
except:
logger.error( ' received exception while loading message body into BalsamJobMessage: ' + str(sys.exc_info()[1]))
raise
logger.debug(str(msg))
return msg
else:
logger.debug('No new job message received')
raise NoMoreJobs('No jobs available')
return None
def get_jobs_to_submit(self):
jobs = []
i = 0
# only get 10 jobs at a time so as not to overwhelm the system
while i < 10:
try:
new_job = self.get_job_to_submit()
            except NoMoreJobs:
                logger.debug('Done retrieving jobs')
                break
            jobs.append(new_job)
            i += 1
logger.debug('retrieved ' + str(len(jobs)) + ' jobs to process.')
return jobs
def send_job_failed(machine_name,job_id,message=None):
logger.debug(' sending job failed message ')
msg = StatusMessage.FAILED
if message is not None:
msg = (StatusMessage.FAILED | message)
send_job_status_msg(machine_name,operation = '',job_id=job_id,message=msg)
def send_job_finished(machine_name,job_id,message=None):
logger.debug(' sending job succeeded message ')
msg = StatusMessage.SUCCEEDED
if message is not None:
msg = (StatusMessage.SUCCEEDED | message)
send_job_status_msg(machine_name,operation='',job_id=job_id,message=msg)
def send_job_status_msg(machine_name,
operation,
job_id,
message = '',
priority = 0, # make message persistent
delivery_mode = 2, # default
):
logger.debug('sending job status message: ' + str(message))
timestamp = time.time()
# create message interface
msgInt = MessageInterface()
msgInt.open_blocking_connection()
# create a header
headers = {
'hpc': machine_name,
'taskID': job_id,
'operation': operation,
'created': int(timestamp),
}
# send message
msgInt.send_msg(str(message),
str(job_id),
exchange_name = None,
message_headers = headers,
)
# close connection
msgInt.close()
| 2.125
| 2
|
BOJ_Solved/BOJ-25191.py
|
CodingLeeSeungHoon/Python_Algorithm_TeamNote
| 7
|
12778321
|
<reponame>CodingLeeSeungHoon/Python_Algorithm_TeamNote
"""
백준 25191번 : 치킨댄스를 추는 곰곰이를 본 임스
"""
chicken = int(input())
coke, beer = map(int, input().split())
print(min(coke//2 + beer, chicken))
| 2.84375
| 3
|
pypardot/objects_v3/tests/__init__.py
|
andyoneal/PyPardotSF
| 2
|
12778322
|
<gh_stars>1-10
# This module (objects) was originally imported from <NAME>'s original implementation
# https://github.com/joshgeller/PyPardot/tree/349dde1fad561f32a425324005c4f2a0c4a23d9b/pypardot/objects
| 1.1875
| 1
|
test.py
|
dvanderrijst/DEMO-text2vec-openai
| 7
|
12778323
|
import json
with open('data/movies.json') as f:
    data = json.load(f)
for movie in data[:10]:
    print(movie["Title"])
| 3.1875
| 3
|
python/setup.py
|
chemalot/openmm-py
| 1
|
12778324
|
###############################################################################
## The MIT License
##
## SPDX short identifier: MIT
##
## Copyright 2019 Genentech Inc. South San Francisco
##
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included
## in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.
###############################################################################
###############################################################################
## Portions of this software were derived from code originally developed
## by <NAME> and copyrighted by Stanford University and the Authors
###############################################################################
from distutils.core import setup
from distutils.extension import Extension
import os
import sys
import platform
openmm_dir = '@OPENMM_DIR@'
nn_plugin_header_dir = '@NN_PLUGIN_HEADER_DIR@'
nn_plugin_library_dir = '@NN_PLUGIN_LIBRARY_DIR@'
# setup extra compile and link arguments on Mac
extra_compile_args = []
extra_link_args = []
if platform.system() == 'Darwin':
extra_compile_args += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
extra_link_args += ['-stdlib=libc++', '-mmacosx-version-min=10.7', '-Wl', '-rpath', openmm_dir+'/lib']
extension = Extension(name='_openmm_py',
sources=['PYPluginWrapper.cpp'],
libraries=['OpenMM', 'OpenMMPY'],
include_dirs=[os.path.join(openmm_dir, 'include'), nn_plugin_header_dir],
library_dirs=[os.path.join(openmm_dir, 'lib'), nn_plugin_library_dir],
runtime_library_dirs=[os.path.join(openmm_dir, 'lib')],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args
)
setup(name='openmm_py',
version='1.0',
py_modules=['openmm_py'],
ext_modules=[extension],
)
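# Typical invocation (a sketch; the @...@ placeholders above are substituted by
# the project's CMake configure step before this file is actually run):
#
#     python setup.py build
#     python setup.py install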
| 1.257813
| 1
|
pycausal_explorer/meta/_single_learner.py
|
gotolino/pycausal-explorer
| 3
|
12778325
|
<reponame>gotolino/pycausal-explorer
import numpy as np
from sklearn.base import clone
from sklearn.utils.validation import check_is_fitted, check_X_y
from pycausal_explorer.base import BaseCausalModel
class SingleLearnerBase(BaseCausalModel):
def __init__(self, learner):
if isinstance(learner, type):
raise ValueError(
"You should provide an instance of an estimator instead of a class."
)
else:
self.learner = clone(learner)
def fit(self, X, y, *, treatment):
X, y = check_X_y(X, y)
X, w = check_X_y(X, treatment)
self.learner = self.learner.fit(np.column_stack([X, w]), y)
self.is_fitted_ = True
return self
def predict(self, X, w):
check_is_fitted(self)
return self.learner.predict(np.column_stack([X, w]))
class SingleLearnerRegressor(SingleLearnerBase):
"""
Implementation of the single learner model.
Regressor version, should be used with continuous data.
Uses a single provided model to predict outcome when under treatment, and when not.
Uses that to estimate treatment effect.
Parameters
----------
learner: estimator object
base learner to use when predicting outcome. Should implement fit and predict methods.
"""
def __init__(self, learner):
super().__init__(learner)
self._estimator_type = "regressor"
def predict_ite(self, X):
check_is_fitted(self)
return self.predict(X, np.ones(shape=X.shape[0])) - self.predict(
X, np.zeros(shape=X.shape[0])
)
class SingleLearnerClassifier(SingleLearnerBase):
"""
Implementation of the single learner model.
Logistic version, should be used with binary data.
Uses a single provided model to predict outcome when under treatment, and when not.
Uses that to estimate treatment effect.
Parameters
----------
learner: estimator object
base learner to use when predicting outcome. Should implement fit and predict methods.
"""
def __init__(self, learner):
super().__init__(learner)
self._estimator_type = "classifier"
def predict_proba(self, X, w):
return self.learner.predict_proba(np.column_stack([X, w]))
def predict_ite(self, X):
check_is_fitted(self)
return (
self.predict_proba(X, np.ones(shape=X.shape[0]))[:, 1]
- self.predict_proba(X, np.zeros(shape=X.shape[0]))[:, 1]
)
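# A minimal usage sketch (assumption: scikit-learn's LinearRegression is an
# acceptable base learner; the synthetic data below is purely illustrative).
if __name__ == "__main__":
    from sklearn.linear_model import LinearRegression

    rng = np.random.default_rng(0)
    X = rng.normal(size=(500, 3))
    w = rng.integers(0, 2, size=500)  # binary treatment indicator
    y = X @ np.array([1.0, -2.0, 0.5]) + 3.0 * w + rng.normal(scale=0.1, size=500)

    model = SingleLearnerRegressor(LinearRegression())
    model.fit(X, y, treatment=w)
    # with a linear outcome model the mean estimated ITE should be close to 3.0
    print("mean estimated ITE:", model.predict_ite(X).mean())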
| 2.734375
| 3
|
ppdb_app/models.py
|
aryanicosa/ppdb_mvt
| 0
|
12778326
|
<gh_stars>0
from django.db import models
# Create your models here.
class Users(models.Model):
role = models.CharField(max_length=20)
username = models.CharField(max_length=100)
fullname = models.CharField(max_length=100)
password = models.CharField(max_length=200)
def __str__(self):
        return self.username
| 2.453125
| 2
|
source/FnAssetAPI/core/properties.py
|
IngenuityEngine/ftrack-connect-foundry
| 1
|
12778327
|
import re
__all__ = ['UntypedProperty', 'TypedProperty', 'TimecodeProperty']
class UntypedProperty(object):
"""
The Property classes form the basis for the FixedInterfaceObject. They
implement a Python property, and store the data in the instances dataVar.
Docstrings can also be provided to improve help() output.
@param initVal, An initial value for the property if None is not supported.
@param doc str, A docstring for the property that will be printed when
help() is called on the object.
@param dataVar str ['__dict__'] The instance dict attribute that should be
used to hold the properties data. It defaults to the objects __dict__, but
could be something else if de-coupled storage is desired.
@param dataName str ['__<id(self)>'] The key to use when storing a value in
  dataVar. If omitted, this defaults to a prefixed version of the id of the
  object, though this may cause serialisation issues - so it's recommended that
this is set to something meaningful. Some objects use Metaclasses to take care
of this automatically to avoid the developer having to manually match the
dataName to the actual attribute name.
  @param order int [-1] A UI hint as to the 'natural ordering' for this
property when it's displayed in a list.
"""
def __init__(self, initVal=None, doc=None, dataVar=None, dataName=None, order=-1):
super(UntypedProperty, self).__init__()
self.__doc__ = doc
self.value = initVal
self.dataVar = dataVar if dataVar else '__dict__'
# I don't know how well this will serialize but its to avoid you always
# having to name it twice. Though most Factories take care of this now.
self.dataName = dataName if dataName else "__%s" % id(self)
# This may be used for positioning in the ui, this should be > 0
# as -1 indicates that it is unordered or ordering is not important
self.order = order
def __get__(self, obj, cls):
# Allow access to ourself if we're called on the class
if obj is None:
return self
return getattr(obj, self.dataVar).get(self.dataName, None)
def __set__(self, obj, value):
getattr(obj, self.dataVar)[self.dataName] = value
class TypedProperty(UntypedProperty):
"""
Extends the UntypedProperty to allow strict type checking of values.
  @param typ Class, Sets will be conformed to being instances of this type or
  None.
@exception ValueError or other as per constructing an instance of the
property's typ from the supplied value. ie: typ(value).
"""
def __init__(self, typ, initVal=None, doc=None, dataVar=None, dataName=None,
order=-1):
super(TypedProperty, self).__init__(initVal, doc, dataVar, dataName, order)
self.__doc__ = "[%s]" % typ.__name__
if doc:
self.__doc__ += " %s" % doc
self.typ = typ
def __set__(self, obj, value):
if not isinstance(value, self.typ) and value is not None:
value = self.typ(value)
super(TypedProperty, self).__set__(obj, value)
class TimecodeProperty(TypedProperty):
"""
A specialised property to hold SMPTE timecode values. Valid formats are:
HH:MM:SS:FF (non-drop)
HH:MM:SS;FF or HH:MM:SS.FF (drop)
Any of the above can be suffixed with a floating point frame rate (R) or
prefixed with a sign.
[+-]HH:MM:SS:FF@R
"""
## A regex that can be used to match timecode values, groups are named
## 'hours', 'minutes', 'seconds', 'frames', 'dropFrame' and 'frameRate'
timecodeRegex = re.compile(r'(?P<sign>[+\-]?)(?P<hours>[0-9]{2}):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9]{2})(?P<dropFrame>[:;.])(?P<frames>[0-9]{2})(?:@(?P<frameRate>[0-9.]+)|$)')
def __init__(self, doc=None, dataVar=None, dataName=None, order=-1):
super(TimecodeProperty, self).__init__(str, None, doc, dataVar,
dataName, order)
def __set__(self, obj, value):
if value is not None:
if not isinstance(value, str):
raise ValueError("Timecodes must be a string (%s)" % type(value))
if not self.timecodeRegex.match(value):
raise ValueError("Invalid timecode format: '%s' (hh:mm:ss:ff or "+
"[+-]hh:mm:ss[:;.]ff@rr[.rr]])" % value)
super(TypedProperty, self).__set__(obj, value)
def getTimecode(self, value):
"""
@return str, The timecode component of @param value, or an empty string if
no valid timecode is found in the input.
"""
if value is None:
return ''
match = self.timecodeRegex.match(value)
if not match:
return ''
sign = match.group('sign')
sign = sign if sign else ''
hh = int(match.group('hours'))
mm = int(match.group('minutes'))
ss = int(match.group('seconds'))
ff = int(match.group('frames'))
df = match.group('dropFrame')
tc = "%s%02d:%02d:%02d%s%02d" % (sign, hh, mm, ss, df, ff)
return tc
def getFrameRate(self, value):
"""
@return float, The frame rate of @param value else 0 if no valid framerate
is encoded in the value.
"""
rate = 0.0
if value is None:
return rate
match = self.timecodeRegex.match(value)
if not match:
return rate
rr = match.group('frameRate')
if rr:
rate = float(rr)
return rate
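# A minimal usage sketch (hypothetical Shot class; it only illustrates how the
# descriptors are declared with an explicit dataName and how values are coerced
# and validated on assignment).
if __name__ == '__main__':
  class Shot(object):
    name = TypedProperty(str, doc="Shot name", dataName='__shotName')
    startTimecode = TimecodeProperty(doc="Start timecode", dataName='__startTc')
  s = Shot()
  s.name = 42  # coerced to the string '42' by TypedProperty
  s.startTimecode = '01:00:00:00@24'  # validated against timecodeRegex
  print("%s starts at %s (%.1f fps)" % (
      s.name,
      Shot.startTimecode.getTimecode(s.startTimecode),
      Shot.startTimecode.getFrameRate(s.startTimecode)))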
| 2.625
| 3
|
tests/common/csv_import.py
|
Amber1990Zhang/nebula-graph
| 0
|
12778328
|
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import csv
import re
from tests.common.types import (
VID,
Rank,
Prop,
Tag,
Edge,
Vertex,
)
class CSVImporter:
_SRC_VID = ':SRC_VID'
_DST_VID = ':DST_VID'
_VID = ':VID'
_RANK = ':RANK'
def __init__(self, filepath):
self._filepath = filepath
self._insert_stmt = ""
self._create_stmt = ""
self._type = None
def __iter__(self):
with open(self._filepath, 'r') as f:
for i, row in enumerate(csv.reader(f)):
if i == 0:
yield self.parse_header(row)
else:
yield self.process(row)
def process(self, row: list):
if isinstance(self._type, Vertex):
return self.build_vertex_insert_stmt(row)
return self.build_edge_insert_stmt(row)
def build_vertex_insert_stmt(self, row: list):
props = []
for p in self._type.tags[0].props:
col = row[p.index]
props.append(self.value(p.ptype, col))
vid = self._type.vid
id_val = self.value(vid.id_type, row[vid.index])
return f'{self._insert_stmt} {id_val}:({",".join(props)});'
def build_edge_insert_stmt(self, row: list):
props = []
for p in self._type.props:
col = row[p.index]
props.append(self.value(p.ptype, col))
src = self._type.src
dst = self._type.dst
src_vid = self.value(src.id_type, row[src.index])
dst_vid = self.value(dst.id_type, row[dst.index])
if self._type.rank is None:
return f'{self._insert_stmt} {src_vid}->{dst_vid}:({",".join(props)});'
rank = row[self._type.rank.index]
return f'{self._insert_stmt} {src_vid}->{dst_vid}@{rank}:({",".join(props)});'
def value(self, ptype: str, col):
return f'"{col}"' if ptype == 'string' else f'{col}'
def parse_header(self, row):
"""
        Only parses the case where there is a single tag in each file
"""
for col in row:
if self._SRC_VID in col or self._DST_VID in col:
self._type = Edge()
self.parse_edge(row)
break
if self._VID in col:
self._type = Vertex()
self.parse_vertex(row)
break
if self._type is None:
raise ValueError(f'Invalid csv header: {",".join(row)}')
return self._create_stmt
def parse_edge(self, row):
props = []
name = ''
for i, col in enumerate(row):
if col == self._RANK:
self._type.rank = Rank(i)
continue
m = re.search(r':SRC_VID\((.*)\)', col)
if m:
self._type.src = VID(i, m.group(1))
continue
m = re.search(r':DST_VID\((.*)\)', col)
if m:
self._type.dst = VID(i, m.group(1))
continue
m = re.search(r'(\w+)\.(\w+):(\w+)', col)
if not m:
raise ValueError(f'Invalid csv header format {col}')
g1 = m.group(1)
if not name:
name = g1
assert name == g1, f'Different edge type {g1}'
props.append(Prop(i, m.group(2), m.group(3)))
self._type.name = name
self._type.props = props
pdecl = ','.join(p.name for p in props)
self._insert_stmt = f"INSERT EDGE {name}({pdecl}) VALUES"
pdecl = ','.join(f"`{p.name}` {p.ptype}" for p in props)
self._create_stmt = f"CREATE EDGE IF NOT EXISTS `{name}`({pdecl});"
def parse_vertex(self, row):
tag = Tag()
props = []
for i, col in enumerate(row):
m = re.search(r':VID\((.*)\)', col)
if m:
self._type.vid = VID(i, m.group(1))
continue
m = re.search(r'(\w+)\.(\w+):(\w+)', col)
if not m:
raise ValueError(f'Invalid csv header format {col}')
g1 = m.group(1)
if not tag.name:
tag.name = g1
assert tag.name == g1, f'Different tag name {g1}'
props.append(Prop(i, m.group(2), m.group(3)))
tag.props = props
self._type.tags = [tag]
pdecl = ','.join(p.name for p in tag.props)
self._insert_stmt = f"INSERT VERTEX {tag.name}({pdecl}) VALUES"
pdecl = ','.join(f"`{p.name}` {p.ptype}" for p in tag.props)
self._create_stmt = f"CREATE TAG IF NOT EXISTS `{tag.name}`({pdecl});"
if __name__ == '__main__':
for row in CSVImporter('../data/nba/player.csv'):
print(row)
| 2.265625
| 2
|
project/hijri_calendar_project/hijri_calendar_app/views.py
|
bilgrami/hijri-calendar
| 1
|
12778329
|
<filename>project/hijri_calendar_project/hijri_calendar_app/views.py
from django.shortcuts import render, get_object_or_404
from django.views.generic import TemplateView
from .models import HijriCalendar
from datetime import date
from helpers import cache_helper as ch
class HomePageView(TemplateView):
template_name = "index.html"
class AboutPageView(TemplateView):
template_name = "about.html"
# Add this view
class HolidayPageView(TemplateView):
@staticmethod
def cache_key_prefix():
return 'holiday-list:views'
def get(self, request, **kwargs):
self.cache = ch.CacheHelper(key_prefix=self.cache_key_prefix())
m = HijriCalendar.holiday_calendar.all()
holiday_calendars = m.filter(date_value__gte=date.today()) \
.order_by('date_value')
data = 'Note: Holidays earlier than today are not displayed'
key = ''
total_views = self.cache.increment(key)
return render(request, 'hijri_calendar_app/holiday.html',
{'holiday_calendars': holiday_calendars,
'data': data,
'cache_key': self.cache.get_key(key),
'cache_timeout': self.cache.get_timeout(),
'total_views': total_views})
class CalendarDetailPageView(TemplateView):
@staticmethod
def cache_key_prefix():
return 'calendar-date'
def get(self, request, date_value, **kwargs):
self.cache = ch.CacheHelper(key_prefix=self.cache_key_prefix())
data = get_object_or_404(HijriCalendar,
date_value=date_value)
key = f'{str(date_value)}:views'
total_views = self.cache.increment(key=key)
return render(request, 'hijri_calendar_app/calendar_detail.html',
{'data': data,
'cache_key': self.cache.get_key(key=key),
'cache_timeout': self.cache.get_timeout(),
'total_views': total_views})
| 2.046875
| 2
|
examples/basic/cm_assembly_playground/early_tests/assembly_testold.py
|
tatung/somo
| 27
|
12778330
|
<gh_stars>10-100
from pathlib import Path
import os
import pybullet as p
import pybullet_data
import time # for waiting
from pybullet_utils import bullet_client as bc
from pybullet_utils import urdfEditor as ed
p0 = bc.BulletClient(connection_mode=p.DIRECT)
p0.setAdditionalSearchPath(pybullet_data.getDataPath())
p1 = bc.BulletClient(connection_mode=p.DIRECT)
p1.setAdditionalSearchPath(pybullet_data.getDataPath())
# can also connect using different modes, GUI, SHARED_MEMORY, TCP, UDP, SHARED_MEMORY_SERVER, GUI_SERVER
finger_urdf_path = os.path.join(Path(__file__).parent, "abondance_finger.urdf")
finger0 = p0.loadURDF(finger_urdf_path)
finger1 = p1.loadURDF(finger_urdf_path)
ed0 = ed.UrdfEditor()
ed0.initializeFromBulletBody(finger1, p1._client) # xx todo: switch 0 and 1
ed1 = ed.UrdfEditor()
ed1.initializeFromBulletBody(finger0, p0._client)
parentLinkIndex = 0
jointPivotXYZInParent = [0, 0, 0]
jointPivotRPYInParent = [0, 0, 0]
jointPivotXYZInChild = [0, 0, 0]
jointPivotRPYInChild = [0, 0, 0]
newjoint = ed0.joinUrdf(
ed1,
parentLinkIndex,
jointPivotXYZInParent,
jointPivotRPYInParent,
jointPivotXYZInChild,
jointPivotRPYInChild,
p0._client,
p1._client,
)
newjoint.joint_type = p0.JOINT_FIXED
ed0.saveUrdf("combined.urdf")
# now that the urdf is merged, load and simulate it
# connect the main simulation client first, then add a white ground plane for
# pretty screen grabs and videos
physicsClient = p.connect(
    p.GUI
)  # p.GUI for graphical, or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(
    pybullet_data.getDataPath()
)  # defines the path used by p.loadURDF
planeId = p.loadURDF("plane.urdf")
world_scaling = 1.0
p.setGravity(0, 0, -world_scaling * 9.81)
p.setPhysicsEngineParameter(enableConeFriction=1)
p.setRealTimeSimulation(0)
startPos = [
0,
0,
0,
] # have to be careful to set this position such that the box and actuator just touch (to replicate experimental condition)
startOr = p.getQuaternionFromEuler([0, 0, 0])
assemblyId = p.loadURDF(
"combined.urdf",
basePosition=startPos,
baseOrientation=startOr,
physicsClientId=physicsClient,
)
time_step = 0.0001
p.setTimeStep(time_step)
n_steps = 10000
start_time = time.time()
for i in range(n_steps):
p.stepSimulation()
end_time = time.time()
print(f"execution time: {end_time-start_time}")
p.disconnect()
#
# print(p0._client)
# print(p1._client)
# print("p0.getNumBodies()=", p0.getNumBodies())
# print("p1.getNumBodies()=", p1.getNumBodies())
#
# pgui = bc.BulletClient(connection_mode=pybullet.GUI)
# pgui.configureDebugVisualizer(pgui.COV_ENABLE_RENDERING, 0)
#
# orn = [0, 0, 0, 1]
# ed0.createMultiBody([0, 0, 0], orn, pgui._client)
# pgui.setRealTimeSimulation(1)
#
# pgui.configureDebugVisualizer(pgui.COV_ENABLE_RENDERING, 1)
#
# while (pgui.isConnected()):
# pgui.getCameraImage(320, 200, renderer=pgui.ER_BULLET_HARDWARE_OPENGL)
# time.sleep(1. / 240.)
| 2.109375
| 2
|
lib/interactiveBrokers/extra.py
|
cmorgan/trading-with-python
| 24
|
12778331
|
'''
Created on May 8, 2013
Copyright: <NAME>
License: BSD
convenience functions for interactiveBrokers module
'''
from ib.ext.Contract import Contract
priceTicks = {1:'bid',2:'ask',4:'last',6:'high',7:'low',9:'close', 14:'open'}
timeFormat = "%Y%m%d %H:%M:%S"
dateFormat = "%Y%m%d"
def createContract(symbol,secType='STK',exchange='SMART',currency='USD'):
''' create contract object '''
c = Contract()
c.m_symbol = symbol
c.m_secType= secType
c.m_exchange = exchange
c.m_currency = currency
return c
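# Example (a sketch; the contract only describes an instrument, the actual
# requests still go through an ib connection established elsewhere):
#
#     spy = createContract('SPY')                      # US stock, SMART routing
#     eurusd = createContract('EUR', secType='CASH',
#                             exchange='IDEALPRO')     # FX pair, priced in USD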
| 2.40625
| 2
|
blocks/bevel_types.py
|
go2net/PythonBlocks
| 9
|
12778332
|
<filename>blocks/bevel_types.py
# ctypes and os shouldn't be re-exported.
import ctypes as _ctypes
import os as _os
# Part One: Type Assignments for G and Instrument Drivers, see spec table
# 3.1.1.
#
# Remark: The pointer and probably also the array variants are of no
# significance in Python because there is no native call-by-reference.
# However, as long as I'm not fully sure about this, they won't hurt.
def _type_dublet(ctypes_type):
return (ctypes_type, _ctypes.POINTER(ctypes_type))
def _type_triplet(ctypes_type):
return _type_dublet(ctypes_type) + (_ctypes.POINTER(ctypes_type),)
UInt32, PUInt32, AUInt32 = _type_triplet(_ctypes.c_ulong)
Int32, PInt32, AInt32 = _type_triplet(_ctypes.c_long)
UInt16, PUInt16, AUInt16 = _type_triplet(_ctypes.c_ushort)
Int16, PInt16, AInt16 = _type_triplet(_ctypes.c_short)
UInt8, PUInt8, AUInt8 = _type_triplet(_ctypes.c_ubyte)
Int8, PInt8, AInt8 = _type_triplet(_ctypes.c_byte)
Addr, PAddr, AAddr = _type_triplet(_ctypes.c_void_p)
Char, PChar, AChar = _type_triplet(_ctypes.c_char)
Byte, PByte, AByte = _type_triplet(_ctypes.c_ubyte)
Boolean, PBoolean, ABoolean = _type_triplet(UInt16)
Real32, PReal32, AReal32 = _type_triplet(_ctypes.c_float)
Real64, PReal64, AReal64 = _type_triplet(_ctypes.c_double)
# The following three type triplets are defined rather pathologically, both in
# the spec and the reference .h file. Therefore, I can't use _type_triplet.
Buf = PByte
PBuf = Buf
ABuf = _ctypes.POINTER(Buf)
String = _ctypes.c_char_p # PChar in the spec
PString = _ctypes.c_char_p # PChar in the spec
AString = _ctypes.POINTER(String)
# It is impractical to have Buf defined as an array of unsigned chars,
# because ctypes forces me then to cast the string buffer to an array type.
# The only semantic difference is that String is null terminated while Buf
# is not (as I understand it). However, in Python there is no difference.
# Since the memory representation is the same -- which is guaranteed by the C
# language specification -- the following Buf re-definitions are sensible:
Buf = PBuf = String
ABuf = _ctypes.POINTER(Buf)
Rsrc = String
PRsrc = String
ARsrc = _ctypes.POINTER(Rsrc)
Status, PStatus, AStatus = _type_triplet(Int32)
Version, PVersion, AVersion = _type_triplet(UInt32)
Object, PObject, AObject = _type_triplet(UInt32)
Session, PSession, ASession = _type_triplet(Object)
Attr = UInt32
ConstString = _ctypes.POINTER(Char)
# Part Two: Type Assignments for G only, see spec table 3.1.2. The
# difference to the above is of no significance in Python, so I use it here
# only for easier synchronisation with the spec.
AccessMode, PAccessMode = _type_dublet(UInt32)
BusAddress, PBusAddress = _type_dublet(UInt32)
BusSize = UInt32
AttrState, PAttrState = _type_dublet(UInt32)
# The following is weird, taken from news:<EMAIL>
VAList = _ctypes.POINTER(_ctypes.c_char)
EventType, PEventType, AEventType = _type_triplet(UInt32)
PAttr = _ctypes.POINTER(Attr)
AAttr = PAttr
EventFilter = UInt32
FindList, PFindList = _type_dublet(Object)
Event, PEvent = _type_dublet(Object)
KeyId, PKeyId = _type_dublet(String)
JobId, PJobId = _type_dublet(UInt32)
# Class of callback functions for event handling, first type is result type
if _os.name == 'nt':
Hndlr = _ctypes.WINFUNCTYPE(Status, Session, EventType, Event,
Addr)
else:
Hndlr = _ctypes.CFUNCTYPE(Status, Session, EventType, Event,
Addr)
| 1.882813
| 2
|
crawler/grant.gov/spiders/modules/item_properties.py
|
dmvieira/ETL-example
| 1
|
12778333
|
<reponame>dmvieira/ETL-example<gh_stars>1-10
# -*- coding: utf-8 -*-
import urllib2
import json
known_formats = 'pdf doc docx'.split()
def get_attachment_or_description(url):
f = urllib2.urlopen(url)
s = f.read()
f.close()
try:
data = json.loads(s, encoding='latin1')
except ValueError:
return (None, None)
returned_url = _get_attachment_url(data)
if returned_url:
return (None, returned_url)
return (_get_description(data), None)
def _get_attachment_url(data):
# try to find a full announcement attachment
for attachment in data.get('synopsisAttachments', []):
if (attachment['attachmentType'].lower() == 'full announcement'
and attachment['fileExt'].lower() in known_formats):
url = (
'http://www.grants.gov/grantsws/AttachmentDownload?attId=' +
str(attachment['id'])
)
return url
return None
def _get_description(data):
# As I didn't find a full announcement attachment, I'll compose a text
# content for loader to work on.
text = """Due Date: %s
Estimated Total Program Funding: US$ %s
Award Ceiling: US$ %s
Award Floor: US$ %s
Agency Name: %s
Description: %s""" % (
data.get('originalDueDate', ''),
data['synopsis'].get('estimatedFundingFormatted', ''),
data['synopsis'].get('awardCeiling', ''),
data['synopsis'].get('awardFloor', ''),
data['synopsis'].get('agencyName', ''),
data['synopsis'].get('fundingActivityCategoryDesc', ''))
return text
| 2.6875
| 3
|
skills/program-y-wide/test.py
|
stefanrer/commonbsecret
| 0
|
12778334
|
<filename>skills/program-y-wide/test.py<gh_stars>0
import requests
def to_dialogs(sentences):
utters = [{"text": sent, "user": {"user_type": "human"}} for sent in ["hi"] + sentences]
return {"dialogs": [{"utterances": utters, "bot_utterances": utters, "human_utterances": utters}]}
def main_test():
url = "http://0.0.0.0:8064/api/rest/v1.0/ask"
sentences = ["talk about you"]
for sent in sentences:
data = to_dialogs([sent])
response = requests.post(url, json=data).json()[0][0]
assert "Talking is my primary function." in response, print(sent, ".", response)
print("Success")
if __name__ == "__main__":
main_test()
| 2.921875
| 3
|
src/main/python/get_ip_ranges/source.py
|
jbowdre/SWIPAMforvRA8
| 0
|
12778335
|
<filename>src/main/python/get_ip_ranges/source.py<gh_stars>0
"""
Modified by <NAME> to support Solarwinds IPAM
Initial release: 11/10/2020
Copyright (c) 2020 VMware, Inc.
This product is licensed to you under the Apache License, Version 2.0 (the "License").
You may not use this product except in compliance with the License.
This product may include a number of subcomponents with separate copyright notices
and license terms. Your use of these subcomponents is subject to the terms and
conditions of the subcomponent's license, as noted in the LICENSE file.
"""
import requests
from vra_ipam_utils.ipam import IPAM
import logging
from orionsdk import SwisClient
import ipaddress
def handler(context, inputs):
ipam = IPAM(context, inputs)
IPAM.do_get_ip_ranges = do_get_ip_ranges
return ipam.get_ip_ranges()
def do_get_ip_ranges(self, auth_credentials, cert):
username = auth_credentials["privateKeyId"]
password = auth_credentials["privateKey"]
hostname = self.inputs["endpoint"]["endpointProperties"]["hostName"]
requests.packages.urllib3.disable_warnings()
swis = SwisClient(hostname, username, password)
result_ranges = []
qResult = swis.query("SELECT DISTINCT GroupID AS id, FriendlyName AS name, Address AS addressSpaceId, CIDR AS subnetPrefixLength, Comments AS description, i.CustomProperties.Gateway as gatewayAddress, i.CustomProperties.DNS_Servers as dnsServers, i.CustomProperties.Site_ID AS siteId FROM IPAM.GroupNode i WHERE GroupTypeText LIKE 'Subnet' AND i.CustomProperties.VRA_Range = TRUE")
for range in qResult['results']:
logging.info(f"Found subnet: {str(range['name'])}")
network = ipaddress.ip_network(str(range['addressSpaceId']) + '/' + str(range['subnetPrefixLength']))
range['ipVersion'] = 'IPv' + str(network.version)
range['startIPAddress'] = str(network[10])
range['endIPAddress'] = str(network[-6])
range['dnsServerAddresses'] = [server.strip() for server in str(range['dnsServers']).split(',')]
range['tags'] = [{
"key": "Site",
"value": range['siteId']
}]
result_ranges.append(range)
result = {
"ipRanges": result_ranges
}
return result
| 2.203125
| 2
|
Chapter_4/SCU_4_9.py
|
charliealpha094/Introduction-to-Python-Programming-for-Business-and-Social-Sciences-Applications
| 0
|
12778336
|
# Done by <NAME> (2020/09/23)
# SCU 4.9 - Exception Handling
try:
value_entered = int(input("Please, enter a number: "))
print("The number entered was: ", str(value_entered))
except ValueError:
print("Invalid entry! Please, enter a numeric character.")
| 3.96875
| 4
|
quorum/parse/parse.py
|
LSaldyt/quorum
| 0
|
12778337
|
from collections import defaultdict
from ..objects import Clause
from ..objects import Statement
from ..structs import KnowledgeMap
from ..objects import Pattern
def parse_clause(words):
assert len(words) == 3
name, relation, node = words
return Clause((name, relation, node))
def parse_chained(words):
chainDict = defaultdict(set)
if len(words) >= 4:
key, *chained = words
clausewords = chained[:3]
chained = chained[3:]
chainDict[key].add(parse_clause(clausewords))
recurse = parse_chained(chained)
for k, v in recurse.items():
if k == 'and':
k = key
for item in v:
chainDict[k].add(item)
elif len(words) > 0 and len(words) < 4:
raise SyntaxError('Clause file not correctly formatted: {}'.format(str(words)))
return chainDict
def parse_chained_clause(words):
assert len(words) >= 3
root = parse_clause(words[:3])
chainDict = parse_chained(words[3:])
return Statement(root, chainDict)
def parse_chained_clauses(sentence):
clauses = sentence.split(',')
return [parse_chained_clause(c.split()) for c in clauses]
def parse_pattern(text):
conditions, outcomes = text.split('then')
conditions = conditions.replace('if', '')
conditions = parse_chained_clauses(conditions)
outcomes = parse_chained_clauses(outcomes)
return Pattern(conditions, outcomes)
def parse_text(text):
kmap = KnowledgeMap()
for sentence in text.split('.'):
sentence = sentence.replace('\n', ' ')
words = sentence.split()
if len(words) == 0:
pass
elif words[0] == 'if':
kmap.teach(parse_pattern(sentence))
elif len(words) >= 3:
kmap.add(parse_chained_clause(words))
else:
raise SyntaxError('Clause file not correctly formatted: {}'.format(str(words)))
return kmap
def parse_file(filename):
with open(filename, 'r') as infile:
return parse_text(infile.read())
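# A minimal usage sketch (hypothetical clause text; plain statements are
# "<name> <relation> <node>" triples separated by '.', and "if ... then ..."
# sentences are parsed into Patterns and taught to the KnowledgeMap):
#
#     kmap = parse_text("socrates is human. if x is human then x is mortal.")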
| 2.84375
| 3
|
tests/model/test_provisioned_worker_pool_properties.py
|
yellowdog/yellowdog-sdk-python-public
| 0
|
12778338
|
<gh_stars>0
from yellowdog_client.model import ProvisionedWorkerPoolProperties
from .test_utils import should_serde
def test_serialize_empty():
obj_in_raw = ProvisionedWorkerPoolProperties()
obj_in_dict = {}
should_serde(obj_in_raw, obj_in_dict, ProvisionedWorkerPoolProperties)
def test_serialize_populated(provisioned_worker_pool_properties_raw, provisioned_worker_pool_properties_dict):
should_serde(provisioned_worker_pool_properties_raw, provisioned_worker_pool_properties_dict, ProvisionedWorkerPoolProperties)
| 2.140625
| 2
|
visitors/views.py
|
maxhamz/prieds_test_hospital_queue_be
| 0
|
12778339
|
from rest_framework.decorators import api_view
from rest_framework import status
from rest_framework.response import Response
from visitors.models import Visitor
from visitors.serializers import VisitorSerializer
# Create your views here.
@api_view(['GET', 'POST'])
def visitor_list(request, format=None):
if (request.method == 'GET'):
visitors = Visitor.objects.all()
serializer = VisitorSerializer(visitors, many=True)
return Response(serializer.data)
if(request.method == 'POST'):
serializer = VisitorSerializer(data=request.data)
if (serializer.is_valid()):
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def visitor_detail(request, pk, format=None):
try:
visitor = Visitor.objects.get(pk=pk)
except Visitor.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = VisitorSerializer(visitor)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = VisitorSerializer(visitor, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
visitor.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
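# Example requests (a sketch; the exact paths depend on how urls.py wires these
# views up, e.g. visitor_list at /visitors/ and visitor_detail at /visitors/<pk>/):
#
#     GET  /visitors/     -> list all visitors
#     POST /visitors/     -> create a visitor from the JSON body
#     GET  /visitors/1/   -> retrieve visitor 1; PUT updates it, DELETE removes it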
| 2.328125
| 2
|
plugins/modules/saos8_facts.py
|
ciena/ciena.saos8
| 2
|
12778340
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
"""
The module file for saos8_facts
"""
DOCUMENTATION = """
module: saos8_facts
short_description: Get facts about saos8 devices.
description:
- Collects facts from network devices running the saos8 operating system. This module
places the facts gathered in the fact tree keyed by the respective resource name. The
facts module will always collect a base set of facts from the device and can enable
or disable collection of additional facts.
version_added: 1.0.0
author:
- <NAME>
notes:
- Tested against SAOS rel_saos5170_8.6.5_ga076
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected to a given subset. Possible
values for this argument include all, default, config, and neighbors. Can specify
a list of values to include a larger subset. Values can also be used with an
initial C(M(!)) to specify that a specific subset should not be collected.
required: false
default: '!config'
gather_network_resources:
description:
- When supplied, this argument will restrict the facts collected to a given subset.
Possible values for this argument include all and the resources like interfaces.
Can specify a list of values to include a larger subset. Values can also be
used with an initial C(M(!)) to specify that a specific subset should not be
collected. Valid subsets are 'all', 'interfaces', 'neighbors'
required: false
"""
EXAMPLES = """
- name: Gather all facts
ciena.saos8.saos8_facts:
gather_subset: all
gather_network_resources: all
- name: collect config and default facts
ciena.saos8.saos8_facts:
gather_subset: config
- name: collect everything except the config
ciena.saos8.saos8_facts:
gather_subset: '!config'
"""
RETURN = """
ansible_net_config:
description: The running-config from the device
returned: when config is configured
type: str
ansible_net_model:
description: The device model string
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the device
returned: always
type: str
ansible_net_version:
description: The version of the software running
returned: always
type: str
ansible_net_neighbors:
description: The set of LLDP neighbors
returned: when interface is configured
type: list
ansible_net_gather_subset:
description: The list of subsets gathered by the module
returned: always
type: list
ansible_net_api:
description: The name of the transport
returned: always
type: str
ansible_net_python_version:
description: The Python version Ansible controller is using
returned: always
type: str
ansible_net_gather_network_resources:
description: The list of fact resource subsets collected from the device
returned: always
type: list
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ciena.saos8.plugins.module_utils.network.saos8.argspec.facts.facts import (
FactsArgs,
)
from ansible_collections.ciena.saos8.plugins.module_utils.network.saos8.facts.facts import (
Facts,
)
from ansible_collections.ciena.saos8.plugins.module_utils.network.saos8.saos8 import (
saos8_argument_spec,
)
def main():
"""
Main entry point for module execution
:returns: ansible_facts
"""
argument_spec = FactsArgs.argument_spec
argument_spec.update(saos8_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec, supports_check_mode=True
)
warnings = []
if module.params["gather_subset"] == "!config":
warnings.append(
"default value for `gather_subset` will be changed to `min` from `!config` v2.11 onwards"
)
result = Facts(module).get_facts()
ansible_facts, additional_warnings = result
warnings.extend(additional_warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == "__main__":
main()
| 2.328125
| 2
|
pipeline_ie/coref.py
|
vj1494/PipelineIE
| 15
|
12778341
|
<reponame>vj1494/PipelineIE
from stanza.server import CoreNLPClient
import pandas as pd
import neuralcoref
from pipeline_ie.config import Config
import time
class Coref:
def __init__(self, nlp, coref_mode, data, coref_output=False):
self.nlp = nlp
self.coref_mode = coref_mode
self.data = data
self.coref_output = coref_output
self.configuration = Config()
def input_data(self):
"""
Create a list of text from the column of given DataFrame.
NOTE: Any processing if required to be done on sentences can be written here.
:return:
- list_text: list
list of sentences
"""
col_name = self.configuration.config.get('file_directory', 'input_column_name')
list_text = self.data[col_name].astype(str).tolist()
return list_text
@staticmethod
def coref_output_file(texts):
"""
Write output after coreference resolution on given text to a csv file.
:param texts: list
list of texts.
"""
df_coref_resolved = pd.DataFrame(texts, columns=['Coref_Resolved_Text'])
df_coref_resolved.to_csv('Text_Coref.csv')
@staticmethod
def create_phrase(mention, ann):
"""
Create a list of tokens for given mention
:param mention: mention object
:param ann: annotation object
Annotation object contains all mentions and coref chains for given text
:return:
- phrase: list
phrase is a list containing all tokens for the given mention
"""
phrase = []
for i in range(mention.beginIndex, mention.endIndex):
phrase.append(ann.sentence[mention.sentenceIndex].token[i].word)
return phrase
def corenlp_coref_resolution(self, memory, timeout, properties):
"""
Perform coreference resolution on given text using Stanford CoreNLP
:param
- memory: str
- timeout: int
- properties: dict
:return:
- texts: list,
List of sentences resolved and unresolved by coreference resolution operation.
"""
# Start CoreNLP Server with required properties
with CoreNLPClient(pipeline='StanfordCoreNLP', timeout=timeout, memory=memory,
properties=properties) as client:
texts = self.input_data()
index = 0
time.sleep(10)
for text in texts:
doc = self.nlp(text)
modified_text = [sentence.string.strip() for sentence in doc.sents]
# submit the request to the server
ann = client.annotate(text)
# In each chain, replace the anaphora with the correct representative
for coref in ann.corefChain:
mts = [mention for mention in coref.mention]
representative = coref.representative
phrase_rep = self.create_phrase(mts[coref.representative], ann)
antecedent = ' '.join(word for word in phrase_rep)
check_rep = 0
for mention in coref.mention:
if check_rep == representative:
check_rep += 1
continue
phrase = self.create_phrase(mts[check_rep], ann)
anaphor = ' '.join(word for word in phrase)
anaphor = anaphor + ' '
antecedent = antecedent + ' '
modified_text[mention.sentenceIndex] = modified_text[mention.sentenceIndex].replace(anaphor,
antecedent)
check_rep += 1
modified_text = ' '.join(modified_text)
texts[index] = modified_text
index += 1
if self.coref_output is True:
self.coref_output_file(texts)
return texts
def neural_coref_resolution(self):
"""
Perform coreference resolution operation on given text using neuralcoref.
Supports domain specific coreference resolution as per the spacy model used.
:return:
- texts: list,
            List of sentences resolved and unresolved by coreference resolution operation.
"""
coref = neuralcoref.NeuralCoref(self.nlp.vocab)
self.nlp.add_pipe(coref, name='neuralcoref')
texts = self.input_data()
for index, text in enumerate(texts):
doc = self.nlp(text)
texts[index] = doc._.coref_resolved
if self.coref_output is True:
self.coref_output_file(texts)
return texts
def coref_resolution(self):
"""
Execute coreference resolution methodology as per the coref mode mentioned either explicitly or implicitly.
:return:
- texts: list.
"""
if self.coref_mode == "corenlp":
properties = self.configuration.corenlp_coref_props()
params = self.configuration.corenlp_params()
memory, timeout = params[0], params[1]
texts = self.corenlp_coref_resolution(memory, timeout, properties)
elif self.coref_mode == "neuralcoref":
texts = self.neural_coref_resolution()
return texts
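# A minimal usage sketch (assumptions: a spaCy English model is installed and the
# configured input_column_name matches the DataFrame column, taken here to be "text"):
#
#     import spacy, pandas as pd
#     nlp = spacy.load("en_core_web_sm")
#     df = pd.DataFrame({"text": ["John met Mary. He greeted her."]})
#     resolved_texts = Coref(nlp, "neuralcoref", df).coref_resolution()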
| 2.953125
| 3
|
scripts/howde_allsome_stats.py
|
rsharris/HowDeSBT-multi_make_bf
| 5
|
12778342
|
#!/usr/bin/env python
"""
Compute some special stats in a howdesbt allsome tree.
"""
from sys import argv,stdin,stdout,stderr,exit
from howde_tree_parse import read_howde_tree_file
def usage(s=None):
message = """
usage: cat annotated_tree_file | howde_allsome_stats [options]
--table print the table
(by default we don't print the table)
Input is a tree hierarchy file with three extra columns -- the number of
bits in the node's bitvectors, the number of 1s in the node's all
bitvector, and the number of 1s in the node's some bitvector.
node1.allsome.bf 40000 0 24205
*node2.allsome.bf 40000 0 14482
**node4.allsome.bf 40000 187 7867
***EXPERIMENT16.subsample.allsome.bf 40000 4351 0
***node8.allsome.bf 40000 267 4263
****EXPERIMENT1.subsample.allsome.bf 40000 1982 0
****EXPERIMENT15.subsample.allsome.bf 40000 2281 0
**node5.allsome.bf 40000 1 9957
***node9.allsome.bf 40000 12 5283
****EXPERIMENT19.subsample.allsome.bf 40000 3004 0
..."""
if (s == None): exit (message)
else: exit ("%s\n%s" % (s,message))
def main():
# parse the command line
reportTable = False
fileSpec = None
for arg in argv[1:]:
if ("=" in arg):
argVal = arg.split("=",1)[1]
if (arg == "--table"):
reportTable = True
elif (arg.startswith("--")):
usage("unrecognized option: %s" % arg)
else:
usage("unrecognized option: %s" % arg)
# read the tree
forest = read_howde_tree_file(stdin,keepTags=True)
assert (len(forest) != 0), "input has no tree"
# compute the stats
if (reportTable):
print "#node\tbits\tall\tsome\tpresent\tabsent\tunresolved"
sumPresent = sumAbsent = 0
for root in forest:
preOrder = root.pre_order()
for node in preOrder:
assert (len(node.tags) == 3)
node.bfSize = int(node.tags[0])
node.allOnes = int(node.tags[1])
node.someOnes = int(node.tags[2])
node.present = node.allOnes
node.unresolved = node.someOnes
parent = node.parent
if (parent == None):
node.absent = node.bfSize - (node.allOnes + node.someOnes)
else:
assert (node.bfSize == parent.bfSize)
node.absent = parent.unresolved - (node.allOnes + node.someOnes)
sumPresent += node.present
sumAbsent += node.absent
if (reportTable):
print "%s\t%d\t%d\t%d\t%d\t%d\t%d" \
% (node.name,
node.bfSize,node.allOnes,node.someOnes,
node.present,node.absent,node.unresolved)
denom = float(sumPresent+sumAbsent)
output = "present/absent = %d/%d = %.1f%%/%.1f%%" \
% (sumPresent,sumAbsent,100*sumPresent/denom,100*sumAbsent/denom)
if (reportTable): print >>stderr, output
else: print output
if __name__ == "__main__": main()
| 3.203125
| 3
|
plotter/agg_01_d050_weights_dts.py
|
kit-tm/fdeval
| 1
|
12778343
|
<filename>plotter/agg_01_d050_weights_dts.py<gh_stars>1-10
import logging, math, json, pickle, os
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from datetime import datetime
import matplotlib.patches as patches
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.gridspec as gridspec
import time
from heapq import heappush, heappop
logger = logging.getLogger(__name__)
from . import agg_2_utils as utils
SHOW_GREY = False
def plot(blob, **kwargs):
"""DTS functional evaluation"""
utils.EXPORT_BLOB = blob
class ParameterSet(object):
def __init__(self, run):
self.w_table = run.get('param_dts_weight_table')
self.w_link = run.get('param_dts_weight_link')
self.w_ctrl = run.get('param_dts_weight_ctrl')
self.label = r'%d-%d-%d' % (self.w_table, self.w_link, self.w_ctrl)
self.ctrl_overhead_percent = []
self.link_overhead_percent = []
self.table_overhead_percent = []
self.underutil_percent = []
self.runs = []
def add_result(self, run):
for switch in range(0, run.get('scenario_switch_cnt')):
try:
if run.get('dts_%d_table_overhead_percent' % (switch)) > 0:
#self.switches.append(Switch(run, switch))
self.ctrl_overhead_percent.append(run.get('dts_%d_ctrl_overhead_percent' % (switch)))
self.link_overhead_percent.append(run.get('dts_%d_link_overhead_percent' % (switch)))
self.table_overhead_percent.append(run.get('dts_%d_table_overhead_percent' % (switch)))
self.underutil_percent.append(run.get('dts_%d_underutil_percent' % (switch)))
self.runs.append((run, switch))
assert(len(self.ctrl_overhead_percent) == len(self.link_overhead_percent) == len(self.table_overhead_percent) == len(self.underutil_percent))
except:
pass
print("add failed", switch, run.get('scenario_switch_cnt'), self.label)
#print(run)
#exit()
def plotax(ax, data, **kwargs):
data_x = []
data_y = []
for i, v in enumerate(sorted(data)):
data_x.append(i)
data_y.append(v)
ax.plot(data_x, data_y, **kwargs)
includes = ['scenario_switch_cnt']
keywords = ['hit_timelimit',
'ctrl_overhead_percent',
'link_overhead_percent',
'table_overhead_percent',
'underutil_percent']
for keyword in keywords:
includes += blob.find_columns(keyword)
blob.include_parameters(**dict.fromkeys(includes, 1))
runs = blob.filter(**dict())
skipped = 0
use_seeds = set()
ignore_seeds = []
switchcnt = []
results = {}
for run in runs:
seed = run.get('param_topo_seed')
if run.get('hit_timelimit') and run.get('hit_timelimit') > 0:
skipped += 1
if not seed in ignore_seeds:
ignore_seeds.append(seed)
continue
for run in runs:
seed = run.get('param_topo_seed')
if not seed in ignore_seeds:
key = (run.get('param_dts_weight_table'), run.get('param_dts_weight_link'), run.get('param_dts_weight_ctrl'))
try:
results[key].add_result(run)
except KeyError:
results[key] = ParameterSet(run)
use_seeds.add(run.get('param_topo_seed'))
switchcnt.append(run.get('scenario_switch_cnt'))
print("len", len(runs))
print("max", max(switchcnt))
print("seeds", len(use_seeds))
print("ignore_seeds", len(ignore_seeds))
print("skipped", skipped)
# ---------------------------------------------
# Figure 1 (not used atm): results if only one of the three coefficients is used (e.g., wtable=1, others =0)
# ---------------------------------------------
if 0:
plt.close()
fig = plt.figure(figsize=(8, 10))
fig.tight_layout(pad=0)
ax1 = plt.subplot2grid((4, 3), (0, 0), colspan=2)
ax2 = plt.subplot2grid((4, 3), (1, 0), colspan=2)
ax3 = plt.subplot2grid((4, 3), (2, 0), colspan=2)
ax4 = plt.subplot2grid((4, 3), (3, 0), colspan=2)
cdf1 = plt.subplot2grid((4, 3), (0, 2))
cdf1.set_xlabel('Table overhead in %')
cdf2 = plt.subplot2grid((4, 3), (1, 2))
cdf2.set_xlabel('Link overhead in %')
cdf3 = plt.subplot2grid((4, 3), (2, 2))
cdf3.set_xlabel('Control overhead in %')
cdf4 = plt.subplot2grid((4, 3), (3, 2))
cdf4.set_xlabel('Combined overhead in %')
for ax in [cdf1, cdf2, cdf3, cdf4]:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
ax.set_ylabel('CDF')
ax.set_xlim(0,80)
ax.xaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.5)
ax.yaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.5)
for ax in [ax1, ax2, ax3, ax4]:
ax.set_ylim(0,55)
ax.xaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.5)
ax.yaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.5)
for m, color, marker, label in zip([(1,0,0), (0,1,0), (0,0,1)],
['red', 'green', 'blue'], ['^', 's', 'o'],
['wTable=1, wLink=0, wCtrl=0', 'wTable=0, wLink=1, wCtrl=0',
'wTable=0, wLink=0, wCtrl=1']):
result = results.get(m)
color=color
markevery=20
alpha = 1
combined = [(x+y+z)/3.0 for x,y,z in zip(result.ctrl_overhead_percent, result.link_overhead_percent, result.table_overhead_percent)]
plotax(ax1, result.table_overhead_percent,
color=color, alpha=alpha, marker=marker, markevery=markevery, ms=4, label=label)
plotax(ax2, result.link_overhead_percent,
color=color, alpha=alpha, marker=marker, markevery=markevery, ms=4, label=label)
plotax(ax3, result.ctrl_overhead_percent,
color=color, alpha=alpha, marker=marker, markevery=markevery, ms=4, label=label)
plotax(ax4, combined,
color=color, alpha=alpha, marker=marker, markevery=markevery, ms=4, label=label)
utils.plotcdf(cdf1, result.table_overhead_percent,
color=color, marker=marker, markevery=15, ms=4, alpha=alpha)
utils.plotcdf(cdf2, result.link_overhead_percent,
color=color, marker=marker, markevery=15, ms=4, alpha=alpha)
utils.plotcdf(cdf3, result.ctrl_overhead_percent,
color=color, marker=marker, markevery=15, ms=4, alpha=alpha)
utils.plotcdf(cdf4, combined,
color=color, marker=marker, markevery=15, ms=4, alpha=alpha)
"""
medium = -1
for i, v in enumerate(sorted(result.table_overhead_percent)):
if v > 20 and v < 30:
print(i, v)
medium = (i,v)
run, switch = result.runs[i]
v_link = run.get('dts_%d_link_overhead_percent' % (switch))
i_link = list(sorted(result.link_overhead_percent)).index(v_link)
medium_link = (i_link, v_link)
v_ctrl = run.get('dts_%d_ctrl_overhead_percent' % (switch))
i_ctrl = list(sorted(result.ctrl_overhead_percent)).index(v_ctrl)
medium_ctrl = (i_ctrl, v_ctrl)
run, switch = result.runs[medium[0]]
circle1 = plt.Circle(medium, 2, color='black')
ax1.add_artist(circle1)
circle2 = plt.Circle(medium_link, 2, color='black')
ax2.add_artist(circle2)
circle3 = plt.Circle(medium_ctrl, 2, color='black')
ax3.add_artist(circle3)
plt.show()
#run, switch = result.runs[result.table_overhead_percent.index(max(result.table_overhead_percent))]
#print(run, switch)
plt.close()
dts_fig = utils.plot_dts_utilization_over_time(blob, run, switch, filter=dict(
param_topo_seed=run.get('param_topo_seed'),
param_dts_weight_table=run.get('param_dts_weight_table'),
param_dts_weight_link=run.get('param_dts_weight_link'),
param_dts_weight_ctrl=run.get('param_dts_weight_ctrl')))
plt.show()
plt.close()
dts_fig = utils.plot_dts_utilization_over_time(blob, run, switch, filter=dict(
param_topo_seed=run.get('param_topo_seed'),
param_dts_weight_table=0,
param_dts_weight_link=0,
param_dts_weight_ctrl=1))
plt.show()
plt.close()
dts_fig = utils.plot_dts_utilization_over_time(blob, run, switch, filter=dict(
param_topo_seed=run.get('param_topo_seed'),
param_dts_weight_table=1,
param_dts_weight_link=1,
param_dts_weight_ctrl=1))
plt.show()
exit(1)
"""
ax1.legend()
ax1.set_ylabel('Table overhead in %')
ax1.set_xlabel('DTS experiment index sorted by table overhead')
ax2.legend()
ax2.set_ylabel('Link Overhead in %')
ax2.set_xlabel('DTS experiment index sorted by link overhead')
ax3.legend()
ax3.set_ylabel('Control Overhead in %')
ax3.set_xlabel('DTS experiment index sorted by control overhead')
ax4.legend()
ax4.set_ylabel('Combined Overhead in %')
ax4.set_xlabel('DTS experiment index sorted by combined overhead')
plt.subplots_adjust(wspace=0.1, hspace=0.5, top=.95, bottom=.05)
plt.show()
# ---------------------------------------------
# only cdfs
# ---------------------------------------------
if 1:
plt.close()
all_results = []
fig, axes = plt.subplots(2, 2, figsize=(8, 8))
fig.tight_layout(pad=0)
cdf1 = axes[0][0]
cdf1.set_xlabel(r'Normalized table overhead in \%', fontsize=15)
cdf2 = axes[0][1]
cdf2.set_xlabel(r'Normalized link overhead in \%', fontsize=15)
cdf3 = axes[1][0]
cdf3.set_xlabel(r'Normalized control overhead in \%', fontsize=15)
cdf4 =axes[1][1]
cdf4.set_xlabel(r'Aggregated score', fontsize=15)
for ax in [cdf1, cdf2, cdf3, cdf4]:
#ax.yaxis.tick_right()
#ax.yaxis.set_label_position("right")
ax.set_ylabel('CDF')
#ax.set_xlim(0,80)
ax.xaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.5)
ax.yaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.5)
for m, result in results.items():
#if 0 in m: continue;
result.label
combined = [(x+y+z)/3.0 for x,y,z in zip(result.ctrl_overhead_percent, result.link_overhead_percent, result.table_overhead_percent)]
color='lightgray'
alpha = 0.1
rating = sum(combined)/3.0
v1 = float(result.label.split('-')[0])
v2 = float(result.label.split('-')[1])
v3 = float(result.label.split('-')[2])
try:
all_results.append((rating, result, v1/v2, v2/v3))
except ZeroDivisionError:
all_results.append((rating, result, -1, -1))
if SHOW_GREY:
utils.plotcdf(cdf1, result.table_overhead_percent, color=color, alpha=alpha)
utils.plotcdf(cdf2, result.link_overhead_percent, color=color, alpha=alpha)
utils.plotcdf(cdf3, result.ctrl_overhead_percent, color=color, alpha=alpha)
utils.plotcdf(cdf4, combined, color=color, alpha=alpha)
for m, color, marker, label, linestyle in zip([(1,0,0), (0,1,0), (0,0,1), (6,2,1)],
['red', 'green', 'blue', 'black'], ['^', 's', 'o', '*'],
[utils.dts_weights(1,0,0), utils.dts_weights(0,1,0),
utils.dts_weights(0,0,1), utils.dts_weights(6,2,1) + '(best)'],
['-','-','-','--']):
result = results.get(m)
if result:
color=color
markevery=20
alpha = 0.6
combined = [(x+y+z)/3.0 for x,y,z in zip(result.ctrl_overhead_percent, result.link_overhead_percent, result.table_overhead_percent)]
utils.plotcdf(cdf1, result.table_overhead_percent,
color=color, marker=marker, markevery=15, ms=4, alpha=alpha, linestyle=linestyle, label=label)
utils.plotcdf(cdf2, result.link_overhead_percent,
color=color, marker=marker, markevery=15, ms=4, alpha=alpha, linestyle=linestyle, label=label)
utils.plotcdf(cdf3, result.ctrl_overhead_percent,
color=color, marker=marker, markevery=15, ms=4, alpha=alpha, linestyle=linestyle, label=label)
utils.plotcdf(cdf4, combined,
color=color, marker=marker, markevery=15, ms=4, alpha=1, linestyle=linestyle, label=label)
# sort results by rating
all_results = sorted(all_results, key=lambda x: x[0])
print("best scores:")
for i in range(0,20):
print(all_results[i][0], all_results[i][1].label, all_results[i][2:])
print("worst scores:")
for i in range(1,10):
            print(all_results[-i][0], all_results[-i][1].label, all_results[-i][2:])
print("some selected scores:")
for rating, result, _, _ in all_results:
if result.label in ['11-2-1', '5-2-1']:
print(rating, result.label)
handles, labels = cdf4.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper center', ncol=2, fontsize=12)
#plt.subplots_adjust(top=.9)
plt.subplots_adjust(wspace=0.2, hspace=0.2, left = 0.07, top=.87, bottom=.08)
utils.export(fig, 'weights_dts_cdfonly.pdf', folder='weights')
#plt.show()
# ---------------------------------------------
# Figure 2: all combinations of non-zero cost coefficients
# ---------------------------------------------
if 0:
plt.close()
all_results = []
fig = plt.figure(figsize=(8, 10))
fig.tight_layout(pad=0)
ax1 = plt.subplot2grid((4, 3), (0, 0), colspan=2)
ax2 = plt.subplot2grid((4, 3), (1, 0), colspan=2)
ax3 = plt.subplot2grid((4, 3), (2, 0), colspan=2)
ax4 = plt.subplot2grid((4, 3), (3, 0), colspan=2)
ax1.set_ylabel(r'Table overhead in \%', fontsize=12)
ax1.set_xlabel(r'DTS experiment index sorted by table overhead', fontsize=12)
ax2.set_ylabel(r'Link Overhead in \%', fontsize=12)
ax2.set_xlabel(r'DTS experiment index sorted by link overhead', fontsize=12)
ax3.set_ylabel(r'Control Overhead in \%', fontsize=12)
ax3.set_xlabel(r'DTS experiment index sorted by control overhead', fontsize=12)
ax4.set_ylabel(r'Weighted score', fontsize=12)
ax4.set_xlabel(r'DTS experiment index sorted by weighted score', fontsize=12)
cdf1 = plt.subplot2grid((4, 3), (0, 2))
cdf1.set_xlabel(r'Table overhead in \%', fontsize=12)
cdf2 = plt.subplot2grid((4, 3), (1, 2))
cdf2.set_xlabel(r'Link overhead in \%', fontsize=12)
cdf3 = plt.subplot2grid((4, 3), (2, 2))
cdf3.set_xlabel(r'Control overhead in \%', fontsize=12)
cdf4 = plt.subplot2grid((4, 3), (3, 2))
cdf4.set_xlabel(r'Weighted score in \%', fontsize=12)
for ax in [cdf1, cdf2, cdf3, cdf4]:
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
ax.set_ylabel('CDF')
ax.set_xlim(0,80)
ax.xaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.5)
ax.yaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.5)
for ax in [ax1, ax2, ax3, ax4]:
#ax.set_ylim(0,55)
ax.xaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.5)
ax.yaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.5)
#fig, ax = plt.subplots(4,1, figsize=(6, 10))
#fig.tight_layout(pad=2.7)
for m, result in results.items():
#if 0 in m: continue;
            # result.label encodes the weight triple as "wTable-wLink-wCtrl", e.g. "6-2-1"
combined = [(x+y+z)/3.0 for x,y,z in zip(result.ctrl_overhead_percent, result.link_overhead_percent, result.table_overhead_percent)]
color='lightgray'
alpha = 0.1
rating = sum(combined)
all_results.append((rating, result))
if SHOW_GREY:
plotax(ax1, result.table_overhead_percent, color=color, alpha=alpha)
plotax(ax2, result.link_overhead_percent, color=color, alpha=alpha)
plotax(ax3, result.ctrl_overhead_percent, color=color, alpha=alpha)
plotax(ax4, combined, color=color, alpha=alpha)
utils.plotcdf(cdf1, result.table_overhead_percent, color=color, alpha=alpha)
utils.plotcdf(cdf2, result.link_overhead_percent, color=color, alpha=alpha)
utils.plotcdf(cdf3, result.ctrl_overhead_percent, color=color, alpha=alpha)
utils.plotcdf(cdf4, combined, color=color, alpha=alpha)
for m, color, marker, label, linestyle in zip([(1,0,0), (0,1,0), (0,0,1), (6,2,1)],
['red', 'green', 'blue', 'black'], ['^', 's', 'o', '*'],
['wTable=1, wLink=0, wCtrl=0 (table only)', 'wTable=0, wLink=1, wCtrl=0 (link only)',
'wTable=0, wLink=0, wCtrl=1 (ctrl only)', 'wTable=6, wLink=2, wCtrl=1 (best combination)'],
['-','-','-','--']):
result = results.get(m)
if result:
                    markevery = 20
alpha = 1
combined = [(x+y+z)/3.0 for x,y,z in zip(result.ctrl_overhead_percent, result.link_overhead_percent, result.table_overhead_percent)]
plotax(ax1, result.table_overhead_percent,
color=color, alpha=alpha, marker=marker, markevery=markevery, ms=4, label=label, linestyle=linestyle)
plotax(ax2, result.link_overhead_percent,
color=color, alpha=alpha, marker=marker, markevery=markevery, ms=4, label=label, linestyle=linestyle)
plotax(ax3, result.ctrl_overhead_percent,
color=color, alpha=alpha, marker=marker, markevery=markevery, ms=4, label=label, linestyle=linestyle)
plotax(ax4, combined,
color=color, alpha=alpha, marker=marker, markevery=markevery, ms=4, label=label, linestyle=linestyle)
utils.plotcdf(cdf1, result.table_overhead_percent,
color=color, marker=marker, markevery=15, ms=4, alpha=alpha, linestyle=linestyle)
utils.plotcdf(cdf2, result.link_overhead_percent,
color=color, marker=marker, markevery=15, ms=4, alpha=alpha, linestyle=linestyle)
utils.plotcdf(cdf3, result.ctrl_overhead_percent,
color=color, marker=marker, markevery=15, ms=4, alpha=alpha, linestyle=linestyle)
utils.plotcdf(cdf4, combined,
color=color, marker=marker, markevery=15, ms=4, alpha=alpha, linestyle=linestyle)
# sort results by rating
all_results = sorted(all_results, key=lambda x: x[0])
print("best scores:")
for i in range(0,20):
print(all_results[i][0], all_results[i][1].label)
print("worst scores:")
for i in range(1,10):
print(all_results[-i][0], all_results[-i][1].label)
print("some selected scores:")
for rating, result in all_results:
if result.label in ['11-2-1', '5-2-1']:
print(rating, result.label)
handles, labels = ax4.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper center', ncol=2, fontsize=12)
plt.subplots_adjust(wspace=0.1, hspace=0.4, top=.9, bottom=.05)
utils.export(fig, 'weights_dts.pdf', folder='weights')
plt.show()
| 1.921875
| 2
|
Factory/tests/test_factory_method.py
|
klee1611/design_pattern
| 0
|
12778344
|
<gh_stars>0
from ..factory_method import ProductAFactory, ProductBFactory
class TestFactoryMethod:
def test_factory_method(self):
factory_a = ProductAFactory()
factory_b = ProductBFactory()
product_a_name = factory_a.get_product_name()
product_b_name = factory_b.get_product_name()
assert 'ConcreteProductA' == product_a_name
assert 'ConcreteProductB' == product_b_name
| 2.984375
| 3
|
sso/tests/test_access_log.py
|
uktrade/staff-sso
| 7
|
12778345
|
import json
import pytest
from freezegun import freeze_time
from sso.core.logging import create_x_access_log
from sso.tests.factories.user import UserFactory
class TestAppAccessLog:
@pytest.mark.django_db
@freeze_time("2017-06-22 15:50:00.000000+00:00")
    def test_log_without_user(self, rf, mocker):
mock_logger = mocker.patch("sso.core.logging.logger")
request = rf.get("/whatever/")
create_x_access_log(request, 200)
mock_logger.info.assert_called_once()
assert json.loads(mock_logger.info.call_args[0][0]) == {
"request_id": "",
"request_time": "2017-06-22 15:50:00",
"sso_user_id": None,
"local_user_id": None,
"path": "/whatever/",
"url": {"domain": "testserver"},
"status": 200,
"ip": None,
"message": "",
"service": "staff-sso test",
}
@pytest.mark.django_db
@freeze_time("2017-06-22 15:50:00.000000+00:00")
    def test_user_info_is_logged(self, rf, mocker):
mock_logger = mocker.patch("sso.core.logging.logger")
request = rf.get("/whatever/")
user = UserFactory()
request.user = user
create_x_access_log(request, 200, message="test message")
mock_logger.info.assert_called_once()
assert json.loads(mock_logger.info.call_args[0][0]) == {
"request_id": "",
"request_time": "2017-06-22 15:50:00",
"sso_user_id": str(user.user_id),
"local_user_id": user.id,
"path": "/whatever/",
"url": {"domain": "testserver"},
"status": 200,
"ip": None,
"message": "test message",
"service": "staff-sso test",
}
| 2.15625
| 2
|
BOJ_Parsing/main.py
|
alsrua7222/ToyPython
| 0
|
12778346
|
import parserBOJ
import openpyxl as xl
parse = parserBOJ.Parse()
wb = xl.Workbook()
ws = wb.active
ws.title = "sheet100"
# Column header names (Korean: problem number, problem title, solvers, submissions, accuracy)
col_names = ['문제 번호', '문제 제목', '맞힌 사람', '제출 횟수', '정답률']
for seq, name in enumerate(col_names):
ws.cell(row=1, column=seq+1, value=name)
row_num = 2
# Write the parsed data rows, starting at row 2 below the header row
for n, rows in enumerate(parse.processParsing(DEBUG=False, MAX_PAGE=150)):
for seq, value in enumerate(rows):
ws.cell(row=row_num+n, column=seq+1, value=value)
wb.save("BOJ_Parsing.xlsx")
wb.close()
| 2.75
| 3
|
euler/problem_45.py
|
jcthomassie/euler
| 0
|
12778347
|
# -*- coding: utf-8 -*-
"""
Triangular, pentagonal, and hexagonal
=====================================
https://projecteuler.net/problem=45
Triangle, pentagonal, and hexagonal numbers are generated by the following
formulae:
Triangle Tn=n(n+1)/2 1, 3, 6, 10, 15, ...
Pentagonal Pn=n(3n−1)/2 1, 5, 12, 22, 35, ...
Hexagonal Hn=n(2n−1) 1, 6, 15, 28, 45, ...
It can be verified that T285 = P165 = H143 = 40755.
Find the next triangle number that is also pentagonal and hexagonal.
"""
from typing import Iterator
from .utils import print_result
def generate_triangulars(n: int) -> Iterator[int]:
"""Generate triangular numbers starting from the Nth triangle number."""
while True:
yield n * (n + 1) // 2
n += 1
def generate_pentagonals(n: int) -> Iterator[int]:
"""Generate pentagonal numbers starting from the Nth pentagonal number."""
while True:
yield n * (3 * n - 1) // 2
n += 1
def generate_hexagonals(n: int) -> Iterator[int]:
"""Generate hexagonal numbers starting from the Nth hexagonal number."""
while True:
yield n * (2 * n - 1)
n += 1
@print_result
def solve() -> int:
generators = {
"t": generate_triangulars(286),
"p": generate_pentagonals(166),
"h": generate_hexagonals(144),
}
values = {key: next(generator) for key, generator in generators.items()}
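    # Repeatedly advance whichever sequence currently holds the smallest value;
    # the loop ends once all three sequences meet at the same number.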
while not values["t"] == values["p"] == values["h"]:
smallest = min(values, key=values.__getitem__)
values[smallest] = next(generators[smallest])
return values["t"]
if __name__ == "__main__":
solve()
| 3.96875
| 4
|
grumpyforms/tests/form_test.py
|
FelixSchwarz/grumpywidgets
| 0
|
12778348
|
<filename>grumpyforms/tests/form_test.py
# This file is a part of GrumpyWidgets.
# The source code contained in this file is licensed under the MIT license.
# See LICENSE.txt in the main project directory, for more information.
from pythonic_testcase import *
from grumpyforms.api import Form
from grumpyforms.fields import TextField
class FormTest(PythonicTestCase):
def test_forms_return_empty_path(self):
form = Form('foo')
assert_none(form.parent)
assert_equals((), form.path())
def test_can_build_new_context(self):
form = Form(children=[TextField('name')])
context = form.new_context()
assert_equals(['name'], list(context.children.keys()))
assert_equals({'name': None}, context.value)
assert_false(context.contains_errors())
def test_can_build_new_context_with_initial_values(self):
form = Form('foo', children=[TextField('name')])
context = form.new_context({'name': '<NAME>'})
assert_false(context.contains_errors())
assert_equals({'name': '<NAME>'}, context.initial_value)
| 2.21875
| 2
|
HandsOnDemo/DSS Example Code/models/FieldOperation.py
|
Patrick-iOS/DevelopWithDeere2019-Mannheim
| 1
|
12778349
|
import json
class FieldOperation(object):
def __init__(self, d):
if type(d) is str:
d = json.loads(d)
self.from_dict(d)
def from_dict(self, d):
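        # Rebuild the instance dict from the payload, recursively wrapping any
        # nested dicts so they can be read with attribute access.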
self.__dict__ = {}
for key, value in d.items():
if type(value) is dict:
value = FieldOperation(value)
self.__dict__[key] = value
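    # Class-level defaults; these remain as fallbacks for keys the payload omits,
    # since from_dict only sets instance attributes for keys present in the input.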
fieldOperationType = ""
adaptMachineType = ""
cropSeason = ""
startDate = ""
endDate = ""
links = ""
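# A minimal usage sketch (the payload shape below is hypothetical, for illustration only):
#     op = FieldOperation('{"fieldOperationType": "application", "links": {"self": "https://example"}}')
#     op.fieldOperationType  -> "application"
#     op.links.self          -> "https://example" (nested dicts become FieldOperation objects)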
| 3.015625
| 3
|
src/database/json_func.py
|
swordysrepo/youtube_discord_bot
| 1
|
12778350
|
# import json
# a_dictionary = {"d": 4}
# def add_to_json(channel):
# '''add new channel to the json list file'''
# with open("stored_youtube_channels.json", "r+") as file:
# data = json.load(file)
# data.update(a_dictionary)
# file.seek(0)
# json.dump(data, file)
# # json_tests.py
import json
from datetime import datetime
import os.path
from os import path
from src.config.exc import *
def add_to_channel_file(channel, channel_id, list="default"):
    '''Add a new channel to the JSON list file:
    select the correct file, read it, append the new entry, and save the file.
    '''
# with open(f"channels_{list}.json", "a+") as file:
# '''
# read the file
# append data to the data from the file
# save the file
# '''
time_obj = datetime.now()
base_list_structure = {
"channels":[
{
"channel_name":channel,
"date_added":time_obj.strftime("%m/%d/%y"),
"channel_id":channel_id
}
]
}
added_structure = {
"channel_name":channel,
"date_added":time_obj.strftime("%m/%d/%y"),
"channel_id":channel_id
}
# try:
# data = json.load(file)
# print(data)
# data.update(list_structure)
#if the file does not exist create it with a structure
    if not path.exists(f"channels_{list}.json"):
print("detected file doesn't exist, creating ..")
with open(f"channels_{list}.json", "w") as file:
json.dump(base_list_structure, file)
print("file created")
else:
with open(f"channels_{list}.json") as file:
data = json.load(file)
# check.check(json.dumps(data, indent=4, sort_keys=True))
# print(data)
val = []
for i in data["channels"]:
val.append(i["channel_name"].lower())
print(val)
if channel.lower() in val:
raise ChannelAlreadyEnteredException
else:
with open(f"channels_{list}.json", "w") as file:
data["channels"].append(added_structure)
json.dump(data, file)
def get_list(list_name):
''' return list of channels and channel ids from a stored list'''
with open(f"channels_{list_name}.json", "r") as file:
data = json.load(file)
print(json.dumps(data, indent=4, sort_keys=True))
channels = []
ids = []
for i in range(len(data["channels"])):
channel = data["channels"][i]["channel_name"]
id = data["channels"][i]["channel_id"]
channels.append(channel)
ids.append(id)
return channels,ids
def get_stored_lists():
'''get the names of every stored list
additional:
get how many channels are in each list
'''
import os
arr = os.listdir('.')
for filename in arr:
if "channels_" in filename:
print(filename)
def get_stored_channels(list_name):
'''return all of the channels in a specified list '''
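    # A minimal sketch, assuming the same file layout that get_list() reads:
    #     with open(f"channels_{list_name}.json", "r") as file:
    #         data = json.load(file)
    #     return [entry["channel_name"] for entry in data["channels"]]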
pass
# channels,channel_ids = get_list("test")
# print(f"channels are {channels}")
# print(f"ids are {channel_ids}")
| 3.359375
| 3
|