hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6f62159a43423725ef27c820b6b056f93c11c850 | 331 | py | Python | util/core.py | IRIS-Team/tchecker | 7f0b3218f8914ac3d96eadc8a7db1c2d23016503 | [
"MIT"
] | 17 | 2021-09-05T11:35:03.000Z | 2022-03-23T19:06:54.000Z | util/core.py | IRIS-Team/tchecker | 7f0b3218f8914ac3d96eadc8a7db1c2d23016503 | [
"MIT"
] | 2 | 2021-09-04T11:06:02.000Z | 2021-09-06T06:40:08.000Z | util/core.py | IRIS-Team/tchecker | 7f0b3218f8914ac3d96eadc8a7db1c2d23016503 | [
"MIT"
] | 5 | 2021-09-04T13:13:59.000Z | 2022-02-19T00:22:56.000Z | import json
colours = json.loads(open("theme/default.json").read()) | 25.461538 | 55 | 0.673716 | import json
colours = json.loads(open("theme/default.json").read())
class colours:
main = colours["main_color"]
sencondary = colours["sencondary"]
text = colours["text"]
darktext = colours["darktext"]
error = colours["error"]
def returnColor(string) -> str:
return f'{colours.main}{string}{colours.text}' | 61 | 156 | 46 |
6b1ed1845a4dbd185d9265d1d2db0d1dc492042a | 17,657 | py | Python | examples/demo_runner.py | funky23exe/habitat-sim | a72ba43593a57995972ba1521f6f7a20d122761c | [
"MIT"
] | null | null | null | examples/demo_runner.py | funky23exe/habitat-sim | a72ba43593a57995972ba1521f6f7a20d122761c | [
"MIT"
] | null | null | null | examples/demo_runner.py | funky23exe/habitat-sim | a72ba43593a57995972ba1521f6f7a20d122761c | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import multiprocessing
import os
import random
import time
from enum import Enum
import numpy as np
from PIL import Image
from settings import default_sim_settings, make_cfg
import habitat_sim
import habitat_sim.agent
import habitat_sim.bindings as hsim
from habitat_sim.physics import MotionType
from habitat_sim.utils.common import (
d3_40_colors_rgb,
download_and_unzip,
quat_from_angle_axis,
)
_barrier = None
| 40.497706 | 103 | 0.607408 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import multiprocessing
import os
import random
import time
from enum import Enum
import numpy as np
from PIL import Image
from settings import default_sim_settings, make_cfg
import habitat_sim
import habitat_sim.agent
import habitat_sim.bindings as hsim
from habitat_sim.physics import MotionType
from habitat_sim.utils.common import (
d3_40_colors_rgb,
download_and_unzip,
quat_from_angle_axis,
)
_barrier = None
class DemoRunnerType(Enum):
BENCHMARK = 1
EXAMPLE = 2
class DemoRunner:
def __init__(self, sim_settings, simulator_demo_type):
if simulator_demo_type == DemoRunnerType.EXAMPLE:
self.set_sim_settings(sim_settings)
def set_sim_settings(self, sim_settings):
self._sim_settings = sim_settings.copy()
def save_color_observation(self, obs, total_frames):
color_obs = obs["color_sensor"]
color_img = Image.fromarray(color_obs, mode="RGBA")
color_img.save("test.rgba.%05d.png" % total_frames)
def save_semantic_observation(self, obs, total_frames):
semantic_obs = obs["semantic_sensor"]
semantic_img = Image.new("P", (semantic_obs.shape[1], semantic_obs.shape[0]))
semantic_img.putpalette(d3_40_colors_rgb.flatten())
semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
semantic_img.save("test.sem.%05d.png" % total_frames)
def save_depth_observation(self, obs, total_frames):
if self._sim_settings["depth_sensor"]:
depth_obs = obs["depth_sensor"]
depth_img = Image.fromarray(
(depth_obs / 10 * 255).astype(np.uint8), mode="L"
)
depth_img.save("test.depth.%05d.png" % total_frames)
def output_semantic_mask_stats(self, obs, total_frames):
semantic_obs = obs["semantic_sensor"]
counts = np.bincount(semantic_obs.flatten())
total_count = np.sum(counts)
print(f"Pixel statistics for frame {total_frames}")
for object_i, count in enumerate(counts):
sem_obj = self._sim.semantic_scene.objects[object_i]
cat = sem_obj.category.name()
pixel_ratio = count / total_count
if pixel_ratio > 0.01:
print(f"obj_id:{sem_obj.id},category:{cat},pixel_ratio:{pixel_ratio}")
def init_agent_state(self, agent_id):
# initialize the agent at a random start state
agent = self._sim.initialize_agent(agent_id)
start_state = agent.get_state()
# force starting position on first floor (try 100 samples)
num_start_tries = 0
while start_state.position[1] > 0.5 and num_start_tries < 100:
start_state.position = self._sim.pathfinder.get_random_navigable_point()
num_start_tries += 1
agent.set_state(start_state)
if not self._sim_settings["silent"]:
print(
"start_state.position\t",
start_state.position,
"start_state.rotation\t",
start_state.rotation,
)
return start_state
def compute_shortest_path(self, start_pos, end_pos):
self._shortest_path.requested_start = start_pos
self._shortest_path.requested_end = end_pos
self._sim.pathfinder.find_path(self._shortest_path)
print("shortest_path.geodesic_distance", self._shortest_path.geodesic_distance)
def init_physics_test_scene(self, num_objects):
object_position = np.array(
[-0.569043, 2.04804, 13.6156]
) # above the castle table
# turn agent toward the object
print("turning agent toward the physics!")
agent_state = self._sim.get_agent(0).get_state()
agent_to_obj = object_position - agent_state.position
agent_local_forward = np.array([0, 0, -1.0])
flat_to_obj = np.array([agent_to_obj[0], 0.0, agent_to_obj[2]])
flat_dist_to_obj = np.linalg.norm(flat_to_obj)
flat_to_obj /= flat_dist_to_obj
# move the agent closer to the objects if too far (this will be projected back to floor in set)
if flat_dist_to_obj > 3.0:
agent_state.position = object_position - flat_to_obj * 3.0
# unit y normal plane for rotation
det = (
flat_to_obj[0] * agent_local_forward[2]
- agent_local_forward[0] * flat_to_obj[2]
)
turn_angle = math.atan2(det, np.dot(agent_local_forward, flat_to_obj))
agent_state.rotation = quat_from_angle_axis(turn_angle, np.array([0, 1.0, 0]))
# need to move the sensors too
for sensor in agent_state.sensor_states:
agent_state.sensor_states[sensor].rotation = agent_state.rotation
agent_state.sensor_states[
sensor
].position = agent_state.position + np.array([0, 1.5, 0])
self._sim.get_agent(0).set_state(agent_state)
# hard coded dimensions of maximum bounding box for all 3 default objects:
max_union_bb_dim = np.array([0.125, 0.19, 0.26])
# add some objects in a grid
object_lib_size = self._sim.get_physics_object_library_size()
object_init_grid_dim = (3, 1, 3)
object_init_grid = {}
assert (
object_lib_size > 0
), "!!!No objects loaded in library, aborting object instancing example!!!"
# clear the objects if we are re-running this initializer
for old_obj_id in self._sim.get_existing_object_ids():
self._sim.remove_object(old_obj_id)
for obj_id in range(num_objects):
# rand_obj_index = random.randint(0, object_lib_size - 1)
# rand_obj_index = 0 # overwrite for specific object only
rand_obj_index = self._sim_settings.get("test_object_index")
if rand_obj_index < 0: # get random object on -1
rand_obj_index = random.randint(0, object_lib_size - 1)
object_init_cell = (
random.randint(-object_init_grid_dim[0], object_init_grid_dim[0]),
random.randint(-object_init_grid_dim[1], object_init_grid_dim[1]),
random.randint(-object_init_grid_dim[2], object_init_grid_dim[2]),
)
while object_init_cell in object_init_grid:
object_init_cell = (
random.randint(-object_init_grid_dim[0], object_init_grid_dim[0]),
random.randint(-object_init_grid_dim[1], object_init_grid_dim[1]),
random.randint(-object_init_grid_dim[2], object_init_grid_dim[2]),
)
object_id = self._sim.add_object(rand_obj_index)
object_init_grid[object_init_cell] = object_id
object_offset = np.array(
[
max_union_bb_dim[0] * object_init_cell[0],
max_union_bb_dim[1] * object_init_cell[1],
max_union_bb_dim[2] * object_init_cell[2],
]
)
self._sim.set_translation(object_position + object_offset, object_id)
print(
"added object: "
+ str(object_id)
+ " of type "
+ str(rand_obj_index)
+ " at: "
+ str(object_position + object_offset)
+ " | "
+ str(object_init_cell)
)
def do_time_steps(self):
total_sim_step_time = 0.0
total_frames = 0
start_time = time.time()
action_names = list(
self._cfg.agents[self._sim_settings["default_agent"]].action_space.keys()
)
# load an object and position the agent for physics testing
if self._sim_settings["enable_physics"]:
self.init_physics_test_scene(
num_objects=self._sim_settings.get("num_objects")
)
print("active object ids: " + str(self._sim.get_existing_object_ids()))
time_per_step = []
while total_frames < self._sim_settings["max_frames"]:
if total_frames == 1:
start_time = time.time()
action = random.choice(action_names)
if not self._sim_settings["silent"]:
print("action", action)
start_step_time = time.time()
# apply kinematic or dynamic control to all objects based on their MotionType
if self._sim_settings["enable_physics"]:
obj_ids = self._sim.get_existing_object_ids()
for obj_id in obj_ids:
rand_nudge = np.random.uniform(-0.05, 0.05, 3)
if self._sim.get_object_motion_type(obj_id) == MotionType.KINEMATIC:
# TODO: just bind the trnslate function instead of emulating it here.
cur_pos = self._sim.get_translation(obj_id)
self._sim.set_translation(cur_pos + rand_nudge, obj_id)
elif self._sim.get_object_motion_type(obj_id) == MotionType.DYNAMIC:
self._sim.apply_force(rand_nudge, np.zeros(3), obj_id)
# get "interaction" time
total_sim_step_time += time.time() - start_step_time
observations = self._sim.step(action)
time_per_step.append(time.time() - start_step_time)
# get simulation step time without sensor observations
total_sim_step_time += self._sim._previous_step_time
if self._sim_settings["save_png"]:
if self._sim_settings["color_sensor"]:
self.save_color_observation(observations, total_frames)
if self._sim_settings["depth_sensor"]:
self.save_depth_observation(observations, total_frames)
if self._sim_settings["semantic_sensor"]:
self.save_semantic_observation(observations, total_frames)
state = self._sim.last_state()
if not self._sim_settings["silent"]:
print("position\t", state.position, "\t", "rotation\t", state.rotation)
if self._sim_settings["compute_shortest_path"]:
self.compute_shortest_path(
state.position, self._sim_settings["goal_position"]
)
if self._sim_settings["compute_action_shortest_path"]:
self._action_shortest_path.requested_start.position = state.position
self._action_shortest_path.requested_start.rotation = state.rotation
self._action_pathfinder.find_path(self._action_shortest_path)
print(
"len(action_shortest_path.actions)",
len(self._action_shortest_path.actions),
)
if (
self._sim_settings["semantic_sensor"]
and self._sim_settings["print_semantic_mask_stats"]
):
self.output_semantic_mask_stats(observations, total_frames)
total_frames += 1
end_time = time.time()
perf = {}
perf["total_time"] = end_time - start_time
perf["frame_time"] = perf["total_time"] / total_frames
perf["fps"] = 1.0 / perf["frame_time"]
perf["time_per_step"] = time_per_step
perf["avg_sim_step_time"] = total_sim_step_time / total_frames
return perf
def print_semantic_scene(self):
if self._sim_settings["print_semantic_scene"]:
scene = self._sim.semantic_scene
print(f"House center:{scene.aabb.center} dims:{scene.aabb.sizes}")
for level in scene.levels:
print(
f"Level id:{level.id}, center:{level.aabb.center},"
f" dims:{level.aabb.sizes}"
)
for region in level.regions:
print(
f"Region id:{region.id}, category:{region.category.name()},"
f" center:{region.aabb.center}, dims:{region.aabb.sizes}"
)
for obj in region.objects:
print(
f"Object id:{obj.id}, category:{obj.category.name()},"
f" center:{obj.aabb.center}, dims:{obj.aabb.sizes}"
)
input("Press Enter to continue...")
def init_common(self):
self._cfg = make_cfg(self._sim_settings)
scene_file = self._sim_settings["scene"]
if (
not os.path.exists(scene_file)
and scene_file == default_sim_settings["test_scene"]
):
print(
"Test scenes not downloaded locally, downloading and extracting now..."
)
download_and_unzip(default_sim_settings["test_scene_data_url"], ".")
print("Downloaded and extracted test scenes data.")
self._sim = habitat_sim.Simulator(self._cfg)
random.seed(self._sim_settings["seed"])
self._sim.seed(self._sim_settings["seed"])
# initialize the agent at a random start state
start_state = self.init_agent_state(self._sim_settings["default_agent"])
return start_state
def _bench_target(self, _idx=0):
self.init_common()
best_perf = None
for _ in range(3):
if _barrier is not None:
_barrier.wait()
if _idx == 0:
_barrier.reset()
perf = self.do_time_steps()
# The variance introduced between runs is due to the worker threads
# being interrupted a different number of times by the kernel, not
# due to difference in the speed of the code itself. The most
# accurate representation of the performance would be a run where
# the kernel never interrupted the workers, but this isn't
# feasible, so we just take the run with the least number of
# interrupts (the fastest) instead.
if best_perf is None or perf["frame_time"] < best_perf["frame_time"]:
best_perf = perf
self._sim.close()
del self._sim
return best_perf
@staticmethod
def _pool_init(b):
global _barrier
_barrier = b
def benchmark(self, settings):
self.set_sim_settings(settings)
nprocs = settings["num_processes"]
barrier = multiprocessing.Barrier(nprocs)
with multiprocessing.Pool(
nprocs, initializer=self._pool_init, initargs=(barrier,)
) as pool:
perfs = pool.map(self._bench_target, range(nprocs))
res = {k: [] for k in perfs[0].keys()}
for p in perfs:
for k, v in p.items():
res[k] += [v]
return dict(
frame_time=sum(res["frame_time"]),
fps=sum(res["fps"]),
total_time=sum(res["total_time"]) / nprocs,
avg_sim_step_time=sum(res["avg_sim_step_time"]) / nprocs,
)
def example(self):
start_state = self.init_common()
# initialize and compute shortest path to goal
if self._sim_settings["compute_shortest_path"]:
self._shortest_path = hsim.ShortestPath()
self.compute_shortest_path(
start_state.position, self._sim_settings["goal_position"]
)
# set the goal headings, and compute action shortest path
if self._sim_settings["compute_action_shortest_path"]:
agent_id = self._sim_settings["default_agent"]
goal_headings = self._sim_settings["goal_headings"]
self._action_pathfinder = self._sim.make_action_pathfinder(agent_id)
self._action_shortest_path = hsim.MultiGoalActionSpaceShortestPath()
self._action_shortest_path.requested_start.position = start_state.position
self._action_shortest_path.requested_start.rotation = start_state.rotation
# explicitly reset the start position
self._shortest_path.requested_start = start_state.position
# initialize the requested ends when computing the action shortest path
next_goal_idx = 0
while next_goal_idx < len(goal_headings):
sampled_pos = self._sim.pathfinder.get_random_navigable_point()
self._shortest_path.requested_end = sampled_pos
if (
self._sim.pathfinder.find_path(self._shortest_path)
and self._shortest_path.geodesic_distance < 5.0
and self._shortest_path.geodesic_distance > 2.5
):
self._action_shortest_path.requested_ends.append(
hsim.ActionSpacePathLocation(
sampled_pos, goal_headings[next_goal_idx]
)
)
next_goal_idx += 1
self._shortest_path.requested_end = self._sim_settings["goal_position"]
self._sim.pathfinder.find_path(self._shortest_path)
self._action_pathfinder.find_path(self._action_shortest_path)
print(
"len(action_shortest_path.actions)",
len(self._action_shortest_path.actions),
)
# print semantic scene
self.print_semantic_scene()
perf = self.do_time_steps()
self._sim.close()
del self._sim
return perf
| 16,506 | 485 | 46 |
0c25ee0e646d54847337890d8e0fcb21c9e410f2 | 516 | py | Python | Util.py | WhatsFish/txt2json | 7fa998031a0901bf914f3e3268ce31c3b3894de2 | [
"MIT"
] | null | null | null | Util.py | WhatsFish/txt2json | 7fa998031a0901bf914f3e3268ce31c3b3894de2 | [
"MIT"
] | null | null | null | Util.py | WhatsFish/txt2json | 7fa998031a0901bf914f3e3268ce31c3b3894de2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 17 15:05:43 2020
@author: lihaoyang03
"""
| 17.2 | 35 | 0.587209 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 17 15:05:43 2020
@author: lihaoyang03
"""
def asc_sort(ages):
list.sort(ages)
return ages;
def desc_sort(ages):
list.sort(ages,reverse = True)
return ages;
def test_asc_sort(ages):
for i in range(len(ages)-1):
if ages[i] > ages[i+1]:
return False
return True
def test_desc_sort(ages):
for i in range(len(ages)-1):
if ages[i] < ages[i+1]:
return False
return True
| 305 | 0 | 91 |
9893ddee6bbb747785141d0ec8f728428c20fbdd | 2,074 | py | Python | santa.py | alexander-travov/synchro | 74856e68bedfb2680ae4f2af191cb8220412bbde | [
"MIT"
] | null | null | null | santa.py | alexander-travov/synchro | 74856e68bedfb2680ae4f2af191cb8220412bbde | [
"MIT"
] | null | null | null | santa.py | alexander-travov/synchro | 74856e68bedfb2680ae4f2af191cb8220412bbde | [
"MIT"
] | null | null | null | import sys
import time
import random
from sync_utils import Thread, Semaphore, Barrier, watch
SANTA = Semaphore(0)
NUM_DEERS = 0
DEER_MUTEX = Semaphore(1)
SLEDGE = Semaphore(0)
SLEDGE_READY = Semaphore(0)
BARRIER = Barrier(9)
NUM_ELVES = 0
ELF_MUTEX = Semaphore(1)
ELF_MULTIPLEX = Semaphore(3)
HELP = Semaphore(0)
watch(main)
| 20.74 | 57 | 0.575699 | import sys
import time
import random
from sync_utils import Thread, Semaphore, Barrier, watch
SANTA = Semaphore(0)
NUM_DEERS = 0
DEER_MUTEX = Semaphore(1)
SLEDGE = Semaphore(0)
SLEDGE_READY = Semaphore(0)
BARRIER = Barrier(9)
NUM_ELVES = 0
ELF_MUTEX = Semaphore(1)
ELF_MULTIPLEX = Semaphore(3)
HELP = Semaphore(0)
def raindeer():
global NUM_DEERS
while True:
time.sleep(5 + 3*random.random())
rudolf = False
DEER_MUTEX.acquire()
NUM_DEERS += 1
sys.stdout.write("Deer arrives.\n")
if NUM_DEERS == 9:
rudolf = True
sys.stdout.write("Deers wake Santa.\n")
SANTA.signal()
DEER_MUTEX.release()
SLEDGE.wait()
sys.stdout.write("Deer gets hitched.\n")
BARRIER.wait()
if rudolf:
SLEDGE_READY.signal()
def santa():
global NUM_DEERS
global NUM_ELVES
while True:
SANTA.wait()
DEER_MUTEX.acquire()
if NUM_DEERS == 9:
NUM_DEERS = 0
DEER_MUTEX.release()
# prepare sledge
sys.stdout.write("Santa prepares sledges.\n")
SLEDGE.release(9)
SLEDGE_READY.wait()
sys.stdout.write("Ho-ho-ho.\n")
continue
else:
DEER_MUTEX.release()
# help elves
sys.stdout.write("Santa helps elves.\n")
HELP.signal(3)
ELF_MUTEX.acquire()
NUM_ELVES = 0
ELF_MUTEX.release()
ELF_MULTIPLEX.release(3)
def elf():
global NUM_ELVES
ELF_MULTIPLEX.acquire()
sys.stdout.write("Elf wants help.\n")
ELF_MUTEX.acquire()
NUM_ELVES += 1
if NUM_ELVES == 3:
sys.stdout.write("Elves wake Santa.\n")
SANTA.signal()
ELF_MUTEX.release()
else:
ELF_MUTEX.release()
HELP.wait()
# get help
sys.stdout.write("Elf gets help.\n")
def main():
Thread(target=santa)
for i in range(9):
Thread(raindeer)
while True:
time.sleep(random.random())
Thread(elf)
watch(main)
| 1,646 | 0 | 92 |
02d35ea48440a2683538e39ea302a054c2d4db7f | 790 | py | Python | src/get_original_data.py | RosalRicardo/RTRGAN | 6f4551ab8445367f8b9c711f41f15dd465abaef1 | [
"MIT"
] | null | null | null | src/get_original_data.py | RosalRicardo/RTRGAN | 6f4551ab8445367f8b9c711f41f15dd465abaef1 | [
"MIT"
] | null | null | null | src/get_original_data.py | RosalRicardo/RTRGAN | 6f4551ab8445367f8b9c711f41f15dd465abaef1 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
| 52.666667 | 98 | 0.725316 | import pandas as pd
import numpy as np
def get_original_data(df_transformed, df_orig, ohe, scaler):
# df_int = df_orig.select_dtypes(['float','integer'])
df_ohe_int = df_transformed[:, :df_orig.select_dtypes(['float', 'integer']).shape[1]]
df_ohe_int = scaler.inverse_transform(df_ohe_int)
df_ohe_cats = df_transformed[:, df_orig.select_dtypes(['float', 'integer']).shape[1]:]
df_ohe_cats = ohe.inverse_transform(df_ohe_cats)
# df_income = df_transformed[:,-1]
# df_ohe_cats = np.hstack((df_ohe_cats, df_income.reshape(-1,1)))
df_int = pd.DataFrame(df_ohe_int, columns=df_orig.select_dtypes(['float', 'integer']).columns)
df_cat = pd.DataFrame(df_ohe_cats, columns=df_orig.select_dtypes('object').columns)
return pd.concat([df_int, df_cat], axis=1)
| 728 | 0 | 23 |
15be091dc287f310c631199352d615a271f2d3a6 | 3,126 | py | Python | axfr-test.py | internetwache/Python-AXFR-Test | 626c27d46b1f2f7fa76648cb771d48bc1f9fd9a3 | [
"MIT"
] | 52 | 2015-03-03T13:52:19.000Z | 2022-01-06T03:37:50.000Z | axfr-test.py | codingo/Python-AXFR-Test | 3c78829c8884ebcb4861e3237f756a1b21e41862 | [
"MIT"
] | null | null | null | axfr-test.py | codingo/Python-AXFR-Test | 3c78829c8884ebcb4861e3237f756a1b21e41862 | [
"MIT"
] | 25 | 2015-03-29T12:35:28.000Z | 2022-01-15T21:43:52.000Z | #!/usr/bin/python
import argparse
import dns.resolver
import dns.query
import dns.zone
import os
import sys
from multiprocessing import Pool
INPUTFILE = sys.stdin
OUTPUTFILE = sys.stdout
LOGFILE = sys.stderr
PROCESSES = 20
if __name__ == '__main__':
main()
| 30.647059 | 141 | 0.633397 | #!/usr/bin/python
import argparse
import dns.resolver
import dns.query
import dns.zone
import os
import sys
from multiprocessing import Pool
INPUTFILE = sys.stdin
OUTPUTFILE = sys.stdout
LOGFILE = sys.stderr
PROCESSES = 20
def checkaxfr(domain):
domain = domain.strip()
try:
ns_query = dns.resolver.query(domain,'NS')
for ns in ns_query.rrset:
nameserver = str(ns)[:-1]
if nameserver is None or nameserver == "":
continue
try:
axfr = dns.query.xfr(nameserver, domain, lifetime=5)
try:
zone = dns.zone.from_xfr(axfr)
if zone is None:
continue
LOGFILE.write("Success: " + domain + " @ " + nameserver + "\n")
LOGFILE.flush()
OUTPUTFILE.write("Success: " + domain + " @ " + nameserver + "\n")
OUTPUTFILE.flush()
for name, node in zone.nodes.items():
rdatasets = node.rdatasets
for rdataset in rdatasets:
OUTPUTFILE.write(str(name) + " " + str(rdataset) + "\n")
OUTPUTFILE.flush()
except Exception as e:
continue
except Exception as e:
continue
except Exception as e:
pass
LOGFILE.write("Finished: " + domain + "\n")
LOGFILE.flush()
def main():
global PROCESSES, LOGFILE, OUTPUTFILE, INPUTFILE
parser = argparse.ArgumentParser(description='Check domains\' nameservers for public AXFR')
parser.add_argument('-i', '--inputfile', type=str, nargs="?", default=sys.stdin, help='Inputfile to read domains from. Default: stdin')
parser.add_argument('-o', '--outputfile', type=str, nargs="?", default=sys.stdout, help='Outputfile to write zonedata to. Default: stdout')
parser.add_argument('-l', '--logfile', type=str, nargs="?", default=sys.stderr, help="Logfile to use. Default: stderr")
parser.add_argument('-p', '--processes', type=int, nargs="?", default=20, help='Processes to use. Default: 20')
parser.add_argument('-d', '--domain', type=str, nargs="?", help="Domain to check. Ignored if -i is used.")
args = parser.parse_args()
if args.processes <=0:
print("Number of processes must be greater than zero.")
sys.exit(1)
PROCESSES = args.processes
if not str(type(args.inputfile)) == "<class '_io.TextIOWrapper'>":
if not os.path.isfile(args.inputfile):
print("Inputfile does not exist.")
sys.exit(1)
domains = open(args.inputfile, "r").readlines()
else:
if not args.domain is None:
domains = [args.domain]
else:
domains = args.inputfile.readlines()
if not str(type(args.outputfile)) == "<class '_io.TextIOWrapper'>":
try:
OUTPUTFILE = open(args.outputfile, "w")
except:
print("Outputfile cannot be created.")
sys.exit(1)
else:
OUTPUTFILE = args.outputfile
if not str(type(args.logfile)) == "<class '_io.TextIOWrapper'>":
try:
LOGFILE = open(args.logfile, "w")
except:
print("Logfile cannot be created.")
sys.exit(1)
else:
LOGFILE = args.logfile
pool = Pool(processes=PROCESSES)
pool.map(checkaxfr, domains)
if __name__ == '__main__':
main()
| 2,818 | 0 | 46 |
4e2d51086f3f82db4205166399a4a1197e55e89d | 1,226 | py | Python | data_utils/convert_imgs_to_jpg.py | sun-yitao/GrabAIChallenge | 05946339e5a478216d7a9234e29e9bd7af5b3492 | [
"MIT"
] | 10 | 2019-07-05T05:28:30.000Z | 2020-09-15T02:47:16.000Z | data_utils/convert_imgs_to_jpg.py | sun-yitao/GrabAIChallenge | 05946339e5a478216d7a9234e29e9bd7af5b3492 | [
"MIT"
] | 6 | 2019-11-18T12:59:22.000Z | 2022-02-10T00:23:00.000Z | data_utils/convert_imgs_to_jpg.py | sun-yitao/GrabAIChallenge | 05946339e5a478216d7a9234e29e9bd7af5b3492 | [
"MIT"
] | 2 | 2019-07-17T15:12:14.000Z | 2020-04-15T19:06:41.000Z | """Script to convert non-jpgs in a folder to jpg"""
import os
from pathlib import Path
from PIL import Image
from glob import glob
from tqdm import tqdm
from shutil import move
cwd = Path.cwd()
DATA_DIR = cwd.parent / 'data' / 'stanford-car-dataset-by-classes-folder' / 'car_data' / 'new_data'
if __name__ == '__main__':
convert_png_to_jpg() | 28.511628 | 99 | 0.556281 | """Script to convert non-jpgs in a folder to jpg"""
import os
from pathlib import Path
from PIL import Image
from glob import glob
from tqdm import tqdm
from shutil import move
cwd = Path.cwd()
DATA_DIR = cwd.parent / 'data' / 'stanford-car-dataset-by-classes-folder' / 'car_data' / 'new_data'
def convert_png_to_jpg():
for image in glob(str(DATA_DIR / '**' / '*.png'), recursive=True):
image = Path(image)
print(image)
try:
im = Image.open(str(image))
rgb_im = im.convert('RGB')
rgb_im.save(str(image).replace('png', 'jpg'))
except:
print(f'Failed: {image}')
continue
def convert_non_jpg_to_jpg():
for image in tqdm(glob(str(DATA_DIR / '**' / '*'), recursive=True)):
image = Path(image)
if image.suffix != '.jpg':
try:
im = Image.open(str(image))
rgb_im = im.convert('RGB')
rgb_im.save(str(image).replace(image.suffix, '.jpg'))
print('Converted')
except Exception as e:
print(e)
print(f'Failed: {image}')
continue
if __name__ == '__main__':
convert_png_to_jpg() | 829 | 0 | 46 |
f06408f512996c1c23fa1eb9aa58df5911971391 | 3,169 | py | Python | test_learnable_histogram.py | donikv/IlluminationBase | 4aade52bb8a1065f10b94ffda09645a681d8160c | [
"MIT"
] | null | null | null | test_learnable_histogram.py | donikv/IlluminationBase | 4aade52bb8a1065f10b94ffda09645a681d8160c | [
"MIT"
] | null | null | null | test_learnable_histogram.py | donikv/IlluminationBase | 4aade52bb8a1065f10b94ffda09645a681d8160c | [
"MIT"
] | null | null | null | import learnable_histogram
import TauDataset
import data_processing as dp
import numpy as np
import tensorflow as tf
import losses
import histogram
import visualizer
import CubeDataset
from report import print_report, error_statistics
mode = 'rb'
# path = "/media/donik/Disk/intel_tau/paths_field.txt"
# imgs = load_image_names(path, base_path='/media/donik/Disk/intel_tau')
# count = len(imgs)
# imgs_test = imgs[int(count * 0.9):]
import CubeDataset
from sklearn.model_selection import train_test_split
path = "/media/donik/Slowpoke/fax/Cube+/paths.txt"
paths = load_image_names(path, base_path="/media/donik/Slowpoke/fax/Cube+")
_, paths = train_test_split(paths, train_size=0.8, random_state=69)
gts = np.loadtxt("/media/donik/Slowpoke/fax/Cube+/cube+_gt.txt")
indices = np.array(list(map(lambda x: int(x[x.rfind('/') + 1:-4]) - 1, paths)))
ds = CubeDataset.regression_dataset(paths, indices, type=CubeDataset.TEST, bs=1, cache=False,
uv=False,
gts=gts)
# ds = TauDataset.regression_dataset(imgs_test, dp.TEST, bs=1, uv=True)
ds = ds.map(map_fn)
model, hist = learnable_histogram.build_model(TauDataset.IMG_HEIGHT // 2, TauDataset.IMG_WIDTH // 2, 5, 256,
range_init=(0, 1), w_init=1 / 64, out=2, activation='relu')
# model, hist = learnable_histogram.build_simple_model(TauDataset.IMG_HEIGHT // 2, TauDataset.IMG_WIDTH // 2, 3, 64,
# range_init=(-5, 5), w_init=1 / 32)
checkpoint_path = f"/home/donik/Desktop/models/training_08_11/hist_simple_2_cube.ckpt"
# checkpoint_path = f"/home/donik/Desktop/models/training_06_11/hist_simple_tau.ckpt"
model.load_weights(checkpoint_path)
learnable_histogram.plot_histogram(hist)
coss = []
for img, mask in iter(ds):
pred = model.predict(img)
if mode == 'uv':
pred_rgb = histogram.from_uv(pred)
mask_rgb = histogram.from_uv(mask)
else:
pred_rgb = tf.stack([pred[..., 0], tf.ones_like(pred[..., 0]), pred[..., 1]], axis=-1)
mask_rgb = tf.stack([mask[..., 0], tf.ones_like(mask[..., 0]), mask[..., 1]], axis=-1)
mask_rgb = tf.cast(mask_rgb, dtype=float)
cos = losses.cosine_similarity(pred_rgb, mask_rgb) * 180 / 3.14
coss.append(cos)
print(cos)
p = visualizer.create_mask(pred_rgb[0], (10, 10))
m = visualizer.create_mask(mask_rgb[0], (10, 10))
visualizer.visualize([m,p])
report = error_statistics(coss)
print_report(report) | 34.075269 | 116 | 0.646261 | import learnable_histogram
import TauDataset
import data_processing as dp
import numpy as np
import tensorflow as tf
import losses
import histogram
import visualizer
import CubeDataset
from report import print_report, error_statistics
def load_image_names(path, base_path):
names = np.loadtxt(path, dtype="str")
names = np.array([base_path + n for n in names])
return names
mode = 'rb'
# path = "/media/donik/Disk/intel_tau/paths_field.txt"
# imgs = load_image_names(path, base_path='/media/donik/Disk/intel_tau')
# count = len(imgs)
# imgs_test = imgs[int(count * 0.9):]
import CubeDataset
from sklearn.model_selection import train_test_split
path = "/media/donik/Slowpoke/fax/Cube+/paths.txt"
paths = load_image_names(path, base_path="/media/donik/Slowpoke/fax/Cube+")
_, paths = train_test_split(paths, train_size=0.8, random_state=69)
gts = np.loadtxt("/media/donik/Slowpoke/fax/Cube+/cube+_gt.txt")
indices = np.array(list(map(lambda x: int(x[x.rfind('/') + 1:-4]) - 1, paths)))
ds = CubeDataset.regression_dataset(paths, indices, type=CubeDataset.TEST, bs=1, cache=False,
uv=False,
gts=gts)
# ds = TauDataset.regression_dataset(imgs_test, dp.TEST, bs=1, uv=True)
def map_fn(*x):
    """Map an (image, mask) pair into the 2-channel chromaticity space
    selected by the module-level ``mode`` ('uv' -> log-UV, otherwise r/b).

    Returns the stacked image tensor and the mask reshaped to (-1, 2)
    chromaticity targets.
    """
    import histogram
    img, mask = x[0], x[1]
    # Pick the chromaticity transform based on the global `mode`.
    fn = histogram.to_uv if mode=='uv' else histogram.to_rb
    img = dp.__process_images__(img, [0,4])
    img = tf.expand_dims(img, axis=1)
    # fn returns the 2-channel chromaticity image plus a second tensor
    # (Iy; presumably an intensity map -- TODO confirm in histogram module).
    img, Iy = fn(img)
    img = tf.stack([Iy, img[..., 0], img[..., 1]], axis=-1)
    # Concatenate the two processed views along the channel axis
    # (assumes the leading dim of `img` has size 2 here -- TODO confirm).
    img = tf.concat((img[0], img[1,:,:,:,1:]), axis=-1)
    # Ground truth: flatten to RGB triplets, convert, keep 2 chroma channels.
    mask = tf.reshape(mask, (-1, 3))
    mask, masky = fn(mask)
    mask = tf.reshape(mask, (-1, 2))
    return img, mask
ds = ds.map(map_fn)
model, hist = learnable_histogram.build_model(TauDataset.IMG_HEIGHT // 2, TauDataset.IMG_WIDTH // 2, 5, 256,
range_init=(0, 1), w_init=1 / 64, out=2, activation='relu')
# model, hist = learnable_histogram.build_simple_model(TauDataset.IMG_HEIGHT // 2, TauDataset.IMG_WIDTH // 2, 3, 64,
# range_init=(-5, 5), w_init=1 / 32)
checkpoint_path = f"/home/donik/Desktop/models/training_08_11/hist_simple_2_cube.ckpt"
# checkpoint_path = f"/home/donik/Desktop/models/training_06_11/hist_simple_tau.ckpt"
model.load_weights(checkpoint_path)
# Evaluation pass over the dataset: predict with the trained model, convert
# predictions and targets back to RGB, and collect per-sample angular errors.
learnable_histogram.plot_histogram(hist)
coss = []
for img, mask in iter(ds):
    pred = model.predict(img)
    if mode == 'uv':
        pred_rgb = histogram.from_uv(pred)
        mask_rgb = histogram.from_uv(mask)
    else:
        # r/b chromaticity: reinsert a constant green channel of 1.
        pred_rgb = tf.stack([pred[..., 0], tf.ones_like(pred[..., 0]), pred[..., 1]], axis=-1)
        mask_rgb = tf.stack([mask[..., 0], tf.ones_like(mask[..., 0]), mask[..., 1]], axis=-1)
    mask_rgb = tf.cast(mask_rgb, dtype=float)
    # Angular error in degrees. NOTE(review): 3.14 is a coarse pi; math.pi
    # would be exact (the difference slightly biases the reported degrees).
    cos = losses.cosine_similarity(pred_rgb, mask_rgb) * 180 / 3.14
    coss.append(cos)
    print(cos)
    # Side-by-side visualization of ground-truth (m) vs prediction (p).
    p = visualizer.create_mask(pred_rgb[0], (10, 10))
    m = visualizer.create_mask(mask_rgb[0], (10, 10))
    visualizer.visualize([m,p])
report = error_statistics(coss)
print_report(report) | 574 | 0 | 46 |
0f792987d80e5861bf3ac86ee1a3d3b2bbbcfcc4 | 23 | py | Python | vgtk/vgtk/functional/__init__.py | chienerh/EPN_PointCloud | d1488cf1ff82a5bc7ac89c28df30fa2f3f2e0e30 | [
"MIT"
] | 51 | 2021-03-29T08:57:50.000Z | 2022-03-30T07:43:58.000Z | vgtk/vgtk/functional/__init__.py | chienerh/EPN_PointCloud | d1488cf1ff82a5bc7ac89c28df30fa2f3f2e0e30 | [
"MIT"
] | 2 | 2021-04-03T15:22:29.000Z | 2021-09-03T01:27:59.000Z | vgtk/vgtk/functional/__init__.py | chienerh/EPN_PointCloud | d1488cf1ff82a5bc7ac89c28df30fa2f3f2e0e30 | [
"MIT"
] | 4 | 2021-05-14T02:58:30.000Z | 2022-03-22T18:55:34.000Z | from .rotation import * | 23 | 23 | 0.782609 | from .rotation import * | 0 | 0 | 0 |
04cbaef966fa6fa70ab44438f50e64c3a5e06b8e | 251 | py | Python | output/models/nist_data/list_pkg/any_uri/schema_instance/nistschema_sv_iv_list_any_uri_max_length_3_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/any_uri/schema_instance/nistschema_sv_iv_list_any_uri_max_length_3_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/any_uri/schema_instance/nistschema_sv_iv_list_any_uri_max_length_3_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.nist_data.list_pkg.any_uri.schema_instance.nistschema_sv_iv_list_any_uri_max_length_3_xsd.nistschema_sv_iv_list_any_uri_max_length_3 import NistschemaSvIvListAnyUriMaxLength3
__all__ = [
"NistschemaSvIvListAnyUriMaxLength3",
]
| 41.833333 | 193 | 0.89243 | from output.models.nist_data.list_pkg.any_uri.schema_instance.nistschema_sv_iv_list_any_uri_max_length_3_xsd.nistschema_sv_iv_list_any_uri_max_length_3 import NistschemaSvIvListAnyUriMaxLength3
__all__ = [
"NistschemaSvIvListAnyUriMaxLength3",
]
| 0 | 0 | 0 |
ad6f07e3a152dcb07c862494d1fbbcad0a6bcced | 3,579 | py | Python | flaskJSONRPCServer/example/mergeFlaskApp.py | byaka/flaskJSONRPCServer | 328e88c7358e8ce87cd8c56a2db22b0c43e9661d | [
"Apache-2.0"
] | 2 | 2017-04-12T05:54:38.000Z | 2019-10-09T15:56:18.000Z | flaskJSONRPCServer/example/mergeFlaskApp.py | byaka/flaskJSONRPCServer | 328e88c7358e8ce87cd8c56a2db22b0c43e9661d | [
"Apache-2.0"
] | 160 | 2015-04-24T09:39:08.000Z | 2019-10-13T15:27:29.000Z | flaskJSONRPCServer/example/mergeFlaskApp.py | byaka/flaskJSONRPCServer | 328e88c7358e8ce87cd8c56a2db22b0c43e9661d | [
"Apache-2.0"
] | 2 | 2016-02-26T13:20:02.000Z | 2021-10-05T04:05:04.000Z | # -*- coding: utf-8 -*-
import sys, time, random
#FLASK
from flask import Flask, request
from flaskJSONRPCServer import flaskJSONRPCServer
echo._alias='helloworld' #setting alias for method
app=Flask(__name__)
@app.route('/readpost', methods=['POST'])
app2=Flask(__name__)
@app2.route('/helloworld', methods=['GET'])
@app2.route('/readpost', methods=['POST'])
if __name__=='__main__':
print 'Running api..'
# Creating instance of server
# <blocking> switch server to one-request-per-time mode
# <cors> switch auto CORS support
# <gevent> switch to patching process with Gevent
# <debug> switch to logging connection's info from serv-backend
# <log> set logging level (0-critical, 1-errors, 2-warnings, 3-info, 4-debug)
# <fallback> switch auto fallback to JSONP on GET requests
# <allowCompress> switch auto compression
# <compressMinSize> set min limit for compression
# <tweakDescriptors> set file-descriptor's limit for server (useful on high-load servers)
# <jsonBackend> set JSON-backend. Auto fallback to native when problems
# <notifBackend> set exec-backend for Notify-requests
# <servBackend> set serving-backend ('pywsgi', 'werkzeug', 'wsgiex' or 'auto'). 'auto' is more preffered
# <experimental> switch using of experimental perfomance-patches
server=flaskJSONRPCServer(("0.0.0.0", 7001), blocking=False, cors=False, gevent=True, debug=False, log=3, fallback=True, allowCompress=False, jsonBackend='simplejson', notifBackend='simple', tweakDescriptors=[1000, 1000], servBackend='auto')
# Register dispatchers for single functions
server.registerFunction(echo, path='/api')
server.registerFunction(stats, path='/api')
# merge with Flask app
server.postprocessAdd_wsgi(app, status=404)
server.postprocessAdd_wsgi(fakeWSGI1, status=404)
server.postprocessAdd_wsgi(fakeWSGI2, status=404)
server.postprocessAdd_wsgi(app2, status=404)
server.postprocessAdd_cb(ppCB1, status=404)
# Run server
server.serveForever()
# Now you can access this api by path http://127.0.0.1:7001/api for JSON-RPC requests
# Or by path http://127.0.0.1:7001/api/<method>?jsonp=<callback>&(params) for JSONP requests
# For example by http://127.0.0.1:7001/api/echo?data=test_data&jsonp=jsonpCallback_129620
| 42.105882 | 244 | 0.686225 | # -*- coding: utf-8 -*-
import sys, time, random
#FLASK
from flask import Flask, request
from flaskJSONRPCServer import flaskJSONRPCServer
def echo(data='Hello world!'):
   """Return *data* unchanged (defaults to the classic greeting)."""
   return data
echo._alias='helloworld' #setting alias for method
def stats(_connection=None):
   """Return the serving speed statistics of this server, in milliseconds."""
   server = _connection.server
   # inMS=True asks the backend to report timings in milliseconds.
   return server.stats(inMS=True)
app=Flask(__name__)
@app.route('/readpost', methods=['POST'])
def flaskReadpost1():
   """Echo the raw POST body back inside a JSON-RPC 2.0 result string."""
   print 'flaskReadpost1 called'
   d=request.data
   # Escape double quotes so the raw body can be embedded in the JSON literal.
   s='{"jsonrpc": "2.0", "result":"You sended %s", "id": 1}'%(d.replace('"', '\\"'))
   return s
app2=Flask(__name__)
@app2.route('/helloworld', methods=['GET'])
def flaskHelloworld():
   """Return a constant greeting for GET /helloworld."""
   return 'Hello world!'
@app2.route('/readpost', methods=['POST'])
def flaskReadpost2():
   """Echo the POST body in a JSON-RPC 2.0 result string; the REWRITED
   marker shows this app2 handler answered instead of flaskReadpost1."""
   print 'flaskReadpost2 called'
   d=request.data
   # Escape double quotes before embedding the raw body in the JSON literal.
   s='{"jsonrpc": "2.0", "result":"(REWRITED_WITH_flaskReadpost2) You sended %s", "id": 1}'%(d.replace('"', '\\"'))
   return s
def fakeWSGI1(env, start_response):
   """WSGI handler that consumes the first 20 bytes of the request body
   (two 10-byte reads), logs them, then invokes the server-provided
   'flaskJSONRPCServer_skip' callback instead of producing a response."""
   d=env['wsgi.input'].read(10)+env['wsgi.input'].read(10)
   print 'fakeWSGI1 called', d
   # Skip: hand the request back to the server -- presumably so the next
   # registered postprocess handler gets a chance (see postprocessAdd_wsgi).
   env['flaskJSONRPCServer_skip']()
def fakeWSGI2(env, start_response):
   """WSGI handler that drains the whole request body (3 + 20 + remaining
   bytes), logs it, then defers via the 'flaskJSONRPCServer_skip' callback."""
   d=env['wsgi.input'].read(3)+env['wsgi.input'].read(20)+env['wsgi.input'].read()
   print 'fakeWSGI2 called', d
   # Skip: do not answer; let the server continue with other handlers.
   env['flaskJSONRPCServer_skip']()
def ppCB1(request, server, controller):
   """Postprocess callback: read and log the POST payload, then call
   controller.skip() so the request continues to other handlers."""
   d=server._loadPostData(request)
   print 'ppCB1 called', d
   # A callback could answer directly by returning (status, body), e.g.:
   # return 200, '{"jsonrpc": "2.0", "result":"HI!", "id": 1}'
   controller.skip()
if __name__=='__main__':
print 'Running api..'
# Creating instance of server
# <blocking> switch server to one-request-per-time mode
# <cors> switch auto CORS support
# <gevent> switch to patching process with Gevent
# <debug> switch to logging connection's info from serv-backend
# <log> set logging level (0-critical, 1-errors, 2-warnings, 3-info, 4-debug)
# <fallback> switch auto fallback to JSONP on GET requests
# <allowCompress> switch auto compression
# <compressMinSize> set min limit for compression
# <tweakDescriptors> set file-descriptor's limit for server (useful on high-load servers)
# <jsonBackend> set JSON-backend. Auto fallback to native when problems
# <notifBackend> set exec-backend for Notify-requests
# <servBackend> set serving-backend ('pywsgi', 'werkzeug', 'wsgiex' or 'auto'). 'auto' is more preffered
# <experimental> switch using of experimental perfomance-patches
server=flaskJSONRPCServer(("0.0.0.0", 7001), blocking=False, cors=False, gevent=True, debug=False, log=3, fallback=True, allowCompress=False, jsonBackend='simplejson', notifBackend='simple', tweakDescriptors=[1000, 1000], servBackend='auto')
# Register dispatchers for single functions
server.registerFunction(echo, path='/api')
server.registerFunction(stats, path='/api')
# merge with Flask app
server.postprocessAdd_wsgi(app, status=404)
server.postprocessAdd_wsgi(fakeWSGI1, status=404)
server.postprocessAdd_wsgi(fakeWSGI2, status=404)
server.postprocessAdd_wsgi(app2, status=404)
server.postprocessAdd_cb(ppCB1, status=404)
# Run server
server.serveForever()
# Now you can access this api by path http://127.0.0.1:7001/api for JSON-RPC requests
# Or by path http://127.0.0.1:7001/api/<method>?jsonp=<callback>&(params) for JSONP requests
# For example by http://127.0.0.1:7001/api/echo?data=test_data&jsonp=jsonpCallback_129620
| 987 | 0 | 181 |
2971e7f8d7109db83925545f9423d8ef90054149 | 594 | py | Python | src/python/pants/core/util_rules/distdir_test.py | viktortnk/pants | 54c98206de5ac9aadfe26d83175f472941be6c7d | [
"Apache-2.0"
] | 1 | 2020-06-13T22:01:39.000Z | 2020-06-13T22:01:39.000Z | src/python/pants/core/util_rules/distdir_test.py | viktortnk/pants | 54c98206de5ac9aadfe26d83175f472941be6c7d | [
"Apache-2.0"
] | null | null | null | src/python/pants/core/util_rules/distdir_test.py | viktortnk/pants | 54c98206de5ac9aadfe26d83175f472941be6c7d | [
"Apache-2.0"
] | 2 | 2020-05-18T18:43:11.000Z | 2020-05-19T02:47:47.000Z | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pathlib import Path
import pytest
from pants.core.util_rules.distdir import DistDir, InvalidDistDir, validate_distdir
| 34.941176 | 96 | 0.747475 | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pathlib import Path
import pytest
from pants.core.util_rules.distdir import DistDir, InvalidDistDir, validate_distdir
def test_distdir():
    """validate_distdir accepts relative and buildroot-anchored dist paths
    and rejects paths outside the buildroot."""
    root = Path("/buildroot")
    expected = DistDir(relpath=Path("dist"))
    assert validate_distdir(Path("dist"), root) == expected
    assert validate_distdir(Path("/buildroot/dist"), root) == expected
    with pytest.raises(InvalidDistDir):
        validate_distdir(Path("/other/dist"), root)
| 313 | 0 | 23 |
17b0a6e850d3e4025bc339029a459d15ea6ce799 | 9,504 | py | Python | brainstat/tests/test_f_test.py | rmarkello/BrainStat | f34ffa01274aabf411feb801a3ea1869f8a22d11 | [
"BSD-3-Clause"
] | null | null | null | brainstat/tests/test_f_test.py | rmarkello/BrainStat | f34ffa01274aabf411feb801a3ea1869f8a22d11 | [
"BSD-3-Clause"
] | null | null | null | brainstat/tests/test_f_test.py | rmarkello/BrainStat | f34ffa01274aabf411feb801a3ea1869f8a22d11 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pickle
from .testutil import datadir
from brainstat.stats.SLM import SLM, f_test
from brainstat.stats.terms import Term
# test data *pkl consists of slm1* and slm2* keys
# slm1* variables will be assigned to slm1 dictionary, and slm2* to the slm2 dict.
| 35.729323 | 86 | 0.59838 | import numpy as np
import pickle
from .testutil import datadir
from brainstat.stats.SLM import SLM, f_test
from brainstat.stats.terms import Term
def dummy_test(infile, expfile):
    """Run f_test on pickled input SLMs and compare against expected output.

    Parameters
    ----------
    infile : str
        Pickle whose keys are prefixed 'slm1'/'slm2'; the remainder of each
        key (key[4:]) names the SLM attribute to set (e.g. 'slm1X' -> slm1.X).
    expfile : str
        Pickle mapping attribute names of the f_test result to expected values.

    Raises AssertionError when any result attribute is not allclose to the
    expected value.
    """
    # Load input test data; 'with' guarantees the handle is closed even if
    # unpickling raises (the previous explicit open/close leaked on error).
    with open(infile, "br") as ifile:
        idic = pickle.load(ifile)

    slm1 = SLM(Term(1), Term(1))
    slm2 = SLM(Term(1), Term(2))
    for key, value in idic.items():
        # Keys look like 'slm1X' / 'slm2coef': route on the digit and strip
        # the 4-character 'slmN' prefix to obtain the attribute name.
        if "1" in key:
            setattr(slm1, key[4:], value)
        elif "2" in key:
            setattr(slm2, key[4:], value)

    # Run f test.
    outdic = f_test(slm1, slm2)

    # Load expected output data.
    with open(expfile, "br") as efile:
        expdic = pickle.load(efile)

    testout = [
        np.allclose(getattr(outdic, key), expdic[key], rtol=1e-05, equal_nan=True)
        for key in expdic.keys()
    ]
    assert all(testout)
# test data *pkl consists of slm1* and slm2* keys
# slm1* variables will be assigned to slm1 dictionary, and slm2* to the slm2 dict.
def test_01():
    # Small slm1/slm2: X (5, 6) int64, df int, SSE (3, 1) int64, coef (6, 1) int64.
    dummy_test(datadir("statf_01_IN.pkl"), datadir("statf_01_OUT.pkl"))
def test_02():
    # Mid-sized slm1/slm2, all int64: X (84, 77), SSE (1128, 42), coef (77, 42).
    dummy_test(datadir("statf_02_IN.pkl"), datadir("statf_02_OUT.pkl"))
def test_03():
    # Mid-sized slm1/slm2, float64, SSE with ~2k rows: X (91, 58),
    # SSE (2278, 75), coef (58, 75).
    dummy_test(datadir("statf_03_IN.pkl"), datadir("statf_03_OUT.pkl"))
def test_04():
    # Small slm1/slm2 with 3-D coef: X (19, 27) int64, SSE (6, 87) int64,
    # coef (27, 87, 3) float64.
    dummy_test(datadir("statf_04_IN.pkl"), datadir("statf_04_OUT.pkl"))
def test_05():
    # Like test_04 with different shapes: X (13, 3) int64, SSE (3, 27) int64,
    # coef (3, 27, 2) float64.
    dummy_test(datadir("statf_05_IN.pkl"), datadir("statf_05_OUT.pkl"))
def test_06():
    # Like test_04, all int64: X (13, 10), SSE (3, 34), coef (10, 34, 2).
    dummy_test(datadir("statf_06_IN.pkl"), datadir("statf_06_OUT.pkl"))
def test_07():
    # Like test_04, all float64: X (12, 4), SSE (6, 42), coef (4, 42, 3).
    dummy_test(datadir("statf_07_IN.pkl"), datadir("statf_07_OUT.pkl"))
def test_08():
    # Like test_04, all float64: X (32, 91), SSE (3, 78), coef (91, 78, 2).
    dummy_test(datadir("statf_08_IN.pkl"), datadir("statf_08_OUT.pkl"))
def test_09():
    # Like test_04, all float64: X (88, 49), SSE (1, 56), coef (49, 56, 1).
    dummy_test(datadir("statf_09_IN.pkl"), datadir("statf_09_OUT.pkl"))
def test_10():
    # Real dataset with extra SLM keys (tri, resl, c, k, ef, sd, t):
    # X (10, 2) uint8, SSE (1, 20484), coef (2, 20484), tri (40960, 3) int32,
    # resl (61440, 1); ef/sd/t all (1, 20484) float64.
    dummy_test(datadir("statf_10_IN.pkl"), datadir("statf_10_OUT.pkl"))
def test_11():
    # Same layout as test_10, but slm2's X and resl are shuffled and
    # slm2's df differs from slm1's.
    dummy_test(datadir("statf_11_IN.pkl"), datadir("statf_11_OUT.pkl"))
def test_12():
    # Same layout as test_10 with larger design: X (20, 9) uint16,
    # coef (9, 20484), c (1, 9); ef/sd/t all (1, 20484) float64.
    dummy_test(datadir("statf_12_IN.pkl"), datadir("statf_12_OUT.pkl"))
| 8,911 | 0 | 299 |
c99df33ef3173736e323be22afd14fcd3e379231 | 5,762 | py | Python | fragresp/scripts/run_check.py | wutobias/fragresp | 57ecd73b9f056812752ce6f00b6456b532311c76 | [
"MIT"
] | 1 | 2020-07-22T13:55:20.000Z | 2020-07-22T13:55:20.000Z | fragresp/scripts/run_check.py | wutobias/fragresp | 57ecd73b9f056812752ce6f00b6456b532311c76 | [
"MIT"
] | null | null | null | fragresp/scripts/run_check.py | wutobias/fragresp | 57ecd73b9f056812752ce6f00b6456b532311c76 | [
"MIT"
] | null | null | null | import os
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pickle
from collections import OrderedDict
from fragresp.gaussian_tools import check_opt as _check_opt
from fragresp.gaussian_tools import check_esp as _check_esp
from fragresp.gaussian_tools import get_energy as _get_energy
from fragresp.constants import hartree_to_kcal
| 30.812834 | 73 | 0.566644 | import os
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pickle
from collections import OrderedDict
from fragresp.gaussian_tools import check_opt as _check_opt
from fragresp.gaussian_tools import check_esp as _check_esp
from fragresp.gaussian_tools import get_energy as _get_energy
from fragresp.constants import hartree_to_kcal
def check_opt(database, frag_dir, surr_cap_dir, mol_dir, stdout, stderr):
    """Check Gaussian geometry-optimization runs and plot their convergence.

    Parameters
    ----------
    database : str
        Path to the pickled fragment database.
    frag_dir, surr_cap_dir, mol_dir : str
        QM working directories for fragments, capped surrogates and whole
        molecules; each must contain a ``conf_count.dat`` when checked.
    stdout, stderr : file-like
        Streams forwarded to the log checker.

    Fragments are always checked; capped surrogates only when the database
    has non-terminal connections, and whole molecules only when some
    molecule has no fragments. Findings go to ``<dir>/check_opt.log`` and
    convergence plots are produced by :func:`plot_cycle`.

    Fixes vs. previous version: the database file handle is now closed
    deterministically, and the triplicated count-file parsing is factored
    into one helper.
    """

    def _read_conf_counts(count_path):
        # conf_count.dat holds "<index> <n_conformers>" pairs; blank lines
        # are skipped.
        counts = OrderedDict()
        with open(count_path, 'r') as f:
            for line in f:
                fields = line.split()
                if not fields:
                    continue
                counts[int(fields[0])] = int(fields[1])
        return counts

    frag_check_path = frag_dir + "/check_opt.log"
    surr_check_path = surr_cap_dir + "/check_opt.log"
    mol_check_path = mol_dir + "/check_opt.log"

    with open(database, "rb") as dbfile:
        db = pickle.load(dbfile)

    # Molecules without any fragment are checked as whole molecules below.
    mol2frag = db.get_mol2frag()
    include_list_mol = [mol_i for mol_i in range(db.get_mol_count())
                        if len(mol2frag[mol_i]) == 0]

    # Only non-terminal connections have capped surrogates to check.
    include_list_conn = [conn_i for conn_i, conn in enumerate(db.get_conn_list())
                         if not conn.get_terminal()]

    frag_count_dict = _read_conf_counts(frag_dir + "/conf_count.dat")
    _check_opt(frag_count_dict,
               mol_list=db.get_frag_list(),
               qm_dir=frag_dir,
               logfile=frag_check_path,
               stdout=stdout,
               stderr=stderr)
    plot_cycle(frag_count_dict, frag_dir)

    if len(include_list_conn) > 0:
        surr_count_dict = _read_conf_counts(surr_cap_dir + "/conf_count.dat")
        _check_opt(surr_count_dict,
                   mol_list=db.get_surr_cap_list(),
                   qm_dir=surr_cap_dir,
                   logfile=surr_check_path,
                   stdout=stdout,
                   stderr=stderr)
        plot_cycle(surr_count_dict, surr_cap_dir)

    if len(include_list_mol) > 0:
        mol_count_dict = _read_conf_counts(mol_dir + "/conf_count.dat")
        _check_opt(mol_count_dict,
                   mol_list=db.get_mol_list(),
                   qm_dir=mol_dir,
                   logfile=mol_check_path,
                   stdout=stdout,
                   stderr=stderr)
        plot_cycle(mol_count_dict, mol_dir)
def plot_cycle(conf_dict, path):
    """Plot optimization energy vs. cycle for every conformer of a fragment.

    conf_dict maps a fragment index to its number of conformers; for each
    fragment with at least one optimization log present, a figure is saved
    to <path>/frag<i>/opt_conv.png.
    """
    for frag_idx, n_conf in conf_dict.items():
        frag_dir = "%s/frag%d" % (path, frag_idx)
        fig_path = frag_dir + "/opt_conv.png"
        found_any = False
        for conf_idx in range(n_conf):
            log_path = "%s/conf%d/frag%d-conf%d_opt.log" % (
                frag_dir, conf_idx, frag_idx, conf_idx)
            if not os.path.exists(log_path):
                continue
            energies = list()
            _get_energy(log_path, energies)
            # Gaussian reports Hartree; convert for plotting.
            plt.plot(np.array(energies) * hartree_to_kcal,
                     label="Conf %d" % conf_idx)
            found_any = True
        if found_any:
            plt.xlabel("Cycle")
            plt.ylabel("Energy [kcal]")
            plt.legend()
            plt.savefig(fig_path, dpi=1000)
            plt.clf()
def check_esp(database, frag_dir, surr_cap_dir, mol_dir):
    """Check Gaussian ESP calculations for fragments, capped surrogates and
    whole molecules.

    Per-entity conformer counts are read from ``<dir>/conf_count.dat`` and
    handed to the ESP log checker, which writes its findings to
    ``<dir>/check_esp.log``. Surrogate caps are only checked when the
    database has non-terminal connections, and whole molecules only when
    some molecule has no fragments.

    Fixes vs. previous version: the database file handle is now closed
    deterministically, and the triplicated count-file parsing is factored
    into one helper.
    """

    def _read_conf_counts(count_path):
        # conf_count.dat holds "<index> <n_conformers>" pairs; blank lines
        # are skipped.
        counts = OrderedDict()
        with open(count_path, 'r') as f:
            for line in f:
                fields = line.split()
                if not fields:
                    continue
                counts[int(fields[0])] = int(fields[1])
        return counts

    frag_check_path = frag_dir + "/check_esp.log"
    surr_check_path = surr_cap_dir + "/check_esp.log"
    mol_check_path = mol_dir + "/check_esp.log"

    with open(database, "rb") as dbfile:
        db = pickle.load(dbfile)

    # Molecules without any fragment are checked as whole molecules below.
    mol2frag = db.get_mol2frag()
    include_list_mol = [mol_i for mol_i in range(db.get_mol_count())
                        if len(mol2frag[mol_i]) == 0]

    # Only non-terminal connections have capped surrogates to check.
    include_list_conn = [conn_i for conn_i, conn in enumerate(db.get_conn_list())
                         if not conn.get_terminal()]

    frag_count_dict = _read_conf_counts(frag_dir + "/conf_count.dat")
    _check_esp(frag_count_dict,
               qm_dir=frag_dir,
               logfile=frag_check_path)

    if len(include_list_conn) > 0:
        surr_count_dict = _read_conf_counts(surr_cap_dir + "/conf_count.dat")
        _check_esp(surr_count_dict,
                   qm_dir=surr_cap_dir,
                   logfile=surr_check_path)

    if len(include_list_mol) > 0:
        mol_count_dict = _read_conf_counts(mol_dir + "/conf_count.dat")
        _check_esp(mol_count_dict,
                   qm_dir=mol_dir,
                   logfile=mol_check_path)
fdead8cdb2770e15490a8872b628ae1a8cf86c76 | 2,057 | py | Python | examples/market.py | paulorauber/pgm | 6d5af508b0fa48e475b8a54c7dce8e1515f6a50e | [
"MIT"
] | 19 | 2015-04-28T18:23:18.000Z | 2022-01-27T10:20:44.000Z | examples/market.py | paulorauber/pgm | 6d5af508b0fa48e475b8a54c7dce8e1515f6a50e | [
"MIT"
] | 1 | 2020-10-16T04:24:26.000Z | 2020-10-16T04:24:26.000Z | examples/market.py | paulorauber/pgm | 6d5af508b0fa48e475b8a54c7dce8e1515f6a50e | [
"MIT"
] | 11 | 2017-08-01T17:11:05.000Z | 2021-11-26T14:48:40.000Z | from model.factor import RandomVar
from model.factor import CPD
from model.factor import Factor
from model.influence import InfluenceDiagram
from inference.exact import ExpectedUtility
if __name__ == '__main__':
main()
| 26.371795 | 60 | 0.562956 | from model.factor import RandomVar
from model.factor import CPD
from model.factor import Factor
from model.influence import InfluenceDiagram
from inference.exact import ExpectedUtility
def three_variables():
M = RandomVar('Market', 3)
F = RandomVar('Found', 2)
uMF = Factor([M, F], [0, -7, 0, 5, 0, 20])
cM = CPD([M], [0.5, 0.3, 0.2])
# Alternative decision rules for F
dF_1 = CPD([F], [1.0, 0])
dF_2 = CPD([F], [0, 1.0]) # Optimal
id = InfluenceDiagram([cM], [uMF])
eu = ExpectedUtility(id)
print(eu.expected_utility([dF_1]))
print(eu.expected_utility([dF_2]))
print(eu.optimal_decision_rule([F]))
def six_variables():
M = RandomVar('Market', 3)
S = RandomVar('Survey', 4) # S = 3 means no survey
T = RandomVar('Test', 2)
F = RandomVar('Found', 2)
uMF = Factor([M, F], [0, -7, 0, 5, 0, 20])
uT = Factor([T], [0, -1])
cM = CPD([M], [0.5, 0.3, 0.2])
cST = CPD([S, M, T], [0.0, 0.6, 0.0, 0.3, 0.0, 0.1,
0.0, 0.3, 0.0, 0.4, 0.0, 0.4,
0.0, 0.1, 0.0, 0.3, 0.0, 0.5,
1.0, 0.0, 1.0, 0.0, 1.0, 0.0])
# Alternative decision rules for F given S
dFS_1 = CPD([F, S], [0, 0, 0, 1, 1, 1, 1, 0])
dFS_2 = CPD([F, S], [1, 0, 0, 0, 0, 1, 1, 1]) # Optimal
# Alternative decision rules for T
dT_1 = CPD([T], [1.0, 0.0])
dT_2 = CPD([T], [0.0, 1.0]) # Optimal
id = InfluenceDiagram([cM, cST], [uMF, uT])
eu = ExpectedUtility(id)
print(eu.expected_utility([dFS_1, dT_1]))
print(eu.expected_utility([dFS_1, dT_2]))
print(eu.expected_utility([dFS_2, dT_1]))
print(eu.expected_utility([dFS_2, dT_2]))
# New influence diagram with a single decision rule
dT = dT_2
id2 = InfluenceDiagram([cM, cST, dT], [uMF, uT])
eu2 = ExpectedUtility(id2)
dFS_optimal = eu2.optimal_decision_rule([F, S])
print(eu.expected_utility([dFS_optimal, dT]))
def main():
three_variables()
six_variables()
if __name__ == '__main__':
main()
| 1,761 | 0 | 69 |
578135e235807cfcab22851a2858d9e073215126 | 6,116 | py | Python | zanichelli_parola_del_giorno/zanichelli_parola_del_giorno.py | cipz/InstagramBots | c9e391633684ae17aa8c6edf5b8d854e27b89bcc | [
"MIT"
] | 3 | 2020-10-28T09:51:14.000Z | 2020-10-29T08:22:50.000Z | zanichelli_parola_del_giorno/zanichelli_parola_del_giorno.py | cipz/InstagramBots | c9e391633684ae17aa8c6edf5b8d854e27b89bcc | [
"MIT"
] | null | null | null | zanichelli_parola_del_giorno/zanichelli_parola_del_giorno.py | cipz/InstagramBots | c9e391633684ae17aa8c6edf5b8d854e27b89bcc | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Importing custom files
import sys
sys.path.insert(1, '../')
import utils
import instagram
from datetime import date
from PIL import Image, ImageDraw
from bs4 import BeautifulSoup
import urllib.request
import requests
import time
import wget
import json
import os
if __name__ == "__main__":
    # Debug mode is enabled by either the long or the short CLI flag.
    cli_args = sys.argv[1:]
    debug = "--debug" in cli_args or "-d" in cli_args
    main(debug)
    print("\nDONE")
# Importing custom files
import sys
sys.path.insert(1, '../')
import utils
import instagram
from datetime import date
from PIL import Image, ImageDraw
from bs4 import BeautifulSoup
import urllib.request
import requests
import time
import wget
import json
import os
def main(debug):
## -- ## -- ## -- ## -- ## -- ## -- ## -- ## -- ## -- ## --
# Getting parameters
params_file = 'params.json'
params = utils.get_params(params_file, 'zanichelli_username', 'zanichelli_password')
username = params['username']
password = params['password']
previous_post_key = params['previous_post_key']
## -- ## -- ## -- ## -- ## -- ## -- ## -- ## -- ## -- ## --
# Defining edited parameters dictionary
edit_params = {}
## -- ## -- ## -- ## -- ## -- ## -- ## -- ## -- ## -- ## --
print("Setting url and downloading content")
url = 'https://dizionaripiu.zanichelli.it/cultura-e-attualita/le-parole-del-giorno/parola-del-giorno/'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
parola_title = soup.find('header', {'class':'mb-4'}).text.strip()
# Checking word
if previous_post_key != "" and previous_post_key == parola_title:
print("\n\n\nThere is nothing new to post\n\nDONE")
exit(0)
# Setting the previous post key
edit_params['previous_post_key'] = parola_title
main_div = soup.find('div', {'class': 'main-content light-txt'})
sillabazione = main_div.find('span', {'style': 'display: inline; font-style: normal; font-weight: 300; font-size: 1em; color: #C53329; font-family: "Noto Sans", Verdana, Georgia, Tahoma, sans-serif !important;'}).text.strip()
tipologia_parola = main_div.find('span', {'style':'display: inline; font-style: normal; font-weight: 600; font-size: 1em; color: #444; font-family: "Noto Sans", Verdana, Georgia, Tahoma, sans-serif !important;'}).text.strip()
info_parola = main_div.findAll('span', {'style':'display: inline; font-style: normal; font-weight: 300; font-size: 1em; color: #444; font-family: "Noto Sans", Verdana, Georgia, Tahoma, sans-serif !important;'})
pronuncia_parola = info_parola[0].text
origine_parola = info_parola[1].text
significati_parola = main_div.findAll('div', {'style': 'display: block; background-color: #FFFFFF; margin-top: 1em; margin-left: 0em; padding-bottom: 0.9em; border-bottom-style: solid; border-bottom-color: #ccc; border-bottom-width: 1px;'})
caption_significati = ''
if len(significati_parola) > 1:
for significato in significati_parola:
if len(caption_significati) > 1600:
break
significato_entry = significato.find('p')
significato_entry_text = significato_entry.text.strip()
significato_entry_text = ' '.join(significato_entry_text.split())
caption_significati += '\n' + significato_entry_text
else:
caption_significati += '\n' + significati_parola[0].text
data = soup.find('div', {'class':'article-meta col text-right'}).text
# Setting caption
print("Setting caption")
hashtags = '#' + parola_title + ' #zanichelli #dizionario #paroladelgiorno #grammatica #cultura #italia #linguaitaliana'
caption = 'La parola del giorno è: ' + parola_title + '\n' + caption_significati + '\n\n' + data + '\n\n' + hashtags
print("Caption:\n\n")
print(caption)
print()
print("\n\nSetting correct values in tex file")
words_file = open('latex/words.txt', 'r+')
words_file.truncate(0)
# A long word just to test
# parola_title = 'Supercalifragilistichespiralidoso'
# If the word is too long and does not fit the picture
len_parola_title = len(parola_title)
# print(len_parola_title)
if len_parola_title > 30:
parola_title = '\\footnotesize ' + parola_title
elif len_parola_title > 19:
parola_title = '\small ' + parola_title
elif len_parola_title > 10:
if ' / ' in parola_title:
parola_title = parola_title.replace(' / ', ' / \\newline ')
elif ' ' in parola_title:
parola_title = parola_title.replace(' ', '\\newline ')
else:
parola_title = '\LARGE ' + parola_title
line = '\\newcommand{\parolatitle}{' + parola_title + '}'
words_file.write(line)
line = '\\newcommand{\sillabazione}{' + sillabazione + '}'
words_file.write(line)
if len(origine_parola) > 60:
tipologia_parolas = '{\\scriptsize' + tipologia_parola + '}'
origine_parola = '{\\begin{spacing}{0.85}\\noindent\\footnotesize' + origine_parola + '\end{spacing}}'
line = '\\newcommand{\\tipologiaparola}{' + tipologia_parola + '}'
words_file.write(line)
line = '\\newcommand{\origineparola}{' + origine_parola + '}'
words_file.write(line)
line = '\\newcommand{\data}{' + data + '}'
words_file.write(line)
words_file.close()
print("\n\nCompliling tex file")
# utils.compile_xelatex_verbose()
utils.compile_xelatex_silent()
print("\n\nTransforming pdf in jpg")
utils.pdf_2_jpg('latex/main.pdf', 'out.jpg')
if not debug:
execution_result = {}
execution_result["color"] = "green"
try:
print("Posting to instagram")
instagram.post_image('out.jpg', caption, username, password)
print('Setting new parameters')
utils.set_params(params_file, edit_params)
except Exception as e:
execution_result["color"] = "red"
utils.edit_badge("paroladelgiornozanichelli.json", execution_result)
# Removing stuff (not necessary if used in docker container of github actions)
# Useful if executed locally
print("Removing useless files")
os.system('rm *REMOVE_ME')
os.system('rm img.jpg')
os.system('rm out.jpg')
if __name__ == "__main__":
if "--debug" in sys.argv[1:]:
debug=True
elif "-d" in sys.argv[1:]:
debug=True
else:
debug=False
main(debug)
print("\nDONE") | 5,606 | 0 | 26 |
1d90bc1b77764ec27f3c296f8d8edc9f0f1f66f9 | 307 | py | Python | build/lib/tnetwork/DCD/externals/utils.py | Yquetzal/tnetwork | 43fb2f19aeed57a8a9d9af032ee80f1c9f58516d | [
"BSD-2-Clause"
] | 4 | 2019-02-19T07:49:06.000Z | 2020-09-01T16:17:54.000Z | tnetwork/DCD/externals/utils.py | Yquetzal/tnetwork | 43fb2f19aeed57a8a9d9af032ee80f1c9f58516d | [
"BSD-2-Clause"
] | 1 | 2019-07-13T16:16:28.000Z | 2019-07-15T09:34:33.000Z | build/lib/tnetwork/DCD/externals/utils.py | Yquetzal/tnetwork | 43fb2f19aeed57a8a9d9af032ee80f1c9f58516d | [
"BSD-2-Clause"
] | 3 | 2019-07-13T16:09:20.000Z | 2022-02-08T02:23:46.000Z | import os
| 20.466667 | 43 | 0.641694 | import os
def clean_create_dir(dir):
if not os.path.exists(dir):
os.makedirs(dir, exist_ok=True)
filelist = [f for f in os.listdir(dir)]
for f in filelist:
os.remove(os.path.join(dir, f))
def clear_file(filename):
if os.path.exists(filename):
os.remove(filename)
| 250 | 0 | 46 |
78dcceb9d60795c264aadbff9ec8b0ea045d40a1 | 2,850 | py | Python | fusion/experiment/pe_array.py | SheaCai/optimus | e9a9a2354376c786d7e6c64e34dee2c2010e5585 | [
"MIT"
] | 3 | 2021-05-05T06:55:38.000Z | 2022-02-19T06:19:59.000Z | fusion/experiment/pe_array.py | SheaCai/optimus | e9a9a2354376c786d7e6c64e34dee2c2010e5585 | [
"MIT"
] | null | null | null | fusion/experiment/pe_array.py | SheaCai/optimus | e9a9a2354376c786d7e6c64e34dee2c2010e5585 | [
"MIT"
] | null | null | null | import sys
import os
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('../'))
sys.path.append(os.path.abspath('../../'))
import matplotlib.pyplot as plt
import numpy as np
from fusion.scheduling import batch_size
from fusion.scheduling import Resource
from fusion.scheduling import LoopLowerBound
from fusion.scheduling import ScheduleGenerator
from fusion.scheduling import extract_arch_info, extract_dataflow_info
from fusion.scheduling import CostModel
from fusion.scheduling import res_parse
from fusion.nn_models import import_network
def do_scheduling():
"""
Get optimal scheduling for given problem. Return a result schedule.
"""
buffer = [128, 128, 256, 256, 512, 512, 512, 512]
pe_array = [16, 32, 16, 32, 16, 16, 32, 64]
# Network.
batch_size.init(4)
network = import_network("squeezenet")
dataflow_info = extract_dataflow_info('./fusion/dataflow/dataflow_Ow_Cout.json')
access_list = []
energy_list = []
for pe, bf in zip(pe_array, buffer):
arch_file = './fusion/arch/3_level_mem_{}KB.json'.format(bf)
arch_info = extract_arch_info(arch_file)
arch_info["parallel_count"][0] = pe ** 2
if pe == 8:
arch_info["parallel_cost"][0] = 0.05
resource = Resource.arch(arch_info)
# Unroll loop lower bound
dataflow_info["partitioning_size"] = [pe] * len(dataflow_info["partitioning_size"])
loop_lower_bound = LoopLowerBound.dataflow(dataflow_info)
print("\n===========================================================")
print('PE-array: {}x{}, buffer size: {}(KB)'.format(pe, pe, bf))
print("waiting...")
cost_model = CostModel(network, resource)
# optimal schedule
sg = ScheduleGenerator(network, resource, cost_model, loop_lower_bound)
schedule_info_list, _ = sg.schedule_search()
print("done!\n\n")
energy, access = res_parse(schedule_info_list, resource,
cost_model, sg, network,
loop_lower_bound,
'./result/pe_array', arch_info)
energy_list.append(energy)
access_list.append(access)
x = ["16x16,128", "32x32,128", "16x16,256", "32x32,256", "8x8,512", "16x16,512", "32x32,512", "64x64,512"]
energy_list = np.array(energy_list) / energy_list[0]
access_list = np.array(access_list) / access_list[0]
plt.figure(figsize=(8, 2))
plt.plot(x, energy_list, label="Normalized Energy")
plt.plot(x, access_list, label="Normalized DRAM Access")
plt.ylim(0.2, 1.2)
plt.legend()
plt.savefig('./result/pe_array/pe_array.png')
plt.show()
def main():
"""
Main function.
"""
do_scheduling()
return 0
if __name__ == '__main__':
sys.exit(main())
| 32.022472 | 110 | 0.631579 | import sys
import os
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('../'))
sys.path.append(os.path.abspath('../../'))
import matplotlib.pyplot as plt
import numpy as np
from fusion.scheduling import batch_size
from fusion.scheduling import Resource
from fusion.scheduling import LoopLowerBound
from fusion.scheduling import ScheduleGenerator
from fusion.scheduling import extract_arch_info, extract_dataflow_info
from fusion.scheduling import CostModel
from fusion.scheduling import res_parse
from fusion.nn_models import import_network
def do_scheduling():
"""
Get optimal scheduling for given problem. Return a result schedule.
"""
buffer = [128, 128, 256, 256, 512, 512, 512, 512]
pe_array = [16, 32, 16, 32, 16, 16, 32, 64]
# Network.
batch_size.init(4)
network = import_network("squeezenet")
dataflow_info = extract_dataflow_info('./fusion/dataflow/dataflow_Ow_Cout.json')
access_list = []
energy_list = []
for pe, bf in zip(pe_array, buffer):
arch_file = './fusion/arch/3_level_mem_{}KB.json'.format(bf)
arch_info = extract_arch_info(arch_file)
arch_info["parallel_count"][0] = pe ** 2
if pe == 8:
arch_info["parallel_cost"][0] = 0.05
resource = Resource.arch(arch_info)
# Unroll loop lower bound
dataflow_info["partitioning_size"] = [pe] * len(dataflow_info["partitioning_size"])
loop_lower_bound = LoopLowerBound.dataflow(dataflow_info)
print("\n===========================================================")
print('PE-array: {}x{}, buffer size: {}(KB)'.format(pe, pe, bf))
print("waiting...")
cost_model = CostModel(network, resource)
# optimal schedule
sg = ScheduleGenerator(network, resource, cost_model, loop_lower_bound)
schedule_info_list, _ = sg.schedule_search()
print("done!\n\n")
energy, access = res_parse(schedule_info_list, resource,
cost_model, sg, network,
loop_lower_bound,
'./result/pe_array', arch_info)
energy_list.append(energy)
access_list.append(access)
x = ["16x16,128", "32x32,128", "16x16,256", "32x32,256", "8x8,512", "16x16,512", "32x32,512", "64x64,512"]
energy_list = np.array(energy_list) / energy_list[0]
access_list = np.array(access_list) / access_list[0]
plt.figure(figsize=(8, 2))
plt.plot(x, energy_list, label="Normalized Energy")
plt.plot(x, access_list, label="Normalized DRAM Access")
plt.ylim(0.2, 1.2)
plt.legend()
plt.savefig('./result/pe_array/pe_array.png')
plt.show()
def main():
"""
Main function.
"""
do_scheduling()
return 0
if __name__ == '__main__':
sys.exit(main())
| 0 | 0 | 0 |
2a35694f15ec6717ca92e616b88e2260cec47367 | 68 | py | Python | scripts/qgis_fixes/fix_future_builtins.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | scripts/qgis_fixes/fix_future_builtins.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | scripts/qgis_fixes/fix_future_builtins.py | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | 1 | 2021-12-25T08:40:30.000Z | 2021-12-25T08:40:30.000Z | from libfuturize.fixes.fix_future_builtins import FixFutureBuiltins
| 34 | 67 | 0.911765 | from libfuturize.fixes.fix_future_builtins import FixFutureBuiltins
| 0 | 0 | 0 |
907aad0046071a3624c9516fbf2c4978313c69da | 202 | py | Python | MyPython/test.py | LairdStreak/MyPyPlayGround | e999cfd179d457a6d17c81bf1bacaa7c90e3e1dc | [
"MIT"
] | null | null | null | MyPython/test.py | LairdStreak/MyPyPlayGround | e999cfd179d457a6d17c81bf1bacaa7c90e3e1dc | [
"MIT"
] | null | null | null | MyPython/test.py | LairdStreak/MyPyPlayGround | e999cfd179d457a6d17c81bf1bacaa7c90e3e1dc | [
"MIT"
] | null | null | null | """This module does blah blah."""
import httplib2
resp, content = httplib2.Http().request("http://myip.dk")
#start = content.find("ipv4address")
#end = start + 100
print (content) #[start:end].strip()) | 28.857143 | 57 | 0.688119 | """This module does blah blah."""
import httplib2
resp, content = httplib2.Http().request("http://myip.dk")
#start = content.find("ipv4address")
#end = start + 100
print (content) #[start:end].strip()) | 0 | 0 | 0 |
61b5cb8cdfd5f40d0b514f7d1175350b322a4808 | 3,161 | py | Python | src/bbdata/endpoint/output/values.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | [
"MIT"
] | null | null | null | src/bbdata/endpoint/output/values.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | [
"MIT"
] | null | null | null | src/bbdata/endpoint/output/values.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | [
"MIT"
] | null | null | null | from enum import Enum
import requests
from bbdata.config import output_api_url
from bbdata.util import handle_response
from bbdata.exceptions import ClientException
| 33.273684 | 117 | 0.624486 | from enum import Enum
import requests
from bbdata.config import output_api_url
from bbdata.util import handle_response
from bbdata.exceptions import ClientException
class Aggregation(Enum):
QUARTERS = "quarters"
HOURS = "hours"
class Values:
base_path = "/values"
auth = None
def __init__(self, auth):
self.auth = auth
def get(self, object_id, from_timestamp, to_timestamp, with_comments=False, headers=True):
"""
Get measures.
GET /values
https://bbdata.daplab.ch/api/#values_get
"""
params = {
"ids": object_id,
"from": from_timestamp,
"to": to_timestamp,
"withComments": with_comments,
"headers": headers
}
url = output_api_url + self.base_path
r = requests.get(url, params, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def get_latest(self, object_id, before_timestamp, with_comments=False):
"""
Get the latest measure before a given date, if any. Note that the
lookup won't go further than six month in time. This means that if the
object didn't deliver any value in the six month before the "before"
parameter, no value will be returned.
GET /values/latest
https://bbdata.daplab.ch/api/#values_latest_get
"""
params = {
"ids": object_id,
"before": before_timestamp,
"withComments": with_comments,
}
url = output_api_url + self.base_path
r = requests.get(url, params, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def get_hours(self, object_id, from_timestamp, to_timestamp, with_comments=False, headers=True):
"""
# TODO No definition in the docs
GET /values/hours
"""
return self.__aggregation(object_id, from_timestamp, to_timestamp, "hours", with_comments, headers)
def get_quarters(self, object_id, from_timestamp, to_timestamp, with_comments=False, headers=True):
"""
# TODO No definition in the docs
GET /values/quarters
"""
return self.__aggregation(object_id, from_timestamp, to_timestamp, "quarters", with_comments, headers)
def __aggregation(self, object_id, from_timestamp, to_timestamp, aggregation, with_comments=False, headers=True):
"""
Generic method to call the aggregations implemented in the API
"""
params = {
"ids": object_id,
"from": from_timestamp,
"to": to_timestamp,
"withComments": with_comments,
"headers": headers
}
url = output_api_url + self.base_path
if aggregation == Aggregation.HOURS.value:
url = url + "/hours"
elif aggregation == Aggregation.QUARTERS.value:
url = url + "/quarters"
else:
raise ClientException("This aggregation isn't implemented")
r = requests.get(url, params, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
| 29 | 2,920 | 46 |
71902b1fce8341e9f2e83921f553231c3a499893 | 1,708 | py | Python | dict_tiny/util.py | louieh/dict-tiny | afa3eb9c5e5e00a103799e2b76c16819736d756e | [
"MIT"
] | 8 | 2018-06-28T09:17:18.000Z | 2022-01-12T03:22:05.000Z | dict_tiny/util.py | louieh/dict-tiny | afa3eb9c5e5e00a103799e2b76c16819736d756e | [
"MIT"
] | 1 | 2021-12-17T17:06:11.000Z | 2021-12-17T17:06:11.000Z | dict_tiny/util.py | louieh/dict-tiny | afa3eb9c5e5e00a103799e2b76c16819736d756e | [
"MIT"
] | null | null | null | from collections import defaultdict
from lxml import html
import requests
from plumbum import colors
from dict_tiny.setting import TIME_OUT
def is_alphabet(word):
"""
return the word is English or Chinese
:param word:
:return:
"""
is_alphabet = defaultdict(int)
word = word.replace(' ', '')
for each_letter in word:
if each_letter >= '\u4e00' and each_letter <= '\u9fff':
is_alphabet['cn'] += 1
# elif word >= '\u0030' and word <= '\u0039':
# return 'num'
elif (each_letter >= '\u0041' and each_letter <= '\u005a') or (
each_letter >= '\u0061' and each_letter <= '\u007a'):
is_alphabet['en'] += 1
else:
is_alphabet['other'] += 1
is_alphabet['en'] /= 4
for len_type, num in is_alphabet.items():
if num >= sum(is_alphabet.values()) * 0.7:
return len_type
return 'other'
def downloader(url, header):
"""
:param url: url need to be downloaded
:param header: fake header
:return:
"""
try:
result = requests.get(url, headers=header, timeout=TIME_OUT)
result_selector = html.etree.HTML(result.text)
resp_code = result.status_code
except requests.exceptions.ConnectionError as e:
print(colors.red | "[Error!] Time out.")
print("<%s>" % e)
result_selector = None
resp_code = None
return result_selector, resp_code
def downloader_plain(url, header):
"""
plain download. Do not make the resp to selector
:param url:
:param header:
:return:
"""
try:
return requests.get(url, headers=header).text
except:
return None
| 26.6875 | 71 | 0.595433 | from collections import defaultdict
from lxml import html
import requests
from plumbum import colors
from dict_tiny.setting import TIME_OUT
def is_alphabet(word):
"""
return the word is English or Chinese
:param word:
:return:
"""
is_alphabet = defaultdict(int)
word = word.replace(' ', '')
for each_letter in word:
if each_letter >= '\u4e00' and each_letter <= '\u9fff':
is_alphabet['cn'] += 1
# elif word >= '\u0030' and word <= '\u0039':
# return 'num'
elif (each_letter >= '\u0041' and each_letter <= '\u005a') or (
each_letter >= '\u0061' and each_letter <= '\u007a'):
is_alphabet['en'] += 1
else:
is_alphabet['other'] += 1
is_alphabet['en'] /= 4
for len_type, num in is_alphabet.items():
if num >= sum(is_alphabet.values()) * 0.7:
return len_type
return 'other'
def downloader(url, header):
"""
:param url: url need to be downloaded
:param header: fake header
:return:
"""
try:
result = requests.get(url, headers=header, timeout=TIME_OUT)
result_selector = html.etree.HTML(result.text)
resp_code = result.status_code
except requests.exceptions.ConnectionError as e:
print(colors.red | "[Error!] Time out.")
print("<%s>" % e)
result_selector = None
resp_code = None
return result_selector, resp_code
def downloader_plain(url, header):
"""
plain download. Do not make the resp to selector
:param url:
:param header:
:return:
"""
try:
return requests.get(url, headers=header).text
except:
return None
| 0 | 0 | 0 |
e16f51347983756b0b65cbcd4ff05bed86cadb4a | 341 | py | Python | recipe/run_test.py | conda-forge/cmlgenerator-feedstock | ad4662b2bcc6a7c1de69ed365e14cf4f34773b00 | [
"BSD-3-Clause"
] | null | null | null | recipe/run_test.py | conda-forge/cmlgenerator-feedstock | ad4662b2bcc6a7c1de69ed365e14cf4f34773b00 | [
"BSD-3-Clause"
] | null | null | null | recipe/run_test.py | conda-forge/cmlgenerator-feedstock | ad4662b2bcc6a7c1de69ed365e14cf4f34773b00 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""
Created on Thr Apr 8 18:00:00 2021
:Authors:
Mark Driver <mdd31>
Mark J. Williamson <mjw99>
"""
import logging
from cmlgenerator.test.cmlgeneratortests import run_tests
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.WARN)
if __name__ == "__main__":
run_tests()
| 17.05 | 57 | 0.730205 | #!/usr/bin/env python3
"""
Created on Thr Apr 8 18:00:00 2021
:Authors:
Mark Driver <mdd31>
Mark J. Williamson <mjw99>
"""
import logging
from cmlgenerator.test.cmlgeneratortests import run_tests
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.WARN)
if __name__ == "__main__":
run_tests()
| 0 | 0 | 0 |
00b8e893fb9addadfffde5ef162da631dad72050 | 14,987 | py | Python | operators/draw_nodes/draw_nodes_outlines.py | nwtajcky/RenderStackNode | 31516af6fa8572c9d6ee3df017e59cae394047a3 | [
"Apache-2.0"
] | null | null | null | operators/draw_nodes/draw_nodes_outlines.py | nwtajcky/RenderStackNode | 31516af6fa8572c9d6ee3df017e59cae394047a3 | [
"Apache-2.0"
] | 1 | 2021-12-27T06:39:08.000Z | 2021-12-27T06:39:08.000Z | operators/draw_nodes/draw_nodes_outlines.py | nwtajcky/RenderStackNode | 31516af6fa8572c9d6ee3df017e59cae394047a3 | [
"Apache-2.0"
] | null | null | null | from math import cos, sin, pi
import bgl
import blf
import bpy
import gpu
from bpy.props import *
from bpy.types import Operator
from gpu_extras.batch import batch_for_shader
from .utils import dpifac, draw_tri_fan
from ...preferences import get_pref
class RSN_OT_DrawNodes(Operator):
"""Draw the active task's settings """
bl_idname = "rsn.draw_nodes"
bl_label = "Draw Nodes"
bl_options = {'REGISTER', 'UNDO'}
| 35.683333 | 129 | 0.602389 | from math import cos, sin, pi
import bgl
import blf
import bpy
import gpu
from bpy.props import *
from bpy.types import Operator
from gpu_extras.batch import batch_for_shader
from .utils import dpifac, draw_tri_fan
from ...preferences import get_pref
def find_node_parent(node):
def get_parent(obj):
if hasattr(obj, "parent"):
get_parent(obj.parent)
else:
return obj
return get_parent(node)
def get_node_location(node):
nlocx = (node.location.x + 1) * dpifac()
nlocy = (node.location.y + 1) * dpifac()
ndimx = node.dimensions.x
ndimy = node.dimensions.y
# # if node have parent
# loc = find_node_parent(node).location
# nlocx += loc.x
# nlocy += loc.y
return nlocx, nlocy, ndimx, ndimy
def get_node_vertices(nlocx, nlocy, ndimx, ndimy):
top_left = (nlocx, nlocy)
top_right = (nlocx + ndimx, nlocy)
bottom_left = (nlocx, nlocy - ndimy)
bottom_right = (nlocx + ndimx, nlocy - ndimy)
return top_left, top_right, bottom_left, bottom_right
def draw_text_2d(color, text, x, y, size=20):
font_id = 0
blf.position(font_id, x, y, 0)
blf.color(font_id, color[0], color[1], color[2], color[3])
blf.size(font_id, size, 72)
blf.draw(font_id, text)
def draw_round_rectangle(shader, points, radius=8, colour=(1.0, 1.0, 1.0, 0.7)):
sides = 16
radius = 16
# fill
draw_tri_fan(shader, points, colour)
top_left = points[1]
top_right = points[0]
bottom_left = points[2]
bottom_right = points[3]
# Top edge
top_left_top = (top_left[0], top_left[1] + radius)
top_right_top = (top_right[0], top_right[1] + radius)
vertices = [top_right_top, top_left_top, top_left, top_right]
draw_tri_fan(shader, vertices, colour)
# Left edge
top_left_left = (top_left[0] - radius, top_left[1])
bottom_left_left = (bottom_left[0] - radius, bottom_left[1])
vertices = [top_left, top_left_left, bottom_left_left, bottom_left]
draw_tri_fan(shader, vertices, colour)
# Bottom edge
bottom_left_bottom = (bottom_left[0], bottom_left[1] - radius)
bottom_right_bottom = (bottom_right[0], bottom_right[1] - radius)
vertices = [bottom_right, bottom_left, bottom_left_bottom, bottom_right_bottom]
draw_tri_fan(shader, vertices, colour)
# right edge
top_right_right = (top_right[0] + radius, top_right[1])
bottom_right_right = (bottom_right[0] + radius, bottom_right[1])
vertices = [top_right_right, top_right, bottom_right, bottom_right_right]
draw_tri_fan(shader, vertices, colour)
# Top right corner
vertices = [top_right]
mx = top_right[0]
my = top_right[1]
for i in range(sides + 1):
if 0 <= i <= 4:
cosine = radius * cos(i * 2 * pi / sides) + mx
sine = radius * sin(i * 2 * pi / sides) + my
vertices.append((cosine, sine))
draw_tri_fan(shader, vertices, colour)
# Top left corner
vertices = [top_left]
mx = top_left[0]
my = top_left[1]
for i in range(sides + 1):
if 4 <= i <= 8:
cosine = radius * cos(i * 2 * pi / sides) + mx
sine = radius * sin(i * 2 * pi / sides) + my
vertices.append((cosine, sine))
draw_tri_fan(shader, vertices, colour)
# Bottom left corner
vertices = [bottom_left]
mx = bottom_left[0]
my = bottom_left[1]
for i in range(sides + 1):
if 8 <= i <= 12:
cosine = radius * cos(i * 2 * pi / sides) + mx
sine = radius * sin(i * 2 * pi / sides) + my
vertices.append((cosine, sine))
draw_tri_fan(shader, vertices, colour)
# Bottom right corner
vertices = [bottom_right]
mx = bottom_right[0]
my = bottom_right[1]
for i in range(sides + 1):
if 12 <= i <= 16:
cosine = radius * cos(i * 2 * pi / sides) + mx
sine = radius * sin(i * 2 * pi / sides) + my
vertices.append((cosine, sine))
draw_tri_fan(shader, vertices, colour)
def draw_rounded_node_border(shader, node, radius=8, colour=(1.0, 1.0, 1.0, 0.7)):
area_width = bpy.context.area.width - (16 * dpifac()) - 1
bottom_bar = (16 * dpifac()) + 1
sides = 16
radius = radius * dpifac()
nlocx, nlocy, ndimx, ndimy = get_node_location(node)
if node.hide:
nlocx += -1
nlocy += 5
if node.type == 'REROUTE':
# nlocx += 1
nlocy -= 1
ndimx = 0
ndimy = 0
radius += 6
top_left, top_right, bottom_left, bottom_right = get_node_vertices(nlocx, nlocy, ndimx, ndimy)
# Top left corner
mx, my = bpy.context.region.view2d.view_to_region(top_left[0], top_left[1], clip=False)
vertices = [(mx, my)]
for i in range(sides + 1):
if (4 <= i <= 8) and my > bottom_bar and mx < area_width:
cosine = radius * cos(i * 2 * pi / sides) + mx
sine = radius * sin(i * 2 * pi / sides) + my
vertices.append((cosine, sine))
draw_tri_fan(shader, vertices, colour)
# Top right corner
mx, my = bpy.context.region.view2d.view_to_region(top_right[0], top_right[1], clip=False)
vertices = [(mx, my)]
for i in range(sides + 1):
if (0 <= i <= 4) and my > bottom_bar and mx < area_width:
cosine = radius * cos(i * 2 * pi / sides) + mx
sine = radius * sin(i * 2 * pi / sides) + my
vertices.append((cosine, sine))
draw_tri_fan(shader, vertices, colour)
# Bottom left corner
mx, my = bpy.context.region.view2d.view_to_region(bottom_left[0], bottom_left[1], clip=False)
vertices = [(mx, my)]
for i in range(sides + 1):
if 8 <= i <= 12:
if my > bottom_bar and mx < area_width:
cosine = radius * cos(i * 2 * pi / sides) + mx
sine = radius * sin(i * 2 * pi / sides) + my
vertices.append((cosine, sine))
draw_tri_fan(shader, vertices, colour)
# Bottom right corner
mx, my = bpy.context.region.view2d.view_to_region(bottom_right[0], bottom_right[1], clip=False)
vertices = [(mx, my)]
for i in range(sides + 1):
if (12 <= i <= 16) and my > bottom_bar and mx < area_width:
cosine = radius * cos(i * 2 * pi / sides) + mx
sine = radius * sin(i * 2 * pi / sides) + my
vertices.append((cosine, sine))
draw_tri_fan(shader, vertices, colour)
# prepare drawing all edges in one batch
vertices = []
indices = []
id_last = 0
# Left edge
m1x, m1y = bpy.context.region.view2d.view_to_region(nlocx, nlocy, clip=False)
m2x, m2y = bpy.context.region.view2d.view_to_region(nlocx, nlocy - ndimy, clip=False)
if m1x < area_width and m2x < area_width:
vertices.extend([(m2x - radius, m2y), (m2x, m2y),
(m1x, m1y), (m1x - radius, m1y)])
indices.extend([(id_last, id_last + 1, id_last + 3),
(id_last + 3, id_last + 1, id_last + 2)])
id_last += 4
# Top edge
m1x, m1y = bpy.context.region.view2d.view_to_region(nlocx, nlocy, clip=False)
m2x, m2y = bpy.context.region.view2d.view_to_region(nlocx + ndimx, nlocy, clip=False)
m1x = min(m1x, area_width)
m2x = min(m2x, area_width)
if m1y > bottom_bar and m2y > bottom_bar:
vertices.extend([(m1x, m1y), (m2x, m1y),
(m2x, m1y + radius), (m1x, m1y + radius)])
indices.extend([(id_last, id_last + 1, id_last + 3),
(id_last + 3, id_last + 1, id_last + 2)])
id_last += 4
# Right edge
m1x, m1y = bpy.context.region.view2d.view_to_region(nlocx + ndimx, nlocy, clip=False)
m2x, m2y = bpy.context.region.view2d.view_to_region(nlocx + ndimx, nlocy - ndimy, clip=False)
m1y = max(m1y, bottom_bar)
m2y = max(m2y, bottom_bar)
if m1x < area_width and m2x < area_width:
vertices.extend([(m1x, m2y), (m1x + radius, m2y),
(m1x + radius, m1y), (m1x, m1y)])
indices.extend([(id_last, id_last + 1, id_last + 3),
(id_last + 3, id_last + 1, id_last + 2)])
id_last += 4
# Bottom edge
m1x, m1y = bpy.context.region.view2d.view_to_region(nlocx, nlocy - ndimy, clip=False)
m2x, m2y = bpy.context.region.view2d.view_to_region(nlocx + ndimx, nlocy - ndimy, clip=False)
m1x = min(m1x, area_width)
m2x = min(m2x, area_width)
if m1y > bottom_bar and m2y > bottom_bar:
vertices.extend([(m1x, m2y), (m2x, m2y),
(m2x, m1y - radius), (m1x, m1y - radius)])
indices.extend([(id_last, id_last + 1, id_last + 3),
(id_last + 3, id_last + 1, id_last + 2)])
# now draw all edges in one batch
if len(vertices) != 0:
batch = batch_for_shader(shader, 'TRIS', {"pos": vertices}, indices=indices)
shader.bind()
shader.uniform_float("color", colour)
batch.draw(shader)
def draw_callback_nodeoutline(self, context):
if context.window_manager.rsn_node_list == '':
pass
bgl.glLineWidth(1)
bgl.glEnable(bgl.GL_BLEND)
bgl.glEnable(bgl.GL_LINE_SMOOTH)
bgl.glHint(bgl.GL_LINE_SMOOTH_HINT, bgl.GL_NICEST)
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
# draw outline
########################
# set color
task_outer = (self.task_color[0], self.task_color[1], self.task_color[2], self.alpha)
file_path_outer = (self.file_path_color[0], self.file_path_color[1], self.file_path_color[2], self.alpha)
col_outer = (self.settings_color[0], self.settings_color[1], self.settings_color[2], self.alpha)
col_inner = (0.0, 0.0, 0.0, self.alpha + 0.1)
node_list = context.window_manager.rsn_node_list.split(',')
# draw all nodes
for node_name in node_list:
try:
node = context.space_data.edit_tree.nodes[node_name]
if node.bl_idname == 'RSNodeTaskNode':
draw_rounded_node_border(shader, node, radius=self.radius * 1.25, colour=task_outer)
draw_rounded_node_border(shader, node, radius=self.radius * 1.25 - 1.25, colour=col_inner)
elif node.bl_idname in {'RenderNodeSceneFilePath', 'RSNodeFilePathInputNode'}:
draw_rounded_node_border(shader, node, radius=self.radius, colour=file_path_outer)
draw_rounded_node_border(shader, node, radius=self.radius - 1, colour=col_inner)
elif node.bl_idname != 'NodeReroute':
draw_rounded_node_border(shader, node, radius=self.radius, colour=col_outer)
draw_rounded_node_border(shader, node, radius=self.radius - 1, colour=col_inner)
except KeyError:
pass
# draw text
##################
if self.show_text_info:
# properties text
task_text = "No Active Task!" if context.window_manager.rsn_viewer_node == '' else context.window_manager.rsn_viewer_node
camera = context.scene.camera.name if context.scene.camera else "No Scene camera"
is_save = True if bpy.data.filepath != '' else False
file_path_text = context.scene.render.filepath if is_save else "Save your file first!"
texts = [
f"Task: {task_text}",
f"Camera: {camera}",
f"Engine: {context.scene.render.engine}",
f"Frame: {context.scene.frame_start} - {context.scene.frame_end}",
f"FilePath: {file_path_text}",
]
# text background
r, g, b = self.background_color
longest_text = max(texts, key=len, default='')
size = blf.dimensions(0, longest_text) # get the longest text
size = [v * 1.5 / context.preferences.view.ui_scale for v in size] # scale with the ui scale
# set corner
top = 125
bottom = 25
step = 25
vertices = [(10 + size[0], top + size[1]), (20, top + size[1]), (20, 25), (10 + size[0], bottom), ]
draw_round_rectangle(shader, vertices, radius=18, colour=(0, 0, 0, self.alpha)) # shadow
draw_round_rectangle(shader, vertices, radius=14, colour=(r, g, b, self.alpha)) # main box
# draw texts
r, g, b = self.text_color
size = 20
for i, text in enumerate(texts):
draw_text_2d((r, g, b, self.alpha, size), text, 20, top - step * i)
# restore
#####################
bgl.glDisable(bgl.GL_BLEND)
bgl.glDisable(bgl.GL_LINE_SMOOTH)
class RSN_OT_DrawNodes(Operator):
    """Draw the active task's settings """
    bl_idname = "rsn.draw_nodes"
    bl_label = "Draw Nodes"
    bl_options = {'REGISTER', 'UNDO'}
    def modal(self, context, event):
        """Animate the overlay alpha on each timer tick; tear everything down
        once drawing has been turned off and the fade-out has completed."""
        context.area.tag_redraw()
        if event.type == 'TIMER':
            # show draw
            if context.scene.RSNBusyDrawing:
                if self.alpha < 0.5: self.alpha += 0.02  # show
            # close draw
            else:
                if self.alpha > 0:
                    self.alpha -= 0.02  # fade
                    return {'RUNNING_MODAL'}
                # remove timer / handles
                # Fade finished: release the timer first, then the draw
                # handler, and end the modal operator.
                context.window_manager.event_timer_remove(self._timer)
                bpy.types.SpaceNodeEditor.draw_handler_remove(self._handle, 'WINDOW')
                return {'FINISHED'}
        return {'PASS_THROUGH'}
    def invoke(self, context, event):
        """Validate the editor context, cache draw preferences on the operator
        instance, install the draw handler/timer and enter modal state."""
        # Abort unless invoked from a RenderStackNode tree in the node editor.
        if True in {context.area.type != 'NODE_EDITOR',
                    context.space_data.edit_tree is None,
                    context.space_data.edit_tree.bl_idname != 'RenderStackNodeTree'}:
            self.report({'WARNING'}, "NodeEditor not found, cannot run operator")
            return {'CANCELLED'}
        # init draw values
        #####################
        pref = get_pref()
        self.alpha = 0  # start fully transparent; modal() fades it in
        self.radius = pref.draw_nodes.border_radius
        # node color
        self.settings_color = pref.draw_nodes.settings_color
        self.task_color = pref.draw_nodes.task_color
        self.file_path_color = pref.draw_nodes.file_path_color
        self.show_text_info = pref.draw_nodes.show_text_info
        # background color
        self.background_color = pref.draw_nodes.background_color
        # text color
        self.text_color = pref.draw_nodes.text_color
        # set status flag that modal() polls to decide show vs. fade-out
        ##################
        context.scene.RSNBusyDrawing = True
        # add timer and handles
        self._timer = context.window_manager.event_timer_add(0.01, window=context.window)
        self._handle = bpy.types.SpaceNodeEditor.draw_handler_add(draw_callback_nodeoutline, (self, context),
                                                                  'WINDOW', 'POST_PIXEL')
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}
def register():
    """Register the draw operator and the scene flag that gates drawing."""
    bpy.types.Scene.RSNBusyDrawing = BoolProperty(default=False)
    bpy.utils.register_class(RSN_OT_DrawNodes)
def unregister():
    """Remove the scene flag and unregister the operator (reverse of register)."""
    del bpy.types.Scene.RSNBusyDrawing
    bpy.utils.unregister_class(RSN_OT_DrawNodes)
| 14,285 | 0 | 261 |
1cac8ac00e852b70048e86fc5f1112d85ed2cd46 | 3,187 | py | Python | src/internal/checker.py | ziyan-wang/dcs290-proj2-judge | 3fa56f7e9cce58b2f6bc3fcc0fb8b8ae32eb968b | [
"MIT"
] | null | null | null | src/internal/checker.py | ziyan-wang/dcs290-proj2-judge | 3fa56f7e9cce58b2f6bc3fcc0fb8b8ae32eb968b | [
"MIT"
] | null | null | null | src/internal/checker.py | ziyan-wang/dcs290-proj2-judge | 3fa56f7e9cce58b2f6bc3fcc0fb8b8ae32eb968b | [
"MIT"
] | null | null | null | from internal.model import Node, Rule, ExistRule, ChildRule, Direction, RuleNode, ParentChildRule
| 40.858974 | 112 | 0.657986 | from internal.model import Node, Rule, ExistRule, ChildRule, Direction, RuleNode, ParentChildRule
def check(tree: Node, rules: list[Rule]) -> list[str]:
    """Return the ids of every rule in ``rules`` that ``tree`` violates,
    preserving the order in which the rules were given."""
    return [rule.id for rule in rules if _check(tree, rule) is False]
def _check(root: Node, rule: Rule) -> bool:
    """Dispatch a single rule to its type-specific checker.

    NOTE(review): branch order matters if ParentChildRule subclasses
    ChildRule -- ParentChildRule must be tested first so it is not swallowed
    by the ChildRule branch; confirm against internal.model.
    """
    if isinstance(rule, ExistRule):
        return _check_exist(root, rule)
    elif isinstance(rule, ParentChildRule):
        return _check_parent_child(root, rule)
    elif isinstance(rule, ChildRule):
        return _check_child(root, rule, False)
    else:
        raise RuntimeError('Unexpected rule type')
def _check_exist(root: Node, rule: ExistRule) -> bool:
    """True when some node in the subtree rooted at ``root`` matches
    ``rule.root`` (pre-order search, short-circuiting on the first hit)."""
    if _check_single(root, rule.root):
        return True
    children = (root.left_child, root.right_child)
    return any(child is not None and _check_exist(child, rule)
               for child in children)
def _check_child(root: Node, rule: ChildRule, root_found: bool) -> bool:
    """Check a root/descendant rule.

    Phase 1 (``root_found`` False): search the whole tree for a node matching
    ``rule.root``; at each match, descend into the child subtree(s) allowed by
    ``rule.direction`` with ``root_found`` True.
    Phase 2 (``root_found`` True): search that subtree for ``rule.child``.
    """
    if root_found is False:
        if _check_single(root, rule.root):
            # Direction.NONE allows either side; otherwise only the named side.
            if ((rule.direction == Direction.NONE or rule.direction == Direction.LEFT) and
                    root.left_child is not None and _check_child(root.left_child, rule, True)):
                return True
            if ((rule.direction == Direction.NONE or rule.direction == Direction.RIGHT) and
                    root.right_child is not None and _check_child(root.right_child, rule, True)):
                return True
        # Keep looking for other candidate root matches deeper in the tree.
        if root.left_child is not None and _check_child(root.left_child, rule, False):
            return True
        if root.right_child is not None and _check_child(root.right_child, rule, False):
            return True
        return False
    else:
        if _check_single(root, rule.child):
            return True
        if root.left_child is not None and _check_child(root.left_child, rule, True):
            return True
        if root.right_child is not None and _check_child(root.right_child, rule, True):
            return True
        return False
def _check_parent_child(root: Node, rule: ParentChildRule) -> bool:
    """Check a sibling-subtree rule: once ``rule.root`` matches, look for the
    rule's child pattern in the *other* subtree of the matched node's parent.

    Raises RuntimeError if the matched node is neither child of its parent
    (inconsistent tree links).
    """
    if _check_single(root, rule.root):
        parent = root.parent
        if parent is not None:
            # Search the sibling subtree on the opposite side of the match.
            if root == parent.left_child:
                return _check_child(parent.right_child, rule, True) if parent.right_child is not None else False
            elif root == parent.right_child:
                return _check_child(parent.left_child, rule, True) if parent.left_child is not None else False
            else:
                raise RuntimeError('Unexpected found_root_node')
    if root.left_child is not None and _check_parent_child(root.left_child, rule):
        return True
    if root.right_child is not None and _check_parent_child(root.right_child, rule):
        return True
    return False
def _check_single(root: Node, rule_node: RuleNode) -> bool:
    """Match one tree node against one rule node.

    The rule's ``index``/``literal`` fields act as wildcards when None.
    """
    if rule_node.name != root.type_name:
        return False
    if rule_node.index is not None and rule_node.index != root.index:
        return False
    return rule_node.literal is None or rule_node.literal == root.literal
| 2,945 | 0 | 138 |
3922c0d00a10118d6f978a18d3a3ad837a2ecf59 | 1,034 | py | Python | portals/wwits/groups/access/function_authorize_list/schemas.py | jalanb/portals | 7a5360b48547719d3fbe50790f08eaf5571148dd | [
"ADSL"
] | null | null | null | portals/wwits/groups/access/function_authorize_list/schemas.py | jalanb/portals | 7a5360b48547719d3fbe50790f08eaf5571148dd | [
"ADSL"
] | null | null | null | portals/wwits/groups/access/function_authorize_list/schemas.py | jalanb/portals | 7a5360b48547719d3fbe50790f08eaf5571148dd | [
"ADSL"
] | null | null | null | from marshmallow import fields, post_load
from portals.wwits.apis.rest import BaseSchemaExcludeFields as Schema
from .models import ParmModel, FunctionAuthorizeListModel, FunctionModel
| 28.722222 | 72 | 0.718569 | from marshmallow import fields, post_load
from portals.wwits.apis.rest import BaseSchemaExcludeFields as Schema
from .models import ParmModel, FunctionAuthorizeListModel, FunctionModel
class ParmSchema(Schema):
    """Marshmallow schema for the ``Parms`` metadata envelope."""
    Version = fields.Str(data_key="Version")
    Env = fields.Str(data_key="Env")
    UserID = fields.Str(data_key="UserID")
    Session = fields.Int(data_key="Session")
    Source = fields.Str(data_key="Source")
    RC = fields.Int(data_key="RC")
    ResultMsg = fields.Str(data_key="ResultMsg")
    @post_load
    def make_schema(self, data, **kwargs):
        """Deserialize straight into a ParmModel instead of a plain dict."""
        return ParmModel(**data)
class FunctionSchema(Schema):
    """Marshmallow schema for a single authorized-function entry."""
    FunctionName = fields.Str(data_key="FunctionName")
    @post_load
    def make_schema(self, data, **kwargs):
        """Deserialize straight into a FunctionModel instead of a plain dict."""
        return FunctionModel(**data)
class FunctionAuthorizeListSchema(Schema):
    """Top-level schema: Parms envelope plus the list of functions."""
    Parms = fields.Nested(ParmSchema)
    DataList = fields.Nested(FunctionSchema, many=True)
    @post_load
    def make_schema(self, data, **kwargs):
        """Deserialize straight into a FunctionAuthorizeListModel."""
        return FunctionAuthorizeListModel(**data)
| 171 | 605 | 69 |
15e5add04b2bb411cd8af5ed528e6fddb6ed8c75 | 8,045 | py | Python | 05 Heap/canonical_heap.py | XuuRee/python-data-structures | a3972f5781d666d15d61c0d474877880d1b7c483 | [
"MIT"
] | null | null | null | 05 Heap/canonical_heap.py | XuuRee/python-data-structures | a3972f5781d666d15d61c0d474877880d1b7c483 | [
"MIT"
] | null | null | null | 05 Heap/canonical_heap.py | XuuRee/python-data-structures | a3972f5781d666d15d61c0d474877880d1b7c483 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# IB002 Domaci uloha 6.
#
# V nasledujicim textu pouzivame pojem "halda" ve vyznamu "binarni halda".
#
# Minimova halda je v kanonickem tvaru, pokud pro kazdy jeji prvek se dvema
# potomky plati, ze jeho levy potomek je mensi nez ten pravy nebo se oba
# rovnaji.
#
# Je v kanonickem tvaru | Neni v kanonickem tvaru
# |
# (1) | (1)
# / \ | / \
# (2) (3) | (3) (2)
# Trida representujici minimovou haldu. Pro praci s ni muzete s vyhodou pouzit
# funkce, ktere jste implementovali v zakladnim domacim ukolu.
# Ukol 1.
# Vasim prvnim ukolem je implementovat funkci is_canonical_min_heap(heap),
# ktera overi, zda je zadana halda 'heap' korektni minimovou haldou
# v kanonickem tvaru. Pokud ano, vrati True, v opacnem pripade vrati False.
#
# Prazdna nebo jednoprvkova halda je v kanonickem tvaru implicitne. Mejte na
# pameti, ze halda v kanonickem tvaru musi splnovat take pozadavky kladene na
# minimovou haldu.
def is_canonical_min_heap(heap):
    """Decide whether ``heap`` is a valid min-heap in canonical form.

    input:  'heap' of type MinHeap (heap.size matches the used length of
            heap.array; neither the heap property nor the canonical
            property is guaranteed up front)
    output: True when 'heap' is a min-heap in canonical form, False otherwise
    time complexity: O(n) for n elements of 'heap'
    """
    if heap is None:
        return False
    if heap.size in (0, 1):
        return True
    if not is_min_heap(heap.array, 0):
        return False
    # Sibling pairs sit at indices (2k+1, 2k+2); the canonical form requires
    # left <= right for every complete pair (a lone left child is fine).
    for right in range(2, heap.size, 2):
        if heap.array[right - 1] > heap.array[right]:
            return False
    return True
# Ukol 2.
# Druhym ukolem je implementovat funkci canonise_min_heap(heap), ktera zadanou
# minimovou haldu 'heap' prevede na kanonicky tvar. Funkce bude menit primo
# haldu zadanou v argumentu, proto nebude vracet zadnou navratovou hodnotu.
#
# Napoveda:
# Pro algoritmus s linearni casovou slozitosti je potreba postupovat takto:
# - Rekurzivne resime od korene k listum haldy;
# - pro kazdy uzel haldy:
# + zkontrolujeme, jestli potomci splnuji vlastnost kanonickeho tvaru;
# pokud ne:
# * prohodime hodnoty leveho a praveho potomka;
# * tim se muze pokazit vlastnost haldy v pravem podstrome, proto
# probublame problematickou hodnotu z korene praveho podstromu
# tak hluboko, aby uz neporusovala vlastnost haldy (pri tomto bublani
# opravujeme pouze vlastnost haldy, kanonicky tvar neresime)
# + mame tedy korektni minimovou haldu, ktera navic splnuje kanonicky
# tvar od tohoto uzlu smerem nahoru;
# + pokracujeme v rekurzi vlevo a vpravo.
def canonise_min_heap(heap):
    """Convert the valid min-heap ``heap`` into canonical form in place.

    input:  'heap' -- a correct min-heap of type MinHeap
    output: nothing; 'heap' is rearranged into canonical form while keeping
            the same multiset of elements
    time complexity: O(n), where 'n' is the number of elements of 'heap'
    """
    if is_min_heap(heap.array, 0):
        for i in range(heap.size // 2):
            # Indices of the two children of internal node i (top-down order).
            left, right = 2 * i + 1, 2 * i + 2
            if left < heap.size and right < heap.size:
                if heap.array[left] > heap.array[right]:
                    # Restore canonical order, then repair the heap property
                    # the swap may have broken in the right subtree.
                    swap(heap.array, left, right)
                    check_subtree(heap, right)
# Smoke tests for the canonical-form check and repair.  The original file
# repeated the same print/check/repair sequence verbatim for every heap;
# the cases are now data-driven to remove the copy-paste blocks.
heap = MinHeap()
test_heaps = [
    ([1, 3, 2], 3),
    ([-1, 0, -1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, -1], 14),
    ([-2, 0, -2, 0, 0, -1, -2], 7),
    ([-1, 0, -1, 0, 0, -1, 0, 0, 0, 0, 0, -1], 12),
    ([1, 3, 2, 4, 5, 9, 7, 6, 8], 9),
    ([0, 1, 0, 1, 1, 0], 6),
    ([0, 1, 0, 1, 1, 0, 0], 7),
    ([0, 1, 0, 1, 1, 0, 1], 7),
]
for heap_array, heap_size in test_heaps:
    heap.array = heap_array
    heap.size = heap_size
    if is_canonical_min_heap(heap):
        print(heap.array, " = IS canonical heap")
    else:
        print(heap.array, " = IS NOT canonical heap")
        canonise_min_heap(heap)
        print(heap.array, " = REPAIRED")
    # After an eventual repair the heap must always be canonical.
    if is_canonical_min_heap(heap):
        print(" = TEST OK")
    else:
        print(" = TEST NOK")
    print("-----------------------------------------")
# IB002 Domaci uloha 6.
#
# V nasledujicim textu pouzivame pojem "halda" ve vyznamu "binarni halda".
#
# Minimova halda je v kanonickem tvaru, pokud pro kazdy jeji prvek se dvema
# potomky plati, ze jeho levy potomek je mensi nez ten pravy nebo se oba
# rovnaji.
#
# Je v kanonickem tvaru | Neni v kanonickem tvaru
# |
# (1) | (1)
# / \ | / \
# (2) (3) | (3) (2)
# Trida representujici minimovou haldu. Pro praci s ni muzete s vyhodou pouzit
# funkce, ktere jste implementovali v zakladnim domacim ukolu.
class MinHeap:
    """Minimal container for a binary min-heap stored in a flat list."""
    def __init__(self):
        # Number of valid elements; the test script assigns it with `array`.
        self.size = 0
        # Flat representation: children of index i sit at 2i+1 and 2i+2.
        self.array = []
# Ukol 1.
# Vasim prvnim ukolem je implementovat funkci is_canonical_min_heap(heap),
# ktera overi, zda je zadana halda 'heap' korektni minimovou haldou
# v kanonickem tvaru. Pokud ano, vrati True, v opacnem pripade vrati False.
#
# Prazdna nebo jednoprvkova halda je v kanonickem tvaru implicitne. Mejte na
# pameti, ze halda v kanonickem tvaru musi splnovat take pozadavky kladene na
# minimovou haldu.
def swap(array, i, j):
    """Exchange the elements at indices ``i`` and ``j`` of ``array`` in place.

    Uses tuple unpacking instead of the original three-statement temp-variable
    dance; behavior is identical (works for i == j as well).
    """
    array[i], array[j] = array[j], array[i]
def is_min_heap(heap, i):
    """Recursively verify the min-heap property for the subtree rooted at
    index ``i`` of the flat list ``heap``: every parent must be <= each of
    its (existing) children."""
    n = len(heap)
    for child in (2 * i + 1, 2 * i + 2):
        if child < n:
            if heap[child] < heap[i]:
                return False
            if not is_min_heap(heap, child):
                return False
    return True
def is_canonical_min_heap(heap):
    """Decide whether ``heap`` is a valid min-heap in canonical form.

    input:  'heap' of type MinHeap (heap.size matches the used length of
            heap.array; neither the heap property nor the canonical
            property is guaranteed up front)
    output: True when 'heap' is a min-heap in canonical form, False otherwise
    time complexity: O(n) for n elements of 'heap'
    """
    if heap is None:
        return False
    if heap.size in (0, 1):
        return True
    if not is_min_heap(heap.array, 0):
        return False
    # Sibling pairs sit at indices (2k+1, 2k+2); the canonical form requires
    # left <= right for every complete pair (a lone left child is fine).
    for right in range(2, heap.size, 2):
        if heap.array[right - 1] > heap.array[right]:
            return False
    return True
# Ukol 2.
# Druhym ukolem je implementovat funkci canonise_min_heap(heap), ktera zadanou
# minimovou haldu 'heap' prevede na kanonicky tvar. Funkce bude menit primo
# haldu zadanou v argumentu, proto nebude vracet zadnou navratovou hodnotu.
#
# Napoveda:
# Pro algoritmus s linearni casovou slozitosti je potreba postupovat takto:
# - Rekurzivne resime od korene k listum haldy;
# - pro kazdy uzel haldy:
# + zkontrolujeme, jestli potomci splnuji vlastnost kanonickeho tvaru;
# pokud ne:
# * prohodime hodnoty leveho a praveho potomka;
# * tim se muze pokazit vlastnost haldy v pravem podstrome, proto
# probublame problematickou hodnotu z korene praveho podstromu
# tak hluboko, aby uz neporusovala vlastnost haldy (pri tomto bublani
# opravujeme pouze vlastnost haldy, kanonicky tvar neresime)
# + mame tedy korektni minimovou haldu, ktera navic splnuje kanonicky
# tvar od tohoto uzlu smerem nahoru;
# + pokracujeme v rekurzi vlevo a vpravo.
def choose_smaller_number(heap, first, second):
    """Return whichever of the two indices holds the smaller key in
    ``heap.array`` (ties resolve to ``second``, as before)."""
    return first if heap.array[first] < heap.array[second] else second
def check_subtree(heap, parent):
    """Sift the value at index ``parent`` down until the min-heap property
    holds again in that subtree (used after a canonicalising swap)."""
    left, right = 2 * parent + 1, 2 * parent + 2
    if left >= heap.size and right >= heap.size:
        # Leaf node -- nothing below to compare against.
        return
    if left < heap.size and right >= heap.size:
        # Only a left child exists (it is a leaf), so one swap suffices.
        if heap.array[parent] > heap.array[left]:
            swap(heap.array, parent, left)
        return
    if left < heap.size and right < heap.size:
        # Both children exist: swap with the smaller one and keep sifting.
        position = choose_smaller_number(heap, left, right)
        if heap.array[parent] > heap.array[position]:
            swap(heap.array, position, parent)
            check_subtree(heap, position)
def canonise_min_heap(heap):
    """Convert the valid min-heap ``heap`` into canonical form in place.

    input:  'heap' -- a correct min-heap of type MinHeap
    output: nothing; 'heap' is rearranged into canonical form while keeping
            the same multiset of elements
    time complexity: O(n), where 'n' is the number of elements of 'heap'
    """
    if is_min_heap(heap.array, 0):
        for i in range(heap.size // 2):
            # Indices of the two children of internal node i (top-down order).
            left, right = 2 * i + 1, 2 * i + 2
            if left < heap.size and right < heap.size:
                if heap.array[left] > heap.array[right]:
                    # Restore canonical order, then repair the heap property
                    # the swap may have broken in the right subtree.
                    swap(heap.array, left, right)
                    check_subtree(heap, right)
# Smoke tests for the canonical-form check and repair.  The original file
# repeated the same print/check/repair sequence verbatim for every heap;
# the cases are now data-driven to remove the copy-paste blocks.
heap = MinHeap()
test_heaps = [
    ([1, 3, 2], 3),
    ([-1, 0, -1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, -1], 14),
    ([-2, 0, -2, 0, 0, -1, -2], 7),
    ([-1, 0, -1, 0, 0, -1, 0, 0, 0, 0, 0, -1], 12),
    ([1, 3, 2, 4, 5, 9, 7, 6, 8], 9),
    ([0, 1, 0, 1, 1, 0], 6),
    ([0, 1, 0, 1, 1, 0, 0], 7),
    ([0, 1, 0, 1, 1, 0, 1], 7),
]
for heap_array, heap_size in test_heaps:
    heap.array = heap_array
    heap.size = heap_size
    if is_canonical_min_heap(heap):
        print(heap.array, " = IS canonical heap")
    else:
        print(heap.array, " = IS NOT canonical heap")
        canonise_min_heap(heap)
        print(heap.array, " = REPAIRED")
    # After an eventual repair the heap must always be canonical.
    if is_canonical_min_heap(heap):
        print(" = TEST OK")
    else:
        print(" = TEST NOK")
    print("-----------------------------------------")
0723c0fb5653e3940416fc37c1548f5e66e2068a | 1,201 | py | Python | data_helpers/decode_index_files.py | ArvinZhuang/COIL | 5cb492f2dcffe594d2be0ae4d4d557056051ffca | [
"Apache-2.0"
] | null | null | null | data_helpers/decode_index_files.py | ArvinZhuang/COIL | 5cb492f2dcffe594d2be0ae4d4d557056051ffca | [
"Apache-2.0"
] | null | null | null | data_helpers/decode_index_files.py | ArvinZhuang/COIL | 5cb492f2dcffe594d2be0ae4d4d557056051ffca | [
"Apache-2.0"
] | null | null | null | import os, glob
import json
from transformers import BertTokenizer
from tqdm import tqdm
from argparse import ArgumentParser
# Command-line entry point: rewrite each encoded index file, replacing the
# integer token-id keys of every record's 'vector' with the token strings
# decoded by the BERT tokenizer.
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('--file_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
    # file_path = '../msmarco_passage_unicoil_encoded_TILDE_200'
    # output_path = '../msmarco_passage_unicoil_encoded_TILDE_200_decoded'
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', use_fast=True, cache_dir="../cache")
    files = glob.glob(os.path.join(args.file_path, '*'))
    for file in tqdm(files):
        # One JSON record per line; each input file is mirrored into
        # output_path under the same basename.
        # NOTE(review): file.split('/') assumes POSIX paths -- confirm if
        # Windows support is needed.
        with open(file, 'r') as f, open(f"{args.output_path}/{file.split('/')[-1]}", 'w') as wf:
            for line in f:
                data = json.loads(line)
                vector = {}
                for tok_id in data['vector'].keys():
                    # Map token id -> decoded token string, keeping the weight.
                    vector[tokenizer.decode([int(tok_id)])] = data['vector'][tok_id]
                data['vector'] = vector
                json.dump(data, wf)
                wf.write('\n')
| 34.314286 | 103 | 0.625312 | import os, glob
import json
from transformers import BertTokenizer
from tqdm import tqdm
from argparse import ArgumentParser
# Command-line entry point: rewrite each encoded index file, replacing the
# integer token-id keys of every record's 'vector' with the token strings
# decoded by the BERT tokenizer.
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('--file_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
    # file_path = '../msmarco_passage_unicoil_encoded_TILDE_200'
    # output_path = '../msmarco_passage_unicoil_encoded_TILDE_200_decoded'
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', use_fast=True, cache_dir="../cache")
    files = glob.glob(os.path.join(args.file_path, '*'))
    for file in tqdm(files):
        # One JSON record per line; each input file is mirrored into
        # output_path under the same basename.
        # NOTE(review): file.split('/') assumes POSIX paths -- confirm if
        # Windows support is needed.
        with open(file, 'r') as f, open(f"{args.output_path}/{file.split('/')[-1]}", 'w') as wf:
            for line in f:
                data = json.loads(line)
                vector = {}
                for tok_id in data['vector'].keys():
                    # Map token id -> decoded token string, keeping the weight.
                    vector[tokenizer.decode([int(tok_id)])] = data['vector'][tok_id]
                data['vector'] = vector
                json.dump(data, wf)
                wf.write('\n')
| 0 | 0 | 0 |
b7c8b8db359b5c382b1b5f6804afa09f814e349b | 329 | py | Python | chapter/urls.py | 0lidaxiang/WeArt | 088bc2cdc7c653d4e4a84dea8f70cc86e12a8db9 | [
"BSD-3-Clause"
] | 1 | 2017-07-12T14:53:45.000Z | 2017-07-12T14:53:45.000Z | chapter/urls.py | 0lidaxiang/WeArt | 088bc2cdc7c653d4e4a84dea8f70cc86e12a8db9 | [
"BSD-3-Clause"
] | null | null | null | chapter/urls.py | 0lidaxiang/WeArt | 088bc2cdc7c653d4e4a84dea8f70cc86e12a8db9 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import include, url
from django.contrib import admin
from chapter.view.createChapter import *
from chapter.view.getChapter import *
# Chapter app routes (Django regex-based url(); login is currently disabled).
urlpatterns = [
    # url('^login/$', login),
    url('^createAChapter/$', createAChapter),
    url('^getChapter/$', getChapter),
    url('^bookChapter/$', bookChapter),
]
| 27.416667 | 45 | 0.699088 | from django.conf.urls import include, url
from django.contrib import admin
from chapter.view.createChapter import *
from chapter.view.getChapter import *
# Chapter app routes (Django regex-based url(); login is currently disabled).
urlpatterns = [
    # url('^login/$', login),
    url('^createAChapter/$', createAChapter),
    url('^getChapter/$', getChapter),
    url('^bookChapter/$', bookChapter),
]
| 0 | 0 | 0 |
d148ed2e786ff5cc4c8278daf1b9318f71b5f6e0 | 15,641 | py | Python | yggdrasil/config.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | null | null | null | yggdrasil/config.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | null | null | null | yggdrasil/config.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | null | null | null | """
This module imports the configuration for yggdrasil.
.. todo::
Remove reference to environment variables for accessing config options.
"""
import os
import sys
import json
import shutil
import logging
import warnings
import subprocess
from yggdrasil.backwards import configparser
from yggdrasil import platform, tools
# Resolve the per-user config location: inside the active conda environment
# when there is one, otherwise in the user's home directory.
conda_prefix = os.environ.get('CONDA_PREFIX', '')
config_file = '.yggdrasil.cfg'
def_config_file = os.path.join(os.path.dirname(__file__), 'defaults.cfg')
if conda_prefix:
    usr_dir = conda_prefix
else:
    usr_dir = os.path.expanduser('~')
usr_config_file = os.path.join(usr_dir, config_file)
loc_config_file = os.path.join(os.getcwd(), config_file)
if not os.path.isfile(usr_config_file):  # pragma: no cover
    # First run: seed the user config from the defaults and install all
    # supported languages (import deferred to avoid a circular import).
    from yggdrasil.languages import install_languages
    shutil.copy(def_config_file, usr_config_file)
    install_languages.install_all_languages(from_setup=True)
logger = logging.getLogger(__name__)
class YggConfigParser(configparser.ConfigParser, object):
    r"""Config parser that returns None if option not provided on get.

    NOTE(review): ``self.files`` is assumed to be stored by ``__init__``
    (not visible in this chunk); ``from_files`` passes it through the
    constructor -- confirm against the full source.
    """
    def reload(self):
        r"""Reload parameters from the original files."""
        # _sections is ConfigParser's internal storage; clearing it drops all
        # previously read options before re-reading the files in order.
        self._sections = self._dict()
        if self.files is not None:
            self.read(self.files)
    @property
    def file_to_update(self):
        r"""str: Full path to file that should be updated if update_file is
        called without an explicit file path."""
        # The last file read has the highest precedence, so it is the one
        # that gets written back by default.
        out = None
        if self.files is not None:
            out = self.files[-1]
        return out
    def update_file(self, fname=None):
        r"""Write out updated contents to a file.
        Args:
            fname (str, optional): Full path to file where contents should be
                saved. If None, file_to_update is used. Defaults to None.
        Raises:
            RuntimeError: If fname is None and file_to_update is None.
        """
        if fname is None:
            fname = self.file_to_update
        if fname is None:
            raise RuntimeError("No file provided or set at creation.")
        with open(fname, 'w') as fd:
            self.write(fd)
    @classmethod
    def from_files(cls, files, **kwargs):
        r"""Construct a config parser from a set of files.
        Args:
            files (list): One or more files that options should be read from in
                the order they should be loaded.
            **kwargs: Additional keyword arguments are passed to the class
                constructor.
        Returns:
            YggConfigParser: Config parser with information loaded from the
                provided files.
        """
        out = cls(files=files, **kwargs)
        out.reload()
        return out
    def set(self, section, option, value=None):
        """Set an option.

        Non-string values are serialized to JSON so they survive the
        string-only ConfigParser storage.
        """
        if not isinstance(value, str):
            value = json.dumps(value)
        super(YggConfigParser, self).set(section, option, value=value)
    def get(self, section, option, default=None, **kwargs):
        r"""Return None if the section/option does not exist.
        Args:
            section (str): Name of section.
            option (str): Name of option in section.
            default (obj, optional): Value that should be returned if the
                section and/or option are not found or are an empty string.
                Defaults to None.
            **kwargs: Additional keyword arguments are passed to the parent
                class's get.
        Returns:
            obj: String entry if the section & option exist, otherwise default.
        """
        section = section.lower()
        option = option.lower()
        if self.has_section(section) and self.has_option(section, option):
            # Super does not work for ConfigParser as not inherited from object
            out = configparser.ConfigParser.get(self, section, option, **kwargs)
            # Count empty strings as not provided
            if not out:
                return default
            else:
                # NOTE(review): backwards_str2val is expected to come from the
                # backwards-compat layer (not visible here) -- confirm.
                return self.backwards_str2val(out)
        else:
            return default
# Initialize config
# Precedence follows read order: defaults, then the user config, then a
# config in the current working directory (later files override earlier).
ygg_cfg_usr = YggConfigParser.from_files([usr_config_file])
ygg_cfg = YggConfigParser.from_files([def_config_file, usr_config_file,
                                      loc_config_file])
def update_language_config(drv, skip_warnings=False,
                           disable_languages=None, enable_languages=None,
                           overwrite=False, verbose=False):
    r"""Update configuration options for a language driver.
    Args:
        drv (list, class): One or more language drivers that should be
            configured.
        skip_warnings (bool, optional): If True, warnings about missing options
            will not be raised. Defaults to False.
        disable_languages (list, optional): List of languages that should be
            disabled. Defaults to an empty list.
        enable_languages (list, optional): List of languages that should be
            enabled. Defaults to an empty list.
        overwrite (bool, optional): If True, the existing file will be overwritten.
            Defaults to False.
        verbose (bool, optional): If True, information about the config file
            will be displayed. Defaults to False.
    """
    if verbose:
        logger.info("Updating user configuration file for yggdrasil at:\n\t%s"
                    % usr_config_file)
    miss = []
    if not isinstance(drv, list):
        drv = [drv]
    if disable_languages is None:
        disable_languages = []
    if enable_languages is None:
        enable_languages = []
    if overwrite:
        # Reset the user config to the packaged defaults before configuring.
        shutil.copy(def_config_file, usr_config_file)
        ygg_cfg_usr.reload()
    for idrv in drv:
        # Conflicting requests for the same language cancel each other out.
        if (((idrv.language in disable_languages)
             and (idrv.language in enable_languages))):
            logger.info(("%s language both enabled and disabled. "
                         "No action will be taken.") % idrv.language)
        elif idrv.language in disable_languages:
            ygg_cfg_usr.set(idrv.language, 'disable', 'True')
        elif idrv.language in enable_languages:
            ygg_cfg_usr.set(idrv.language, 'disable', 'False')
        # Skip configuration entirely for disabled languages.
        if ygg_cfg_usr.get(idrv.language, 'disable', 'False').lower() == 'true':
            continue  # pragma: no cover
        # configure() returns (section, option, description) tuples for any
        # options it could not determine automatically.
        miss += idrv.configure(ygg_cfg_usr)
    ygg_cfg_usr.update_file()
    ygg_cfg.reload()
    if not skip_warnings:
        for sect, opt, desc in miss:  # pragma: windows
            warnings.warn(("Could not set option %s in section %s. "
                           + "Please set this in %s to: %s")
                          % (opt, sect, ygg_cfg_usr.file_to_update, desc),
                          RuntimeWarning)
def find_all(name, path):
    r"""Find all instances of a file with a given name within the directory
    tree starting at a given path.
    Args:
        name (str): Name of the file to be found (with the extension).
        path (str, None): Directory where search should start. If set to
            None on Windows, the current directory and PATH variable are
            searched.
    Returns:
        list: All instances of the specified file.
    """
    result = []
    try:
        if platform._is_win:  # pragma: windows
            if path is None:
                out = subprocess.check_output(["where", name],
                                              env=os.environ,
                                              stderr=subprocess.STDOUT)
            else:
                out = subprocess.check_output(["where", "/r", path, name],
                                              env=os.environ,
                                              stderr=subprocess.STDOUT)
        else:
            # POSIX: use find(1), following symlinks (-L).
            args = ["find", "-L", path, "-type", "f", "-name", name]
            pfind = subprocess.Popen(args, env=os.environ,
                                     stderr=subprocess.PIPE,
                                     stdout=subprocess.PIPE)
            (stdoutdata, stderrdata) = pfind.communicate()
            out = stdoutdata
            # Permission errors on unreadable directories are tolerated; any
            # other stderr output is treated as a failure of the search.
            for l in stderrdata.splitlines():
                if b'Permission denied' not in l:
                    raise subprocess.CalledProcessError(pfind.returncode,
                                                        ' '.join(args),
                                                        output=stderrdata)
    except subprocess.CalledProcessError:
        # On failure fall through with empty output so an empty list results.
        # NOTE(review): `out` is bytes on success but str ('') here; the
        # str path only works because ''.splitlines() is empty -- fragile.
        out = ''
    if not out.isspace():
        result = sorted(out.splitlines())
    result = [os.path.normcase(os.path.normpath(m.decode('utf-8'))) for m in result]
    return result
def locate_file(fname, environment_variable='PATH', directory_list=None):
    r"""Locate a file within a set of paths defined by a list or environment
    variable.

    Args:
        fname (str): Name of the file that should be located.
        environment_variable (str): Environment variable containing the set of
            paths that should be searched. Defaults to 'PATH'. If None, this
            keyword argument will be ignored. If a list is provided, it is
            assumed to be a list of environment variables that should be
            searched in the specified order.
        directory_list (list): List of paths that should be searched in addition
            to those specified by environment_variable. Defaults to None and is
            ignored. These directories will be searched before those in the
            specified environment variables. The provided list is not modified.

    Returns:
        bool, str: Full path to the located file if it was located, False
            otherwise.

    """
    out = []
    if ((platform._is_win and (environment_variable == 'PATH')
         and (directory_list is None))):  # pragma: windows
        # On Windows, 'where' already searches the CWD and PATH itself.
        out += find_all(fname, None)
    else:
        # Work on a local copy so a caller-provided directory_list is not
        # mutated as a side effect (the previous implementation extended
        # the caller's list in place).
        search_dirs = list(directory_list) if directory_list is not None else []
        if environment_variable is not None:
            if not isinstance(environment_variable, list):
                environment_variable = [environment_variable]
            for env_var in environment_variable:
                search_dirs += os.environ.get(env_var, '').split(os.pathsep)
        for path in search_dirs:
            # Skip empty entries produced by splitting strings like "a::b".
            if path:
                out += find_all(fname, path)
    if not out:
        return False
    first = out[0]
    if len(out) > 1:
        warnings.warn(("More than one (%d) match to %s. "
                       + "Using first match (%s)") %
                      (len(out), fname, first), RuntimeWarning)
    return first
# Set associated environment variables
# Each entry maps a (config section, config option) pair onto the name of
# the environment variable that should mirror that option; the mapping is
# applied by cfg_environment below.
env_map = [('debug', 'ygg', 'YGG_DEBUG'),
           ('debug', 'rmq', 'RMQ_DEBUG'),
           ('debug', 'client', 'YGG_CLIENT_DEBUG'),
           ('jsonschema', 'validate_components', 'YGG_SKIP_COMPONENT_VALIDATION'),
           ('jsonschema', 'validate_all_messages', 'YGG_VALIDATE_ALL_MESSAGES'),
           ('rmq', 'namespace', 'YGG_NAMESPACE'),
           ('rmq', 'host', 'YGG_MSG_HOST'),
           ('rmq', 'vhost', 'YGG_MSG_VHOST'),
           ('rmq', 'user', 'YGG_MSG_USER'),
           ('rmq', 'password', 'YGG_MSG_PW'),
           ('parallel', 'cluster', 'YGG_CLUSTER'),
           ]
def get_ygg_loglevel(cfg=None, default='DEBUG'):
    r"""Get the current log level.

    Args:
        cfg (:class:`yggdrasil.config.YggConfigParser`, optional):
            Config parser that the log level should be read from.
            Defaults to :data:`yggdrasil.config.ygg_cfg`.
        default (str, optional): Log level that should be returned if the log
            level option is not set in cfg. Defaults to 'DEBUG'.

    Returns:
        str: Log level string.

    """
    # Model subprocesses read the 'client' option; the framework itself
    # reads the 'ygg' option.
    opt = 'client' if tools.is_subprocess() else 'ygg'
    if cfg is None:
        cfg = ygg_cfg
    return cfg.get('debug', opt, default)
def set_ygg_loglevel(level, cfg=None):
    r"""Set the current log level.

    Args:
        level (str): Name of the level that the log should be set to
            (e.g. 'DEBUG', 'INFO').
        cfg (:class:`yggdrasil.config.YggConfigParser`, optional):
            Config parser with options that should be used to update the
            environment. Defaults to :data:`yggdrasil.config.ygg_cfg`.

    Raises:
        AttributeError: If level is not the name of a level constant
            defined by the logging module.

    """
    is_model = tools.is_subprocess()
    if cfg is None:
        cfg = ygg_cfg
    # Model subprocesses store their level under 'client'; the framework
    # itself uses 'ygg'.
    if is_model:
        opt = 'client'
    else:
        opt = 'ygg'
    cfg.set('debug', opt, level)
    # Look the level constant up on the logging module directly instead of
    # eval()ing a string built from the argument; equivalent for valid level
    # names, but cannot execute arbitrary code.
    logLevelYGG = getattr(logging, level)
    ygg_logger = logging.getLogger("yggdrasil")
    ygg_logger.setLevel(level=logLevelYGG)
def cfg_logging(cfg=None):
    r"""Set logging levels from config options.

    Args:
        cfg (:class:`yggdrasil.config.YggConfigParser`, optional):
            Config parser with options that should be used to update the
            environment. Defaults to :data:`yggdrasil.config.ygg_cfg`.

    """
    is_model = tools.is_subprocess()
    if cfg is None:
        cfg = ygg_cfg
    _LOG_FORMAT = "%(levelname)s:%(module)s.%(funcName)s[%(lineno)d]:%(message)s"
    logging.basicConfig(level=logging.INFO, format=_LOG_FORMAT)
    # Look the level constants up on the logging module directly instead of
    # eval()ing strings built from config values; equivalent for valid level
    # names, but cannot execute arbitrary code from a malformed config file.
    logLevelYGG = getattr(logging, cfg.get('debug', 'ygg', 'NOTSET'))
    logLevelRMQ = getattr(logging, cfg.get('debug', 'rmq', 'INFO'))
    logLevelCLI = getattr(logging, cfg.get('debug', 'client', 'INFO'))
    ygg_logger = logging.getLogger("yggdrasil")
    rmq_logger = logging.getLogger("pika")
    # Models use the 'client' level for the yggdrasil logger.
    if is_model:
        ygg_logger.setLevel(level=logLevelCLI)
    else:
        ygg_logger.setLevel(level=logLevelYGG)
    rmq_logger.setLevel(level=logLevelRMQ)
    # For models, route the logs to stdout so that they are displayed by the
    # model driver.
    if is_model:
        handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(logLevelCLI)
        ygg_logger.addHandler(handler)
        rmq_logger.addHandler(handler)
def cfg_environment(env=None, cfg=None):
    r"""Set environment variables based on config options.

    Args:
        env (dict, optional): Dictionary of environment variables that should
            be updated. Defaults to `os.environ`.
        cfg (:class:`yggdrasil.config.YggConfigParser`, optional):
            Config parser with options that should be used to update the
            environment. Defaults to :data:`yggdrasil.config.ygg_cfg`.

    """
    env = os.environ if env is None else env
    cfg = ygg_cfg if cfg is None else cfg
    for section, option, env_name in env_map:
        value = cfg.get(section, option)
        # Only export options that are actually set (and non-empty).
        if value:
            env[env_name] = value
# Do initial update of logging & environment (legacy)
cfg_logging()
cfg_environment()
| 36.802353 | 84 | 0.592353 | """
This module imports the configuration for yggdrasil.
.. todo::
Remove reference to environment variables for accessing config options.
"""
import os
import sys
import json
import shutil
import logging
import warnings
import subprocess
from yggdrasil.backwards import configparser
from yggdrasil import platform, tools
conda_prefix = os.environ.get('CONDA_PREFIX', '')
config_file = '.yggdrasil.cfg'
def_config_file = os.path.join(os.path.dirname(__file__), 'defaults.cfg')
if conda_prefix:
usr_dir = conda_prefix
else:
usr_dir = os.path.expanduser('~')
usr_config_file = os.path.join(usr_dir, config_file)
loc_config_file = os.path.join(os.getcwd(), config_file)
if not os.path.isfile(usr_config_file): # pragma: no cover
from yggdrasil.languages import install_languages
shutil.copy(def_config_file, usr_config_file)
install_languages.install_all_languages(from_setup=True)
logger = logging.getLogger(__name__)
class YggConfigParser(configparser.ConfigParser, object):
    r"""Config parser that returns None if option not provided on get.

    Args:
        files (list, optional): Files that options should be read from, in
            the order they should be loaded, when reload is called.
            Defaults to None.

    """

    def __init__(self, files=None):
        # Remember the source files so the parser can be reloaded and the
        # highest-precedence file updated later.
        self.files = files
        super(YggConfigParser, self).__init__()

    def reload(self):
        r"""Reload parameters from the original files."""
        # Resetting _sections clears all previously parsed options so stale
        # values do not survive a reload.
        self._sections = self._dict()
        if self.files is not None:
            self.read(self.files)

    @property
    def file_to_update(self):
        r"""str: Full path to file that should be updated if update_file is
        called without an explicit file path."""
        # The last file read has the highest precedence, so that is the one
        # updates are written back to.
        out = None
        if self.files is not None:
            out = self.files[-1]
        return out

    def update_file(self, fname=None):
        r"""Write out updated contents to a file.

        Args:
            fname (str, optional): Full path to file where contents should be
                saved. If None, file_to_update is used. Defaults to None.

        Raises:
            RuntimeError: If fname is None and file_to_update is None.

        """
        if fname is None:
            fname = self.file_to_update
        if fname is None:
            raise RuntimeError("No file provided or set at creation.")
        with open(fname, 'w') as fd:
            self.write(fd)

    def read(self, *args, **kwargs):
        r"""Read and parse one or more files, then copy deprecated option
        names onto their current equivalents."""
        out = super(YggConfigParser, self).read(*args, **kwargs)
        # 'psi' and 'cis' are legacy names for the 'ygg' debug option.
        alias_map = [(('debug', 'psi'), ('debug', 'ygg')),
                     (('debug', 'cis'), ('debug', 'ygg'))]
        for old, new in alias_map:
            v = self.get(*old)
            if v:  # pragma: debug
                self.set(new[0], new[1], v)
        return out

    @classmethod
    def from_files(cls, files, **kwargs):
        r"""Construct a config parser from a set of files.

        Args:
            files (list): One or more files that options should be read from in
                the order they should be loaded.
            **kwargs: Additional keyword arguments are passed to the class
                constructor.

        Returns:
            YggConfigParser: Config parser with information loaded from the
                provided files.

        """
        out = cls(files=files, **kwargs)
        out.reload()
        return out

    def set(self, section, option, value=None):
        """Set an option, JSON-encoding any non-string value so it can be
        stored by the underlying ConfigParser."""
        if not isinstance(value, str):
            value = json.dumps(value)
        super(YggConfigParser, self).set(section, option, value=value)

    def backwards_str2val(self, val):  # pragma: no cover
        r"""Decode a stored option string back into a Python value, falling
        back to legacy (pre-JSON) list and quoted-string formats."""
        try:
            out = json.loads(val)
        except ValueError:
            if val.startswith('[') and val.endswith(']'):
                # Legacy comma-separated list format, decoded element-wise.
                if val[1:-1]:
                    out = [self.backwards_str2val(x.strip())
                           for x in val[1:-1].split(',')]
                else:
                    out = []
            elif val.startswith("'") and val.endswith("'"):
                # Legacy singly-quoted string.
                out = val.strip("'")
            else:
                # Not parsable; return the raw string unchanged.
                out = val
        return out

    def get(self, section, option, default=None, **kwargs):
        r"""Return None if the section/option does not exist.

        Args:
            section (str): Name of section.
            option (str): Name of option in section.
            default (obj, optional): Value that should be returned if the
                section and/or option are not found or are an empty string.
                Defaults to None.
            **kwargs: Additional keyword arguments are passed to the parent
                class's get.

        Returns:
            obj: String entry if the section & option exist, otherwise default.

        """
        section = section.lower()
        option = option.lower()
        if self.has_section(section) and self.has_option(section, option):
            # Super does not work for ConfigParser as not inherited from object
            out = configparser.ConfigParser.get(self, section, option, **kwargs)
            # Count empty strings as not provided
            if not out:
                return default
            else:
                return self.backwards_str2val(out)
        else:
            return default
# Initialize config
ygg_cfg_usr = YggConfigParser.from_files([usr_config_file])
ygg_cfg = YggConfigParser.from_files([def_config_file, usr_config_file,
loc_config_file])
def update_language_config(drv, skip_warnings=False,
                           disable_languages=None, enable_languages=None,
                           overwrite=False, verbose=False):
    r"""Update configuration options for a language driver.

    Args:
        drv (list, class): One or more language drivers that should be
            configured.
        skip_warnings (bool, optional): If True, warnings about missing options
            will not be raised. Defaults to False.
        disable_languages (list, optional): List of languages that should be
            disabled. Defaults to an empty list.
        enable_languages (list, optional): List of languages that should be
            enabled. Defaults to an empty list.
        overwrite (bool, optional): If True, the existing file will be overwritten.
            Defaults to False.
        verbose (bool, optional): If True, information about the config file
            will be displayed. Defaults to False.

    """
    if verbose:
        logger.info("Updating user configuration file for yggdrasil at:\n\t%s"
                    % usr_config_file)
    miss = []
    if not isinstance(drv, list):
        drv = [drv]
    if disable_languages is None:
        disable_languages = []
    if enable_languages is None:
        enable_languages = []
    if overwrite:
        # Start over from the packaged defaults before re-configuring.
        shutil.copy(def_config_file, usr_config_file)
        ygg_cfg_usr.reload()
    for idrv in drv:
        # Conflicting requests cancel out; leave the language untouched.
        if (((idrv.language in disable_languages)
             and (idrv.language in enable_languages))):
            logger.info(("%s language both enabled and disabled. "
                         "No action will be taken.") % idrv.language)
        elif idrv.language in disable_languages:
            ygg_cfg_usr.set(idrv.language, 'disable', 'True')
        elif idrv.language in enable_languages:
            ygg_cfg_usr.set(idrv.language, 'disable', 'False')
        # Skip configuration for languages that end up disabled.
        if ygg_cfg_usr.get(idrv.language, 'disable', 'False').lower() == 'true':
            continue  # pragma: no cover
        # configure() returns (section, option, description) tuples for any
        # options it could not determine automatically.
        miss += idrv.configure(ygg_cfg_usr)
    ygg_cfg_usr.update_file()
    # Refresh the merged (default+user+local) view now the user file changed.
    ygg_cfg.reload()
    if not skip_warnings:
        for sect, opt, desc in miss:  # pragma: windows
            warnings.warn(("Could not set option %s in section %s. "
                           + "Please set this in %s to: %s")
                          % (opt, sect, ygg_cfg_usr.file_to_update, desc),
                          RuntimeWarning)
def find_all(name, path):
r"""Find all instances of a file with a given name within the directory
tree starting at a given path.
Args:
name (str): Name of the file to be found (with the extension).
path (str, None): Directory where search should start. If set to
None on Windows, the current directory and PATH variable are
searched.
Returns:
list: All instances of the specified file.
"""
result = []
try:
if platform._is_win: # pragma: windows
if path is None:
out = subprocess.check_output(["where", name],
env=os.environ,
stderr=subprocess.STDOUT)
else:
out = subprocess.check_output(["where", "/r", path, name],
env=os.environ,
stderr=subprocess.STDOUT)
else:
args = ["find", "-L", path, "-type", "f", "-name", name]
pfind = subprocess.Popen(args, env=os.environ,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
(stdoutdata, stderrdata) = pfind.communicate()
out = stdoutdata
for l in stderrdata.splitlines():
if b'Permission denied' not in l:
raise subprocess.CalledProcessError(pfind.returncode,
' '.join(args),
output=stderrdata)
except subprocess.CalledProcessError:
out = ''
if not out.isspace():
result = sorted(out.splitlines())
result = [os.path.normcase(os.path.normpath(m.decode('utf-8'))) for m in result]
return result
def locate_file(fname, environment_variable='PATH', directory_list=None):
r"""Locate a file within a set of paths defined by a list or environment
variable.
Args:
fname (str): Name of the file that should be located.
environment_variable (str): Environment variable containing the set of
paths that should be searched. Defaults to 'PATH'. If None, this
keyword argument will be ignored. If a list is provided, it is
assumed to be a list of environment variables that should be
searched in the specified order.
directory_list (list): List of paths that should be searched in addition
to those specified by environment_variable. Defaults to None and is
ignored. These directories will be searched be for those in the
specified environment variables.
Returns:
bool, str: Full path to the located file if it was located, False
otherwise.
"""
out = []
if ((platform._is_win and (environment_variable == 'PATH')
and (directory_list is None))): # pragma: windows
out += find_all(fname, None)
else:
if directory_list is None:
directory_list = []
if environment_variable is not None:
if not isinstance(environment_variable, list):
environment_variable = [environment_variable]
for x in environment_variable:
directory_list += os.environ.get(x, '').split(os.pathsep)
for path in directory_list:
if path:
out += find_all(fname, path)
if not out:
return False
first = out[0]
if len(out) > 1:
warnings.warn(("More than one (%d) match to %s. "
+ "Using first match (%s)") %
(len(out), fname, first), RuntimeWarning)
return first
# Set associated environment variables
env_map = [('debug', 'ygg', 'YGG_DEBUG'),
('debug', 'rmq', 'RMQ_DEBUG'),
('debug', 'client', 'YGG_CLIENT_DEBUG'),
('jsonschema', 'validate_components', 'YGG_SKIP_COMPONENT_VALIDATION'),
('jsonschema', 'validate_all_messages', 'YGG_VALIDATE_ALL_MESSAGES'),
('rmq', 'namespace', 'YGG_NAMESPACE'),
('rmq', 'host', 'YGG_MSG_HOST'),
('rmq', 'vhost', 'YGG_MSG_VHOST'),
('rmq', 'user', 'YGG_MSG_USER'),
('rmq', 'password', 'YGG_MSG_PW'),
('parallel', 'cluster', 'YGG_CLUSTER'),
]
def get_ygg_loglevel(cfg=None, default='DEBUG'):
r"""Get the current log level.
Args:
cfg (:class:`yggdrasil.config.YggConfigParser`, optional):
Config parser with options that should be used to determine the
log level. Defaults to :data:`yggdrasil.config.ygg_cfg`.
default (str, optional): Log level that should be returned if the log
level option is not set in cfg. Defaults to 'DEBUG'.
Returns:
str: Log level string.
"""
is_model = tools.is_subprocess()
if cfg is None:
cfg = ygg_cfg
if is_model:
opt = 'client'
else:
opt = 'ygg'
return cfg.get('debug', opt, default)
def set_ygg_loglevel(level, cfg=None):
r"""Set the current log level.
Args:
level (str): Level that the log should be set to.
cfg (:class:`yggdrasil.config.YggConfigParser`, optional):
Config parser with options that should be used to update the
environment. Defaults to :data:`yggdrasil.config.ygg_cfg`.
"""
is_model = tools.is_subprocess()
if cfg is None:
cfg = ygg_cfg
if is_model:
opt = 'client'
else:
opt = 'ygg'
cfg.set('debug', opt, level)
logLevelYGG = eval('logging.%s' % level)
ygg_logger = logging.getLogger("yggdrasil")
ygg_logger.setLevel(level=logLevelYGG)
def cfg_logging(cfg=None):
r"""Set logging levels from config options.
Args:
cfg (:class:`yggdrasil.config.YggConfigParser`, optional):
Config parser with options that should be used to update the
environment. Defaults to :data:`yggdrasil.config.ygg_cfg`.
"""
is_model = tools.is_subprocess()
if cfg is None:
cfg = ygg_cfg
_LOG_FORMAT = "%(levelname)s:%(module)s.%(funcName)s[%(lineno)d]:%(message)s"
logging.basicConfig(level=logging.INFO, format=_LOG_FORMAT)
logLevelYGG = eval('logging.%s' % cfg.get('debug', 'ygg', 'NOTSET'))
logLevelRMQ = eval('logging.%s' % cfg.get('debug', 'rmq', 'INFO'))
logLevelCLI = eval('logging.%s' % cfg.get('debug', 'client', 'INFO'))
ygg_logger = logging.getLogger("yggdrasil")
rmq_logger = logging.getLogger("pika")
if is_model:
ygg_logger.setLevel(level=logLevelCLI)
else:
ygg_logger.setLevel(level=logLevelYGG)
rmq_logger.setLevel(level=logLevelRMQ)
# For models, route the loggs to stdout so that they are displayed by the
# model driver.
if is_model:
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logLevelCLI)
ygg_logger.addHandler(handler)
rmq_logger.addHandler(handler)
def cfg_environment(env=None, cfg=None):
r"""Set environment variables based on config options.
Args:
env (dict, optional): Dictionary of environment variables that should
be updated. Defaults to `os.environ`.
cfg (:class:`yggdrasil.config.YggConfigParser`, optional):
Config parser with options that should be used to update the
environment. Defaults to :data:`yggdrasil.config.ygg_cfg`.
"""
if env is None:
env = os.environ
if cfg is None:
cfg = ygg_cfg
for s, o, e in env_map:
v = cfg.get(s, o)
if v:
env[e] = v
# Do initial update of logging & environment (legacy)
cfg_logging()
cfg_environment()
| 967 | 0 | 81 |
169cbc55b107d627e16e8d100c9590e216f3c200 | 253 | py | Python | 226-invert-binary-tree/solution.py | phenix3443/leetcode | b6d8486e859b2db0bf3d58f55a6e1d439b0b891a | [
"MIT"
] | null | null | null | 226-invert-binary-tree/solution.py | phenix3443/leetcode | b6d8486e859b2db0bf3d58f55a6e1d439b0b891a | [
"MIT"
] | null | null | null | 226-invert-binary-tree/solution.py | phenix3443/leetcode | b6d8486e859b2db0bf3d58f55a6e1d439b0b891a | [
"MIT"
] | null | null | null | # -*- coding:utf-8; -*-
class Solution:
"""
解题思路:递归
"""
| 15.8125 | 87 | 0.541502 | # -*- coding:utf-8; -*-
class Solution:
    """Invert a binary tree (LeetCode 226).

    Approach: recursion -- every node's left and right children are swapped.
    """

    def invertTree(self, root):
        """Mirror the tree rooted at ``root`` in place and return its root."""
        if not root:
            return None
        # Invert the right subtree first, then the left, and reattach them
        # swapped.
        new_left = self.invertTree(root.right)
        new_right = self.invertTree(root.left)
        root.left = new_left
        root.right = new_right
        return root
| 156 | 0 | 27 |
d907e264b8481d24b1f44bc322e576f547d94156 | 35,143 | py | Python | domonit/inspect.py | riley022/DoMonit | 4ba0e9b145db03db579e38a126929c29f6f64e6c | [
"MIT"
] | null | null | null | domonit/inspect.py | riley022/DoMonit | 4ba0e9b145db03db579e38a126929c29f6f64e6c | [
"MIT"
] | 1 | 2018-05-22T09:50:35.000Z | 2018-05-22T09:50:35.000Z | domonit/inspect.py | riley022/DoMonit | 4ba0e9b145db03db579e38a126929c29f6f64e6c | [
"MIT"
] | null | null | null | import requests_unixsocket
import json
from errors import NoSuchContainerError, ServerErrorError
from utils.utils import Utils
u = Utils()
#https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/
| 30.29569 | 95 | 0.561477 | import requests_unixsocket
import json
from errors import NoSuchContainerError, ServerErrorError
from utils.utils import Utils
u = Utils()
#https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/
class Inspect():
def __init__(self, container_id):
self.container_id = container_id
self.base = "http+unix://%2Fvar%2Frun%2Fdocker.sock"
self.url = "/containers/%s/json" % (self.container_id)
self.session = requests_unixsocket.Session()
try:
self.resp = self.session.get( self.base + self.url)
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print message
def inspect(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
return self.resp.json()
def args(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return('{}'.format( respj["Args"]) )
def app_armor_profile(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["AppArmorProfile"]) )
def attach_stderr(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["AttachStderr"]) )
def attach_stdin(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["AttachStdin"]) )
def cmd(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return('{}'.format( respj["Config"]["Cmd"]) )
def domainname(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["Domainname"]) )
def entrypoint(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["Entrypoint"]) )
def env(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["Env"]) )
def exposed_ports(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["ExposedPorts"]) )
def hostname(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["Hostname"]) )
def image(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["Image"]) )
def labels(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["Labels"]) )
def mac_address(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["MacAddress"]) )
def network_disabled(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["NetworkDisabled"]) )
def on_build(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["OnBuild"]) )
def open_stdin(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["OpenStdin"]) )
def stdin_once(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["StdinOnce"]) )
def tty(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["Tty"]) )
def user(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["User"]) )
def volumes(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["Volumes"]) )
def working_dir(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["WorkingDir"]) )
def stop_signal(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Config"]["StopSignal"]) )
def created(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Created"]) )
def driver(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["Driver"]) )
def exec_ids(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["ExecIds"]) )
def host_config_binds(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["Binds"]) )
def host_config_maximum_iops(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["MaximumIOps"]) )
def host_config_maximum_iobps(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["MaximumIOBps"]) )
def host_config_blkio_weight_device(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["BlkioWeightDevice"]) )
def host_config_blkio_device_read_bps(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["BlkioDeviceReadBps"]) )
def host_config_blkio_device_write_bps(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["BlkioDeviceWriteBps"]) )
def host_config_blkio_device_write_iops(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["BlkioDeviceWriteIOps"]) )
def host_config_cap_add(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["CapAdd"]) )
def host_config_cap_drop(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["CapDrop"]) )
def host_config_container_id_file(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["ContainerIDFile"]) )
def host_config_cpuset_cpus(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["CpusetCpus"]) )
def host_config_cpuset_mems(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["CpusetMems"]) )
def host_config_cpu_percent(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["CpuPercent"]) )
def host_config_cpu_shares(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["CpuShares"]) )
def host_config_cpu_period(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["CpuPeriod"]) )
def host_config_devices(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["Devices"]) )
def host_config_dns(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["Dns"]) )
def host_config_dns_options(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["DnsOptions"]) )
def host_config_dns_search(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["DnsSearch"]) )
def host_config_extra_hosts(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["ExtraHosts"]) )
def host_config_ipc_mode(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["IpcMode"]) )
def host_config_links(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["Links"]) )
def host_config_lxc_conf(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["LxcConf"]) )
def host_config_memory(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["Memory"]) )
def host_config_memory_swap(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
respj = self.resp.json()
return( '{}'.format( respj["HostConfig"]["DnsOptions"]) )
def _checked_json(self):
    """Validate the cached inspect response and return its parsed JSON body.

    Status-code checking is delegated to ``u.check_resp`` exactly as every
    non-``state_*`` accessor previously did inline.
    """
    u.check_resp(self.resp.status_code, self.url)
    return self.resp.json()


def _field(self, *keys):
    """Return the inspect-JSON value at the nested *keys* path, as a string."""
    value = _checked_json(self)
    for key in keys:
        value = value[key]
    return '{}'.format(value)


def _state_json(self):
    """Return the inspect JSON body, raising typed errors on 404/500.

    The ``state_*`` accessors historically raise NoSuchContainerError /
    ServerErrorError directly instead of calling ``u.check_resp``; that
    behaviour is preserved here.
    """
    resp = self.resp
    if resp.status_code == 404:
        raise NoSuchContainerError('GET ' + self.url + ' {} '.format(resp.status_code))
    elif resp.status_code == 500:
        raise ServerErrorError('GET ' + self.url + ' {} '.format(resp.status_code))
    return resp.json()


def _state_field(self, key):
    """Return ``State.<key>`` as a string, with the state-specific error handling."""
    return '{}'.format(_state_json(self)["State"][key])


# ---------------------------------------------------------------------------
# HostConfig accessors. Each returns the named inspect-JSON field as a string.
# NOTE: the original defined host_config_volume_driver twice, identically; it
# is defined only once here (same runtime behaviour, the second shadowed the
# first).
# ---------------------------------------------------------------------------
def host_config_memory_reservation(self):
    return _field(self, "HostConfig", "MemoryReservation")
def host_config_kernel_memory(self):
    return _field(self, "HostConfig", "KernelMemory")
def host_config_oom_kill_disable(self):
    return _field(self, "HostConfig", "OomKillDisable")
def host_config_oom_score_adj(self):
    return _field(self, "HostConfig", "OomScoreAdj")
def host_config_network_mode(self):
    return _field(self, "HostConfig", "NetworkMode")
def host_config_pid_mode(self):
    return _field(self, "HostConfig", "PidMode")
def host_config_port_bindings(self):
    return _field(self, "HostConfig", "PortBindings")
def host_config_privileged(self):
    return _field(self, "HostConfig", "Privileged")
def host_config_readonly_rootfs(self):
    return _field(self, "HostConfig", "ReadonlyRootfs")
def host_config_publish_all_ports(self):
    return _field(self, "HostConfig", "PublishAllPorts")
def host_config_restart_policy_maximum_retry_count(self):
    return _field(self, "HostConfig", "RestartPolicy", "MaximumRetryCount")
def host_config_restart_policy_name(self):
    return _field(self, "HostConfig", "RestartPolicy", "Name")
def host_config_log_config_config(self):
    return _field(self, "HostConfig", "LogConfig", "Config")
def host_config_log_config_type(self):
    return _field(self, "HostConfig", "LogConfig", "Type")
def host_config_security_opt(self):
    return _field(self, "HostConfig", "SecurityOpt")
def host_config_sysctls(self):
    return _field(self, "HostConfig", "Sysctls")
def host_config_storage_opt(self):
    return _field(self, "HostConfig", "StorageOpt")
def host_config_volumes_from(self):
    return _field(self, "HostConfig", "VolumesFrom")
def host_config_ulimits(self):
    return _field(self, "HostConfig", "Ulimits")
def host_config_volume_driver(self):
    return _field(self, "HostConfig", "VolumeDriver")
def host_config_host_config_shm_size(self):
    return _field(self, "HostConfig", "ShmSize")


# ---------------------------------------------------------------------------
# Top-level and NetworkSettings accessors, each returning the named
# inspect-JSON field as a string (except network_settings_networks, below).
# ---------------------------------------------------------------------------
def hostname_path(self):
    return _field(self, "HostnamePath")
def hosts_path(self):
    return _field(self, "HostsPath")
def log_path(self):
    return _field(self, "LogPath")
def id(self):
    return _field(self, "Id")
def image(self):
    return _field(self, "Image")
def mount_label(self):
    return _field(self, "MountLabel")
def name(self):
    return _field(self, "Name")
def network_settings_bridge(self):
    return _field(self, "NetworkSettings", "Bridge")
def network_settings_sandbox_id(self):
    return _field(self, "NetworkSettings", "SandboxID")
def network_settings_hairpin_mode(self):
    return _field(self, "NetworkSettings", "HairpinMode")
def network_settings_link_local_ipv6_address(self):
    return _field(self, "NetworkSettings", "LinkLocalIPv6Address")
def network_settings_link_local_ipv6_prefix_len(self):
    return _field(self, "NetworkSettings", "LinkLocalIPv6PrefixLen")
def network_settings_ports(self):
    return _field(self, "NetworkSettings", "Ports")
def network_settings_sandbox_key(self):
    return _field(self, "NetworkSettings", "SandboxKey")
def network_settings_secondary_ip_addresses(self):
    return _field(self, "NetworkSettings", "SecondaryIPAddresses")
def network_settings_secondary_ipv6_addresses(self):
    return _field(self, "NetworkSettings", "SecondaryIPv6Addresses")
def network_settings_endpoint_id(self):
    return _field(self, "NetworkSettings", "EndpointID")
def network_settings_gateway(self):
    return _field(self, "NetworkSettings", "Gateway")
def network_settings_global_ipv6_address(self):
    return _field(self, "NetworkSettings", "GlobalIPv6Address")
def network_settings_global_ipv6_prefixLen(self):
    return _field(self, "NetworkSettings", "GlobalIPv6PrefixLen")
def network_settings_ip_address(self):
    return _field(self, "NetworkSettings", "IPAddress")
def network_settings_ip_prefixLen(self):
    return _field(self, "NetworkSettings", "IPPrefixLen")
def network_settings_ipv6_gateway(self):
    return _field(self, "NetworkSettings", "IPv6Gateway")
def network_settings_mac_address(self):
    return _field(self, "NetworkSettings", "MacAddress")


def network_settings_networks(self):
    """Return ``NetworkSettings.Networks`` as the raw parsed JSON object.

    Unlike the other accessors this deliberately returns the object itself
    rather than its string form, matching the original behaviour.
    """
    return _checked_json(self)["NetworkSettings"]["Networks"]


def path(self):
    return _field(self, "Path")
def process_label(self):
    return _field(self, "ProcessLabel")
def resolv_conf_path(self):
    return _field(self, "ResolvConfPath")
def restart_count(self):
    return _field(self, "RestartCount")


# ---------------------------------------------------------------------------
# State accessors: same contract as above, but errors surface as
# NoSuchContainerError / ServerErrorError (see _state_json).
# ---------------------------------------------------------------------------
def state_error(self):
    return _state_field(self, "Error")
def state_exit_code(self):
    return _state_field(self, "ExitCode")
def state_finished_at(self):
    return _state_field(self, "FinishedAt")
def state_oom_killed(self):
    return _state_field(self, "OOMKilled")
def state_dead(self):
    return _state_field(self, "Dead")
def state_paused(self):
    return _state_field(self, "Paused")
def state_pid(self):
    return _state_field(self, "Pid")
def state_restarting(self):
    return _state_field(self, "Restarting")
def state_running(self):
    return _state_field(self, "Running")
def state_started_at(self):
    return _state_field(self, "StartedAt")
def state_status(self):
    return _state_field(self, "Status")


def mounts(self):
    return _field(self, "Mounts")
| 31,651 | -5 | 3,266 |
cb142d0c8442e3a75243e7defedf02e5725aacae | 518 | py | Python | iniciante/python/1099-soma-de-impares-consecutivos-ii.py | tfn10/beecrowd | 1ebf19ca9a253eb326160f03145d20be33064969 | [
"MIT"
] | null | null | null | iniciante/python/1099-soma-de-impares-consecutivos-ii.py | tfn10/beecrowd | 1ebf19ca9a253eb326160f03145d20be33064969 | [
"MIT"
] | null | null | null | iniciante/python/1099-soma-de-impares-consecutivos-ii.py | tfn10/beecrowd | 1ebf19ca9a253eb326160f03145d20be33064969 | [
"MIT"
] | null | null | null |
quant_de_testes = int(input())
soma_de_impares_consecutivos(quant_de_testes)
def entrada():
    """Read one stdin line and return its first two space-separated integers."""
    valores = [int(token) for token in input().split(' ')]
    return valores[0], valores[1]
def soma_de_impares_consecutivos(quantidade_de_testes):
    """For each test case read two ints and print the sum of the odd
    integers strictly between them."""
    for _ in range(quantidade_de_testes):
        a, b = entrada()
        menor, maior = min(a, b), max(a, b)
        print(sum(n for n in range(menor + 1, maior) if n % 2 != 0))
quant_de_testes = int(input())
soma_de_impares_consecutivos(quant_de_testes)
| 393 | 0 | 45 |
500f0fc8f62478583876b9768e5dd75780ba3038 | 2,856 | py | Python | components/bigquery/query/src/query.py | xieqihui/pipelines | a39ae8eb779e69e4e85e424abba8ecac3f60435f | [
"Apache-2.0"
] | 9 | 2019-03-28T02:20:45.000Z | 2021-12-01T22:43:36.000Z | components/bigquery/query/src/query.py | xieqihui/pipelines | a39ae8eb779e69e4e85e424abba8ecac3f60435f | [
"Apache-2.0"
] | 21 | 2020-01-28T22:48:55.000Z | 2022-03-08T22:48:12.000Z | components/bigquery/query/src/query.py | xieqihui/pipelines | a39ae8eb779e69e4e85e424abba8ecac3f60435f | [
"Apache-2.0"
] | 4 | 2019-04-11T12:09:59.000Z | 2020-10-11T15:53:53.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
from google.cloud import bigquery
def parse_arguments():
    """Build the CLI parser and return the parsed command line arguments."""
    parser = argparse.ArgumentParser()
    # (flag, required, help) — all options are plain strings.
    option_specs = [
        ('--output', False, 'GCS URL where results will be saved as a CSV.'),
        ('--query', True, 'The SQL query to be run in BigQuery'),
        ('--dataset_id', True, 'Dataset of the destination table.'),
        ('--table_id', True, 'Name of the destination table.'),
        ('--project', True, 'The GCP project to run the query.'),
    ]
    for flag, required, help_text in option_specs:
        parser.add_argument(flag, type=str, required=required, help=help_text)
    return parser.parse_args()
if __name__ == '__main__':
main()
| 28 | 81 | 0.708333 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
from google.cloud import bigquery
def _query(query, project_id, dataset_id, table_id, output):
    """Run *query* in BigQuery, materializing the result into a table.

    The destination table ``dataset_id.table_id`` is created if needed and
    truncated on every run. If *output* is truthy, the destination table is
    additionally exported to that GCS location.

    Args:
      query: SQL text to execute.
      project_id: GCP project that runs the query.
      dataset_id: Dataset of the destination table.
      table_id: Name of the destination table.
      output: Optional GCS URL to export the result table to.

    Returns:
      dict with the destination table path, total rows, bytes processed and
      the result schema (API representation of each field).
    """
    client = bigquery.Client(project=project_id)
    job_config = bigquery.QueryJobConfig()
    table_ref = client.dataset(dataset_id).table(table_id)
    job_config.create_disposition = bigquery.job.CreateDisposition.CREATE_IF_NEEDED
    job_config.write_disposition = bigquery.job.WriteDisposition.WRITE_TRUNCATE
    job_config.destination = table_ref
    query_job = client.query(query, job_config=job_config)
    job_result = query_job.result() # Wait for the query to finish
    result = {
        'destination_table': table_ref.path,
        'total_rows': job_result.total_rows,
        'total_bytes_processed': query_job.total_bytes_processed,
        'schema': [f.to_api_repr() for f in job_result.schema]
    }
    # If a GCS output location is provided, export to CSV
    if output:
        extract_job = client.extract_table(table_ref, output)
        extract_job.result() # Wait for export to finish
    return result
def parse_arguments():
    """Parse command line arguments.

    Returns:
      argparse.Namespace with ``output``, ``query``, ``dataset_id``,
      ``table_id`` and ``project`` string attributes (``output`` is
      optional and defaults to None).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--output',
        type=str,
        required=False,
        help='GCS URL where results will be saved as a CSV.')
    parser.add_argument(
        '--query',
        type=str,
        required=True,
        help='The SQL query to be run in BigQuery')
    parser.add_argument(
        '--dataset_id',
        type=str,
        required=True,
        help='Dataset of the destination table.')
    parser.add_argument(
        '--table_id',
        type=str,
        required=True,
        help='Name of the destination table.')
    parser.add_argument(
        '--project',
        type=str,
        required=True,
        help='The GCP project to run the query.')
    args = parser.parse_args()
    return args
def run_query(query, project_id, dataset_id, table_id, output):
    """Execute the query and write its job summary to /output.json."""
    summary = _query(query, project_id, dataset_id, table_id, output)
    summary['output'] = output
    with open('/output.json', 'w+') as out_file:
        json.dump(summary, out_file)
def main():
    """Entry point: configure logging, parse CLI args and run the query."""
    logging.getLogger().setLevel(logging.INFO)
    args = parse_arguments()
    run_query(args.query, args.project, args.dataset_id, args.table_id,
              args.output)
if __name__ == '__main__':
main()
| 1,299 | 0 | 69 |
c54d1d9204fda34b28ecde787fd15c640bd4234a | 3,885 | py | Python | gs_pip_install/gs_pip_install.py | cseHdz/gs_pip_install | 743e572bf53d01ddbce35d93f740c224d08a494c | [
"MIT"
] | null | null | null | gs_pip_install/gs_pip_install.py | cseHdz/gs_pip_install | 743e572bf53d01ddbce35d93f740c224d08a494c | [
"MIT"
] | null | null | null | gs_pip_install/gs_pip_install.py | cseHdz/gs_pip_install | 743e572bf53d01ddbce35d93f740c224d08a494c | [
"MIT"
] | null | null | null | import os
import subprocess
import sys
import shutil
import logging
from typing import Optional, List
import click
from google.cloud import storage
@click.command()
@click.option('-b', "--bucket_name", help="(str) Name of GCS bucket")
@click.option(
    '-r',
    "--requirement",
    help="(str) Name of Python package or requirements file",
)
@click.option(
    '-d',
    "--download_dir",
    default="gcs_packages",
    help="(optional, str) File download destination",
)
@click.option("-t", "--target", default="", help="(str) Package install destination")
def main(bucket_name, requirement, download_dir, target):
    """Pip install {pkg_name}/{pkg_name_versioned}.tar.gz to
    current enviroment or a target directory
    (1) Copy package_name.tar.gz from Google Cloud bucket
    (2) Pip Install package_name.tar.gz into staging directory
    (3) Remove the package_name.tar.gz
    """
    try:
        packages = []
        # *requirement* is either a requirements file (one package spec per
        # line) or a single package name.
        if os.path.isfile(requirement):
            with open(requirement) as gs_requirements:
                for package_name in gs_requirements.readlines():
                    packages.append(package_name.strip())
        else:
            packages.append(requirement)
        download_packages(
            bucket_name=bucket_name,
            package_list=packages,
            packages_download_dir=download_dir,
        )
        logging.info('download done')
        install_packages(download_dir, target)
    finally:
        # Always remove the staging directory, even when download/install fails.
        if os.path.exists(download_dir):
            shutil.rmtree(download_dir)
def install_packages(packages_download_dir: str, target_dir: Optional[str] = None):
    """Install packages found in local directory. Do not install dependencies if
    target directory is specified.
    Args:
        packages_download_dir (str): Directory containing packages
        target_dir (str): Destination to install packages
    """
    for gs_package_zip_file in os.listdir(packages_download_dir):
        if not target_dir:
            # Regular install into the active environment (with dependencies).
            install_command = [
                sys.executable,
                "-m",
                "pip",
                "install",
                "--quiet",
                "--upgrade",
                f"{packages_download_dir}/{gs_package_zip_file}",
            ]
        else:
            # Vendored install: place only this package (no deps) in target_dir.
            install_command = [
                sys.executable,
                "-m",
                "pip",
                "install",
                "--quiet",
                "--no-deps",
                "--upgrade",
                "-t",
                target_dir,
                f"{packages_download_dir}/{gs_package_zip_file}",
            ]
        try:
            subprocess.check_output(install_command)
        except Exception:
            # Fallback: retry with the pyenv shim interpreter when invoking
            # pip through the current interpreter fails.
            logging.warning("Attempting pip install with pyenv python")
            install_command[0] = f"{os.environ['HOME']}/.pyenv/shims/python"
            subprocess.check_output(install_command)
def download_packages(
    packages_download_dir: str,
    bucket_name: str,
    package_list: List[str],
):
    """Download Python packages from GCS into a local directory.
    Args:
        packages_download_dir (str): Local directory to download packages into
        bucket_name (str): Name of GCS bucket to download packages from
        packages list(str): Names of packages found in bucket
    """
    os.mkdir(packages_download_dir)
    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    for package_name in package_list:
        # A spec "pkg==1.2.3" is stored in the bucket as "pkg/pkg-1.2.3.tar.gz".
        name_no_version = package_name.split("==")[0]
        name_versioned = package_name.replace("==", "-")
        package_filepath = f"{name_versioned}.tar.gz"
        gs_package_path = f"{name_no_version}/{package_filepath}"
        blob_package = bucket.blob(gs_package_path)
        blob_package.download_to_filename(
            os.path.join(packages_download_dir, package_filepath)
        )
| 31.08 | 85 | 0.615444 | import os
import subprocess
import sys
import shutil
import logging
from typing import Optional, List
import click
from google.cloud import storage
@click.command()
@click.option('-b', "--bucket_name", help="(str) Name of GCS bucket")
@click.option(
    '-r',
    "--requirement",
    help="(str) Name of Python package or requirements file",
)
@click.option(
    '-d',
    "--download_dir",
    default="gcs_packages",
    help="(optional, str) File download destination",
)
@click.option("-t", "--target", default="", help="(str) Package install destination")
def main(bucket_name, requirement, download_dir, target):
    """Pip install {pkg_name}/{pkg_name_versioned}.tar.gz to
    current enviroment or a target directory
    (1) Copy package_name.tar.gz from Google Cloud bucket
    (2) Pip Install package_name.tar.gz into staging directory
    (3) Remove the package_name.tar.gz
    """
    try:
        # Accept either a requirements file (one spec per line) or a single
        # package name.
        if os.path.isfile(requirement):
            with open(requirement) as requirements_file:
                packages = [line.strip() for line in requirements_file.readlines()]
        else:
            packages = [requirement]
        download_packages(
            bucket_name=bucket_name,
            package_list=packages,
            packages_download_dir=download_dir,
        )
        logging.info('download done')
        install_packages(download_dir, target)
    finally:
        # Clean up the staging directory regardless of success.
        if os.path.exists(download_dir):
            shutil.rmtree(download_dir)
def install_packages(packages_download_dir: str, target_dir: Optional[str] = None):
    """Pip-install every package archive found in *packages_download_dir*.

    Without *target_dir* the archives are installed (with dependencies) into
    the current environment; with *target_dir* each package is installed
    alone (``--no-deps``) into that directory.

    Args:
        packages_download_dir (str): Directory containing package archives.
        target_dir (str): Optional destination directory for the install.
    """
    for archive_name in os.listdir(packages_download_dir):
        command = [sys.executable, "-m", "pip", "install", "--quiet"]
        if not target_dir:
            command += ["--upgrade"]
        else:
            command += ["--no-deps", "--upgrade", "-t", target_dir]
        command.append(f"{packages_download_dir}/{archive_name}")
        try:
            subprocess.check_output(command)
        except Exception:
            # Retry with the pyenv shim interpreter if the current
            # interpreter's pip invocation fails.
            logging.warning("Attempting pip install with pyenv python")
            command[0] = f"{os.environ['HOME']}/.pyenv/shims/python"
            subprocess.check_output(command)
def download_packages(
    packages_download_dir: str,
    bucket_name: str,
    package_list: List[str],
):
    """Fetch each requested package tarball from a GCS bucket.

    Args:
        packages_download_dir (str): Local directory created to hold downloads.
        bucket_name (str): Name of the GCS bucket holding the packages.
        package_list (list[str]): Package specs (``name`` or ``name==version``).
    """
    os.mkdir(packages_download_dir)
    bucket = storage.Client().bucket(bucket_name)
    for spec in package_list:
        # "pkg==1.2.3" lives in the bucket at "pkg/pkg-1.2.3.tar.gz".
        base_name = spec.split("==")[0]
        versioned_name = spec.replace("==", "-")
        tarball = f"{versioned_name}.tar.gz"
        remote_path = f"{base_name}/{tarball}"
        local_path = os.path.join(packages_download_dir, tarball)
        bucket.blob(remote_path).download_to_filename(local_path)
| 0 | 0 | 0 |
3c6c46496a0c4eb7347942566a531f86dc598146 | 118 | py | Python | scripts/generate_secret_key.py | dimadk24/english-fight-api | 506a3eb2cb4cb91203b1e023b5248c27975df075 | [
"MIT"
] | 2 | 2015-10-04T09:00:19.000Z | 2019-06-07T18:59:41.000Z | tools/django_new_secretkey.py | VlasovVitaly/internal | 3cb382ddd34f89ff10bb31537658114722084468 | [
"MIT"
] | null | null | null | tools/django_new_secretkey.py | VlasovVitaly/internal | 3cb382ddd34f89ff10bb31537658114722084468 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from django.core.management.utils import get_random_secret_key
# Print a freshly generated Django SECRET_KEY value to stdout.
print(get_random_secret_key())
| 19.666667 | 62 | 0.822034 | #!/usr/bin/env python
from django.core.management.utils import get_random_secret_key
# Print a freshly generated Django SECRET_KEY value to stdout.
print(get_random_secret_key())
| 0 | 0 | 0 |
26201a87a95b649bd707768c61038ae09d833cee | 281 | py | Python | src/json_functions.py | iansedano/twitbotto | 5b25e2745548cb0a0ff705867bd0bd6fbbcafc99 | [
"MIT"
] | null | null | null | src/json_functions.py | iansedano/twitbotto | 5b25e2745548cb0a0ff705867bd0bd6fbbcafc99 | [
"MIT"
] | 2 | 2021-06-08T21:23:08.000Z | 2021-09-08T01:55:32.000Z | src/json_functions.py | iansedano/twitbotto | 5b25e2745548cb0a0ff705867bd0bd6fbbcafc99 | [
"MIT"
] | null | null | null | import json
import os
| 20.071429 | 50 | 0.672598 | import json
import os
def list_to_json(list_to_convert, json_file_name):
path = "..\\jsons\\" + json_file_name
with open(path, 'w') as fp:
json.dump(list_to_convert, fp)
def read_json(path):
with open(path) as json_file:
return json.load(json_file)
| 211 | 0 | 46 |
b8911302eb3439249e4effef1b079cd8227cb386 | 155 | py | Python | locate/__init__.py | AutoActuary/locate | c736b7374d453e5c7cfee3deacfb04a8649ea638 | [
"MIT"
] | null | null | null | locate/__init__.py | AutoActuary/locate | c736b7374d453e5c7cfee3deacfb04a8649ea638 | [
"MIT"
] | 3 | 2021-10-06T11:49:36.000Z | 2022-03-30T11:48:35.000Z | locate/__init__.py | AutoActuary/locate | c736b7374d453e5c7cfee3deacfb04a8649ea638 | [
"MIT"
] | null | null | null | from .locate import (
this_dir,
allow_relative_location_imports,
force_relative_location_imports,
append_sys_path,
prepend_sys_path,
)
| 19.375 | 36 | 0.754839 | from .locate import (
this_dir,
allow_relative_location_imports,
force_relative_location_imports,
append_sys_path,
prepend_sys_path,
)
| 0 | 0 | 0 |
56403e9193d1094b1cd5383247457c75f4873f69 | 230 | py | Python | commonuicomponents/ttk/containers/labeledwidget/labeledcontainer.py | RobinBobin/python3-common-ui-components | 281936a3bf6f1a366eeaaf8f177c0fae9ef24389 | [
"MIT"
] | 2 | 2020-08-04T16:30:13.000Z | 2020-10-28T14:23:33.000Z | commonuicomponents/ttk/containers/labeledwidget/labeledcontainer.py | RobinBobin/python3-common-ui-components | 281936a3bf6f1a366eeaaf8f177c0fae9ef24389 | [
"MIT"
] | null | null | null | commonuicomponents/ttk/containers/labeledwidget/labeledcontainer.py | RobinBobin/python3-common-ui-components | 281936a3bf6f1a366eeaaf8f177c0fae9ef24389 | [
"MIT"
] | null | null | null | from .basecontainer import inflateChildren
from .. import LabeledContainer
| 25.555556 | 54 | 0.769565 | from .basecontainer import inflateChildren
from .. import LabeledContainer
class LabeledWidgetLabeledContainer(LabeledContainer):
   """LabeledContainer variant that also inflates labeled-widget children.

   Runs the module-level ``inflateChildren`` helper on itself before
   delegating to the base-class inflation.
   """
   def _inflateChildren(self):
      # Inflate this container's own labeled-widget children first, then let
      # the base class inflate whatever it manages.
      inflateChildren(self)
      super()._inflateChildren()
| 74 | 33 | 48 |
816361f9f99094f3baec25c09e4c879eebf67dbd | 2,420 | py | Python | calls/utils.py | dtcooper/bmir-calls | f03e40b5037f1c7ec70b8035b9bb219e209e45b9 | [
"MIT"
] | 1 | 2019-11-03T17:37:56.000Z | 2019-11-03T17:37:56.000Z | calls/utils.py | dtcooper/bmir-calls | f03e40b5037f1c7ec70b8035b9bb219e209e45b9 | [
"MIT"
] | null | null | null | calls/utils.py | dtcooper/bmir-calls | f03e40b5037f1c7ec70b8035b9bb219e209e45b9 | [
"MIT"
] | null | null | null | from functools import wraps
import re
from urllib.parse import unquote
from twilio.base.exceptions import TwilioRestException
from flask import (
current_app as app,
render_template,
request,
Response,
url_for,
)
| 28.139535 | 88 | 0.620248 | from functools import wraps
import re
from urllib.parse import unquote
from twilio.base.exceptions import TwilioRestException
from flask import (
current_app as app,
render_template,
request,
Response,
url_for,
)
def sanitize_phone_number(phone_number, with_country_code=False):
    """Normalize *phone_number* via the Twilio lookup API.

    Args:
        phone_number: Candidate phone number; non-strings are rejected.
        with_country_code: When True, return a ``(number, country_code)``
            tuple instead of just the number.

    Returns:
        The Twilio-normalized number (or tuple) on success; ``None``
        (or ``(None, None)`` when *with_country_code* is set) when the
        input is not a string or Twilio rejects it.
    """
    sanitized = (None, None) if with_country_code else None
    if isinstance(phone_number, str):
        # Normalize international dialing prefixes ('00', '011') to '+'.
        for intl_prefix in ('00', '011'):
            if phone_number.startswith(intl_prefix):
                phone_number = '+' + phone_number[len(intl_prefix):]
        try:
            lookup = app.twilio.lookups.phone_numbers(
                phone_number).fetch(country_code='US')
        except TwilioRestException: # skip coverage
            # Invalid numbers are logged and fall through to the None result.
            app.logger.warn('Invalid phone number: {}'.format(phone_number))
            pass
        else:
            if with_country_code:
                sanitized = (lookup.phone_number, lookup.country_code)
            else:
                sanitized = lookup.phone_number
    return sanitized
def protected(route):
    """Decorator requiring the shared API password for a Flask route.

    The password may be supplied as a ``password`` query parameter or as
    the HTTP basic-auth password; any request is allowed when the app runs
    in debug mode. Unauthorized requests receive a 401 response carrying a
    basic-auth challenge.
    """
    @wraps(route)
    def protected_route(*args, **kwargs):
        password_get = request.args.get('password', '')
        password_auth = request.authorization.password if request.authorization else ''
        if (
            password_get == app.config['API_PASSWORD']
            or password_auth == app.config['API_PASSWORD']
            or app.debug
        ):
            return route(*args, **kwargs)
        else:
            return Response(
                status=401,
                headers={'WWW-Authenticate': 'Basic realm="Password Required"'})
    return protected_route
def render_xml(template, *args, **kwargs):
    """Render a template and return it with a ``text/xml`` content type."""
    return Response(render_template(template, *args, **kwargs), content_type='text/xml')
def protected_external_url(endpoint, *args, **kwargs):
    """Build an absolute URL for *endpoint* with the API password appended,
    so the resulting URL passes the ``protected`` query-parameter check."""
    kwargs.update({
        'password': app.config['API_PASSWORD'],
        '_external': True,
    })
    return url_for(endpoint, *args, **kwargs)
def parse_sip_address(address):
    """Extract and URL-decode the user part of a ``sip:user@host`` address.

    Returns ``None`` when *address* is not a string or does not start with
    a ``sip:user@`` prefix.
    """
    if not isinstance(address, str):
        return None
    found = re.match(r'sip:([^@]+)@', address)
    return unquote(found.group(1)) if found else None
def get_gather_times():
    """Return the gather count from the ``gather`` query arg, plus one.

    Malformed (non-integer) values fall back to 1.
    """
    raw_count = request.args.get('gather', '0')
    try:
        return int(raw_count, 10) + 1
    except ValueError:
        return 1
| 2,041 | 0 | 138 |
d204be1e4997608731daf912c09b1dfd1b309e6e | 1,665 | py | Python | python/ldtui/__init__.py | ezequielmastrasso/EZLookdevTools | 4914d8f92b1ff195aeb4b15b277035c61a235ae1 | [
"MIT"
] | 45 | 2019-07-12T16:03:20.000Z | 2022-03-04T06:03:48.000Z | python/ldtui/__init__.py | ezequielmastrasso/EZLookdevTools | 4914d8f92b1ff195aeb4b15b277035c61a235ae1 | [
"MIT"
] | 115 | 2019-05-22T09:10:38.000Z | 2020-03-25T11:01:35.000Z | python/ldtui/__init__.py | ezequielmastrasso/EZLookdevTools | 4914d8f92b1ff195aeb4b15b277035c61a235ae1 | [
"MIT"
] | 8 | 2019-12-03T07:19:37.000Z | 2021-12-13T10:31:15.000Z | """
.. module:: ldtui
:synopsis: Main tools UI.
.. moduleauthor:: Ezequiel Mastrasso
"""
from Qt import QtGui, QtWidgets, QtCore
from Qt.QtWidgets import QApplication, QWidget, QLabel, QMainWindow
import sys
import imp
import os
import logging
from functools import partial
from ldtui import qtutils
import ldt
logger = logging.getLogger(__name__)
class LDTWindow(QMainWindow):
'''Main Tools UI Window. Loads the plugInfo.plugin_object.plugin_layout
QWidget from all loaded plugins as tabs'''
| 29.210526 | 75 | 0.671471 | """
.. module:: ldtui
:synopsis: Main tools UI.
.. moduleauthor:: Ezequiel Mastrasso
"""
from Qt import QtGui, QtWidgets, QtCore
from Qt.QtWidgets import QApplication, QWidget, QLabel, QMainWindow
import sys
import imp
import os
import logging
from functools import partial
from ldtui import qtutils
import ldt
logger = logging.getLogger(__name__)
class LDTWindow(QMainWindow):
    '''Main Tools UI Window. Loads the plugInfo.plugin_object.plugin_layout
    QWidget from all loaded plugins as tabs'''
    def __init__(self, plugins):
        """Build the main window and add one tab per loaded plugin.

        Args:
            plugins: plugin manager exposing getAllPlugins(); each plugin
                object must provide a ``plugin_layout`` QWidget.
        """
        super(LDTWindow, self).__init__()
        self.setWindowTitle("Look Dev Tool Set")
        self.setGeometry(0, 0, 650, 600)
        layout = QtWidgets.QGridLayout()
        self.setLayout(layout)
        tabwidget = QtWidgets.QTabWidget()
        # Horizontal tab labels on a West-positioned tab bar.
        tabwidget.setTabBar(qtutils.HTabWidget(width=150, height=50))
        tabwidget.setTabPosition(QtWidgets.QTabWidget.West)
        # Stylesheet fix for Katana
        # With default colors, the tab text is almost the
        # same as the tab background
        stylesheet = """
        QTabBar::tab:unselected {background: #222222;}
        QTabWidget>QWidget>QWidget{background: #222222;}
        QTabBar::tab:selected {background: #303030;}
        QTabWidget>QWidget>QWidget{background: #303030;}
        """
        tabwidget.setStyleSheet(stylesheet)
        layout.addWidget(tabwidget, 0, 0)
        # One tab per plugin, labelled with the plugin's registered name.
        # (Removed the unused plugins_ui / plugins_buttons dicts that were
        # created here but never read.)
        for pluginInfo in plugins.getAllPlugins():
            tabwidget.addTab(
                pluginInfo.plugin_object.plugin_layout,
                pluginInfo.name)
        self.setCentralWidget(tabwidget)
| 1,126 | 0 | 27 |
f4c3f100fa81a8416efb0fabbb2f62c7fe084acb | 5,754 | py | Python | solutions/poker/poker_test.py | mesmacosta/python-ciandt-intership | 65a8941997eee5d683e56de55d33423b2566b7d0 | [
"MIT"
] | 1 | 2020-08-31T23:06:43.000Z | 2020-08-31T23:06:43.000Z | solutions/poker/poker_test.py | mesmacosta/python-ciandt-intership | 65a8941997eee5d683e56de55d33423b2566b7d0 | [
"MIT"
] | null | null | null | solutions/poker/poker_test.py | mesmacosta/python-ciandt-intership | 65a8941997eee5d683e56de55d33423b2566b7d0 | [
"MIT"
] | null | null | null | import unittest
from poker import best_hands
if __name__ == "__main__":
unittest.main()
| 33.453488 | 110 | 0.521724 | import unittest
from poker import best_hands
class PokerTest(unittest.TestCase):
    """Tests for best_hands: each case passes a list of poker hands (cards as
    '<rank><suit>' strings) and asserts the winning hand(s) are returned.
    Ties return every winning hand, so expected values are always lists."""
    def test_single_hand_always_wins(self):
        self.assertEqual(best_hands(["4♠ 5♠ 7♥ 8♦ J♣"]), ["4♠ 5♠ 7♥ 8♦ J♣"])
    def test_highest_card_out_of_all_hands_wins(self):
        self.assertEqual(
            best_hands(["4♦ 5♠ 6♠ 8♦ 3♣", "2♠ 4♣ 7♠ 9♥ 10♥", "3♠ 4♠ 5♦ 6♥ J♥"]),
            ["3♠ 4♠ 5♦ 6♥ J♥"],
        )
    def test_a_tie_has_multiple_winners(self):
        self.assertEqual(
            best_hands(
                [
                    "4♦ 5♠ 6♠ 8♦ 3♣",
                    "2♠ 4♣ 7♠ 9♥ 10♥",
                    "3♠ 4♠ 5♦ 6♥ J♥",
                    "3♥ 4♥ 5♣ 6♣ J♦",
                ]
            ),
            ["3♠ 4♠ 5♦ 6♥ J♥", "3♥ 4♥ 5♣ 6♣ J♦"],
        )
    def test_multiple_hands_with_the_same_high_cards_tie_compares_next_highest_ranked_down_to_last_card(
        self
    ):
        self.assertEqual(
            best_hands(["3♠ 5♥ 6♠ 8♦ 7♥", "2♠ 5♦ 6♦ 8♣ 7♠"]), ["3♠ 5♥ 6♠ 8♦ 7♥"]
        )
    def test_one_pair_beats_high_card(self):
        self.assertEqual(
            best_hands(["4♠ 5♥ 6♣ 8♦ K♥", "2♠ 4♥ 6♠ 4♦ J♥"]), ["2♠ 4♥ 6♠ 4♦ J♥"]
        )
    def test_highest_pair_wins(self):
        self.assertEqual(
            best_hands(["4♠ 2♥ 6♠ 2♦ J♥", "2♠ 4♥ 6♣ 4♦ J♦"]), ["2♠ 4♥ 6♣ 4♦ J♦"]
        )
    def test_two_pairs_beats_one_pair(self):
        self.assertEqual(
            best_hands(["2♠ 8♥ 6♠ 8♦ J♥", "4♠ 5♥ 4♣ 8♣ 5♣"]), ["4♠ 5♥ 4♣ 8♣ 5♣"]
        )
    def test_both_hands_have_two_pairs_highest_ranked_pair_wins(self):
        self.assertEqual(
            best_hands(["2♠ 8♥ 2♦ 8♦ 3♥", "4♠ 5♥ 4♣ 8♠ 5♦"]), ["2♠ 8♥ 2♦ 8♦ 3♥"]
        )
    def test_both_hands_have_two_pairs_with_the_same_highest_ranked_pair_tie_goes_to_low_pair(
        self
    ):
        self.assertEqual(
            best_hands(["2♠ Q♠ 2♣ Q♦ J♥", "J♦ Q♥ J♠ 8♦ Q♣"]), ["J♦ Q♥ J♠ 8♦ Q♣"]
        )
    def test_both_hands_have_two_identically_ranked_pairs_tie_goes_to_remaining_card_kicker(
        self
    ):
        self.assertEqual(
            best_hands(["J♦ Q♥ J♠ 8♦ Q♣", "J♠ Q♠ J♣ 2♦ Q♦"]), ["J♦ Q♥ J♠ 8♦ Q♣"]
        )
    def test_three_of_a_kind_beats_two_pair(self):
        self.assertEqual(
            best_hands(["2♠ 8♥ 2♥ 8♦ J♥", "4♠ 5♥ 4♣ 8♠ 4♥"]), ["4♠ 5♥ 4♣ 8♠ 4♥"]
        )
    def test_both_hands_have_three_of_a_kind_tie_goes_to_highest_ranked_triplet(self):
        self.assertEqual(
            best_hands(["2♠ 2♥ 2♣ 8♦ J♥", "4♠ A♥ A♠ 8♣ A♦"]), ["4♠ A♥ A♠ 8♣ A♦"]
        )
    def test_with_multiple_decks_two_players_can_have_same_three_of_a_kind_ties_go_to_highest_remaining_cards(
        self
    ):
        self.assertEqual(
            best_hands(["4♠ A♥ A♠ 7♣ A♦", "4♠ A♥ A♠ 8♣ A♦"]), ["4♠ A♥ A♠ 8♣ A♦"]
        )
    def test_a_straight_beats_three_of_a_kind(self):
        self.assertEqual(
            best_hands(["4♠ 5♥ 4♣ 8♦ 4♥", "3♠ 4♦ 2♠ 6♦ 5♣"]), ["3♠ 4♦ 2♠ 6♦ 5♣"]
        )
    def test_aces_can_end_a_straight_10_j_q_k_a(self):
        self.assertEqual(
            best_hands(["4♠ 5♥ 4♣ 8♦ 4♥", "10♦ J♥ Q♠ K♦ A♣"]), ["10♦ J♥ Q♠ K♦ A♣"]
        )
    def test_aces_can_start_a_straight_a_2_3_4_5(self):
        self.assertEqual(
            best_hands(["4♠ 5♥ 4♣ 8♦ 4♥", "4♦ A♥ 3♠ 2♦ 5♣"]), ["4♦ A♥ 3♠ 2♦ 5♣"]
        )
    def test_both_hands_with_a_straight_tie_goes_to_highest_ranked_card(self):
        self.assertEqual(
            best_hands(["4♠ 6♣ 7♠ 8♦ 5♥", "5♠ 7♥ 8♠ 9♦ 6♥"]), ["5♠ 7♥ 8♠ 9♦ 6♥"]
        )
    def test_even_though_an_ace_is_usually_high_a_5_high_straight_is_the_lowest_scoring_straight(
        self
    ):
        self.assertEqual(
            best_hands(["2♥ 3♣ 4♦ 5♦ 6♥", "4♠ A♥ 3♠ 2♦ 5♥"]), ["2♥ 3♣ 4♦ 5♦ 6♥"]
        )
    def test_flush_beats_a_straight(self):
        self.assertEqual(
            best_hands(["4♣ 6♥ 7♦ 8♦ 5♥", "2♠ 4♠ 5♠ 6♠ 7♠"]), ["2♠ 4♠ 5♠ 6♠ 7♠"]
        )
    def test_both_hands_have_a_flush_tie_goes_to_high_card_down_to_the_last_one_if_necessary(
        self
    ):
        self.assertEqual(
            best_hands(["4♥ 7♥ 8♥ 9♥ 6♥", "2♠ 4♠ 5♠ 6♠ 7♠"]), ["4♥ 7♥ 8♥ 9♥ 6♥"]
        )
    def test_full_house_beats_a_flush(self):
        self.assertEqual(
            best_hands(["3♥ 6♥ 7♥ 8♥ 5♥", "4♠ 5♥ 4♣ 5♦ 4♥"]), ["4♠ 5♥ 4♣ 5♦ 4♥"]
        )
    def test_both_hands_have_a_full_house_tie_goes_to_highest_ranked_triplet(self):
        self.assertEqual(
            best_hands(["4♥ 4♠ 4♦ 9♠ 9♦", "5♥ 5♠ 5♦ 8♠ 8♦"]), ["5♥ 5♠ 5♦ 8♠ 8♦"]
        )
    def test_with_multiple_decks_both_hands_have_a_full_house_with_the_same_triplet_tie_goes_to_the_pair(
        self
    ):
        self.assertEqual(
            best_hands(["5♥ 5♠ 5♦ 9♠ 9♦", "5♥ 5♠ 5♦ 8♠ 8♦"]), ["5♥ 5♠ 5♦ 9♠ 9♦"]
        )
    def test_four_of_a_kind_beats_a_full_house(self):
        self.assertEqual(
            best_hands(["4♠ 5♥ 4♦ 5♦ 4♥", "3♠ 3♥ 2♠ 3♦ 3♣"]), ["3♠ 3♥ 2♠ 3♦ 3♣"]
        )
    def test_both_hands_have_four_of_a_kind_tie_goes_to_high_quad(self):
        self.assertEqual(
            best_hands(["2♠ 2♥ 2♣ 8♦ 2♦", "4♠ 5♥ 5♠ 5♦ 5♣"]), ["4♠ 5♥ 5♠ 5♦ 5♣"]
        )
    def test_with_multiple_decks_both_hands_with_identical_four_of_a_kind_tie_determined_by_kicker(
        self
    ):
        self.assertEqual(
            best_hands(["3♠ 3♥ 2♠ 3♦ 3♣", "3♠ 3♥ 4♠ 3♦ 3♣"]), ["3♠ 3♥ 4♠ 3♦ 3♣"]
        )
    def test_straight_flush_beats_four_of_a_kind(self):
        self.assertEqual(
            best_hands(["4♠ 5♥ 5♠ 5♦ 5♣", "7♠ 8♠ 9♠ 6♠ 10♠"]), ["7♠ 8♠ 9♠ 6♠ 10♠"]
        )
    def test_both_hands_have_straight_flush_tie_goes_to_highest_ranked_card(self):
        self.assertEqual(
            best_hands(["4♥ 6♥ 7♥ 8♥ 5♥", "5♠ 7♠ 8♠ 9♠ 6♠"]), ["5♠ 7♠ 8♠ 9♠ 6♠"]
        )
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| 5,737 | 14 | 778 |
d26277a2445117f02d002af1e9017743e8bd3ff0 | 576 | py | Python | actions/push.py | cognifloyd/stackstorm-packer | c6820967bc07acb1c93d2d3fbd516ba9cc79f212 | [
"Apache-2.0"
] | 164 | 2015-01-17T16:08:33.000Z | 2021-08-03T02:34:07.000Z | actions/push.py | cognifloyd/stackstorm-packer | c6820967bc07acb1c93d2d3fbd516ba9cc79f212 | [
"Apache-2.0"
] | 442 | 2015-01-01T11:19:01.000Z | 2017-09-06T23:26:17.000Z | actions/push.py | cognifloyd/stackstorm-packer | c6820967bc07acb1c93d2d3fbd516ba9cc79f212 | [
"Apache-2.0"
] | 202 | 2015-01-13T00:37:40.000Z | 2020-11-07T11:30:10.000Z | from lib.actions import BaseAction
| 36 | 80 | 0.642361 | from lib.actions import BaseAction
class PushAction(BaseAction):
    """StackStorm action wrapping ``packer push`` against Atlas."""
    def run(self, packerfile, name, message=None, cwd=None, exclude=None,
            only=None, variables=None, variables_file=None):
        """Push *packerfile* to Atlas under *name* using the configured token.

        Raises ValueError when no Atlas token is configured.
        """
        if cwd:
            self.set_dir(cwd)
        packer = self.packer(packerfile, exc=exclude, only=only,
                             variables=variables, vars_file=variables_file)
        if not self.atlas_token:
            raise ValueError("Missing 'atlas_token' in config.yaml for packer")
        return packer.push(name, message=message, token=self.atlas_token)
| 483 | 8 | 49 |
9dbfe74ec04012783b167322113aa628ebe775f9 | 360 | py | Python | app/exchanges/serializers.py | iyanuashiri/exchange-api | 86f7a4e9fb17f71888e6854510618876d1010c19 | [
"MIT"
] | null | null | null | app/exchanges/serializers.py | iyanuashiri/exchange-api | 86f7a4e9fb17f71888e6854510618876d1010c19 | [
"MIT"
] | null | null | null | app/exchanges/serializers.py | iyanuashiri/exchange-api | 86f7a4e9fb17f71888e6854510618876d1010c19 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import Exchange
| 32.727273 | 118 | 0.713889 | from rest_framework import serializers
from .models import Exchange
class ExchangeSerializer(serializers.ModelSerializer):
    """Serializes Exchange records: a currency pair plus its quote data."""
    class Meta:
        model = Exchange
        # Currency pair identifiers followed by the rate/quote metadata.
        fields = ('from_currency_code', 'from_currency_name', 'to_currency_code', 'to_currency_name', 'exchange_rate',
                  'last_refreshed', 'timezone', 'bid_price', 'ask_price')
c97ebed6e5aaf12aba02f2562b55b162f1f5033c | 10,230 | py | Python | optimum/utils/runs.py | huggingface/optimum | 65bb7fa4f635c6d2e6734485f1b8be58cb690fd0 | [
"Apache-2.0"
] | 414 | 2021-09-14T11:39:36.000Z | 2022-03-31T19:10:45.000Z | optimum/utils/runs.py | huggingface/optimum | 65bb7fa4f635c6d2e6734485f1b8be58cb690fd0 | [
"Apache-2.0"
] | 49 | 2021-09-14T11:29:52.000Z | 2022-03-31T14:02:34.000Z | optimum/utils/runs.py | huggingface/optimum | 65bb7fa4f635c6d2e6734485f1b8be58cb690fd0 | [
"Apache-2.0"
] | 29 | 2021-09-15T01:51:54.000Z | 2022-03-23T08:20:17.000Z | from dataclasses import field
from enum import Enum
from typing import Dict, List, Optional, Union
from . import is_pydantic_available
from .doc import generate_doc_dataclass
if is_pydantic_available():
from pydantic.dataclasses import dataclass
else:
from dataclasses import dataclass
@generate_doc_dataclass
@dataclass
class Calibration:
"""Parameters for post-training calibration with static quantization."""
method: CalibrationMethods = field(
metadata={"description": 'Calibration method used, either "minmax", "entropy" or "percentile".'}
)
num_calibration_samples: int = field(
metadata={
"description": "Number of examples to use for the calibration step resulting from static quantization."
}
)
calibration_histogram_percentile: Optional[float] = field(
default=None, metadata={"description": "The percentile used for the percentile calibration method."}
)
calibration_moving_average: Optional[bool] = field(
default=None,
metadata={
"description": "Whether to compute the moving average of the minimum and maximum values for the minmax calibration method."
},
)
calibration_moving_average_constant: Optional[float] = field(
default=None,
metadata={
"description": "Constant smoothing factor to use when computing the moving average of the minimum and maximum values. Effective only when the selected calibration method is minmax and `calibration_moving_average` is set to True."
},
)
@generate_doc_dataclass
@dataclass
@generate_doc_dataclass
@dataclass
@generate_doc_dataclass
@dataclass
@generate_doc_dataclass
@dataclass
class DatasetArgs:
"""Parameters related to the dataset."""
path: str = field(metadata={"description": "Path to the dataset, as in `datasets.load_dataset(path)`."})
eval_split: str = field(metadata={"description": 'Dataset split used for evaluation (e.g. "test").'})
data_keys: Dict[str, Union[None, str]] = field(
metadata={
"description": 'Dataset columns used as input data. At most two, indicated with "primary" and "secondary".'
}
)
ref_keys: List[str] = field(metadata={"description": "Dataset column used for references during evaluation."})
name: Optional[str] = field(
default=None, metadata={"description": "Name of the dataset, as in `datasets.load_dataset(path, name)`."}
)
calibration_split: Optional[str] = field(
default=None, metadata={"description": 'Dataset split used for calibration (e.g. "train").'}
)
@generate_doc_dataclass
@dataclass
class TaskArgs:
"""Task-specific parameters."""
is_regression: Optional[bool] = field(
default=None,
metadata={
"description": "Text classification specific. Set whether the task is regression (output = one float)."
},
)
@dataclass
@dataclass
@dataclass
class _RunConfigBase:
"""Parameters defining a run. A run is an evaluation of a triplet (model, dataset, metric) coupled with optimization parameters, allowing to compare a transformers baseline and a model optimized with Optimum."""
metrics: List[str] = field(metadata={"description": "List of metrics to evaluate on."})
@dataclass
@dataclass
@generate_doc_dataclass
@dataclass
class RunConfig(Run, _RunConfigDefaults, _RunConfigBase):
"""Class holding the parameters to launch a run."""
pass
| 38.458647 | 241 | 0.672434 | from dataclasses import field
from enum import Enum
from typing import Dict, List, Optional, Union
from . import is_pydantic_available
from .doc import generate_doc_dataclass
if is_pydantic_available():
from pydantic.dataclasses import dataclass
else:
from dataclasses import dataclass
class APIFeaturesManager:
    """Registry of the tasks (and model/task pairs) the run API supports."""
    _SUPPORTED_TASKS = ["text-classification", "token-classification", "question-answering"]
    @staticmethod
    def check_supported_model_task_pair(model_type: str, task: str):
        """Raise KeyError unless *model_type* supports *task*.

        NOTE(review): relies on ``_SUPPORTED_MODEL_TYPE``, which is not
        defined in this class as shown here -- presumably defined elsewhere;
        confirm before calling.
        """
        model_type = model_type.lower()
        supported = APIFeaturesManager._SUPPORTED_MODEL_TYPE
        if model_type not in supported:
            raise KeyError(
                f"{model_type} is not supported yet. "
                f"Only {list(supported.keys())} are supported. "
                f"If you want to support {model_type} please propose a PR or open up an issue."
            )
        if task not in supported[model_type]:
            raise KeyError(
                f"{task} is not supported yet for model {model_type}. "
                f"Only {supported[model_type]} are supported. "
                f"If you want to support {task} please propose a PR or open up an issue."
            )
    @staticmethod
    def check_supported_task(task: str):
        """Raise KeyError when *task* is not one of the supported tasks."""
        if task not in APIFeaturesManager._SUPPORTED_TASKS:
            raise KeyError(
                f"{task} is not supported yet. "
                f"Only {APIFeaturesManager._SUPPORTED_TASKS} are supported. "
                f"If you want to support {task} please propose a PR or open up an issue."
            )
class Frameworks(str, Enum):
    """Inference frameworks a run can use (string-valued enum)."""
    onnxruntime = "onnxruntime"
class CalibrationMethods(str, Enum):
    """Calibration methods usable for static quantization."""
    minmax = "minmax"
    percentile = "percentile"
    entropy = "entropy"
class QuantizationApproach(str, Enum):
    """Whether quantization is applied statically or dynamically."""
    static = "static"
    dynamic = "dynamic"
@generate_doc_dataclass
@dataclass
class Calibration:
    """Parameters for post-training calibration with static quantization."""
    # NOTE(review): each field's "description" metadata is presumably rendered
    # into the class documentation by @generate_doc_dataclass -- confirm.
    method: CalibrationMethods = field(
        metadata={"description": 'Calibration method used, either "minmax", "entropy" or "percentile".'}
    )
    num_calibration_samples: int = field(
        metadata={
            "description": "Number of examples to use for the calibration step resulting from static quantization."
        }
    )
    calibration_histogram_percentile: Optional[float] = field(
        default=None, metadata={"description": "The percentile used for the percentile calibration method."}
    )
    calibration_moving_average: Optional[bool] = field(
        default=None,
        metadata={
            "description": "Whether to compute the moving average of the minimum and maximum values for the minmax calibration method."
        },
    )
    calibration_moving_average_constant: Optional[float] = field(
        default=None,
        metadata={
            "description": "Constant smoothing factor to use when computing the moving average of the minimum and maximum values. Effective only when the selected calibration method is minmax and `calibration_moving_average` is set to True."
        },
    )
@generate_doc_dataclass
@dataclass
class FrameworkArgs:
    opset: Optional[int] = field(default=15, metadata={"description": "ONNX opset version to export the model with."})
    optimization_level: Optional[int] = field(default=0, metadata={"description": "ONNX optimization level."})
    def __post_init__(self):
        """Reject opset / optimization-level values OnnxRuntime cannot handle."""
        assert self.opset <= 15, f"Unsupported OnnxRuntime opset: {self.opset}"
        # OnnxRuntime only defines these four optimization levels.
        allowed_levels = (0, 1, 2, 99)
        assert self.optimization_level in allowed_levels, \
            f"Unsupported OnnxRuntime optimization level: {self.optimization_level}"
@generate_doc_dataclass
@dataclass
class Versions:
    """Versions of the libraries involved in producing a run."""
    transformers: str = field(metadata={"description": "Transformers version."})
    optimum: str = field(metadata={"description": "Optimum version."})
    optimum_hash: Optional[str] = field(
        default=None, metadata={"description": "Optimum commit hash, in case the dev version is used."}
    )
    onnxruntime: Optional[str] = field(default=None, metadata={"description": "Onnx Runtime version."})
    torch_ort: Optional[str] = field(default=None, metadata={"description": "Torch-ort version."})
@generate_doc_dataclass
@dataclass
class Evaluation:
    time: List[Dict] = field(metadata={"description": "Measures of inference time (latency, throughput)."})
    others: Dict = field(metadata={"description": "Metrics measuring the performance on the given task."})
    def __post_init__(self):
        """Check `others` holds matching metric sets for baseline and optimized."""
        results = self.others
        assert "baseline" in results
        assert "optimized" in results
        optimized = results["optimized"]
        for metric_name, baseline_values in results["baseline"].items():
            assert baseline_values.keys() == optimized[metric_name].keys()
@generate_doc_dataclass
@dataclass
class DatasetArgs:
    """Parameters related to the dataset."""
    path: str = field(metadata={"description": "Path to the dataset, as in `datasets.load_dataset(path)`."})
    eval_split: str = field(metadata={"description": 'Dataset split used for evaluation (e.g. "test").'})
    # Values may be None when a slot (e.g. "secondary") is unused.
    data_keys: Dict[str, Union[None, str]] = field(
        metadata={
            "description": 'Dataset columns used as input data. At most two, indicated with "primary" and "secondary".'
        }
    )
    ref_keys: List[str] = field(metadata={"description": "Dataset column used for references during evaluation."})
    name: Optional[str] = field(
        default=None, metadata={"description": "Name of the dataset, as in `datasets.load_dataset(path, name)`."}
    )
    calibration_split: Optional[str] = field(
        default=None, metadata={"description": 'Dataset split used for calibration (e.g. "train").'}
    )
@generate_doc_dataclass
@dataclass
class TaskArgs:
    """Task-specific parameters."""
    # Currently only text classification needs an extra flag.
    is_regression: Optional[bool] = field(
        default=None,
        metadata={
            "description": "Text classification specific. Set whether the task is regression (output = one float)."
        },
    )
@dataclass
class _RunBase:
    """Required (non-defaulted) parameters common to every run."""
    model_name_or_path: str = field(
        metadata={"description": "Name of the model hosted on the Hub to use for the run."}
    )
    task: str = field(metadata={"description": "Task performed by the model."})
    quantization_approach: QuantizationApproach = field(
        metadata={"description": "Whether to use dynamic or static quantization."}
    )
    dataset: DatasetArgs = field(
        metadata={"description": "Dataset to use. Several keys must be set on top of the dataset name."}
    )
    framework: Frameworks = field(metadata={"description": 'Name of the framework used (e.g. "onnxruntime").'})
    framework_args: FrameworkArgs = field(metadata={"description": "Framework-specific arguments."})
@dataclass
class _RunDefaults:
    """Run parameters that carry default values."""
    operators_to_quantize: Optional[List[str]] = field(
        default_factory=lambda: ["Add", "MatMul"],
        metadata={
            "description": 'Operators to quantize, doing no modifications to others (default: `["Add", "MatMul"]`).'
        },
    )
    node_exclusion: Optional[List[str]] = field(
        default_factory=lambda: [],
        metadata={"description": "Specific nodes to exclude from being quantized (default: `[]`)."},
    )
    per_channel: Optional[bool] = field(
        default=False, metadata={"description": "Whether to quantize per channel (default: `False`)."}
    )
    calibration: Optional[Calibration] = field(
        default=None, metadata={"description": "Calibration parameters, in case static quantization is used."}
    )
    task_args: Optional[TaskArgs] = field(
        default=None, metadata={"description": "Task-specific arguments (default: `None`)."}
    )
    aware_training: Optional[bool] = field(
        default=False,
        metadata={
            "description": "Whether the quantization is to be done with Quantization-Aware Training (not supported)."
        },
    )
@dataclass
class _RunConfigBase:
    """Parameters defining a run. A run is an evaluation of a triplet (model, dataset, metric) coupled with optimization parameters, allowing to compare a transformers baseline and a model optimized with Optimum."""
    # NOTE(review): kept as a separate base class apparently so that required
    # fields sort before defaulted ones in the combined dataclass -- confirm.
    metrics: List[str] = field(metadata={"description": "List of metrics to evaluate on."})
@dataclass
class _RunConfigDefaults(_RunDefaults):
    """Defaulted benchmark dimensions (batch sizes and input lengths)."""
    batch_sizes: Optional[List[int]] = field(
        default_factory=lambda: [4, 8],
        metadata={"description": "Batch sizes to include in the run to measure time metrics."},
    )
    input_lengths: Optional[List[int]] = field(
        default_factory=lambda: [128],
        metadata={"description": "Input lengths to include in the run to measure time metrics."},
    )
@dataclass
class Run(_RunDefaults, _RunBase):
    """A fully-specified run; `__post_init__` validates field combinations."""
    def __post_init__(self):
        """Validate task support and static-quantization prerequisites.

        NOTE(review): `self.task_args` and `self.dataset` are subscripted
        with `[...]` below, implying they are plain dicts at validation time
        (e.g. pre-conversion pydantic input) rather than TaskArgs/DatasetArgs
        instances -- confirm. Also note the `!= None` comparisons and the
        "explicity" typo inside the user-facing message (left untouched here).
        """
        # validate `task`
        APIFeaturesManager.check_supported_task(task=self.task)
        # validate `task_args`
        if self.task == "text-classification":
            message = "For text classification, whether the task is regression should be explicity specified in the task_args.is_regression key."
            assert self.task_args != None, message
            assert self.task_args["is_regression"] != None, message
        # validate `dataset`
        if self.quantization_approach == "static":
            assert self.dataset[
                "calibration_split"
            ], "Calibration split should be passed for static quantization in the dataset.calibration_split key."
        # validate `calibration`
        if self.quantization_approach == "static":
            assert (
                self.calibration
            ), "Calibration parameters should be passed for static quantization in the calibration key."
        # validate `aware_training`
        assert self.aware_training == False, "Quantization-Aware Training not supported."
@generate_doc_dataclass
@dataclass
class RunConfig(Run, _RunConfigDefaults, _RunConfigBase):
    """Class holding the parameters to launch a run."""
    # Fields and validation are fully inherited from the base classes above.
| 2,840 | 3,625 | 272 |
43ed48598a6360fbe18f54e747aef536f3e3507a | 3,111 | py | Python | examples/visualization/roi_erpimage_by_rt.py | andylikescodes/mne-python | 79ea57a4318d8d045f5966c26360b079f40a4865 | [
"BSD-3-Clause"
] | 1 | 2022-02-19T08:13:49.000Z | 2022-02-19T08:13:49.000Z | examples/visualization/roi_erpimage_by_rt.py | LiFeng-SECUC/mne-python | 732bb1f994e64e41a8e95dcc10dc98c22cac95c0 | [
"BSD-3-Clause"
] | null | null | null | examples/visualization/roi_erpimage_by_rt.py | LiFeng-SECUC/mne-python | 732bb1f994e64e41a8e95dcc10dc98c22cac95c0 | [
"BSD-3-Clause"
] | null | null | null | """
===========================================================
Plot single trial activity, grouped by ROI and sorted by RT
===========================================================
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
The EEGLAB example file, which contains an experiment with button press
responses to simple visual stimuli, is read in and response times are
calculated.
Regions of Interest are determined by the channel types (in 10/20 channel
notation, even channels are right, odd are left, and 'z' are central). The
median and the Global Field Power within each channel group is calculated,
and the trials are plotted, sorting by response time.
"""
# Authors: Jona Sassenhagen <jona.sassenhagen@gmail.com>
#
# License: BSD-3-Clause
# %%
import mne
from mne.event import define_target_events
from mne.channels import make_1020_channel_selections
print(__doc__)
# %%
# Load EEGLAB example data (a small EEG dataset)
data_path = mne.datasets.testing.data_path()
fname = data_path / 'EEGLAB' / 'test_raw.set'
event_id = {"rt": 1, "square": 2} # must be specified for str events
raw = mne.io.read_raw_eeglab(fname)
mapping = {
'EEG 000': 'Fpz', 'EEG 001': 'EOG1', 'EEG 002': 'F3', 'EEG 003': 'Fz',
'EEG 004': 'F4', 'EEG 005': 'EOG2', 'EEG 006': 'FC5', 'EEG 007': 'FC1',
'EEG 008': 'FC2', 'EEG 009': 'FC6', 'EEG 010': 'T7', 'EEG 011': 'C3',
'EEG 012': 'C4', 'EEG 013': 'Cz', 'EEG 014': 'T8', 'EEG 015': 'CP5',
'EEG 016': 'CP1', 'EEG 017': 'CP2', 'EEG 018': 'CP6', 'EEG 019': 'P7',
'EEG 020': 'P3', 'EEG 021': 'Pz', 'EEG 022': 'P4', 'EEG 023': 'P8',
'EEG 024': 'PO7', 'EEG 025': 'PO3', 'EEG 026': 'POz', 'EEG 027': 'PO4',
'EEG 028': 'PO8', 'EEG 029': 'O1', 'EEG 030': 'Oz', 'EEG 031': 'O2'
}
raw.rename_channels(mapping)
raw.set_channel_types({"EOG1": 'eog', "EOG2": 'eog'})
raw.set_montage('standard_1020')
events = mne.events_from_annotations(raw, event_id)[0]
# %%
# Create Epochs
# define target events:
# 1. find response times: distance between "square" and "rt" events
# 2. extract A. "square" events B. followed by a button press within 700 msec
tmax = 0.7
sfreq = raw.info["sfreq"]
reference_id, target_id = 2, 1
new_events, rts = define_target_events(events, reference_id, target_id, sfreq,
tmin=0., tmax=tmax, new_id=2)
epochs = mne.Epochs(raw, events=new_events, tmax=tmax + 0.1,
event_id={"square": 2})
# %%
# Plot using :term:`global field power`
# Parameters for plotting
order = rts.argsort() # sorting from fast to slow trials
selections = make_1020_channel_selections(epochs.info, midline="12z")
# The actual plots (GFP)
epochs.plot_image(group_by=selections, order=order, sigma=1.5,
overlay_times=rts / 1000., combine='gfp',
ts_args=dict(vlines=[0, rts.mean() / 1000.]))
# %%
# Plot using median
epochs.plot_image(group_by=selections, order=order, sigma=1.5,
overlay_times=rts / 1000., combine='median',
ts_args=dict(vlines=[0, rts.mean() / 1000.]))
| 35.758621 | 78 | 0.630023 | """
===========================================================
Plot single trial activity, grouped by ROI and sorted by RT
===========================================================
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
The EEGLAB example file, which contains an experiment with button press
responses to simple visual stimuli, is read in and response times are
calculated.
Regions of Interest are determined by the channel types (in 10/20 channel
notation, even channels are right, odd are left, and 'z' are central). The
median and the Global Field Power within each channel group is calculated,
and the trials are plotted, sorting by response time.
"""
# Authors: Jona Sassenhagen <jona.sassenhagen@gmail.com>
#
# License: BSD-3-Clause
# %%
import mne
from mne.event import define_target_events
from mne.channels import make_1020_channel_selections
print(__doc__)
# %%
# Load EEGLAB example data (a small EEG dataset)
data_path = mne.datasets.testing.data_path()
fname = data_path / 'EEGLAB' / 'test_raw.set'
event_id = {"rt": 1, "square": 2} # must be specified for str events
raw = mne.io.read_raw_eeglab(fname)
# Map the generic "EEG NNN" labels to standard 10/20 channel names so the
# channels can later be grouped into left/right/midline selections.
mapping = {
    'EEG 000': 'Fpz', 'EEG 001': 'EOG1', 'EEG 002': 'F3', 'EEG 003': 'Fz',
    'EEG 004': 'F4', 'EEG 005': 'EOG2', 'EEG 006': 'FC5', 'EEG 007': 'FC1',
    'EEG 008': 'FC2', 'EEG 009': 'FC6', 'EEG 010': 'T7', 'EEG 011': 'C3',
    'EEG 012': 'C4', 'EEG 013': 'Cz', 'EEG 014': 'T8', 'EEG 015': 'CP5',
    'EEG 016': 'CP1', 'EEG 017': 'CP2', 'EEG 018': 'CP6', 'EEG 019': 'P7',
    'EEG 020': 'P3', 'EEG 021': 'Pz', 'EEG 022': 'P4', 'EEG 023': 'P8',
    'EEG 024': 'PO7', 'EEG 025': 'PO3', 'EEG 026': 'POz', 'EEG 027': 'PO4',
    'EEG 028': 'PO8', 'EEG 029': 'O1', 'EEG 030': 'Oz', 'EEG 031': 'O2'
}
raw.rename_channels(mapping)
raw.set_channel_types({"EOG1": 'eog', "EOG2": 'eog'})
raw.set_montage('standard_1020')
events = mne.events_from_annotations(raw, event_id)[0]
# %%
# Create Epochs
# define target events:
# 1. find response times: distance between "square" and "rt" events
# 2. extract A. "square" events B. followed by a button press within 700 msec
tmax = 0.7
sfreq = raw.info["sfreq"]
reference_id, target_id = 2, 1
new_events, rts = define_target_events(events, reference_id, target_id, sfreq,
                                       tmin=0., tmax=tmax, new_id=2)
epochs = mne.Epochs(raw, events=new_events, tmax=tmax + 0.1,
                    event_id={"square": 2})
# %%
# Plot using :term:`global field power`
# Parameters for plotting
order = rts.argsort() # sorting from fast to slow trials
# NOTE(review): midline="12z" marks channel names containing 1, 2 or z as
# midline when building the Left/Right/Midline groups -- confirm in mne docs.
selections = make_1020_channel_selections(epochs.info, midline="12z")
# The actual plots (GFP)
epochs.plot_image(group_by=selections, order=order, sigma=1.5,
                  overlay_times=rts / 1000., combine='gfp',
                  ts_args=dict(vlines=[0, rts.mean() / 1000.]))
# %%
# Plot using median
epochs.plot_image(group_by=selections, order=order, sigma=1.5,
                  overlay_times=rts / 1000., combine='median',
                  ts_args=dict(vlines=[0, rts.mean() / 1000.]))
0cff792743ec078792babef838eedd3ca579a76c | 1,407 | py | Python | local_test.py | invenia/GitLabChangelog | 00d832e0552f3e6647a0030f3b3e0669d828b766 | [
"MIT"
] | null | null | null | local_test.py | invenia/GitLabChangelog | 00d832e0552f3e6647a0030f3b3e0669d828b766 | [
"MIT"
] | 10 | 2021-01-22T23:15:31.000Z | 2022-03-08T10:26:10.000Z | local_test.py | invenia/GitLabChangelog | 00d832e0552f3e6647a0030f3b3e0669d828b766 | [
"MIT"
] | 1 | 2021-03-17T06:03:23.000Z | 2021-03-17T06:03:23.000Z | # This file is intended for local testing when contributing to this repository
# Do not commit any changes
# You will need to generate a GitLab Personal Access Token to use this
from os import environ as env
import gitlab
from gitlabchangelog.changelog import Changelog
env["GITLAB_URL"] = "https://gitlab.invenia.ca"
env["GITLAB_API_TOKEN"] = "<the-personal-access-token-you-created>"
client = gitlab.Gitlab(env["GITLAB_URL"], private_token=env["GITLAB_API_TOKEN"])
repo = "invenia/Example.jl"
p = client.projects.get(repo, lazy=True)
template = """
This is release {{ version }} of {{ package }}.
{% if merge_requests %}
**Summary:**
{% for merge_request in merge_requests %}
- {{ merge_request.labels }} {{ merge_request.title }} (!{{ merge_request.number }})
{% endfor %}
{% endif %}
{% if previous_release %}
**Changeset:** {{ compare_url }})
{% endif %}
"""
changelog = Changelog(p, template)
tags = p.tags.list(all=False)
for tag in tags:
commit = tag.commit["id"]
version = tag.name
release_notes = changelog.get(version, commit)
print(release_notes)
print("\n-----------------------------------------------------------------------\n")
# Note the line below will actually set the release notes in the repository used
# Should only be used if that is the intended behaviour
# tag.set_release_description(release_notes)
| 31.977273 | 92 | 0.655295 | # This file is intended for local testing when contributing to this repository
# Do not commit any changes
# You will need to generate a GitLab Personal Access Token to use this
from os import environ as env
import gitlab
from gitlabchangelog.changelog import Changelog
env["GITLAB_URL"] = "https://gitlab.invenia.ca"
env["GITLAB_API_TOKEN"] = "<the-personal-access-token-you-created>"
client = gitlab.Gitlab(env["GITLAB_URL"], private_token=env["GITLAB_API_TOKEN"])
repo = "invenia/Example.jl"
# lazy=True defers the API call until the project is actually used.
p = client.projects.get(repo, lazy=True)
template = """
This is release {{ version }} of {{ package }}.
{% if merge_requests %}
**Summary:**
{% for merge_request in merge_requests %}
- {{ merge_request.labels }} {{ merge_request.title }} (!{{ merge_request.number }})
{% endfor %}
{% endif %}
{% if previous_release %}
**Changeset:** {{ compare_url }})
{% endif %}
"""
changelog = Changelog(p, template)
# NOTE(review): all=False returns only the first page of tags -- confirm that
# is intended for this smoke test.
tags = p.tags.list(all=False)
# Render (but do not publish) the release notes for each existing tag.
for tag in tags:
    commit = tag.commit["id"]
    version = tag.name
    release_notes = changelog.get(version, commit)
    print(release_notes)
    print("\n-----------------------------------------------------------------------\n")
# Note the line below will actually set the release notes in the repository used
# Should only be used if that is the intended behaviour
# tag.set_release_description(release_notes)
fb31d59284b6c9978be82fc5fa54580a4046af2c | 2,117 | py | Python | tree.py | Crowley-VS/Tree | eec5be3adb00aa8a9f0a4d1131bf36d2121271be | [
"MIT"
] | null | null | null | tree.py | Crowley-VS/Tree | eec5be3adb00aa8a9f0a4d1131bf36d2121271be | [
"MIT"
] | null | null | null | tree.py | Crowley-VS/Tree | eec5be3adb00aa8a9f0a4d1131bf36d2121271be | [
"MIT"
] | null | null | null | import os
from pathlib import Path
from colorama import Fore, init
if __name__ == '__main__':
Tree().main()
| 44.104167 | 129 | 0.552197 | import os
from pathlib import Path
from colorama import Fore, init
class Tree:
    '''Interactive console utility that renders a directory tree with
    ANSI-colored directory names (colors from colorama).'''

    def __init__(self):
        '''Initialize Tree with rendering chars and colors.'''
        self.rendering_chars = {'line': '│ ', 'middle': '├───', 'last': '└───', 'space': ' '}
        self.dir_color = Fore.CYAN
        self.dir_color_reset = Fore.RESET

    def go_through(self, _path, prefix = ''):
        '''Yield one rendered line per entry under ``_path``, recursing into
        sub-directories.

        Directory names are prefixed with the ANSI color code; connectors
        and guide lines come from ``self.rendering_chars``.'''
        entries = os.listdir(_path)
        last_index = len(entries) - 1
        for index, entry in enumerate(entries):
            is_last = index == last_index
            # The last entry gets the corner connector; once it is reached
            # there is nothing below it, so the vertical guide line used for
            # its children is replaced by blank padding.
            connector = self.rendering_chars['last' if is_last else 'middle']
            extension = self.rendering_chars['space' if is_last else 'line']
            full_path = os.path.join(_path, entry)
            if os.path.isdir(full_path):
                yield prefix + connector + self.dir_color + entry
                yield from self.go_through(full_path, prefix=prefix + extension)
            else:
                yield prefix + connector + entry

    def main(self):
        '''Start the Tree script.'''
        # autoreset restores the default color after every print().
        init(autoreset=True)
        print('Tree')
        while True:
            print('(In order to exit, enter \'q\')')
            path = input('Please enter a path: ')
            if path == 'q':
                break
            try:
                for result in self.go_through(path):
                    print(result)
            except FileNotFoundError:
                print('Tree can\'t find the path specified.')
# Run the interactive tree viewer only when executed as a script.
if __name__ == '__main__':
    Tree().main()
| 0 | 1,991 | 23 |
cbe42342e44e002e9e3412fb77108ecaf2dc2886 | 5,907 | py | Python | tests/models/test_card_identifier.py | lejion/django-sagepaypi | 86eeff5afb9681fcc820e2d75910c4e642fefe85 | [
"MIT"
] | null | null | null | tests/models/test_card_identifier.py | lejion/django-sagepaypi | 86eeff5afb9681fcc820e2d75910c4e642fefe85 | [
"MIT"
] | 12 | 2019-03-06T14:27:38.000Z | 2019-03-12T21:45:59.000Z | tests/models/test_card_identifier.py | lejion/django-sagepaypi | 86eeff5afb9681fcc820e2d75910c4e642fefe85 | [
"MIT"
] | 1 | 2021-01-06T12:09:42.000Z | 2021-01-06T12:09:42.000Z | import uuid
from django.core.exceptions import ValidationError
from django.db import models
from sagepaypi.constants import COUNTRY_CHOICES, US_STATE_CHOICES
from sagepaypi.models import CardIdentifier
from tests.test_case import AppTestCase
# fields
# properties
# validation
| 34.343023 | 97 | 0.651261 | import uuid
from django.core.exceptions import ValidationError
from django.db import models
from sagepaypi.constants import COUNTRY_CHOICES, US_STATE_CHOICES
from sagepaypi.models import CardIdentifier
from tests.test_case import AppTestCase
class TestModel(AppTestCase):
    """Field, property and validation checks for the CardIdentifier model."""

    def _assert_char_field(self, name, max_length=None, nullable=False):
        """Assert ``name`` is a CharField, optionally null/blank-able and
        with the given ``max_length``."""
        field = self.get_field(CardIdentifier, name)
        if nullable:
            self.assertModelField(field, models.CharField, True, True)
        else:
            self.assertModelField(field, models.CharField)
        if max_length is not None:
            self.assertEqual(field.max_length, max_length)

    def _build_identifier(self, **fields):
        """Return an unsaved CardIdentifier with common defaults applied."""
        attrs = {
            'first_name': 'Foo',
            'last_name': 'User',
            'billing_address_1': '88 The Road',
            'billing_city': 'City',
        }
        attrs.update(fields)
        return CardIdentifier(**attrs)

    # fields

    def test_id(self):
        pk_field = self.get_field(CardIdentifier, 'id')
        self.assertModelField(pk_field, models.UUIDField)
        self.assertFalse(pk_field.editable)
        self.assertTrue(pk_field.primary_key)

    def test_created_at(self):
        created_field = self.get_field(CardIdentifier, 'created_at')
        self.assertModelField(created_field, models.DateTimeField, blank=True)
        self.assertTrue(created_field.auto_now_add)

    def test_first_name(self):
        self._assert_char_field('first_name', max_length=100)

    def test_last_name(self):
        self._assert_char_field('last_name', max_length=100)

    def test_billing_address_1(self):
        self._assert_char_field('billing_address_1', max_length=255)

    def test_billing_address_2(self):
        self._assert_char_field('billing_address_2', max_length=255, nullable=True)

    def test_billing_city(self):
        self._assert_char_field('billing_city', max_length=255)

    def test_billing_postal_code(self):
        self._assert_char_field('billing_postal_code', max_length=12, nullable=True)

    def test_billing_country(self):
        self._assert_char_field('billing_country', max_length=2)

    def test_billing_state(self):
        self._assert_char_field('billing_state', max_length=2, nullable=True)

    def test_merchant_session_key(self):
        self._assert_char_field('merchant_session_key', max_length=100)

    def test_card_type(self):
        self._assert_char_field('card_type', max_length=255)

    def test_last_four_digits(self):
        self._assert_char_field('last_four_digits', max_length=4)

    def test_expiry_date(self):
        # No max_length assertion here, mirroring the model's definition.
        self._assert_char_field('expiry_date')

    def test_card_identifier(self):
        self._assert_char_field('card_identifier', max_length=100)

    # properties

    def test_str(self):
        identifier_pk = uuid.uuid4()
        self.assertEqual(str(CardIdentifier(pk=identifier_pk)), str(identifier_pk))

    def test_billing_address(self):
        identifier = CardIdentifier(
            billing_address_1='88 The Road',
            billing_address_2='Some Estate',
            billing_postal_code='412',
            billing_city='City',
            billing_country='US',
            billing_state='AL'
        )
        expected = {
            'address1': identifier.billing_address_1,
            'address2': identifier.billing_address_2,
            'city': identifier.billing_city,
            'country': identifier.billing_country,
            'postalCode': identifier.billing_postal_code,
            'state': identifier.billing_state
        }
        self.assertEqual(identifier.billing_address, expected)

    def test_display_text(self):
        identifier = CardIdentifier(card_type='Visa', last_four_digits='1234')
        self.assertEqual(identifier.display_text, 'Visa (1234)')

    # validation

    def test_postal_code_required_when_country_not_IE(self):
        identifier = self._build_identifier(billing_country='GB')

        with self.assertRaises(ValidationError) as e:
            identifier.clean()

        self.assertEqual(e.exception.args[0], {'billing_postal_code': 'This field is required.'})

    def test_postal_code_not_required_when_country_is_IE(self):
        self._build_identifier(billing_country='IE').clean()

    def test_state_required_when_country_is_US(self):
        identifier = self._build_identifier(billing_postal_code='412', billing_country='US')

        with self.assertRaises(ValidationError) as e:
            identifier.clean()

        self.assertEqual(e.exception.args[0], {'billing_state': 'This field is required.'})

    def test_state_not_required_when_country_is_not_US(self):
        self._build_identifier(billing_country='IE').clean()
| 4,986 | 8 | 617 |
e02d3f9446fae3b0835e7a0a413eec780fc039ae | 647 | py | Python | setup.py | Lokaltog/axis | f602ef8089ed0332317274e0433f4ede75109533 | [
"MIT"
] | null | null | null | setup.py | Lokaltog/axis | f602ef8089ed0332317274e0433f4ede75109533 | [
"MIT"
] | null | null | null | setup.py | Lokaltog/axis | f602ef8089ed0332317274e0433f4ede75109533 | [
"MIT"
] | null | null | null | """Setup for Axis."""
from setuptools import setup
# Packaging metadata for the 'axis' distribution.
setup(
    name="axis",
    packages=["axis"],
    # NOTE(review): download_url below is pinned to tag v33 -- keep it in
    # sync with 'version' on every release.
    version="33",
    description="A Python library for communicating with devices from Axis Communications",
    author="Robert Svensson",
    author_email="Kane610@users.noreply.github.com",
    license="MIT",
    url="https://github.com/Kane610/axis",
    download_url="https://github.com/Kane610/axis/archive/v33.tar.gz",
    install_requires=["attrs", "requests", "xmltodict"],
    keywords=["axis", "vapix", "onvif", "event stream", "homeassistant"],
    classifiers=["Natural Language :: English", "Programming Language :: Python :: 3"],
)
| 34.052632 | 91 | 0.676971 | """Setup for Axis."""
from setuptools import setup
setup(
name="axis",
packages=["axis"],
version="33",
description="A Python library for communicating with devices from Axis Communications",
author="Robert Svensson",
author_email="Kane610@users.noreply.github.com",
license="MIT",
url="https://github.com/Kane610/axis",
download_url="https://github.com/Kane610/axis/archive/v33.tar.gz",
install_requires=["attrs", "requests", "xmltodict"],
keywords=["axis", "vapix", "onvif", "event stream", "homeassistant"],
classifiers=["Natural Language :: English", "Programming Language :: Python :: 3"],
)
| 0 | 0 | 0 |
ce0390b04788ff18fa95b1ea11e3466267edb6f8 | 3,742 | py | Python | Research/squeezenet.py | ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge | d8b06969c9393cfce6d9ac96b58c9d365ff4369d | [
"MIT"
] | null | null | null | Research/squeezenet.py | ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge | d8b06969c9393cfce6d9ac96b58c9d365ff4369d | [
"MIT"
] | null | null | null | Research/squeezenet.py | ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge | d8b06969c9393cfce6d9ac96b58c9d365ff4369d | [
"MIT"
] | null | null | null | from keras import backend as K
from keras.layers import Input, Convolution2D, MaxPooling2D, Activation, concatenate, Dense, add, GaussianNoise
from keras.layers import GlobalAveragePooling2D
from keras.models import Model
from keras.regularizers import l2
sq1x1 = "squeeze1x1"
exp1x1 = "expand1x1"
exp3x3 = "expand3x3"
selu = "selu_"
# Modular function for Fire Node
# Original SqueezeNet from paper.
def SqueezeNet(
input_shape=None,
filters=8,
weight_decay=0.,
classes=2):
"""Instantiates the SqueezeNet architecture.
"""
assert filters % 2 == 0, 'Number of filters must be 2*n, n > 1'
img_input = Input(shape=input_shape)
x = Convolution2D(filters, (3, 3), padding='valid',
use_bias=True,
kernel_regularizer=l2(weight_decay),
name='conv1')(img_input)
x = Activation('selu', name='selu_conv1')(x)
x = fire_module(x, fire_id=2, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
residual = x
x = fire_module(x, fire_id=3, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = add([x, residual])
filters *= 2
x = fire_module(x, fire_id=4, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)
residual = x
x = fire_module(x, fire_id=5, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = add([x, residual])
filters *=2
x = fire_module(x, fire_id=6, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
residual = x
x = fire_module(x, fire_id=7, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = add([x, residual])
filters *= 2
x = fire_module(x, fire_id=8, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool2')(x)
residual = x
x = fire_module(x, fire_id=9, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = add([x, residual])
x = Convolution2D(filters, (1, 1), padding='valid',
use_bias=True,
kernel_regularizer=l2(weight_decay),
name='conv10')(x)
x = Activation('selu', name='selu_conv10')(x)
x = GlobalAveragePooling2D()(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
inputs = img_input
model = Model(inputs, x, name='squeezenet')
return model
| 36.686275 | 111 | 0.616782 | from keras import backend as K
from keras.layers import Input, Convolution2D, MaxPooling2D, Activation, concatenate, Dense, add, GaussianNoise
from keras.layers import GlobalAveragePooling2D
from keras.models import Model
from keras.regularizers import l2
sq1x1 = "squeeze1x1"
exp1x1 = "expand1x1"
exp3x3 = "expand3x3"
selu = "selu_"
# Modular function for Fire Node
def fire_module(x, fire_id, squeeze=16, expand=64, weight_decay=0.):
    """Append a SqueezeNet "fire" module to tensor ``x``.

    A 1x1 squeeze convolution reduces the channel count, then parallel
    1x1 and 3x3 expand convolutions are concatenated on the channel axis.
    Every convolution is followed by a SELU activation and carries an L2
    kernel regularizer with factor ``weight_decay``.
    """
    prefix = 'fire' + str(fire_id) + '/'
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3

    # Squeeze: 1x1 bottleneck.
    squeezed = Convolution2D(squeeze, (1, 1), padding='valid',
                             kernel_regularizer=l2(weight_decay),
                             use_bias=True,
                             name=prefix + sq1x1)(x)
    squeezed = Activation('selu', name=prefix + selu + sq1x1)(squeezed)

    # Expand: parallel 1x1 and 3x3 convolutions over the squeezed tensor.
    expand_1x1 = Convolution2D(expand, (1, 1), padding='valid',
                               kernel_regularizer=l2(weight_decay),
                               use_bias=True,
                               name=prefix + exp1x1)(squeezed)
    expand_1x1 = Activation('selu', name=prefix + selu + exp1x1)(expand_1x1)

    expand_3x3 = Convolution2D(expand, (3, 3), padding='same',
                               kernel_regularizer=l2(weight_decay),
                               use_bias=True,
                               name=prefix + exp3x3)(squeezed)
    expand_3x3 = Activation('selu', name=prefix + selu + exp3x3)(expand_3x3)

    return concatenate([expand_1x1, expand_3x3], axis=channel_axis, name=prefix + 'concat')
# Original SqueezeNet from paper.
def SqueezeNet(
input_shape=None,
filters=8,
weight_decay=0.,
classes=2):
"""Instantiates the SqueezeNet architecture.
"""
assert filters % 2 == 0, 'Number of filters must be 2*n, n > 1'
img_input = Input(shape=input_shape)
x = Convolution2D(filters, (3, 3), padding='valid',
use_bias=True,
kernel_regularizer=l2(weight_decay),
name='conv1')(img_input)
x = Activation('selu', name='selu_conv1')(x)
x = fire_module(x, fire_id=2, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
residual = x
x = fire_module(x, fire_id=3, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = add([x, residual])
filters *= 2
x = fire_module(x, fire_id=4, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)
residual = x
x = fire_module(x, fire_id=5, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = add([x, residual])
filters *=2
x = fire_module(x, fire_id=6, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
residual = x
x = fire_module(x, fire_id=7, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = add([x, residual])
filters *= 2
x = fire_module(x, fire_id=8, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool2')(x)
residual = x
x = fire_module(x, fire_id=9, squeeze=filters // 2, expand=filters, weight_decay=weight_decay)
x = add([x, residual])
x = Convolution2D(filters, (1, 1), padding='valid',
use_bias=True,
kernel_regularizer=l2(weight_decay),
name='conv10')(x)
x = Activation('selu', name='selu_conv10')(x)
x = GlobalAveragePooling2D()(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
inputs = img_input
model = Model(inputs, x, name='squeezenet')
return model
| 1,089 | 0 | 22 |
c1cd1f7d29474ae84a34ff288a63dd82125505f9 | 178 | py | Python | EX017.py | gjaosdij/PythonProject | ae27990efa93462b632f165d13c08c7fd93beb38 | [
"MIT"
] | null | null | null | EX017.py | gjaosdij/PythonProject | ae27990efa93462b632f165d13c08c7fd93beb38 | [
"MIT"
] | null | null | null | EX017.py | gjaosdij/PythonProject | ae27990efa93462b632f165d13c08c7fd93beb38 | [
"MIT"
] | null | null | null | co = float(input('Medida do cateto oposto: '))
ca = float(input('Medida do cateto adjacente:'))
hi = ((co**2) + (ca**2)) ** (1/2)
print('A hipotenusa mede {:.2f}.'.format(hi))
| 25.428571 | 48 | 0.601124 | co = float(input('Medida do cateto oposto: '))
ca = float(input('Medida do cateto adjacente:'))
hi = ((co**2) + (ca**2)) ** (1/2)
print('A hipotenusa mede {:.2f}.'.format(hi))
| 0 | 0 | 0 |
dddf243717789d4d4c72a9e3059e1d4d30d0b07c | 1,929 | py | Python | app/airtable/base_map_by_geographic_area/geo_area_contacts.py | WildflowerSchools/wf-airtable-api | 963021e5108462d33efa222fedb00890e1788ad6 | [
"MIT"
] | null | null | null | app/airtable/base_map_by_geographic_area/geo_area_contacts.py | WildflowerSchools/wf-airtable-api | 963021e5108462d33efa222fedb00890e1788ad6 | [
"MIT"
] | null | null | null | app/airtable/base_map_by_geographic_area/geo_area_contacts.py | WildflowerSchools/wf-airtable-api | 963021e5108462d33efa222fedb00890e1788ad6 | [
"MIT"
] | null | null | null | from typing import Optional
from pydantic import BaseModel, Field, validator
from app.airtable.response import AirtableResponse, ListAirtableResponse
from app.airtable.validators import get_first_or_default_none
| 37.096154 | 95 | 0.716952 | from typing import Optional
from pydantic import BaseModel, Field, validator
from app.airtable.response import AirtableResponse, ListAirtableResponse
from app.airtable.validators import get_first_or_default_none
# Typed view of the fields on one "Geographic Area Contact" Airtable record.
# Field aliases match the human-readable Airtable column names.
class AirtableGeoAreaContactFields(BaseModel):
    area_name: Optional[str] = Field(alias="Area Name")
    area_type: Optional[str] = Field(alias="Area Type")
    city_radius: Optional[int] = Field(alias="City Radius", default=30)
    polygon_coordinates: Optional[str] = Field(alias="Polygon Coordinates")
    first_contact_email: Optional[str] = Field(alias="First Contact Email")
    assigned_rse: Optional[str] = Field(alias="Assigned RSE")
    assigned_rse_synced_record_id: Optional[str] = Field(alias="Assigned RSE Synced Record ID")
    assigned_rse_name: Optional[str] = Field(alias="Assigned RSE Name")
    hub: Optional[str] = Field(alias="Hub")
    hub_synced_record_id: Optional[str] = Field(alias="Hub Synced Record ID")
    hub_name: Optional[str] = Field(alias="Hub Name")
    sendgrid_template_id: Optional[str] = Field(alias="Sendgrid Template ID")
    latitude: Optional[float] = Field(alias="Latitude")
    longitude: Optional[float] = Field(alias="Longitude")
    geocode: Optional[str] = Field(alias="Geocode")
    # reusable validator
    # Pre-validator applying get_first_or_default_none to the listed fields
    # before normal parsing -- presumably unwrapping list-valued Airtable
    # lookup fields to their first element (or None).
    # NOTE(review): first_contact_email and sendgrid_template_id are not in
    # this list -- confirm those columns always arrive as scalars.
    _get_first_or_default_none = validator(
        "area_name",
        "area_type",
        "polygon_coordinates",
        "city_radius",
        "assigned_rse",
        "assigned_rse_synced_record_id",
        "assigned_rse_name",
        "hub",
        "hub_synced_record_id",
        "hub_name",
        "latitude",
        "longitude",
        "geocode",
        pre=True,
        allow_reuse=True,
    )(get_first_or_default_none)
# Single Airtable record wrapper whose ``fields`` payload is typed as the
# geographic-area contact schema.
class AirtableGeoAreaContactResponse(AirtableResponse):
    fields: AirtableGeoAreaContactFields
# List-of-records response; the root value is the list itself.
class ListAirtableGeoAreaContactResponse(ListAirtableResponse):
    __root__: list[AirtableGeoAreaContactResponse]
| 0 | 1,643 | 69 |
59bffbb2c39024844234f6defe4253e770fb15bf | 1,892 | py | Python | MoTrackSemanticSegmentation/scripts/testMTurk.py | ryerrabelli/MoTrackTherapyMobilePublic | 6c554854aec43ab030e1f884b639a2ed72847646 | [
"MIT"
] | 2 | 2020-04-18T22:13:47.000Z | 2020-04-23T15:08:05.000Z | MoTrackSemanticSegmentation/scripts/testMTurk.py | ryerrabelli/MoTrackTherapyMobilePublic | 6c554854aec43ab030e1f884b639a2ed72847646 | [
"MIT"
] | null | null | null | MoTrackSemanticSegmentation/scripts/testMTurk.py | ryerrabelli/MoTrackTherapyMobilePublic | 6c554854aec43ab030e1f884b639a2ed72847646 | [
"MIT"
] | null | null | null | '''
import boto3
from boto.mturk.connection import MTurkConnection
from boto.mturk.question import HTMLQuestion
from boto.mturk.layoutparam import LayoutParameter
from boto.mturk.layoutparam import LayoutParameters
import json
# Create your connection to MTurk
mtc = MTurkConnection(aws_access_key_id='AKIAIBPHQKOJQZULHJSA',
aws_secret_access_key='2EDgdoD4lFrAUd4NHqWnF9qoQBYp1ekV6CVlhUTS',
host='mechanicalturk.sandbox.amazonaws.com') #host='mechanicalturk.amazonaws.com')
account_balance = mtc.get_account_balance()[0]
print("You have a balance of: {}".format(account_balance))
'''
import boto3
import json
region_name = 'us-east-1'
aws_access_key_id = 'AKIAIBPHQKOJQZULHJSA'
aws_secret_access_key = '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'
endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
# Uncomment this line to use in production
# endpoint_url = 'https://mturk-requester.us-east-1.amazonaws.com'
mtc = boto3.client(
'mturk',
endpoint_url=endpoint_url,
region_name=region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
# This will return $10,000.00 in the MTurk Developer Sandbox
print(mtc.get_account_balance()['AvailableBalance'])
# This is the value you received when you created the HIT
# You can also retrieve HIT IDs by calling GetReviewableHITs
# and SearchHITs. See the links to read more about these APIs.
hit_id = "386T3MLZLNVRU564VQVZSIKA8D580B"
result = mtc.get_assignments(hit_id)
assignment = result[0]
worker_id = assignment.WorkerId
for answer in assignment.answers[0]:
if answer.qid == 'annotation_data':
worker_answer = json.loads(answer.fields[0])
print("The Worker with ID {} gave the answer {}".format(worker_id, worker_answer))
left = worker_answer[0]['left']
top = worker_answer[0]['top']
print("The top and left coordinates are {} and {}".format(top, left)) | 34.4 | 82 | 0.786998 | '''
import boto3
from boto.mturk.connection import MTurkConnection
from boto.mturk.question import HTMLQuestion
from boto.mturk.layoutparam import LayoutParameter
from boto.mturk.layoutparam import LayoutParameters
import json
# Create your connection to MTurk
mtc = MTurkConnection(aws_access_key_id='AKIAIBPHQKOJQZULHJSA',
aws_secret_access_key='2EDgdoD4lFrAUd4NHqWnF9qoQBYp1ekV6CVlhUTS',
host='mechanicalturk.sandbox.amazonaws.com') #host='mechanicalturk.amazonaws.com')
account_balance = mtc.get_account_balance()[0]
print("You have a balance of: {}".format(account_balance))
'''
import boto3
import json
region_name = 'us-east-1'
aws_access_key_id = 'AKIAIBPHQKOJQZULHJSA'
aws_secret_access_key = '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'
endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
# Uncomment this line to use in production
# endpoint_url = 'https://mturk-requester.us-east-1.amazonaws.com'
mtc = boto3.client(
'mturk',
endpoint_url=endpoint_url,
region_name=region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
# This will return $10,000.00 in the MTurk Developer Sandbox
print(mtc.get_account_balance()['AvailableBalance'])
# This is the value you received when you created the HIT
# You can also retrieve HIT IDs by calling GetReviewableHITs
# and SearchHITs. See the links to read more about these APIs.
hit_id = "386T3MLZLNVRU564VQVZSIKA8D580B"
result = mtc.get_assignments(hit_id)
assignment = result[0]
worker_id = assignment.WorkerId
for answer in assignment.answers[0]:
if answer.qid == 'annotation_data':
worker_answer = json.loads(answer.fields[0])
print("The Worker with ID {} gave the answer {}".format(worker_id, worker_answer))
left = worker_answer[0]['left']
top = worker_answer[0]['top']
print("The top and left coordinates are {} and {}".format(top, left)) | 0 | 0 | 0 |
e961b2c04df86d84fc7b10231a7e759b601bdbc9 | 5,694 | py | Python | final_model/space_recognition_original.py | AEyeAlliance/aeye-alliance | cd01f46e7d404558f034dd5e8eb7f251e2f4c7aa | [
"MIT"
] | 6 | 2018-06-08T19:08:58.000Z | 2019-09-07T14:15:10.000Z | final_model/space_recognition_original.py | sanglhx/aeye-alliance | ed5052af4a2130ed88604049d338adfd162a3b64 | [
"MIT"
] | null | null | null | final_model/space_recognition_original.py | sanglhx/aeye-alliance | ed5052af4a2130ed88604049d338adfd162a3b64 | [
"MIT"
] | 4 | 2018-06-22T16:17:11.000Z | 2019-09-22T13:28:55.000Z | import torch
import torch.nn as nn
# import torch.onnx
# import onnx
# import onnx_caffe2.backend
# from onnx import checker, helper
import torch.optim as optim
import numpy as np
import cv2
from PIL import Image
import torch.utils.model_zoo as model_zoo
import torch.onnx
# print(make_prediction("test/Prairie.jpg"))
# print(make_prediction("test/He_was_happy..png"))
# print(make_prediction("test/the_little.png"))
# print(make_prediction("test/with_his_family.png"))
# print(make_prediction("test/with_his_mouth..png"))
# print(make_prediction("test/would_run_and_get_it.png")) | 31.285714 | 115 | 0.537232 | import torch
import torch.nn as nn
# import torch.onnx
# import onnx
# import onnx_caffe2.backend
# from onnx import checker, helper
import torch.optim as optim
import numpy as np
import cv2
from PIL import Image
import torch.utils.model_zoo as model_zoo
import torch.onnx
def export_model():
model = CNN()
model.load_state_dict(torch.load("model.pth"))
# Input to the model
x = torch.randn(5, 3, 28, 28)
# Export the model
torch_out = torch.onnx._export(model, # model being run
x, # model input (or a tuple for multiple inputs)
"model.onnx-2",
# where to save the model (can be a file or file-like object)
export_params=True) # store the trained parameter weights inside the model file
def inspect_model():
# Input image into the ONNX model
onnx_model = onnx.load("model.onnx")
model = onnx_caffe2.backend.prepare(onnx_model)
image = Image.open("z.jpg")
# # image = image.convert('RGB')
image = np.array(image)
image = cv2.resize(image, (28, 28))
image = image.astype(np.float32) / 255.0
image = torch.from_numpy(image[None, :, :, :])
image = image.permute(0, 3, 1, 2)
W = {model.graph.input[0].name: image.data.numpy()}
model_out = model.run(W)[0]
print(model_out)
#
# # onnx_model(image)
#
# print(onnx_model)
# onnx.checker.check_model(onnx_model)
# # print(onnx.helper.printable_graph(onnx_model.graph))
def make_prediction(img_path):
model = CNN()
model.load_state_dict(torch.load("final_model/model.pth"))
image = Image.open(img_path)
image = image.convert('RGB')
width, height = image.size
num = round(width/height/0.78)
w = width/num
letters = []
for i in range(0, num):
cropped = image.crop((i * w, 0, (i + 1) * w, height))
# cropped.show()
cropped = np.array(cropped)
cropped = cv2.resize(cropped, (28, 28))
cropped = cropped.astype(np.float32) / 255.0
cropped = torch.from_numpy(cropped[None, :, :, :])
cropped = cropped.permute(0, 3, 1, 2)
predicted_tensor = model(cropped)
_, predicted_letter = torch.max(predicted_tensor, 1)
if int(predicted_letter) == 26:
letters.append(chr(32))
elif int(predicted_letter) == 27:
letters.append(chr(35))
elif int(predicted_letter) == 28:
letters.append(chr(46))
elif int(predicted_letter) == 29:
letters.append(chr(44))
elif int(predicted_letter) == 30:
letters.append(chr(58))
elif int(predicted_letter) == 31:
letters.append(chr(92))
elif int(predicted_letter) == 32:
letters.append(chr(45))
elif int(predicted_letter) == 33:
letters.append(chr(59))
elif int(predicted_letter) == 34:
letters.append(chr(63))
elif int(predicted_letter) == 35:
letters.append(chr(33))
elif int(predicted_letter) == 36:
letters.append(chr(126))
else:
letters.append(chr(97 + predicted_letter))
output = ""
number = False
capL = False
capW = False
for j in letters:
if j == '#':
number = True
elif ord(j) == 126:
if capL:
capW = True
capL = True
elif j == ' ':
number = False
capL = False
capW = False
output = output + j
elif not number:
if capW and ord(j) in range(97, 123):
output = output + chr(ord(j) - 32)
elif capL and ord(j) in range(97, 123):
output = output + chr(ord(j) - 32)
capL = False
else:
output = output + j
else:
if ord(j) in range(97, 106):
output = output + chr(ord(j)-48)
elif ord(j) == 106:
output = output + chr(48)
else:
output = output + j
return output
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.block1 = nn.Sequential(
# 3x28x28
nn.Conv2d(in_channels=3,
out_channels=16,
kernel_size=5,
stride=1,
padding=2),
# 16x28x28
nn.MaxPool2d(kernel_size=2),
# 16x14x14
nn.LeakyReLU()
)
# 16x14x14
self.block2 = nn.Sequential(
nn.Conv2d(in_channels=16,
out_channels=32,
kernel_size=5,
stride=1,
padding=2),
# 32x14x14
nn.MaxPool2d(kernel_size=2),
# 32x7x7
nn.LeakyReLU()
)
# linearly
self.block3 = nn.Sequential(
nn.Linear(32 * 7 * 7, 100),
nn.LeakyReLU(),
nn.Linear(100, 37)
)
# 1x36
def forward(self, x):
out = self.block1(x)
out = self.block2(out)
# flatten the dataset
# ipdb; ipdb.set_trace()
out = out.view(-1, 32 * 7 * 7)
out = self.block3(out)
return out
# print(make_prediction("test/Prairie.jpg"))
# print(make_prediction("test/He_was_happy..png"))
# print(make_prediction("test/the_little.png"))
# print(make_prediction("test/with_his_family.png"))
# print(make_prediction("test/with_his_mouth..png"))
# print(make_prediction("test/would_run_and_get_it.png")) | 4,966 | 0 | 145 |
6528503a87be4a0b325028a625a1f3fc3d7c48e0 | 998 | py | Python | src/MOSIM/abstraction/access/local/local_adapter_client.py | dfki-asr/MMIPython-Core | 2f4b51ffde606c45661d9dbd5153576f919bdb8b | [
"MIT"
] | null | null | null | src/MOSIM/abstraction/access/local/local_adapter_client.py | dfki-asr/MMIPython-Core | 2f4b51ffde606c45661d9dbd5153576f919bdb8b | [
"MIT"
] | null | null | null | src/MOSIM/abstraction/access/local/local_adapter_client.py | dfki-asr/MMIPython-Core | 2f4b51ffde606c45661d9dbd5153576f919bdb8b | [
"MIT"
] | null | null | null | ## SPDX-License-Identifier: MIT
## The content of this file has been developed in the context of the MOSIM research project.
## Original author(s): Jannes Lehwald
# -*- coding: utf-8 -*-
"""
"""
from MOSIM.mmi.register import MMIAdapter
#from MMIStandard import MMIAdapter
from MOSIM.abstraction.access.interface.adapter_client import IAdapterClient
class LocalAdapterClient(IAdapterClient):
    """
    A wrapper around a locally instantiated adapter client connection
    Attributes
    ----------
    _access : MMIAdapter.Iface
        The actual access
    """
    def __init__(self, instance):
        """
        Constructor which wraps an already-created local adapter instance.
        Parameters
        ----------
        instance : MMIAdapter.Iface
            The local instance
        """
        # NOTE(review): assert is stripped when Python runs with -O; if this
        # type check must always hold, raise TypeError explicitly instead.
        assert(isinstance(instance, MMIAdapter.Iface)), "The instance is no MMIAdapter"
        super(LocalAdapterClient, self).__init__()
        self._access = instance
self._access = instance | 24.341463 | 92 | 0.632265 | ## SPDX-License-Identifier: MIT
## The content of this file has been developed in the context of the MOSIM research project.
## Original author(s): Jannes Lehwald
# -*- coding: utf-8 -*-
"""
"""
from MOSIM.mmi.register import MMIAdapter
#from MMIStandard import MMIAdapter
from MOSIM.abstraction.access.interface.adapter_client import IAdapterClient
class LocalAdapterClient(IAdapterClient):
"""
A wrapper for an adapter client connection
Attributes
----------
_acces : MMIAdapter.Iface
The actual access
"""
def __init__(self, instance):
"""
Constructor which needs an address, a port and an access_type.
Parameters
----------
instance : MMIAdapter.Iface
The local instance
"""
assert(isinstance(instance, MMIAdapter.Iface)), "The instance is no MMIAdapter"
super(LocalAdapterClient, self).__init__()
self._access = instance | 0 | 0 | 0 |
4d013ccbd7526b2b29195f2123aa1bda69e2eba7 | 3,028 | py | Python | src/cli/cli.py | simondotsh/SidResolver | 4435970199fcb9aeeab836393782f9924a4a6872 | [
"MIT"
] | null | null | null | src/cli/cli.py | simondotsh/SidResolver | 4435970199fcb9aeeab836393782f9924a4a6872 | [
"MIT"
] | null | null | null | src/cli/cli.py | simondotsh/SidResolver | 4435970199fcb9aeeab836393782f9924a4a6872 | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
from getpass import getpass
from os.path import isfile, expanduser
from sys import exit | 30.28 | 79 | 0.575297 | from argparse import ArgumentParser
from getpass import getpass
from os.path import isfile, expanduser
from sys import exit
class Cli:
    """Command-line interface: argument parsing and SID input validation."""

    @classmethod
    def parse_and_validate(cls):
        """Parse argv, normalize credentials and return the namespace.

        Prompts for a password when neither --password nor --nt-hash was
        supplied. impacket expects unset credential values to be ''.
        """
        parser = cls.__get_parser()
        args = parser.parse_args()
        args.sids = cls.__parse_sids(args.sids)
        if (args.password is None and args.nt_hash is None):
            args.password = getpass(f'Password for {args.username}: ')
        # impacket needs the unset values to be equal to ''
        args.password = args.password if args.password else ''
        args.nt_hash = args.nt_hash if args.nt_hash else ''
        return args

    @staticmethod
    def __get_parser():
        """Build the ArgumentParser with all supported options."""
        parser = ArgumentParser()
        parser.add_argument('-u', '--username', dest='username', required=True,
            help='Username used to authenticate on targets.'
        )
        parser.add_argument('-d', '--domain', dest='domain', required=True,
            help='Domain to authenticate to.'
        )
        # Password and NT hash are mutually exclusive credential sources.
        pw_group = parser.add_mutually_exclusive_group()
        pw_group.add_argument('-p', '--password', dest='password',
            help='Username\'s password. If a password or a hash is not '
                 'provided, a prompt will request the password on execution.'
        )
        pw_group.add_argument('-nt', '--nt-hash', dest='nt_hash',
            help='Username\'s NT hash.'
        )
        parser.add_argument('-t', '--timeout', dest='timeout', default=2,
            type=int,
            help='Drops connection after x seconds when waiting to receive '
                 'packets from the target (default: 2).'
        )
        parser.add_argument('-s', '--sid', dest='sids', required=True,
            help='A single SID or path to a file containing SIDs.'
        )
        parser.add_argument('target',
            help='Target to request SID resolving from (IP or hostname).'
        )
        return parser

    @classmethod
    def __parse_sids(cls, sid):
        """Return the validated SID list from a literal SID or a file path."""
        entries = []
        # In case of tilde in path
        sid = expanduser(sid)
        if isfile(sid):
            # Use a context manager so the file is closed deterministically
            # (the previous open() call leaked the file handle).
            with open(sid) as sid_file:
                entries = [line.rstrip() for line in sid_file]
        else:
            entries.append(sid)
        return cls.__validate_sids(entries)

    # This does not validate much other than to avoid sending complete
    # gibberish in the case where a random file with no valid SIDs is given.
    @staticmethod
    def __validate_sids(entries):
        """Split entries on the 'S-' prefix; report and exit on invalid ones."""
        valid_sids = []
        invalid_sids = []
        for sid in entries:
            # Every Windows SID string starts with the 'S-' authority prefix.
            if sid.startswith('S-'):
                valid_sids.append(sid)
            else:
                invalid_sids.append(sid)
        if invalid_sids:
            count = len(invalid_sids)
            if count <= 20:
                print('The following SIDs are invalid:')
                for sid in invalid_sids:
                    print(sid)
            else:
                print(f'{count} SIDs are invalid. Validate your input file.')
            # NOTE(review): exiting with status 0 signals success to the shell
            # even though validation failed -- consider exit(1).
            exit(0)
        return valid_sids
74ce49e76df3702caf440a65b380e7e8353b2302 | 12,447 | py | Python | tests/test_tree.py | biomadeira/taxonomy-resolver | ce7a264c2b8b552dde6284b4a74821184be8e489 | [
"Apache-2.0"
] | null | null | null | tests/test_tree.py | biomadeira/taxonomy-resolver | ce7a264c2b8b552dde6284b4a74821184be8e489 | [
"Apache-2.0"
] | null | null | null | tests/test_tree.py | biomadeira/taxonomy-resolver | ce7a264c2b8b552dde6284b4a74821184be8e489 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8
"""
Taxonomy Resolver
:copyright: (c) 2020-2021.
:license: Apache 2.0, see LICENSE for more details.
"""
import os
import pytest
from taxonresolver import TaxonResolver
from taxonresolver.utils import load_logging
@pytest.fixture
@pytest.fixture
| 47.507634 | 99 | 0.651884 | #!/usr/bin/env python
# -*- coding: utf-8
"""
Taxonomy Resolver
:copyright: (c) 2020-2021.
:license: Apache 2.0, see LICENSE for more details.
"""
import os
import pytest
from taxonresolver import TaxonResolver
from taxonresolver.utils import load_logging
@pytest.fixture
def context():
    """Pytest fixture: logger configured at INFO level via load_logging."""
    return load_logging("INFO")
@pytest.fixture
def cwd():
    """Pytest fixture: chdir into the tests/ directory if needed, return it."""
    # Tests reference fixture files relative to tests/, so normalise the cwd.
    if not os.getcwd().endswith("tests"):
        os.chdir(os.path.join(os.getcwd(), "tests"))
    return os.getcwd()
class TestTree:
    """Integration tests for TaxonResolver: building, writing/loading,
    filtering, searching and validating taxonomy trees.

    Uses fixture files under ../testdata; the expected counts are tied to a
    specific NCBI taxdmp snapshot and to the hand-made mock tree.
    """
    @pytest.mark.skip(reason="Skip test by default!")
    def test_download_taxdmp(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        # f-string prefix removed: the literal contains no placeholders
        resolver.download(os.path.join(cwd, "../testdata/taxdmp.zip"), "zip")
        assert os.path.isfile(os.path.join(cwd, "../testdata/taxdmp.zip"))
    @pytest.mark.skip(reason="Skip test by default!")
    def test_resolver_build(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.build(os.path.join(cwd, "../testdata/taxdmp.zip"))
        assert len(resolver.tree) == 2302938
    def test_resolver_build_and_write(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.build(os.path.join(cwd, "../testdata/taxdmp.zip"))
        assert len(resolver.tree) == 2302938
        resolver.write(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        assert os.path.isfile(os.path.join(cwd, "../testdata/tree.pickle"))
    def test_resolver_load_pickle(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        assert len(resolver.tree) == 2302938
    def test_resolver_filter(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        resolver.filter(os.path.join(cwd, "../testdata/taxids_filter.txt"))
        assert len(resolver.tree) == 1000
    def test_resolver_filter_and_write(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        resolver.filter(os.path.join(cwd, "../testdata/taxids_filter.txt"))
        assert len(resolver.tree) == 1000
        resolver.write(os.path.join(cwd, "../testdata/tree_filtered.pickle"), "pickle")
        assert os.path.isfile(os.path.join(cwd, "../testdata/tree_filtered.pickle"))
    def test_resolver_filter_load(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree_filtered.pickle"), "pickle")
        assert len(resolver.tree) == 1000
    def test_resolver_search_by_taxid_human(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        taxids = resolver.search(["9606"])
        assert len(taxids) == 3
    def test_resolver_search_by_taxid_bacteria(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        taxids = resolver.search(["2"])
        assert len(taxids) == 517912
    def test_resolver_search_by_taxid_archaea(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        taxids = resolver.search(["2157"])
        assert len(taxids) == 13683
    def test_resolver_search_by_taxid_eukaryota(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        taxids = resolver.search(["2759"])
        assert len(taxids) == 1541629
    def test_resolver_search_by_taxid_viruses(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        taxids = resolver.search(["10239"])
        assert len(taxids) == 212409
    def test_resolver_search_by_taxid_other(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        taxids = resolver.search(["28384"])
        assert len(taxids) == 16313
    def test_resolver_search_by_taxid_unclassified(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        taxids = resolver.search(["12908"])
        assert len(taxids) == 988
    def test_resolver_search_by_taxid_mammalia(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        taxids = resolver.search(["40674"])
        assert len(taxids) == 12449
    def test_resolver_search_by_taxid_primates(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        taxids = resolver.search(["9443"])
        assert len(taxids) == 1002
    def test_resolver_search_by_taxid_plants(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        taxids = resolver.search(["3193"])
        assert len(taxids) == 216142
    def test_resolver_search(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        tax_ids = resolver.search(taxidinclude=os.path.join(cwd, "../testdata/taxids_search.txt"))
        assert len(tax_ids) == 533
    def test_resolver_search_filter(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        tax_ids = resolver.search(taxidinclude=os.path.join(cwd, "../testdata/taxids_search.txt"),
                                  taxidfilter=os.path.join(cwd, "../testdata/taxids_filter.txt"))
        assert len(tax_ids) == 302
    def test_resolver_search_exclude_filter(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        tax_ids = resolver.search(taxidinclude=os.path.join(cwd, "../testdata/taxids_search.txt"),
                                  taxidexclude=os.path.join(cwd, "../testdata/taxids_exclude.txt"),
                                  taxidfilter=os.path.join(cwd, "../testdata/taxids_filter.txt"))
        assert len(tax_ids) == 296
    def test_resolver_validate(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        assert resolver.validate(os.path.join(cwd, "../testdata/taxids_validate.txt"))
    def test_resolver_validate_alt(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree.pickle"), "pickle")
        assert not resolver.validate(os.path.join(cwd, "../testdata/taxids_validate_alt.txt"))
    def test_resolver_build_mock_tree(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.build(os.path.join(cwd, "../testdata/nodes_mock.dmp"))
        assert len(resolver.tree) == 29
    def test_resolver_build_and_write_mock_tree(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.build(os.path.join(cwd, "../testdata/nodes_mock.dmp"))
        resolver.write(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        assert os.path.isfile(os.path.join(cwd, "../testdata/tree_mock.pickle"))
    def test_resolver_load_pickle_mock_tree(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        assert len(resolver.tree) == 29
    def test_resolver_filter_mock_tree(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        resolver.filter(taxidfilter=["12", "21"])
        assert len(resolver.tree) == 9
        resolver.load(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        resolver.filter(taxidfilter=["10", "21", "24"])
        assert len(resolver.tree) == 17
        resolver.load(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        resolver.filter(taxidfilter=["10", "21", "9", "27"])
        assert len(resolver.tree) == 19
        resolver.load(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        resolver.filter(taxidfilter=["19", "25", "22", "29"])
        assert len(resolver.tree) == 18
    def test_resolver_filter_and_write(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        resolver.filter(taxidfilter=["12", "21"])
        assert len(resolver.tree) == 9
        resolver.write(os.path.join(cwd, "../testdata/tree_mock_filtered.pickle"), "pickle")
        assert os.path.isfile(os.path.join(cwd, "../testdata/tree_mock_filtered.pickle"))
    def test_resolver_filter_load(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree_mock_filtered.pickle"), "pickle")
        assert len(resolver.tree) == 9
    def test_resolver_search_mock_tree(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        taxids = resolver.search(taxidinclude=["4"])
        assert len(taxids) == 14
        taxids = resolver.search(taxidinclude=["5"])
        assert len(taxids) == 9
        taxids = resolver.search(taxidinclude=["29"])
        assert len(taxids) == 1
        taxids = resolver.search(taxidinclude=["4", "10", "12", "14"])
        assert len(taxids) == 21
        taxids = resolver.search(taxidinclude=["7", "11", "21", "27", "29"])
        assert len(taxids) == 9
        taxids = resolver.search(taxidinclude=["7", "11", "5", "21", "27", "29"])
        assert len(taxids) == 14
    def test_resolver_search_exclude_mock_tree(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        taxids = resolver.search(taxidinclude=["4"], taxidexclude=["24"])
        assert len(taxids) == 10
        taxids = resolver.search(taxidinclude=["5"], taxidexclude=["12"])
        assert len(taxids) == 4
        taxids = resolver.search(taxidinclude=["29"], taxidexclude=["3"])
        assert len(taxids) == 1
    def test_resolver_search_filter_mock_tree(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        taxidfilter = ["19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29"]
        taxids = resolver.search(taxidinclude=["4"], taxidfilter=taxidfilter)
        assert len(taxids) == 6
        taxids = resolver.search(taxidinclude=["5"], taxidfilter=taxidfilter)
        assert len(taxids) == 5
    def test_resolver_search_exclude_filter_mock_tree(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        taxidfilter = ["19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29"]
        taxids = resolver.search(taxidinclude=["4"], taxidexclude=["24"],
                                 taxidfilter=taxidfilter)
        assert len(taxids) == 2
        taxids = resolver.search(taxidinclude=["5"], taxidexclude=["12"],
                                 taxidfilter=taxidfilter)
        assert len(taxids) == 1
    def test_resolver_validate_mock_tree(self, context, cwd):
        resolver = TaxonResolver(logging=context)
        resolver.load(os.path.join(cwd, "../testdata/tree_mock.pickle"), "pickle")
        assert resolver.validate(taxidinclude=["8"])
        assert resolver.validate(taxidinclude=["9"])
        assert resolver.validate(taxidinclude=["10"])
        assert not resolver.validate(taxidinclude=["9606"])
| 11,091 | 992 | 67 |
53dd6eb163acaeef6aac44598d96fbcbb35fb393 | 961 | py | Python | src/learning/DecisionTreeEntropy.py | alexandrabenamar/Who-Wins | 23df54f98286e67aab39e92ac746bccf6916c231 | [
"MIT"
] | 3 | 2018-04-10T21:52:57.000Z | 2018-08-22T15:41:58.000Z | src/learning/DecisionTreeEntropy.py | alexandrabenamar/Who-Wins | 23df54f98286e67aab39e92ac746bccf6916c231 | [
"MIT"
] | null | null | null | src/learning/DecisionTreeEntropy.py | alexandrabenamar/Who-Wins | 23df54f98286e67aab39e92ac746bccf6916c231 | [
"MIT"
] | 1 | 2020-05-18T15:46:23.000Z | 2020-05-18T15:46:23.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pyspark.mllib.tree import DecisionTree
from functions_MLlib import spark_context, training_set, test_set, write_result, brexit_labeled_data, mode_predict
# Script entry point: train a Spark MLlib decision tree classifier with the
# entropy impurity criterion, evaluate it, and persist the results.
if __name__ == "__main__" :
    sc = spark_context()
    # Feature-vector size handed to the dataset builders (presumably the
    # hashing dimension used by functions_MLlib — confirm there).
    numFeatures = 10000
    print("Training...\n")
    (training, idf) = training_set(sc, numFeatures = numFeatures)
    # Binary classification (numClasses=2), entropy as the split criterion.
    model = DecisionTree.trainClassifier(training, categoricalFeaturesInfo={}, impurity="entropy", maxDepth=5, numClasses=2)
    print("Test... \n")
    # Reuse the idf object returned by training_set so the test features are
    # weighted consistently with the training set.
    test = test_set(sc, numFeatures = numFeatures, idf = idf)
    (num_pos, num_neg) = mode_predict(model, test)
    print("Test on Brexit labeled data...\n")
    (accuracy, f1) = brexit_labeled_data(sc, model = model, numFeatures = numFeatures, idf = idf)
    print("Saving results...")
    write_result(num_pos, num_neg, accuracy = accuracy, f1 = f1, name = "Decision Tree (Entropy)")
write_result(num_pos, num_neg, accuracy = accuracy, f1 = f1, name = "Decision Tree (Entropy)") | 31 | 124 | 0.671176 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pyspark.mllib.tree import DecisionTree
from functions_MLlib import spark_context, training_set, test_set, write_result, brexit_labeled_data, mode_predict
# Script entry point: train a Spark MLlib decision tree classifier with the
# entropy impurity criterion, evaluate it, and persist the results.
if __name__ == "__main__" :
    sc = spark_context()
    # Feature-vector size handed to the dataset builders (presumably the
    # hashing dimension used by functions_MLlib — confirm there).
    numFeatures = 10000
    print("Training...\n")
    (training, idf) = training_set(sc, numFeatures = numFeatures)
    # Binary classification (numClasses=2), entropy as the split criterion.
    model = DecisionTree.trainClassifier(training, categoricalFeaturesInfo={}, impurity="entropy", maxDepth=5, numClasses=2)
    print("Test... \n")
    # Reuse the idf object returned by training_set so the test features are
    # weighted consistently with the training set.
    test = test_set(sc, numFeatures = numFeatures, idf = idf)
    (num_pos, num_neg) = mode_predict(model, test)
    print("Test on Brexit labeled data...\n")
    (accuracy, f1) = brexit_labeled_data(sc, model = model, numFeatures = numFeatures, idf = idf)
    print("Saving results...")
    write_result(num_pos, num_neg, accuracy = accuracy, f1 = f1, name = "Decision Tree (Entropy)")
8a62452bd2c0e39afcbd171cc50d1d0a21550d89 | 13,646 | py | Python | lib/keybow2040/keybow2040.py | bschapendonk/pim551 | 277b7eddb744ed2733a854bb8e96cd66ec05bd0c | [
"MIT"
] | 47 | 2021-04-28T15:55:29.000Z | 2022-03-18T02:04:10.000Z | lib/keybow2040/keybow2040.py | bschapendonk/pim551 | 277b7eddb744ed2733a854bb8e96cd66ec05bd0c | [
"MIT"
] | 12 | 2021-04-30T19:22:35.000Z | 2022-02-09T10:16:57.000Z | lib/keybow2040/keybow2040.py | bschapendonk/pim551 | 277b7eddb744ed2733a854bb8e96cd66ec05bd0c | [
"MIT"
] | 19 | 2021-04-28T15:43:56.000Z | 2022-03-20T20:42:43.000Z | # SPDX-FileCopyrightText: 2021 Sandy Macdonald
#
# SPDX-License-Identifier: MIT
"""
`Keybow 2040 CircuitPython library`
====================================================
CircuitPython driver for the Pimoroni Keybow 2040 and Pico RGB Keypad Base.
Drop the `lib` contents (`keybow2040.py` file and `keybow_hardware` folder)
into your `lib` folder on your `CIRCUITPY` drive.
* Authors: Sandy Macdonald, Maciej Sokolowski
Notes
--------------------
**Hardware:**
One of:
* Pimoroni Keybow 2040
<https://shop.pimoroni.com/products/keybow-2040>_
* Pimoroni Pico RGB Keypad Base
<https://shop.pimoroni.com/products/pico-rgb-keypad-base>_
**Software and Dependencies:**
For Keybow 2040:
* Adafruit CircuitPython firmware for Keybow 2040:
<https://circuitpython.org/board/pimoroni_keybow2040/>_
* Adafruit CircuitPython IS31FL3731 library:
<https://github.com/adafruit/Adafruit_CircuitPython_IS31FL3731>_
For Pico RGB Keypad Base:
* Adafruit CircuitPython firmware for Raspberry Pi Pico:
<https://circuitpython.org/board/raspberry_pi_pico/>_
* Adafruit CircuitPython DotStar library:
<https://github.com/adafruit/Adafruit_CircuitPython_DotStar>_
"""
import time
class Keybow2040(object):
"""
Represents a Keybow 2040 and hence a set of Key instances with
associated LEDs and key behaviours.
:param hardware: object representing a board hardware
"""
# def rotate(self, degrees):
# # Rotates all of Keybow's keys by a number of degrees, clamped to
# # the closest multiple of 90 degrees. Because it shuffles the order
# # of the Key instances, all of the associated attributes of the key
# # are retained. The x/y coordinate of the keys are rotated also. It
# # also handles negative degrees, e.g. -90 to rotate 90 degrees anti-
# # clockwise.
# # Rotate as follows: `keybow.rotate(270)`
# self.rotation = degrees
# num_rotations = degrees // 90
# if num_rotations == 0:
# return
# if num_rotations < 1:
# num_rotations = 4 + num_rotations
# matrix = [[(x * 4) + y for y in range(4)] for x in range(4)]
# for r in range(num_rotations):
# matrix = zip(*matrix[::-1])
# matrix = [list(x) for x in list(matrix)]
# flat_matrix = [x for y in matrix for x in y]
# for i in range(len(self.keys)):
# self.keys[i].number = flat_matrix[i]
# self.keys = sorted(self.keys, key=lambda x:x.number)
class Key:
"""
Represents a key on Keybow 2040, with associated switch and
LED behaviours.
:param number: the key number (0-15) to associate with the key
:param hardware: object representing a board hardware
""" | 30.324444 | 114 | 0.589843 | # SPDX-FileCopyrightText: 2021 Sandy Macdonald
#
# SPDX-License-Identifier: MIT
"""
`Keybow 2040 CircuitPython library`
====================================================
CircuitPython driver for the Pimoroni Keybow 2040 and Pico RGB Keypad Base.
Drop the `lib` contents (`keybow2040.py` file and `keybow_hardware` folder)
into your `lib` folder on your `CIRCUITPY` drive.
* Authors: Sandy Macdonald, Maciej Sokolowski
Notes
--------------------
**Hardware:**
One of:
* Pimoroni Keybow 2040
<https://shop.pimoroni.com/products/keybow-2040>_
* Pimoroni Pico RGB Keypad Base
<https://shop.pimoroni.com/products/pico-rgb-keypad-base>_
**Software and Dependencies:**
For Keybow 2040:
* Adafruit CircuitPython firmware for Keybow 2040:
<https://circuitpython.org/board/pimoroni_keybow2040/>_
* Adafruit CircuitPython IS31FL3731 library:
<https://github.com/adafruit/Adafruit_CircuitPython_IS31FL3731>_
For Pico RGB Keypad Base:
* Adafruit CircuitPython firmware for Raspberry Pi Pico:
<https://circuitpython.org/board/raspberry_pi_pico/>_
* Adafruit CircuitPython DotStar library:
<https://github.com/adafruit/Adafruit_CircuitPython_DotStar>_
"""
import time
class Keybow2040(object):
    """
    Represents a Keybow 2040 and hence a set of Key instances with
    associated LEDs and key behaviours.
    :param hardware: object representing a board hardware
    """
    def __init__(self, hardware):
        self.hardware = hardware
        self.keys = []
        self.time_of_last_press = time.monotonic()
        self.time_since_last_press = None
        # LED sleep: when enabled, all LEDs are blanked after
        # `led_sleep_time` seconds with no key press, and restored on wake.
        self.led_sleep_enabled = False
        self.led_sleep_time = 60
        self.sleeping = False
        self.was_asleep = False
        self.last_led_states = None
        # self.rotation = 0
        # Create one Key instance per physical key on the board.
        for i in range(self.hardware.num_keys()):
            _key = Key(i, self.hardware)
            self.keys.append(_key)
    def update(self):
        # Call this in each iteration of your while loop to update
        # everything's state, e.g. `keybow.update()`
        for _key in self.keys:
            _key.update()
        # Used to work out the sleep behaviour, by keeping track
        # of the time of the last key press.
        if self.any_pressed():
            self.time_of_last_press = time.monotonic()
            self.sleeping = False
        self.time_since_last_press = time.monotonic() - self.time_of_last_press
        # If LED sleep is enabled, but not engaged, check if enough time
        # has elapsed to engage sleep. If engaged, record the state of the
        # LEDs, so it can be restored on wake.
        if self.led_sleep_enabled and not self.sleeping:
            if time.monotonic() - self.time_of_last_press > self.led_sleep_time:
                self.sleeping = True
                self.last_led_states = [k.rgb if k.lit else [0, 0, 0] for k in self.keys]
                self.set_all(0, 0, 0)
                self.was_asleep = True
        # If it was sleeping, but is no longer, then restore LED states.
        if not self.sleeping and self.was_asleep:
            for k in range(len(self.keys)):
                self.keys[k].set_led(*self.last_led_states[k])
            self.was_asleep = False
    def set_led(self, number, r, g, b):
        # Set an individual key's LED to an RGB value by its number.
        self.keys[number].set_led(r, g, b)
    def set_all(self, r, g, b):
        # Set all of Keybow's LEDs to an RGB value.
        # While sleeping the LEDs are forced off regardless of the requested
        # colour.
        if not self.sleeping:
            for _key in self.keys:
                _key.set_led(r, g, b)
        else:
            for _key in self.keys:
                _key.led_off()
    def get_states(self):
        # Returns a Boolean list of Keybow's key states
        # (0=not pressed, 1=pressed).
        _states = [_key.state for _key in self.keys]
        return _states
    def get_pressed(self):
        # Returns a list of key numbers currently pressed.
        _pressed = [_key.number for _key in self.keys if _key.state == True]
        return _pressed
    def any_pressed(self):
        # Returns True if any key is pressed, False if none are pressed.
        if any(self.get_states()):
            return True
        else:
            return False
    def none_pressed(self):
        # Returns True if none of the keys are pressed, False if any key
        # is pressed.
        if not any(self.get_states()):
            return True
        else:
            return False
    def on_press(self, _key, handler=None):
        # Attaches a press function to a key, via a decorator. This is stored as
        # `key.press_function` in the key's attributes, and run if necessary
        # as part of the key's update function (and hence Keybow's update
        # function). It can be attached as follows:
        # @keybow.on_press(key)
        # def press_handler(key, pressed):
        #     if pressed:
        #         do something
        #     else:
        #         do something else
        if _key is None:
            return
        def attach_handler(handler):
            _key.press_function = handler
        if handler is not None:
            attach_handler(handler)
        else:
            return attach_handler
    def on_release(self, _key, handler=None):
        # Attaches a release function to a key, via a decorator. This is stored
        # as `key.release_function` in the key's attributes, and run if
        # necessary as part of the key's update function (and hence Keybow's
        # update function). It can be attached as follows:
        # @keybow.on_release(key)
        # def release_handler(key):
        #     do something
        if _key is None:
            return
        def attach_handler(handler):
            _key.release_function = handler
        if handler is not None:
            attach_handler(handler)
        else:
            return attach_handler
    def on_hold(self, _key, handler=None):
        # Attaches a hold function to a key, via a decorator. This is stored as
        # `key.hold_function` in the key's attributes, and run if necessary
        # as part of the key's update function (and hence Keybow's update
        # function). It can be attached as follows:
        # @keybow.on_hold(key)
        # def hold_handler(key):
        #     do something
        if _key is None:
            return
        def attach_handler(handler):
            _key.hold_function = handler
        if handler is not None:
            attach_handler(handler)
        else:
            return attach_handler
    # def rotate(self, degrees):
    #     # Rotates all of Keybow's keys by a number of degrees, clamped to
    #     # the closest multiple of 90 degrees. Because it shuffles the order
    #     # of the Key instances, all of the associated attributes of the key
    #     # are retained. The x/y coordinate of the keys are rotated also. It
    #     # also handles negative degrees, e.g. -90 to rotate 90 degrees anti-
    #     # clockwise.
    #     # Rotate as follows: `keybow.rotate(270)`
    #     self.rotation = degrees
    #     num_rotations = degrees // 90
    #     if num_rotations == 0:
    #         return
    #     if num_rotations < 1:
    #         num_rotations = 4 + num_rotations
    #     matrix = [[(x * 4) + y for y in range(4)] for x in range(4)]
    #     for r in range(num_rotations):
    #         matrix = zip(*matrix[::-1])
    #     matrix = [list(x) for x in list(matrix)]
    #     flat_matrix = [x for y in matrix for x in y]
    #     for i in range(len(self.keys)):
    #         self.keys[i].number = flat_matrix[i]
    #     self.keys = sorted(self.keys, key=lambda x:x.number)
class Key:
    """
    Represents a key on Keybow 2040, with associated switch and
    LED behaviours.
    :param number: the key number (0-15) to associate with the key
    :param hardware: object representing a board hardware
    """
    def __init__(self, number, hardware):
        self.hardware = hardware
        self.number = number
        self.state = 0
        self.pressed = 0
        self.last_state = None
        self.time_of_last_press = time.monotonic()
        self.time_since_last_press = None
        self.time_held_for = 0
        self.held = False
        self.hold_time = 0.75
        self.modifier = False
        self.rgb = [0, 0, 0]
        self.lit = False
        self.xy = self.get_xy()
        self.x, self.y = self.xy
        self.led_off()
        self.press_function = None
        self.release_function = None
        self.hold_function = None
        self.press_func_fired = False
        self.hold_func_fired = False
        self.debounce = 0.125
        self.key_locked = False
    def get_state(self):
        # Returns the state of the key (0=not pressed, 1=pressed).
        return int(self.hardware.switch_state(self.number))
    def update(self):
        # Updates the state of the key and updates all of its
        # attributes.
        self.time_since_last_press = time.monotonic() - self.time_of_last_press
        # Keys get locked during the debounce time.
        if self.time_since_last_press < self.debounce:
            self.key_locked = True
        else:
            self.key_locked = False
        self.state = self.get_state()
        self.pressed = self.state
        update_time = time.monotonic()
        # If there's a `press_function` attached, then call it,
        # returning the key object and the pressed state.
        if self.press_function is not None and self.pressed and not self.press_func_fired and not self.key_locked:
            self.press_function(self)
            self.press_func_fired = True
            # time.sleep(0.05)  # A little debounce
        # If the key has been pressed and releases, then call
        # the `release_function`, if one is attached.
        if not self.pressed and self.last_state == True:
            if self.release_function is not None:
                self.release_function(self)
            self.last_state = False
            self.press_func_fired = False
        if not self.pressed:
            self.time_held_for = 0
            self.last_state = False
        # If the key has just been pressed, then record the
        # `time_of_last_press`, and update last_state.
        elif self.pressed and self.last_state == False:
            self.time_of_last_press = update_time
            self.last_state = True
        # If the key is pressed and held, then update the
        # `time_held_for` variable.
        elif self.pressed and self.last_state == True:
            self.time_held_for = update_time - self.time_of_last_press
            self.last_state = True
        # If the `hold_time` threshold is crossed, then call the
        # `hold_function` if one is attached. The `hold_func_fired`
        # ensures that the function is only called once.
        if self.time_held_for > self.hold_time:
            self.held = True
            if self.hold_function is not None and not self.hold_func_fired:
                self.hold_function(self)
                self.hold_func_fired = True
        else:
            self.held = False
            self.hold_func_fired = False
    def get_xy(self):
        # Returns the x/y coordinate of a key from 0,0 to 3,3.
        return number_to_xy(self.number)
    def get_number(self):
        # Returns the key number, from 0 to 15.
        return xy_to_number(self.x, self.y)
    def is_modifier(self):
        # Designates a modifier key, so you can hold the modifier
        # and tap another key to trigger additional behaviours.
        if self.modifier:
            return True
        else:
            return False
    def set_led(self, r, g, b):
        # Set this key's LED to an RGB value.
        if [r, g, b] == [0, 0, 0]:
            self.lit = False
        else:
            self.lit = True
            self.rgb = [r, g, b]
        self.hardware.set_pixel(self.number, r, g, b)
    def led_on(self):
        # Turn the LED on, using its current RGB value.
        r, g, b = self.rgb
        self.set_led(r, g, b)
    def led_off(self):
        # Turn the LED off.
        self.set_led(0, 0, 0)
    def led_state(self, state):
        # Set the LED's state (0=off, 1=on)
        state = int(state)
        if state == 0:
            self.led_off()
        elif state == 1:
            self.led_on()
        else:
            return
    def toggle_led(self, rgb=None):
        # Toggle the LED's state, retaining its RGB value for when it's toggled
        # back on. Can also be passed an RGB tuple to set the colour as part of
        # the toggle.
        if rgb is not None:
            self.rgb = rgb
        if self.lit:
            self.led_off()
        else:
            self.led_on()
    def __str__(self):
        # When printed, show the key's state (0 or 1).
        # Fixed: __str__ must return a string; returning the int raised
        # "TypeError: __str__ returned non-string" when str(key) was called.
        return str(self.state)
def xy_to_number(x, y):
    """Translate a 0-3 x/y grid coordinate into its key number (0-15)."""
    return (y * 4) + x
def number_to_xy(number):
    """Translate a key number (0-15) into its (x, y) grid coordinate."""
    row, col = divmod(number, 4)
    return (col, row)
def hsv_to_rgb(h, s, v):
    """Convert an HSV colour (each component 0.0-1.0) to an RGB tuple (0-255)."""
    # Standard HSV sector decomposition: the hue circle is split into six
    # sectors, with p/q/t the three intermediate channel intensities.
    sector = int(h * 6.0)
    f = (h * 6.0) - sector
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    sector %= 6
    # Channel ordering per sector; when s == 0, p == q == t == v, so every
    # sector yields the grey (v, v, v).
    by_sector = {
        0: (v, t, p),
        1: (q, v, p),
        2: (p, v, t),
        3: (p, q, v),
        4: (t, p, v),
        5: (v, p, q),
    }
    return tuple(int(c * 255) for c in by_sector[sector])
e84367d8ec44da8b98fb3bfd77ea2cb55820c6e8 | 492 | py | Python | simp_py_examples/course/SM001_old/t216.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | [
"MIT"
] | null | null | null | simp_py_examples/course/SM001_old/t216.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | [
"MIT"
] | null | null | null | simp_py_examples/course/SM001_old/t216.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | [
"MIT"
] | null | null | null | import urequests
from simp_py import lcd, mon
import time  # fixed: time.sleep() below raised NameError because time was never imported
# Poll the CoinDesk price API every 10 seconds and show the BTC rate on the LCD.
while 1:
  try:
    response = urequests.get('http://api.coindesk.com/v1/bpi/currentprice.json')
    if response.reason==b'OK':
      data= response.json()
      updated=data['time']['updatedISO']
      btc = data['bpi']['USD']['rate_float']
      lcd.text(0,140,updated)
      lcd.text(0,160,'btc:%.04f ' % btc)
    else:
      # Show the HTTP failure reason on the display instead of the price.
      lcd.text(0,140,'err:%s' % response.reason)
  except Exception as e:
    # Log and keep polling; a single failed request must not stop the loop.
    mon.log_exc(e)
  time.sleep(10)
| 25.894737 | 80 | 0.609756 | import urequests
from simp_py import lcd, mon
while 1:
try:
response = urequests.get('http://api.coindesk.com/v1/bpi/currentprice.json')
if response.reason==b'OK':
data= response.json()
updated=data['time']['updatedISO']
btc = data['bpi']['USD']['rate_float']
lcd.text(0,140,updated)
lcd.text(0,160,'btc:%.04f ' % btc)
else:
lcd.text(0,140,'err:%s' % response.reason)
except Exception as e:
mon.log_exc(e)
time.sleep(10)
| 0 | 0 | 0 |
ac98afe8378bea03f864f4493a06e66e2834e9bb | 3,384 | py | Python | Chapter 08/Windows/DISTAL/cgi-bin/mapGenerator.py | PacktPublishing/Python-Geospatial-Development-Third-Edition | e1e1f52da8509876e8576e081de9d5f251a21f77 | [
"MIT"
] | 44 | 2016-06-14T05:36:10.000Z | 2022-01-30T18:29:44.000Z | Chapter 09/Windows/cgi-bin/mapGenerator.py | KonstantinKlepikov/Python-Geospatial-Development-Third-Edition | ca3545dbab75dac63080582538de40d4d1c15dab | [
"MIT"
] | null | null | null | Chapter 09/Windows/cgi-bin/mapGenerator.py | KonstantinKlepikov/Python-Geospatial-Development-Third-Edition | ca3545dbab75dac63080582538de40d4d1c15dab | [
"MIT"
] | 48 | 2016-12-11T08:53:46.000Z | 2022-02-10T12:10:34.000Z | # mapGenerator.py
import os, os.path, sys, tempfile
# NOTE: The following needs to be added to fix a problem with my path and
# Python3. Remove to make this work generally.
sys.path.insert(0, "/usr/local/lib/python3.3/site-packages")
# End of fix.
import mapnik
| 31.333333 | 73 | 0.5724 | # mapGenerator.py
import os, os.path, sys, tempfile
# NOTE: The following needs to be added to fix a problem with my path and
# Python3. Remove to make this work generally.
sys.path.insert(0, "/usr/local/lib/python3.3/site-packages")
# End of fix.
import mapnik
def generateMap(tableName, minX, minY, maxX, maxY,
                mapWidth, mapHeight,
                hiliteExpr=None, points=None):
    """Render a lat/long bounding box from PostGIS to a cached PNG.

    Polygons are read from PostGIS table ``tableName`` (geometry column
    ``outline``, SRID 4326, database ``distal``).  Features matching
    ``hiliteExpr`` (a Mapnik filter expression string, optional) are drawn
    highlighted; all other features use the default grey style.  ``points``,
    if given, is an iterable of ``(long, lat, name)`` tuples rendered as
    labelled markers on top of the polygons.

    Returns the relative URL ``"../mapCache/<name>.png"`` of the rendered
    image; the file is created under a ``mapCache`` directory sibling to
    this script's directory.
    """
    # Extent string "minX,minY,maxX,maxY" required by the PostGIS datasource.
    extent = "{},{},{},{}".format(minX, minY, maxX, maxY)
    layer = mapnik.Layer("Layer")
    layer.datasource = mapnik.PostGIS(dbname="distal",
                                      table=tableName,
                                      user="distal_user",
                                      password="...",
                                      extent=extent,
                                      geometry_field="outline",
                                      srid=4326)
    # NOTE: 'map' shadows the builtin; kept as-is here (documentation-only pass).
    map = mapnik.Map(mapWidth, mapHeight,
                     '+proj=longlat +datum=WGS84')
    map.background = mapnik.Color("#8080a0")
    # Rule 1: highlighted features (green fill, black outline).  Without a
    # hiliteExpr this rule has no filter and would match everything, in which
    # case the "else" rule below never fires.
    style = mapnik.Style()
    rule = mapnik.Rule()
    if hiliteExpr != None:
        rule.filter = mapnik.Filter(hiliteExpr)
    rule.symbols.append(mapnik.PolygonSymbolizer(
        mapnik.Color("#408000")))
    rule.symbols.append(mapnik.LineSymbolizer(
        mapnik.Stroke(mapnik.Color("#000000"), 0.1)))
    style.rules.append(rule)
    # Rule 2: the "else" rule -- everything not highlighted (grey fill).
    rule = mapnik.Rule()
    rule.set_else(True)
    rule.symbols.append(mapnik.PolygonSymbolizer(
        mapnik.Color("#a0a0a0")))
    rule.symbols.append(mapnik.LineSymbolizer(
        mapnik.Stroke(mapnik.Color("#404040"), 0.1)))
    style.rules.append(rule)
    map.append_style("Map Style", style)
    layer.styles.append("Map Style")
    map.layers.append(layer)
    if points != None:
        # Build an in-memory datasource holding one point feature per entry,
        # each carrying a 'name' attribute used for labelling.
        memoryDatasource = mapnik.MemoryDatasource()
        context = mapnik.Context()
        context.push("name")
        next_id = 1
        for long,lat,name in points:
            wkt = "POINT (%0.8f %0.8f)" % (long,lat)
            feature = mapnik.Feature(context, next_id)
            feature['name'] = name
            feature.add_geometries_from_wkt(wkt)
            next_id = next_id + 1
            memoryDatasource.add_feature(feature)
        # Second layer drawn on top of the polygons: marker image + label.
        layer = mapnik.Layer("Points")
        layer.datasource = memoryDatasource
        style = mapnik.Style()
        rule = mapnik.Rule()
        pointImgFile = os.path.join(os.path.dirname(__file__),
                                    "point.png")
        shield = mapnik.ShieldSymbolizer(
                    mapnik.Expression('[name]'),
                    "DejaVu Sans Bold", 10,
                    mapnik.Color("#000000"),
                    mapnik.PathExpression(pointImgFile))
        # Offset the label 7px so it doesn't sit on the marker icon;
        # unlock_image lets the text move independently of the icon.
        shield.displacement = (0, 7)
        shield.unlock_image = True
        rule.symbols.append(shield)
        style.rules.append(rule)
        map.append_style("Point Style", style)
        layer.styles.append("Point Style")
        map.layers.append(layer)
    map.zoom_to_box(mapnik.Envelope(minX, minY, maxX, maxY))
    # Render into a uniquely-named file in ../mapCache (created on demand).
    scriptDir = os.path.dirname(__file__)
    cacheDir = os.path.join(scriptDir, "..", "mapCache")
    if not os.path.exists(cacheDir):
        os.mkdir(cacheDir)
    fd,filename = tempfile.mkstemp(".png", dir=cacheDir)
    os.close(fd)  # mkstemp opens the file; we only need the unique path
    mapnik.render_to_file(map, filename, "png")
    return "../mapCache/" + os.path.basename(filename)
| 3,095 | 0 | 23 |
9016f832d87f6b1b4790287aa9974c1686f5ad77 | 2,151 | py | Python | dataset/transforms/builder.py | TencentYoutuResearch/Classification-SemiCLS | ceb5546f8d8ba08e18de3b5d9426e6cda177e55e | [
"Apache-2.0"
] | 4 | 2022-03-23T05:16:15.000Z | 2022-03-31T07:33:26.000Z | dataset/transforms/builder.py | TencentYoutuResearch/Classification-SemiCLS | ceb5546f8d8ba08e18de3b5d9426e6cda177e55e | [
"Apache-2.0"
] | 2 | 2022-03-23T08:21:13.000Z | 2022-03-30T04:24:53.000Z | dataset/transforms/builder.py | TencentYoutuResearch/Classification-SemiCLS | ceb5546f8d8ba08e18de3b5d9426e6cda177e55e | [
"Apache-2.0"
] | null | null | null | """ The Code is under Tencent Youtu Public Rule
builder for transforms
transforms from torch or home-made
"""
import copy
from torchvision import transforms
from .randaugment import RandAugmentMC
from .gaussian_blur import GaussianBlur
other_func = {"RandAugmentMC": RandAugmentMC,"GaussianBlur":GaussianBlur}
class BaseTransform(object):
""" For torch transform or self write
"""
def __init__(self, pipeline):
""" transforms for data
Args:
pipelines (list): list of dict, each dict is a transform
"""
self.pipeline = pipeline
self.transform = self.init_trans(pipeline)
class ListTransform(BaseTransform):
""" For torch transform or self write
"""
def __init__(self, pipelines):
""" transforms for data
Args:
pipelines (list): list of dict, each dict is a transform
"""
self.pipelines = pipelines
self.transforms = []
for trans_dict in self.pipelines:
self.transforms.append(self.init_trans(trans_dict))
| 28.302632 | 73 | 0.654579 | """ The Code is under Tencent Youtu Public Rule
builder for transforms
transforms from torch or home-made
"""
import copy
from torchvision import transforms
from .randaugment import RandAugmentMC
from .gaussian_blur import GaussianBlur
other_func = {"RandAugmentMC": RandAugmentMC,"GaussianBlur":GaussianBlur}
def get_trans(trans_cfg):
init_params = copy.deepcopy(trans_cfg)
type_name = init_params.pop("type")
if type_name in other_func.keys():
return other_func[type_name](**init_params)
if type_name == "RandomApply":
r_trans = []
trans_list = init_params.pop('transforms')
for trans_cfg in trans_list:
r_trans.append(get_trans(trans_cfg))
return transforms.RandomApply(r_trans, **init_params)
elif hasattr(transforms, type_name):
return getattr(transforms, type_name)(**init_params)
else:
raise NotImplementedError(
"Transform {} is unimplemented".format(trans_cfg))
class BaseTransform(object):
""" For torch transform or self write
"""
def __init__(self, pipeline):
""" transforms for data
Args:
pipelines (list): list of dict, each dict is a transform
"""
self.pipeline = pipeline
self.transform = self.init_trans(pipeline)
def init_trans(self, trans_list):
trans_funcs = []
for trans_cfg in trans_list:
trans_funcs.append(get_trans(trans_cfg))
return transforms.Compose(trans_funcs)
def __call__(self, data):
return self.transform(data)
class ListTransform(BaseTransform):
""" For torch transform or self write
"""
def __init__(self, pipelines):
""" transforms for data
Args:
pipelines (list): list of dict, each dict is a transform
"""
self.pipelines = pipelines
self.transforms = []
for trans_dict in self.pipelines:
self.transforms.append(self.init_trans(trans_dict))
def __call__(self, data):
results = []
for trans in self.transforms:
results.append(trans(data))
return results
| 984 | 0 | 104 |
03c58ee0cbb4b8189195b618627ebfee904b2c49 | 204 | py | Python | yandex/admin.py | mkbeh/multiple-fishing | 5e0c87233c0cdacb61caa01870a726d47096116f | [
"MIT"
] | null | null | null | yandex/admin.py | mkbeh/multiple-fishing | 5e0c87233c0cdacb61caa01870a726d47096116f | [
"MIT"
] | null | null | null | yandex/admin.py | mkbeh/multiple-fishing | 5e0c87233c0cdacb61caa01870a726d47096116f | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models.fakeauth import FakeAuthMail
@admin.register(FakeAuthMail)
| 22.666667 | 54 | 0.779412 | from django.contrib import admin
from .models.fakeauth import FakeAuthMail
@admin.register(FakeAuthMail)
class VictimMailAdmin(admin.ModelAdmin):
list_display = ('email', 'password', 'user_agent')
| 0 | 74 | 22 |
d003406a8dae70ef44094ac0e756847f1bec276e | 1,919 | py | Python | startServer.py | MuddSub/labelImg | b77641207329b2be08e4d71306b36efee2ac7e50 | [
"MIT"
] | 3 | 2021-01-05T02:51:09.000Z | 2022-01-30T00:53:30.000Z | startServer.py | MuddSub/labelImg | b77641207329b2be08e4d71306b36efee2ac7e50 | [
"MIT"
] | null | null | null | startServer.py | MuddSub/labelImg | b77641207329b2be08e4d71306b36efee2ac7e50 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
ssh cvteam1@134.173.43.20
cd compData
python startServer.py
"""
from http.server import HTTPServer, SimpleHTTPRequestHandler
from os import curdir
import json
from copy import deepcopy
if __name__ == '__main__':
# i.e. if this file is being run directly, not as imported module
main()
| 26.652778 | 69 | 0.620115 | #!/usr/bin/python3
"""
ssh cvteam1@134.173.43.20
cd compData
python startServer.py
"""
from http.server import HTTPServer, SimpleHTTPRequestHandler
from os import curdir
import json
from copy import deepcopy
class requestHandler(SimpleHTTPRequestHandler):
    """Serve static files via GET; accept JSON label uploads via PUT/POST.

    PUT/POST bodies must be ``application/json`` and target a ``.txt`` path;
    the 'bboxes' or 'numLabeled' field of the payload is written to that file.
    """

    def _set_headers(self):
        # 200 OK with a JSON content-type (used by HEAD).
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()

    def do_HEAD(self):
        self._set_headers()

    def do_GET(self):
        # Plain static-file serving, delegated to the base class.
        SimpleHTTPRequestHandler.do_GET(self)

    def do_PUT(self):
        """Store the JSON payload into the .txt file named by the request path."""
        # Reject anything not declared as JSON.
        if self.headers['content-type'] != 'application/json':
            self.send_response(400, 'content-type is not json')
            self.end_headers()
            return
        # Only .txt targets are accepted.
        if not self.path.endswith('.txt'):
            self.send_response(400, 'Bad url')
            self.end_headers()
            return
        target_path = curdir + self.path
        body_len = int(self.headers['content-length'])
        payload = json.loads(self.rfile.read(body_len))
        with open(target_path, 'w') as out:
            # Write whichever known field is present (bbox data or label count).
            if 'bboxes' in payload:
                out.write(payload['bboxes'])
            elif 'numLabeled' in payload:
                out.write(payload['numLabeled'])
        self.send_response(201, 'Created')
        self.end_headers()

    def do_POST(self):
        # POST behaves exactly like PUT.
        self.do_PUT()
def main():
    """Start a blocking HTTP server for label uploads on port 8080."""
    PORT = 8080
    httpd = HTTPServer(('', PORT), requestHandler) #http daemon
    print('server running on port %s' % PORT)
    httpd.serve_forever()  # blocks until the process is interrupted
if __name__ == '__main__':
# i.e. if this file is being run directly, not as imported module
main()
| 1,392 | 26 | 180 |
2ac32a412dafa9bce712cbc3d292c036f1066547 | 885 | py | Python | server/extractor/serializers.py | lucmichalski/seo-audits-toolkit | d7a81d2ac1019f60268bad40dff6b0a475f46b1a | [
"MIT"
] | null | null | null | server/extractor/serializers.py | lucmichalski/seo-audits-toolkit | d7a81d2ac1019f60268bad40dff6b0a475f46b1a | [
"MIT"
] | null | null | null | server/extractor/serializers.py | lucmichalski/seo-audits-toolkit | d7a81d2ac1019f60268bad40dff6b0a475f46b1a | [
"MIT"
] | null | null | null | from extractor.models import Extractor
from rest_framework import serializers
from datetime import datetime
from .tasks import extractor_job
from .models import Extractor
from django.utils import timezone
import pytz | 36.875 | 168 | 0.703955 | from extractor.models import Extractor
from rest_framework import serializers
from datetime import datetime
from .tasks import extractor_job
from .models import Extractor
from django.utils import timezone
import pytz
class ExtractorSerializer(serializers.ModelSerializer):
    """Serializer for Extractor jobs; creating one schedules the celery task."""

    class Meta:
        model = Extractor
        fields = ['id', 'url', 'result', 'type_audit', 'status_job', 'begin_date']

    def create(self, validated_data):
        """Schedule the extraction task, then persist a row tracking it."""
        url = validated_data["url"]
        audit_type = validated_data["type_audit"]
        # Kick off the asynchronous extraction job first ...
        task = extractor_job.delay(url, audit_type)
        # ... then store a DB record referencing the celery task id.
        return Extractor.objects.create(
            url=url,
            status_job="SCHEDULED",
            task_id=str(task.id),
            result="",
            begin_date=timezone.now(),
            type_audit=audit_type,
        )
fa94d442e625eda0e7b3b0718993bcbcb294bb1e | 1,582 | py | Python | Leetcode/Python Solutions/Dynamic Programming/IsSubsequence.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | 1 | 2020-01-06T02:21:56.000Z | 2020-01-06T02:21:56.000Z | Leetcode/Python Solutions/Dynamic Programming/IsSubsequence.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | null | null | null | Leetcode/Python Solutions/Dynamic Programming/IsSubsequence.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | 3 | 2021-02-22T17:41:01.000Z | 2022-01-13T05:03:19.000Z | """
LeetCode Problem: 392. Is Subsequence
Link: https://leetcode.com/problems/is-subsequence/
Written by: Mostofa Adib Shakib
Language: Python
"""
"""
Recursion + Memoization[Built-in function]
Time Complexity: O(n*m)
Space Complexity: O(n*m)
"""
from functools import lru_cache
"""
Dynamic Programming
Time Complexity: O(n*m)
Space Complexity: O(n*m)
"""
| 24.71875 | 90 | 0.48799 | """
LeetCode Problem: 392. Is Subsequence
Link: https://leetcode.com/problems/is-subsequence/
Written by: Mostofa Adib Shakib
Language: Python
"""
"""
Recursion + Memoization[Built-in function]
Time Complexity: O(n*m)
Space Complexity: O(n*m)
"""
from functools import lru_cache
class Solution:
    def isSubsequence(self, s: str, t: str) -> bool:
        """Return True iff s is a subsequence of t.

        Computes the LCS length of s and t with memoized recursion and
        compares it against len(s): s is a subsequence exactly when the
        whole of s survives in the common subsequence.
        """
        @lru_cache(maxsize=None)  # memoize on index pair to avoid recomputation
        def lcs(i: int, j: int) -> int:
            # LCS length of s[:i+1] and t[:j+1].
            if i < 0 or j < 0:
                return 0
            if s[i] == t[j]:
                return 1 + lcs(i - 1, j - 1)
            return max(lcs(i, j - 1), lcs(i - 1, j))

        return lcs(len(s) - 1, len(t) - 1) == len(s)
"""
Dynamic Programming
Time Complexity: O(n*m)
Space Complexity: O(n*m)
"""
class Solution:
    def isSubsequence(self, s: str, t: str) -> bool:
        """Return True iff s is a subsequence of t.

        Classic bottom-up LCS table: table[r][c] is the LCS length of
        t[:r] and s[:c].  s is a subsequence exactly when the full LCS
        length equals len(s).
        """
        cols, rows = len(s), len(t)
        table = [[0] * (cols + 1) for _ in range(rows + 1)]
        for r in range(1, rows + 1):
            for c in range(1, cols + 1):
                if s[c - 1] == t[r - 1]:
                    table[r][c] = table[r - 1][c - 1] + 1
                else:
                    table[r][c] = max(table[r - 1][c], table[r][c - 1])
        return table[rows][cols] == cols
| 1,128 | -12 | 98 |
f832979e5111d8b798e7e68dcd21027570803401 | 5,339 | py | Python | geospacelab/datahub/sources/jhuapl/dmsp/ssusi/edraur/loader.py | JouleCai/GeoSpaceLab | 6cc498d3c32501e946931de596a840c73e83edb3 | [
"BSD-3-Clause"
] | 19 | 2021-08-07T08:49:22.000Z | 2022-03-02T18:26:30.000Z | geospacelab/datahub/sources/jhuapl/dmsp/ssusi/edraur/loader.py | JouleCai/GeoSpaceLab | 6cc498d3c32501e946931de596a840c73e83edb3 | [
"BSD-3-Clause"
] | 4 | 2021-11-09T05:53:42.000Z | 2022-03-25T11:49:37.000Z | geospacelab/datahub/sources/jhuapl/dmsp/ssusi/edraur/loader.py | JouleCai/GeoSpaceLab | 6cc498d3c32501e946931de596a840c73e83edb3 | [
"BSD-3-Clause"
] | 3 | 2021-11-07T11:41:20.000Z | 2022-02-14T13:43:11.000Z | # Licensed under the BSD 3-Clause License
# Copyright (C) 2021 GeospaceLab (geospacelab)
# Author: Lei Cai, Space Physics and Astronomy, University of Oulu
import netCDF4
import datetime
import numpy as np
import geospacelab.toolbox.utilities.pydatetime as dttool
if __name__ == "__main__":
import pathlib
fp = pathlib.Path('/Users/lcai/Geospacelab/Data/JHUAPL/DMSP/SSUSI/f17/20151205/' +
'PS.APL_V0105S027CE0008_SC.U_DI.A_GP.F17-SSUSI_PA.APL-EDR-AURORA_DD.20151205_SN.46863-00_DF.NC')
loader = Loader(file_path=fp)
# if hasattr(readObj, 'pole'):
# readObj.filter_data_pole(boundinglat = 25) | 46.833333 | 118 | 0.648623 | # Licensed under the BSD 3-Clause License
# Copyright (C) 2021 GeospaceLab (geospacelab)
# Author: Lei Cai, Space Physics and Astronomy, University of Oulu
import netCDF4
import datetime
import numpy as np
import geospacelab.toolbox.utilities.pydatetime as dttool
class Loader(object):
    """Reader for a DMSP SSUSI EDR-AURORA netCDF file (one polar pass).

    On construction it immediately parses the file and fills
    ``self.variables`` with numpy arrays: spacecraft track, the gridded
    auroral emission maps for the five SSUSI channels, and the
    observed/modelled auroral oval boundaries for the selected pole.
    """
    def __init__(self, file_path, file_type='edr-aur', pole='S'):
        # file_path: path to the .NC file; pole: 'N' or 'S' hemisphere.
        self.variables = {}
        self.metadata = {}
        self.file_path = file_path
        self.file_type = file_type
        self.pole = pole
        self.load_data()
    def load_data(self):
        """Parse the netCDF file into self.variables for the chosen pole."""
        dataset = netCDF4.Dataset(self.file_path)
        variables = {}
        # Map the pole flag onto the NORTH_/SOUTH_ variable-name prefixes.
        if self.pole == 'N':
            pole = self.pole
            pole_str = 'NORTH'
        elif self.pole == 'S':
            pole = self.pole
            pole_str = 'SOUTH'
        else:
            raise ValueError
        # Time and Position
        # sectime = int(np.array(dataset.variables['TIME']).flatten()[0])
        # doy = int(np.array(dataset.variables['DOY']).flatten()[0])
        # year = int(np.array(dataset.variables['YEAR']).flatten()[0])
        # dt0 = dttool.convert_doy_to_datetime(year, doy)
        # Global attributes encode pass start/stop as "YYYYDDDHHMMSS".
        starting_time = datetime.datetime.strptime(dataset.STARTING_TIME, "%Y%j%H%M%S")
        variables['STARTING_TIME'] = starting_time
        stopping_time = datetime.datetime.strptime(dataset.STOPPING_TIME, "%Y%j%H%M%S")
        variables['STOPPING_TIME'] = stopping_time
        dt0 = dttool.get_start_of_the_day(starting_time)
        # Spacecraft geographic track.
        variables['SC_LAT'] = np.array(dataset.variables['LATITUDE'])
        variables['SC_LON'] = np.array(dataset.variables['LONGITUDE'])
        variables['SC_ALT'] = np.array(dataset.variables['ALTITUDE'])
        # Geomagnetic grid for the emission maps.
        variables['GRID_MLAT'] = np.array(dataset.variables['LATITUDE_GEOMAGNETIC_GRID_MAP'])
        variables['GRID_MLON'] = np.array(
            dataset.variables['LONGITUDE_GEOMAGNETIC_' + pole_str + '_GRID_MAP'])
        variables['GRID_MLT'] = np.array(dataset.variables['MLT_GRID_MAP'])
        # The file stores southern latitudes as positive; negate for 'S'.
        if self.pole == 'S':
            variables['GRID_MLAT'] = - variables['GRID_MLAT']
        variables['GRID_UT'] = np.array(dataset.variables['UT_' + pole])
        # UT==0 marks cells with no data: blank those out, then pick the
        # grid cell of extreme |mlat| as a representative mid-pass time.
        lat = np.array(variables['GRID_MLAT'])
        ut = np.array(variables['GRID_UT'])
        lat = np.where(ut == 0, np.nan, lat)
        if self.pole == 'N':
            ind_mid_t = np.where(lat == np.nanmax(lat.flatten()))
        else:
            ind_mid_t = np.where(lat == np.nanmin(lat.flatten()))
        # GRID_UT appears to be in decimal hours (converted here via *3600).
        sectime0 = variables['GRID_UT'][ind_mid_t][0] * 3600
        variables['DATETIME'] = dt0 + datetime.timedelta(seconds=int(sectime0))
        invalid_ut_inds = np.where(ut == 0)
        # Auroral map, #colors: 0: '1216', 1: '1304', 2: '1356', 3: 'LBHS', 4: 'LBHL'.
        variables['EMISSION_SPECTRA'] = ['1216', '1304', '1356', 'LBHS', 'LBHL']
        disk_aur = np.array(dataset.variables['DISK_RADIANCEDATA_INTENSITY_' + pole_str])
        # disk_aur[:, invalid_ut_inds] = np.nan
        # Clamp non-positive radiances to 0.1 (presumably so log-scale plots
        # don't blow up -- TODO confirm); mask the no-data cells with NaN.
        disk_aur[disk_aur <= 0] = 0.1
        variables['GRID_AUR_1216'] = disk_aur[0, ::]
        variables['GRID_AUR_1216'][invalid_ut_inds] = np.nan
        variables['GRID_AUR_1304'] = disk_aur[1, ::]
        variables['GRID_AUR_1304'][invalid_ut_inds] = np.nan
        variables['GRID_AUR_1356'] = disk_aur[2, ::]
        variables['GRID_AUR_1356'][invalid_ut_inds] = np.nan
        variables['GRID_AUR_LBHS'] = disk_aur[3, ::]
        variables['GRID_AUR_LBHS'][invalid_ut_inds] = np.nan
        variables['GRID_AUR_LBHL'] = disk_aur[4, ::]
        variables['GRID_AUR_LBHL'][invalid_ut_inds] = np.nan
        # Auroral oval boundary
        # Observed equatorward (EQ) and poleward (PL) boundaries ...
        variables['AOB_EQ_MLAT'] = np.array(dataset.variables[pole_str + '_GEOMAGNETIC_LATITUDE'])
        variables['AOB_EQ_MLON'] = np.array(dataset.variables[pole_str + '_GEOMAGNETIC_LONGITUDE'])
        variables['AOB_EQ_MLT'] = np.array(dataset.variables[pole_str + '_MAGNETIC_LOCAL_TIME'])
        variables['AOB_PL_MLAT'] = np.array(dataset.variables[pole_str + '_POLAR_GEOMAGNETIC_LATITUDE'])
        variables['AOB_PL_MLON'] = np.array(dataset.variables[pole_str + '_POLAR_GEOMAGNETIC_LONGITUDE'])
        variables['AOB_PL_MLT'] = np.array(dataset.variables[pole_str + '_POLAR_MAGNETIC_LOCAL_TIME'])
        # ... and the model (MODEL_*) counterparts.
        variables['MAOB_EQ_MLAT'] = np.array(dataset.variables['MODEL_' + pole_str + '_GEOMAGNETIC_LATITUDE'])
        variables['MAOB_EQ_MLON'] = np.array(dataset.variables['MODEL_' + pole_str + '_GEOMAGNETIC_LONGITUDE'])
        variables['MAOB_EQ_MLT'] = np.array(dataset.variables['MODEL_' + pole_str + '_MAGNETIC_LOCAL_TIME'])
        variables['MAOB_PL_MLAT'] = np.array(dataset.variables['MODEL_' + pole_str + '_POLAR_GEOMAGNETIC_LATITUDE'])
        variables['MAOB_PL_MLON'] = np.array(dataset.variables['MODEL_' + pole_str + '_POLAR_GEOMAGNETIC_LONGITUDE'])
        variables['MAOB_PL_MLT'] = np.array(dataset.variables['MODEL_' + pole_str + '_POLAR_MAGNETIC_LOCAL_TIME'])
        dataset.close()
        self.variables = variables
if __name__ == "__main__":
    # Ad-hoc smoke test: load one sample pass from a developer-local path.
    import pathlib
    fp = pathlib.Path('/Users/lcai/Geospacelab/Data/JHUAPL/DMSP/SSUSI/f17/20151205/' +
                      'PS.APL_V0105S027CE0008_SC.U_DI.A_GP.F17-SSUSI_PA.APL-EDR-AURORA_DD.20151205_SN.46863-00_DF.NC')
    loader = Loader(file_path=fp)
    # if hasattr(readObj, 'pole'):
    #     readObj.filter_data_pole(boundinglat = 25)
e0b61c3b9fecec287da5c0f5895fe0dc1c55db51 | 153 | py | Python | src/kaggle_web_traffic_forecasting/__init__.py | DSmmartin/KaggleTimeSeries | 54a6627487070b28b2eea4e27bcb35c48acedf19 | [
"MIT"
] | null | null | null | src/kaggle_web_traffic_forecasting/__init__.py | DSmmartin/KaggleTimeSeries | 54a6627487070b28b2eea4e27bcb35c48acedf19 | [
"MIT"
] | null | null | null | src/kaggle_web_traffic_forecasting/__init__.py | DSmmartin/KaggleTimeSeries | 54a6627487070b28b2eea4e27bcb35c48acedf19 | [
"MIT"
] | null | null | null | """ My init to load all main process around my ML project """
from .spark_manage import spark_start
from .data_processing import dataset_train_transpose | 38.25 | 61 | 0.810458 | """ My init to load all main process around my ML project """
from .spark_manage import spark_start
from .data_processing import dataset_train_transpose | 0 | 0 | 0 |
d9bd4c79b64fa98e00b319bfb3427cb6b71d06ab | 21,549 | py | Python | conans/test/unittests/model/build_info/components_test.py | ninjayash/conan | 00fbc925fde93a148abfbcebf236c6b4f2da0572 | [
"MIT"
] | 1 | 2020-11-07T21:25:57.000Z | 2020-11-07T21:25:57.000Z | conans/test/unittests/model/build_info/components_test.py | ttencate/conan | 3dc4fb35cc3be9865f0ae480c89e6a58813d5076 | [
"MIT"
] | null | null | null | conans/test/unittests/model/build_info/components_test.py | ttencate/conan | 3dc4fb35cc3be9865f0ae480c89e6a58813d5076 | [
"MIT"
] | null | null | null | # coding=utf-8
import os
import unittest
import six
from conans.errors import ConanException
from conans.model.build_info import CppInfo, DepsCppInfo, DepCppInfo
from conans.test.utils.test_files import temp_folder
from conans.util.files import save
| 51.676259 | 101 | 0.62388 | # coding=utf-8
import os
import unittest
import six
from conans.errors import ConanException
from conans.model.build_info import CppInfo, DepsCppInfo, DepCppInfo
from conans.test.utils.test_files import temp_folder
from conans.util.files import save
class CppInfoComponentsTest(unittest.TestCase):
def test_components_set(self):
cpp_info = CppInfo("", "root_folder")
cpp_info.components["liba"].name = "LIBA"
cpp_info.components["libb"].includedirs.append("includewhat")
cpp_info.components["libc"].libs.append("thelibc")
self.assertListEqual(list(cpp_info.components.keys()), ["liba", "libb", "libc"])
self.assertEqual(cpp_info.components["liba"].get_name("any"), "LIBA")
self.assertListEqual(cpp_info.components["libb"].includedirs, ["include", "includewhat"])
self.assertListEqual(cpp_info.components["libc"].libs, ["thelibc"])
def test_no_components_inside_components(self):
cpp_info = CppInfo("", "root_folder")
cpp_info.components["liba"].name = "LIBA"
with self.assertRaises(AttributeError):
_ = cpp_info.components["libb"].components
def test_deps_cpp_info_libs(self):
deps_cpp_info = DepsCppInfo()
dep1 = CppInfo("dep1", "root")
dep1.components["liba"].libs.append("liba")
dep1.components["libb"].libs.append("libb")
deps_cpp_info.add("dep1", DepCppInfo(dep1))
dep2 = CppInfo("dep2", "root")
dep2.components["libc"].libs.append("libc")
dep2.components["libd"].libs.append("libd")
deps_cpp_info.add("dep2", DepCppInfo(dep2))
dep3 = CppInfo("dep3", "root")
dep3.libs.append("libdep3")
deps_cpp_info.add("dep3", DepCppInfo(dep3))
self.assertListEqual(["liba", "libb"], deps_cpp_info["dep1"].libs)
self.assertListEqual(["libc", "libd"], deps_cpp_info["dep2"].libs)
self.assertListEqual(["libdep3"], deps_cpp_info["dep3"].libs)
self.assertListEqual(["liba", "libb", "libc", "libd", "libdep3"],
list(deps_cpp_info.libs))
def test_deps_cpp_info_paths(self):
deps_cpp_info = DepsCppInfo()
folder1 = temp_folder()
dep1 = CppInfo("dep1", folder1)
os.mkdir(os.path.join(folder1, "include"))
os.mkdir(os.path.join(folder1, "includea"))
os.mkdir(os.path.join(folder1, "includeb"))
dep1.components["liba"].includedirs.append("includea")
dep1.components["libb"].includedirs.append("includeb")
deps_cpp_info.add("dep1", DepCppInfo(dep1))
folder2 = temp_folder()
dep2 = CppInfo("dep2", folder2)
os.mkdir(os.path.join(folder2, "include"))
os.mkdir(os.path.join(folder2, "includec"))
os.mkdir(os.path.join(folder2, "included"))
dep2.components["libc"].includedirs.append("includec")
dep2.components["libd"].includedirs.append("included")
deps_cpp_info.add("dep2", DepCppInfo(dep2))
self.assertListEqual([os.path.join(folder1, "include"), os.path.join(folder1, "includea"),
os.path.join(folder1, "includeb")],
list(deps_cpp_info["dep1"].include_paths))
self.assertListEqual([os.path.join(folder2, "include"), os.path.join(folder2, "includec"),
os.path.join(folder2, "included")],
list(deps_cpp_info["dep2"].include_paths))
self.assertListEqual([os.path.join(folder1, "include"), os.path.join(folder1, "includea"),
os.path.join(folder1, "includeb"), os.path.join(folder2, "include"),
os.path.join(folder2, "includec"), os.path.join(folder2, "included")],
list(deps_cpp_info.include_paths))
def test_deps_cpp_info_libs_defines_flags(self):
deps_cpp_info = DepsCppInfo()
dep1 = CppInfo("dep1", "root")
dep1.components["liba"].libs.append("liba")
dep1.components["liba"].defines.append("DEFINEA")
dep1.components["liba"].system_libs.append("sysa")
dep1.components["liba"].cxxflags.append("cxxflaga")
dep1.components["liba"].cflags.append("cflaga")
dep1.components["liba"].sharedlinkflags.append("slinka")
dep1.components["liba"].frameworks.append("frameworka")
dep1.components["liba"].exelinkflags.append("elinka")
dep1.components["libb"].libs.append("libb")
dep1.components["libb"].defines.append("DEFINEB")
dep1.components["libb"].system_libs.append("sysb")
dep1.components["libb"].cxxflags.append("cxxflagb")
dep1.components["libb"].cflags.append("cflagb")
dep1.components["libb"].sharedlinkflags.append("slinkb")
dep1.components["libb"].frameworks.append("frameworkb")
dep1.components["libb"].exelinkflags.append("elinkb")
deps_cpp_info.add("dep1", DepCppInfo(dep1))
dep2 = CppInfo("dep2", "root")
dep2.components["libc"].libs.append("libc")
dep2.components["libd"].libs.append("libd")
dep2.components["systemlib"].system_libs = ["systemlib"]
dep2.components["libc"].cxxflags = ["cxxflagc"]
dep2.components["libd"].cflags = ["cflagd"]
dep2.components["libc"].sharedlinkflags = ["slinkc"]
dep2.components["libd"].sharedlinkflags = ["slinkd"]
deps_cpp_info.add("dep2", DepCppInfo(dep2))
self.assertListEqual(["liba", "libb"], deps_cpp_info["dep1"].libs)
self.assertListEqual(["libc", "libd"], deps_cpp_info["dep2"].libs)
self.assertListEqual(["liba", "libb", "libc", "libd"], list(deps_cpp_info.libs))
self.assertListEqual(["DEFINEA", "DEFINEB"], deps_cpp_info["dep1"].defines)
self.assertListEqual(["DEFINEA", "DEFINEB"], list(deps_cpp_info.defines))
self.assertListEqual(["sysa", "sysb"], deps_cpp_info["dep1"].system_libs)
self.assertListEqual(["systemlib"], deps_cpp_info["dep2"].system_libs)
self.assertListEqual(["sysa", "sysb", "systemlib"], list(deps_cpp_info.system_libs))
self.assertListEqual(["cxxflaga", "cxxflagb"], deps_cpp_info["dep1"].cxxflags)
self.assertListEqual(["cxxflagc"], deps_cpp_info["dep2"].cxxflags)
self.assertListEqual(["cxxflagc", "cxxflaga", "cxxflagb"], list(deps_cpp_info.cxxflags))
self.assertListEqual(["cflaga", "cflagb"], deps_cpp_info["dep1"].cflags)
self.assertListEqual(["cflagd"], deps_cpp_info["dep2"].cflags)
self.assertListEqual(["cflagd", "cflaga", "cflagb"], list(deps_cpp_info.cflags))
self.assertListEqual(["slinka", "slinkb"], deps_cpp_info["dep1"].sharedlinkflags)
self.assertListEqual(["slinkc", "slinkd"], deps_cpp_info["dep2"].sharedlinkflags)
self.assertListEqual(["slinkc", "slinkd", "slinka", "slinkb"],
list(deps_cpp_info.sharedlinkflags))
self.assertListEqual(["frameworka", "frameworkb"], deps_cpp_info["dep1"].frameworks)
self.assertListEqual(["frameworka", "frameworkb"], list(deps_cpp_info.frameworks))
self.assertListEqual(["elinka", "elinkb"], deps_cpp_info["dep1"].exelinkflags)
self.assertListEqual([], deps_cpp_info["dep2"].exelinkflags)
self.assertListEqual(["elinka", "elinkb"], list(deps_cpp_info.exelinkflags))
def test_deps_cpp_info_libs_release_debug(self):
deps_cpp_info = DepsCppInfo()
dep1 = CppInfo("dep1", "root")
dep1.components["liba"].libs.append("liba")
with self.assertRaises(AttributeError):
dep1.release.components["libb"].libs.append("libb")
with self.assertRaises(AttributeError):
dep1.debug.components["libb"].libs.append("libb_d")
deps_cpp_info.add("dep1", DepCppInfo(dep1))
dep2 = CppInfo("dep2", "root")
dep2.release.libs.append("libdep2")
dep2.debug.libs.append("libdep2_d")
with self.assertRaises(AttributeError):
dep2.components["libc"].release.libs.append("libc")
with self.assertRaises(AttributeError):
dep2.components["libc"].debug.libs.append("libc_d")
dep2.components["libc"].libs.append("libc")
dep2.components["libc"].libs.append("libc2")
deps_cpp_info.add("dep2", DepCppInfo(dep2))
self.assertListEqual(["liba"], deps_cpp_info["dep1"].libs)
self.assertListEqual(["libc", "libc2"], deps_cpp_info["dep2"].libs)
self.assertListEqual(["liba", "libc", "libc2"], list(deps_cpp_info.libs))
self.assertListEqual([], deps_cpp_info["dep1"].release.libs)
self.assertListEqual(["libdep2"], deps_cpp_info["dep2"].release.libs)
self.assertListEqual(["libdep2"], list(deps_cpp_info.release.libs))
self.assertListEqual([], deps_cpp_info["dep1"].debug.libs)
self.assertListEqual(["libdep2_d"], deps_cpp_info["dep2"].debug.libs)
self.assertListEqual(["libdep2_d"], list(deps_cpp_info.debug.libs))
def cpp_info_link_order_test(self):
def _assert_link_order(sorted_libs):
"""
Assert that dependent libs of a component are always found later in the list
"""
assert sorted_libs, "'sorted_libs' is empty"
for num, lib in enumerate(sorted_libs):
component_name = lib[-1]
for dep in info.components[component_name].requires:
for comp_lib in info.components[dep].libs:
self.assertIn(comp_lib, sorted_libs[num:])
info = CppInfo("dep1", "")
info.components["6"].libs = ["lib6"]
info.components["6"].requires = ["4", "5"]
info.components["5"].libs = ["lib5"]
info.components["5"].requires = ["2"]
info.components["4"].libs = ["lib4"]
info.components["4"].requires = ["1"]
info.components["3"].libs = ["lib3"]
info.components["3"].requires = ["1"]
info.components["1"].libs = ["lib1"]
info.components["1"].requires = ["2"]
info.components["2"].libs = ["lib2"]
info.components["2"].requires = []
dep_cpp_info = DepCppInfo(info)
_assert_link_order(dep_cpp_info.libs)
self.assertEqual(["lib6", "lib5", "lib4", "lib3", "lib1", "lib2"], dep_cpp_info.libs)
deps_cpp_info = DepsCppInfo()
deps_cpp_info.add("dep1", dep_cpp_info)
self.assertEqual(["lib6", "lib5", "lib4", "lib3", "lib1", "lib2"], list(deps_cpp_info.libs))
info = CppInfo("dep2", "")
info.components["K"].libs = ["libK"]
info.components["K"].requires = ["G", "H"]
info.components["J"].libs = ["libJ"]
info.components["J"].requires = ["F"]
info.components["G"].libs = ["libG"]
info.components["G"].requires = ["F"]
info.components["H"].libs = ["libH"]
info.components["H"].requires = ["F", "E"]
info.components["L"].libs = ["libL"]
info.components["L"].requires = ["I"]
info.components["F"].libs = ["libF"]
info.components["F"].requires = ["C", "D"]
info.components["I"].libs = ["libI"]
info.components["I"].requires = ["E"]
info.components["C"].libs = ["libC"]
info.components["C"].requires = ["A"]
info.components["D"].libs = ["libD"]
info.components["D"].requires = ["A"]
info.components["E"].libs = ["libE"]
info.components["E"].requires = ["A", "B"]
info.components["A"].libs = ["libA"]
info.components["A"].requires = []
info.components["B"].libs = ["libB"]
info.components["B"].requires = []
dep_cpp_info = DepCppInfo(info)
_assert_link_order(dep_cpp_info.libs)
self.assertEqual(["libK", "libJ", "libG", "libH", "libL", "libF", "libI", "libC", "libD",
"libE", "libA", "libB"], dep_cpp_info.libs)
deps_cpp_info.add("dep2", dep_cpp_info)
self.assertEqual(["lib6", "lib5", "lib4", "lib3", "lib1", "lib2", "libK", "libJ", "libG",
"libH", "libL", "libF", "libI", "libC", "libD", "libE", "libA", "libB"],
list(deps_cpp_info.libs))
def cppinfo_inexistent_component_dep_test(self):
info = CppInfo("", None)
info.components["LIB1"].requires = ["LIB2"]
with six.assertRaisesRegex(self, ConanException, "Component 'LIB1' required components "
"not found in this package: 'LIB2'"):
_ = DepCppInfo(info).libs
info.components["LIB1"].requires = ["::LIB2"]
with six.assertRaisesRegex(self, ConanException, "Leading character '::' not allowed in "
"LIB1 requires"):
_ = DepCppInfo(info).libs
def cpp_info_components_requires_loop_test(self):
info = CppInfo("", "")
info.components["LIB1"].requires = ["LIB1"]
msg = "There is a dependency loop in 'self.cpp_info.components' requires"
with six.assertRaisesRegex(self, ConanException, msg):
_ = DepCppInfo(info).libs
info = CppInfo("", "")
info.components["LIB1"].requires = ["LIB2"]
info.components["LIB2"].requires = ["LIB1", "LIB2"]
with six.assertRaisesRegex(self, ConanException, msg):
_ = DepCppInfo(info).build_paths
info = CppInfo("", "")
info.components["LIB1"].requires = ["LIB2"]
info.components["LIB2"].requires = ["LIB3"]
info.components["LIB3"].requires = ["LIB1"]
with six.assertRaisesRegex(self, ConanException, msg):
_ = DepCppInfo(info).defines
    def components_libs_order_test(self):
        """Libs keep declaration order; requires push dependents before dependencies."""
        # Scenario 1: no requires -> declaration order is preserved
        info = CppInfo("dep1", "")
        info.components["liba"].libs = ["liba"]
        info.components["libb"].libs = ["libb"]
        dep_cpp_info = DepCppInfo(info)
        self.assertListEqual(["liba", "libb"], dep_cpp_info.libs)
        deps_cpp_info = DepsCppInfo()
        deps_cpp_info.add("dep1", dep_cpp_info)
        self.assertListEqual(["liba", "libb"], deps_cpp_info["dep1"].libs)
        self.assertListEqual(["liba", "libb"], list(deps_cpp_info.libs))
        # Scenario 2: two packages added in reverse order -> insertion order wins
        info = CppInfo("dep1", "")
        info.components["liba"].libs = ["liba"]
        info.components["libb"].libs = ["libb"]
        dep_cpp_info = DepCppInfo(info)
        info2 = CppInfo("dep2", "")
        info2.components["libc"].libs = ["libc"]
        dep_cpp_info2 = DepCppInfo(info2)
        deps_cpp_info = DepsCppInfo()
        # Update in reverse order
        deps_cpp_info.add("dep2", dep_cpp_info2)
        deps_cpp_info.add("dep1", dep_cpp_info)
        self.assertListEqual(["liba", "libb"], deps_cpp_info["dep1"].libs)
        self.assertListEqual(["libc"], deps_cpp_info["dep2"].libs)
        self.assertListEqual(["libc", "liba", "libb"], list(deps_cpp_info.libs))
        # Scenario 3: libb requires liba -> libb is linked first
        info = CppInfo("dep1", "")
        info.components["liba"].libs = ["liba"]
        info.components["libb"].libs = ["libb"]
        info.components["libb"].requires = ["liba"]
        dep_cpp_info = DepCppInfo(info)
        self.assertListEqual(["libb", "liba"], dep_cpp_info.libs)
        deps_cpp_info = DepsCppInfo()
        deps_cpp_info.add("dep1", dep_cpp_info)
        self.assertListEqual(["libb", "liba"], deps_cpp_info["dep1"].libs)
        self.assertListEqual(["libb", "liba"], list(deps_cpp_info.libs))
        # Scenario 4: same requires ordering combined with two packages
        info = CppInfo("dep1", "")
        info.components["liba"].libs = ["liba"]
        info.components["libb"].libs = ["libb"]
        info.components["libb"].requires = ["liba"]
        dep_cpp_info = DepCppInfo(info)
        info2 = CppInfo("dep2", "")
        info2.components["libc"].libs = ["libc"]
        dep_cpp_info2 = DepCppInfo(info2)
        deps_cpp_info = DepsCppInfo()
        # Update in reverse order
        deps_cpp_info.add("dep2", dep_cpp_info2)
        deps_cpp_info.add("dep1", dep_cpp_info)
        self.assertListEqual(["libb", "liba"], deps_cpp_info["dep1"].libs)
        self.assertListEqual(["libc"], deps_cpp_info["dep2"].libs)
        self.assertListEqual(["libc", "libb", "liba"], list(deps_cpp_info.libs))
    def cppinfo_components_dirs_test(self):
        """Per-component dirs can be assigned, reassigned, extended and appended."""
        folder = temp_folder()
        info = CppInfo("OpenSSL", folder)
        info.components["OpenSSL"].includedirs = ["include"]
        info.components["OpenSSL"].libdirs = ["lib"]
        info.components["OpenSSL"].builddirs = ["build"]
        info.components["OpenSSL"].bindirs = ["bin"]
        info.components["OpenSSL"].resdirs = ["res"]
        info.components["Crypto"].includedirs = ["headers"]
        info.components["Crypto"].libdirs = ["libraries"]
        info.components["Crypto"].builddirs = ["build_scripts"]
        info.components["Crypto"].bindirs = ["binaries"]
        info.components["Crypto"].resdirs = ["resources"]
        # Each component keeps its own values, independent of its siblings
        self.assertEqual(["include"], info.components["OpenSSL"].includedirs)
        self.assertEqual(["lib"], info.components["OpenSSL"].libdirs)
        self.assertEqual(["build"], info.components["OpenSSL"].builddirs)
        self.assertEqual(["bin"], info.components["OpenSSL"].bindirs)
        self.assertEqual(["res"], info.components["OpenSSL"].resdirs)
        self.assertEqual(["headers"], info.components["Crypto"].includedirs)
        self.assertEqual(["libraries"], info.components["Crypto"].libdirs)
        self.assertEqual(["build_scripts"], info.components["Crypto"].builddirs)
        self.assertEqual(["binaries"], info.components["Crypto"].bindirs)
        self.assertEqual(["resources"], info.components["Crypto"].resdirs)
        # Reassignment replaces, not merges, the previous values
        info.components["Crypto"].includedirs = ["different_include"]
        info.components["Crypto"].libdirs = ["different_lib"]
        info.components["Crypto"].builddirs = ["different_build"]
        info.components["Crypto"].bindirs = ["different_bin"]
        info.components["Crypto"].resdirs = ["different_res"]
        self.assertEqual(["different_include"], info.components["Crypto"].includedirs)
        self.assertEqual(["different_lib"], info.components["Crypto"].libdirs)
        self.assertEqual(["different_build"], info.components["Crypto"].builddirs)
        self.assertEqual(["different_bin"], info.components["Crypto"].bindirs)
        self.assertEqual(["different_res"], info.components["Crypto"].resdirs)
        # In-place list mutation (extend/append) is also supported
        info.components["Crypto"].includedirs.extend(["another_include"])
        info.components["Crypto"].includedirs.append("another_other_include")
        info.components["Crypto"].libdirs.extend(["another_lib"])
        info.components["Crypto"].libdirs.append("another_other_lib")
        info.components["Crypto"].builddirs.extend(["another_build"])
        info.components["Crypto"].builddirs.append("another_other_build")
        info.components["Crypto"].bindirs.extend(["another_bin"])
        info.components["Crypto"].bindirs.append("another_other_bin")
        info.components["Crypto"].resdirs.extend(["another_res"])
        info.components["Crypto"].resdirs.append("another_other_res")
        self.assertEqual(["different_include", "another_include", "another_other_include"],
                         info.components["Crypto"].includedirs)
        self.assertEqual(["different_lib", "another_lib", "another_other_lib"],
                         info.components["Crypto"].libdirs)
        self.assertEqual(["different_build", "another_build", "another_other_build"],
                         info.components["Crypto"].builddirs)
        self.assertEqual(["different_bin", "another_bin", "another_other_bin"],
                         info.components["Crypto"].bindirs)
        self.assertEqual(["different_res", "another_res", "another_other_res"],
                         info.components["Crypto"].resdirs)
    def component_default_dirs_deps_cpp_info_test(self):
        """Default component dirs surface as absolute paths through DepsCppInfo."""
        folder = temp_folder()
        info = CppInfo("my_lib", folder)
        info.components["Component"].filter_empty = False  # For testing purposes
        dep_info = DepCppInfo(info)
        deps_cpp_info = DepsCppInfo()
        deps_cpp_info.add("my_lib", dep_info)
        # Defaults: include/lib/bin/res/Frameworks subfolders, builddirs is the root
        self.assertListEqual([os.path.join(folder, "include")], list(deps_cpp_info.includedirs))
        self.assertListEqual([], list(deps_cpp_info.srcdirs))
        self.assertListEqual([os.path.join(folder, "lib")], list(deps_cpp_info.libdirs))
        self.assertListEqual([os.path.join(folder, "bin")], list(deps_cpp_info.bindirs))
        self.assertListEqual([os.path.join(folder, "")], list(deps_cpp_info.builddirs))
        self.assertListEqual([os.path.join(folder, "res")], list(deps_cpp_info.resdirs))
        self.assertListEqual([os.path.join(folder, "Frameworks")], list(deps_cpp_info.frameworkdirs))
    def deps_cpp_info_components_test(self):
        """Component libs and include paths propagate into the aggregated DepsCppInfo."""
        folder = temp_folder()
        info = CppInfo("my_lib", folder)
        # Create file so path is not cleared
        save(os.path.join(folder, "include", "my_file.h"), "")
        info.components["Component"].libs = ["libcomp"]
        dep_info = DepCppInfo(info)
        deps_cpp_info = DepsCppInfo()
        deps_cpp_info.add("my_lib", dep_info)
        self.assertListEqual(["libcomp"], list(deps_cpp_info.libs))
        self.assertListEqual(["libcomp"], deps_cpp_info["my_lib"].components["Component"].libs)
        # Include path is absolute, both aggregated and per-component
        self.assertListEqual([os.path.join(folder, "include")], list(deps_cpp_info.include_paths))
        self.assertListEqual([os.path.join(folder, "include")],
                             list(deps_cpp_info["my_lib"].components["Component"].include_paths))
| 20,896 | 26 | 374 |
2d71e7d9c1a86726265aa5646e677037196bd2a0 | 2,596 | py | Python | release/stubs.min/Wms/RemotingImplementation/Activities/Renderers.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | release/stubs.min/Wms/RemotingImplementation/Activities/Renderers.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | release/stubs.min/Wms/RemotingImplementation/Activities/Renderers.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | # encoding: utf-8
# module Wms.RemotingImplementation.Activities.Renderers calls itself Renderers
# from Wms.RemotingImplementation,Version=1.23.1.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no important
from __init__ import *
# no functions
# classes
# NOTE(review): auto-generated IronPython stub; method bodies are placeholders
# and the docstrings carry the real .NET signatures. Do not hand-edit logic.
class MobileProgressBarRenderer(object):
 """ MobileProgressBarRenderer(current: Decimal,total: Decimal,title: str,enableDetails: bool,progressColor: str) """
 def ZZZ(self):
  """hardcoded/mock instance of the class"""
  return MobileProgressBarRenderer()
 instance=ZZZ()
 """hardcoded/returns an instance of the class"""
 def Dispose(self):
  """ Dispose(self: MobileProgressBarRenderer) """
  pass
 def Render(self):
  """ Render(self: MobileProgressBarRenderer) -> Array[Byte] """
  pass
 def __enter__(self,*args):
  """ __enter__(self: IDisposable) -> object """
  pass
 def __exit__(self,*args):
  """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 @staticmethod
 def __new__(self,current,total,title,enableDetails,progressColor):
  """ __new__(cls: type,current: Decimal,total: Decimal,title: str,enableDetails: bool,progressColor: str) """
  pass
 def __repr__(self,*args):
  """ __repr__(self: object) -> str """
  pass
 Current=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Current(self: MobileProgressBarRenderer) -> Decimal
Set: Current(self: MobileProgressBarRenderer)=value
"""
 EnableDetails=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EnableDetails(self: MobileProgressBarRenderer) -> bool
Set: EnableDetails(self: MobileProgressBarRenderer)=value
"""
 ProgressColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ProgressColor(self: MobileProgressBarRenderer) -> str
Set: ProgressColor(self: MobileProgressBarRenderer)=value
"""
 Title=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Title(self: MobileProgressBarRenderer) -> str
Set: Title(self: MobileProgressBarRenderer)=value
"""
 Total=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Total(self: MobileProgressBarRenderer) -> Decimal
Set: Total(self: MobileProgressBarRenderer)=value
"""
| 35.561644 | 215 | 0.720339 | # encoding: utf-8
# module Wms.RemotingImplementation.Activities.Renderers calls itself Renderers
# from Wms.RemotingImplementation,Version=1.23.1.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no important
from __init__ import *
# no functions
# classes
# NOTE(review): auto-generated IronPython stub; method bodies are placeholders
# and the docstrings carry the real .NET signatures. Do not hand-edit logic.
class MobileProgressBarRenderer(object):
 """ MobileProgressBarRenderer(current: Decimal,total: Decimal,title: str,enableDetails: bool,progressColor: str) """
 def ZZZ(self):
  """hardcoded/mock instance of the class"""
  return MobileProgressBarRenderer()
 instance=ZZZ()
 """hardcoded/returns an instance of the class"""
 def Dispose(self):
  """ Dispose(self: MobileProgressBarRenderer) """
  pass
 def Render(self):
  """ Render(self: MobileProgressBarRenderer) -> Array[Byte] """
  pass
 def __enter__(self,*args):
  """ __enter__(self: IDisposable) -> object """
  pass
 def __exit__(self,*args):
  """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 @staticmethod
 def __new__(self,current,total,title,enableDetails,progressColor):
  """ __new__(cls: type,current: Decimal,total: Decimal,title: str,enableDetails: bool,progressColor: str) """
  pass
 def __repr__(self,*args):
  """ __repr__(self: object) -> str """
  pass
 Current=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Current(self: MobileProgressBarRenderer) -> Decimal
Set: Current(self: MobileProgressBarRenderer)=value
"""
 EnableDetails=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: EnableDetails(self: MobileProgressBarRenderer) -> bool
Set: EnableDetails(self: MobileProgressBarRenderer)=value
"""
 ProgressColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: ProgressColor(self: MobileProgressBarRenderer) -> str
Set: ProgressColor(self: MobileProgressBarRenderer)=value
"""
 Title=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Title(self: MobileProgressBarRenderer) -> str
Set: Title(self: MobileProgressBarRenderer)=value
"""
 Total=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Total(self: MobileProgressBarRenderer) -> Decimal
Set: Total(self: MobileProgressBarRenderer)=value
"""
| 0 | 0 | 0 |
913fccc0de77123589023d5f87c6e399d7e50a20 | 7,384 | py | Python | tools/ports/regal.py | undeadinu/emscripten | e9d8b5c4b62c2b27b92cb7635590238a39310aa0 | [
"MIT"
] | 1 | 2019-01-06T09:32:11.000Z | 2019-01-06T09:32:11.000Z | tools/ports/regal.py | undeadinu/emscripten | e9d8b5c4b62c2b27b92cb7635590238a39310aa0 | [
"MIT"
] | null | null | null | tools/ports/regal.py | undeadinu/emscripten | e9d8b5c4b62c2b27b92cb7635590238a39310aa0 | [
"MIT"
] | null | null | null | # Copyright 2015 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import os, shutil, logging, subprocess, sys, stat
TAG = 'version_4'
| 43.692308 | 149 | 0.551598 | # Copyright 2015 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import os, shutil, logging, subprocess, sys, stat
TAG = 'version_4'
def get(ports, settings, shared):
if settings.USE_REGAL == 1:
ports.fetch_project('regal', 'https://github.com/emscripten-ports/regal/archive/' + TAG + '.zip',
'regal-' + TAG)
def create():
logging.info('building port: regal')
ports.clear_project_build('regal')
# copy sources
# only what is needed is copied: regal, md5, jsonsl, boost, lookup3,
# Note: GLSL Optimizer is included (needed for headers) but not built
source_path_src = os.path.join(ports.get_dir(), 'regal', 'regal-' + TAG, 'src')
dest_path_src = os.path.join(ports.get_build_dir(), 'regal', 'src')
source_path_regal = os.path.join(source_path_src, 'regal')
source_path_md5 = os.path.join(source_path_src, 'md5')
source_path_jsonsl = os.path.join(source_path_src, 'jsonsl')
source_path_boost = os.path.join(source_path_src, 'boost')
source_path_lookup3 = os.path.join(source_path_src, 'lookup3')
source_path_glslopt = os.path.join(source_path_src, 'glsl')
dest_path_regal = os.path.join(dest_path_src, 'regal')
dest_path_md5 = os.path.join(dest_path_src, 'md5')
dest_path_jsonsl = os.path.join(dest_path_src, 'jsonsl')
dest_path_boost = os.path.join(dest_path_src, 'boost')
dest_path_lookup3 = os.path.join(dest_path_src, 'lookup3')
dest_path_glslopt = os.path.join(dest_path_src, 'glsl')
shutil.rmtree(dest_path_src, ignore_errors=True)
shutil.copytree(source_path_regal, dest_path_regal)
shutil.copytree(source_path_md5, dest_path_md5)
shutil.copytree(source_path_jsonsl, dest_path_jsonsl)
shutil.copytree(source_path_boost, dest_path_boost)
shutil.copytree(source_path_lookup3, dest_path_lookup3)
shutil.copytree(source_path_glslopt, dest_path_glslopt)
# includes
source_path_include = os.path.join(ports.get_dir(), 'regal', 'regal-' + TAG, 'include')
dest_path_include = os.path.join(ports.get_build_dir(), 'regal', 'include')
shutil.copytree(source_path_include, dest_path_include)
# build
srcs_regal = ['regal/RegalShaderInstance.cpp',
'regal/RegalIff.cpp',
'regal/RegalQuads.cpp',
'regal/Regal.cpp',
'regal/RegalLog.cpp',
'regal/RegalInit.cpp',
'regal/RegalBreak.cpp',
'regal/RegalUtil.cpp',
'regal/RegalEmu.cpp',
'regal/RegalEmuInfo.cpp',
'regal/RegalFrame.cpp',
'regal/RegalHelper.cpp',
'regal/RegalMarker.cpp',
'regal/RegalTexC.cpp',
'regal/RegalCacheShader.cpp',
'regal/RegalCacheTexture.cpp',
'regal/RegalConfig.cpp',
'regal/RegalContext.cpp',
'regal/RegalContextInfo.cpp',
'regal/RegalDispatch.cpp',
'regal/RegalStatistics.cpp',
'regal/RegalLookup.cpp',
'regal/RegalPlugin.cpp',
'regal/RegalShader.cpp',
'regal/RegalToken.cpp',
'regal/RegalDispatchGlobal.cpp',
'regal/RegalDispatcher.cpp',
'regal/RegalDispatcherGL.cpp',
'regal/RegalDispatcherGlobal.cpp',
'regal/RegalDispatchEmu.cpp',
'regal/RegalDispatchGLX.cpp',
'regal/RegalDispatchLog.cpp',
'regal/RegalDispatchCode.cpp',
'regal/RegalDispatchCache.cpp',
'regal/RegalDispatchError.cpp',
'regal/RegalDispatchLoader.cpp',
'regal/RegalDispatchDebug.cpp',
'regal/RegalDispatchPpapi.cpp',
'regal/RegalDispatchStatistics.cpp',
'regal/RegalDispatchStaticES2.cpp',
'regal/RegalDispatchStaticEGL.cpp',
'regal/RegalDispatchTrace.cpp',
'regal/RegalDispatchMissing.cpp',
'regal/RegalPixelConversions.cpp',
'regal/RegalHttp.cpp',
'regal/RegalDispatchHttp.cpp',
'regal/RegalJson.cpp',
'regal/RegalFavicon.cpp',
'regal/RegalMac.cpp',
'regal/RegalSo.cpp',
'regal/RegalFilt.cpp',
'regal/RegalXfer.cpp',
'regal/RegalX11.cpp',
'regal/RegalDllMain.cpp',
'md5/src/md5.c',
'jsonsl/jsonsl.c']
commands = []
o_s = []
for src in srcs_regal:
c = os.path.join(dest_path_src, src)
o = os.path.join(dest_path_src, src + '.o')
shared.safe_ensure_dirs(os.path.dirname(o))
commands.append([shared.PYTHON, shared.EMCC, c,
# specify the defined symbols as the Regal Makefiles does for Emscripten+Release
# the define logic for other symbols will be handled automatically by Regal headers (SYS_EMSCRIPTEN, SYS_EGL, SYS_ES2, etc.)
'-DNDEBUG',
'-DREGAL_NO_PNG=1',
'-DREGAL_LOG=0',
'-DREGAL_NO_TLS=1',
'-DREGAL_THREAD_LOCKING=0',
'-DREGAL_GLSL_OPTIMIZER=0',
'-fomit-frame-pointer',
'-Wno-constant-logical-operand',
'-fvisibility=hidden',
'-O2',
'-o', o,
'-I' + dest_path_include,
'-I' + dest_path_regal,
'-I' + os.path.join(dest_path_md5, 'include'),
'-I' + dest_path_lookup3,
'-I' + dest_path_jsonsl,
'-I' + dest_path_boost,
'-I' + os.path.join(dest_path_glslopt, 'include'),
'-I' + os.path.join(dest_path_glslopt, 'src', 'glsl'),
'-I' + os.path.join(dest_path_glslopt, 'src', 'mesa'),
'-w'])
o_s.append(o)
ports.run_commands(commands)
final = os.path.join(ports.get_build_dir(), 'regal', 'libregal.bc')
shared.try_delete(final)
shared.Building.link(o_s, final)
assert os.path.exists(final)
return final
return [shared.Cache.get('regal', create, what='port')]
else:
return []
def process_dependencies(settings):
if settings.USE_REGAL == 1:
settings.FULL_ES2 = 1
def process_args(ports, args, settings, shared):
if settings.USE_REGAL == 1:
get(ports, settings, shared)
args += ['-Xclang', '-isystem' + os.path.join(ports.get_build_dir(), 'regal', 'include')]
return args
def show():
return 'regal (USE_REGAL=1; Regal license)'
| 6,968 | 0 | 92 |
a00f9789b69823cad94ca0d2ecec4ce1cc80a8ea | 1,592 | py | Python | web/vendor_scraping_controller.py | Zheruel/compari-ro-scraper | ec4ccdec8ac647d694eb78959f2411d901cfad70 | [
"MIT"
] | 1 | 2021-10-09T14:39:48.000Z | 2021-10-09T14:39:48.000Z | web/vendor_scraping_controller.py | Zheruel/compari-ro-scraper | ec4ccdec8ac647d694eb78959f2411d901cfad70 | [
"MIT"
] | null | null | null | web/vendor_scraping_controller.py | Zheruel/compari-ro-scraper | ec4ccdec8ac647d694eb78959f2411d901cfad70 | [
"MIT"
] | null | null | null | from requests import Session
from models.vendor import Vendor
from bs4 import BeautifulSoup
from io_utils.csv_exporter import save_scraped_vendor
| 33.87234 | 99 | 0.687814 | from requests import Session
from models.vendor import Vendor
from bs4 import BeautifulSoup
from io_utils.csv_exporter import save_scraped_vendor
def grab_vendor_information(vendor_page_url: str, ses: Session) -> Vendor:
    """Scrape one compari.ro vendor page and return it as a Vendor record.

    Missing contact fields are reported as the literal string "Not found".
    """
    page = ses.get(vendor_page_url)
    soup = BeautifulSoup(page.content, 'html.parser')
    info_div = soup.find("div", {"class": "store-header-info"})
    name = soup.find("h1", {"class": "shop-title"}).text.strip()

    def contact_or_default(itemprop):
        # A missing field makes .find() return None, raising AttributeError
        try:
            return info_div.find("div", {"itemprop": itemprop}).text.strip()
        except AttributeError:
            return "Not found"

    email = contact_or_default("email")
    phone_number = contact_or_default("telephone")
    # The vendor's own site is the (last) col-xs-8 div containing "https"
    web_page = "Not found"
    for candidate in info_div.find_all("div", {"class": "col-xs-8"}):
        if "https" in candidate.text:
            web_page = candidate.text.strip()
    return Vendor(name, vendor_page_url, email, phone_number, web_page)
def scrape_vendors():
    """Walk the compari.ro store index, scraping and saving every vendor."""
    session = Session()
    index_page = session.get("https://www.compari.ro/stores/")
    soup = BeautifulSoup(index_page.content, 'html.parser')
    for shop_box in soup.find_all("div", {"class": "shop-box"}):
        vendor_url = shop_box.find("a").get("href")
        vendor = grab_vendor_information(vendor_url, session)
        save_scraped_vendor(vendor)
        print("Vendor {0} scraped and saved".format(vendor.name))
| 1,398 | 0 | 46 |
3ec8e4b9a94e2276fc35dc4cd360cde128b72c63 | 2,402 | py | Python | dvol_python/testtools.py | ClusterHQ/dvol | adf6c49bbf74d26fbc802a3cdd02ee47e18ad934 | [
"Apache-2.0"
] | 276 | 2015-11-03T23:14:11.000Z | 2021-11-08T11:05:57.000Z | dvol_python/testtools.py | ClusterHQ/dvol | adf6c49bbf74d26fbc802a3cdd02ee47e18ad934 | [
"Apache-2.0"
] | 62 | 2015-11-12T02:02:14.000Z | 2022-02-02T21:10:05.000Z | dvol_python/testtools.py | ClusterHQ/dvol | adf6c49bbf74d26fbc802a3cdd02ee47e18ad934 | [
"Apache-2.0"
] | 19 | 2015-11-14T08:17:14.000Z | 2017-07-19T13:32:57.000Z | """
Common test tools.
"""
from os import environ
from semver import compare
from unittest import skipIf
import requests
import subprocess
import time
TEST_GOLANG_VERSION = environ.get("TEST_GOLANG_VERSION", False)
DOCKER_VERSION = environ.get("DOCKER_VERSION", "")
skip_if_go_version = skipIf(
TEST_GOLANG_VERSION,
"Not expected to work in go version"
)
skip_if_python_version = skipIf(
not TEST_GOLANG_VERSION,
"Not expected to work in Python version"
)
skip_if_docker_version_less_than = lambda ver: skipIf(
_skip_max_docker_ver(ver),
"Not expected to work in this Docker version")
def try_until(f, attempts=5, backoff=0.1, attempt=1):
    """
    Synchronously, retry ``f`` every ``backoff`` * (2 ^ ``attempt``) seconds
    until it doesn't raise an exception, or we've tried ``attempts`` many
    times. Return the result of running ``f`` successfully, or raise the last
    exception it raised when attempted.
    """
    while True:
        try:
            return f()
        except:
            if attempt > attempts:
                raise
            time.sleep(backoff * (2 ** attempt))
            attempt += 1
def run(cmd):
    """
    Run cmd (list of bytes), e.g. ["ls", "/"] and return the result, raising
    CalledProcessErrorWithOutput if return code is non-zero.
    """
    try:
        result = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT
        )
    except subprocess.CalledProcessError, error:  # Python 2 except syntax
        # Wrap in a project exception whose message embeds the command line,
        # exit status and combined stdout/stderr for easy test debugging.
        exc = CalledProcessErrorWithOutput(
            "\n>> command:\n%(command)s"
            "\n>> returncode\n%(returncode)d"
            "\n>> output:\n%(output)s" %
            dict(command=" ".join(cmd),
                 returncode=error.returncode,
                 output=error.output))
        exc.original = error  # keep the CalledProcessError for callers
        raise exc
    return result
| 27.295455 | 77 | 0.636137 | """
Common test tools.
"""
from os import environ
from semver import compare
from unittest import skipIf
import requests
import subprocess
import time
TEST_GOLANG_VERSION = environ.get("TEST_GOLANG_VERSION", False)
DOCKER_VERSION = environ.get("DOCKER_VERSION", "")
skip_if_go_version = skipIf(
TEST_GOLANG_VERSION,
"Not expected to work in go version"
)
skip_if_python_version = skipIf(
not TEST_GOLANG_VERSION,
"Not expected to work in Python version"
)
def _skip_max_docker_ver(ver):
    """True when DOCKER_VERSION parses as semver and is older than ``ver``."""
    try:
        is_older = compare(DOCKER_VERSION, ver) < 0
    except ValueError:
        # Unset/unparseable DOCKER_VERSION: never skip on version grounds.
        return False
    return is_older
skip_if_docker_version_less_than = lambda ver: skipIf(
_skip_max_docker_ver(ver),
"Not expected to work in this Docker version")
def get(*args, **kw):
    """requests.get wrapper that fails loudly on any non-200 response."""
    response = requests.get(*args, **kw)
    if response.status_code == 200:
        return response
    raise Exception("Not 200: %s" % (response,))
def docker_host():
    """Return the Docker daemon's hostname.

    Parses DOCKER_HOST ("scheme://host:port"); defaults to "localhost"
    when the variable is not set.
    """
    raw = environ.get("DOCKER_HOST")
    if raw is None:
        return "localhost"
    return raw.split("://")[1].split(":")[0]
def try_until(f, attempts=5, backoff=0.1, attempt=1):
    """
    Synchronously, retry ``f`` every ``backoff`` * (2 ^ ``attempt``) seconds
    until it doesn't raise an exception, or we've tried ``attempts`` many
    times. Return the result of running ``f`` successfully, or raise the last
    exception it raised when attempted.
    """
    while True:
        try:
            return f()
        except:
            if attempt > attempts:
                raise
            time.sleep(backoff * (2 ** attempt))
            attempt += 1
class CalledProcessErrorWithOutput(Exception):
    """Raised by run(); its message embeds command, exit status and output."""
    pass
def run(cmd):
    """
    Run cmd (list of bytes), e.g. ["ls", "/"] and return the result, raising
    CalledProcessErrorWithOutput if return code is non-zero.
    """
    try:
        result = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT
        )
    except subprocess.CalledProcessError, error:  # Python 2 except syntax
        # Wrap in a project exception whose message embeds the command line,
        # exit status and combined stdout/stderr for easy test debugging.
        exc = CalledProcessErrorWithOutput(
            "\n>> command:\n%(command)s"
            "\n>> returncode\n%(returncode)d"
            "\n>> output:\n%(output)s" %
            dict(command=" ".join(cmd),
                 returncode=error.returncode,
                 output=error.output))
        exc.original = error  # keep the CalledProcessError for callers
        raise exc
    return result
| 389 | 34 | 92 |
5811d4052a2da8a0c989ae54eefbdd10c28952bc | 6,675 | py | Python | disputatio/routines/vanes/front.py | nicholasmalaya/paleologos | 11959056caa80d3c910759b714a0f8e42f986f0f | [
"MIT"
] | 1 | 2021-11-04T17:49:42.000Z | 2021-11-04T17:49:42.000Z | disputatio/routines/vanes/front.py | nicholasmalaya/paleologos | 11959056caa80d3c910759b714a0f8e42f986f0f | [
"MIT"
] | null | null | null | disputatio/routines/vanes/front.py | nicholasmalaya/paleologos | 11959056caa80d3c910759b714a0f8e42f986f0f | [
"MIT"
] | 2 | 2019-01-04T16:08:18.000Z | 2019-12-16T19:34:24.000Z | #!/bin/py
#
# interpolate over data field with 2d polynomial fit
#
# fit a 2D, 3rd order polynomial to data
# estimate the 16 coefficients using all of your data points.
#
# http://stackoverflow.com/questions/18832763/drawing-directions-fields
#
#
import numpy as np
import matplotlib
matplotlib.use('Agg')
import itertools
import matplotlib.pyplot as plt
from scipy import integrate
from scipy.integrate import ode
hprime = -4.5
#
#
#
#
#
#
#
#
#
#
# main function: execute
#
#
# EXECUTE
#
main()
#
# nick
# 1/30/16
#
# http://stackoverflow.com/questions/7997152/python-3d-polynomial-surface-fit-order-dependent
#
| 21.190476 | 93 | 0.497978 | #!/bin/py
#
# interpolate over data field with 2d polynomial fit
#
# fit a 2D, 3rd order polynomial to data
# estimate the 16 coefficients using all of your data points.
#
# http://stackoverflow.com/questions/18832763/drawing-directions-fields
#
#
import numpy as np
import matplotlib
matplotlib.use('Agg')
import itertools
import matplotlib.pyplot as plt
from scipy import integrate
from scipy.integrate import ode
hprime = -4.5
def load_ell():
    # Build (x, y, slope) samples along a family of ellipses that start on a
    # circle of radius R and end on the vertical line x = hprime.
    # Returns three flat numpy arrays of equal length. (Python 2 source.)
    #
    # Generate Data from ellipses
    #
    h = hprime
    thetaf = 20*np.pi/180.  # final turning angle (20 degrees, in radians)
    a = -h*1.0              # ellipse semi-axis along x
    miny = -0.0
    #
    # create data
    #
    space = 0.02
    R = 3.0
    y0 = np.arange(1.5,miny,-space)  # starting heights on the circle
    #print y0
    #y0 = np.array([1.5, 1.45, 1.35, 1.2, 1.05, 0.9, 0.6, 0.3, 0.15])
    x0 = -np.sqrt(R*R-y0*y0)
    theta0 = np.arctan2(y0,x0)
    thetafy = thetaf*(R-y0)/R  # turning angle scaled with height
    #thetafy = thetaf*np.arccos(y0/R)/2.
    thetam = theta0-np.pi/2-thetafy
    m = np.tan(thetam)  # prescribed slope at the circle intersection
    # Solve for each ellipse's center height k and semi-axis b so the
    # ellipse passes through (x0, y0) with slope m there.
    k = (y0 + a*a*m/(x0-h) - m*(x0-h))
    bs = -a*a*m*(y0-k)/(x0-h)
    b = np.sqrt(bs)
    xl = []
    yl = []
    zl = []
    print 'y0 ', y0
    print 'b/a: ',b/a
    fudge = 0.05
    dx_space=0.1
    for i in xrange(len(k)):
        # Sample this ellipse from x = h out to (just past) the circle
        dx = np.arange(h,x0[i]+fudge,dx_space)
        xl = xl + dx.tolist()
        dy = -(b[i]*np.sqrt(1-((dx-h)/(a))**2))+k[i]
        #yl.append(-(b[i]*np.sqrt(1-((dx-h)/(a))**2))+k[i])
        yl = yl + dy.tolist()
        #zl.append(np.arctan(dy/dx))
        if(i == 0):
            # First (outermost) curve is taken as flat
            m = np.zeros(len(dy))
        else:
            # Analytic dy/dx of the ellipse at each sample point
            m = -b[i]*b[i]*(dx-h)/((dy-k[i])*(a*a))
        zl = zl + m.tolist()
    #
    # convert to numpy array
    #
    x = np.asarray(xl)
    y = np.asarray(yl)
    z = np.asarray(zl)
    #
    # steady as she goes
    #
    return x,y,z
def vf(t, x, m):
    """Unit-speed direction field for the ODE integrator.

    ``t`` is unused but required by scipy.integrate.ode's callback
    signature; the local slope comes from the fitted polynomial ``m``.
    """
    slope = polyval2d(x[0], x[1], m)
    theta = np.arctan(slope)
    return np.array([np.cos(theta), np.sin(theta)])
def arr(m):
    # Integrate solution curves of the direction field defined by the fitted
    # polynomial m, starting from a set of seed points on x = hprime, and
    # draw each trajectory onto the current matplotlib figure.
    #
    # Solution curves
    #
    h = hprime
    ic=[[h,-2],[h,0.8],[h,-1],[h,-3],[h,1.5],[h,0],[h,-3.3]]  # seed points
    t0=0; dt=0.1;
    r = ode(vf).set_integrator('vode', method='bdf',max_step=dt)
    for k in range(len(ic)):
        # Stop integrating shortly before the distance-from-origin bound
        tEnd=np.sqrt(ic[k][0]**2 + ic[k][1]**2)-0.5
        Y=[];T=[];S=[];
        r.set_initial_value(ic[k], t0).set_f_params(m)
        while r.successful() and r.t +dt < tEnd:
            r.integrate(r.t+dt)
            Y.append(r.y)
        S=np.array(np.real(Y))
        plt.plot(S[:,0],S[:,1], color = 'red', lw = 4.25)
def polyfit2d(x, y, z, order=5):
    """Least-squares fit of a full 2-D polynomial of the given order.

    Builds the Vandermonde-style design matrix with one column per
    (x-exponent, y-exponent) pair and solves it with numpy's lstsq
    (minimum-norm solution when the system is underdetermined).

    Returns the coefficient vector, ordered as itertools.product over
    the x and y exponent ranges -- the same ordering polyval2d expects.
    """
    # Fix: removed the dead local `cnd` and the stale commented-out
    # lstsq call that referenced it; behavior is unchanged.
    exponents = list(itertools.product(range(order + 1), range(order + 1)))
    G = np.zeros((x.size, len(exponents)))
    for col, (i, j) in enumerate(exponents):
        G[:, col] = x**i * y**j
    m, _, _, _ = np.linalg.lstsq(G, z)
    return m
def polyval2d(x, y, m):
    """Evaluate the 2-D polynomial with coefficient vector ``m`` at (x, y).

    The order is recovered from len(m); coefficients are paired with
    (x-exponent, y-exponent) terms in itertools.product order, matching
    what polyfit2d produces.
    """
    order = int(np.sqrt(len(m))) - 1
    exponents = itertools.product(range(order + 1), range(order + 1))
    result = np.zeros_like(x)
    for coeff, (i, j) in zip(m, exponents):
        result += coeff * x**i * y**j
    return result
def polyval2d_disp(x, y, m):
    # Debug variant of polyval2d: same evaluation, but prints each term's
    # coefficient, exponents, contribution and running total. (Python 2.)
    order = int(np.sqrt(len(m))) - 1
    ij = itertools.product(range(order+1), range(order+1))
    z = np.zeros_like(x)
    for a, (i,j) in zip(m, ij):
        tmp = a * x**i * y**j
        z += tmp
        print a,i,j,tmp,z
    return z
#
#
#
def poly_disp_fparse(m):
    # Print the fitted polynomial as an fparser-style "slope_func" input
    # expression (x^i * y^j terms joined with '+'). Returns 0. (Python 2.)
    print "#"
    print "# Polynomial Interpolation Function"
    print "#"
    print "slope_func = '"
    order = int(np.sqrt(len(m))) - 1
    ij = itertools.product(range(order+1), range(order+1))
    for a, (i,j) in zip(m, ij):
        # Last term closes the quoted expression instead of adding '+'
        if( (i+1)*(j+1) != len(m)):
            print ' %.15f * x^%i * y^%i +' % (a,i,j )
        else:
            print " %.15f * x^%i * y^%i'" % (a,i,j )
    print
    return 0
#
#
#
def poly_disp_py(m):
    # Print the fitted polynomial as a single Python "return ..." expression
    # (x**i * y**j terms joined with '+'). Returns 0. (Python 2.)
    print "#"
    print "# Polynomial Interpolation Function"
    print "# For python"
    print "return ",
    order = int(np.sqrt(len(m))) - 1
    ij = itertools.product(range(order+1), range(order+1))
    for a, (i,j) in zip(m, ij):
        # Trailing comma keeps everything on one output line; last term
        # omits the '+'
        if( (i+1)*(j+1) != len(m)):
            print '%.15f * x**%i * y**%i +' % (a,i,j ),
        else:
            print "%.15f * x**%i * y**%i" % (a,i,j ),
    print
    return 0
#
#
#
def poly_disp_py_line(m):
    # Print the fitted polynomial as Python source that accumulates each
    # term into `tmp` and prints the running total (debug-friendly form of
    # poly_disp_py). Returns 0. (Python 2.)
    print "#"
    print "# Polynomial Interpolation Function"
    print "# For python"
    order = int(np.sqrt(len(m))) - 1
    ij = itertools.product(range(order+1), range(order+1))
    for a, (i,j) in zip(m, ij):
        if( (i+1)*(j+1) != len(m)):
            print '    tmp += %.15f * x**%i * y**%i' % (a,i,j )
            print '    print tmp'
        else:
            print "    tmp += %.15f * x**%i * y**%i" % (a,i,j )
            print '    print tmp'
    print
    return 0
def load_ex():
    """Generate a random example data set for exercising the 2-D fit.

    Returns (x, y, z) arrays of length 100 where z is a fixed cubic in
    x and y plus uniform [0, 1) noise.
    """
    num_points = 100
    x = np.random.random(num_points)
    y = np.random.random(num_points)
    # silly fake function for z
    z = x**2 + y**2 + 3 * x**3 + y + np.random.random(num_points)
    return x, y, z
#
# main function: execute
#
def main():
    # Fit a 2-D polynomial to the ellipse-derived slope field, overlay
    # integrated solution curves, save the plot, and print the polynomial.
    #
    # load data in
    #
    x,y,z=load_ell()
    #x,y,z=load_ex()
    #
    # Fit polynomial
    #
    m = polyfit2d(x,y,z)
    #
    # Evaluate it on a grid...
    #
    nx, ny = 200, 200
    xx, yy = np.meshgrid(np.linspace(x.min(), x.max(), nx),
                         np.linspace(y.min(), y.max(), ny))
    zz = polyval2d(xx, yy, m)
    #
    # m is a matrix of polynomial values...
    # e.g.
    #
    # Plot!
    #
    arr(m)
    #
    # extent: [ None | (x0,x1,y0,y1) ]
    #
    plt.imshow(zz, extent=(x.min(), x.max(), y.min(), y.max()))
    plt.colorbar()
    plt.scatter(x, y, c=z)
    plt.title("Elliptic Vane Interpolation")
    plt.xlim([-7,1])
    plt.ylim([-5,2])
    plt.xlabel('Streamwise (x)')
    plt.ylabel('Spanwise (y)')
    # add circle
    R = 1.5
    circle=plt.Circle((0,0),R,color='black',fill=False,linewidth=4)
    fig = plt.gcf()
    fig.gca().add_artist(circle)
    plt.savefig('interp_front.png')
    #
    # output polynomial for input
    #
    poly_disp_fparse(m)
    #poly_disp_py_line(m)
    #print
    #print polyval2d_disp(-5.5, -3.5, m)
#
main()
#
# nick
# 1/30/16
#
# http://stackoverflow.com/questions/7997152/python-3d-polynomial-surface-fit-order-dependent
#
| 5,796 | 0 | 250 |
9c3b8eda5d94d6304e5ca32a9a4f89b5936e41a2 | 214 | py | Python | UdemyMegaCourse/cv2/galaxy.py | JeremyPie/PythonCourse1 | 5dd4c417c70c4ade5bb137ea8bdbb56d7d6c71c7 | [
"MIT"
] | null | null | null | UdemyMegaCourse/cv2/galaxy.py | JeremyPie/PythonCourse1 | 5dd4c417c70c4ade5bb137ea8bdbb56d7d6c71c7 | [
"MIT"
] | null | null | null | UdemyMegaCourse/cv2/galaxy.py | JeremyPie/PythonCourse1 | 5dd4c417c70c4ade5bb137ea8bdbb56d7d6c71c7 | [
"MIT"
] | null | null | null | import cv2
from matplotlib import pyplot as plt
img = cv2.imread('galaxy.jpg', 0)
img = cv2.resize(img, (int(img.shape[1]/2), int(img.shape[0]/2)))
cv2.imwrite('GlaxyResized.jpg', img)
plt.imshow(img)
plt.show()
| 21.4 | 65 | 0.700935 | import cv2
from matplotlib import pyplot as plt
img = cv2.imread('galaxy.jpg', 0)
img = cv2.resize(img, (int(img.shape[1]/2), int(img.shape[0]/2)))
cv2.imwrite('GlaxyResized.jpg', img)
plt.imshow(img)
plt.show()
| 0 | 0 | 0 |
b78fdd722081f7d224797321217f5335e2c23b47 | 4,778 | py | Python | xsdata/utils/downloader.py | pashashocky/xsdata | 1cd681598d2235626d0e21716fc9fb885d26e351 | [
"MIT"
] | null | null | null | xsdata/utils/downloader.py | pashashocky/xsdata | 1cd681598d2235626d0e21716fc9fb885d26e351 | [
"MIT"
] | null | null | null | xsdata/utils/downloader.py | pashashocky/xsdata | 1cd681598d2235626d0e21716fc9fb885d26e351 | [
"MIT"
] | null | null | null | import os
import re
from pathlib import Path
from typing import Dict
from typing import Optional
from typing import Union
from urllib.request import urlopen
from xsdata.codegen.parsers import DefinitionsParser
from xsdata.codegen.parsers import SchemaParser
from xsdata.logger import logger
from xsdata.models.wsdl import Definitions
from xsdata.models.xsd import Schema
class Downloader:
    """
    Helper class to download a schema or a definitions with all their imports
    locally. The imports paths will be adjusted if necessary.

    :param output: Output path
    """

    __slots__ = ("output", "base_path", "downloaded")

    def __init__(self, output: Path):
        """Store the output directory and reset the download bookkeeping.

        ``base_path`` is discovered lazily from the first URI fetched and
        ``downloaded`` maps every seen URI/location to its local Path (or
        None while the download is still in flight).

        Restored: this method (and ``wget_included`` below) were missing,
        which made ``Downloader(output)`` raise TypeError and broke
        ``parse_schema``/``parse_definitions``.
        """
        self.output = output
        self.base_path: Optional[Path] = None
        self.downloaded: Dict = {}

    def wget(self, uri: str, location: Optional[str] = None):
        """Download handler for any uri input with circular protection."""
        if not (uri in self.downloaded or (location and location in self.downloaded)):
            # Mark both keys up-front so recursive imports cannot re-fetch us.
            self.downloaded[uri] = None
            self.downloaded[location] = None
            self.adjust_base_path(uri)

            logger.info("Fetching %s", uri)

            input_stream = urlopen(uri).read()  # nosec
            if uri.endswith("wsdl"):
                self.parse_definitions(uri, input_stream)
            else:
                self.parse_schema(uri, input_stream)

            self.write_file(uri, location, input_stream.decode())

    def parse_schema(self, uri: str, content: bytes):
        """Convert content to a schema instance and process all sub imports."""
        parser = SchemaParser(location=uri)
        schema = parser.from_bytes(content, Schema)
        self.wget_included(schema)

    def parse_definitions(self, uri: str, content: bytes):
        """Convert content to a definitions instance and process all sub
        imports."""
        parser = DefinitionsParser(location=uri)
        definitions = parser.from_bytes(content, Definitions)
        self.wget_included(definitions)

        for schema in definitions.schemas:
            self.wget_included(schema)

    def wget_included(self, definition: "Union[Schema, Definitions]"):
        """Download every import/include referenced by the given document."""
        for included in definition.included():
            if included.location:
                schema_location = getattr(included, "schema_location", None)
                self.wget(included.location, schema_location)

    def adjust_base_path(self, uri: str):
        """
        Adjust base path for every new uri loaded.

        Example runs:

        - file:///schemas/air_v48_0/Air.wsdl -> file:///schemas/air_v48_0
        - file:///schemas/common_v48_0/CommonReqRsp.xsd -> file:///schemas
        """
        if not self.base_path:
            self.base_path = Path(uri).parent
            logger.info("Setting base path to %s", self.base_path)
        else:
            common_path = os.path.commonpath((self.base_path or "", uri))
            if common_path:
                common_path_path = Path(common_path)
                # Only widen the base path, never narrow it.
                if common_path_path < self.base_path:
                    self.base_path = Path(common_path)
                    logger.info("Adjusting base path to %s", self.base_path)

    def adjust_imports(self, path: Path, content: str) -> str:
        """Try to adjust the import locations for external locations that are
        not relative to the first requested uri."""
        matches = re.findall(r"ocation=\"(.*)\"", content)
        for match in matches:
            if isinstance(self.downloaded.get(match), Path):
                location = os.path.relpath(self.downloaded[match], path)
                replace = str(location).replace("\\", "/")
                content = content.replace(f'ocation="{match}"', f'ocation="{replace}"')

        return content

    def write_file(self, uri: str, location: Optional[str], content: str):
        """
        Write the given uri and it's content according to the base path and if
        the uri is relative to first requested uri.

        Keep track of all the written file paths, in case we have to
        modify the location attribute in an upcoming schema/definition
        import.
        """
        common_path = os.path.commonpath((self.base_path or "", uri))
        if common_path:
            file_path = self.output.joinpath(Path(uri).relative_to(common_path))
        else:
            file_path = self.output.joinpath(Path(uri).name)

        content = self.adjust_imports(file_path.parent, content)
        file_path.parent.mkdir(parents=True, exist_ok=True)
        file_path.write_text(content, encoding="utf-8")

        logger.info("Writing %s", file_path)
        self.downloaded[uri] = file_path

        if location:
            self.downloaded[location] = file_path
| 37.328125 | 87 | 0.636668 | import os
import re
from pathlib import Path
from typing import Dict
from typing import Optional
from typing import Union
from urllib.request import urlopen
from xsdata.codegen.parsers import DefinitionsParser
from xsdata.codegen.parsers import SchemaParser
from xsdata.logger import logger
from xsdata.models.wsdl import Definitions
from xsdata.models.xsd import Schema
class Downloader:
    """
    Helper class to download a schema or a definitions with all their imports
    locally. The imports paths will be adjusted if necessary.

    :param output: Output path
    """

    __slots__ = ("output", "base_path", "downloaded")

    def __init__(self, output: Path):
        """Store the output directory and reset the download bookkeeping.

        ``base_path`` is discovered lazily from the first URI fetched;
        ``downloaded`` maps every seen URI/location to its local Path
        (or None while a download is still in flight).
        """
        self.output = output
        self.base_path: Optional[Path] = None
        self.downloaded: Dict = {}

    def wget(self, uri: str, location: Optional[str] = None):
        """Download handler for any uri input with circular protection."""
        if not (uri in self.downloaded or (location and location in self.downloaded)):
            # Mark both keys up-front so recursive imports cannot re-fetch us.
            self.downloaded[uri] = None
            self.downloaded[location] = None
            self.adjust_base_path(uri)

            logger.info("Fetching %s", uri)

            input_stream = urlopen(uri).read()  # nosec
            if uri.endswith("wsdl"):
                self.parse_definitions(uri, input_stream)
            else:
                self.parse_schema(uri, input_stream)

            self.write_file(uri, location, input_stream.decode())

    def parse_schema(self, uri: str, content: bytes):
        """Convert content to a schema instance and process all sub imports."""
        parser = SchemaParser(location=uri)
        schema = parser.from_bytes(content, Schema)
        self.wget_included(schema)

    def parse_definitions(self, uri: str, content: bytes):
        """Convert content to a definitions instance and process all sub
        imports."""
        parser = DefinitionsParser(location=uri)
        definitions = parser.from_bytes(content, Definitions)
        self.wget_included(definitions)

        for schema in definitions.schemas:
            self.wget_included(schema)

    def wget_included(self, definition: Union[Schema, Definitions]):
        """Download every import/include referenced by the given document."""
        for included in definition.included():
            if included.location:
                schema_location = getattr(included, "schema_location", None)
                self.wget(included.location, schema_location)

    def adjust_base_path(self, uri: str):
        """
        Adjust base path for every new uri loaded.

        Example runs:

        - file:///schemas/air_v48_0/Air.wsdl -> file:///schemas/air_v48_0
        - file:///schemas/common_v48_0/CommonReqRsp.xsd -> file:///schemas
        """
        if not self.base_path:
            self.base_path = Path(uri).parent
            logger.info("Setting base path to %s", self.base_path)
        else:
            common_path = os.path.commonpath((self.base_path or "", uri))
            if common_path:
                common_path_path = Path(common_path)
                # Only widen the base path, never narrow it.
                if common_path_path < self.base_path:
                    self.base_path = Path(common_path)
                    logger.info("Adjusting base path to %s", self.base_path)

    def adjust_imports(self, path: Path, content: str) -> str:
        """Try to adjust the import locations for external locations that are
        not relative to the first requested uri."""
        # Matches both location="..." and schemaLocation="..." attributes.
        matches = re.findall(r"ocation=\"(.*)\"", content)
        for match in matches:
            if isinstance(self.downloaded.get(match), Path):
                location = os.path.relpath(self.downloaded[match], path)
                replace = str(location).replace("\\", "/")
                content = content.replace(f'ocation="{match}"', f'ocation="{replace}"')

        return content

    def write_file(self, uri: str, location: Optional[str], content: str):
        """
        Write the given uri and it's content according to the base path and if
        the uri is relative to first requested uri.

        Keep track of all the written file paths, in case we have to
        modify the location attribute in an upcoming schema/definition
        import.
        """
        common_path = os.path.commonpath((self.base_path or "", uri))
        if common_path:
            file_path = self.output.joinpath(Path(uri).relative_to(common_path))
        else:
            file_path = self.output.joinpath(Path(uri).name)

        content = self.adjust_imports(file_path.parent, content)
        file_path.parent.mkdir(parents=True, exist_ok=True)
        file_path.write_text(content, encoding="utf-8")

        logger.info("Writing %s", file_path)
        self.downloaded[uri] = file_path

        if location:
            self.downloaded[location] = file_path
| 385 | 0 | 54 |
f5cf2fe8c3a53755d9392bd7e7eed0997ae7b6c5 | 1,052 | py | Python | setup.py | privatwolke/eclipse-profile-selector | e295e0589356aba51fa141420994793bfa62aaa8 | [
"MIT"
] | 1 | 2015-12-18T01:27:37.000Z | 2015-12-18T01:27:37.000Z | setup.py | privatwolke/eclipseprofileselector | e295e0589356aba51fa141420994793bfa62aaa8 | [
"MIT"
] | null | null | null | setup.py | privatwolke/eclipseprofileselector | e295e0589356aba51fa141420994793bfa62aaa8 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
import sys, os
version = '0.2.2'
setup(name = 'eclipseprofileselector',
version = version,
description = 'Manage separate Eclipse profiles and workspaces with a nice graphical user interface.',
long_description = open('README.rst', 'r').read(),
keywords = 'eclipse profile',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Utilities'
],
author = 'Stephan Klein',
url = 'https://github.com/privatwolke/eclipseprofileselector',
license = 'MIT',
packages = ['eclipseprofileselector'],
package_data = {
'eclipseprofileselector': ['ui.glade']
},
include_package_data = True,
zip_safe = True,
entry_points = {
'gui_scripts': [
'eclipse-profile-selector = eclipseprofileselector.profile:main'
]
}
)
| 28.432432 | 103 | 0.695817 | from setuptools import setup, find_packages
import sys, os
version = '0.2.2'
setup(name = 'eclipseprofileselector',
version = version,
description = 'Manage separate Eclipse profiles and workspaces with a nice graphical user interface.',
long_description = open('README.rst', 'r').read(),
keywords = 'eclipse profile',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Utilities'
],
author = 'Stephan Klein',
url = 'https://github.com/privatwolke/eclipseprofileselector',
license = 'MIT',
packages = ['eclipseprofileselector'],
package_data = {
'eclipseprofileselector': ['ui.glade']
},
include_package_data = True,
zip_safe = True,
entry_points = {
'gui_scripts': [
'eclipse-profile-selector = eclipseprofileselector.profile:main'
]
}
)
| 0 | 0 | 0 |
0a6e54573a790fabc971c11e4a1a22c66eac3489 | 100 | py | Python | django_app/contributors/admin.py | ASRG/asrg.io | de29552096505ac03a4ec2602873ba0f688b7072 | [
"MIT"
] | 8 | 2020-04-03T19:33:37.000Z | 2022-01-10T18:46:49.000Z | django_app/contributors/admin.py | ASRG/asrg.io | de29552096505ac03a4ec2602873ba0f688b7072 | [
"MIT"
] | 359 | 2019-08-07T13:31:24.000Z | 2022-03-29T22:45:49.000Z | django_app/contributors/admin.py | ASRG/asrg.io | de29552096505ac03a4ec2602873ba0f688b7072 | [
"MIT"
] | 3 | 2020-08-07T20:43:24.000Z | 2020-08-20T20:54:30.000Z | from django.contrib import admin
from .models import Contributor
admin.site.register(Contributor)
| 16.666667 | 32 | 0.83 | from django.contrib import admin
from .models import Contributor
admin.site.register(Contributor)
| 0 | 0 | 0 |
f4110ee3e6aea36c216c4a811e8afcc766ec14e2 | 2,600 | py | Python | tests/uuidt/migrations/0001_initial.py | OlgaBorisova/django-pgfields | fc21efc116d017f1bf75c88fa3502cc23f8923b3 | [
"BSD-3-Clause"
] | 1 | 2015-03-26T09:34:32.000Z | 2015-03-26T09:34:32.000Z | tests/uuidt/migrations/0001_initial.py | elbaschid/django-pgfields | 37915b92349c392ed4980d255225e991be3463d6 | [
"BSD-3-Clause"
] | null | null | null | tests/uuidt/migrations/0001_initial.py | elbaschid/django-pgfields | 37915b92349c392ed4980d255225e991be3463d6 | [
"BSD-3-Clause"
] | 1 | 2018-10-23T21:31:29.000Z | 2018-10-23T21:31:29.000Z | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| 38.80597 | 128 | 0.565 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the Movie, Game and Book tables.

    Auto-generated by South (``schemamigration``); do not hand-edit the
    frozen ``models`` dict below.
    """

    def forwards(self, orm):
        """Create the three tables and emit South's post-create signals."""
        # Adding model 'Movie' -- UUID primary key (auto-generated)
        db.create_table('uuidt_movie', (
            ('id', self.gf('django_pg.models.fields.uuid.UUIDField')(auto_add=True, primary_key=True, unique=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ))
        db.send_create_signal('uuidt', ['Movie'])

        # Adding model 'Game' -- integer primary key plus a required UUID column
        db.create_table('uuidt_game', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('uuid', self.gf('django_pg.models.fields.uuid.UUIDField')(unique=True)),
        ))
        db.send_create_signal('uuidt', ['Game'])

        # Adding model 'Book' -- like Game, but the UUID column is nullable
        db.create_table('uuidt_book', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('uuid', self.gf('django_pg.models.fields.uuid.UUIDField')(null=True, unique=True)),
        ))
        db.send_create_signal('uuidt', ['Book'])

    def backwards(self, orm):
        """Drop the three tables created by :meth:`forwards`."""
        # Deleting model 'Movie'
        db.delete_table('uuidt_movie')

        # Deleting model 'Game'
        db.delete_table('uuidt_game')

        # Deleting model 'Book'
        db.delete_table('uuidt_book')

    # Frozen ORM state used by South to reconstruct models at migration time.
    models = {
        'uuidt.book': {
            'Meta': {'object_name': 'Book'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'uuid': ('django_pg.models.fields.uuid.UUIDField', [], {'null': 'True', 'unique': 'True'})
        },
        'uuidt.game': {
            'Meta': {'object_name': 'Game'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'uuid': ('django_pg.models.fields.uuid.UUIDField', [], {'unique': 'True'})
        },
        'uuidt.movie': {
            'Meta': {'object_name': 'Movie'},
            'id': ('django_pg.models.fields.uuid.UUIDField', [], {'auto_add': 'True', 'primary_key': 'True', 'unique': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['uuidt']
0b7fbd1451d21df02b8ac7806cf7eef5c4dcbb14 | 5,605 | py | Python | porthole/contact_management.py | speedyturkey/porthole | 5d47bb00d33d5aa93c3d2e84af993b5387b66be6 | [
"MIT"
] | 3 | 2017-06-22T01:52:10.000Z | 2019-09-25T22:52:56.000Z | porthole/contact_management.py | speedyturkey/porthole | 5d47bb00d33d5aa93c3d2e84af993b5387b66be6 | [
"MIT"
] | 48 | 2017-06-22T23:36:03.000Z | 2019-11-26T02:51:54.000Z | porthole/contact_management.py | speedyturkey/porthole | 5d47bb00d33d5aa93c3d2e84af993b5387b66be6 | [
"MIT"
] | 1 | 2019-02-27T13:59:07.000Z | 2019-02-27T13:59:07.000Z | from sqlalchemy.orm.exc import NoResultFound
from porthole.app import Session
from .logger import PortholeLogger
from porthole.models import AutomatedReport, AutomatedReportContact, AutomatedReportRecipient
| 49.60177 | 118 | 0.702587 | from sqlalchemy.orm.exc import NoResultFound
from porthole.app import Session
from .logger import PortholeLogger
from porthole.models import AutomatedReport, AutomatedReportContact, AutomatedReportRecipient
class AutomatedReportContactManager(object):
    """CRUD helper for automated reports, contacts and their recipient links.

    Wraps a SQLAlchemy session; mutating methods commit immediately.
    Duplicate adds and redundant state changes are logged as warnings and
    return None rather than raising.
    """

    def __init__(self, session=None):
        """Use the given SQLAlchemy session, or open a new one."""
        self.session = session or Session()
        self.logger = PortholeLogger(name=__name__)

    def get_report_by_name(self, report_name, should_exist=False):
        """Return the AutomatedReport row by name, or None.

        Raises NoResultFound when ``should_exist`` is True and no row matches.
        """
        report = self.session.query(AutomatedReport).filter_by(report_name=report_name).one_or_none()
        if report is None and should_exist:
            raise NoResultFound(f"Report {report_name} should exist but no record was found.")
        return report

    def report_exists(self, report_name):
        """Return True if a report with this name exists."""
        report = self.get_report_by_name(report_name)
        return report is not None

    def add_report(self, report_name: str, active: int = 1):
        """Create a report; no-op (with warning) if it already exists."""
        if self.report_exists(report_name):
            self.logger.warning(f"{report_name} already exists")
            return None
        report = AutomatedReport(report_name=report_name, active=active)
        self.session.add(report)
        self.session.commit()
        self.logger.info(f"Report '{report_name}' created successfully")

    def report_is_active(self, report_name):
        """Return True if the report's ``active`` flag is set; report must exist."""
        report = self.get_report_by_name(report_name, should_exist=True)
        return bool(report.active)

    def activate_report(self, report_name):
        """Set the report's ``active`` flag; no-op (with warning) if already active."""
        if self.report_is_active(report_name):
            self.logger.warning(f"Report '{report_name}' is already active")
            return None
        report = self.get_report_by_name(report_name, should_exist=True)
        report.active = 1
        self.session.commit()
        self.logger.info(f"Report '{report_name}' is now active")

    def deactivate_report(self, report_name):
        """Clear the report's ``active`` flag; no-op (with warning) if already inactive."""
        if not self.report_is_active(report_name):
            self.logger.warning(f"Report '{report_name}' is already inactive")
            return None
        report = self.get_report_by_name(report_name, should_exist=True)
        report.active = 0
        self.session.commit()
        self.logger.info(f"Report '{report_name}' is now inactive")

    def get_contact_by_email_address(self, email_address, should_exist=False):
        """Return the contact row for this email, or None.

        Raises NoResultFound when ``should_exist`` is True and no row matches.
        """
        contact = self.session.query(AutomatedReportContact).filter_by(
            email_address=email_address
        ).one_or_none()
        if contact is None and should_exist:
            raise NoResultFound(f"Contact with email {email_address} should exist but no record was found.")
        return contact

    def contact_exists(self, email_address: str):
        """Return True if a contact with this email address exists."""
        contact = self.get_contact_by_email_address(email_address)
        return contact is not None

    def add_contact(self, last_name: str = None, first_name: str = None, email_address: str = None):
        """Create a contact keyed by email; no-op (with warning) on duplicates."""
        if self.contact_exists(email_address):
            self.logger.warning(f"Contact {last_name}, {first_name} ({email_address}) already exists ")
            return None
        contact = AutomatedReportContact(last_name=last_name, first_name=first_name, email_address=email_address)
        self.session.add(contact)
        self.session.commit()
        self.logger.info(f"Contact {last_name}, {first_name} ({email_address}) created successfully")

    def get_report_recipient(self, report_name: str, email_address: str, should_exist: bool = False):
        """Return the recipient link for (report, contact), or None.

        Both the report and the contact must already exist (NoResultFound
        otherwise); ``should_exist`` additionally requires the link itself.
        """
        report = self.get_report_by_name(report_name, should_exist=True)
        contact = self.get_contact_by_email_address(email_address, should_exist=True)
        recipient = self.session.query(AutomatedReportRecipient).filter_by(
            report_id=report.report_id, contact_id=contact.contact_id
        ).one_or_none()
        if recipient is None and should_exist:
            raise NoResultFound(
                f"Recipient for report {report_name} with email {email_address} should exist but no record was found."
            )
        return recipient

    def report_recipient_exists(self, report_name: str, email_address: str):
        """Return True if the contact is already a recipient of the report."""
        recipient = self.get_report_recipient(report_name, email_address)
        return recipient is not None

    def add_report_recipient(self, report_name: str, email_address: str, recipient_type: str):
        """Link a contact to a report as a 'to' or 'cc' recipient.

        Raises ValueError for any other recipient_type; duplicate links are a
        logged no-op.
        """
        if recipient_type not in ['to', 'cc']:
            raise ValueError("Recipient type must be either `to` or `cc`.")
        if self.report_recipient_exists(report_name, email_address):
            self.logger.warning(f"Recipient '{email_address}' already exists for report '{report_name}'")
            return None
        report = self.get_report_by_name(report_name, should_exist=True)
        contact = self.get_contact_by_email_address(email_address, should_exist=True)
        recipient = AutomatedReportRecipient(
            report_id=report.report_id, contact_id=contact.contact_id, recipient_type=recipient_type
        )
        self.session.add(recipient)
        self.session.commit()
        self.logger.info(f"{recipient_type} recipient '{email_address}' added successfully to report '{report_name}'")

    def remove_report_recipient(self, report_name: str, email_address: str):
        """Delete the recipient link; no-op (with warning) if it doesn't exist."""
        if not self.report_recipient_exists(report_name, email_address):
            self.logger.warning(f"Recipient '{email_address}' does not exist for report '{report_name}'")
            return None
        recipient = self.get_report_recipient(report_name, email_address)
        self.session.delete(recipient)
        self.session.commit()
        self.logger.info(f"Recipient '{email_address}' removed successfully from report '{report_name}'")
| 4,974 | 23 | 400 |
88cc6c11608e5ecae6a01861ea3c8f6aaf96bc47 | 3,679 | py | Python | commissioning/doscalars.py | dobos/pysynphot | 5d2e0b52ceda78890940ac9239c2d88e149e0bed | [
"BSD-3-Clause"
] | 24 | 2015-01-04T23:38:21.000Z | 2022-02-01T00:11:07.000Z | commissioning/doscalars.py | dobos/pysynphot | 5d2e0b52ceda78890940ac9239c2d88e149e0bed | [
"BSD-3-Clause"
] | 126 | 2015-01-29T14:50:37.000Z | 2022-02-15T01:58:13.000Z | commissioning/doscalars.py | dobos/pysynphot | 5d2e0b52ceda78890940ac9239c2d88e149e0bed | [
"BSD-3-Clause"
] | 25 | 2015-02-09T12:12:02.000Z | 2021-09-09T13:06:54.000Z | from __future__ import print_function
import kwfile_dict
import glob, os, sys
import numpy as N
from astropy.io import fits as pyfits
import pylab as P
import matplotlib
from pysynphot.compat import ASTROPY_LT_1_3
def reverse(d):
    """Return a reverse lookup dictionary for the input dictionary"""
    # With duplicate values, the later key wins -- same overwrite order as a
    # plain loop over d.
    return {d[key]: key for key in d}
if __name__ == '__main__':
#dirpath, fieldname, instr=sys.argv[1:]
try:
run(*sys.argv[1:])
except TypeError as e:
print("sys.argv[1:] = ",sys.argv[1:])
raise e
| 28.3 | 79 | 0.572982 | from __future__ import print_function
import kwfile_dict
import glob, os, sys
import numpy as N
from astropy.io import fits as pyfits
import pylab as P
import matplotlib
from pysynphot.compat import ASTROPY_LT_1_3
def getdata(dirpath,fieldname,instr,save=True):
    """Parse all ``*.log`` keyword files under *dirpath* into arrays.

    Each log contributes one entry: an integer code for its obsmode, an
    integer code for its spectrum, and the float 'tra_discrep' value
    (0.0 when the log has no result).  When *save* is true, the results
    are also written to ``<parent>/<instr>_<fieldname>_table.fits``.

    Returns (namedict, odict, sdict, obsmode, spectrum, val) where the
    dicts map index->filename, obsmode->code and spectrum->code.
    """
    #get the list of files
    flist=glob.glob("%s/*.log"%dirpath)
    #make the arrays
    nfiles=len(flist)
    if nfiles == 0:
        raise ValueError('No files found')
    val=N.zeros((nfiles,),dtype=N.float64)
    obsmode=N.zeros((nfiles,),dtype=N.float64)
    spectrum=N.zeros((nfiles,),dtype=N.float64)
    #
    # Make the code-assignment dicts (first occurrence gets the next code)
    olist=[]
    odict={}
    ocount=0
    sdict={}
    scount=0
    namedict={}
    i=0
    #
    # Start processing
    for fname in flist:
        d=kwfile_dict.read_kwfile(fname)
        namedict[i]=fname
        om=d['tda_obsmode']
        olist.append(om)
        if om not in odict:
            odict[om]=ocount
            ocount+=1
        obsmode[i]=odict[om]
        sp=d['tda_spectrum']
        if sp not in sdict:
            sdict[sp]=scount
            scount+=1
        spectrum[i]=sdict[sp]
        try:
            val[i]=float(d['tra_discrep'])
        except KeyError:
            #Cases with errors don't have results; leave val[i] at 0.0.
            pass
        i+=1
    #Save our results as a FITS table
    if save:
        # Column widths are sized to the longest string in each column.
        tmp=[len(x) for x in flist]
        c1=pyfits.Column(name='logfile',format='%dA'%max(tmp),
                         array=N.array(flist))
        tmp=[len(x) for x in olist]
        c2=pyfits.Column(name='obsmode',format='%dA'%max(tmp),
                         array=N.array(olist))
        c3=pyfits.Column(name='obscode',format='I',
                         array=obsmode)
        c4=pyfits.Column(name='spcode',format='I',
                         array=spectrum)
        c5=pyfits.Column(name='discrep',format='D',
                         array=val)
        tbhdu=pyfits.BinTableHDU.from_columns(pyfits.ColDefs([c1,c2,c3,c4,c5]))
        outname=os.path.join(os.path.abspath(os.path.dirname(dirpath)),
                             "%s_%s_table.fits"%(instr,fieldname))
        # Older astropy spells the overwrite flag "clobber".
        if ASTROPY_LT_1_3:
            tbhdu.writeto(outname, clobber=True)
        else:
            tbhdu.writeto(outname, overwrite=True)
    #and return the values for immediate use
    return namedict,odict,sdict,obsmode,spectrum,val
def reverse(d):
    """Return a reverse lookup dictionary for the input dictionary"""
    # Dict comprehension over items(); later duplicate values overwrite
    # earlier ones exactly as the original loop did.
    return {value: key for key, value in d.items()}
def plotdata(obsmode,spectrum,val,odict,sdict,
             instr,fieldname,outdir,outname):
    """Write two discrepancy scatter plots (vs obsmode code and vs
    spectrum code) to ``<outdir>/<outname>_obsmode.ps`` and
    ``<outdir>/<outname>_spectrum.ps``.

    NOTE(review): odict and sdict are accepted but unused here --
    presumably kept for interface symmetry with getdata's return value.
    """
    # Temporarily disable interactive plotting so savefig doesn't pop windows;
    # the previous interactive state is restored at the end.
    isetting=P.isinteractive()
    P.ioff()

    P.clf()
    P.plot(obsmode,val,'.')
    P.ylabel('(pysyn-syn)/syn')
    P.xlabel('obsmode')
    P.title("%s: %s"%(instr,fieldname))
    P.savefig(os.path.join(outdir,outname+'_obsmode.ps'))

    P.clf()
    P.plot(spectrum,val,'.')
    P.ylabel('(pysyn-syn)/syn')
    P.xlabel('spectrum')
    P.title("%s: %s"%(instr,fieldname))
    P.savefig(os.path.join(outdir,outname+'_spectrum.ps'))

    matplotlib.interactive(isetting)
def run(dirpath, fieldname, instr):
    """Drive one analysis pass: parse the logs under *dirpath*, save the
    summary FITS table, and write the two scatter plots next to it."""
    results = getdata(dirpath, fieldname, instr)
    namedict, odict, sdict, obsmode, spectrum, val = results
    out_dir = os.path.abspath(os.path.dirname(dirpath))
    stem = "%s_%s" % (instr, fieldname)
    plotdata(obsmode, spectrum, val, odict, sdict,
             instr, fieldname, out_dir, stem)
if __name__ == '__main__':
#dirpath, fieldname, instr=sys.argv[1:]
try:
run(*sys.argv[1:])
except TypeError as e:
print("sys.argv[1:] = ",sys.argv[1:])
raise e
| 3,054 | 0 | 69 |
7cec0f109dc0a38586ada7f4713451e263fe1ec9 | 5,408 | py | Python | flask-server/app.py | michael-conrad/neuspell | f1d1a8b4efa7c6aa6e0564ea17db152905f4c7dc | [
"MIT"
] | 1 | 2021-05-21T09:39:50.000Z | 2021-05-21T09:39:50.000Z | flask-server/app.py | michael-conrad/neuspell | f1d1a8b4efa7c6aa6e0564ea17db152905f4c7dc | [
"MIT"
] | null | null | null | flask-server/app.py | michael-conrad/neuspell | f1d1a8b4efa7c6aa6e0564ea17db152905f4c7dc | [
"MIT"
] | null | null | null | """
Usage
-----
CUDA_VISIBLE_DEVICES=0 python app.py
"""
import os
from time import time
# from neuspell import AspellChecker, JamspellChecker
from neuspell import BertsclstmChecker, CnnlstmChecker, ElmosclstmChecker, NestedlstmChecker
from neuspell import SclstmChecker, SclstmbertChecker, SclstmelmoChecker, BertChecker
from flask import Flask, render_template, url_for, request
from flask_cors import CORS
TOKENIZE = True
PRELOADED_MODELS = {}
CURR_MODEL_KEYWORD = "elmosc-rnn"
CURR_MODEL = None
TOPK = 1
LOGS_PATH = "./logs"
if not os.path.exists(LOGS_PATH):
os.makedirs(LOGS_PATH)
opfile = open(os.path.join(LOGS_PATH, str(time()) + ".logs.txt"), "w")
# Define the app
app = Flask(__name__)
CORS(app) # needed for cross-domain requests, allow everything by default
@app.route('/')
@app.route('/home', methods=['POST'])
@app.route('/loaded', methods=['POST'])
@app.route('/reset', methods=['POST'])
@app.route('/predict', methods=['POST'])
if __name__ == "__main__":
print("*** Flask Server ***")
preload_models()
app.run(debug=True, host='0.0.0.0', port=5000)
| 36.789116 | 109 | 0.645155 | """
Usage
-----
CUDA_VISIBLE_DEVICES=0 python app.py
"""
import os
from time import time
# from neuspell import AspellChecker, JamspellChecker
from neuspell import BertsclstmChecker, CnnlstmChecker, ElmosclstmChecker, NestedlstmChecker
from neuspell import SclstmChecker, SclstmbertChecker, SclstmelmoChecker, BertChecker
from flask import Flask, render_template, url_for, request
from flask_cors import CORS
TOKENIZE = True
PRELOADED_MODELS = {}
CURR_MODEL_KEYWORD = "elmosc-rnn"
CURR_MODEL = None
TOPK = 1
LOGS_PATH = "./logs"
if not os.path.exists(LOGS_PATH):
os.makedirs(LOGS_PATH)
opfile = open(os.path.join(LOGS_PATH, str(time()) + ".logs.txt"), "w")
# Define the app
app = Flask(__name__)
CORS(app) # needed for cross-domain requests, allow everything by default
@app.route('/')
@app.route('/home', methods=['POST'])
def home():
    """Serve the landing page with the checker-selection form."""
    return render_template('home.html')
@app.route('/loaded', methods=['POST'])
def loaded():
    """Load the checker chosen in the 'checkers' form field and show the
    editor page.  Updates the module-level current-model globals."""
    global CURR_MODEL_KEYWORD, CURR_MODEL
    print(request.form)
    print(request.form["checkers"])
    CURR_MODEL_KEYWORD = request.form["checkers"]
    CURR_MODEL = load_model(CURR_MODEL_KEYWORD)
    return render_template('loaded.html')
@app.route('/reset', methods=['POST'])
def reset():
    """Clear the editor by re-rendering the loaded-model page."""
    return render_template('loaded.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Spell-check the submitted text with the currently loaded model.

    Reads the text from the 'hidden-message' form field, logs the query,
    and renders the result page with changed tokens wrapped in the
    '+-+'/'-+-' markers (presumably turned into highlights by the
    template -- TODO confirm).
    """
    global CURR_MODEL, CURR_MODEL_KEYWORD, TOPK
    if request.method == 'POST':
        print("#################")
        print(request.form)
        print(request.form.keys())
        message = request.form['hidden-message']
        message = message.strip("\n").strip("\r")
        # Empty input: just return to the editor page.
        if message == "":
            return render_template('loaded.html')
        if TOPK == 1:
            # correct_string(return_all=True) yields the tokenized input and
            # the corrected output as parallel whitespace-separated strings.
            message_modified, result = CURR_MODEL.correct_string(message, return_all=True)
            print(message)
            print(message_modified)
            print(result)
            save_query(CURR_MODEL_KEYWORD + "\t" + message + "\t" + message_modified + "\t" + result + "\n")
            # Mark token pairs that differ so the template can highlight them.
            paired = [(a, b) if a == b else ("+-+" + a + "-+-", "+-+" + b + "-+-") for a, b in
                      zip(message_modified.split(), result.split())]
            print(paired)
            return render_template('result.html', prediction=" ".join([x[1] for x in paired]),
                                   message=" ".join([x[0] for x in paired]))
        else:
            raise NotImplementedError("please keep TOPK=1")
            # results = PRELOADED_MODELS[CURR_MODEL_KEYWORD].correct_strings_for_ui([message], topk=TOPK)
            # save_query(CURR_MODEL_KEYWORD+"\t"+message+"\t"+"\t".join(results)+"\n")
            # return render_template('results.html', prediction=results, message=message)
    return render_template('home.html')
def load_model(model_keyword="elmosc-rnn"):
    """Return a spell-checker instance for ``model_keyword``.

    Preloaded instances are reused; otherwise a fresh checker is built.
    Raises Exception for the disabled aspell/jamspell backends and
    NotImplementedError for unrecognized keywords.
    """
    global PRELOADED_MODELS
    # Reuse a preloaded instance when one is available.
    if model_keyword in PRELOADED_MODELS:
        return PRELOADED_MODELS[model_keyword]
    # These backends require extra modules and are commented out above.
    if model_keyword in ("aspell", "jamspell"):
        raise Exception(
            "Not enabled. Install required modules and uncomment this to enable")
    # Dispatch table replaces the long if/elif chain; all checkers share the
    # same constructor signature.
    factories = {
        "cnn-rnn": CnnlstmChecker,
        "sc-rnn": SclstmChecker,
        "nested-rnn": NestedlstmChecker,
        "bert": BertChecker,
        "elmosc-rnn": ElmosclstmChecker,
        "scrnn-elmo": SclstmelmoChecker,
        "bertsc-rnn": BertsclstmChecker,
        "scrnn-bert": SclstmbertChecker,
    }
    if model_keyword not in factories:
        raise NotImplementedError(f"unknown model_keyword: {model_keyword}")
    return factories[model_keyword](tokenize=TOKENIZE, pretrained=True)
def preload_models():
    """Instantiate the commonly used checkers once at startup and cache
    them in the module-level PRELOADED_MODELS dict.

    Only a subset is preloaded (memory trade-off -- the commented entries
    can be enabled as needed); load_model() builds the rest on demand.
    """
    print("pre-loading models")
    global PRELOADED_MODELS
    PRELOADED_MODELS = {
        # "aspell": AspellChecker(),
        # "jamspell": JamspellChecker(),
        # "cnn-rnn": CnnlstmChecker(pretrained=True),
        "sc-rnn": SclstmChecker(tokenize=TOKENIZE, pretrained=True),
        # "nested-rnn": NestedlstmChecker(pretrained=True),
        "bert": BertChecker(tokenize=TOKENIZE, pretrained=True),
        "elmosc-rnn": ElmosclstmChecker(tokenize=TOKENIZE, pretrained=True),
        # "scrnn-elmo": SclstmelmoChecker(pretrained=True),
        # "bertsc-rnn": BertsclstmChecker(pretrained=True),
        # "scrnn-bert": SclstmbertChecker(pretrained=True)
    }
    print("\n")
    for k, v in PRELOADED_MODELS.items():
        print(f"{k}: {v}")
    print("\n")
    return
def save_query(text):
    """Append one record to the session's log file (opened at import time)
    and flush immediately so queries survive an abrupt shutdown."""
    global opfile
    opfile.write(text)
    opfile.flush()
    return
if __name__ == "__main__":
print("*** Flask Server ***")
preload_models()
app.run(debug=True, host='0.0.0.0', port=5000)
| 4,092 | 0 | 167 |
77594b0aae3b85456427f41cabbaffe0102750d9 | 24,707 | py | Python | dist/ba_data/python/bastd/ui/helpui.py | Bartixxx32/Bombsquad-Ballistica-Modded-Server | 26d36f07a5b96702e4fbdf172c0d66671f1ee0bd | [
"MIT"
] | 317 | 2020-04-04T00:33:10.000Z | 2022-03-28T01:07:09.000Z | assets/src/ba_data/python/bastd/ui/helpui.py | Alshahriah/ballistica | 326f6677a0118667e93ce9034849622ebef706fa | [
"MIT"
] | 315 | 2020-04-04T22:33:10.000Z | 2022-03-31T22:50:02.000Z | assets/src/ba_data/python/bastd/ui/helpui.py | Alshahriah/ballistica | 326f6677a0118667e93ce9034849622ebef706fa | [
"MIT"
] | 97 | 2020-04-04T01:32:17.000Z | 2022-03-16T19:02:59.000Z | # Released under the MIT License. See LICENSE for details.
#
"""Provides help related ui."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
import ba
if TYPE_CHECKING:
from typing import Optional
class HelpWindow(ba.Window):
"""A window providing help on how to play."""
| 41.594276 | 79 | 0.442709 | # Released under the MIT License. See LICENSE for details.
#
"""Provides help related ui."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
import ba
if TYPE_CHECKING:
from typing import Optional
class HelpWindow(ba.Window):
    """A window providing help on how to play."""

    def __init__(self,
                 main_menu: bool = False,
                 origin_widget: ba.Widget = None):
        """Build the full help UI.

        Args:
            main_menu: when True, the window acts as part of the main-menu
                stack (back-style button; closing restores MainMenuWindow).
            origin_widget: optional widget whose center is used as the
                scale-origin for the open/close transitions.
        """
        # pylint: disable=too-many-statements
        # pylint: disable=too-many-locals
        from ba.internal import get_remote_app_name
        ba.set_analytics_screen('Help Window')
        # If they provided an origin-widget, scale up from that.
        scale_origin: Optional[tuple[float, float]]
        if origin_widget is not None:
            self._transition_out = 'out_scale'
            scale_origin = origin_widget.get_screen_space_center()
            transition = 'in_scale'
        else:
            self._transition_out = 'out_right'
            scale_origin = None
            transition = 'in_right'
        self._r = 'helpWindow'
        getres = ba.app.lang.get_resource
        self._main_menu = main_menu
        # Window metrics depend on the current UI scale (small/medium/large).
        uiscale = ba.app.ui.uiscale
        width = 950 if uiscale is ba.UIScale.SMALL else 750
        x_offs = 100 if uiscale is ba.UIScale.SMALL else 0
        height = (460 if uiscale is ba.UIScale.SMALL else
                  530 if uiscale is ba.UIScale.MEDIUM else 600)
        super().__init__(root_widget=ba.containerwidget(
            size=(width, height),
            transition=transition,
            toolbar_visibility='menu_minimal',
            scale_origin_stack_offset=scale_origin,
            scale=(1.77 if uiscale is ba.UIScale.SMALL else
                   1.25 if uiscale is ba.UIScale.MEDIUM else 1.0),
            stack_offset=(0, -30) if uiscale is ba.UIScale.SMALL else (
                0, 15) if uiscale is ba.UIScale.MEDIUM else (0, 0)))
        # Title bar text.
        ba.textwidget(parent=self._root_widget,
                      position=(0, height -
                                (50 if uiscale is ba.UIScale.SMALL else 45)),
                      size=(width, 25),
                      text=ba.Lstr(resource=self._r + '.titleText',
                                   subs=[('${APP_NAME}',
                                          ba.Lstr(resource='titleText'))]),
                      color=ba.app.ui.title_color,
                      h_align='center',
                      v_align='top')
        # Scrollable region holding all of the help content.
        self._scrollwidget = ba.scrollwidget(
            parent=self._root_widget,
            position=(44 + x_offs, 55 if uiscale is ba.UIScale.SMALL else 55),
            simple_culling_v=100.0,
            size=(width - (88 + 2 * x_offs),
                  height - 120 + (5 if uiscale is ba.UIScale.SMALL else 0)),
            capture_arrows=True)
        if ba.app.ui.use_toolbars:
            ba.widget(edit=self._scrollwidget,
                      right_widget=_ba.get_special_widget('party_button'))
        ba.containerwidget(edit=self._root_widget,
                           selected_child=self._scrollwidget)
        # ugly: create this last so it gets first dibs at touch events (since
        # we have it close to the scroll widget)
        if uiscale is ba.UIScale.SMALL and ba.app.ui.use_toolbars:
            ba.containerwidget(edit=self._root_widget,
                               on_cancel_call=self._close)
            ba.widget(edit=self._scrollwidget,
                      left_widget=_ba.get_special_widget('back_button'))
        else:
            # Dedicated back/close button when the toolbar back button isn't
            # available.
            btn = ba.buttonwidget(
                parent=self._root_widget,
                position=(x_offs +
                          (40 + 0 if uiscale is ba.UIScale.SMALL else 70),
                          height -
                          (59 if uiscale is ba.UIScale.SMALL else 50)),
                size=(140, 60),
                scale=0.7 if uiscale is ba.UIScale.SMALL else 0.8,
                label=ba.Lstr(
                    resource='backText') if self._main_menu else 'Close',
                button_type='back' if self._main_menu else None,
                extra_touch_border_scale=2.0,
                autoselect=True,
                on_activate_call=self._close)
            ba.containerwidget(edit=self._root_widget, cancel_button=btn)
            if self._main_menu:
                ba.buttonwidget(edit=btn,
                                button_type='backSmall',
                                size=(60, 55),
                                label=ba.charstr(ba.SpecialChar.BACK))
        # Content container; height reserves locale-dependent extra space for
        # a couple of translated paragraphs.
        self._sub_width = 660
        self._sub_height = 1590 + ba.app.lang.get_resource(
            self._r + '.someDaysExtraSpace') + ba.app.lang.get_resource(
                self._r + '.orPunchingSomethingExtraSpace')
        self._subcontainer = ba.containerwidget(parent=self._scrollwidget,
                                                size=(self._sub_width,
                                                      self._sub_height),
                                                background=False,
                                                claims_left_right=False,
                                                claims_tab=False)
        # Shared layout values; v walks downward from the top of the
        # subcontainer as sections are emitted.
        spacing = 1.0
        h = self._sub_width * 0.5
        v = self._sub_height - 55
        logo_tex = ba.gettexture('logo')
        icon_buffer = 1.1
        header = (0.7, 1.0, 0.7, 1.0)
        header2 = (0.8, 0.8, 1.0, 1.0)
        paragraph = (0.8, 0.8, 1.0, 1.0)
        # --- Welcome header with logo to its left. ---
        txt = ba.Lstr(resource=self._r + '.welcomeText',
                      subs=[('${APP_NAME}', ba.Lstr(resource='titleText'))
                            ]).evaluate()
        txt_scale = 1.4
        txt_maxwidth = 480
        ba.textwidget(parent=self._subcontainer,
                      position=(h, v),
                      size=(0, 0),
                      scale=txt_scale,
                      flatness=0.5,
                      res_scale=1.5,
                      text=txt,
                      h_align='center',
                      color=header,
                      v_align='center',
                      maxwidth=txt_maxwidth)
        txt_width = min(
            txt_maxwidth,
            _ba.get_string_width(txt, suppress_warning=True) * txt_scale)
        icon_size = 70
        hval2 = h - (txt_width * 0.5 + icon_size * 0.5 * icon_buffer)
        ba.imagewidget(parent=self._subcontainer,
                       size=(icon_size, icon_size),
                       position=(hval2 - 0.5 * icon_size,
                                 v - 0.45 * icon_size),
                       texture=logo_tex)
        force_test = False
        app = ba.app
        # Alibaba-TV builds get Chinese pairing instructions plus QR code
        # instead of the generic intro paragraphs.
        if (app.platform == 'android'
                and app.subplatform == 'alibaba') or force_test:
            v -= 120.0
            txtv = (
                '\xe8\xbf\x99\xe6\x98\xaf\xe4\xb8\x80\xe4\xb8\xaa\xe5\x8f\xaf'
                '\xe4\xbb\xa5\xe5\x92\x8c\xe5\xae\xb6\xe4\xba\xba\xe6\x9c\x8b'
                '\xe5\x8f\x8b\xe4\xb8\x80\xe8\xb5\xb7\xe7\x8e\xa9\xe7\x9a\x84'
                '\xe6\xb8\xb8\xe6\x88\x8f,\xe5\x90\x8c\xe6\x97\xb6\xe6\x94\xaf'
                '\xe6\x8c\x81\xe8\x81\x94 \xe2\x80\xa8\xe7\xbd\x91\xe5\xaf\xb9'
                '\xe6\x88\x98\xe3\x80\x82\n'
                '\xe5\xa6\x82\xe6\xb2\xa1\xe6\x9c\x89\xe6\xb8\xb8\xe6\x88\x8f'
                '\xe6\x89\x8b\xe6\x9f\x84,\xe5\x8f\xaf\xe4\xbb\xa5\xe4\xbd\xbf'
                '\xe7\x94\xa8\xe7\xa7\xbb\xe5\x8a\xa8\xe8\xae\xbe\xe5\xa4\x87'
                '\xe6\x89\xab\xe7\xa0\x81\xe4\xb8\x8b\xe8\xbd\xbd\xe2\x80\x9c'
                '\xe9\x98\xbf\xe9\x87\x8c\xc2'
                '\xa0TV\xc2\xa0\xe5\x8a\xa9\xe6\x89'
                '\x8b\xe2\x80\x9d\xe7\x94\xa8 \xe6\x9d\xa5\xe4\xbb\xa3\xe6\x9b'
                '\xbf\xe5\xa4\x96\xe8\xae\xbe\xe3\x80\x82\n'
                '\xe6\x9c\x80\xe5\xa4\x9a\xe6\x94\xaf\xe6\x8c\x81\xe6\x8e\xa5'
                '\xe5\x85\xa5\xc2\xa08\xc2\xa0\xe4\xb8\xaa\xe5\xa4\x96\xe8'
                '\xae\xbe')
            ba.textwidget(parent=self._subcontainer,
                          size=(0, 0),
                          h_align='center',
                          v_align='center',
                          maxwidth=self._sub_width * 0.9,
                          position=(self._sub_width * 0.5, v - 180),
                          text=txtv)
            ba.imagewidget(parent=self._subcontainer,
                           position=(self._sub_width - 320, v - 120),
                           size=(200, 200),
                           texture=ba.gettexture('aliControllerQR'))
            ba.imagewidget(parent=self._subcontainer,
                           position=(90, v - 130),
                           size=(210, 210),
                           texture=ba.gettexture('multiplayerExamples'))
            v -= 120.0
        else:
            # Generic intro paragraphs.
            v -= spacing * 50.0
            txt = ba.Lstr(resource=self._r + '.someDaysText').evaluate()
            ba.textwidget(parent=self._subcontainer,
                          position=(h, v),
                          size=(0, 0),
                          scale=1.2,
                          maxwidth=self._sub_width * 0.9,
                          text=txt,
                          h_align='center',
                          color=paragraph,
                          v_align='center',
                          flatness=1.0)
            v -= (spacing * 25.0 + getres(self._r + '.someDaysExtraSpace'))
            txt_scale = 0.66
            txt = ba.Lstr(resource=self._r +
                          '.orPunchingSomethingText').evaluate()
            ba.textwidget(parent=self._subcontainer,
                          position=(h, v),
                          size=(0, 0),
                          scale=txt_scale,
                          maxwidth=self._sub_width * 0.9,
                          text=txt,
                          h_align='center',
                          color=paragraph,
                          v_align='center',
                          flatness=1.0)
            v -= (spacing * 27.0 +
                  getres(self._r + '.orPunchingSomethingExtraSpace'))
            txt_scale = 1.0
            txt = ba.Lstr(resource=self._r + '.canHelpText',
                          subs=[('${APP_NAME}', ba.Lstr(resource='titleText'))
                                ]).evaluate()
            ba.textwidget(parent=self._subcontainer,
                          position=(h, v),
                          size=(0, 0),
                          scale=txt_scale,
                          flatness=1.0,
                          text=txt,
                          h_align='center',
                          color=paragraph,
                          v_align='center')
        # --- "To get the most out of the game" section (friends/devices). ---
        v -= spacing * 70.0
        txt_scale = 1.0
        txt = ba.Lstr(resource=self._r + '.toGetTheMostText').evaluate()
        ba.textwidget(parent=self._subcontainer,
                      position=(h, v),
                      size=(0, 0),
                      scale=txt_scale,
                      maxwidth=self._sub_width * 0.9,
                      text=txt,
                      h_align='center',
                      color=header,
                      v_align='center',
                      flatness=1.0)
        v -= spacing * 40.0
        txt_scale = 0.74
        txt = ba.Lstr(resource=self._r + '.friendsText').evaluate()
        hval2 = h - 220
        ba.textwidget(parent=self._subcontainer,
                      position=(hval2, v),
                      size=(0, 0),
                      scale=txt_scale,
                      maxwidth=100,
                      text=txt,
                      h_align='right',
                      color=header,
                      v_align='center',
                      flatness=1.0)
        txt = ba.Lstr(resource=self._r + '.friendsGoodText',
                      subs=[('${APP_NAME}', ba.Lstr(resource='titleText'))
                            ]).evaluate()
        txt_scale = 0.7
        ba.textwidget(parent=self._subcontainer,
                      position=(hval2 + 10, v + 8),
                      size=(0, 0),
                      scale=txt_scale,
                      maxwidth=500,
                      text=txt,
                      h_align='left',
                      color=paragraph,
                      flatness=1.0)
        app = ba.app
        v -= spacing * 45.0
        # VR builds describe "devices" rather than "controllers".
        txt = (ba.Lstr(resource=self._r + '.devicesText').evaluate()
               if app.vr_mode else ba.Lstr(resource=self._r +
                                           '.controllersText').evaluate())
        txt_scale = 0.74
        hval2 = h - 220
        ba.textwidget(parent=self._subcontainer,
                      position=(hval2, v),
                      size=(0, 0),
                      scale=txt_scale,
                      maxwidth=100,
                      text=txt,
                      h_align='right',
                      v_align='center',
                      color=header,
                      flatness=1.0)
        txt_scale = 0.7
        if not app.vr_mode:
            # iiRcade builds only support the remote app, so use the
            # remote-only variant when available.
            infotxt = ('.controllersInfoTextRemoteOnly'
                       if app.iircade_mode else '.controllersInfoText')
            txt = ba.Lstr(
                resource=self._r + infotxt,
                fallback_resource=self._r + '.controllersInfoText',
                subs=[('${APP_NAME}', ba.Lstr(resource='titleText')),
                      ('${REMOTE_APP_NAME}', get_remote_app_name())
                      ]).evaluate()
        else:
            txt = ba.Lstr(resource=self._r + '.devicesInfoText',
                          subs=[('${APP_NAME}',
                                 ba.Lstr(resource='titleText'))
                                ]).evaluate()
        ba.textwidget(parent=self._subcontainer,
                      position=(hval2 + 10, v + 8),
                      size=(0, 0),
                      scale=txt_scale,
                      maxwidth=500,
                      max_height=105,
                      text=txt,
                      h_align='left',
                      color=paragraph,
                      flatness=1.0)
        # --- Controls section header with flanking logo. ---
        v -= spacing * 150.0
        txt = ba.Lstr(resource=self._r + '.controlsText').evaluate()
        txt_scale = 1.4
        txt_maxwidth = 480
        ba.textwidget(parent=self._subcontainer,
                      position=(h, v),
                      size=(0, 0),
                      scale=txt_scale,
                      flatness=0.5,
                      text=txt,
                      h_align='center',
                      color=header,
                      v_align='center',
                      res_scale=1.5,
                      maxwidth=txt_maxwidth)
        txt_width = min(
            txt_maxwidth,
            _ba.get_string_width(txt, suppress_warning=True) * txt_scale)
        icon_size = 70
        hval2 = h - (txt_width * 0.5 + icon_size * 0.5 * icon_buffer)
        ba.imagewidget(parent=self._subcontainer,
                       size=(icon_size, icon_size),
                       position=(hval2 - 0.5 * icon_size,
                                 v - 0.45 * icon_size),
                       texture=logo_tex)
        v -= spacing * 45.0
        txt_scale = 0.7
        txt = ba.Lstr(resource=self._r + '.controlsSubtitleText',
                      subs=[('${APP_NAME}', ba.Lstr(resource='titleText'))
                            ]).evaluate()
        ba.textwidget(parent=self._subcontainer,
                      position=(h, v),
                      size=(0, 0),
                      scale=txt_scale,
                      maxwidth=self._sub_width * 0.9,
                      flatness=1.0,
                      text=txt,
                      h_align='center',
                      color=paragraph,
                      v_align='center')
        v -= spacing * 160.0
        # Four action buttons laid out in a diamond around (h, v):
        # punch (left), bomb (right), pick-up (top), jump (bottom),
        # each with a color-matched label nearby.
        sep = 70
        icon_size = 100
        # icon_size_2 = 30
        hval2 = h - sep
        vval2 = v
        ba.imagewidget(parent=self._subcontainer,
                       size=(icon_size, icon_size),
                       position=(hval2 - 0.5 * icon_size,
                                 vval2 - 0.5 * icon_size),
                       texture=ba.gettexture('buttonPunch'),
                       color=(1, 0.7, 0.3))
        txt_scale = getres(self._r + '.punchInfoTextScale')
        txt = ba.Lstr(resource=self._r + '.punchInfoText').evaluate()
        ba.textwidget(parent=self._subcontainer,
                      position=(h - sep - 185 + 70, v + 120),
                      size=(0, 0),
                      scale=txt_scale,
                      flatness=1.0,
                      text=txt,
                      h_align='center',
                      color=(1, 0.7, 0.3, 1.0),
                      v_align='top')
        hval2 = h + sep
        vval2 = v
        ba.imagewidget(parent=self._subcontainer,
                       size=(icon_size, icon_size),
                       position=(hval2 - 0.5 * icon_size,
                                 vval2 - 0.5 * icon_size),
                       texture=ba.gettexture('buttonBomb'),
                       color=(1, 0.3, 0.3))
        txt = ba.Lstr(resource=self._r + '.bombInfoText').evaluate()
        txt_scale = getres(self._r + '.bombInfoTextScale')
        ba.textwidget(parent=self._subcontainer,
                      position=(h + sep + 50 + 60, v - 35),
                      size=(0, 0),
                      scale=txt_scale,
                      flatness=1.0,
                      maxwidth=270,
                      text=txt,
                      h_align='center',
                      color=(1, 0.3, 0.3, 1.0),
                      v_align='top')
        hval2 = h
        vval2 = v + sep
        ba.imagewidget(parent=self._subcontainer,
                       size=(icon_size, icon_size),
                       position=(hval2 - 0.5 * icon_size,
                                 vval2 - 0.5 * icon_size),
                       texture=ba.gettexture('buttonPickUp'),
                       color=(0.5, 0.5, 1))
        txtl = ba.Lstr(resource=self._r + '.pickUpInfoText')
        txt_scale = getres(self._r + '.pickUpInfoTextScale')
        ba.textwidget(parent=self._subcontainer,
                      position=(h + 60 + 120, v + sep + 50),
                      size=(0, 0),
                      scale=txt_scale,
                      flatness=1.0,
                      text=txtl,
                      h_align='center',
                      color=(0.5, 0.5, 1, 1.0),
                      v_align='top')
        hval2 = h
        vval2 = v - sep
        ba.imagewidget(parent=self._subcontainer,
                       size=(icon_size, icon_size),
                       position=(hval2 - 0.5 * icon_size,
                                 vval2 - 0.5 * icon_size),
                       texture=ba.gettexture('buttonJump'),
                       color=(0.4, 1, 0.4))
        txt = ba.Lstr(resource=self._r + '.jumpInfoText').evaluate()
        txt_scale = getres(self._r + '.jumpInfoTextScale')
        ba.textwidget(parent=self._subcontainer,
                      position=(h - 250 + 75, v - sep - 15 + 30),
                      size=(0, 0),
                      scale=txt_scale,
                      flatness=1.0,
                      text=txt,
                      h_align='center',
                      color=(0.4, 1, 0.4, 1.0),
                      v_align='top')
        txt = ba.Lstr(resource=self._r + '.runInfoText').evaluate()
        txt_scale = getres(self._r + '.runInfoTextScale')
        ba.textwidget(parent=self._subcontainer,
                      position=(h, v - sep - 100),
                      size=(0, 0),
                      scale=txt_scale,
                      maxwidth=self._sub_width * 0.93,
                      flatness=1.0,
                      text=txt,
                      h_align='center',
                      color=(0.7, 0.7, 1.0, 1.0),
                      v_align='center')
        # --- Powerups section header with flanking logo. ---
        v -= spacing * 280.0
        txt = ba.Lstr(resource=self._r + '.powerupsText').evaluate()
        txt_scale = 1.4
        txt_maxwidth = 480
        ba.textwidget(parent=self._subcontainer,
                      position=(h, v),
                      size=(0, 0),
                      scale=txt_scale,
                      flatness=0.5,
                      text=txt,
                      h_align='center',
                      color=header,
                      v_align='center',
                      maxwidth=txt_maxwidth)
        txt_width = min(
            txt_maxwidth,
            _ba.get_string_width(txt, suppress_warning=True) * txt_scale)
        icon_size = 70
        hval2 = h - (txt_width * 0.5 + icon_size * 0.5 * icon_buffer)
        ba.imagewidget(parent=self._subcontainer,
                       size=(icon_size, icon_size),
                       position=(hval2 - 0.5 * icon_size,
                                 v - 0.45 * icon_size),
                       texture=logo_tex)
        v -= spacing * 50.0
        txt_scale = getres(self._r + '.powerupsSubtitleTextScale')
        txt = ba.Lstr(resource=self._r + '.powerupsSubtitleText').evaluate()
        ba.textwidget(parent=self._subcontainer,
                      position=(h, v),
                      size=(0, 0),
                      scale=txt_scale,
                      maxwidth=self._sub_width * 0.9,
                      text=txt,
                      h_align='center',
                      color=paragraph,
                      v_align='center',
                      flatness=1.0)
        v -= spacing * 1.0
        # One row per powerup: drop-shadow + icon at column mm1, name at mm2,
        # description at mm3.
        mm1 = -270
        mm2 = -215
        mm3 = 0
        icon_size = 50
        shadow_size = 80
        shadow_offs_x = 3
        shadow_offs_y = -4
        t_big = 1.1
        t_small = 0.65
        shadow_tex = ba.gettexture('shadowSharp')
        for tex in [
                'powerupPunch', 'powerupShield', 'powerupBomb',
                'powerupHealth', 'powerupIceBombs', 'powerupImpactBombs',
                'powerupStickyBombs', 'powerupLandMines', 'powerupCurse'
        ]:
            name = ba.Lstr(resource=self._r + '.' + tex + 'NameText')
            desc = ba.Lstr(resource=self._r + '.' + tex + 'DescriptionText')
            v -= spacing * 60.0
            ba.imagewidget(
                parent=self._subcontainer,
                size=(shadow_size, shadow_size),
                position=(h + mm1 + shadow_offs_x - 0.5 * shadow_size,
                          v + shadow_offs_y - 0.5 * shadow_size),
                texture=shadow_tex,
                color=(0, 0, 0),
                opacity=0.5)
            ba.imagewidget(parent=self._subcontainer,
                           size=(icon_size, icon_size),
                           position=(h + mm1 - 0.5 * icon_size,
                                     v - 0.5 * icon_size),
                           texture=ba.gettexture(tex))
            txt_scale = t_big
            txtl = name
            ba.textwidget(parent=self._subcontainer,
                          position=(h + mm2, v + 3),
                          size=(0, 0),
                          scale=txt_scale,
                          maxwidth=200,
                          flatness=1.0,
                          text=txtl,
                          h_align='left',
                          color=header2,
                          v_align='center')
            txt_scale = t_small
            txtl = desc
            ba.textwidget(parent=self._subcontainer,
                          position=(h + mm3, v),
                          size=(0, 0),
                          scale=txt_scale,
                          maxwidth=300,
                          flatness=1.0,
                          text=txtl,
                          h_align='left',
                          color=paragraph,
                          v_align='center',
                          res_scale=0.5)

    def _close(self) -> None:
        """Transition this window out, restoring the main menu if needed."""
        # pylint: disable=cyclic-import
        from bastd.ui.mainmenu import MainMenuWindow
        ba.containerwidget(edit=self._root_widget,
                           transition=self._transition_out)
        if self._main_menu:
            ba.app.ui.set_main_menu_window(
                MainMenuWindow(transition='in_left').get_root_widget())
| 24,336 | 0 | 54 |
53880225806a57b224015eb20089f09627c278cd | 5,153 | py | Python | spyder/utils/switcher.py | suokunlong/spyder | 2d5d450fdcef232fb7f38e7fefc27f0e7f704c9a | [
"MIT"
] | 3 | 2019-09-27T21:00:00.000Z | 2021-03-07T23:28:32.000Z | spyder/utils/switcher.py | jastema/spyder | 0ef48ea227c53f57556cd8002087dc404b0108b0 | [
"MIT"
] | 3 | 2021-10-06T22:49:31.000Z | 2022-02-27T12:28:12.000Z | spyder/utils/switcher.py | jastema/spyder | 0ef48ea227c53f57556cd8002087dc404b0108b0 | [
"MIT"
] | 2 | 2021-04-30T01:18:22.000Z | 2021-09-19T06:31:42.000Z | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Utils to handle Switcher elements.
"""
# Standard library imports
import os
import os.path as osp
import sys
# Local imports
from spyder.config.base import _
from spyder.py3compat import iteritems, PY2
from spyder.utils import icon_manager as ima
if PY2:
from itertools import izip as zip
def shorten_paths(path_list, is_unsaved):
    """
    Takes a list of paths and tries to "intelligently" shorten them all. The
    aim is to make it clear to the user where the paths differ, as that is
    likely what they care about. Note that this operates on a list of paths
    not on individual paths.

    If the path ends in an actual file name, it will be trimmed off.

    Args:
        path_list: list of path strings.
        is_unsaved: parallel list of bools; True entries are rendered as the
            translated 'unsaved file' label instead of a path.

    Returns:
        List of shortened display strings, parallel to *path_list*.
    """
    # TODO: at the end, if the path is too long, should do a more dumb kind of
    # shortening, but not completely dumb.

    # Convert the path strings to a list of tokens and start building the
    # new_path using the drive
    path_list = path_list[:]  # Make a local copy
    new_path_list = []
    common_prefix = osp.dirname(osp.commonprefix(path_list))

    for ii, (path, is_unsav) in enumerate(zip(path_list, is_unsaved)):
        if is_unsav:
            new_path_list.append(_('unsaved file'))
            path_list[ii] = None
        else:
            drive, path = osp.splitdrive(osp.dirname(path))
            new_path_list.append(drive + osp.sep)
            path_list[ii] = [part for part in path.split(osp.sep) if part]

    def recurse_level(level_idx):
        # FIX: this nested helper was missing from this copy of the module,
        # so every call to shorten_paths() raised NameError at the
        # recurse_level(...) call below.  Restored implementation: given a
        # mapping of index -> remaining path tokens, appends shortened
        # representations to new_path_list, recursing per group.
        sep = os.sep

        # If toks are all empty we need not have recursed here
        if not any(level_idx.values()):
            return

        # Firstly, find the longest common prefix for all in the level
        # (s = len of longest common prefix)
        sample_toks = list(level_idx.values())[0]
        if not sample_toks:
            s = 0
        else:
            for s, sample_val in enumerate(sample_toks):
                if not all(len(toks) > s and toks[s] == sample_val
                           for toks in level_idx.values()):
                    break

        # Shorten longest common prefix
        if s == 0:
            short_form = ''
        else:
            if s == 1:
                short_form = sample_toks[0]
            elif s == 2:
                short_form = sample_toks[0] + sep + sample_toks[1]
            else:
                short_form = "..." + sep + sample_toks[s - 1]
        for idx in level_idx:
            new_path_list[idx] += short_form + sep
            level_idx[idx] = level_idx[idx][s:]

        # Group the remaining bit after the common prefix, shorten, and
        # recurse
        while level_idx:
            k, group = 0, level_idx  # k is length of group's common prefix
            while True:
                # Abort if we've gone beyond end of one or more in the group
                prospective_group = {idx: toks for idx, toks
                                     in group.items() if len(toks) == k}
                if prospective_group:
                    if k == 0:  # we spit out the group with no suffix
                        group = prospective_group
                    break
                # Only keep going if all n still match on the kth token.
                # (dict.items() is used rather than py3compat.iteritems so
                # the helper depends only on builtins; equivalent behavior.)
                _, sample_toks = next(iter(group.items()))
                prospective_group = {idx: toks for idx, toks
                                     in group.items()
                                     if toks[k] == sample_toks[k]}
                if len(prospective_group) == len(group) or k == 0:
                    group = prospective_group
                    k += 1
                else:
                    break
            _, sample_toks = next(iter(group.items()))
            if k == 0:
                short_form = ''
            elif k == 1:
                short_form = sample_toks[0]
            elif k == 2:
                short_form = sample_toks[0] + sep + sample_toks[1]
            else:  # k > 2
                short_form = sample_toks[0] + "..." + sep + sample_toks[k - 1]
            for idx in group.keys():
                new_path_list[idx] += short_form + (sep if k > 0 else '')
                del level_idx[idx]
            recurse_level({idx: toks[k:] for idx, toks in group.items()})

    recurse_level({i: pl for i, pl in enumerate(path_list) if pl})

    if common_prefix:
        # Collapse the prefix shared by every path down to '...'.
        result_paths = []
        for path in new_path_list:
            path_elements = path.rstrip(os.sep).split(common_prefix)
            if len(path_elements) > 1:
                result_paths.append("...{}".format(path_elements[-1]))
            else:
                result_paths.append(path)
    else:
        result_paths = [path.rstrip(os.sep) for path in new_path_list]

    return result_paths
def get_file_icon(path):
    """Get icon for file by extension."""
    # Same platform-dependent scaling as before, expressed as one chained
    # conditional: macOS checked first, then Windows, otherwise the default.
    scale_factor = (0.9 if sys.platform == 'darwin' else
                    0.8 if os.name == 'nt' else 0.6)
    return ima.get_icon_by_extension_or_type(path, scale_factor)
| 35.294521 | 79 | 0.55948 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Utils to handle Switcher elements.
"""
# Standard library imports
import os
import os.path as osp
import sys
# Local imports
from spyder.config.base import _
from spyder.py3compat import iteritems, PY2
from spyder.utils import icon_manager as ima
if PY2:
from itertools import izip as zip
def shorten_paths(path_list, is_unsaved):
    """
    Takes a list of paths and tries to "intelligently" shorten them all. The
    aim is to make it clear to the user where the paths differ, as that is
    likely what they care about. Note that this operates on a list of paths
    not on individual paths.
    If the path ends in an actual file name, it will be trimmed off.
    """
    # TODO: at the end, if the path is too long, should do a more dumb kind of
    # shortening, but not completely dumb.
    # Convert the path strings to a list of tokens and start building the
    # new_path using the drive
    path_list = path_list[:] # Make a local copy
    new_path_list = []
    common_prefix = osp.dirname(osp.commonprefix(path_list))
    # Unsaved entries become a translated placeholder; real paths become
    # (drive + sep) in new_path_list plus a token list in path_list.
    for ii, (path, is_unsav) in enumerate(zip(path_list, is_unsaved)):
        if is_unsav:
            new_path_list.append(_('unsaved file'))
            path_list[ii] = None
        else:
            drive, path = osp.splitdrive(osp.dirname(path))
            new_path_list.append(drive + osp.sep)
            path_list[ii] = [part for part in path.split(osp.sep) if part]
    def recurse_level(level_idx):
        # level_idx maps path index -> remaining (unconsumed) tokens; the
        # shortened pieces are appended to new_path_list in the closure.
        sep = os.sep
        # If toks are all empty we need not have recursed here
        if not any(level_idx.values()):
            return
        # Firstly, find the longest common prefix for all in the level
        # s = len of longest common prefix
        sample_toks = list(level_idx.values())[0]
        if not sample_toks:
            s = 0
        else:
            for s, sample_val in enumerate(sample_toks):
                if not all(len(toks) > s and toks[s] == sample_val
                           for toks in level_idx.values()):
                    break
        # Shorten longest common prefix
        if s == 0:
            short_form = ''
        else:
            if s == 1:
                short_form = sample_toks[0]
            elif s == 2:
                short_form = sample_toks[0] + sep + sample_toks[1]
            else:
                short_form = "..." + sep + sample_toks[s-1]
        for idx in level_idx:
            new_path_list[idx] += short_form + sep
            level_idx[idx] = level_idx[idx][s:]
        # Group the remaining bit after the common prefix, shorten, and recurse
        while level_idx:
            k, group = 0, level_idx # k is length of the group's common prefix
            while True:
                # Abort if we've gone beyond end of one or more in the group
                prospective_group = {idx: toks for idx, toks
                                     in group.items() if len(toks) == k}
                if prospective_group:
                    if k == 0: # we spit out the group with no suffix
                        group = prospective_group
                    break
                # Only keep going if all n still match on the kth token
                _, sample_toks = next(iteritems(group))
                prospective_group = {idx: toks for idx, toks
                                     in group.items()
                                     if toks[k] == sample_toks[k]}
                if len(prospective_group) == len(group) or k == 0:
                    group = prospective_group
                    k += 1
                else:
                    break
            # Emit the k-token prefix that this group shares, then recurse
            # into what's left after it.
            _, sample_toks = next(iteritems(group))
            if k == 0:
                short_form = ''
            elif k == 1:
                short_form = sample_toks[0]
            elif k == 2:
                short_form = sample_toks[0] + sep + sample_toks[1]
            else: # k > 2
                short_form = sample_toks[0] + "..." + sep + sample_toks[k-1]
            for idx in group.keys():
                new_path_list[idx] += short_form + (sep if k > 0 else '')
                del level_idx[idx]
            recurse_level({idx: toks[k:] for idx, toks in group.items()})
    recurse_level({i: pl for i, pl in enumerate(path_list) if pl})
    if common_prefix:
        # Collapse the prefix shared by every input path down to '...'.
        result_paths = []
        for path in new_path_list:
            path_elements = path.rstrip(os.sep).split(common_prefix)
            if len(path_elements) > 1:
                result_paths.append("...{}".format(path_elements[-1]))
            else:
                result_paths.append(path)
    else:
        result_paths = [path.rstrip(os.sep) for path in new_path_list]
    return result_paths
def get_file_icon(path):
    """Get icon for file by extension."""
    # Per-platform scale factors so the icon renders at a visually similar
    # size everywhere: macOS 0.9, Windows 0.8, others (Linux etc.) 0.6.
    if sys.platform == 'darwin':
        scale_factor = 0.9
    elif os.name == 'nt':
        scale_factor = 0.8
    else:
        scale_factor = 0.6
    return ima.get_icon_by_extension_or_type(path, scale_factor)
| 2,763 | 0 | 27 |
43f80d8042c492af4852a86396089d057c3c0ea6 | 448 | py | Python | ml-framework/batch_training.py | JetBrains-Research/extract-method-experiments | c733f6af32de2df9c1a00aa714cca11447ac2518 | [
"Apache-2.0"
] | 2 | 2021-07-01T16:46:13.000Z | 2022-01-22T09:28:25.000Z | ml-framework/batch_training.py | JetBrains-Research/extract-method-experiments | c733f6af32de2df9c1a00aa714cca11447ac2518 | [
"Apache-2.0"
] | 4 | 2021-07-12T07:02:06.000Z | 2022-03-09T09:52:06.000Z | ml-framework/batch_training.py | JetBrains-Research/extract-method-experiments | c733f6af32de2df9c1a00aa714cca11447ac2518 | [
"Apache-2.0"
] | null | null | null | import os
import logging
from src.trainer import train_by_config
# Timestamped messages on the root logger; INFO so per-config progress shows.
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
directory = 'train_settings/'
# Train one model per config file found in the settings directory; a failure
# on one config is reported and does not stop the remaining configs.
for filename in os.listdir(directory):
    config_path = os.path.join(directory, filename)
    logging.info(f'Training model in accordance with {config_path} config')
    try:
        train_by_config(config_path)
    except Exception as err:
        # NOTE(review): this goes to stdout via print and drops the
        # traceback; logging.exception would be more consistent with the
        # logging setup above. (doc-only note; code unchanged)
        print(err)
| 29.866667 | 76 | 0.712054 | import os
import logging

from src.trainer import train_by_config

# Timestamped messages on the root logger; INFO so per-config progress shows.
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)

directory = 'train_settings/'

# Train one model per config file found in the settings directory; a failure
# on one config is reported and does not stop the remaining configs.
for filename in os.listdir(directory):
    config_path = os.path.join(directory, filename)
    logging.info(f'Training model in accordance with {config_path} config')
    try:
        train_by_config(config_path)
    except Exception:
        # FIX: previously this was print(err), which bypassed the logging
        # configuration above and discarded the traceback.  logging.exception
        # records the failing config and the full stack trace.
        logging.exception(f'Training with {config_path} config failed')
| 0 | 0 | 0 |
2b3b046d327cdb1666c09c8e99e70ac3d58c6717 | 952 | py | Python | freezeword/names.py | mattfister/freezeword | 74025da9e9a811e24b2cb0cfb77717d5eafd0283 | [
"MIT"
] | 10 | 2015-12-04T19:48:15.000Z | 2019-12-26T07:03:13.000Z | freezeword/names.py | mattfister/freezeword | 74025da9e9a811e24b2cb0cfb77717d5eafd0283 | [
"MIT"
] | null | null | null | freezeword/names.py | mattfister/freezeword | 74025da9e9a811e24b2cb0cfb77717d5eafd0283 | [
"MIT"
] | 2 | 2017-07-28T21:20:58.000Z | 2019-02-26T21:57:43.000Z | """
Simple utility to get random names. I think it uses data from a US census, located in the words folder.
"""
import random
import os.path
__author__ = "Matt Fister"
# Name pools, loaded once at import time from the bundled word lists in the
# package's 'words' directory (one name per line, title-cased for display).
maleFirsts = [line.rstrip('\n').title() for line in open(os.path.join(os.path.dirname(__file__), (os.path.join('words', 'maleFirstNames.txt'))))]
femaleFirsts = [line.rstrip('\n').title() for line in open(os.path.join(os.path.dirname(__file__), (os.path.join('words', 'femaleFirstNames.txt'))))]
lasts = [line.rstrip('\n').title() for line in open(os.path.join(os.path.dirname(__file__), (os.path.join('words', 'lastNames.txt'))))]
# Demo entry point: print one random full name of a random gender.
if __name__ == "__main__":
    print(get_name(random.choice(['male', 'female'])))
| 30.709677 | 149 | 0.682773 | """
Simple utility to get random names. I think it uses data from a US census, located in the words folder.
"""
import random
import os.path
__author__ = "Matt Fister"
# Name pools, read once at import time from the bundled 'words' data files
# (one name per line; title-cased for display).
maleFirsts = [line.rstrip('\n').title() for line in open(os.path.join(os.path.dirname(__file__), (os.path.join('words', 'maleFirstNames.txt'))))]
femaleFirsts = [line.rstrip('\n').title() for line in open(os.path.join(os.path.dirname(__file__), (os.path.join('words', 'femaleFirstNames.txt'))))]
lasts = [line.rstrip('\n').title() for line in open(os.path.join(os.path.dirname(__file__), (os.path.join('words', 'lastNames.txt'))))]
def get_first(gender):
    """Return a random first name; 'male' draws from the male pool, any
    other value from the female pool."""
    pool = maleFirsts if gender == 'male' else femaleFirsts
    return random.choice(pool)
def get_last():
    # Uniformly sample a surname from the module-level last-name pool.
    return random.choice(lasts)
def get_name(gender):
    """Return a full 'First Last' name with the first name matching *gender*."""
    return " ".join((get_first(gender), get_last()))
# Demo entry point: print one random full name of a random gender.
if __name__ == "__main__":
    print(get_name(random.choice(['male', 'female'])))
| 194 | 0 | 69 |
aca2a4e1e23df5e70e73318fb26040b11d54ca14 | 522 | py | Python | access/privileges.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | null | null | null | access/privileges.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | null | null | null | access/privileges.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | null | null | null | from django.conf import settings
from core.utils import ensure_user_group_membership
from .models import SlackAccess
def invite_to_slack(privilege, person):
    """
    Invites the user to Slack.
    """
    # Delegates to the privilege's slack_access object (presumably the
    # SlackAccess model imported above -- confirm); grant() performs the
    # actual invite for *person*.
    privilege.slack_access.grant(person)
def add_to_group(privilege, person):
    """
    Generic "add person to group" privilege. The group to add is taken from the privilege slug.
    """
    # FIX: ``Group`` was referenced but never imported anywhere in this
    # module, so every call raised NameError.  Import it where it is used
    # (standard Django auth group model).
    from django.contrib.auth.models import Group
    group = Group.objects.get(name=privilege.slug)
    ensure_user_group_membership(person.user, groups_to_add=[group])
| 22.695652 | 95 | 0.733716 | from django.conf import settings
from core.utils import ensure_user_group_membership
from .models import SlackAccess
def invite_to_slack(privilege, person):
    """
    Invites the user to Slack.
    """
    # The invite itself is handled by the privilege's slack_access object;
    # presumably a SlackAccess instance (imported above) -- confirm.
    privilege.slack_access.grant(person)
def add_to_group(privilege, person):
    """
    Generic "add person to group" privilege. The group to add is taken from the privilege slug.
    """
    # FIX: ``Group`` was used without being imported in this module, which
    # made every call raise NameError.  Import the Django auth Group model
    # locally at the point of use.
    from django.contrib.auth.models import Group
    group = Group.objects.get(name=privilege.slug)
    ensure_user_group_membership(person.user, groups_to_add=[group])
| 0 | 0 | 0 |
041159ae6b0c366db4204359e5e1da904e817c77 | 201 | py | Python | reports/admin.py | dgmarko/zach_p_eq_database | ffb69b84645695fe85c77129381fade5e39628d2 | [
"MIT"
] | null | null | null | reports/admin.py | dgmarko/zach_p_eq_database | ffb69b84645695fe85c77129381fade5e39628d2 | [
"MIT"
] | null | null | null | reports/admin.py | dgmarko/zach_p_eq_database | ffb69b84645695fe85c77129381fade5e39628d2 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Transaction, Header, Ticker
# Register your models here.
# Each report model is exposed in the Django admin with the default
# ModelAdmin (no custom admin options).
admin.site.register(Transaction)
admin.site.register(Header)
admin.site.register(Ticker)
| 22.333333 | 47 | 0.810945 | from django.contrib import admin
from .models import Transaction, Header, Ticker

# Register your models here.
# Same three default-ModelAdmin registrations as before, written as a loop;
# registration order is preserved.
for _report_model in (Transaction, Header, Ticker):
    admin.site.register(_report_model)
| 0 | 0 | 0 |
181bb70f734ef532b0fb1a6ac83432c9a5343330 | 3,030 | py | Python | tests/operators/vector/test_ssd_all.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | [
"Apache-2.0"
] | 1 | 2020-08-31T02:43:43.000Z | 2020-08-31T02:43:43.000Z | tests/operators/vector/test_ssd_all.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | [
"Apache-2.0"
] | null | null | null | tests/operators/vector/test_ssd_all.py | laekov/akg | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import datetime
import os
from base import TestBase
import pytest
from test_run.focal_loss_run import focal_loss_run
from test_run.focalloss_ad_run import focalloss_grad_run
from test_run.smooth_l1_loss_run import smooth_l1_loss_run
from test_run.smooth_l1_loss_grad_run import smooth_l1_loss_grad_run
############################################################
# TestCase= class: put to tests/*/
############################################################
if __name__ == "__main__":
    # Manual entry point mirroring what pytest does: build the case, run the
    # cloud test list, tear down.
    c = TestFocalLoss()
    c.setup()
    # FIX: the class defines test_cloud_run(), not test_ci_run(); the old
    # call raised AttributeError whenever this module was run directly.
    c.test_cloud_run()
    c.teardown()
| 37.407407 | 170 | 0.609901 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import datetime
import os
from base import TestBase
import pytest
from test_run.focal_loss_run import focal_loss_run
from test_run.focalloss_ad_run import focalloss_grad_run
from test_run.smooth_l1_loss_run import smooth_l1_loss_run
from test_run.smooth_l1_loss_grad_run import smooth_l1_loss_grad_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestFocalLoss(TestBase):
    """Test case bundling the SSD-related operator runs (one_hot, sum, mean,
    focal loss, smooth-L1 loss and their gradients)."""
    def setup(self):
        """Initialize the case and register the cloud-run argument list."""
        case_name = "test_akg_ssd_all"
        case_path = os.getcwd()
        # params init
        self.params_init(case_name, case_path)
        self.caseresult = True
        self._log.info("============= {0} Setup case============".format(self.casename))
        # Each entry: (test name, runner (name string or callable), args).
        self.testarg_cloud = [
            ("test_ssd_001_one_hot_001", "one_hot_run", ((32, 8732), 6, "float16", 1, 0, -1)),
            ("test_ssd_002_one_hot_002", "one_hot_run", ((32, 8732), 6, "float32", 1, 0, -1)),
            ("test_ssd_003_sum_001", "sum_run", ((32, 8732), (-1,), False, "float16")),
            ("test_ssd_004_mean_001", "mean_run", ((32,), "float16", (0,), False, "reduce_mean")),
            # ("test_ssd_004_mean_001", "mean_run", ((32,), "float16", (0,), True, "reduce_mean")),
            ("test_ssd_005_focal_loss_001", focal_loss_run, ((32, 8732, 6), "float16", "float16", 2.0, "focal_loss")),
            ("test_ssd_006_focalloss_grad_001", focalloss_grad_run, ((32, 8732, 6), "float16", "float16", 2)),
            ("test_ssd_007_smooth_l1_loss_001", smooth_l1_loss_run, ((32, 8732, 4), "float16", (32, 8732, 4), "float16", (32, 8732), "int32", 0, 1.0, "smooth_l1_loss")),
            ("test_ssd_008_smooth_l1_loss_grad_001", smooth_l1_loss_grad_run, ((32, 8732, 4), "float16")),
        ]
        return
    @pytest.mark.rpc_cloud
    @pytest.mark.env_onecard
    @pytest.mark.platform_x86_ascend_training
    def test_cloud_run(self):
        """Execute every registered argument tuple on the cloud target."""
        self.common_run(self.testarg_cloud)
    def teardown(self):
        """Log case completion; no other cleanup is performed."""
        self._log.info("============= {0} Teardown============".format(self.casename))
        return
if __name__ == "__main__":
    # Manual entry point mirroring what pytest does: build the case, run the
    # cloud test list, tear down.
    c = TestFocalLoss()
    c.setup()
    # FIX: TestFocalLoss has no test_ci_run() method (only test_cloud_run),
    # so running this module directly raised AttributeError.
    c.test_cloud_run()
    c.teardown()
| 1,469 | 200 | 24 |
d5c648535ff4b61dff0b8d67671256949535c4ee | 3,658 | py | Python | cfn_encrypt/ssm_parameter.py | nordcloud/cfn-encrypt | 551f8d507477ff7ace03ec06339ed370d9b6c378 | [
"Apache-2.0"
] | 14 | 2018-01-16T08:51:21.000Z | 2020-11-18T20:47:53.000Z | cfn_encrypt/ssm_parameter.py | nordcloud/cfn-encrypt | 551f8d507477ff7ace03ec06339ed370d9b6c378 | [
"Apache-2.0"
] | 4 | 2018-05-17T13:42:47.000Z | 2019-11-21T10:28:36.000Z | cfn_encrypt/ssm_parameter.py | nordcloud/cfn-encrypt | 551f8d507477ff7ace03ec06339ed370d9b6c378 | [
"Apache-2.0"
] | 8 | 2018-02-13T12:48:51.000Z | 2020-03-03T18:29:15.000Z | import cfnresponse, logging, traceback, boto3
from random import choice
from string import ascii_uppercase, ascii_lowercase, digits
| 43.035294 | 187 | 0.609349 | import cfnresponse, logging, traceback, boto3
from random import choice
from string import ascii_uppercase, ascii_lowercase, digits
def parameter_exist(name):
    """Return True when an SSM parameter named *name* already exists."""
    ssm = boto3.client('ssm')
    result = ssm.describe_parameters(
        ParameterFilters=[{'Key': 'Name', 'Values': [name]}]
    )
    # describe_parameters returns a (possibly empty) list of matches
    return bool(result["Parameters"])
_TRUTHY_VALUES = ('true', 'True', '1', True, 1)


def _is_truthy(value):
    """Return True when *value* is one of the accepted truthy spellings."""
    return value in _TRUTHY_VALUES


def _generate_password(properties):
    """Build a random password from the Generate* resource properties.

    Raises ValueError when GeneratePasswordLength is missing or cannot be
    cast to an integer.  Special characters are only included when
    GeneratePasswordAllowSpecialCharacters is truthy.
    """
    password_length = properties.get("GeneratePasswordLength")
    if not password_length:
        raise ValueError("The Resource property GeneratePasswordLength is required")
    try:
        password_length = int(password_length)
    except (TypeError, ValueError):
        # narrow except instead of the previous bare `except:`
        raise ValueError("The Resource property GeneratePasswordLength must be an integer or castable to an integer")
    charset = ascii_uppercase + ascii_lowercase + digits
    if _is_truthy(properties.get("GeneratePasswordAllowSpecialCharacters")):
        charset = charset + "!\"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"
    return ''.join(choice(charset) for _ in range(password_length))


def handler(event, context):
    """CloudFormation custom-resource handler for a KMS-encrypted SSM parameter.

    Create/Update: stores a SecureString parameter named by the ``Name``
    property, using either the explicit ``Value`` property or a generated
    random password (``GeneratePassword``) -- the two are mutually
    exclusive.  Delete: removes the parameter identified by
    ``PhysicalResourceId``.  The outcome is always reported back to
    CloudFormation via cfnresponse; a failing Delete is reported as SUCCESS
    so stack cleanup is never blocked.
    """
    logger = logging.getLogger("crypto_cfn")
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    logger.addHandler(ch)
    properties = event["ResourceProperties"]
    name = properties["Name"]
    try:
        if event["RequestType"] in ["Create", "Update"]:
            # refuse to silently take over a parameter created elsewhere
            if event["RequestType"] == "Create" and parameter_exist(name):
                raise NameError("A Parameter named {} already exists".format(name))
            generate_password = properties.get("GeneratePassword")
            value = properties.get("Value")
            if value and _is_truthy(generate_password):
                raise ValueError("Property Value and GeneratePassword cannot be used at the same time")
            if _is_truthy(generate_password):
                value = _generate_password(properties)
            if not value:
                raise ValueError("Either generate a password or set a value")
            response = boto3.client('ssm').put_parameter(
                Name=name,
                Description=properties["Description"],
                Value=value,
                Type="SecureString",
                KeyId=properties["KeyId"],
                Overwrite=True
            )
            logger.info("Successfully stored parameter {}".format(name))
            cfnresponse.send(event, context, cfnresponse.SUCCESS, response, name)
        else:
            boto3.client('ssm').delete_parameter(
                Name=event["PhysicalResourceId"],
            )
            logger.info("Successfully deleted parameter: {}".format(name))
            cfnresponse.send(event, context, cfnresponse.SUCCESS, None, name)
    except Exception as ex:
        logger.error("Failed to %s parameter: %s", event["RequestType"], name)
        logger.debug("Stack trace %s", traceback.format_exc())
        if event["RequestType"] in ["Create", "Update"]:
            cfnresponse.send(event, context, cfnresponse.FAILED, None, "0")
        else:
            # never fail a Delete -- allows rollback/cleanup to proceed
            cfnresponse.send(event, context, cfnresponse.SUCCESS, None, "0")
| 3,478 | 0 | 46 |
a3960d16f61c3d52f45afc8d25e8cb8c2a0a5a3e | 519 | py | Python | supplier/urls.py | CrownKira/digitalace-backend | 9489b3dc4b856688360b60a7dca040c945faab74 | [
"MIT"
] | 1 | 2021-05-28T05:22:54.000Z | 2021-05-28T05:22:54.000Z | supplier/urls.py | CrownKira/digitalace-backend | 9489b3dc4b856688360b60a7dca040c945faab74 | [
"MIT"
] | 3 | 2021-05-31T15:44:14.000Z | 2021-06-29T07:48:13.000Z | supplier/urls.py | CrownKira/digitalace-backend | 9489b3dc4b856688360b60a7dca040c945faab74 | [
"MIT"
] | 1 | 2021-05-30T07:42:54.000Z | 2021-05-30T07:42:54.000Z | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from rest_framework_bulk.routes import BulkRouter
from supplier import views
# Plain router: has no registrations here; kept so non-bulk viewsets can be
# added without touching urlpatterns.
router = DefaultRouter()
# Bulk router: generates routes that also accept bulk update/delete requests
# for each registered viewset.
bulk_router = BulkRouter()
bulk_router.register("receives", views.ReceiveViewSet)
bulk_router.register("suppliers", views.SupplierViewSet)
bulk_router.register("purchase_orders", views.PurchaseOrderViewSet)
# URL namespace used for reversing, e.g. "supplier:<route-name>".
app_name = "supplier"
urlpatterns = [
    path("", include(router.urls)),
    path("", include(bulk_router.urls)),
]
| 24.714286 | 67 | 0.789981 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from rest_framework_bulk.routes import BulkRouter
from supplier import views
# Plain router: has no registrations here; kept so non-bulk viewsets can be
# added without touching urlpatterns.
router = DefaultRouter()
# Bulk router: generates routes that also accept bulk update/delete requests
# for each registered viewset.
bulk_router = BulkRouter()
bulk_router.register("receives", views.ReceiveViewSet)
bulk_router.register("suppliers", views.SupplierViewSet)
bulk_router.register("purchase_orders", views.PurchaseOrderViewSet)
# URL namespace used for reversing, e.g. "supplier:<route-name>".
app_name = "supplier"
urlpatterns = [
    path("", include(router.urls)),
    path("", include(bulk_router.urls)),
]
| 0 | 0 | 0 |
b4ebdd7e5b3a3d2cc1c95090b70f472364b115d9 | 957 | py | Python | CheckIfPangram.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | 14 | 2020-10-15T21:47:18.000Z | 2021-12-01T06:06:51.000Z | CheckIfPangram.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | null | null | null | CheckIfPangram.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | 4 | 2020-06-15T14:40:45.000Z | 2021-06-15T06:22:03.000Z | '''
A pangram is a sentence where every letter of the English alphabet appears at least once.
Given a string sentence containing only lowercase English letters, return true if sentence is a pangram, or false otherwise.
Example 1:
Input: sentence = "thequickbrownfoxjumpsoverthelazydog"
Output: true
Explanation: sentence contains at least one of every letter of the English alphabet.
Example 2:
Input: sentence = "leetcode"
Output: false
'''
| 28.147059 | 124 | 0.607106 | '''
A pangram is a sentence where every letter of the English alphabet appears at least once.
Given a string sentence containing only lowercase English letters, return true if sentence is a pangram, or false otherwise.
Example 1:
Input: sentence = "thequickbrownfoxjumpsoverthelazydog"
Output: true
Explanation: sentence contains at least one of every letter of the English alphabet.
Example 2:
Input: sentence = "leetcode"
Output: false
'''
class Solution:
    def checkIfPangram(self, sentence: str) -> bool:
        """Return True if *sentence* contains every letter a-z at least once.

        *sentence* is assumed to contain only lowercase English letters (per
        the problem statement).  A 26-bit mask records which letters have
        been seen; the sentence is a pangram exactly when all 26 bits are
        set.  O(len(sentence)) time, O(1) space, with an early exit as soon
        as the 26th distinct letter appears.
        """
        target = (1 << 26) - 1  # bitmask with all 26 letter bits set
        seen = 0
        for ch in sentence:
            seen |= 1 << (ord(ch) - ord('a'))
            if seen == target:
                return True
        return False
| 468 | -6 | 49 |
69611705ef9ded51b631df10fca6237883ab4cac | 9,208 | py | Python | free_hand.py | orenber/Widgets | 0df26370d9ac4d805d5c644f6db432c6732846d9 | [
"BSD-2-Clause"
] | 2 | 2019-08-31T05:15:22.000Z | 2019-09-19T20:27:21.000Z | free_hand.py | orenber/Widgets | 0df26370d9ac4d805d5c644f6db432c6732846d9 | [
"BSD-2-Clause"
] | null | null | null | free_hand.py | orenber/Widgets | 0df26370d9ac4d805d5c644f6db432c6732846d9 | [
"BSD-2-Clause"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QApplication
from draw_binary_image import plot_binary_image
if __name__ == "__main__":
main() | 29.703226 | 93 | 0.590356 | import matplotlib.pyplot as plt
import numpy as np
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QApplication
from draw_binary_image import plot_binary_image
class AxesEvent:
    """Wraps a matplotlib Axes and dispatches its canvas mouse events.

    Client code assigns callables to the keys of ``self.callback``
    ('on_press', 'on_release', 'on_motion', 'on_scroll', 'enter_axes',
    'leave_axes'); each hook is invoked with the matplotlib event when the
    corresponding event occurs inside this Axes.
    """
    def __init__(self, *args, **kwargs):
        # *args/**kwargs are forwarded to plt.axes()
        self.ax = plt.axes(*args, **kwargs)
        self.figure = self.ax.get_figure()
        self.__in_axes = False  # True while the pointer is inside self.ax
        # user-assignable hooks; '' means "no hook installed"
        self.callback = {'on_press': '',
                         'on_release': '',
                         'on_motion': '',
                         'on_scroll': '',
                         'enter_axes': '',
                         'leave_axes': ''}
    def connect(self):
        """Connect the press/enter/leave handlers to the canvas."""
        # BUGFIX: connection ids are kept in dedicated attributes.  The
        # previous code assigned them to self.enter_axes / self.leave_axes,
        # shadowing those handler methods and breaking any reconnect.
        self.cidpress = self.figure.canvas.mpl_connect(
            'button_press_event', self.on_press)
        self.cid_enter = self.figure.canvas.mpl_connect(
            'axes_enter_event', self.enter_axes)
        self.cid_leave = self.figure.canvas.mpl_connect(
            'axes_leave_event', self.leave_axes)
    def mouse_motion(self, state: bool = False):
        """Connect (True) or disconnect (False) the release/motion/scroll
        handlers; they are only live while a button is held down."""
        if state:
            self.cidrelease = self.figure.canvas.mpl_connect(
                'button_release_event', self.on_release)
            self.cidmotion = self.figure.canvas.mpl_connect(
                'motion_notify_event', self.on_motion)
            self.scroll = self.figure.canvas.mpl_connect(
                'scroll_event', self.on_scroll)
        else:
            self.figure.canvas.mpl_disconnect(self.cidrelease)
            self.figure.canvas.mpl_disconnect(self.cidmotion)
            self.figure.canvas.mpl_disconnect(self.scroll)
    def on_press(self, event):
        # Only presses inside this Axes activate motion tracking and the hook.
        if self.__in_axes and id(self.ax) == id(event.inaxes):
            self.mouse_motion(True)
            callback = self.callback['on_press']
            if callable(callback):
                callback(event)
    def on_scroll(self, event):
        callback = self.callback['on_scroll']
        if callable(callback):
            callback(event)
    def on_motion(self, event):
        if self.__in_axes and id(self.ax) == id(event.inaxes):
            callback = self.callback['on_motion']
            if callable(callback):
                callback(event)
    def on_release(self, event):
        # NOTE: the pointer is re-marked as inside the axes on release, as in
        # the original implementation -- TODO confirm this is intended.
        self.__in_axes = True
        if id(self.ax) == id(event.inaxes):
            self.mouse_motion(False)
            callback = self.callback['on_release']
            if callable(callback):
                callback(event)
    def enter_axes(self, event):
        QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))
        self.__in_axes = True
        if id(self.ax) == id(event.inaxes):
            callback = self.callback['enter_axes']
            if callable(callback):
                callback(event)
    def leave_axes(self, event):
        QApplication.restoreOverrideCursor()
        self.__in_axes = False
        if id(self.ax) == id(event.inaxes):
            callback = self.callback['leave_axes']
            if callable(callback):
                callback(event)
    def disconnect(self):
        """Disconnect every handler this instance registered."""
        # BUGFIX: each cid is guarded so disconnect() is safe even when
        # mouse_motion(True) never ran, and the enter/leave handlers are
        # now disconnected as well.
        for attr in ('cidpress', 'cid_enter', 'cid_leave',
                     'cidrelease', 'cidmotion', 'scroll'):
            cid = getattr(self, attr, None)
            if cid is not None:
                self.figure.canvas.mpl_disconnect(cid)
class BoardFreeHand:
    """Free-hand drawing surface on a matplotlib Axes.

    Left-drag draws with ``penColor``; right-drag erases by painting in the
    axes' face colour; a right-button press clears the board.  The scroll
    wheel adjusts the eraser size while dragging.
    """
    def __init__(self, *args, **kwargs):
        # *args/**kwargs are forwarded to plt.axes()
        self.ax = plt.axes(*args, **kwargs)
        self.figure = self.ax.get_figure()
        self.press = None
        self.penColor = 'blue'
        self.background = None
        self.__in_axes = False
        # rolling buffer of the most recent stroke points (see draw_on)
        self._xdata = []
        self._ydata = []
        # erasing draws in the axes' background colour
        self.erse_color = self.ax.get_facecolor()
        # NOTE(review): this assignment goes through the erse_size property
        # setter; the direct name-mangled assignment on the next line is
        # redundant with it.
        self.erse_size = 1
        self.__erse_size = 1
        self.ax.set_xlim((0,1))
        self.ax.set_ylim((0,1))
        self.clear_axes()
    @property
    def draw(self) -> bool:
        # NOTE(review): __draw is only created by the setter below, so
        # reading this property before the first button press (e.g. from
        # disconnect()) raises AttributeError -- confirm intended.
        return self.__draw
    @draw.setter
    def draw(self, state: bool):
        # Toggling the drawing state connects/disconnects the motion,
        # release and scroll handlers.
        self.__draw = state
        if self.__draw:
            self.mouse_motion(True)
        else:
            self.mouse_motion(False)
    @property
    def erse_size(self) -> int:
        return self.__erse_size
    @erse_size.setter
    def erse_size(self, size: int):
        # clamp the eraser size to a minimum of 1
        if size < 1:
            self.__erse_size = 1
        else:
            self.__erse_size = size
    def connect(self):
        # connect to all the events we need
        # NOTE(review): the two assignments below store connection ids into
        # self.enter_axes / self.leave_axes, shadowing the handler methods;
        # a second connect() call would therefore fail.
        self.cidpress = self.figure.canvas.mpl_connect(
            'button_press_event', self.on_press)
        self.enter_axes = self.figure.canvas.mpl_connect(
            'axes_enter_event', self.enter_axes)
        self.leave_axes = self.figure.canvas.mpl_connect(
            'axes_leave_event', self.leave_axes)
    def mouse_motion(self, state: bool = False):
        """Connect (True) or disconnect (False) release/motion/scroll handlers."""
        if state:
            self.cidrelease = self.figure.canvas.mpl_connect(
                'button_release_event', self.on_release)
            self.cidmotion = self.figure.canvas.mpl_connect(
                'motion_notify_event', self.on_motion)
            self.scroll = self.figure.canvas.mpl_connect(
                'scroll_event', self.on_scroll)
        elif state == False:
            self.figure.canvas.mpl_disconnect(self.cidrelease)
            self.figure.canvas.mpl_disconnect(self.cidmotion)
            self.figure.canvas.mpl_disconnect(self.scroll)
    def on_press(self, event):
        # on button press we will see if the mouse is over us and store some data
        if self.__in_axes and id(self.ax) == id(event.inaxes):
            self.draw = True
            # right button press wipes the board
            if event.button == 3:
                self.clear_axes()
    def on_scroll(self, event):
        # scroll wheel resizes the eraser ('up' grows, 'down' shrinks)
        scroll_direction = event.button
        print('scroll dirction' + scroll_direction)
        if scroll_direction == 'down':
            self.erse_size -= 1
            pass
        elif scroll_direction == 'up':
            self.erse_size += 1
            pass
    def on_motion(self, event):
        # button 1 = pen stroke, button 3 = eraser stroke
        if self.__in_axes:
            if event.button == 1:
                self.draw_on( event.xdata, event.ydata, self.penColor)
                pass
            elif event.button == 3:
                self.draw_on(event.xdata, event.ydata, self.erse_color, self.erse_size)
                pass
    def on_release(self, event):
        # End the stroke and reset the rolling point buffer.
        self.draw = False
        self._xdata = []
        self._ydata = []
    def draw_on(self, x0: float, y0: float, draw_color='black', size=1):
        # LIFO
        # Keep only the last two points so each plot() call draws the most
        # recent segment of the stroke.
        self._xdata.append(x0)
        self._ydata.append(y0)
        if len(self._xdata) > 2:
            self._xdata.pop(0)
            self._ydata.pop(0)
        self.ax.plot( self._xdata, self._ydata, color=draw_color, linewidth=size )
        self.figure.canvas.draw()
        pass
    def clear_axes(self):
        """Clear the axes content while preserving the current x/y limits."""
        xlim = self.ax.get_xlim()
        ylim = self.ax.get_ylim()
        self.ax.cla()
        self.ax.set_xlim( xlim )
        self.ax.set_ylim( ylim )
        self.figure.canvas.draw()
    def enter_axes(self, event):
        # Pointer entered the axes: track state and switch the Qt cursor.
        self.__in_axes = True
        QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))
        pass
    def leave_axes(self, event):
        # Pointer left the axes: restore the Qt cursor.
        self.__in_axes = False
        QApplication.restoreOverrideCursor()
    def disconnect(self):
        # NOTE(review): cidrelease/cidmotion only exist after a press, and
        # reading self.draw before the first press raises AttributeError.
        self.figure.canvas.mpl_disconnect(self.cidpress)
        self.figure.canvas.mpl_disconnect(self.cidrelease)
        self.figure.canvas.mpl_disconnect(self.cidmotion)
        if self.draw:
            self.mouse_motion(False)
class BoardImage(BoardFreeHand):
    """Free-hand board that rasterises each stroke into a binary image.

    On left-button release the stroke polyline is turned into a blob via
    plot_binary_image and accumulated into self.image; a right-button press
    resets the image to all black.
    """
    def __init__(self,image_size = [45, 45], *args, **kwargs):
        # NOTE(review): mutable default argument -- the same list object is
        # shared by every call that omits image_size.
        super().__init__(*args, **kwargs)
        self.image_size = image_size
        self.image = self.create_binary_image()
    def create_binary_image(self):
        """Reset the axes to an all-black square image and return its array."""
        # only image_size[0] is used, so the board is always square
        # NOTE(review): np.bool is deprecated in modern NumPy; plain `bool`
        # would be the safe spelling -- confirm the pinned NumPy version.
        black_image = np.zeros([self.image_size[0], self.image_size[0]], dtype=np.bool)
        self.ax.cla()
        self.ax.imshow( black_image, cmap=plt.cm.gray)
        return black_image
    def draw_on(self, x0: int, y0: int, drawcolor='black', size=1):
        """Append the point and redraw the whole stroke (no rolling buffer)."""
        self._xdata.append(x0)
        self._ydata.append(y0)
        self.ax.plot( self._xdata, self._ydata, color=drawcolor, linewidth=size )
        self.figure.canvas.draw()
    def on_release(self, event):
        """Rasterise the finished left-button stroke into the binary image."""
        if event.button == 1:
            black_image = np.zeros( [self.image_size[0], self.image_size[0]], dtype=np.bool )
            blob_image = plot_binary_image(black_image,self._xdata,self._ydata)
            # boolean arrays: + accumulates the blob into the previous content
            self.image = self.image + blob_image
            self.ax.imshow(self.image, cmap=plt.cm.gray)
            self.figure.canvas.draw()
        super().on_release(event)
    def on_press(self, event):
        # on button press we will see if the mouse is over us and store some data
        super().on_press(event)
        if event.button == 3:
            # right click clears the accumulated image
            self.image = np.zeros([self.image_size[0], self.image_size[0]], dtype=np.bool )
            self.ax.imshow(self.image, cmap=plt.cm.gray)
            self.figure.canvas.draw()
def main():
    """Demo entry point: show an interactive BoardImage in a new figure."""
    plt.figure()
    ax1 = BoardImage()
    ax1.connect()
    plt.show()
if __name__ == "__main__":
main() | 7,960 | 508 | 497 |
b945b76ccccdb43041716831f0119db495983ba8 | 5,921 | py | Python | src/legacy/evaluate.py | christear/APAIQ_release | 5a5031e69cabd4dbcf1d44ef05a7097cbb1e5c35 | [
"MIT"
] | null | null | null | src/legacy/evaluate.py | christear/APAIQ_release | 5a5031e69cabd4dbcf1d44ef05a7097cbb1e5c35 | [
"MIT"
] | null | null | null | src/legacy/evaluate.py | christear/APAIQ_release | 5a5031e69cabd4dbcf1d44ef05a7097cbb1e5c35 | [
"MIT"
] | null | null | null | from PolyAModel import *
import re
import os, sys, copy, getopt, re, argparse
import random
import pandas as pd
import numpy as np
from Bio.Seq import Seq
from TrimmedMean import TrimmedMean
import gc
#from extract_coverage_from_scanGenome import check
#def dataProcessing(scan_file,window,rst):
if __name__ == "__main__":
Evaluate(*args())
| 32.894444 | 108 | 0.576254 | from PolyAModel import *
import re
import os, sys, copy, getopt, re, argparse
import random
import pandas as pd
import numpy as np
from Bio.Seq import Seq
from TrimmedMean import TrimmedMean
import gc
#from extract_coverage_from_scanGenome import check
def check(line1, line2, window):
    """Return True when the two records span exactly one full window.

    Each record is a (position, rpm, base) tuple; a window of *window*
    consecutive positions runs from line1 to line2 precisely when the end
    position minus the start position equals window - 1.
    """
    start_pos = line1[0]
    end_pos = line2[0]
    return end_pos - start_pos == window - 1
def collpase(strand,array,rst):
    """Collapse a window of (pos, rpm, base) records into (sequence, coverage).

    Returns (0, 0) when the window contains an ambiguous 'N' base or when
    the trimmed mean of the first half of the coverage is below the
    threshold *rst*.  For '-' strand windows the sequence is
    reverse-complemented and the coverage reversed.  (The function name is
    a historical misspelling of "collapse".)
    """
    sequence = ''
    coverage = []
    contain_N = False
    for line in array:
        _,rpm,base = line
        base = base.upper()
        # any ambiguous base invalidates the whole window
        if(base=='N'):
            contain_N = True
            break
        sequence += base
        coverage.append(rpm)
    if(not contain_N):
        if(strand == "-"):
            # minus strand: report the reverse complement and flip coverage
            sequence = Seq(sequence)
            sequence = sequence.reverse_complement()
            coverage = coverage[::-1]
        # filter on the trimmed-mean coverage of the first half of the window
        trimMean = TrimmedMean([float(coverage[i]) for i in range(int(len(coverage)/2))])
        if(trimMean>=rst):
            return sequence,coverage
        else:
            return 0,0
    else:
        print("Discard item containig N")
        return 0,0
#def dataProcessing(scan_file,window,rst):
def dataProcessing(baseName,lines,window,rst):
    """Build model inputs from one scanned block.

    *baseName* has the form "<name>.<chromosome>_<strand>_<suffix>"; *lines*
    is a list of (position, rpm, base) records.  For every centre position
    whose surrounding window of size *window* is contiguous, N-free and
    passes the coverage threshold *rst* (see collpase), emit:
      data1: one-hot sequence array, shape (n, window, 4) over A/T/C/G
      data2: coverage array, shape (n, window, 1)
      PASID: "<chromosome>:<position>:<strand>" identifiers, shape (n,)
    """
    extend = int(window/2)
    alphabet = np.array(['A', 'T', 'C', 'G'])
    name,block_name = baseName.split('.')
    chromosome,strand,_ = block_name.split('_')
    data1 = []
    data2 = []
    PASID = []
    for i,line in enumerate(lines):
        pos,_,base = line
        if(base.upper()=='N'):
            continue
        start = i-extend
        end = i+extend
        # keep only centres whose full window lies inside the block
        if(start>0 and end+1<len(lines)):
            # skip windows whose endpoints are not exactly window-1 apart
            if(not check(lines[start],lines[end],window)):
                continue
            sequence,coverage = collpase(strand,lines[start:end+1],rst)
            if(sequence!=0):
                pas_id = '%s:%s:%s'%(chromosome,pos,strand)
                sequence = list(sequence)
                # one-hot encode by broadcasting each base against the alphabet
                seq = np.array(sequence, dtype = '|U1').reshape(-1,1)
                seq_data = (seq == alphabet).astype(np.float32)
                coverage = np.array(coverage).astype(np.float32)
                data1.append(seq_data)
                data2.append(coverage)
                PASID.append(pas_id)
    data1 = np.stack(data1).reshape([-1, window, 4])
    data2 = np.stack(data2).reshape([-1, window, 1])
    PASID = np.array(PASID)
    return data1 , data2, PASID
def args():
    """Parse the command line; returns (model, out_dir, rst, window, baseName, keep_temp)."""
    parser = argparse.ArgumentParser(description="identification of pAs cleavage site")
    parser.add_argument("--model", help="the model weights file", required=True)
    parser.add_argument('--baseName', help='baseName')
    parser.add_argument("--out_dir", help="prediction files", required=True)
    parser.add_argument("--RNASeqRCThreshold",default=0.05,type=float,help="RNA-Seq Coverage Threshold")
    parser.add_argument('--window', default=201, type=int, help='input length')
    parser.add_argument('--keep_temp',default=None,help='if you want to keep temporary file, set to "yes"')
    args = parser.parse_args()
    model = args.model
    out_dir = args.out_dir
    rst = args.RNASeqRCThreshold
    window=args.window
    baseName = args.baseName
    keep_temp = args.keep_temp
    return model,out_dir,rst,window,baseName,keep_temp
def Evaluate(baseName,block,model,out_dir,rst,window,keep_temp):
    """Score one block with the CNN and write per-position predictions.

    Loads PolyA_CNN weights from *model*, builds inputs for *block* via
    dataProcessing, and writes "<out_dir>/predict/<baseName>.txt" with one
    "<pas_id>\\t<score>" line per candidate position.  *keep_temp* is
    accepted for interface compatibility but not used here.  Returns 0.
    """
    if(out_dir[-1] == '/'):
        # normalise: drop a trailing slash before appending subfolders
        out_dir = out_dir[0:-1]
    out_dir = out_dir+'/predict'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out="%s/%s.txt"%(out_dir,baseName)
    print("Start processing data")
    seq_data,cov_data,pas_id = dataProcessing(baseName,block,window,rst)
    print("Finish processing data")
    print("Start Evaluating %s"%baseName)
    keras_Model = PolyA_CNN(window)
    keras_Model.load_weights(model)
    pred = keras_Model.predict({"seq_input": seq_data, "cov_input": cov_data})
    OUT=open(out,'w')
    for i in range(len(pas_id)):
        OUT.write('%s\t%s\n'%(str(pas_id[i]),str(pred[i][0])))
    OUT.close()
    print("End Evaluation\n")
    del seq_data,cov_data,pas_id,pred,keras_Model #delete reference
    gc.collect() #manually run garbage collection process
    return 0
return 0
if __name__ == "__main__":
Evaluate(*args())
| 5,408 | 0 | 124 |
bf4a6fef0a07d3313320f3e022cb1693d9674103 | 473 | py | Python | tools/vagrant/data/scripts/apply_env.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | 2 | 2022-03-17T18:53:58.000Z | 2022-03-17T22:04:22.000Z | tools/vagrant/data/scripts/apply_env.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | 9 | 2022-03-18T08:22:57.000Z | 2022-03-30T17:14:49.000Z | tools/vagrant/data/scripts/apply_env.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | 7 | 2022-03-17T22:03:08.000Z | 2022-03-28T21:28:34.000Z | import sys
import os
from jinja2 import Template
# Apply environment variables to a Jinga2 Template file
# Save the results into an output file specified by the caller
if __name__ == "__main__":
    # Exactly two arguments are required: the template to read and the
    # file to write the rendered result to.
    if len(sys.argv) != 3:
        print("usage: python apply_env.py input_file output_file")
        sys.exit(1)
    with open(sys.argv[1], "r") as f:
        data = f.read()
    # Render the Jinja2 template using the process environment as the
    # variable mapping.
    t = Template(data)
    with open(sys.argv[2], "w") as f:
        f.write(t.render(os.environ))
| 26.277778 | 66 | 0.651163 | import sys
import os
from jinja2 import Template
# Apply environment variables to a Jinga2 Template file
# Save the results into an output file specified by the caller
if __name__ == "__main__":
    # Exactly two arguments are required: the template to read and the
    # file to write the rendered result to.
    if len(sys.argv) != 3:
        print("usage: python apply_env.py input_file output_file")
        sys.exit(1)
    with open(sys.argv[1], "r") as f:
        data = f.read()
    # Render the Jinja2 template using the process environment as the
    # variable mapping.
    t = Template(data)
    with open(sys.argv[2], "w") as f:
        f.write(t.render(os.environ))
| 0 | 0 | 0 |
f4d3e2fa9b5d5a03e3d66a3a1060b0253a42f444 | 1,666 | py | Python | lab4/image_stats.py | CG2016/barkovsky_3 | 784bb3e419736cd11b9bc84b3055b8a4b9ec57e6 | [
"MIT"
] | null | null | null | lab4/image_stats.py | CG2016/barkovsky_3 | 784bb3e419736cd11b9bc84b3055b8a4b9ec57e6 | [
"MIT"
] | null | null | null | lab4/image_stats.py | CG2016/barkovsky_3 | 784bb3e419736cd11b9bc84b3055b8a4b9ec57e6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import argparse
import PIL.Image
import PIL.ExifTags
if __name__ == '__main__':
main()
| 26.03125 | 97 | 0.605642 | #!/usr/bin/env python
import os
import argparse
import PIL.Image
import PIL.ExifTags
def get_image_paths(path):
    """Yield file paths: every file under *path* if it is a directory,
    otherwise *path* itself."""
    if not os.path.isdir(path):
        yield path
        return
    for dirpath, _dirnames, filenames in os.walk(path):
        for name in filenames:
            yield os.path.join(dirpath, name)
def get_image_metadata(image):
    """Collect a flat metadata dict for a PIL image.

    Combines the image's info dict with size/format/color mode and any
    named EXIF tags, derives a 'dpi' entry from X/YResolution when absent,
    and strips the bulky binary entries before returning.
    """
    meta = dict(image.info)
    meta['size'] = image.size
    meta['format'] = image.format.lower()
    meta['color_mode'] = image.mode
    # _getexif is only present on some formats; it may also return None.
    try:
        exif = image._getexif()
    except AttributeError:
        exif = None
    if exif:
        for tag, value in exif.items():
            if tag in PIL.ExifTags.TAGS:
                meta[PIL.ExifTags.TAGS[tag]] = value
    has_resolution = 'XResolution' in meta and 'YResolution' in meta
    if has_resolution and 'dpi' not in meta:
        meta['dpi'] = (meta['XResolution'][0], meta['YResolution'][0])
    # drop large/binary blobs that are not useful as printable metadata
    for key in ('exif', 'icc_profile', 'MakerNote', 'UserComment'):
        meta.pop(key, None)
    return meta
def main():
    """Print the metadata of every image found at the given path."""
    parser = argparse.ArgumentParser()
    parser.add_argument('images_path', type=str, help='Path to an image or a folder with images')
    args = parser.parse_args()
    image_paths = get_image_paths(args.images_path)
    for image_path in image_paths:
        # Non-image files (or unreadable images) are reported but do not
        # abort the walk.
        try:
            image = PIL.Image.open(image_path)
            print(image_path)
            for key, value in sorted(get_image_metadata(image).items()):
                print('%s: %s' % (key, value))
            print()
            image.close()
        except Exception as ex:
            print(ex)
if __name__ == '__main__':
main()
| 1,468 | 0 | 69 |
1f016bf5c421cd9b6e7ae9b9544f42ef7a8267f7 | 5,733 | py | Python | Tail/plugin.py | hacklab/doorbot | a0837505beedab8436bc6d451644eb18dde0a31d | [
"BSD-3-Clause"
] | 1 | 2016-05-09T01:55:04.000Z | 2016-05-09T01:55:04.000Z | Tail/plugin.py | hacklab/doorbot | a0837505beedab8436bc6d451644eb18dde0a31d | [
"BSD-3-Clause"
] | null | null | null | Tail/plugin.py | hacklab/doorbot | a0837505beedab8436bc6d451644eb18dde0a31d | [
"BSD-3-Clause"
] | null | null | null | ###
# Copyright (c) 2004-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
from supybot.commands import *
import supybot.ircutils as ircutils
import supybot.schedule as schedule
import supybot.callbacks as callbacks
import commands
Class = Tail
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 35.83125 | 79 | 0.631258 | ###
# Copyright (c) 2004-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
from supybot.commands import *
import supybot.ircutils as ircutils
import supybot.schedule as schedule
import supybot.callbacks as callbacks
import commands
class Tail(callbacks.Plugin):
def __init__(self, irc):
self.__parent = super(Tail, self)
self.__parent.__init__(irc)
self.files = {}
period = self.registryValue('period')
schedule.addPeriodicEvent(self._checkFiles, period, name=self.name())
for filename in self.registryValue('files'):
self._add(filename)
def die(self):
self.__parent.die()
schedule.removeEvent(self.name())
for fd in self.files.values():
fd.close()
def __call__(self, irc, msg):
irc = callbacks.SimpleProxy(irc, msg)
self.lastIrc = irc
self.lastMsg = msg
def _checkFiles(self):
self.log.debug('Checking files.')
for filename in self.registryValue('files'):
self._checkFile(filename)
def _checkFile(self, filename):
fd = self.files[filename]
pos = fd.tell()
line = fd.readline()
while line:
line = line.strip()
if line:
self._send(self.lastIrc, filename, line)
self._sign(line)
pos = fd.tell()
line = fd.readline()
fd.seek(pos)
def _add(self, filename):
try:
fd = file(filename)
except EnvironmentError, e:
self.log.warning('Couldn\'t open %s: %s', filename, e)
raise
fd.seek(0, 2) # 0 bytes, offset from the end of the file.
self.files[filename] = fd
self.registryValue('files').add(filename)
def _remove(self, filename):
fd = self.files.pop(filename)
fd.close()
self.registryValue('files').remove(filename)
def _send(self, irc, filename, text):
if self.registryValue('bold'):
filename = ircutils.bold(filename)
if self.registryValue('hidefilename'):
filename = ""
notice = self.registryValue('notice')
payload = '%s: %s' % (filename, text)
for target in self.registryValue('targets'):
irc.reply(payload, to=target, notice=notice, private=True)
def _sign(text):
(stat, out) = commands.getstatusoutput(
"java -jar /home/doorbot/sendText.jar localhost 2332 3 '%s'"%text)
def add(self, irc, msg, args, filename):
"""<filename>
Basically does the equivalent of tail -f to the targets.
"""
try:
self._add(filename)
except EnvironmentError, e:
irc.error(utils.exnToString(e))
return
irc.replySuccess()
add = wrap(add, ['filename'])
def remove(self, irc, msg, args, filename):
"""<filename>
Stops announcing the lines appended to <filename>.
"""
try:
self._remove(filename)
irc.replySuccess()
except KeyError:
irc.error(format('I\'m not currently announcing %s.', filename))
remove = wrap(remove, ['filename'])
def target(self, irc, msg, args, optlist, targets):
"""[--remove] [<target> ...]
If given no arguments, returns the current list of targets for this
plugin. If given any number of targets, will add these targets to
the current list of targets. If given --remove and any number of
targets, will remove those targets from the current list of targets.
"""
remove = False
for (option, arg) in optlist:
if option == 'remove':
remove = True
if not targets:
L = self.registryValue('targets')
if L:
utils.sortBy(ircutils.toLower, L)
irc.reply(format('%L', L))
else:
irc.reply('I\'m not currently targeting anywhere.')
elif remove:
pass #XXX
target = wrap(target, [getopts({'remove': ''}), any('something')])
Class = Tail
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 1,973 | 1,881 | 23 |
0f64541f7886f83908fd68e4ab290832262593f2 | 4,229 | py | Python | fusion_pickles_probability_3.py | pdturney/management-theory-revised | 12ac601658fec7001817bc981e8e851e853d4803 | [
"MIT"
] | null | null | null | fusion_pickles_probability_3.py | pdturney/management-theory-revised | 12ac601658fec7001817bc981e8e851e853d4803 | [
"MIT"
] | null | null | null | fusion_pickles_probability_3.py | pdturney/management-theory-revised | 12ac601658fec7001817bc981e8e851e853d4803 | [
"MIT"
] | null | null | null | #
# Fusion Pickles Probability 3 Parts
#
# Peter Turney, July 14, 2021
#
# From the 20 runs, extract all of the pickled three-part seeds
# that are stored in the 20 "fusion_storage.bin" pickle files.
# Read the pickles and run each pickle, recording the results in
# a numpy tensor:
#
# tensor = num_seeds x num_steps x num_colours x num_parts
#
# num_seeds = to be determined
# num_steps = 1001
# num_colours = 5 (white, red, orange, blue, green)
# num_parts = 3
#
# After this tensor has been filled with values, generate
# a table of the form:
#
# <prob N M> = <probability for N managers and M workers>
#
# row in table = <step number> <prob 3 0> <prob 2 1> <prob 1 2> <prob 0 3>
#
import golly as g
import model_classes as mclass
import model_functions as mfunc
import model_parameters as mparam
import numpy as np
import copy
import time
import pickle
import os
import re
import sys
#
# Parameter values for making the graphs.
#
max_seeds = 2000 # probably won't need more seeds than this
num_steps = 1001 # number of time steps in the game
num_colours = 5 # 5 colours [white, red, blue, orange, green]
num_parts = 3 # number of parts
num_files = 20 # number of fusion pickle files
step_size = 20 # number of time steps between each plot point
#
# Location of fusion_storage.bin files -- the input pickles.
#
fusion_dir = "C:/Users/peter/Peter's Projects" + \
"/management-theory-revised/Experiments"
# list of pickle files
fusion_files = []
# loop through the fusion files and record the file paths
# -- we assume the folders have the form "run1", "run2", ...
for i in range(num_files):
fusion_files.append(fusion_dir + "/run" + str(i + 1) + \
"/fusion_storage.bin")
#
# Loop through the pickles, loading them into fusion_list.
# Each fusion file will contain several pickles.
#
seed_list = mfunc.read_fusion_pickles(fusion_files)
#
# Given a list of seeds, fill a tensor with counts of the growth of colours
# generated by running the Management Game.
#
[tensor, num_seeds] = mfunc.growth_tensor(g, seed_list, step_size,
max_seeds, num_steps, num_colours, num_parts)
#
# now the tensor is full, so let's make the graph for 3 parts
#
graph_file = fusion_dir + "/fusion_pickles_probability_3.txt"
graph_handle = open(graph_file, "w")
graph_handle.write("\n\nNOTE: {} Seeds -- {} Parts per seed\n\n".format(
num_seeds, num_parts))
header = ["step num", \
"3 managers and 0 workers", \
"2 managers and 1 worker", \
"1 manager and 2 workers", \
"0 managers and 3 workers"]
graph_handle.write("\t".join(header) + "\n")
#
for step_num in range(0, num_steps, step_size):
# initialize counts
count_3m0w = 0 # 3 managers, 0 workers
count_2m1w = 0 # 2 managers, 1 worker
count_1m2w = 0 # 1 manager, 2 workers
count_0m3w = 0 # 0 managers, 3 workers
# iterate over seed_num
for seed_num in range(num_seeds):
# iterate over parts
manager_count = 0
for part_num in range(num_parts):
# extract colours
red = tensor[seed_num, step_num, 1, part_num]
blue = tensor[seed_num, step_num, 2, part_num]
orange = tensor[seed_num, step_num, 3, part_num]
green = tensor[seed_num, step_num, 4, part_num]
# we focus on the current part (part_num) only
# -- the current part is always red, by convention
red_manager = (orange > green) # true or false
manager_count += red_manager # will increment by 0 or 1
# increment counts
if (manager_count == 3):
count_3m0w += 1
elif (manager_count == 2):
count_2m1w += 1
elif (manager_count == 1):
count_1m2w += 1
else:
count_0m3w += 1
#
assert count_3m0w + count_2m1w + count_1m2w + count_0m3w == num_seeds
#
probability_3m0w = count_3m0w / num_seeds
probability_2m1w = count_2m1w / num_seeds
probability_1m2w = count_1m2w / num_seeds
probability_0m3w = count_0m3w / num_seeds
#
graph_handle.write("{}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\n".format(step_num,
probability_3m0w, probability_2m1w, probability_1m2w, probability_0m3w))
#
#
graph_handle.close()
#
# | 33.832 | 77 | 0.674391 | #
# Fusion Pickles Probability 3 Parts
#
# Peter Turney, July 14, 2021
#
# From the 20 runs, extract all of the pickled three-part seeds
# that are stored in the 20 "fusion_storage.bin" pickle files.
# Read the pickles and run each pickle, recording the results in
# a numpy tensor:
#
# tensor = num_seeds x num_steps x num_colours x num_parts
#
# num_seeds = to be determined
# num_steps = 1001
# num_colours = 5 (white, red, orange, blue, green)
# num_parts = 3
#
# After this tensor has been filled with values, generate
# a table of the form:
#
# <prob N M> = <probability for N managers and M workers>
#
# row in table = <step number> <prob 3 0> <prob 2 1> <prob 1 2> <prob 0 3>
#
import golly as g
import model_classes as mclass
import model_functions as mfunc
import model_parameters as mparam
import numpy as np
import copy
import time
import pickle
import os
import re
import sys
#
# Parameter values for making the graphs.
#
max_seeds = 2000 # probably won't need more seeds than this
num_steps = 1001 # number of time steps in the game
num_colours = 5 # 5 colours [white, red, blue, orange, green]
# NOTE(review): the colour order in the comment above disagrees with the
# header comment ("white, red, orange, blue, green"); the tensor indices
# used in the loop below assume 1=red, 2=blue, 3=orange, 4=green --
# confirm which ordering is authoritative before relying on it.
num_parts = 3 # number of parts
num_files = 20 # number of fusion pickle files
step_size = 20 # number of time steps between each plot point
#
# Location of fusion_storage.bin files -- the input pickles.
#
fusion_dir = "C:/Users/peter/Peter's Projects" + \
             "/management-theory-revised/Experiments"
# list of pickle files
fusion_files = []
# loop through the fusion files and record the file paths
# -- we assume the folders have the form "run1", "run2", ...
for i in range(num_files):
  fusion_files.append(fusion_dir + "/run" + str(i + 1) + \
    "/fusion_storage.bin")
#
# Loop through the pickles, loading them into fusion_list.
# Each fusion file will contain several pickles.
#
seed_list = mfunc.read_fusion_pickles(fusion_files)
#
# Given a list of seeds, fill a tensor with counts of the growth of colours
# generated by running the Management Game.
#
[tensor, num_seeds] = mfunc.growth_tensor(g, seed_list, step_size,
  max_seeds, num_steps, num_colours, num_parts)
#
# now the tensor is full, so let's make the graph for 3 parts
# (tab-separated text table, one row per sampled time step)
#
graph_file = fusion_dir + "/fusion_pickles_probability_3.txt"
graph_handle = open(graph_file, "w")
graph_handle.write("\n\nNOTE: {} Seeds -- {} Parts per seed\n\n".format(
  num_seeds, num_parts))
header = ["step num", \
          "3 managers and 0 workers", \
          "2 managers and 1 worker", \
          "1 manager and 2 workers", \
          "0 managers and 3 workers"]
graph_handle.write("\t".join(header) + "\n")
#
for step_num in range(0, num_steps, step_size):
  # initialize counts
  count_3m0w = 0 # 3 managers, 0 workers
  count_2m1w = 0 # 2 managers, 1 worker
  count_1m2w = 0 # 1 manager, 2 workers
  count_0m3w = 0 # 0 managers, 3 workers
  # iterate over seed_num
  for seed_num in range(num_seeds):
    # iterate over parts
    manager_count = 0
    for part_num in range(num_parts):
      # extract colours
      # NOTE(review): red and blue are read but never used below; only
      # the orange-vs-green comparison decides manager status.
      red = tensor[seed_num, step_num, 1, part_num]
      blue = tensor[seed_num, step_num, 2, part_num]
      orange = tensor[seed_num, step_num, 3, part_num]
      green = tensor[seed_num, step_num, 4, part_num]
      # we focus on the current part (part_num) only
      # -- the current part is always red, by convention
      # a part counts as a manager when its orange count exceeds green
      red_manager = (orange > green) # true or false
      manager_count += red_manager # will increment by 0 or 1
    # increment counts
    # classify the seed by how many of its 3 parts are managers
    if (manager_count == 3):
      count_3m0w += 1
    elif (manager_count == 2):
      count_2m1w += 1
    elif (manager_count == 1):
      count_1m2w += 1
    else:
      count_0m3w += 1
  #
  # every seed must land in exactly one of the four categories
  assert count_3m0w + count_2m1w + count_1m2w + count_0m3w == num_seeds
  #
  probability_3m0w = count_3m0w / num_seeds
  probability_2m1w = count_2m1w / num_seeds
  probability_1m2w = count_1m2w / num_seeds
  probability_0m3w = count_0m3w / num_seeds
  #
  graph_handle.write("{}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\n".format(step_num,
    probability_3m0w, probability_2m1w, probability_1m2w, probability_0m3w))
#
#
graph_handle.close()
#
# | 0 | 0 | 0 |
65ac9cb1a116de9b39966c6fe34e3dcee6cf7692 | 8,839 | py | Python | cli/aws_ddk/__main__.py | vemel/aws-ddk | d34bd1d98f5a170026a1b65f9629e909ca839930 | [
"Apache-2.0"
] | null | null | null | cli/aws_ddk/__main__.py | vemel/aws-ddk | d34bd1d98f5a170026a1b65f9629e909ca839930 | [
"Apache-2.0"
] | null | null | null | cli/aws_ddk/__main__.py | vemel/aws-ddk | d34bd1d98f5a170026a1b65f9629e909ca839930 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
from typing import Optional, Tuple
import click
from aws_ddk.__metadata__ import __version__
from aws_ddk.commands.bootstrap import bootstrap_account
from aws_ddk.commands.create import create_code_repository
from aws_ddk.commands.deploy import cdk_deploy
from aws_ddk.commands.init import init_project
from aws_ddk.utils import get_package_root
from boto3 import Session, _get_default_session, setup_default_session
DEBUG_LOGGING_FORMAT = "[%(asctime)s][%(filename)-13s:%(lineno)3d] %(message)s"
DEBUG_LOGGING_FORMAT_REMOTE = "[%(filename)-13s:%(lineno)3d] %(message)s"
DEFAULT_PROJECT_TEMPLATE = "data/project_templates/ddk_app/"
_logger: logging.Logger = logging.getLogger(__name__)
@click.group()
@click.version_option(__version__)
@click.option(
    "--debug/--no-debug",
    default=False,
    help="Turn debug logging on/off.",
    show_default=True,
)
def cli(
    debug: bool,
) -> None:
    """AWS DDK CLI."""
    # Print the version banner on every invocation so logs and bug reports
    # always carry both the CLI version and the Python version.
    click.echo(f"AWS DDK CLI {__version__} (Python {sys.version})")
    if debug:
        # Verbose mode: switch loggers to DEBUG with a timestamped format.
        enable_debug(format=DEBUG_LOGGING_FORMAT)
    _logger.debug(f"debug: {debug}")
@cli.command(name="init")
@click.argument(
    "name",
    type=str,
    required=True,
)
@click.option(
    "--environment",
    "-e",
    type=str,
    help="The id of the environment.",
    required=True,
    default="dev",
    show_default=True,
)
@click.option(
    "--template",
    "-t",
    type=str,
    help="A directory containing a project template directory, or a URL to a git repository",
)
def init(name: str, environment: str, template: Optional[str] = None) -> None:
    """
    Create the local structure for a new AWS DDK Python project.
    NAME is the name of the project.
    """
    # Use default Cookiecutter project template
    # bundled with this package when the user did not pass --template.
    if not template:
        template = os.path.join(get_package_root(), DEFAULT_PROJECT_TEMPLATE)
    return init_project(name=name, environment=environment, template=template)
@cli.command(name="bootstrap")
# Identifier-style options (--environment, --prefix, --qualifier) are
# validated by the RegexString param type, so malformed values are
# rejected at argument-parsing time rather than during deployment.
@click.option(
    "--environment",
    "-e",
    type=RegexString(regex=r"^[A-Za-z0-9_-]{1,4}$"),
    help="The id of the environment.",
    required=True,
    default="dev",
    show_default=True,
)
@click.option(
    "--profile",
    "-p",
    type=str,
    default="default",
    help="Use a specific profile from your AWS credentials file.",
    show_default=True,
    required=False,
)
@click.option(
    "--region",
    "-r",
    type=str,
    default=None,
    help="AWS Region name (e.g. us-east-1). If None, it will be inferred.",
    show_default=False,
    required=False,
)
@click.option(
    "--prefix",
    type=RegexString(regex=r"^[A-Za-z0-9_-]{1,5}$"),
    help="The prefix to resource names.",
    required=False,
    default="ddk",
    show_default=True,
)
@click.option(
    "--qualifier",
    type=RegexString(regex=r"^[A-Za-z0-9_-]{1,10}$"),
    help="The CDK bootstrap qualifier.",
    required=False,
)
# multiple=True options arrive in the function body as tuples.
@click.option(
    "--trusted-accounts",
    "-a",
    type=str,
    help="List of trusted AWS accounts to perform deployments (e.g. -a 111111111111 -a 222222222222).",
    multiple=True,
    required=False,
)
@click.option(
    "--iam-policies",
    "-i",
    type=str,
    help="""List of IAM managed policy ARNs that should be attached to the role performing deployments.
    (e.g. -i arn1 -i arn2)""",
    multiple=True,
    required=False,
)
@click.option(
    "--permissions-boundary",
    type=str,
    help="IAM managed permissions boundary policy ARN that should be attached to the role performing deployments.",
    required=False,
)
@click.option(
    "--tags",
    "-t",
    type=(str, str),
    help="List of tags to apply to the stack (e.g -t CostCenter 1984 -t Framework DDK).",
    multiple=True,
    required=False,
)
def bootstrap(
    environment: str,
    profile: str,
    region: Optional[str] = None,
    prefix: Optional[str] = None,
    qualifier: Optional[str] = None,
    trusted_accounts: Optional[Tuple[str]] = None,
    iam_policies: Optional[Tuple[str]] = None,
    permissions_boundary: Optional[str] = None,
    tags: Optional[Tuple[Tuple[str, str]]] = None,
) -> None:
    """Bootstrap the AWS account with DDK resources."""
    # Install the global boto3 session first; bootstrap_account then acts
    # on whatever account/region that session resolves to.
    setup_boto_session(profile, region)
    bootstrap_account(
        environment=environment,
        prefix=prefix,
        qualifier=qualifier,
        trusted_accounts=trusted_accounts,
        iam_policies=iam_policies,
        permissions_boundary=permissions_boundary,
        tags=tags,
    )
@cli.command(name="create-repository")
@click.argument(
    "name",
    type=str,
    required=True,
)
@click.option(
    "--profile",
    "-p",
    type=str,
    default="default",
    help="Use a specific profile from your AWS credentials file.",
    show_default=True,
    required=False,
)
@click.option(
    "--region",
    "-r",
    type=str,
    default=None,
    help="AWS Region name (e.g. us-east-1). If None, it will be inferred.",
    show_default=False,
    required=False,
)
@click.option(
    "--description",
    "-d",
    type=str,
    help="The description of the repository.",
    required=False,
)
# multiple=True: --tags arrives as a tuple of (key, value) pairs.
@click.option(
    "--tags",
    "-t",
    type=(str, str),
    help="List of tags to apply to the repository (e.g -t CostCenter 1984 -t Framework DDK).",
    multiple=True,
    required=False,
)
def create_repository(
    name: str,
    profile: str,
    region: Optional[str] = None,
    description: Optional[str] = None,
    tags: Optional[Tuple[Tuple[str, str]]] = None,
) -> None:
    """
    Create a code repository from the source system provider.
    NAME is the name of the repository.
    """
    # Install the global boto3 session before touching the provider API.
    setup_boto_session(profile, region)
    create_code_repository(
        name=name,
        description=description,
        tags=tags,
    )
@cli.command(name="deploy")
@click.option(
    "--profile",
    "-p",
    type=str,
    default="default",
    help="Use a specific profile from your AWS credentials file.",
    show_default=True,
    required=False,
)
@click.option(
    "--require-approval",
    type=click.Choice(["never", "any-change", "broadening"], case_sensitive=False),
    default="never",
    help="What security-sensitive changes need manual approval.",
    required=False,
)
@click.option(
    "--force",
    "-f",
    is_flag=True,
    default=False,
    help="Always deploy stack even if templates are identical.",
    required=False,
)
@click.option(
    "--output-dir",
    "-o",
    type=str,
    help="Directory where cloud assembly is synthesized.",
    required=False,
)
def deploy(
    profile: str,
    require_approval: Optional[str] = None,
    force: Optional[bool] = None,
    output_dir: Optional[str] = None,
) -> None:
    """Deploy DDK stacks to AWS account."""
    # NOTE(review): unlike bootstrap/create-repository, deploy exposes no
    # --region option, so the region comes from the profile -- confirm
    # this asymmetry is intentional.
    setup_boto_session(profile)
    cdk_deploy(
        profile=profile,
        require_approval=require_approval,
        force=force,
        output_dir=output_dir,
    )
| 26.703927 | 115 | 0.663084 | #!/usr/bin/env python3
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
from typing import Optional, Tuple
import click
from aws_ddk.__metadata__ import __version__
from aws_ddk.commands.bootstrap import bootstrap_account
from aws_ddk.commands.create import create_code_repository
from aws_ddk.commands.deploy import cdk_deploy
from aws_ddk.commands.init import init_project
from aws_ddk.utils import get_package_root
from boto3 import Session, _get_default_session, setup_default_session
DEBUG_LOGGING_FORMAT = "[%(asctime)s][%(filename)-13s:%(lineno)3d] %(message)s"
DEBUG_LOGGING_FORMAT_REMOTE = "[%(filename)-13s:%(lineno)3d] %(message)s"
DEFAULT_PROJECT_TEMPLATE = "data/project_templates/ddk_app/"
_logger: logging.Logger = logging.getLogger(__name__)
def enable_debug(format: str) -> None:
    """Switch the CLI into verbose mode.

    Configures root logging and the package logger at DEBUG level using
    the supplied message *format*, while pinning the chattiest third-party
    libraries to ERROR so their internals do not drown out DDK output.
    """
    logging.basicConfig(level=logging.DEBUG, format=format)
    _logger.setLevel(logging.DEBUG)
    # These libraries are far too verbose at DEBUG; silence all but errors.
    for noisy_name in ("boto3", "botocore", "urllib3", "cookiecutter", "sh"):
        logging.getLogger(noisy_name).setLevel(logging.ERROR)
def setup_boto_session(profile: str, region: Optional[str] = None) -> None:
    """Install *profile*/*region* as the global boto3 default session.

    All boto3 clients created afterwards by the commands inherit this
    session. When *region* is None, boto3 infers it from the profile or
    environment. The resolved values are logged at DEBUG for diagnostics.
    """
    # Setup profile and region globally in boto3 sessions
    setup_default_session(profile_name=profile, region_name=region)
    session: Session = _get_default_session()
    _logger.debug(f"profile: {session.profile_name}")
    _logger.debug(f"region: {session.region_name}")
class RegexString(click.ParamType):
    """Click parameter type that accepts only values matching a regex.

    Used to validate identifier-style options (environment ids, prefixes,
    qualifiers) at argument-parsing time.
    """

    name = "regex-string"

    def __init__(self, regex: str):
        # Stored as a pattern string; the re module caches compilations.
        self._regex = regex

    def convert(self, value: str, param: Optional[click.Parameter], ctx: Optional[click.Context]) -> str:
        """Return *value* unchanged, or abort option parsing on mismatch."""
        if re.match(self._regex, value) is None:
            # ParamType.fail raises a click UsageError with this message.
            self.fail(
                f"Value '{value}' does not match regex {self._regex}",
                param,
                ctx,
            )
        return value
@click.group()
@click.version_option(__version__)
@click.option(
    "--debug/--no-debug",
    default=False,
    help="Turn debug logging on/off.",
    show_default=True,
)
def cli(
    debug: bool,
) -> None:
    """AWS DDK CLI."""
    # Print the version banner on every invocation so logs and bug reports
    # always carry both the CLI version and the Python version.
    click.echo(f"AWS DDK CLI {__version__} (Python {sys.version})")
    if debug:
        # Verbose mode: switch loggers to DEBUG with a timestamped format.
        enable_debug(format=DEBUG_LOGGING_FORMAT)
    _logger.debug(f"debug: {debug}")
@cli.command(name="init")
@click.argument(
    "name",
    type=str,
    required=True,
)
@click.option(
    "--environment",
    "-e",
    type=str,
    help="The id of the environment.",
    required=True,
    default="dev",
    show_default=True,
)
@click.option(
    "--template",
    "-t",
    type=str,
    help="A directory containing a project template directory, or a URL to a git repository",
)
def init(name: str, environment: str, template: Optional[str] = None) -> None:
    """
    Create the local structure for a new AWS DDK Python project.
    NAME is the name of the project.
    """
    # Use default Cookiecutter project template
    # bundled with this package when the user did not pass --template.
    if not template:
        template = os.path.join(get_package_root(), DEFAULT_PROJECT_TEMPLATE)
    return init_project(name=name, environment=environment, template=template)
@cli.command(name="bootstrap")
# Identifier-style options (--environment, --prefix, --qualifier) are
# validated by the RegexString param type, so malformed values are
# rejected at argument-parsing time rather than during deployment.
@click.option(
    "--environment",
    "-e",
    type=RegexString(regex=r"^[A-Za-z0-9_-]{1,4}$"),
    help="The id of the environment.",
    required=True,
    default="dev",
    show_default=True,
)
@click.option(
    "--profile",
    "-p",
    type=str,
    default="default",
    help="Use a specific profile from your AWS credentials file.",
    show_default=True,
    required=False,
)
@click.option(
    "--region",
    "-r",
    type=str,
    default=None,
    help="AWS Region name (e.g. us-east-1). If None, it will be inferred.",
    show_default=False,
    required=False,
)
@click.option(
    "--prefix",
    type=RegexString(regex=r"^[A-Za-z0-9_-]{1,5}$"),
    help="The prefix to resource names.",
    required=False,
    default="ddk",
    show_default=True,
)
@click.option(
    "--qualifier",
    type=RegexString(regex=r"^[A-Za-z0-9_-]{1,10}$"),
    help="The CDK bootstrap qualifier.",
    required=False,
)
# multiple=True options arrive in the function body as tuples.
@click.option(
    "--trusted-accounts",
    "-a",
    type=str,
    help="List of trusted AWS accounts to perform deployments (e.g. -a 111111111111 -a 222222222222).",
    multiple=True,
    required=False,
)
@click.option(
    "--iam-policies",
    "-i",
    type=str,
    help="""List of IAM managed policy ARNs that should be attached to the role performing deployments.
    (e.g. -i arn1 -i arn2)""",
    multiple=True,
    required=False,
)
@click.option(
    "--permissions-boundary",
    type=str,
    help="IAM managed permissions boundary policy ARN that should be attached to the role performing deployments.",
    required=False,
)
@click.option(
    "--tags",
    "-t",
    type=(str, str),
    help="List of tags to apply to the stack (e.g -t CostCenter 1984 -t Framework DDK).",
    multiple=True,
    required=False,
)
def bootstrap(
    environment: str,
    profile: str,
    region: Optional[str] = None,
    prefix: Optional[str] = None,
    qualifier: Optional[str] = None,
    trusted_accounts: Optional[Tuple[str]] = None,
    iam_policies: Optional[Tuple[str]] = None,
    permissions_boundary: Optional[str] = None,
    tags: Optional[Tuple[Tuple[str, str]]] = None,
) -> None:
    """Bootstrap the AWS account with DDK resources."""
    # Install the global boto3 session first; bootstrap_account then acts
    # on whatever account/region that session resolves to.
    setup_boto_session(profile, region)
    bootstrap_account(
        environment=environment,
        prefix=prefix,
        qualifier=qualifier,
        trusted_accounts=trusted_accounts,
        iam_policies=iam_policies,
        permissions_boundary=permissions_boundary,
        tags=tags,
    )
@cli.command(name="create-repository")
@click.argument(
    "name",
    type=str,
    required=True,
)
@click.option(
    "--profile",
    "-p",
    type=str,
    default="default",
    help="Use a specific profile from your AWS credentials file.",
    show_default=True,
    required=False,
)
@click.option(
    "--region",
    "-r",
    type=str,
    default=None,
    help="AWS Region name (e.g. us-east-1). If None, it will be inferred.",
    show_default=False,
    required=False,
)
@click.option(
    "--description",
    "-d",
    type=str,
    help="The description of the repository.",
    required=False,
)
# multiple=True: --tags arrives as a tuple of (key, value) pairs.
@click.option(
    "--tags",
    "-t",
    type=(str, str),
    help="List of tags to apply to the repository (e.g -t CostCenter 1984 -t Framework DDK).",
    multiple=True,
    required=False,
)
def create_repository(
    name: str,
    profile: str,
    region: Optional[str] = None,
    description: Optional[str] = None,
    tags: Optional[Tuple[Tuple[str, str]]] = None,
) -> None:
    """
    Create a code repository from the source system provider.
    NAME is the name of the repository.
    """
    # Install the global boto3 session before touching the provider API.
    setup_boto_session(profile, region)
    create_code_repository(
        name=name,
        description=description,
        tags=tags,
    )
)
@cli.command(name="deploy")
@click.option(
"--profile",
"-p",
type=str,
default="default",
help="Use a specific profile from your AWS credentials file.",
show_default=True,
required=False,
)
@click.option(
"--require-approval",
type=click.Choice(["never", "any-change", "broadening"], case_sensitive=False),
default="never",
help="What security-sensitive changes need manual approval.",
required=False,
)
@click.option(
"--force",
"-f",
is_flag=True,
default=False,
help="Always deploy stack even if templates are identical.",
required=False,
)
@click.option(
"--output-dir",
"-o",
type=str,
help="Directory where cloud assembly is synthesized.",
required=False,
)
def deploy(
profile: str,
require_approval: Optional[str] = None,
force: Optional[bool] = None,
output_dir: Optional[str] = None,
) -> None:
"""Deploy DDK stacks to AWS account."""
setup_boto_session(profile)
cdk_deploy(
profile=profile,
require_approval=require_approval,
force=force,
output_dir=output_dir,
)
def main() -> int:
    """Console-script entry point; returns a POSIX exit status.

    NOTE(review): in click's standalone mode the group normally calls
    sys.exit() itself, so the explicit return 0 is a fallback -- confirm.
    """
    cli()
    return 0
| 1,107 | 94 | 92 |