hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3c4ec73cc65108ee20c2a64c2347e068d28407dc | 223 | py | Python | weather/forms.py | Arvind-4/Weather-app-using-Django | c57382e32d08835afcb93c7d212448a0c8f30324 | [
"MIT"
] | null | null | null | weather/forms.py | Arvind-4/Weather-app-using-Django | c57382e32d08835afcb93c7d212448a0c8f30324 | [
"MIT"
] | null | null | null | weather/forms.py | Arvind-4/Weather-app-using-Django | c57382e32d08835afcb93c7d212448a0c8f30324 | [
"MIT"
] | null | null | null | from django import forms | 27.875 | 52 | 0.695067 | from django import forms
class WeatherForm(forms.Form):
name = forms.CharField(max_length=100, label=False,
widget=forms.TextInput(attrs={
'placeholder': 'Enter a Valid Place ...',
'class': 'form-control',
})) | 0 | 175 | 23 |
05e8b623f889d82018e9585037b0879e1e151079 | 46 | py | Python | tests/tasks/sodasql/__init__.py | concreted/prefect | dd732f5990ee2b0f3d816adb285168fd63b239e4 | [
"Apache-2.0"
] | 8,633 | 2019-03-23T17:51:03.000Z | 2022-03-31T22:17:42.000Z | tests/tasks/sodasql/__init__.py | concreted/prefect | dd732f5990ee2b0f3d816adb285168fd63b239e4 | [
"Apache-2.0"
] | 3,903 | 2019-03-23T19:11:21.000Z | 2022-03-31T23:21:23.000Z | tests/tasks/sodasql/__init__.py | ngriffiths13/prefect | 7f5613abcb182494b7dc12159277c3bc5f3c9898 | [
"Apache-2.0"
] | 937 | 2019-03-23T18:49:44.000Z | 2022-03-31T21:45:13.000Z | import pytest
pytest.importorskip("sodasql")
| 11.5 | 30 | 0.804348 | import pytest
pytest.importorskip("sodasql")
| 0 | 0 | 0 |
2aa9bb37b9056e8c27c42f3ffddbdeab93cee3f4 | 1,615 | py | Python | examples/world_example.py | ABostrom/py_adventure | 4e2765be1d05aba45c3a45280aeea0b709814d63 | [
"MIT"
] | 2 | 2020-10-12T13:33:31.000Z | 2020-10-14T12:00:40.000Z | examples/world_example.py | ABostrom/py_adventure | 4e2765be1d05aba45c3a45280aeea0b709814d63 | [
"MIT"
] | null | null | null | examples/world_example.py | ABostrom/py_adventure | 4e2765be1d05aba45c3a45280aeea0b709814d63 | [
"MIT"
] | 1 | 2021-04-30T08:10:34.000Z | 2021-04-30T08:10:34.000Z | from py_adventure import PointOfInterest
from py_adventure import ZoneConnection
from py_adventure import City, Zone, Location, Building
from py_adventure import Region
from typing import Dict, List
barrel : PointOfInterest = PointOfInterest("Barrel")
gate : Location = Location("City Gate")
inn : Location = Building("Lion's Rest Inn", [barrel])
city1 : Zone = City("Baldur's Gate", [gate,inn])
city2 : Zone = City("Elturel")
road1 : Zone = Zone("Fields of the Dead")
#exit from BG to Fields of the Dead
connection1 = ZoneConnection("North Exit", road1)
#region exits
connection2 = ZoneConnection("West Exit", city1)
connection3 = ZoneConnection("East Exit", city2)
#exit from elturel to region
connection4 = ZoneConnection("South Exit", road1)
connections : Dict[Zone, List[ZoneConnection]] = {
city1 : [connection1],
road1 : [connection2, connection3],
city2 : [connection4]
}
world : Region = Region("Faerun", city1, connections)
current_zone : Zone = world.get_current_zone()
print(f"You arrive at {current_zone}")
print(f"Where would you like to explore?")
for loc in current_zone.get_locations():
print(loc)
print("Or you can leave via:")
for connection in world.get_available_exits():
print(connection)
print("--------------------------------")
#go to the lions rest inn.
current_location : Location = current_zone.get_locations()[1]
print(f"You arrive at the {current_location.get_name()}")
print("Looking around you notice:")
for poi in current_location.get_points_of_interest():
print(poi.get_name())
print(f"or you can leave to go back to {current_zone}")
| 25.234375 | 61 | 0.723839 | from py_adventure import PointOfInterest
from py_adventure import ZoneConnection
from py_adventure import City, Zone, Location, Building
from py_adventure import Region
from typing import Dict, List
barrel : PointOfInterest = PointOfInterest("Barrel")
gate : Location = Location("City Gate")
inn : Location = Building("Lion's Rest Inn", [barrel])
city1 : Zone = City("Baldur's Gate", [gate,inn])
city2 : Zone = City("Elturel")
road1 : Zone = Zone("Fields of the Dead")
#exit from BG to Fields of the Dead
connection1 = ZoneConnection("North Exit", road1)
#region exits
connection2 = ZoneConnection("West Exit", city1)
connection3 = ZoneConnection("East Exit", city2)
#exit from elturel to region
connection4 = ZoneConnection("South Exit", road1)
connections : Dict[Zone, List[ZoneConnection]] = {
city1 : [connection1],
road1 : [connection2, connection3],
city2 : [connection4]
}
world : Region = Region("Faerun", city1, connections)
current_zone : Zone = world.get_current_zone()
print(f"You arrive at {current_zone}")
print(f"Where would you like to explore?")
for loc in current_zone.get_locations():
print(loc)
print("Or you can leave via:")
for connection in world.get_available_exits():
print(connection)
print("--------------------------------")
#go to the lions rest inn.
current_location : Location = current_zone.get_locations()[1]
print(f"You arrive at the {current_location.get_name()}")
print("Looking around you notice:")
for poi in current_location.get_points_of_interest():
print(poi.get_name())
print(f"or you can leave to go back to {current_zone}")
| 0 | 0 | 0 |
e6742061c0ba67cbd67700b553a644503d8034c7 | 2,105 | py | Python | 2016/python/day8.py | jO-Osko/adventofcode2015 | 274fe880b91dffff5fba15220c9045a933e319ec | [
"MIT"
] | null | null | null | 2016/python/day8.py | jO-Osko/adventofcode2015 | 274fe880b91dffff5fba15220c9045a933e319ec | [
"MIT"
] | null | null | null | 2016/python/day8.py | jO-Osko/adventofcode2015 | 274fe880b91dffff5fba15220c9045a933e319ec | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = "Filip Koprivec"
from helper import get_file
DAY = 8
HEIGHT = 6
WIDTH = 50
data = [line.strip() for line in get_file(DAY)]
print(part1(data))
print(part2(data))
| 24.195402 | 65 | 0.430879 | # -*- coding: utf-8 -*-
__author__ = "Filip Koprivec"
from helper import get_file
DAY = 8
HEIGHT = 6
WIDTH = 50
data = [line.strip() for line in get_file(DAY)]
def part1(data):
screen = [["." for k in range(WIDTH)] for j in range(HEIGHT)]
for line in data:
cmd, *rest = line.split(" ", 1)
if cmd == "rect":
A, B = map(int, rest[0].split("x"))
A %= WIDTH
B %= HEIGHT
for j in range(B):
screen[j][0:A] = "#"*A
continue
rest = rest[0].split(" ")
B = int(rest[-1])
A = int(rest[1].split("=")[-1])
if rest[0].startswith("row"):
B %= WIDTH
temp = [k for k in screen[A]]
for j in range(WIDTH):
screen[A][(j + B) % WIDTH] = temp[j]
else:
B %= HEIGHT
temp = [screen[j][A] for j in range(HEIGHT)]
for j in range(HEIGHT):
screen[(j + B) % HEIGHT][A] = temp[j]
su = 0
for j in screen:
print("".join(j))
for k in j:
if k == "#":
su += 1
return su
def part2(data):
screen = [["." for k in range(WIDTH)] for j in range(HEIGHT)]
for line in data:
cmd, *rest = line.split(" ", 1)
if cmd == "rect":
A, B = map(int, rest[0].split("x"))
A %= WIDTH
B %= HEIGHT
for j in range(B):
screen[j][0:A] = "#"*A
continue
rest = rest[0].split(" ")
B = int(rest[-1])
A = int(rest[1].split("=")[-1])
if rest[0].startswith("row"):
B %= WIDTH
temp = [k for k in screen[A]]
for j in range(WIDTH):
screen[A][(j + B) % WIDTH] = temp[j]
else:
B %= HEIGHT
temp = [screen[j][A] for j in range(HEIGHT)]
for j in range(HEIGHT):
screen[(j + B) % HEIGHT][A] = temp[j]
rtr = ""
for j in screen:
rtr += "".join(j) + "\n"
return rtr
print(part1(data))
print(part2(data))
| 1,850 | 0 | 46 |
d5cbf354618f1283683aac70b1bdc1d8054c347a | 598 | py | Python | HW2/code_review/q2/reducer.py | CrossD/Stuff | 1556f046c73488b9e52e06a67324b959fd1af534 | [
"MIT"
] | null | null | null | HW2/code_review/q2/reducer.py | CrossD/Stuff | 1556f046c73488b9e52e06a67324b959fd1af534 | [
"MIT"
] | null | null | null | HW2/code_review/q2/reducer.py | CrossD/Stuff | 1556f046c73488b9e52e06a67324b959fd1af534 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# from operator import itemgetter
import sys
last_group = None
current_count = 1
# input comes from STDIN
for line in sys.stdin:
# Remove trailing '\n'
line = line.strip()
# Extract (key,value)
vs = line.split('\t')
# print vs[0]
current_group = vs[0].strip()
if last_group == current_group:
current_count += int(vs[1])
else:
if last_group != None:
print last_group, current_count
last_group = current_group
current_count = 1
# Last one:
if last_group != None:
print last_group, current_count | 22.148148 | 43 | 0.635452 | #!/usr/bin/env python
# from operator import itemgetter
import sys
last_group = None
current_count = 1
# input comes from STDIN
for line in sys.stdin:
# Remove trailing '\n'
line = line.strip()
# Extract (key,value)
vs = line.split('\t')
# print vs[0]
current_group = vs[0].strip()
if last_group == current_group:
current_count += int(vs[1])
else:
if last_group != None:
print last_group, current_count
last_group = current_group
current_count = 1
# Last one:
if last_group != None:
print last_group, current_count | 0 | 0 | 0 |
c273d771d285b1dfd2707b7f1a4e798420a388cb | 901 | py | Python | tui/widgets/label.py | sweettuse/texttui | 773a6bd1cafda5cded2333c6782fdb379b77c8ed | [
"BSD-2-Clause"
] | 3 | 2022-02-09T03:25:49.000Z | 2022-02-10T04:54:34.000Z | tui/widgets/label.py | sweettuse/texttui | 773a6bd1cafda5cded2333c6782fdb379b77c8ed | [
"BSD-2-Clause"
] | null | null | null | tui/widgets/label.py | sweettuse/texttui | 773a6bd1cafda5cded2333c6782fdb379b77c8ed | [
"BSD-2-Clause"
] | 2 | 2022-02-10T04:54:57.000Z | 2022-02-10T05:50:00.000Z | from __future__ import annotations
import rich
from rich.align import Align
from rich.style import Style, StyleType
from textual.widget import Reactive, Widget
| 25.027778 | 61 | 0.601554 | from __future__ import annotations
import rich
from rich.align import Align
from rich.style import Style, StyleType
from textual.widget import Reactive, Widget
class Label(Widget):
def __init__(
self,
label: str,
style: StyleType = "bold",
name: str | None = None,
align: str = "left",
):
super().__init__(name=name)
self.name = name or label
self.label = label
self.style = style
self.align = align
label: Reactive[str] = Reactive('')
def render(self) -> RenderableType:
if self.align == "left":
return Align.left(self.label, style=self.style)
elif self.align == "center":
return Align.center(self.label, style=self.style)
else:
return Align.right(self.label, style=self.style)
def set_label(self, label):
self.label = label
| 595 | 120 | 23 |
75fce0614ba5dae29fdccb541feb0832d2f980cf | 12,828 | py | Python | package/code/gtfs_process/data_engineering/enrich_travel.py | highered-esricanada/Parallel-GTFS-Workflow | 5386ca58708cfcf3e9aa901b02e273b98dfe2fcb | [
"MIT"
] | null | null | null | package/code/gtfs_process/data_engineering/enrich_travel.py | highered-esricanada/Parallel-GTFS-Workflow | 5386ca58708cfcf3e9aa901b02e273b98dfe2fcb | [
"MIT"
] | null | null | null | package/code/gtfs_process/data_engineering/enrich_travel.py | highered-esricanada/Parallel-GTFS-Workflow | 5386ca58708cfcf3e9aa901b02e273b98dfe2fcb | [
"MIT"
] | null | null | null | """
Author: Anastassios Dardas, PhD - Higher Education Specialist at the Education & Research at Esri Canada.
Date: Re-modified Q1 - 2022
About:
"""
from pandas import DataFrame
from ..util import TimeDelta, SpatialDelta
| 50.703557 | 237 | 0.664718 | """
Author: Anastassios Dardas, PhD - Higher Education Specialist at the Education & Research at Esri Canada.
Date: Re-modified Q1 - 2022
About:
"""
from pandas import DataFrame
from ..util import TimeDelta, SpatialDelta
class RteEnricher:
def __init__(self, clean_df: DataFrame, undiss_df, stp_df: DataFrame, stop_times: DataFrame, folder_date, output_folder, raw_date, unique_val, L3):
"""
Enriches additional features for each trip per trip_id.
:param clean_df: Dataframe of the cleaner version (from QA/QC) of the GTFS-RT per transit route.
:param undiss_df: The undissolved shapefile read as a spatial dataframe of the transit route.
:param stp_df: The stop csv file as a dataframe of the transit route.
:param stop_times: The schedule (from GTFS static) per stop_id per trip_id (vehicle's id associated to transit rte).
:param folder_date: The date that belongs to the static GTFS update across the project directory (e.g., 0_external/2021-11-17).
:param output_folder: Contents exported & stored in the output folder.
:param raw_date: The date of the raw GTFS-RT.
:param unique_val: The unique-rte currently inspecting.
:param L3: The list that is part of the Manager in Multiprocessing.
:return: An enriched dataframe filled with stop information, index information, point info., time info., and spatial info.
"""
self.enrich_df = self._main(df=clean_df,
undiss_df=undiss_df,
stp_df=stp_df,
stop_times=stop_times,
folder_date=folder_date,
output_folder=output_folder,
raw_date=raw_date,
unique_val=unique_val,
L3=L3)
def _get_max_seq_idx(self, stp_df: DataFrame, undiss_df):
"""
Validate if the max stop sequence exists in undissolved - performs another layer of QA/QC with the static GTFS data.
Sometimes the static GTFS files are not entirely accurate - for instance, the terminus is supposed to be the
63rd stop of the route, but it may only have 62. This function will determine terminus status based on the data provided.
Acquires max stop sequence number, max index value of the undissolved segment, and statement validation match (true or false).
:param stp_df: The stop csv file as a dataframe of the transit route.
:param undiss_df: The undissolved shapefile read as a spatial dataframe of the transit route.
:return: A tuple - max stop sequence number, max index value of the undissolved segment, and statement validation match.
"""
try:
# Get true max stop sequence & index
max_stp_seq = max(stp_df['stop_seque'])
max_idx_seg = max(undiss_df.query('stop_seque == @max_stp_seq')['index'])
true_max_stp = True
return (max_stp_seq, max_idx_seg, true_max_stp)
except Exception as e:
# If not true, then find the next highest using the undissolved shapefile of the transit route
max_stp_seq = max(undiss_df['stop_seque'])
max_idx_seg = max(undiss_df.query('stop_seque == @max_stp_seq')['index'])
true_max_stp = False
return (max_stp_seq, max_idx_seg, true_max_stp)
def _last_clean(self, df: DataFrame):
"""
Removing incorrect observations - unordered trends - final sweep.
:param df: DataFrame by grouped trip_id.
:return: Cleaner DataFrame.
"""
return (
df
.groupby(['trip_id', 'stop_seque'], as_index=False)
.apply(lambda e: e.assign(Idx_Diff = lambda d: d['index'].diff(1)))
.sort_values(['trip_id', 'barcode', 'Local_Time'])
.query('Idx_Diff >= 0 or Idx_Diff.isnull()', engine='python')
)
def _set_mvmt(self, idx_diff, stp_diff, stp_seq, max_stp_seq):
"""
Identify the movement status of the vehicle by comparing to the next observation (consecutive).
:param idx_diff: The value from the index difference (Idx_Diff) column.
:param stp_diff: The value from the stop sequence difference (Stp_Diff) column.
:param stp_seq: The value from the stop sequence (stop_seque) column.
:param max_stp_seg: The max value from the max. stop sequence (MaxStpSeq) column.
:return: Pre-determined (require distance delta to confirm) status of the vehicle's movement.
"""
# If index difference or stop sequence difference is zero, and current stop sequence is equal to max stop sequence
# Set to Terminus
if (idx_diff == 0 or stp_diff == 0) and (int(stp_seq) == max_stp_seq):
return "Terminus"
else:
# If current stop sequence is equal to max stop sequence, and the index and stop sequence difference is greater than 0.
# Set to Terminus
if (int(stp_seq) == max_stp_seq) and (idx_diff > 0 or stp_diff > 0):
return "Terminus"
# Set to terminus if the current stop sequence is equal to the max stop sequence.
elif int(stp_seq) == max_stp_seq:
return "Terminus"
# Otherwise set to stationary (idling/very slow movement) if the vehicle has zero index and stop sequence difference.
elif idx_diff == 0 and stp_diff == 0:
return "Stationary"
# Otherwise set to movement if the vehicle is en-transit.
else:
return "Movement"
def _eval_pnts(self, pnt, pnt_2):
"""
Preparation to estimate the distance travelled between two consecutive snapped points.
Applies only to the consecutive pair that have been flagged as stationary-stationary,
stationary-movement, and stationary-terminus.
It validates whether the vehicle has truly been idled en-transit.
:param pnt: Snapped point (str) of the vehicle's location.
:param pnt_2: The second snapped point (consecutive - str) of the vehicle's location.
:return: A tuple of extracted points (x, y) and wkid.
"""
eval_1 = eval(pnt)
eval_2 = eval(pnt_2)
wkid = eval_1['spatialReference']['wkid']
x1 = eval_1['x']
y1 = eval_1['y']
x2 = eval_2['x']
y2 = eval_2['y']
return (y1, x1, y2, x2, wkid)
def _get_dist(self, stat, stat_2, pnt, pnt_2):
"""
Uses the self._eval_pnts function to extract snapped consecutive vehicle locations.
Estimates the distance between the two snapped consecutive vehicle locations via the SpatialDelta class.
Applies only to the consecutive movements for validation: stationary-stationary, stationary-movement, stationary-terminus.
:param stat: Vehicle's location status.
:param stat_2: The next vehicle's location status.
:param pnt: Snapped point (str) vehicle's location.
:param pnt_2: Snapped point (str) of the next vehicle's location status.
:return: Distance in meters or None if not applicable.
"""
try:
if (stat == 'Stationary' and stat_2 == 'Stationary') or \
(stat == 'Stationary' and stat_2 == 'Terminus') or \
(stat == 'Stationary' and stat_2 == 'Movement'):
info = self._eval_pnts(pnt, pnt_2)
paths = [[[info[1], info[0]], [info[3], info[2]]]]
dist = SpatialDelta(paths=paths, wkid=info[4]).dist
# If the distance is less than or equal 20 meters, then it is considered idle/ very slow transit.
if dist <= 20:
return dist
else:
return None
else:
return None
except Exception as e:
return None
def _main(self, df: DataFrame, undiss_df, stp_df: DataFrame, stop_times: DataFrame, folder_date, output_folder, raw_date, unique_val, L3):
"""
The main function that enriches the vehicle's travel/recording.
:param df: Dataframe of the cleaner version (from QA/QC) of the GTFS-RT per transit route.
:param undiss_df: The undissolved shapefile read as a spatial dataframe of the transit route.
:param stp_df: The stop csv file as a dataframe of the transit route.
:param stop_times: The schedule per stop_id (transit stop) per trip_id (vehicle's id associated to transit rte).
:param folder_date: The date that belongs to the static GTFS update across the project directory (e.g., 0_external/2021-11-17).
:param output_folder: Contents exported & stored in the output folder.
:param raw_date: The date of the raw GTFS-RT.
:param unique_val: The unique-rte currently inspecting.
:param L3: The list that is part of the Manager in Multiprocessing.
:return: DataFrame of the GTFS-RT per transit route with enriched data.
"""
get_max_info = self._get_max_seq_idx(stp_df=stp_df, undiss_df=undiss_df)
suppl_df = (
df
.merge(stop_times, on=['trip_id', 'stop_id'], how='left', validate='m:m') # Merge with the scheduled GTFS file.
.drop_duplicates(['Local_Time']) # Reduce unnecessary observations if duplicates arise.
.drop(columns=['pickup_type', 'drop_off_type', 'shape_dist_traveled', 'timepoint']) # Remove unnecessary fields.
.assign(MaxIndex = get_max_info[1], # Get max index value of the transit route's undissolved segment.
MaxStpSeq = get_max_info[0], # Get max stop sequence of the transit route.
true_max_stp = get_max_info[2]) # Indicate if the max stop is true - whether undissolved's stop sequence match with the stop sequence from the stop csv file - a warning of GTFS quality & determine terminus.
.pipe(lambda d: self._last_clean(df=d)) # Another sweep of QA/QC
.pipe(lambda d: self._last_clean(df=d)) # Another sweep of QA/QC
.pipe(lambda d: self._last_clean(df=d)) # Final sweep of QA/QC
.drop(columns = ['Idx_Diff'])
.groupby(['trip_id'], as_index=False)
.apply(lambda e: e.assign(Idx_Left = lambda d: d['MaxIndex'] - d['index'], # Find how many indices the vehicle of the trip_id has left from its current record.
Stp_Left = lambda d: d['MaxStpSeq'] - d['stop_seque'], # Find how many stops the vehicle of the trip_id has left from its current record.
Idx_Diff = lambda d: d['Idx_Left'].diff(1), # The difference between index left values - potentially identifies stationary values - compares next set.
Stp_Diff = lambda d: d['Stp_Left'].diff(1), # The difference between stop left values - potentially identifies stationary values - compares next set.
Status = lambda d: d[['Idx_Diff', 'Stp_Diff', 'stop_seque', 'MaxStpSeq']].apply(lambda r: self._set_mvmt(*r), axis=1), # Pre-determine movement status of the vehicle (will require distance as well).
val = 1, # Set value
idx = lambda d: d['val'].cumsum(), # Cumulate the number of vehicle movements (aka - recordings; not original after QA/QC) per trip_id
stat_shift = lambda d: d['Status'].shift(-1), # Shift the Status column up by 1 - consecutive pair comparison of movement status.
pnt_shift = lambda d: d['point'].shift(-1), # Shift the point column up by 1 - consecutive pair comparison of distance via haversine in future
time_shift = lambda d: d['Local_Time'].shift(-1), # Shift the Local_Time column up by 1 - consecutive recorded time pair comparison via timedelta.
delta_time = lambda d: d[['Local_Time', 'time_shift']].apply(lambda r: TimeDelta(*r).change_time, axis=1), # Get the time delta between consecutive pair.
delta_dist = lambda d: d[['Status', 'stat_shift', 'point', 'pnt_shift']].apply(lambda r: self._get_dist(*r), axis=1))) # Get the delta distance between consecutive pair - applies only stationary
.drop(columns=['val'])
[['trip_id', 'idx', 'barcode', 'Status', 'stat_shift', # trip_id - to movement status
'stop_id', 'stop_seque', 'MaxStpSeq', 'true_max_stp', 'Stp_Left', 'Stp_Diff', # stop information
'objectid', 'index', 'MaxIndex', 'Idx_Left', 'Idx_Diff', # index information of the undissolved segment
'x', 'y', 'wkid', 'point', 'pnt_shift', # Point information
'Local_Time', 'time_shift', 'delta_time', 'arrival_time', 'departure_time', # time information
'delta_dist', 'SHAPE' # spatial information (delta_dist = dist. covered; SHAPE = polyline of undissolved seg.)
]]
)
ori_length = df.shape[0]
fin_length = suppl_df.shape[0]
retained = round((fin_length / ori_length)*100, 2)
file_name = f"{output_folder}/{raw_date}_{unique_val}_processed.csv"
suppl_df.to_csv(file_name, index=False)
L3.append(f"{unique_val},{raw_date},{folder_date},{retained}")
return suppl_df | 0 | 12,571 | 24 |
9f3f5d7cb3da60a58ddf422c60347e212e92b80d | 2,982 | py | Python | kwiklib/dataio/probe.py | fiath/test | b50898dafa90e93da48f573e0b3feb1bb6acd8de | [
"MIT",
"BSD-3-Clause"
] | 7 | 2015-01-20T13:55:51.000Z | 2018-02-06T09:31:21.000Z | kwiklib/dataio/probe.py | fiath/test | b50898dafa90e93da48f573e0b3feb1bb6acd8de | [
"MIT",
"BSD-3-Clause"
] | 6 | 2015-01-08T18:13:53.000Z | 2016-06-22T09:53:53.000Z | kwiklib/dataio/probe.py | fiath/test | b50898dafa90e93da48f573e0b3feb1bb6acd8de | [
"MIT",
"BSD-3-Clause"
] | 8 | 2015-01-22T22:57:19.000Z | 2020-03-19T11:43:56.000Z | """This module provides functions used to generate and load probe files."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import pprint
import itertools
from tools import MemMappedText, MemMappedBinary
# -----------------------------------------------------------------------------
# Probe file functions
# -----------------------------------------------------------------------------
def generate_probe(channel_groups, topology='linear'):
"""channel_groups is a dict {channel_group: nchannels}."""
if not isinstance(channel_groups, dict):
channel_groups = {0: channel_groups}
groups = sorted(channel_groups.keys())
r = {}
curchannel = 0
for i in range(len(groups)):
id = groups[i] # channel group index
n = channel_groups[id] # number of channels
channels = range(curchannel, curchannel + n)
if topology == 'linear':
graph = [[ch, ch + 1] for ch in channels[:-1]]
elif topology == 'complete':
graph = map(list, list(itertools.product(channels, repeat=2)))
geometry = {channels[_]: [float(i), float(_)]
for _ in range(n)}
d = {'channels': channels,
'graph': graph,
'geometry': geometry,
}
r[id] = d
curchannel += n
return r
def old_to_new(probe_ns):
"""Convert from the old Python .probe format to the new .PRB format."""
graph = probe_ns['probes']
shanks = sorted(graph.keys())
if 'geometry' in probe_ns:
geometry = probe_ns['geometry']
else:
geometry = None
# Find the list of shanks.
shank_channels = {shank: flatten(graph[shank]) for shank in shanks}
# Find the list of channels.
channels = flatten(shank_channels.values())
nchannels = len(channels)
# Create JSON dictionary.
channel_groups = {
shank: {
'channels': shank_channels[shank],
'graph': graph[shank],
}
for shank in shanks
}
# Add the geometry if it exists.
if geometry:
# Find out if there's one geometry per shank, or a common geometry
# for all shanks.
for k, d in channel_groups.iteritems():
multiple = k in geometry and isinstance(geometry[k], dict)
if multiple:
d['geometry'] = geometry[k]
else:
d['geometry'] = geometry
return channel_groups
| 32.413043 | 79 | 0.514085 | """This module provides functions used to generate and load probe files."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import pprint
import itertools
from tools import MemMappedText, MemMappedBinary
# -----------------------------------------------------------------------------
# Probe file functions
# -----------------------------------------------------------------------------
def flatten(l):
return sorted(set([item for sublist in l for item in sublist]))
def generate_probe(channel_groups, topology='linear'):
"""channel_groups is a dict {channel_group: nchannels}."""
if not isinstance(channel_groups, dict):
channel_groups = {0: channel_groups}
groups = sorted(channel_groups.keys())
r = {}
curchannel = 0
for i in range(len(groups)):
id = groups[i] # channel group index
n = channel_groups[id] # number of channels
channels = range(curchannel, curchannel + n)
if topology == 'linear':
graph = [[ch, ch + 1] for ch in channels[:-1]]
elif topology == 'complete':
graph = map(list, list(itertools.product(channels, repeat=2)))
geometry = {channels[_]: [float(i), float(_)]
for _ in range(n)}
d = {'channels': channels,
'graph': graph,
'geometry': geometry,
}
r[id] = d
curchannel += n
return r
def load_probe(filename):
prb = {}
execfile(filename, {}, prb)
return prb['channel_groups']
def save_probe(filename, prb):
with open(filename, 'w') as f:
f.write('channel_groups = ' + str(prb))
def old_to_new(probe_ns):
"""Convert from the old Python .probe format to the new .PRB format."""
graph = probe_ns['probes']
shanks = sorted(graph.keys())
if 'geometry' in probe_ns:
geometry = probe_ns['geometry']
else:
geometry = None
# Find the list of shanks.
shank_channels = {shank: flatten(graph[shank]) for shank in shanks}
# Find the list of channels.
channels = flatten(shank_channels.values())
nchannels = len(channels)
# Create JSON dictionary.
channel_groups = {
shank: {
'channels': shank_channels[shank],
'graph': graph[shank],
}
for shank in shanks
}
# Add the geometry if it exists.
if geometry:
# Find out if there's one geometry per shank, or a common geometry
# for all shanks.
for k, d in channel_groups.iteritems():
multiple = k in geometry and isinstance(geometry[k], dict)
if multiple:
d['geometry'] = geometry[k]
else:
d['geometry'] = geometry
return channel_groups
| 236 | 0 | 76 |
78beb1fb64ac738431067cfbf5396c7525caf013 | 1,770 | py | Python | evaluatePath.py | ForrestHurley/nBodySystem | 72e77665f5e181811a111872debd50eb9b263fa2 | [
"MIT"
] | null | null | null | evaluatePath.py | ForrestHurley/nBodySystem | 72e77665f5e181811a111872debd50eb9b263fa2 | [
"MIT"
] | null | null | null | evaluatePath.py | ForrestHurley/nBodySystem | 72e77665f5e181811a111872debd50eb9b263fa2 | [
"MIT"
] | null | null | null | import numpy as np
| 35.4 | 93 | 0.650847 | import numpy as np
class body_value(object):
def __init__(self, body = 599, value = 100):
self.body = body
self.value = value
def get_distances(self, rocket_locs, body_locs):
body_locs = np.expand_dims(body_locs, axis = 1)
distances = np.linalg.norm(rocket_locs - body_locs, axis = -1)
return distances
def __call__(self, times, rocket_locs, body_locs):
distances = self.get_distances(rocket_locs, body_locs)
total = np.sum( np.expand_dims(times, 1) * distances, axis = 0 )
reciprocal = self.value / total
return reciprocal
class distance_threshold(body_value):
def __init__(self, distance = 1e8, *args, **kwargs):
super().__init__(*args, **kwargs)
self.distance = distance
def __call__(self, times, rocket_locs, body_locs):
distances = self.get_distance(rocket_locs, body_locs)
in_radius = distances < self.distance
total_time_in = np.sum( np.expand_dims(times, 1) * in_radius, axis = 0 )
return self.value * total_time_in
class path_evaluator(object):
def __init__(self, ephemerides, value_list = None):
self.ephemerides = ephemerides
self.value_list = value_list
def __call__(self, times, states):
scores = np.zeros(shape = states.shape[1])
body_set = list(set(value_check.body for value_check in self.value_list))
body_locs = {body : locs for body, locs in
zip(body_set, self.ephemerides.object_paths(objects = body_set, times = times)) }
locations = np.take(states, 0, axis = -2)
for value_check in self.value_list:
scores += value_check(times, locations, body_locs[value_check.body])
return scores
| 1,451 | 28 | 259 |
4032e29b743868d23c95303949c7f8be39e46b7a | 6,653 | py | Python | tests/components/mfi/test_sensor.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 7 | 2019-08-15T13:36:58.000Z | 2020-03-18T10:46:29.000Z | tests/components/mfi/test_sensor.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 100 | 2020-06-17T22:22:41.000Z | 2022-03-31T06:24:19.000Z | tests/components/mfi/test_sensor.py | winning1120xx/home-assistant | 53d4c0ce2d374b5e97bbdc37742656c27adf8eea | [
"Apache-2.0"
] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z | """The tests for the mFi sensor platform."""
import unittest.mock as mock
from mficlient.client import FailedToLogin
import pytest
import requests
import homeassistant.components.mfi.sensor as mfi
import homeassistant.components.sensor as sensor_component
from homeassistant.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS
from homeassistant.setup import async_setup_component
PLATFORM = mfi
COMPONENT = sensor_component
THING = "sensor"
GOOD_CONFIG = {
"sensor": {
"platform": "mfi",
"host": "foo",
"port": 6123,
"username": "user",
"password": "pass",
"ssl": True,
"verify_ssl": True,
}
}
async def test_setup_missing_config(hass):
"""Test setup with missing configuration."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
config = {"sensor": {"platform": "mfi"}}
assert await async_setup_component(hass, "sensor", config)
assert not mock_client.called
async def test_setup_failed_login(hass):
"""Test setup with login failure."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
mock_client.side_effect = FailedToLogin
assert not PLATFORM.setup_platform(hass, dict(GOOD_CONFIG), None)
async def test_setup_failed_connect(hass):
"""Test setup with connection failure."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
mock_client.side_effect = requests.exceptions.ConnectionError
assert not PLATFORM.setup_platform(hass, dict(GOOD_CONFIG), None)
async def test_setup_minimum(hass):
"""Test setup with minimum configuration."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
config = dict(GOOD_CONFIG)
del config[THING]["port"]
assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
await hass.async_block_till_done()
assert mock_client.call_count == 1
assert mock_client.call_args == mock.call(
"foo", "user", "pass", port=6443, use_tls=True, verify=True
)
async def test_setup_with_port(hass):
"""Test setup with port."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
config = dict(GOOD_CONFIG)
config[THING]["port"] = 6123
assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
await hass.async_block_till_done()
assert mock_client.call_count == 1
assert mock_client.call_args == mock.call(
"foo", "user", "pass", port=6123, use_tls=True, verify=True
)
async def test_setup_with_tls_disabled(hass):
"""Test setup without TLS."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
config = dict(GOOD_CONFIG)
del config[THING]["port"]
config[THING]["ssl"] = False
config[THING]["verify_ssl"] = False
assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
await hass.async_block_till_done()
assert mock_client.call_count == 1
assert mock_client.call_args == mock.call(
"foo", "user", "pass", port=6080, use_tls=False, verify=False
)
async def test_setup_adds_proper_devices(hass):
"""Test if setup adds devices."""
with mock.patch(
"homeassistant.components.mfi.sensor.MFiClient"
) as mock_client, mock.patch(
"homeassistant.components.mfi.sensor.MfiSensor", side_effect=mfi.MfiSensor
) as mock_sensor:
ports = {
i: mock.MagicMock(model=model, label=f"Port {i}", value=0)
for i, model in enumerate(mfi.SENSOR_MODELS)
}
ports["bad"] = mock.MagicMock(model="notasensor")
mock_client.return_value.get_devices.return_value = [
mock.MagicMock(ports=ports)
]
assert await async_setup_component(hass, COMPONENT.DOMAIN, GOOD_CONFIG)
await hass.async_block_till_done()
for ident, port in ports.items():
if ident != "bad":
mock_sensor.assert_any_call(port, hass)
assert mock.call(ports["bad"], hass) not in mock_sensor.mock_calls
@pytest.fixture(name="port")
def port_fixture():
"""Port fixture."""
return mock.MagicMock()
@pytest.fixture(name="sensor")
def sensor_fixture(hass, port):
"""Sensor fixture."""
sensor = mfi.MfiSensor(port, hass)
sensor.hass = hass
return sensor
async def test_name(port, sensor):
"""Test the name."""
assert port.label == sensor.name
async def test_uom_temp(port, sensor):
"""Test the UOM temperature."""
port.tag = "temperature"
assert sensor.unit_of_measurement == TEMP_CELSIUS
assert sensor.device_class == DEVICE_CLASS_TEMPERATURE
async def test_uom_power(port, sensor):
    """Test the UOM for power ports."""
    port.tag = "active_pwr"
    assert sensor.unit_of_measurement == "Watts"
    assert sensor.device_class is None
async def test_uom_digital(port, sensor):
"""Test the UOM digital input."""
port.model = "Input Digital"
assert sensor.unit_of_measurement == "State"
assert sensor.device_class is None
async def test_uom_unknown(port, sensor):
"""Test the UOM."""
port.tag = "balloons"
assert sensor.unit_of_measurement == "balloons"
assert sensor.device_class is None
async def test_uom_uninitialized(port, sensor):
"""Test that the UOM defaults if not initialized."""
type(port).tag = mock.PropertyMock(side_effect=ValueError)
assert sensor.unit_of_measurement == "State"
assert sensor.device_class is None
async def test_state_digital(port, sensor):
    """Test the state reported by a digital input port."""
    port.model = "Input Digital"
    # A raw value of zero reads as "off"; any non-zero value reads as "on".
    port.value = 0
    assert sensor.state == mfi.STATE_OFF
    for raw_value in (1, 2):
        port.value = raw_value
        assert sensor.state == mfi.STATE_ON
async def test_state_digits(port, sensor):
"""Test the state of digits."""
port.tag = "didyoucheckthedict?"
port.value = 1.25
with mock.patch.dict(mfi.DIGITS, {"didyoucheckthedict?": 1}):
assert sensor.state == 1.2
with mock.patch.dict(mfi.DIGITS, {}):
assert sensor.state == 1.0
async def test_state_uninitialized(port, sensor):
    """Test the state of uninitialized sensors."""
    # Reading port.tag raises ValueError until the port has reported data.
    type(port).tag = mock.PropertyMock(side_effect=ValueError)
    assert mfi.STATE_OFF == sensor.state
async def test_update(port, sensor):
"""Test the update."""
sensor.update()
assert port.refresh.call_count == 1
assert port.refresh.call_args == mock.call()
| 33.099502 | 84 | 0.681647 | """The tests for the mFi sensor platform."""
import unittest.mock as mock
from mficlient.client import FailedToLogin
import pytest
import requests
import homeassistant.components.mfi.sensor as mfi
import homeassistant.components.sensor as sensor_component
from homeassistant.const import DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS
from homeassistant.setup import async_setup_component
PLATFORM = mfi
COMPONENT = sensor_component
THING = "sensor"
GOOD_CONFIG = {
"sensor": {
"platform": "mfi",
"host": "foo",
"port": 6123,
"username": "user",
"password": "pass",
"ssl": True,
"verify_ssl": True,
}
}
async def test_setup_missing_config(hass):
"""Test setup with missing configuration."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
config = {"sensor": {"platform": "mfi"}}
assert await async_setup_component(hass, "sensor", config)
assert not mock_client.called
async def test_setup_failed_login(hass):
"""Test setup with login failure."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
mock_client.side_effect = FailedToLogin
assert not PLATFORM.setup_platform(hass, dict(GOOD_CONFIG), None)
async def test_setup_failed_connect(hass):
"""Test setup with connection failure."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
mock_client.side_effect = requests.exceptions.ConnectionError
assert not PLATFORM.setup_platform(hass, dict(GOOD_CONFIG), None)
async def test_setup_minimum(hass):
"""Test setup with minimum configuration."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
config = dict(GOOD_CONFIG)
del config[THING]["port"]
assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
await hass.async_block_till_done()
assert mock_client.call_count == 1
assert mock_client.call_args == mock.call(
"foo", "user", "pass", port=6443, use_tls=True, verify=True
)
async def test_setup_with_port(hass):
"""Test setup with port."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
config = dict(GOOD_CONFIG)
config[THING]["port"] = 6123
assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
await hass.async_block_till_done()
assert mock_client.call_count == 1
assert mock_client.call_args == mock.call(
"foo", "user", "pass", port=6123, use_tls=True, verify=True
)
async def test_setup_with_tls_disabled(hass):
"""Test setup without TLS."""
with mock.patch("homeassistant.components.mfi.sensor.MFiClient") as mock_client:
config = dict(GOOD_CONFIG)
del config[THING]["port"]
config[THING]["ssl"] = False
config[THING]["verify_ssl"] = False
assert await async_setup_component(hass, COMPONENT.DOMAIN, config)
await hass.async_block_till_done()
assert mock_client.call_count == 1
assert mock_client.call_args == mock.call(
"foo", "user", "pass", port=6080, use_tls=False, verify=False
)
async def test_setup_adds_proper_devices(hass):
"""Test if setup adds devices."""
with mock.patch(
"homeassistant.components.mfi.sensor.MFiClient"
) as mock_client, mock.patch(
"homeassistant.components.mfi.sensor.MfiSensor", side_effect=mfi.MfiSensor
) as mock_sensor:
ports = {
i: mock.MagicMock(model=model, label=f"Port {i}", value=0)
for i, model in enumerate(mfi.SENSOR_MODELS)
}
ports["bad"] = mock.MagicMock(model="notasensor")
mock_client.return_value.get_devices.return_value = [
mock.MagicMock(ports=ports)
]
assert await async_setup_component(hass, COMPONENT.DOMAIN, GOOD_CONFIG)
await hass.async_block_till_done()
for ident, port in ports.items():
if ident != "bad":
mock_sensor.assert_any_call(port, hass)
assert mock.call(ports["bad"], hass) not in mock_sensor.mock_calls
@pytest.fixture(name="port")
def port_fixture():
"""Port fixture."""
return mock.MagicMock()
@pytest.fixture(name="sensor")
def sensor_fixture(hass, port):
"""Sensor fixture."""
sensor = mfi.MfiSensor(port, hass)
sensor.hass = hass
return sensor
async def test_name(port, sensor):
"""Test the name."""
assert port.label == sensor.name
async def test_uom_temp(port, sensor):
"""Test the UOM temperature."""
port.tag = "temperature"
assert sensor.unit_of_measurement == TEMP_CELSIUS
assert sensor.device_class == DEVICE_CLASS_TEMPERATURE
async def test_uom_power(port, sensor):
    """Test the UOM for power ports."""
    port.tag = "active_pwr"
    assert sensor.unit_of_measurement == "Watts"
    assert sensor.device_class is None
async def test_uom_digital(port, sensor):
"""Test the UOM digital input."""
port.model = "Input Digital"
assert sensor.unit_of_measurement == "State"
assert sensor.device_class is None
async def test_uom_unknown(port, sensor):
"""Test the UOM."""
port.tag = "balloons"
assert sensor.unit_of_measurement == "balloons"
assert sensor.device_class is None
async def test_uom_uninitialized(port, sensor):
"""Test that the UOM defaults if not initialized."""
type(port).tag = mock.PropertyMock(side_effect=ValueError)
assert sensor.unit_of_measurement == "State"
assert sensor.device_class is None
async def test_state_digital(port, sensor):
"""Test the digital input."""
port.model = "Input Digital"
port.value = 0
assert mfi.STATE_OFF == sensor.state
port.value = 1
assert mfi.STATE_ON == sensor.state
port.value = 2
assert mfi.STATE_ON == sensor.state
async def test_state_digits(port, sensor):
"""Test the state of digits."""
port.tag = "didyoucheckthedict?"
port.value = 1.25
with mock.patch.dict(mfi.DIGITS, {"didyoucheckthedict?": 1}):
assert sensor.state == 1.2
with mock.patch.dict(mfi.DIGITS, {}):
assert sensor.state == 1.0
async def test_state_uninitialized(port, sensor):
    """Test the state of uninitialized sensors."""
    # Reading port.tag raises ValueError until the port has reported data.
    type(port).tag = mock.PropertyMock(side_effect=ValueError)
    assert mfi.STATE_OFF == sensor.state
async def test_update(port, sensor):
"""Test the update."""
sensor.update()
assert port.refresh.call_count == 1
assert port.refresh.call_args == mock.call()
| 0 | 0 | 0 |
6af98a51ccaa0e397fde107282bf1f5ba5d8d7b8 | 6,884 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/module_utils/network/eos/config/lacp_interfaces/lacp_interfaces.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/module_utils/network/eos/config/lacp_interfaces/lacp_interfaces.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/module_utils/network/eos/config/lacp_interfaces/lacp_interfaces.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | # -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The eos_lacp_interfaces class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import to_list, dict_diff, param_list_to_dict
from ansible.module_utils.network.eos.facts.facts import Facts
from ansible.module_utils.network.eos.utils.utils import normalize_interface
class Lacp_interfaces(ConfigBase):
"""
The eos_lacp_interfaces class
"""
gather_subset = [
'!all',
'!min',
]
gather_network_resources = [
'lacp_interfaces',
]
def get_lacp_interfaces_facts(self):
""" Get the 'facts' (the current configuration)
:rtype: A dictionary
:returns: The current configuration as a dictionary
"""
facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
lacp_interfaces_facts = facts['ansible_network_resources'].get('lacp_interfaces')
if not lacp_interfaces_facts:
return []
return lacp_interfaces_facts
def execute_module(self):
""" Execute the module
:rtype: A dictionary
:returns: The result from module execution
"""
result = {'changed': False}
warnings = list()
commands = list()
existing_lacp_interfaces_facts = self.get_lacp_interfaces_facts()
commands.extend(self.set_config(existing_lacp_interfaces_facts))
if commands:
if not self._module.check_mode:
self._connection.edit_config(commands)
result['changed'] = True
result['commands'] = commands
changed_lacp_interfaces_facts = self.get_lacp_interfaces_facts()
result['before'] = existing_lacp_interfaces_facts
if result['changed']:
result['after'] = changed_lacp_interfaces_facts
result['warnings'] = warnings
return result
def set_config(self, existing_lacp_interfaces_facts):
""" Collect the configuration from the args passed to the module,
collect the current configuration (as a dict from facts)
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
want = self._module.params['config']
have = existing_lacp_interfaces_facts
resp = self.set_state(want, have)
return to_list(resp)
def set_state(self, want, have):
""" Select the appropriate function based on the state provided
:param want: the desired configuration as a dictionary
:param have: the current configuration as a dictionary
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
state = self._module.params['state']
want = param_list_to_dict(want)
have = param_list_to_dict(have)
if state == 'overridden':
commands = self._state_overridden(want, have)
elif state == 'deleted':
commands = self._state_deleted(want, have)
elif state == 'merged':
commands = self._state_merged(want, have)
elif state == 'replaced':
commands = self._state_replaced(want, have)
return commands
@staticmethod
def _state_replaced(want, have):
""" The command generator when state is replaced
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
for key, desired in want.items():
interface_name = normalize_interface(key)
if interface_name in have:
extant = have[interface_name]
else:
extant = dict()
add_config = dict_diff(extant, desired)
del_config = dict_diff(desired, extant)
commands.extend(generate_commands(key, add_config, del_config))
return commands
@staticmethod
def _state_overridden(want, have):
""" The command generator when state is overridden
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
commands = []
for key, extant in have.items():
if key in want:
desired = want[key]
else:
desired = dict()
add_config = dict_diff(extant, desired)
del_config = dict_diff(desired, extant)
commands.extend(generate_commands(key, add_config, del_config))
return commands
@staticmethod
def _state_merged(want, have):
""" The command generator when state is merged
:rtype: A list
:returns: the commands necessary to merge the provided into
the current configuration
"""
commands = []
for key, desired in want.items():
interface_name = normalize_interface(key)
if interface_name in have:
extant = have[interface_name]
else:
extant = dict()
add_config = dict_diff(extant, desired)
commands.extend(generate_commands(key, add_config, {}))
return commands
@staticmethod
def _state_deleted(want, have):
""" The command generator when state is deleted
:rtype: A list
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
commands = []
for key in want:
desired = dict()
if key in have:
extant = have[key]
else:
continue
del_config = dict_diff(desired, extant)
commands.extend(generate_commands(key, {}, del_config))
return commands
| 32.018605 | 107 | 0.622894 | # -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The eos_lacp_interfaces class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import to_list, dict_diff, param_list_to_dict
from ansible.module_utils.network.eos.facts.facts import Facts
from ansible.module_utils.network.eos.utils.utils import normalize_interface
class Lacp_interfaces(ConfigBase):
    """
    The eos_lacp_interfaces resource-module class.

    Compares the requested per-interface LACP configuration against the
    device's current configuration (gathered as facts) and generates the
    CLI command set needed to reach the desired end state.
    """

    # Limit fact gathering to the single network resource this module owns.
    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'lacp_interfaces',
    ]

    def get_lacp_interfaces_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
        lacp_interfaces_facts = facts['ansible_network_resources'].get('lacp_interfaces')
        # Normalize "no facts" to an empty list so callers can iterate safely.
        if not lacp_interfaces_facts:
            return []
        return lacp_interfaces_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        commands = list()

        existing_lacp_interfaces_facts = self.get_lacp_interfaces_facts()
        commands.extend(self.set_config(existing_lacp_interfaces_facts))
        if commands:
            # Only push commands to the device outside of check mode; the
            # command list itself is always reported either way.
            if not self._module.check_mode:
                self._connection.edit_config(commands)
            result['changed'] = True
        result['commands'] = commands

        # Re-gather facts so 'after' reflects the device's post-change state.
        changed_lacp_interfaces_facts = self.get_lacp_interfaces_facts()

        result['before'] = existing_lacp_interfaces_facts
        if result['changed']:
            result['after'] = changed_lacp_interfaces_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_lacp_interfaces_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_lacp_interfaces_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        state = self._module.params['state']
        # Re-key both sides by interface name for direct dict comparison.
        want = param_list_to_dict(want)
        have = param_list_to_dict(have)
        if state == 'overridden':
            commands = self._state_overridden(want, have)
        elif state == 'deleted':
            commands = self._state_deleted(want, have)
        elif state == 'merged':
            commands = self._state_merged(want, have)
        elif state == 'replaced':
            commands = self._state_replaced(want, have)
        return commands

    @staticmethod
    def _state_replaced(want, have):
        """ The command generator when state is replaced

        Each wanted interface is made to match exactly: missing options are
        added and extra options are removed. Interfaces not named in want
        are left untouched.

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        for key, desired in want.items():
            interface_name = normalize_interface(key)
            if interface_name in have:
                extant = have[interface_name]
            else:
                extant = dict()

            # Asymmetric diffs: what to add vs. what to strip.
            add_config = dict_diff(extant, desired)
            del_config = dict_diff(desired, extant)

            commands.extend(generate_commands(key, add_config, del_config))

        return commands

    @staticmethod
    def _state_overridden(want, have):
        """ The command generator when state is overridden

        Like replaced, but iterates over every existing interface, so
        interfaces absent from want have their LACP settings removed.

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        for key, extant in have.items():
            if key in want:
                desired = want[key]
            else:
                desired = dict()

            add_config = dict_diff(extant, desired)
            del_config = dict_diff(desired, extant)

            commands.extend(generate_commands(key, add_config, del_config))

        return commands

    @staticmethod
    def _state_merged(want, have):
        """ The command generator when state is merged

        Only additions are generated; existing settings are never removed.

        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration
        """
        commands = []
        for key, desired in want.items():
            interface_name = normalize_interface(key)
            if interface_name in have:
                extant = have[interface_name]
            else:
                extant = dict()

            add_config = dict_diff(extant, desired)

            commands.extend(generate_commands(key, add_config, {}))

        return commands

    @staticmethod
    def _state_deleted(want, have):
        """ The command generator when state is deleted

        Emits only removals for the named interfaces; interfaces not present
        in have are skipped.

        NOTE(review): unlike _state_replaced/_state_merged, keys here are not
        run through normalize_interface — confirm callers pass canonical names.

        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """
        commands = []
        for key in want:
            desired = dict()
            if key in have:
                extant = have[key]
            else:
                continue

            del_config = dict_diff(desired, extant)

            commands.extend(generate_commands(key, {}, del_config))

        return commands
def generate_commands(interface, to_set, to_remove):
    """Build the CLI command list for one interface's LACP changes.

    :param interface: interface name, e.g. "Ethernet1"
    :param to_set: dict of lacp options to configure; None values are skipped
    :param to_remove: dict whose keys are lacp options to negate
    :rtype: A list
    :returns: CLI commands prefixed with the "interface" context line when
              there is at least one change, otherwise an empty list
    """
    commands = []
    for key, value in to_set.items():
        if value is None:
            # None means "not specified", not "remove"; skip it.
            continue
        # Option names use underscores internally but hyphens on the CLI.
        commands.append("lacp {0} {1}".format(key.replace("_", "-"), value))
    for key in to_remove:
        commands.append("no lacp {0}".format(key.replace("_", "-")))

    if commands:
        # Enter interface configuration context only when needed.
        commands.insert(0, "interface {0}".format(interface))

    return commands
| 416 | 0 | 23 |
e6b45d0d9e51c9dc6728577cd71a8df214401175 | 1,267 | py | Python | main.py | Dany013/meet-attender | 1e29048df95aa773953490ecc68ad41d0ae9142b | [
"MIT"
] | null | null | null | main.py | Dany013/meet-attender | 1e29048df95aa773953490ecc68ad41d0ae9142b | [
"MIT"
] | null | null | null | main.py | Dany013/meet-attender | 1e29048df95aa773953490ecc68ad41d0ae9142b | [
"MIT"
] | null | null | null | import os
import sys
import threading
import tkinter as tk
os.chdir(os.getcwd())
self = tk.Tk()
self.title("meet")
self.resizable(0, 0)
self.grid()
ch_time = tk.Button(self, text="CHANGE SCHEDULE", command=exc)
ch_time.grid(row=0, column=0, sticky="nswe")
ch_link = tk.Button(self, text="CHANGE LINKS", command=lnk)
ch_link.grid(row=0, column=1, sticky="nswe")
guid_time = tk.Button(self, text="guide schedule", command=gdsch)
guid_time.grid(row=1, column=0, sticky="nswe")
guid_link = tk.Button(self, text="guide links", command=gdlnk)
guid_link.grid(row=1, column=1, sticky="nswe")
start = tk.Button(
self, text="START", bg="green", activebackground="green", command=strt
)
start.grid(row=2, column=0, sticky="nswe")
bad_gui = tk.Label(self, text="This is a really bad GUI :)")
bad_gui.grid(row=3, column=0, sticky="nswe")
me = tk.Label(self, text="made with <3 by DanyB0")
me.grid(row=3, column=1, sticky="nswe")
if __name__ == "__main__":
self.mainloop()
| 23.90566 | 75 | 0.659826 | import os
import sys
import threading
import tkinter as tk
os.chdir(os.getcwd())
self = tk.Tk()
self.title("meet")
self.resizable(0, 0)
self.grid()
def strt():
    """Launch the attender script in a child shell (blocks until it exits)."""
    os.system("python main.py")
def exc():
    """Open the schedule spreadsheet with its associated application (Windows)."""
    os.startfile("schedule.xlsx")
def lnk():
    """Open the meeting-links JSON file with its associated application (Windows)."""
    os.startfile("meet-link.json")
def gdsch():
    """Open the schedule guide text file with its associated application (Windows)."""
    os.startfile("guide-schedule.txt")
def gdlnk():
    """Open the links guide text file with its associated application (Windows)."""
    # Consistency fix: every sibling helper opens its file via os.startfile;
    # os.system spawned a shell just to launch the document.
    os.startfile("guide-links.txt")
ch_time = tk.Button(self, text="CHANGE SCHEDULE", command=exc)
ch_time.grid(row=0, column=0, sticky="nswe")
ch_link = tk.Button(self, text="CHANGE LINKS", command=lnk)
ch_link.grid(row=0, column=1, sticky="nswe")
guid_time = tk.Button(self, text="guide schedule", command=gdsch)
guid_time.grid(row=1, column=0, sticky="nswe")
guid_link = tk.Button(self, text="guide links", command=gdlnk)
guid_link.grid(row=1, column=1, sticky="nswe")
start = tk.Button(
self, text="START", bg="green", activebackground="green", command=strt
)
start.grid(row=2, column=0, sticky="nswe")
bad_gui = tk.Label(self, text="This is a really bad GUI :)")
bad_gui.grid(row=3, column=0, sticky="nswe")
me = tk.Label(self, text="made with <3 by DanyB0")
me.grid(row=3, column=1, sticky="nswe")
if __name__ == "__main__":
self.mainloop()
| 128 | 0 | 125 |
49a3aeb2c56029fdc26f4572ef6ebb391bf9054b | 7,621 | py | Python | outrun/tests/test_filesystem/test_caching/test_cache.py | Jacke/outrun | c67779b4c8c3f1095e84158b10a5307a443936a2 | [
"Apache-2.0"
] | 3,070 | 2020-07-14T21:43:05.000Z | 2022-03-30T05:10:35.000Z | outrun/tests/test_filesystem/test_caching/test_cache.py | Jacke/outrun | c67779b4c8c3f1095e84158b10a5307a443936a2 | [
"Apache-2.0"
] | 17 | 2020-07-19T21:46:13.000Z | 2021-12-27T16:18:38.000Z | outrun/tests/test_filesystem/test_caching/test_cache.py | Jacke/outrun | c67779b4c8c3f1095e84158b10a5307a443936a2 | [
"Apache-2.0"
] | 61 | 2020-07-23T23:34:00.000Z | 2022-02-13T01:28:25.000Z | import contextlib
import dataclasses
import os
import time
from unittest import mock
from outrun.filesystem.caching.service import LocalCacheService
from outrun.filesystem.caching.cache import CacheEntry, RemoteCache
| 25.235099 | 80 | 0.650833 | import contextlib
import dataclasses
import os
import time
from unittest import mock
from outrun.filesystem.caching.service import LocalCacheService
from outrun.filesystem.caching.cache import CacheEntry, RemoteCache
def create_cache(tmp_path, **override_args):
    """Build a RemoteCache rooted under tmp_path, with overridable defaults."""
    args = dict(
        base_path=str(tmp_path / "cache"),
        machine_id="machine",
        client=LocalCacheService(),
        prefetch=False,
        max_entries=1024,
        max_size=1024 * 1024,
        cacheable_paths=["/"],
    )
    # Keyword overrides win over the defaults above.
    args.update(override_args)
    return RemoteCache(**args)
def test_cache_entry_newer_than():
    """newer_than() must order entries by their update timestamp."""
    entry_a = CacheEntry("a", LocalCacheService().get_metadata("/"))
    entry_b = CacheEntry("b", LocalCacheService().get_metadata("/"))

    # b was constructed after a, so it should compare as newer
    # (presumably CacheEntry stamps last_update at construction — see cache.py).
    assert entry_b.newer_than(entry_a)

    # Refreshing a's timestamp makes b no longer newer.
    entry_a.last_update = time.time()
    assert not entry_b.newer_than(entry_a)
def test_concurrent_cache_get_metadata(tmp_path):
meta = LocalCacheService().get_metadata("/")
meta = dataclasses.replace(meta, attr=meta.attr.as_readonly())
mock_client = mock.Mock()
mock_client.get_metadata.return_value = meta
(tmp_path / "cache").mkdir()
cache = create_cache(tmp_path, client=mock_client)
for _ in range(10):
assert cache.get_metadata("/") == meta
assert mock_client.get_metadata.call_count == 1
def test_concurrent_cache_open_content(tmp_path):
fs = LocalCacheService()
(tmp_path / "cache").mkdir()
(tmp_path / "hello").write_text("world")
mock_client = mock.Mock()
mock_client.readfile.side_effect = fs.readfile
cache = create_cache(tmp_path, client=mock_client)
for _ in range(10):
fd = cache.open_contents(str(tmp_path / "hello"), os.O_RDONLY)
try:
os.lseek(fd, 0, 0)
assert os.read(fd, 1024) == b"world"
finally:
os.close(fd)
assert mock_client.readfile.call_count == 1
def test_concurrent_cache_load_save(tmp_path):
meta = LocalCacheService().get_metadata("/")
meta = dataclasses.replace(meta, attr=meta.attr.as_readonly())
mock_client = mock.Mock()
mock_client.get_metadata.return_value = meta
(tmp_path / "cache").mkdir()
cache_a = create_cache(tmp_path, client=mock_client)
assert cache_a.get_metadata("/") == meta
cache_a.save()
cache_b = create_cache(tmp_path, client=mock_client)
cache_b.load()
assert cache_b.get_metadata("/") == meta
assert mock_client.get_metadata.call_count == 1
def test_concurrent_cache_per_machine(tmp_path):
meta = LocalCacheService().get_metadata("/")
meta = dataclasses.replace(meta, attr=meta.attr.as_readonly())
mock_client = mock.Mock()
mock_client.get_metadata.return_value = meta
(tmp_path / "cache").mkdir()
cache_a = create_cache(tmp_path, machine_id="machine_a", client=mock_client)
assert cache_a.get_metadata("/") == meta
cache_a.save()
cache_b = create_cache(tmp_path, machine_id="machine_b", client=mock_client)
cache_b.load()
assert cache_b.get_metadata("/") == meta
assert mock_client.get_metadata.call_count == 2
def test_concurrent_cache_lru_entries(tmp_path):
(tmp_path / "cache").mkdir()
cache = create_cache(tmp_path, max_entries=3)
for x in ["a", "b", "c", "d"]:
with contextlib.suppress(OSError):
cache.get_metadata(f"/{x}")
cache.save()
cache.load()
assert cache.count() == 3
assert cache.size() == 0
def test_concurrent_cache_lru_size(tmp_path):
(tmp_path / "cache").mkdir()
cache = create_cache(tmp_path, max_size=3)
for x in ["a", "b", "c", "d"]:
(tmp_path / x).write_text(" ")
for x in ["a", "b", "c", "d"]:
fd = cache.open_contents(str(tmp_path / x), os.O_RDONLY)
os.close(fd)
cache.save()
cache.load()
assert cache.count() == 4
assert cache.size() == 3
def test_concurrent_cache_content_cleanup(tmp_path):
    """Saving a fresh cache without merging must clean up orphaned content files."""
    (tmp_path / "cache").mkdir()

    cache = create_cache(tmp_path, max_size=3)

    for x in ["a", "b", "c", "d"]:
        (tmp_path / x).write_text("123")

    for x in ["a", "b", "c", "d"]:
        fd = cache.open_contents(str(tmp_path / x), os.O_RDONLY)
        os.close(fd)

    cache.save()

    # With max_size=3 the LRU keeps only one 3-byte file's contents on disk.
    assert len(os.listdir(tmp_path / "cache" / "contents")) == 1

    # Consistency fix: use the create_cache helper instead of duplicating the
    # RemoteCache constructor call (the explicit arguments were identical to
    # the helper's defaults).
    cache = create_cache(tmp_path)
    cache.save(merge_disk_cache=False)

    # The empty cache's save discards the now-unreferenced content file.
    assert len(os.listdir(tmp_path / "cache" / "contents")) == 0
def test_concurrent_cache_refresh_metadata(tmp_path):
(tmp_path / "file").write_text("foo")
(tmp_path / "cache").mkdir()
cache = create_cache(tmp_path)
meta_1 = cache.get_metadata(str(tmp_path / "file"))
os.truncate(tmp_path / "file", 20)
meta_2 = cache.get_metadata(str(tmp_path / "file"))
assert meta_2 == meta_1
cache.sync()
meta_3 = cache.get_metadata(str(tmp_path / "file"))
assert meta_3 != meta_1
def test_concurrent_cache_refresh_contents(tmp_path):
(tmp_path / "file").write_text("foo")
(tmp_path / "cache").mkdir()
cache = create_cache(tmp_path)
fd = cache.open_contents(str(tmp_path / "file"), os.O_RDONLY)
try:
os.lseek(fd, 0, 0)
assert os.read(fd, 1024) == b"foo"
finally:
os.close(fd)
(tmp_path / "file").write_text("foobar")
fd = cache.open_contents(str(tmp_path / "file"), os.O_RDONLY)
try:
os.lseek(fd, 0, 0)
assert os.read(fd, 1024) == b"foo"
finally:
os.close(fd)
cache.sync()
fd = cache.open_contents(str(tmp_path / "file"), os.O_RDONLY)
try:
os.lseek(fd, 0, 0)
assert os.read(fd, 1024) == b"foobar"
finally:
os.close(fd)
def test_concurrent_cache_disk_merge(tmp_path):
    """Two caches saved to the same directory must merge their entries on load."""
    (tmp_path / "foo").touch()
    (tmp_path / "bar").touch()

    cache_a = create_cache(tmp_path)
    cache_b = create_cache(tmp_path)

    # Each cache learns about a different file.
    cache_a.get_metadata(str(tmp_path / "foo"))
    cache_b.get_metadata(str(tmp_path / "bar"))

    cache_a.save()
    cache_b.save()

    # Consistency fix: reuse the create_cache helper; the explicit
    # RemoteCache(...) call duplicated exactly the helper's default arguments.
    cache_c = create_cache(tmp_path)
    cache_c.load()

    # A fresh cache loading the merged on-disk state sees both entries.
    assert cache_c.count() == 2
def test_concurrent_cache_prefetch_symlink(tmp_path):
    """Prefetching via a symlink produces two cache entries.

    Presumably the link itself plus its target path -- confirm against
    the cache implementation.
    """
    link = tmp_path / "foo"
    os.symlink("bar", link)
    cache = create_cache(tmp_path, prefetch=True)
    cache.get_metadata(str(link))
    assert cache.count() == 2
def test_concurrent_cache_prefetch_contents_upon_access(tmp_path):
    """A metadata access on a prefetch-enabled cache also caches contents."""
    source = tmp_path / "test.py"
    source.write_text("abc")
    cache = create_cache(tmp_path, prefetch=True)
    cache.get_metadata(str(source))
    # size() reflects the 3 bytes of prefetched file contents.
    assert cache.size() == 3
def test_concurrent_cache_mark_fetched_contents(tmp_path):
    """On sync, a reloaded cache reports previously fetched contents.

    A first cache session fetches a file's contents and saves; after a
    reload, sync() must inform the client which paths were already
    fetched (via mark_previously_fetched_contents).
    """
    (tmp_path / "file").touch()
    # Cache contents of a file
    cache_a = create_cache(tmp_path, prefetch=True)
    fd = cache_a.open_contents(str(tmp_path / "file"), os.O_RDONLY)
    os.close(fd)
    cache_a.save()
    # Reload cache and expect that local is informed about cached contents
    mock_client = mock.Mock()
    mock_client.get_changed_metadata.return_value = {}
    cache_b = create_cache(tmp_path, client=mock_client, prefetch=True)
    cache_b.load()
    cache_b.sync()
    mock_client.mark_previously_fetched_contents.assert_called_with(
        [str(tmp_path / "file")]
    )
| 7,043 | 0 | 345 |
edff084c7f8d8cf7747e5d8b6ac0a224fad1ac17 | 57 | py | Python | apps/markets3/__init__.py | uktrade/enav-alpha | 8d38f05763367ca6b6747203241f267612fd6e44 | [
"MIT"
] | null | null | null | apps/markets3/__init__.py | uktrade/enav-alpha | 8d38f05763367ca6b6747203241f267612fd6e44 | [
"MIT"
] | 67 | 2016-07-11T12:57:58.000Z | 2016-08-08T12:59:19.000Z | apps/markets3/__init__.py | UKTradeInvestment/enav-alpha | 8d38f05763367ca6b6747203241f267612fd6e44 | [
"MIT"
] | null | null | null | default_app_config = 'apps.markets3.apps.Markets3Config'
| 28.5 | 56 | 0.842105 | default_app_config = 'apps.markets3.apps.Markets3Config'
| 0 | 0 | 0 |
e13fb19614fc68c7f13ee01ff2579502976fc093 | 1,781 | py | Python | tools/sanitize_junitxml.py | osrf/cloudsim-legacy | 01ea7dd2708ed9797a860ac839028ec62fd96a23 | [
"Apache-2.0"
] | null | null | null | tools/sanitize_junitxml.py | osrf/cloudsim-legacy | 01ea7dd2708ed9797a860ac839028ec62fd96a23 | [
"Apache-2.0"
] | null | null | null | tools/sanitize_junitxml.py | osrf/cloudsim-legacy | 01ea7dd2708ed9797a860ac839028ec62fd96a23 | [
"Apache-2.0"
] | 1 | 2021-03-16T15:00:51.000Z | 2021-03-16T15:00:51.000Z | #!/usr/bin/env python
import os
import codecs
import re
import sys
# Borrowed from rosunit
## unit test suites are not good about screening out illegal
## unicode characters. This little recipe I from http://boodebr.org/main/python/all-about-python-and-unicode#UNI_XML
## screens these out
RE_XML_ILLEGAL = u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])' + \
u'|' + \
u'([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])' % \
(unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff))
_safe_xml_regex = re.compile(RE_XML_ILLEGAL)
def _read_file_safe_xml(test_file, write_back_sanitized=True):
    """Read *test_file* and screen out XML-illegal unicode characters.

    The file is first decoded as UTF-8 and, if that fails, re-read as
    ISO-8859-1 (the problem files do not declare an encoding).  Every
    character matching ``_safe_xml_regex`` is replaced with ``"?"``.

    :param test_file: path to the XML file to sanitize
    :param write_back_sanitized: when True (default), overwrite the file
        with the sanitized, UTF-8 encoded content
    :returns: the sanitized content, UTF-8 encoded
    :raises Exception: if *test_file* does not exist
    """
    f = None
    try:
        # this is ugly, but the files in question that are problematic
        # do not declare unicode type.
        if not os.path.isfile(test_file):
            raise Exception("test file does not exist")
        try:
            f = codecs.open(test_file, "r", "utf-8")
            x = f.read()
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed (PEP 8).
        except Exception:
            if f is not None:
                f.close()
            # latin-1 maps every byte, so this read cannot fail on decoding.
            f = codecs.open(test_file, "r", "iso8859-1")
            x = f.read()
        for match in _safe_xml_regex.finditer(x):
            x = x[:match.start()] + "?" + x[match.end():]
        x = x.encode("utf-8")
        if write_back_sanitized:
            with open(test_file, 'w') as h:
                h.write(x)
        return x
    finally:
        if f is not None:
            f.close()
if __name__ == '__main__':
for f in sys.argv[1:]:
_read_file_safe_xml(f, True)
| 32.981481 | 116 | 0.568782 | #!/usr/bin/env python
import os
import codecs
import re
import sys
# Borrowed from rosunit
## unit test suites are not good about screening out illegal
## unicode characters. This little recipe I from http://boodebr.org/main/python/all-about-python-and-unicode#UNI_XML
## screens these out
RE_XML_ILLEGAL = u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])' + \
u'|' + \
u'([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])' % \
(unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff))
_safe_xml_regex = re.compile(RE_XML_ILLEGAL)
def _read_file_safe_xml(test_file, write_back_sanitized=True):
"""
read in file, screen out unsafe unicode characters
"""
f = None
try:
# this is ugly, but the files in question that are problematic
# do not declare unicode type.
if not os.path.isfile(test_file):
raise Exception("test file does not exist")
try:
f = codecs.open(test_file, "r", "utf-8" )
x = f.read()
except:
if f is not None:
f.close()
f = codecs.open(test_file, "r", "iso8859-1" )
x = f.read()
for match in _safe_xml_regex.finditer(x):
x = x[:match.start()] + "?" + x[match.end():]
x = x.encode("utf-8")
if write_back_sanitized:
with open(test_file, 'w') as h:
h.write(x)
return x
finally:
if f is not None:
f.close()
if __name__ == '__main__':
for f in sys.argv[1:]:
_read_file_safe_xml(f, True)
| 0 | 0 | 0 |
021f1d33629a142f5ca040ae826f680a01d1eab3 | 3,300 | py | Python | tests/test_format.py | erkia/rivescript-python | a28deb389d88c2a3f988dd93e15a04c0e536704e | [
"MIT"
] | 154 | 2015-02-04T08:41:23.000Z | 2022-03-18T19:39:53.000Z | tests/test_format.py | erkia/rivescript-python | a28deb389d88c2a3f988dd93e15a04c0e536704e | [
"MIT"
] | 115 | 2015-06-14T13:31:07.000Z | 2022-02-14T23:02:19.000Z | tests/test_format.py | erkia/rivescript-python | a28deb389d88c2a3f988dd93e15a04c0e536704e | [
"MIT"
] | 87 | 2015-04-18T23:15:18.000Z | 2022-03-18T09:52:06.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from .config import RiveScriptTestCase
class MessageFormatTests(RiveScriptTestCase):
"""Test format message."""
| 35.106383 | 115 | 0.477576 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from .config import RiveScriptTestCase
class MessageFormatTests(RiveScriptTestCase):
"""Test format message."""
def test_format_message(self):
self.new("""
+ hello bot
- hello human
""")
self.reply("hello bot", "hello human")
self.reply("Hello Bot", "hello human")
self.reply(" hello bot ", "hello human") # Strip leading and trailing whitespaces
self.reply(" hello bot ", "hello human") # Replace the multiple whitespaces by single whitespace
self.reply("hello bot!!!??? ", "hello human") # Strip nasties
def test_format_triggers(self):
self.new("""
+ hi there
- hi there
+hi here
-hi here
""")
self.reply("hi there", "hi there")
self.reply("hi here", "hi here")
def test_check_syntax(self):
mismatch_brackets = ["a (b", "a [b", "a {b", "a <b", "a b)", "a b]", "a b}", "a b>"]
empty_pipes = ["[a|b| ]", "[a|b|]", "[a| |c]", "[a||c]", "[ |b|c]", "[|b|c]"]
advanced_brackets = [") a (", "] b [", "> c <", "} d {", "a (b [c) d]", "a (b [c|d) e]"]
angle_brackets = ["(a <b) c>", "<a (b > c)", "[a <b ] c>", "< a [b > c]", "{ a < b } c >", "< a {b > c }"]
pipe_outside = ["a|b", "a|", "|b", "(a|b) | (c|d)", "(a|b)|(c|d)"]
for failing_trigger in mismatch_brackets + empty_pipes + advanced_brackets + pipe_outside + angle_brackets:
self.assertRaises(Exception, self.new, """
+ {}
- hi
""".format(failing_trigger))
self.new("""
! version = 2.0
// Bot variables
! var name = Tutorial
! var nickname = tut
+ [<bot name>|<nickname>] *
- You called?
""")
self.reply("Tutorial", "You called?")
self.reply("tut", "You called?")
def test_invalid_character_raise_exception(self):
self.assertRaises(Exception, self.new, """
+ $hello
- hi
""") # This test passes with `match`, which only check at the beginning
self.assertRaises(Exception, self.new, """
+ hello$
- hi
""") # This test does not pass because the beginning is good, no $
self.assertRaises(Exception, self.new, """
> topic Greetings
+ hello
- hi
<topics
""")
self.assertRaises(Exception, self.new, """
> object hash %perl
my ($rs, $args) = @_;
my $method = shift @{$args};
<object
""") # Test for character violation in object, no %
self.new("""
> object hash Perl
my ($rs, $args) = @_;
my $method = shift @{$args};
<object
""") # No exception raised for uppercase character in object
def test_space_tolerance_with_pipe(self):
self.new("""
+ hey [ a | b|c ]
- hi
""")
for message in ['hey a', 'hey b', 'hey c']:
self.reply(message, "hi")
| 2,944 | 0 | 134 |
6feca837f59280a582dbcf726c80b99b3bd4518e | 313 | py | Python | DeathSwitch/Config.py | henfredemars/python-personal-projects | b5939e8d5d1a528c0ce74ac85524c31674c96244 | [
"MIT"
] | null | null | null | DeathSwitch/Config.py | henfredemars/python-personal-projects | b5939e8d5d1a528c0ce74ac85524c31674c96244 | [
"MIT"
] | null | null | null | DeathSwitch/Config.py | henfredemars/python-personal-projects | b5939e8d5d1a528c0ce74ac85524c31674c96244 | [
"MIT"
] | null | null | null | #Some configuration parameters
from datetime import timedelta
host = "mail.messagingengine.com"
port = 465
email = "*********@fastmail.com"
password = "password"
touchfile = "/home/henfredemars/.bashrc"
time_to_wait = timedelta(14)
check_period = timedelta(0,0,0,0,30)
min_sane_year = 2016
max_sane_year = 3016
| 22.357143 | 40 | 0.747604 | #Some configuration parameters
from datetime import timedelta
host = "mail.messagingengine.com"
port = 465
email = "*********@fastmail.com"
password = "password"
touchfile = "/home/henfredemars/.bashrc"
time_to_wait = timedelta(14)
check_period = timedelta(0,0,0,0,30)
min_sane_year = 2016
max_sane_year = 3016
| 0 | 0 | 0 |
1a29a4d673889cd9227ff3291f1c12f022c1e5bd | 7,329 | py | Python | E2E/networks/FCnet_pytorch.py | FrancescoMarra/E2E-ForgeryDetection | 352a788cdbe00184a6a29158c5c315a9832b326e | [
"BSD-4-Clause-UC"
] | 26 | 2020-04-10T13:25:12.000Z | 2022-03-20T12:27:02.000Z | E2E/networks/FCnet_pytorch.py | FrancescoMarra/E2E-ForgeryDetection | 352a788cdbe00184a6a29158c5c315a9832b326e | [
"BSD-4-Clause-UC"
] | 10 | 2020-04-05T10:42:47.000Z | 2022-03-12T00:12:23.000Z | E2E/networks/FCnet_pytorch.py | FrancescoMarra/E2E-ForgeryDetection | 352a788cdbe00184a6a29158c5c315a9832b326e | [
"BSD-4-Clause-UC"
] | 5 | 2020-04-05T10:44:36.000Z | 2022-03-29T06:41:03.000Z | # -*- coding: utf-8 -*-
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# Copyright (c) 2019 Image Processing Research Group of University Federico II of Naples ('GRIP-UNINA').
# All rights reserved.
# This work should only be used for nonprofit purposes.
#
# By downloading and/or using any of these files, you implicitly agree to all the
# terms of the license, as specified in the document LICENSE.md
# (included in this package) and online at
# http://www.grip.unina.it/download/LICENSE_OPEN.txt
#
from numpy import sqrt, maximum
import torch
from torch.nn import Conv2d, BatchNorm2d, ReLU, Sequential
import numpy as np
import E2E.parameters as parameters
| 43.111765 | 159 | 0.585073 | # -*- coding: utf-8 -*-
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# Copyright (c) 2019 Image Processing Research Group of University Federico II of Naples ('GRIP-UNINA').
# All rights reserved.
# This work should only be used for nonprofit purposes.
#
# By downloading and/or using any of these files, you implicitly agree to all the
# terms of the license, as specified in the document LICENSE.md
# (included in this package) and online at
# http://www.grip.unina.it/download/LICENSE_OPEN.txt
#
from numpy import sqrt, maximum
import torch
from torch.nn import Conv2d, BatchNorm2d, ReLU, Sequential
import numpy as np
import E2E.parameters as parameters
class FullConvNet(torch.nn.Module):
def __init__(self, n_channels=3, num_levels=17, padding="same", momentum=0.1):
super(FullConvNet, self).__init__()
self._num_levels = num_levels
self._actfun = [ReLU(), ] * (self._num_levels - 1) + [None, ]
self._f_size = [3, ] * self._num_levels
padding = padding.lower()
if padding == 'valid':
self._f_pad = [0, ] * self._num_levels
elif padding == 'same':
self._f_pad = [1, ] * self._num_levels
else:
raise ValueError('Padding must be either "valid" or "same", instead "%s" is given.' % padding)
self._f_num = [64, ] * (self._num_levels - 1) + [1, ]
self._f_in = [n_channels, ] + [64, ] * (self._num_levels - 1)
self._f_stride = [1, ] * self._num_levels
self._bnorm = [False, ] + [True, ] * (self._num_levels - 2) + [False, ]
self._bnorm_epsilon = 1e-5
self._bnorm_momentum = momentum
self.decay_list = []
self.features = Sequential()
for i in range(self._num_levels):
# convolution (with bias if batch normalization is not executed in this level)
self.features.add_module(module=Conv2d(self._f_in[i], self._f_num[i], self._f_size[i], self._f_stride[i], self._f_pad[i], bias=not self._bnorm[i]),
name='level_%d/conv' % i)
torch.nn.init.normal_(self.features[-1].weight, std=sqrt(2.0/self._f_size[i]*self._f_size[i]*maximum(self._f_in[i], self._f_num[i])))
self.decay_list.append(self.features[-1].weight)
# eventual batch normalization
if self._bnorm[i]:
self.features.add_module(module=BatchNorm2d(self._f_num[i], eps=self._bnorm_epsilon, momentum=self._bnorm_momentum, affine=True),
name='level_%d/bn' % i)
# eventual activation
if self._actfun[i] is not None:
self.features.add_module(module=self._actfun[i],
name='level_%d/activation' % i)
def load_pretrained_weights(self, filename):
try:
self.load_state_dict(torch.load(filename))
except:
print('Trying to convert file %s to ./temp.pth' % filename)
convert_numpy_weights(filename, './temp.pth')
print('Conversion compleated!\nLoading ./temp.pth')
self.load_state_dict(torch.load('./temp.pth'))
print('Loading compleated!\nRemoving ./temp.pth')
try:
from os import remove
remove(filename + '.pth')
except:
print('Cannot remove ./temp.pth')
def forward(self, images):
return self.features(images)
def convert_numpy_weights(input_filename, output_filename=None):
    """Convert a numpy weight archive to a PyTorch state dict on disk.

    Loads the archive, copies convolution weights (transposed from the
    (H, W, in, out) layout to torch's (out, in, H, W)), conv biases and
    batch-norm statistics into a freshly built FullConvNet, then saves
    that network's state_dict.

    :param input_filename: path of the numpy weight archive (np.load-able);
        the ':0'-suffixed keys suggest a TensorFlow export
    :param output_filename: destination path; defaults to
        ``input_filename + '.pth'``
    """
    import numpy as np
    in_file = np.load(input_filename)
    # Infer the level count from the highest level_<i> name in the archive.
    num_levels = max([int(name.split('/')[0].split('_')[1]) for name in in_file['list']]) + 1
    # BUGFIX: pass by keyword -- the original called FullConvNet(num_levels),
    # which filled the n_channels positional slot instead of num_levels.
    net = FullConvNet(num_levels=num_levels)
    for name, mod in net.features.named_modules():
        print('Module %s:' % name)
        if isinstance(mod, Conv2d):
            mod.weight.data = torch.from_numpy(np.transpose(in_file[name + '/weights:0'], (3, 2, 0, 1)))
            print('+ weight loaded')
            # Only the first and last levels carry a conv bias (no batch
            # norm on those levels).
            if 'level_0' in name or 'level_%d' % (num_levels-1) in name:
                mod.bias.data = torch.from_numpy(in_file[name.split('/')[0] + '/bias/beta:0'])
                print('+ bias loaded')
            else:
                print('- no bias')
        elif isinstance(mod, BatchNorm2d):
            mod.running_mean = torch.from_numpy(in_file[name + '/moving_mean:0'])
            print('+ moving_mean loaded')
            mod.running_var = torch.from_numpy(in_file[name + '/moving_variance:0'])
            print('+ moving_variance loaded')
            mod.weight.data = torch.from_numpy(in_file[name + '/gamma:0'])
            print('+ weight loaded')
            # Mid levels store their shift as the batch-norm beta.
            if not('level_0' in name or 'level_%d' % (num_levels-1) in name):
                mod.bias.data = torch.from_numpy(in_file[name.split('/')[0] + '/bias/beta:0'])
                print('+ bias loaded')
            else:
                print('- no bias')
        print('* DONE\n')
    if output_filename is None:
        output_filename = input_filename + '.pth'
    print('Conversion completed: saving weights in %s' % output_filename)
    torch.save(net.state_dict(), output_filename)
def get_FCnet(weights_filename, padding='same'):
    """Build a FullConvNet, load *weights_filename* on the CPU, and
    return the network in eval mode."""
    net = FullConvNet(padding=padding)
    state = torch.load(weights_filename, map_location='cpu')
    net.load_state_dict(state)
    net.eval()
    return net
def extractFC(FCnet, img, use_cuda = False):
    """Run FCnet on one HxWxC numpy image and return the output map.

    The image is converted to a (1, C, H, W) float32 tensor, keeping at
    most the first 3 channels, and pushed through the network without
    gradients.

    :param FCnet: the FullConvNet model
    :param img: numpy image, shape (H, W, C) -- assumed; confirm at call sites
    :param use_cuda: move model and input to the GPU for this call
    :returns: numpy array with batch/channel dimensions squeezed out
    """
    # HWC -> CHW, the layout expected by torch convolutions.
    image = np.transpose(img , (2, 0, 1))  # * 255. / 256.
    image = torch.autograd.Variable(torch.from_numpy(image.astype(np.float32))[:3].unsqueeze(0), requires_grad=False)
    FCnet = FCnet.eval()
    if use_cuda:
        FCnet = FCnet.cuda()
        image = image.cuda()
    with torch.no_grad():
        res = FCnet(image)
    res = res.cpu().data.numpy().squeeze()
    return res
def extractFC_stride(img, FCnet, use_cuda=False):
    """Run extractFC on *img*, slicing very wide images to bound memory.

    Wide images are cut into horizontal slices that overlap by 34 pixels
    on each side; the overlapping borders are trimmed before the pieces
    are concatenated, so the stitched result should match a single
    full-image pass (the 34-pixel margin presumably covers the network's
    receptive field -- TODO confirm).

    :param img: numpy image, shape (H, W, C)
    :param FCnet: the FullConvNet model
    :param use_cuda: run on the GPU (model moved back to CPU afterwards)
    :returns: squeezed numpy output map for the whole image
    """
    slice_dim = 512  # nominal slice width in pixels
    if use_cuda:
        FCnet = FCnet.cuda()
    if img.shape[1] > 2700:
        # Very wide: process fixed 512-px slices left to right.
        posSplitend = img.shape[1] - slice_dim - 34
        res = extractFC(FCnet, img[:, :(slice_dim + 34),:], use_cuda)
        res = res[:, :-34]
        posSplit = slice_dim
        while posSplit < posSplitend:
            resA = extractFC(FCnet, img[:, (posSplit - 34): (posSplit + slice_dim + 34), :] ,use_cuda)
            posSplit = posSplit + slice_dim
            res = np.concatenate((res, resA[:, 34:-34]), 1)
        # Final slice: keep everything after its leading overlap.
        resC = extractFC(FCnet, img[:, (posSplit - 34):, :], use_cuda)
        res = np.concatenate((res, resC[:, 34:]), 1)
    elif img.shape[1] > 1024:
        # Moderately wide: split into thirds with 34-px overlaps.
        posSplit = (int(img.shape[1] // 3), int(img.shape[1] // 3 * 2))
        resA = extractFC(FCnet, img[:, :posSplit[0] + 34, :],use_cuda)
        resB = extractFC(FCnet, img[:, posSplit[0] - 34: posSplit[1] + 34, :],use_cuda)
        resC = extractFC(FCnet, img[:, posSplit[1] - 34:, :],use_cuda)
        res = np.concatenate((resA[:, :-34], resB[:, 34:-34], resC[:, 34:]), 1)
    elif img.shape[1] > 512:
        # Split in half with a 34-px overlap.
        posSplit = img.shape[1] // 2
        resA = extractFC(FCnet, img[:, :posSplit + 34, :],use_cuda)
        resB = extractFC(FCnet, img[:, posSplit - 34:, :],use_cuda)
        res = np.concatenate((resA[:, :-34], resB[:, 34:]), 1)
    else:
        # Narrow enough to process in one pass.
        res = extractFC(FCnet, img,use_cuda)
    if use_cuda:
        FCnet = FCnet.cpu()
    res = np.squeeze(res)
    return res
| 6,425 | 14 | 196 |
26a45aa2a62f42248727ef8a913017d53af4b073 | 2,124 | py | Python | opencv_tutorial/opencv_python_tutorials/Image_Processing/image_pyramids.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | opencv_tutorial/opencv_python_tutorials/Image_Processing/image_pyramids.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | opencv_tutorial/opencv_python_tutorials/Image_Processing/image_pyramids.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 14:39:33 2019
@author: jone
"""
#%% Gaussian Pyramids
import cv2
img = cv2.imread('img/monkey.tiff')
lower_reso = cv2.pyrDown(img) # 원본 이미지의 1/4 사이즈
higher_reso = cv2.pyrUp(img) # 원본 이미지의 4배 사이즈
cv2.imshow('img', img)
cv2.imshow('lower', lower_reso)
cv2.imshow('higher', higher_reso)
cv2.waitKey(0)
cv2.destroyAllWindows()
#%% Laplacian Pyramids
import cv2
img = cv2.imread('img/monkey.tiff')
print(img.shape) # (512, 512, 3)
GAD = cv2.pyrDown(img)
print(GAD.shape) # (256, 256, 3)
GAU = cv2.pyrUp(GAD)
print(GAU.shape) # (512, 512, 3)
temp = cv2.resize(GAU, (512, 512))
res = cv2.subtract(img, temp)
cv2.imshow('res', res)
cv2.waitKey(0)
# 이미지 저장
cv2.imwrite('img/lap_pyramids.png', res)
cv2.destroyAllWindows()
#%%
import cv2
import numpy as np
STEP = 6
# 1단계
A = cv2.imread('img/apple.jpg')
B = cv2.imread('img/orange.jpg')
# 2단계
# A 이미지에 대한 Gaussian Pyramid를 생성
# 점점 작아지는 Pyramid
G = A.copy()
gpA = [G]
for i in range(STEP):
G = cv2.pyrDown(G)
gpA.append(G)
# B 이미지에 대한 Gaussian Pyramid 생성
# 점점 작아지는 Pyramid
G = B.copy()
gpB = [G]
for i in range(STEP):
G = cv2.pyrDown(G)
gpB.append(G)
# 3단계
# A 이미지에 대한 Laplacian Pyramid 생성
lpA = [gpA[STEP-1]] # n번쨰 추가된 Gaussian Image
for i in range(STEP-1, 0, -1):
GE = cv2.pyrUp(gpA[i])
L = cv2.subtract(gpA[i-1], GE)
lpA.append(L)
# B 이미지에 대한 Laplacian Pyramid 생성
lpB = [gpB[STEP-1]]
for i in range(STEP-1, 0, -1):
GE = cv2.pyrUp(gpB[i])
L = cv2.subtract(gpB[i-1], GE)
lpB.append(L)
# 4단계
# Laplacian Pyramid를 누적으로 좌측과 우측으로 재결합
LS = []
for la, lb in zip(lpA, lpB):
rows, cols, dpt = la.shape
ls = np.hstack((la[:,0:int(cols/2)], lb[:,int(cols/2):]))
LS.append(ls)
# 5단계
ls_ = LS[0] # 좌측과 우측이 합쳐진 가장 작은 이미지
for i in range(1, STEP):
ls_ = cv2.pyrUp(ls_) # Up scale
ls_ = cv2.add(ls_, LS[i]) # Up Scale된 이미지에 외곽서늘 추가하여 선명한 이미지로 생성
# 원본 이미지를 그대로 붙인 경우
real = np.hstack((A[:, :int(cols/2)], B[:, int(cols/2):]))
cv2.imshow('real', real)
cv2.imshow('blending', ls_)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 19.135135 | 69 | 0.620527 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 14:39:33 2019
@author: jone
"""
#%% Gaussian Pyramids
import cv2
img = cv2.imread('img/monkey.tiff')
lower_reso = cv2.pyrDown(img) # 원본 이미지의 1/4 사이즈
higher_reso = cv2.pyrUp(img) # 원본 이미지의 4배 사이즈
cv2.imshow('img', img)
cv2.imshow('lower', lower_reso)
cv2.imshow('higher', higher_reso)
cv2.waitKey(0)
cv2.destroyAllWindows()
#%% Laplacian Pyramids
import cv2
img = cv2.imread('img/monkey.tiff')
print(img.shape) # (512, 512, 3)
GAD = cv2.pyrDown(img)
print(GAD.shape) # (256, 256, 3)
GAU = cv2.pyrUp(GAD)
print(GAU.shape) # (512, 512, 3)
temp = cv2.resize(GAU, (512, 512))
res = cv2.subtract(img, temp)
cv2.imshow('res', res)
cv2.waitKey(0)
# 이미지 저장
cv2.imwrite('img/lap_pyramids.png', res)
cv2.destroyAllWindows()
#%%
import cv2
import numpy as np
STEP = 6
# 1단계
A = cv2.imread('img/apple.jpg')
B = cv2.imread('img/orange.jpg')
# 2단계
# A 이미지에 대한 Gaussian Pyramid를 생성
# 점점 작아지는 Pyramid
G = A.copy()
gpA = [G]
for i in range(STEP):
G = cv2.pyrDown(G)
gpA.append(G)
# B 이미지에 대한 Gaussian Pyramid 생성
# 점점 작아지는 Pyramid
G = B.copy()
gpB = [G]
for i in range(STEP):
G = cv2.pyrDown(G)
gpB.append(G)
# 3단계
# A 이미지에 대한 Laplacian Pyramid 생성
lpA = [gpA[STEP-1]] # n번쨰 추가된 Gaussian Image
for i in range(STEP-1, 0, -1):
GE = cv2.pyrUp(gpA[i])
L = cv2.subtract(gpA[i-1], GE)
lpA.append(L)
# B 이미지에 대한 Laplacian Pyramid 생성
lpB = [gpB[STEP-1]]
for i in range(STEP-1, 0, -1):
GE = cv2.pyrUp(gpB[i])
L = cv2.subtract(gpB[i-1], GE)
lpB.append(L)
# 4단계
# Laplacian Pyramid를 누적으로 좌측과 우측으로 재결합
LS = []
for la, lb in zip(lpA, lpB):
rows, cols, dpt = la.shape
ls = np.hstack((la[:,0:int(cols/2)], lb[:,int(cols/2):]))
LS.append(ls)
# 5단계
ls_ = LS[0] # 좌측과 우측이 합쳐진 가장 작은 이미지
for i in range(1, STEP):
ls_ = cv2.pyrUp(ls_) # Up scale
ls_ = cv2.add(ls_, LS[i]) # Up Scale된 이미지에 외곽서늘 추가하여 선명한 이미지로 생성
# 원본 이미지를 그대로 붙인 경우
real = np.hstack((A[:, :int(cols/2)], B[:, int(cols/2):]))
cv2.imshow('real', real)
cv2.imshow('blending', ls_)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 0 | 0 | 0 |
b3771d1bce1aa0d85dd8aaee017490daad6aa1a5 | 1,522 | py | Python | product/migrations/0001_initial.py | michwh/onehome-server | 4ee6599e1d320a249f55fb8682693b649416bb0f | [
"MIT"
] | 26 | 2019-03-08T01:11:23.000Z | 2021-09-13T08:45:30.000Z | product/migrations/0001_initial.py | gaozongji/onehome-server | 4ee6599e1d320a249f55fb8682693b649416bb0f | [
"MIT"
] | 7 | 2020-02-11T23:44:42.000Z | 2022-03-11T23:39:57.000Z | product/migrations/0001_initial.py | gaozongji/onehome-server | 4ee6599e1d320a249f55fb8682693b649416bb0f | [
"MIT"
] | 11 | 2019-04-05T04:51:29.000Z | 2021-01-10T08:26:38.000Z | # Generated by Django 2.1.2 on 2019-01-17 02:37
from django.db import migrations, models
| 33.822222 | 87 | 0.521682 | # Generated by Django 2.1.2 on 2019-01-17 02:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Collection and Product models."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Collection: a user's saved/bookmarked item ('收藏' = collection).
        migrations.CreateModel(
            name='Collection',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('c_time', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': '收藏',
                'ordering': ['-c_time'],
            },
        ),
        # Product: a listed item with price, title, description and up to
        # four image paths ('商品' = product).
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('c_time', models.DateTimeField(auto_now_add=True)),
                ('goods_price', models.CharField(max_length=20)),
                ('title', models.CharField(max_length=100, verbose_name='标题')),
                ('description', models.TextField(max_length=300, verbose_name='商品描述')),
                ('goods_img1', models.CharField(max_length=100)),
                ('goods_img2', models.CharField(max_length=100, null=True)),
                ('goods_img3', models.CharField(max_length=100, null=True)),
                ('goods_img4', models.CharField(max_length=100, null=True)),
            ],
            options={
                'verbose_name': '商品',
                'verbose_name_plural': '商品',
                'ordering': ['-c_time'],
            },
        ),
    ]
| 0 | 1,432 | 23 |
603404fd91f24cee3048f98688861113496dce4d | 117 | py | Python | Python/CeV/Exercicios/ex5.py | WerickL/Learning | 5a9a488f0422454e612439b89093d5bc11242e65 | [
"MIT"
] | null | null | null | Python/CeV/Exercicios/ex5.py | WerickL/Learning | 5a9a488f0422454e612439b89093d5bc11242e65 | [
"MIT"
] | null | null | null | Python/CeV/Exercicios/ex5.py | WerickL/Learning | 5a9a488f0422454e612439b89093d5bc11242e65 | [
"MIT"
] | null | null | null | n = int(input('digite um número:'))
print('O antecessor do número {} é {}, e o sucessor é {}!'.format(n, n-1, n+1))
| 39 | 80 | 0.598291 | n = int(input('digite um número:'))
print('O antecessor do número {} é {}, e o sucessor é {}!'.format(n, n-1, n+1))
| 0 | 0 | 0 |
cd1591e747331b07fbe7623164fdf6a62e3ecab7 | 3,088 | py | Python | fill_8949.py | gsugar87/CryptoTaxes | 7bb0332a3fb8ddc18a4009b00aeca46fab838573 | [
"MIT"
] | 37 | 2017-12-07T23:16:42.000Z | 2022-02-11T22:10:23.000Z | fill_8949.py | gsugar87/CryptoTaxes | 7bb0332a3fb8ddc18a4009b00aeca46fab838573 | [
"MIT"
] | 1 | 2018-04-15T15:42:18.000Z | 2018-05-01T22:42:31.000Z | fill_8949.py | gsugar87/CryptoTaxes | 7bb0332a3fb8ddc18a4009b00aeca46fab838573 | [
"MIT"
] | 16 | 2017-12-07T23:16:03.000Z | 2022-02-27T21:55:13.000Z | import os
from fdfgen import forge_fdf
| 44.753623 | 135 | 0.532707 | import os
from fdfgen import forge_fdf
def makePDF(fifoResult, fname, person, social):
    """Fill IRS Form 8949 PDFs from a list of FIFO sale rows.

    Each sale row is indexable as (description, date acquired, date sold,
    proceeds, cost basis). Rows are written 14 per page; every full page
    (and the final partial page) is rendered to PDFs/<fname>_NNN.pdf by
    filling f8949.pdf with pdftk via a temporary FDF file.

    NOTE(review): uses Windows path separators ("FDFs\\...") and the
    `del` shell command, so this only works on Windows with pdftk on PATH.

    :param fifoResult: list of sale rows as described above
    :param fname: base name for the generated FDF/PDF files
    :param person: taxpayer name printed on the form
    :param social: taxpayer SSN printed on the form
    """
    # Create the output directories if they don't already exist
    if not os.path.exists("FDFs"):
        os.makedirs("FDFs")
    if not os.path.exists("PDFs"):
        os.makedirs("PDFs")
    # Header fields (name and SSN) that must appear on every page.
    header_fields = [('topmostSubform[0].Page1[0].f1_1[0]', person),
                     ('topmostSubform[0].Page1[0].f1_2[0]', social)]
    counter = 0        # row number on the current page (1..14)
    fileCounter = 0    # index of the output PDF being produced
    fields = list(header_fields)
    # First field number of each of the 14 table rows (8 fields per row).
    fnums = [3+i*8 for i in range(14)]
    lastRow1 = 0   # page total: proceeds
    lastRow2 = 0   # page total: cost basis
    lastRow3 = 0   # page total: adjustments (always 0 here)
    lastRow4 = 0   # page total: gain/loss
    num_sales = len(fifoResult)
    # loop through all FIFO sales
    for idx, sale in enumerate(fifoResult):
        counter += 1
        # append this sale's row to the form
        row = counter
        fnum = fnums[row-1]
        fields.append(('topmostSubform[0].Page1[0].Table_Line1[0].Row%d[0].f1_%d[0]' % (row, fnum), sale[0]))
        fields.append(('topmostSubform[0].Page1[0].Table_Line1[0].Row%d[0].f1_%d[0]' % (row, fnum+1), sale[1]))
        fields.append(('topmostSubform[0].Page1[0].Table_Line1[0].Row%d[0].f1_%d[0]' % (row, fnum+2), sale[2]))
        fields.append(('topmostSubform[0].Page1[0].Table_Line1[0].Row%d[0].f1_%d[0]' % (row, fnum+3), "%1.2f" % sale[3]))
        fields.append(('topmostSubform[0].Page1[0].Table_Line1[0].Row%d[0].f1_%d[0]' % (row, fnum+4), "%1.2f" % sale[4]))
        # Losses are shown in parentheses per IRS convention.
        if (sale[3]-sale[4]) < 0:
            fields.append(('topmostSubform[0].Page1[0].Table_Line1[0].Row%d[0].f1_%d[0]' % (row, fnum + 7),
                           "(%1.2f)" % (sale[4] - sale[3])))
        else:
            fields.append(('topmostSubform[0].Page1[0].Table_Line1[0].Row%d[0].f1_%d[0]' % (row, fnum+7), "%1.2f" % (sale[3]-sale[4])))
        lastRow1 += float("%1.2f" % sale[3])
        lastRow2 += float("%1.2f" % sale[4])
        lastRow3 += 0
        lastRow4 += float("%1.2f" % (sale[3]-sale[4]))
        # Flush the page when full or when this is the last sale.
        # BUGFIX: compare by index, not value -- the original used
        # `sale == fifoResult[-1]`, which flushed pages early whenever an
        # earlier row happened to equal the last one.
        if row == 14 or idx == num_sales - 1:
            fields.append(("topmostSubform[0].Page1[0].f1_115[0]", "%1.2f" % lastRow1))
            fields.append(("topmostSubform[0].Page1[0].f1_116[0]", "%1.2f" % lastRow2))
            if lastRow4 < 0:
                fields.append(("topmostSubform[0].Page1[0].f1_118[0]", "(%1.2f)" % abs(lastRow4)))
            else:
                fields.append(("topmostSubform[0].Page1[0].f1_118[0]", "%1.2f" % lastRow4))
            fields.append(("topmostSubform[0].Page1[0].c1_1[2]", 3))
            # save the file and reset the counter
            fdf = forge_fdf("", fields, [], [], [])
            fdf_file = open("FDFs\\" + fname + "_%03d.fdf" % fileCounter, "w")
            fdf_file.write(fdf)
            fdf_file.close()
            # call PDFTK to make the PDF
            os.system("pdftk f8949.pdf fill_form FDFs\\" + fname + "_%03d.fdf" % fileCounter + " output PDFs\\" +
                      fname + "_%03d.pdf" % fileCounter)
            # delete the FDF
            os.system("del FDFs\\" + fname + "_%03d.fdf" % fileCounter)
            counter = 0
            fileCounter += 1
            # BUGFIX: restart with the taxpayer header -- the original reset
            # `fields = []`, dropping name/SSN from continuation pages.
            fields = list(header_fields)
            lastRow1 = 0
            lastRow2 = 0
            lastRow3 = 0
            lastRow4 = 0
| 3,025 | 0 | 23 |
fa1587a6e0adbb5d1a169c239bdedad30265d37a | 5,878 | py | Python | src/m2_Extra.py | Kent1227/99-CapstoneProject-201920 | 53399ebb2131f5e1d547413ae782ba065f99cef8 | [
"MIT"
] | null | null | null | src/m2_Extra.py | Kent1227/99-CapstoneProject-201920 | 53399ebb2131f5e1d547413ae782ba065f99cef8 | [
"MIT"
] | null | null | null | src/m2_Extra.py | Kent1227/99-CapstoneProject-201920 | 53399ebb2131f5e1d547413ae782ba065f99cef8 | [
"MIT"
] | null | null | null | #
# This is the beginning of Eddie's Extra file for the Final Capstone Project
# The idea behind this design is to make an accurate representation of a Rose-Hulman Student
#
# The rosebot import was added in order for me to use the dot trick
import rosebot
import time
# def beep_proxy(robot, initial, delta, speed):
# ps = robot.sensor_system.ir_proximity_sensor
# b = robot.sound_system
# robot.drive_system.go(int(speed),int(speed))
# while ps.get_distance_in_inches() > 2:
# rate = float(initial) + float(delta) / float(ps.get_distance_in_inches())
# b.beep_number_of_times(2)
# time.sleep(1 / rate)
# robot.drive_system.stop()
# robot.arm_and_claw.raise_arm()
#
#
# def beep_retrieve(robot, direction, speed):
# d = robot.drive_system
# if direction == "CW":
# d.spin_clockwise_until_sees_object(int(speed), 100)
# elif direction == "CCW":
# d.spin_counterclockwise_until_sees_object(int(speed), 100)
# d.stop()
# camera_aim()
# beep_proxy(robot, 1, 0.1, int(speed))
#
#
# def camera_aim():
# robot = rosebot.RoseBot()
# d = robot.drive_system
# c = robot.sensor_system.camera
# while True:
# print(c.get_biggest_blob().center.x)
# while c.get_biggest_blob().center.x > 170:
# d.go(-20, 20)
# print(c.get_biggest_blob().center.x)
# d.stop()
# while c.get_biggest_blob().center.x < 160:
# d.go(20, -20)
# print(c.get_biggest_blob().center.x)
# d.stop()
# if 160 < c.get_biggest_blob().center.x < 170:
# break
# Not sure which find homework will work better, if they work at all
# The one below uses the object mode
# The one below uses the color mode
# The one above does indeed work better
# Do not use the one below
#######################################################################
# def find_homework2(robot):
# robot.drive_system.spin_clockwise_until_sees_color(100, "White")
# robot.drive_system.go_forward_until_distance_is_less_than(7, 100)
# robot.drive_system.stop()
# robot.arm_and_claw.raise_arm()
# robot.drive_system.go_straight_for_inches_using_encoder(6, 100)
# robot.arm_and_claw.lower_arm()
# robot.drive_system.go_straight_for_inches_using_encoder(4, 50)
# robot.drive_system.stop()
# robot.sound_system.speak("DEATH TO ALL HOMEWORK!")
#######################################################################
| 35.409639 | 116 | 0.690031 | #
# This is the beginning of Eddie's Extra file for the Final Capstone Project
# The idea behind this design is to make an accurate representation of a Rose-Hulman Student
#
# The rosebot import was added in order for me to use the dot trick
import rosebot
import time
# def beep_proxy(robot, initial, delta, speed):
# ps = robot.sensor_system.ir_proximity_sensor
# b = robot.sound_system
# robot.drive_system.go(int(speed),int(speed))
# while ps.get_distance_in_inches() > 2:
# rate = float(initial) + float(delta) / float(ps.get_distance_in_inches())
# b.beep_number_of_times(2)
# time.sleep(1 / rate)
# robot.drive_system.stop()
# robot.arm_and_claw.raise_arm()
#
#
# def beep_retrieve(robot, direction, speed):
# d = robot.drive_system
# if direction == "CW":
# d.spin_clockwise_until_sees_object(int(speed), 100)
# elif direction == "CCW":
# d.spin_counterclockwise_until_sees_object(int(speed), 100)
# d.stop()
# camera_aim()
# beep_proxy(robot, 1, 0.1, int(speed))
#
#
# def camera_aim():
# robot = rosebot.RoseBot()
# d = robot.drive_system
# c = robot.sensor_system.camera
# while True:
# print(c.get_biggest_blob().center.x)
# while c.get_biggest_blob().center.x > 170:
# d.go(-20, 20)
# print(c.get_biggest_blob().center.x)
# d.stop()
# while c.get_biggest_blob().center.x < 160:
# d.go(20, -20)
# print(c.get_biggest_blob().center.x)
# d.stop()
# if 160 < c.get_biggest_blob().center.x < 170:
# break
# Not sure which find homework will work better, if they work at all
# The one below uses the object mode
def find_homework(robot):
    """Drive to the 'homework' prop, pick it up with the claw, and carry it off.

    Uses only the proximity sensor to close in, so the robot is assumed to
    already be roughly facing the target -- TODO confirm with the caller.
    Arguments to the drive calls appear to be (distance/inches, speed);
    presumably so -- verify against the drive_system API.
    """
    # robot.arm_and_claw.lower_arm()
    # robot.drive_system.pivot_left(50, 5)
    # Approach until the proximity sensor reads less than 5.
    robot.drive_system.go_forward_until_distance_is_less_than(5, 50)
    robot.drive_system.stop()
    # Grab the object and carry it away.
    robot.arm_and_claw.raise_arm()
    robot.drive_system.go_straight_for_inches_using_encoder(3, 50)
    robot.arm_and_claw.lower_arm()
    robot.drive_system.go_straight_for_inches_using_encoder(6, 50)
    robot.drive_system.stop()
    robot.sound_system.speak("DEATH TO ALL HOMEWORK!")
# The one below uses the color mode
# The one above does indeed work better
# Do not use the one below
#######################################################################
# def find_homework2(robot):
# robot.drive_system.spin_clockwise_until_sees_color(100, "White")
# robot.drive_system.go_forward_until_distance_is_less_than(7, 100)
# robot.drive_system.stop()
# robot.arm_and_claw.raise_arm()
# robot.drive_system.go_straight_for_inches_using_encoder(6, 100)
# robot.arm_and_claw.lower_arm()
# robot.drive_system.go_straight_for_inches_using_encoder(4, 50)
# robot.drive_system.stop()
# robot.sound_system.speak("DEATH TO ALL HOMEWORK!")
#######################################################################
def find_games(robot):
    """Approach the 'game box' prop, pick it up, and act out a short skit.

    The robot grabs the box, spins while announcing it wants to play,
    drives off with it, pretends to reconsider, then leaves anyway.
    Assumes the robot is already facing the target (the spin-to-find call
    is commented out below).
    """
    # robot.arm_and_claw.lower_arm()
    # robot.drive_system.spin_clockwise_until_sees_object(25, 1) # Need the area of a game box
    # Close in on the box and grab it.
    robot.drive_system.go_forward_until_distance_is_less_than(2, 50)
    robot.drive_system.stop()
    robot.arm_and_claw.raise_arm()
    # Celebrate: spin left for ~5 seconds while speaking.
    robot.drive_system.pivot_left(100, 5)
    time.sleep(5)
    robot.drive_system.stop()
    robot.sound_system.speak("I LOVE VIDEO GAMES! I think I will go play some right now.")
    time.sleep(6)
    robot.drive_system.go_straight_for_inches_using_encoder(12, 50)
    robot.drive_system.stop()
    # Turn back as if having second thoughts.
    robot.drive_system.pivot_right(100, 5)
    time.sleep(5)
    robot.drive_system.stop()
    robot.sound_system.speak("Well, maybe I should do my homework.")
    time.sleep(4)
    robot.drive_system.pivot_left(100, 5)
    time.sleep(5)
    robot.drive_system.stop()
    robot.sound_system.speak("Nah, I will do it later.")
    time.sleep(4)
    # Leave with the box and set it down.
    robot.drive_system.go_straight_for_inches_using_encoder(12, 100)
    robot.arm_and_claw.lower_arm()
def find_food(robot):
    """Approach the 'food' prop, pick it up, and act out a hungry-student skit.

    Assumes the robot is already facing the target (the spin-to-find call
    is commented out below).
    """
    # robot.arm_and_claw.lower_arm()
    # robot.drive_system.spin_clockwise_until_sees_object(25, 1) # Need the area of some type of food item
    # Close in on the food and grab it.
    robot.drive_system.go_forward_until_distance_is_less_than(2, 50)
    robot.drive_system.stop()
    robot.arm_and_claw.raise_arm()
    # Spin left for ~5 seconds, then complain about hunger.
    robot.drive_system.pivot_left(100, 5)
    time.sleep(5)
    robot.drive_system.stop()
    robot.sound_system.speak("Oh my God I am literally so hungry I haven't eaten in over 15 minutes")
    time.sleep(7)
    # Carry the food off toward the "microwave".
    robot.drive_system.go_straight_for_inches_using_encoder(6, 50)
    robot.drive_system.pivot_right(100, 5)
    time.sleep(5)
    robot.drive_system.stop()
    robot.sound_system.speak("Where is the nearest microwave?")
    time.sleep(3)
    robot.arm_and_claw.lower_arm()
def go_to_sleep(robot):
    """Act out the end-of-day routine: drive to 'bed', snore, then panic.

    Drives until the color sensor sees White (the bed marker -- see the
    inline note), snores three times via snore(), then wakes up
    remembering unfinished homework and speeds away.
    """
    # robot.arm_and_claw.lower_arm()
    robot.sound_system.speak("Well, it has been a long day. Time to get some sleep.")
    time.sleep(5)
    robot.drive_system.go_straight_until_color_is("White", 50)  # May need to add a different color, not sure yet
    robot.drive_system.stop()
    robot.drive_system.pivot_left(100, 5)
    time.sleep(5)
    robot.drive_system.stop()
    robot.sound_system.speak("I sure hope they have Chicken Strips for lunch tomorrow.")
    time.sleep(6)
    # Snore three times (speak + 2 s pause each; see snore()).
    snore(robot, 3)
    # Sudden wake-up: raise arm, spin, panic about homework, and bolt.
    robot.arm_and_claw.raise_arm()
    robot.drive_system.pivot_left(100, 5)
    time.sleep(5)
    robot.drive_system.stop()
    robot.sound_system.speak("OH MY LORD I FORGOT ABOUT MY HOMEWORK DUE AT MIDNIGHT OH NO PLEASE NO WHY ME!")
    time.sleep(7)
    robot.drive_system.go_straight_for_inches_using_encoder(24, 100)
    robot.arm_and_claw.lower_arm()
def snore(robot, num_of_snores):
    """Have the robot audibly "snore" the requested number of times.

    Speaks the word "Snore" once per repetition, pausing 2 seconds after
    each one. A non-positive count produces no snores.
    """
    remaining = num_of_snores
    while remaining > 0:
        robot.sound_system.speak("Snore")
        time.sleep(2)
        remaining -= 1
def play_tone(robot):
    """Play a single fixed tone through the robot's sound system.

    The arguments (500, 440) are passed straight to the sound_system API;
    presumably one is frequency and one is duration -- confirm against
    the rosebot documentation before changing them.
    """
    robot.sound_system.play_tone(500, 440)
| 3,271 | 0 | 138 |
886985500f902e41f6cbac744d21f7967ca5f7e7 | 10,241 | py | Python | src/bpp/models/wydawnictwo_zwarte.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | null | null | null | src/bpp/models/wydawnictwo_zwarte.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | 41 | 2019-11-07T00:07:02.000Z | 2022-02-27T22:09:39.000Z | src/bpp/models/wydawnictwo_zwarte.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
import re
import warnings
from denorm import denormalized, depend_on_related
from dirtyfields.dirtyfields import DirtyFieldsMixin
from django.db import models
from django.db.models import CASCADE, PROTECT
from django.db.models.expressions import RawSQL
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.postgres.fields import ArrayField, JSONField
from bpp.models import (
DodajAutoraMixin,
MaProcentyMixin,
ModelOpcjonalnieNieEksportowanyDoAPI,
ModelZMiejscemPrzechowywania,
ModelZPBN_UID,
)
from bpp.models.abstract import (
BazaModeluOdpowiedzialnosciAutorow,
DwaTytuly,
ModelPunktowany,
ModelRecenzowany,
ModelTypowany,
ModelWybitny,
ModelZAbsolutnymUrl,
ModelZAdnotacjami,
ModelZCharakterem,
ModelZDOI,
ModelZeStatusem,
ModelZeSzczegolami,
ModelZeZnakamiWydawniczymi,
ModelZInformacjaZ,
ModelZISBN,
ModelZISSN,
ModelZKonferencja,
ModelZLiczbaCytowan,
ModelZOpenAccess,
ModelZPrzeliczaniemDyscyplin,
ModelZPubmedID,
ModelZRokiem,
ModelZSeria_Wydawnicza,
ModelZWWW,
Wydawnictwo_Baza,
)
from bpp.models.autor import Autor
from bpp.models.nagroda import Nagroda
from bpp.models.system import Zewnetrzna_Baza_Danych
from bpp.models.util import ZapobiegajNiewlasciwymCharakterom
from bpp.models.wydawca import Wydawca
class Wydawnictwo_Zwarte_Autor(
DirtyFieldsMixin,
BazaModeluOdpowiedzialnosciAutorow,
):
"""Model zawierający informację o przywiązaniu autorów do wydawnictwa
zwartego."""
rekord = models.ForeignKey(
"Wydawnictwo_Zwarte", CASCADE, related_name="autorzy_set"
)
MIEJSCE_I_ROK_MAX_LENGTH = 256
class Wydawnictwo_Zwarte_Baza(
Wydawnictwo_Baza,
DwaTytuly,
ModelZRokiem,
ModelZeStatusem,
ModelZWWW,
ModelZPubmedID,
ModelZDOI,
ModelRecenzowany,
ModelPunktowany,
ModelTypowany,
ModelZeSzczegolami,
ModelZInformacjaZ,
ModelZISBN,
ModelZAdnotacjami,
ModelZAbsolutnymUrl,
ModelZLiczbaCytowan,
ModelZMiejscemPrzechowywania,
ModelOpcjonalnieNieEksportowanyDoAPI,
):
"""Baza dla klas Wydawnictwo_Zwarte oraz Praca_Doktorska_Lub_Habilitacyjna"""
miejsce_i_rok = models.CharField(
max_length=MIEJSCE_I_ROK_MAX_LENGTH,
blank=True,
null=True,
help_text="""Przykładowo:
Warszawa 2012. Wpisz proszę najpierw miejsce potem rok; oddziel
spacją.""",
)
wydawca = models.ForeignKey(Wydawca, PROTECT, null=True, blank=True)
wydawca_opis = models.CharField(
"Wydawca - szczegóły", max_length=256, null=True, blank=True
)
oznaczenie_wydania = models.CharField(max_length=400, null=True, blank=True)
wydawnictwo = property(get_wydawnictwo, set_wydawnictwo)
redakcja = models.TextField(null=True, blank=True)
rok_regex = re.compile(r"\s[12]\d\d\d")
class Wydawnictwo_Zwarte(
ZapobiegajNiewlasciwymCharakterom,
Wydawnictwo_Zwarte_Baza,
ModelZCharakterem,
ModelZOpenAccessWydawnictwoZwarte,
ModelZeZnakamiWydawniczymi,
ModelZKonferencja,
ModelZSeria_Wydawnicza,
ModelZISSN,
ModelWybitny,
ModelZPBN_UID,
MaProcentyMixin,
DodajAutoraMixin,
DirtyFieldsMixin,
ModelZPrzeliczaniemDyscyplin,
):
"""Wydawnictwo zwarte, czyli: książki, broszury, skrypty, fragmenty,
doniesienia zjazdowe."""
objects = Wydawnictwo_Zwarte_Manager()
autor_rekordu_klass = Wydawnictwo_Zwarte_Autor
autorzy = models.ManyToManyField(Autor, through=autor_rekordu_klass)
wydawnictwo_nadrzedne = models.ForeignKey(
"self",
CASCADE,
blank=True,
null=True,
help_text="""Jeżeli dodajesz rozdział,
tu wybierz pracę, w ramach której dany rozdział występuje.""",
related_name="wydawnictwa_powiazane_set",
)
calkowita_liczba_autorow = models.PositiveIntegerField(
blank=True,
null=True,
help_text="""Jeżeli dodajesz monografię, wpisz
tutaj całkowitą liczbę autorów monografii. Ta informacja zostanie
użyta w eksporcie danych do PBN. Jeżeli informacja ta nie zostanie
uzupełiona, wartość tego pola zostanie obliczona i będzie to ilość
wszystkich autorów przypisanych do danej monografii""",
)
calkowita_liczba_redaktorow = models.PositiveIntegerField(
blank=True,
null=True,
help_text="""Jeżeli dodajesz monografię, wpisz tutaj całkowitą liczbę
redaktorów monografii. Ta informacja zostanie użyta w eksporcie
danych do PBN. Jeżeli pole to nie zostanie uzupełnione, wartość ta
zostanie obliczona i będzie to ilość wszystkich redaktorów
przypisanych do danej monografii""",
)
nagrody = GenericRelation(Nagroda)
def wydawnictwa_powiazane_posortowane(self):
"""
Sortowanie wydawnictw powiązanych wg pierwszej liczby dziesiętnej występującej w polu 'Strony'
"""
return self.wydawnictwa_powiazane_set.order_by(
RawSQL(
r"CAST((regexp_match(COALESCE(bpp_wydawnictwo_zwarte.strony, '99999999'), '(\d+)'))[1] AS INT)",
"",
)
)
#
# Cache framework by django-denorm-iplweb
#
denorm_always_skip = ("ostatnio_zmieniony",)
@denormalized(JSONField, blank=True, null=True)
@depend_on_related(
"bpp.Wydawnictwo_Zwarte_Autor",
only=(
"typ_odpowiedzialnosci_id",
"afiliuje",
"dyscyplina_naukowa_id",
"upowaznienie_pbn",
"przypieta",
),
)
@depend_on_related("bpp.Wydawca", only=("lista_poziomow", "alias_dla_id"))
@denormalized(models.TextField, default="")
@depend_on_related("self", "wydawnictwo_nadrzedne")
@depend_on_related(
"bpp.Wydawnictwo_Zwarte_Autor",
only=("zapisany_jako", "typ_odpowiedzialnosci_id", "kolejnosc"),
)
@depend_on_related("bpp.Wydawca", only=("nazwa", "alias_dla_id"))
@depend_on_related("bpp.Charakter_Formalny")
@depend_on_related("bpp.Typ_KBN")
@depend_on_related("bpp.Status_Korekty")
@denormalized(ArrayField, base_field=models.TextField(), blank=True, null=True)
@depend_on_related(
"bpp.Autor",
only=(
"nazwisko",
"imiona",
),
)
@depend_on_related("bpp.Wydawnictwo_Zwarte_Autor", only=("kolejnosc",))
@denormalized(models.TextField, blank=True, null=True)
@depend_on_related(
"bpp.Wydawnictwo_Zwarte_Autor",
only=("zapisany_jako", "kolejnosc"),
)
@denormalized(
models.SlugField,
max_length=400,
unique=True,
db_index=True,
null=True,
blank=True,
)
@depend_on_related(
"bpp.Wydawnictwo_Zwarte_Autor",
only=("zapisany_jako", "kolejnosc"),
)
@depend_on_related(
"bpp.Autor",
only=("nazwisko", "imiona"),
)
@depend_on_related("self", "wydawnictwo_nadrzedne")
| 30.570149 | 112 | 0.683332 | # -*- encoding: utf-8 -*-
import re
import warnings
from denorm import denormalized, depend_on_related
from dirtyfields.dirtyfields import DirtyFieldsMixin
from django.db import models
from django.db.models import CASCADE, PROTECT
from django.db.models.expressions import RawSQL
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.postgres.fields import ArrayField, JSONField
from bpp.models import (
DodajAutoraMixin,
MaProcentyMixin,
ModelOpcjonalnieNieEksportowanyDoAPI,
ModelZMiejscemPrzechowywania,
ModelZPBN_UID,
)
from bpp.models.abstract import (
BazaModeluOdpowiedzialnosciAutorow,
DwaTytuly,
ModelPunktowany,
ModelRecenzowany,
ModelTypowany,
ModelWybitny,
ModelZAbsolutnymUrl,
ModelZAdnotacjami,
ModelZCharakterem,
ModelZDOI,
ModelZeStatusem,
ModelZeSzczegolami,
ModelZeZnakamiWydawniczymi,
ModelZInformacjaZ,
ModelZISBN,
ModelZISSN,
ModelZKonferencja,
ModelZLiczbaCytowan,
ModelZOpenAccess,
ModelZPrzeliczaniemDyscyplin,
ModelZPubmedID,
ModelZRokiem,
ModelZSeria_Wydawnicza,
ModelZWWW,
Wydawnictwo_Baza,
)
from bpp.models.autor import Autor
from bpp.models.nagroda import Nagroda
from bpp.models.system import Zewnetrzna_Baza_Danych
from bpp.models.util import ZapobiegajNiewlasciwymCharakterom
from bpp.models.wydawca import Wydawca
class Wydawnictwo_Zwarte_Autor(
    DirtyFieldsMixin,
    BazaModeluOdpowiedzialnosciAutorow,
):
    """Through-model linking an author to a monograph (wydawnictwo zwarte).

    Holds the author's responsibility type and ordering for a single
    Wydawnictwo_Zwarte record; the base class supplies the remaining
    fields (autor, typ_odpowiedzialnosci, kolejnosc, ...).
    """
    # The publication this authorship row belongs to.
    rekord = models.ForeignKey(
        "Wydawnictwo_Zwarte", CASCADE, related_name="autorzy_set"
    )
    class Meta:
        verbose_name = "powiązanie autora z wyd. zwartym"
        verbose_name_plural = "powiązania autorów z wyd. zwartymi"
        app_label = "bpp"
        ordering = ("kolejnosc",)
        unique_together = [
            ("rekord", "autor", "typ_odpowiedzialnosci"),
            # 'autor' must be included here, otherwise the admin will not
            # allow the row to be edited.
            ("rekord", "autor", "kolejnosc"),
        ]
MIEJSCE_I_ROK_MAX_LENGTH = 256
class Wydawnictwo_Zwarte_Baza(
Wydawnictwo_Baza,
DwaTytuly,
ModelZRokiem,
ModelZeStatusem,
ModelZWWW,
ModelZPubmedID,
ModelZDOI,
ModelRecenzowany,
ModelPunktowany,
ModelTypowany,
ModelZeSzczegolami,
ModelZInformacjaZ,
ModelZISBN,
ModelZAdnotacjami,
ModelZAbsolutnymUrl,
ModelZLiczbaCytowan,
ModelZMiejscemPrzechowywania,
ModelOpcjonalnieNieEksportowanyDoAPI,
):
"""Baza dla klas Wydawnictwo_Zwarte oraz Praca_Doktorska_Lub_Habilitacyjna"""
miejsce_i_rok = models.CharField(
max_length=MIEJSCE_I_ROK_MAX_LENGTH,
blank=True,
null=True,
help_text="""Przykładowo:
Warszawa 2012. Wpisz proszę najpierw miejsce potem rok; oddziel
spacją.""",
)
wydawca = models.ForeignKey(Wydawca, PROTECT, null=True, blank=True)
wydawca_opis = models.CharField(
"Wydawca - szczegóły", max_length=256, null=True, blank=True
)
oznaczenie_wydania = models.CharField(max_length=400, null=True, blank=True)
def get_wydawnictwo(self):
# Zwróć nazwę wydawcy + pole wydawca_opis lub samo pole wydawca_opis, jeżeli
# wydawca (indeksowany) nie jest ustalony
if self.wydawca_id is None:
return self.wydawca_opis
opis = self.wydawca_opis or ""
try:
if opis[0] in ".;-/,":
# Nie wstawiaj spacji między wydawcę a opis jeżeli zaczyna się od kropki, przecinka itp
return f"{self.wydawca.nazwa}{opis}".strip()
except IndexError:
pass
return f"{self.wydawca.nazwa} {opis}".strip()
def set_wydawnictwo(self, value):
warnings.warn("W przyszlosci uzyj 'wydawca_opis'", DeprecationWarning)
self.wydawca_opis = value
wydawnictwo = property(get_wydawnictwo, set_wydawnictwo)
redakcja = models.TextField(null=True, blank=True)
class Meta:
abstract = True
class ModelZOpenAccessWydawnictwoZwarte(ModelZOpenAccess):
    """Abstract mixin adding the monograph-specific OpenAccess access-mode FK."""
    # Access mode specific to monographs (wydawnictwa zwarte).
    openaccess_tryb_dostepu = models.ForeignKey(
        "Tryb_OpenAccess_Wydawnictwo_Zwarte", CASCADE, blank=True, null=True
    )
    class Meta:
        abstract = True
rok_regex = re.compile(r"\s[12]\d\d\d")
class Wydawnictwo_Zwarte_Manager(models.Manager):
    """Custom manager with queries over the self-referencing parent relation."""
    def wydawnictwa_nadrzedne_dla_innych(self):
        """Return distinct IDs of records that serve as a parent
        (wydawnictwo_nadrzedne) for at least one other record."""
        return (
            self.exclude(wydawnictwo_nadrzedne_id=None)
            .values_list("wydawnictwo_nadrzedne_id", flat=True)
            .distinct()
        )
class Wydawnictwo_Zwarte(
ZapobiegajNiewlasciwymCharakterom,
Wydawnictwo_Zwarte_Baza,
ModelZCharakterem,
ModelZOpenAccessWydawnictwoZwarte,
ModelZeZnakamiWydawniczymi,
ModelZKonferencja,
ModelZSeria_Wydawnicza,
ModelZISSN,
ModelWybitny,
ModelZPBN_UID,
MaProcentyMixin,
DodajAutoraMixin,
DirtyFieldsMixin,
ModelZPrzeliczaniemDyscyplin,
):
"""Wydawnictwo zwarte, czyli: książki, broszury, skrypty, fragmenty,
doniesienia zjazdowe."""
objects = Wydawnictwo_Zwarte_Manager()
autor_rekordu_klass = Wydawnictwo_Zwarte_Autor
autorzy = models.ManyToManyField(Autor, through=autor_rekordu_klass)
wydawnictwo_nadrzedne = models.ForeignKey(
"self",
CASCADE,
blank=True,
null=True,
help_text="""Jeżeli dodajesz rozdział,
tu wybierz pracę, w ramach której dany rozdział występuje.""",
related_name="wydawnictwa_powiazane_set",
)
calkowita_liczba_autorow = models.PositiveIntegerField(
blank=True,
null=True,
help_text="""Jeżeli dodajesz monografię, wpisz
tutaj całkowitą liczbę autorów monografii. Ta informacja zostanie
użyta w eksporcie danych do PBN. Jeżeli informacja ta nie zostanie
uzupełiona, wartość tego pola zostanie obliczona i będzie to ilość
wszystkich autorów przypisanych do danej monografii""",
)
calkowita_liczba_redaktorow = models.PositiveIntegerField(
blank=True,
null=True,
help_text="""Jeżeli dodajesz monografię, wpisz tutaj całkowitą liczbę
redaktorów monografii. Ta informacja zostanie użyta w eksporcie
danych do PBN. Jeżeli pole to nie zostanie uzupełnione, wartość ta
zostanie obliczona i będzie to ilość wszystkich redaktorów
przypisanych do danej monografii""",
)
nagrody = GenericRelation(Nagroda)
class Meta:
verbose_name = "wydawnictwo zwarte"
verbose_name_plural = "wydawnictwa zwarte"
app_label = "bpp"
def wydawnictwa_powiazane_posortowane(self):
"""
Sortowanie wydawnictw powiązanych wg pierwszej liczby dziesiętnej występującej w polu 'Strony'
"""
return self.wydawnictwa_powiazane_set.order_by(
RawSQL(
r"CAST((regexp_match(COALESCE(bpp_wydawnictwo_zwarte.strony, '99999999'), '(\d+)'))[1] AS INT)",
"",
)
)
#
# Cache framework by django-denorm-iplweb
#
denorm_always_skip = ("ostatnio_zmieniony",)
@denormalized(JSONField, blank=True, null=True)
@depend_on_related(
"bpp.Wydawnictwo_Zwarte_Autor",
only=(
"typ_odpowiedzialnosci_id",
"afiliuje",
"dyscyplina_naukowa_id",
"upowaznienie_pbn",
"przypieta",
),
)
@depend_on_related("bpp.Wydawca", only=("lista_poziomow", "alias_dla_id"))
def cached_punkty_dyscyplin(self):
# TODO: idealnie byłoby uzależnić zmiane od pola 'rok' które by było identyczne
# dla bpp.Poziom_Wydawcy, rok i id z nadrzędnego. Składnia SQLowa ewentualnie
# jakis zapis django-podobny mile widziany.
return self.przelicz_punkty_dyscyplin()
@denormalized(models.TextField, default="")
@depend_on_related("self", "wydawnictwo_nadrzedne")
@depend_on_related(
"bpp.Wydawnictwo_Zwarte_Autor",
only=("zapisany_jako", "typ_odpowiedzialnosci_id", "kolejnosc"),
)
@depend_on_related("bpp.Wydawca", only=("nazwa", "alias_dla_id"))
@depend_on_related("bpp.Charakter_Formalny")
@depend_on_related("bpp.Typ_KBN")
@depend_on_related("bpp.Status_Korekty")
def opis_bibliograficzny_cache(self):
return self.opis_bibliograficzny()
@denormalized(ArrayField, base_field=models.TextField(), blank=True, null=True)
@depend_on_related(
"bpp.Autor",
only=(
"nazwisko",
"imiona",
),
)
@depend_on_related("bpp.Wydawnictwo_Zwarte_Autor", only=("kolejnosc",))
def opis_bibliograficzny_autorzy_cache(self):
return [
"%s %s" % (x.autor.nazwisko, x.autor.imiona)
for x in self.autorzy_dla_opisu()
]
@denormalized(models.TextField, blank=True, null=True)
@depend_on_related(
"bpp.Wydawnictwo_Zwarte_Autor",
only=("zapisany_jako", "kolejnosc"),
)
def opis_bibliograficzny_zapisani_autorzy_cache(self):
return ", ".join([x.zapisany_jako for x in self.autorzy_dla_opisu()])
@denormalized(
models.SlugField,
max_length=400,
unique=True,
db_index=True,
null=True,
blank=True,
)
@depend_on_related(
"bpp.Wydawnictwo_Zwarte_Autor",
only=("zapisany_jako", "kolejnosc"),
)
@depend_on_related(
"bpp.Autor",
only=("nazwisko", "imiona"),
)
@depend_on_related("self", "wydawnictwo_nadrzedne")
def slug(self):
return self.get_slug()
class Wydawnictwo_Zwarte_Zewnetrzna_Baza_Danych(models.Model):
    """Link between a monograph record and an external bibliographic database."""
    # The monograph being indexed externally.
    rekord = models.ForeignKey(
        Wydawnictwo_Zwarte, CASCADE, related_name="zewnetrzna_baza_danych"
    )
    # Which external database indexes it.
    baza = models.ForeignKey(Zewnetrzna_Baza_Danych, CASCADE)
    # Free-text extra information about the indexing entry.
    info = models.CharField(
        verbose_name="Informacje dodatkowe", max_length=512, blank=True, null=True
    )
    class Meta:
        verbose_name = "powiązanie wydawnictwa zwartego z zewnętrznymi bazami danych"
        verbose_name_plural = (
            "powiązania wydawnictw zwartych z zewnętrznymi bazami danych"
        )
| 1,534 | 1,312 | 360 |
dfc56194264825b9409ec25826c21ead408cdf2b | 199 | py | Python | {{cookiecutter.project_slug}}/config/settings/test.py | dakzh/cookiecutter-django-api | 253d6a96b6092c1218dba2ff3f24169a6363e4ff | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/config/settings/test.py | dakzh/cookiecutter-django-api | 253d6a96b6092c1218dba2ff3f24169a6363e4ff | [
"BSD-3-Clause"
] | 4 | 2021-03-19T03:24:13.000Z | 2021-09-22T19:00:54.000Z | {{cookiecutter.project_slug}}/config/settings/test.py | dakzh/cookiecutter-django-api | 253d6a96b6092c1218dba2ff3f24169a6363e4ff | [
"BSD-3-Clause"
] | null | null | null | from .base import * # noqa
from .base import env
# GENERAL
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="!!!SET DJANGO_SECRET_KEY!!!",
)
TEST_RUNNER = "django.test.runner.DiscoverRunner"
| 19.9 | 49 | 0.703518 | from .base import * # noqa
from .base import env
# GENERAL
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="!!!SET DJANGO_SECRET_KEY!!!",
)
TEST_RUNNER = "django.test.runner.DiscoverRunner"
| 0 | 0 | 0 |
6b12ac8ce00e432226e276bb544b94efce354dfe | 4,472 | py | Python | common_nyu.py | itineraries/scheduler-and-mapper | 18c79167596b05739ad003f73c6c015d1cee8e72 | [
"MIT"
] | null | null | null | common_nyu.py | itineraries/scheduler-and-mapper | 18c79167596b05739ad003f73c6c015d1cee8e72 | [
"MIT"
] | 1 | 2018-04-29T08:24:41.000Z | 2018-04-29T08:24:41.000Z | ctip/scheduler-and-mapper/common_nyu.py | itineraries/flask-app | d94ffa484358e843f12466c79b9387418b5175b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import attr, collections, datetime
from common import file_in_this_dir
NYU_PICKLE = file_in_this_dir("NYU.pickle")
DAYS_OF_WEEK = (
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday"
)
def _deques_increasing_first(list_of_deques, greater_than=None):
'''
Yields tuples. In each tuple, a) each item is a value from a deque and
b) each item is greater than the previous. The first item is greater than
greater_than. This generator yields all combinations that satisfy these
conditions. It is assumed that the deques are sorted in ascending order.
'''
if list_of_deques:
# Get the first deque in the list of deques.
q = list_of_deques[0]
# Get the first value in the deque that is greater than greater_than.
# Discard all values before it.
if greater_than is not None:
try:
while q[0] <= greater_than:
q.popleft()
except IndexError:
# This deque is empty. The generator must terminate.
return
# At this point, the first value in the deque is greater than
# greater_than.
for value in q:
# Construct the tuple, starting with the value from the deque.
head = (value,)
# If there are more deques, values from them will form the rest of
# the tuple. Otherwise, just yield the head with no tail.
if len(list_of_deques) > 1:
# Recursively call this generator on the rest of the deques.
for tail in _deques_increasing_first(
list_of_deques[1:],
value
):
yield head + tail
else:
yield head
@attr.s
@attr.s
| 42.590476 | 79 | 0.630367 | #!/usr/bin/env python3
import attr, collections, datetime
from common import file_in_this_dir
NYU_PICKLE = file_in_this_dir("NYU.pickle")
DAYS_OF_WEEK = (
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday"
)
def _deques_increasing_first(list_of_deques, greater_than=None):
'''
Yields tuples. In each tuple, a) each item is a value from a deque and
b) each item is greater than the previous. The first item is greater than
greater_than. This generator yields all combinations that satisfy these
conditions. It is assumed that the deques are sorted in ascending order.
'''
if list_of_deques:
# Get the first deque in the list of deques.
q = list_of_deques[0]
# Get the first value in the deque that is greater than greater_than.
# Discard all values before it.
if greater_than is not None:
try:
while q[0] <= greater_than:
q.popleft()
except IndexError:
# This deque is empty. The generator must terminate.
return
# At this point, the first value in the deque is greater than
# greater_than.
for value in q:
# Construct the tuple, starting with the value from the deque.
head = (value,)
# If there are more deques, values from them will form the rest of
# the tuple. Otherwise, just yield the head with no tail.
if len(list_of_deques) > 1:
# Recursively call this generator on the rest of the deques.
for tail in _deques_increasing_first(
list_of_deques[1:],
value
):
yield head + tail
else:
yield head
@attr.s
class NYUSchedule:
    """One route's timetable: a header row of stop names plus timing rows."""
    # Route name/identifier.
    route = attr.ib(validator=attr.validators.instance_of(str))
    # Stop names, one per column; names may repeat (loops revisit stops).
    header_row = attr.ib(validator=attr.validators.instance_of(list))
    # Timetable rows, parallel to header_row.
    other_rows = attr.ib(validator=attr.validators.instance_of(list))
    # Days on which this schedule applies.
    days_of_week = attr.ib()
    def get_columns_indices(self, *nodes):
        '''
        Yields tuples of indices. In each tuple, the nth item is an index of
        self.header_row where the value equals the nth argument (not counting
        self). In each tuple, every item is greater than the last.
        '''
        # There is no guarantee that all values in the header row are unique.
        # Also, there is no guarantee that the requested nodes are different.
        # We assume that vehicles travel to the stops in the order in which
        # they are stored in the schedule from left to right; each node must be
        # to the right of the last.
        nodes_occurrences = [collections.deque() for _ in nodes]
        for index, header in enumerate(self.header_row):
            for occurrences, node in zip(nodes_occurrences, nodes):
                if header == node:
                    occurrences.append(index)
        # Find combinations of indices. Each combination contains one index of
        # an occurrence of each requested node. We can take advantage of the
        # fact that the lists of indices of occurrences are sorted.
        return _deques_increasing_first(nodes_occurrences)
    def get_column_indices(self, from_node):
        '''
        Yields the indices that correspond to values in self.header_row that
        are equal to from_node.
        '''
        for i, v in enumerate(self.header_row):
            if v == from_node:
                yield i
@attr.s
class NYUTime:
    """A single scheduled stop time, with pickup/drop-off availability flags."""
    def __str__(self):
        # Render as "<time> (DO[, PU][, Soft])".
        options = ["DO"] # drop-off is always available
        if self.pickup:
            options.append("PU")
        if self.soft:
            options.append("Soft")
        return str(self.time) + \
            (" (" + ", ".join(options) + ")" if options else "")
    def __bool__(self):
        # Falsy when the offset from midnight is zero.
        return bool(self.time)
    # Instead of representing the time as a time object, the time should be
    # represented as the amount of time since midnight. This allows schedules
    # to wrap to the next day.
    time = attr.ib(validator=attr.validators.instance_of(datetime.timedelta))
    # If True, then the user can board the vehicle at this time.
    pickup = attr.ib(validator=attr.validators.instance_of(bool))
    # If True, a rider must signal the driver to stop here.
    soft = attr.ib(validator=attr.validators.instance_of(bool), default=False)
| 295 | 2,280 | 44 |
16c4b4235cced384a6bb0b400b8b6044cefc2a83 | 895 | py | Python | parse.py | jt28828/-fontawesome-enum-generator | 30b27885f3c3dadb5b17af5033b4c57169dda8f4 | [
"Unlicense"
] | null | null | null | parse.py | jt28828/-fontawesome-enum-generator | 30b27885f3c3dadb5b17af5033b4c57169dda8f4 | [
"Unlicense"
] | null | null | null | parse.py | jt28828/-fontawesome-enum-generator | 30b27885f3c3dadb5b17af5033b4c57169dda8f4 | [
"Unlicense"
] | null | null | null | import json
from http.client import HTTPResponse
from typing import TextIO, Dict
from urllib import request
from platforms.cpp import as_cpp_enum
from platforms.csharp import as_csharp_enum
from platforms.java import as_java_enum
from platforms.python import as_python_enum
from platforms.typescript import as_typescript_enum
def read_icons_json() -> Dict:
"""Opens the icons.json and converts into a json object"""
json_file: HTTPResponse = request.urlopen(
"https://raw.githubusercontent.com/FortAwesome/Font-Awesome/master/metadata/icons.json")
file_contents = json_file.read()
return json.loads(file_contents)
print_first()
| 27.121212 | 96 | 0.774302 | import json
from http.client import HTTPResponse
from typing import TextIO, Dict
from urllib import request
from platforms.cpp import as_cpp_enum
from platforms.csharp import as_csharp_enum
from platforms.java import as_java_enum
from platforms.python import as_python_enum
from platforms.typescript import as_typescript_enum
def read_icons_json() -> Dict:
    """Download Font Awesome's icons.json metadata from GitHub and parse it
    into a dict (icon name -> icon metadata, per upstream's schema)."""
    # NOTE(review): the HTTP response is never closed; consider
    # contextlib.closing or a with-statement.
    json_file: HTTPResponse = request.urlopen(
        "https://raw.githubusercontent.com/FortAwesome/Font-Awesome/master/metadata/icons.json")
    file_contents = json_file.read()
    return json.loads(file_contents)
def print_first():
    """Fetch the Font Awesome icon metadata and generate an enum for every
    supported target language (C#, TypeScript, Python, Java, C++)."""
    icon_json = read_icons_json()
    # Generate all the enums
    as_csharp_enum(icon_json)
    as_typescript_enum(icon_json)
    as_python_enum(icon_json)
    as_java_enum(icon_json)
    as_cpp_enum(icon_json)
| 209 | 0 | 23 |
9ba05970e994b87ae3a885e7676a8b4773c7f636 | 10,488 | py | Python | EEGNAS/utilities/data_utils.py | puzis/OverflowPrediction | 01341df701e513025cb427d4cdf1db0868a5963b | [
"MIT"
] | 5 | 2019-11-19T11:53:23.000Z | 2022-03-11T05:54:46.000Z | EEGNAS/utilities/data_utils.py | puzis/OverflowPrediction | 01341df701e513025cb427d4cdf1db0868a5963b | [
"MIT"
] | 5 | 2020-05-29T23:53:14.000Z | 2022-03-12T00:05:11.000Z | EEGNAS/utilities/data_utils.py | erap129/EEGNAS | 1d9c94b106d40317146f7f09d79fad489f1059dc | [
"MIT"
] | 1 | 2021-12-17T14:25:04.000Z | 2021-12-17T14:25:04.000Z | import os
from copy import deepcopy
import mne
import torch
from braindecode.torch_ext.util import np_to_var
from mne.time_frequency import tfr_morlet
from EEGNAS import global_vars
import numpy as np
from scipy.io import savemat
from PIL import Image
from EEGNAS.utilities.misc import create_folder, label_by_idx, unify_dataset
from sktime.utils.load_data import load_from_tsfile_to_dataframe
import pandas as pd
import numpy as np
from EEGNAS.visualization.wavelet_functions import get_tf_data_efficient
# def EEG_to_TF_mike(dataset):
# for segment in dataset.keys():
# TF_list = []
# for example in range(len(dataset[segment].X)):
# for channel_idx in lenexample:
# tf = get_tf_data_efficient(example[None, :, :], eeg_chan,
# global_vars.get('frequency'), global_vars.get('num_frex'),
# dB=global_vars.get('db_normalization'))
# TF_list.append(tf)
| 40.338462 | 131 | 0.600973 | import os
from copy import deepcopy
import mne
import torch
from braindecode.torch_ext.util import np_to_var
from mne.time_frequency import tfr_morlet
from EEGNAS import global_vars
import numpy as np
from scipy.io import savemat
from PIL import Image
from EEGNAS.utilities.misc import create_folder, label_by_idx, unify_dataset
from sktime.utils.load_data import load_from_tsfile_to_dataframe
import pandas as pd
import numpy as np
from EEGNAS.visualization.wavelet_functions import get_tf_data_efficient
def get_dummy_input():
    """Return a random float32 torch Variable shaped like a 2-sample EEG batch:
    (2, eeg_chans, input_height, input_width), dimensions taken from global_vars."""
    input_shape = (2, global_vars.get('eeg_chans'), global_vars.get('input_height'), global_vars.get('input_width'))
    return np_to_var(np.random.random(input_shape).astype(np.float32))
def prepare_data_for_NN(X):
    """Convert a numpy batch to a torch tensor ready for the network.

    Adds a trailing singleton dimension to 3-D input, wraps it via
    np_to_var (pinning memory if configured), and moves it to GPU 0
    when CUDA is available.
    """
    if X.ndim == 3:
        X = X[:, :, :, None]
    X = np_to_var(X, pin_memory=global_vars.get('pin_memory'))
    if torch.cuda.is_available():
        with torch.cuda.device(0):
            X = X.cuda()
    return X
def split_sequence(sequence, n_steps, n_steps_ahead, jumps, buffer):
    """Slice a 1-D series into (input window, target window) training pairs.

    Each input window holds n_steps consecutive values; its target holds the
    n_steps_ahead values beginning `buffer` positions after the window ends.
    Only windows whose end index is a multiple of `jumps` are kept, and
    slicing stops as soon as a target would run past the series.

    Returns a pair of numpy arrays (inputs, targets).
    """
    inputs, targets = [], []
    for start in range(len(sequence)):
        stop = start + n_steps
        # Keep only windows aligned to the `jumps` stride.
        if stop % jumps != 0:
            continue
        target_stop = stop + buffer + n_steps_ahead
        # Stop once the target window would overrun the series.
        if target_stop - 1 > len(sequence) - 1:
            break
        inputs.append(sequence[start:stop])
        targets.append(sequence[stop + buffer:target_stop])
    return np.array(inputs), np.array(targets)
def split_parallel_sequences(sequences, n_steps, n_steps_ahead, jumps, buffer):
X, y = list(), list()
for i in range(len(sequences)):
end_ix = i + n_steps
if end_ix % jumps != 0:
continue
if end_ix + n_steps_ahead + buffer - 1 > len(sequences) - 1:
break
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix+buffer:end_ix+buffer+n_steps_ahead, :]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
def noise_input(data, devs_id):
    """Return a copy of `data` with a random subset of time steps replaced by
    Gaussian noise.

    For every sample in the batch, `steps * noise_ratio` time indices are
    drawn without replacement (configuration from global_vars) and, for each
    device/channel in devs_id, those steps are overwritten with draws from a
    normal distribution matching that sample's mean and std. The input array
    is not modified.
    """
    noise_data = deepcopy(data)
    for id_in_batch in range(data.shape[0]):
        noise_steps = np.random.choice(range(global_vars.get('steps')), size=int(global_vars.get('steps')
                                                        * global_vars.get('noise_ratio')), replace=False)
        # Match the noise statistics to this particular sample.
        b_mean = np.mean(data[id_in_batch])
        b_std = np.std(data[id_in_batch])
        for dev_id in range(len(devs_id)):
            noise_data[id_in_batch][dev_id][noise_steps] = np.random.normal(b_mean, b_std)
    return noise_data
def unison_shuffled_copies(a, b):
    """Shuffle two equal-length arrays with one shared random permutation,
    preserving the element-wise pairing between them."""
    assert len(a) == len(b)
    shared_order = np.random.permutation(len(a))
    return a[shared_order], b[shared_order]
def calc_regression_accuracy(y_pred, y_real, threshold, moving_threshold):
    """Binarize regression outputs against a threshold.

    Pairs with a NaN on either side are skipped entirely. The threshold is
    either the fixed `threshold`, or the per-index value from
    `moving_threshold` when that argument is anything other than False.

    Returns (actual, predicted): lists of 0/1 values for the real and
    predicted series respectively.
    """
    actual, predicted = [], []
    for idx, (pred_val, real_val) in enumerate(zip(y_pred, y_real)):
        # Drop pairs that cannot be compared.
        if np.isnan(pred_val) or np.isnan(real_val):
            continue
        if moving_threshold is False:
            thresh = threshold
        else:
            thresh = moving_threshold[idx]
        predicted.append((pred_val > thresh).astype('int'))
        actual.append((real_val > thresh).astype('int'))
    return actual, predicted
def aggregate_accuracies(ys, agg_len):
    """Downsample each 0/1 series in `ys` by non-overlapping windows of
    length agg_len.

    A window aggregates to 1 when it contains more than one positive
    (i.e. at least two), else 0.
    NOTE(review): the '> 1' threshold is kept from the original; confirm
    it should not be '>= 1' (any positive in the window).

    Returns a list of numpy arrays, one per input series, each of length
    len(y) // agg_len.
    """
    ys_new = []
    for y in ys:
        num_windows = len(y) // agg_len
        y_new = np.zeros(num_windows)
        # BUG FIX: the original iterated range(0, len(y), agg_len), so when
        # len(y) was not a multiple of agg_len the trailing partial window
        # produced an out-of-range index into y_new (IndexError). Only
        # complete windows are aggregated now, matching y_new's size.
        for w in range(num_windows):
            window = y[w * agg_len:(w + 1) * agg_len]
            y_new[w] = 1 if np.sum(window) > 1 else 0
        ys_new.append(y_new)
    return ys_new
def write_config(dict, filename):
    """Dump the current global configuration to a tab-separated text file.

    The keys to export are collected from the two-level mapping ``dict``;
    each output line has the form ``<key>\\t<value>``. String values are
    quoted so the file can be parsed back with ``eval`` by
    ``load_values_from_config``.

    :param dict: two-level mapping whose inner keys name the globals to dump
        (parameter name shadows the builtin ``dict``; kept for backward
        compatibility with existing callers)
    :param filename: path of the config file to (over)write
    """
    with open(filename, 'w') as f:
        all_keys = []
        for _, inner_dict in sorted(dict.items()):
            for K, _ in sorted(inner_dict.items()):
                all_keys.append(K)
        for K in all_keys:
            # BUG FIX: the original tested ``type(...) == 'str'`` -- a type
            # compared to a string, which is always False -- so string values
            # were written unquoted and could not be eval'ed back on load.
            if isinstance(global_vars.get(K), str):
                f.write(f"{K}\t'{global_vars.get(K)}'\n")
            else:
                f.write(f"{K}\t{global_vars.get(K)}\n")
def export_data_to_file(dataset, format, out_folder, classes=None, transpose_time=False, unify=False):
    """Export a dataset's X/y arrays to disk as numpy or MATLAB files.

    :param dataset: mapping of segment name (e.g. 'train'/'test') to an
        object exposing ``X`` and ``y`` arrays
    :param format: 'numpy' (.npy via np.save) or 'matlab' (.mat via savemat)
    :param out_folder: output directory, created if missing
    :param classes: optional list of class indices; when given, one file pair
        is written per class, suffixed with the class label
    :param transpose_time: if True, swap the last two axes of X before saving
    :param unify: if True, merge all segments into a single 'all' segment first
    """
    create_folder(out_folder)
    if unify:
        dataset = unify_dataset(dataset)
        dataset = {'all': dataset}
    for segment in dataset.keys():
        if classes is None:
            # Single export containing every class.
            X_data = [dataset[segment].X]
            y_data = [dataset[segment].y]
            class_strs = ['']
        else:
            # One export per requested class, filtered by label.
            X_data = []
            y_data = []
            class_strs = []
            for class_idx in classes:
                X_data.append(dataset[segment].X[np.where(dataset[segment].y == class_idx)])
                y_data.append(dataset[segment].y[np.where(dataset[segment].y == class_idx)])
                class_strs.append(f'_{label_by_idx(class_idx)}')
        for X,y,class_str in zip(X_data, y_data, class_strs):
            if transpose_time:
                X = np.transpose(X, (0, 2, 1))
            if format == 'numpy':
                np.save(f'{out_folder}/X_{segment}{class_str}', X)
                np.save(f'{out_folder}/y_{segment}{class_str}', y)
            elif format == 'matlab':
                # MATLAB-side convention: move the example axis last.
                X = np.transpose(X, [1, 2, 0])
                savemat(f'{out_folder}/X_{segment}{class_str}.mat', {'data': X})
                savemat(f'{out_folder}/y_{segment}{class_str}.mat', {'data': y})
def EEG_to_TF_mne(dataset):
    """Replace each segment's raw EEG epochs with Morlet time-frequency power maps, in place.

    Builds an MNE info object from the configured channel count and sampling
    frequency, then computes per-epoch wavelet power and stacks the results
    back into ``dataset[segment].X``. Assumes X has shape
    (examples, eeg_chans, samples) -- TODO confirm against callers.
    """
    ch_names = [str(i) for i in range(global_vars.get('eeg_chans'))]
    ch_types = ['eeg' for i in range(global_vars.get('eeg_chans'))]
    info = mne.create_info(ch_names=ch_names, sfreq=global_vars.get('frequency'), ch_types=ch_types)
    freqs = np.arange(1, global_vars.get('max_tf_freq'), 1)  # frequencies of interest
    n_cycles = freqs / 4.  # different number of cycle per frequency
    for segment in dataset.keys():
        TF_list = []
        epochs = mne.EpochsArray(dataset[segment].X, info=info, baseline=(0, 0.5))
        for idx in range(len(dataset[segment].X)):
            # One epoch at a time keeps peak memory bounded.
            power = tfr_morlet(epochs[idx], freqs=freqs, n_cycles=n_cycles,
                               return_itc=False, decim=3, n_jobs=1)
            TF_list.append(power.data.astype(np.float32))
        dataset[segment].X = np.stack(TF_list, axis=0)
# def EEG_to_TF_mike(dataset):
# for segment in dataset.keys():
# TF_list = []
# for example in range(len(dataset[segment].X)):
# for channel_idx in lenexample:
# tf = get_tf_data_efficient(example[None, :, :], eeg_chan,
# global_vars.get('frequency'), global_vars.get('num_frex'),
# dB=global_vars.get('db_normalization'))
# TF_list.append(tf)
def EEG_to_TF_matlab(dataset, out_folder):
    """Compute time-frequency maps via EEGLAB's ``newtimef`` (through Octave)
    and save them per segment as .npy files.

    The 'train' segment is skipped. Output arrays have shape
    (examples, eeg_chans, 49, 50) -- the output grid of ``newtimef`` with the
    parameters below. Failed Octave calls are retried until they succeed.
    """
    create_folder(out_folder)
    for segment in dataset.keys():
        if segment == 'train':
            continue
        TF_array = np.zeros((len(dataset[segment].X), global_vars.get('eeg_chans'), 49, 50))
        for ex_idx, example in enumerate(dataset[segment].X):
            # Fresh Octave session per example; EEGLAB paths must be re-added each time.
            with oct2py.Oct2Py() as octave:
                octave.addpath('eeglab/functions/guifunc')
                octave.addpath('eeglab/functions/popfunc')
                octave.addpath('eeglab/functions/adminfunc')
                octave.addpath('eeglab/functions/sigprocfunc')
                octave.addpath('eeglab/functions/miscfunc')
                octave.addpath('eeglab/functions/timefreqfunc')
                for ch_idx, channel in enumerate(example):
                    finished = False
                    # NOTE(review): retries forever on a persistently failing
                    # channel -- consider bounding the retry count.
                    while not finished:
                        try:
                            tf = octave.newtimef(channel.reshape(1, -1), global_vars.get('input_height'), [0, 4500], 250, [3, 0.5],
                                                 'baseline', 0, 'plotphase', 'off', 'padratio', 1, 'ntimesout', 50)
                            TF_array[ex_idx, ch_idx] = tf
                            finished = True
                            print(f'created TF for example {ex_idx}/{len(dataset[segment].X)}, '
                                  f'channel {ch_idx}/{len(example)} in {segment} data\n')
                        except Exception as e:
                            print(f'failed TF for example {ex_idx}/{len(dataset[segment].X)}, '
                                  f'channel {ch_idx}/{len(example)} in {segment} data with msg {str(e)}'
                                  f'\ntrying again...\n')
        # Save once per segment (indentation reconstructed: the filename is
        # segment-specific, so saving belongs inside the segment loop).
        np.save(f'{out_folder}/X_{segment}_{global_vars.get("dataset")}_TF_matlab', TF_array)
        np.save(f'{out_folder}/y_{segment}_{global_vars.get("dataset")}_TF_matlab', dataset[segment].y)
def tensor_to_eeglab(X, filepath):
    """Save a torch tensor as an EEGLAB-style .mat file.

    Assumes X has shape (examples, chans, samples, 1) -- the trailing width
    axis is squeezed and axes are reordered to (chans, samples, examples),
    the layout EEGLAB expects. TODO confirm shape against callers.
    """
    savemat(filepath, {'data': np.transpose(X.cpu().detach().numpy().squeeze(axis=3), [1, 2, 0])})
def sktime_to_numpy(file):
    """Load a sktime .ts file and convert it to a fixed-size numpy array.

    Each cell of the sktime dataframe holds a per-dimension pd.Series;
    series shorter than the configured 'input_height' are right-padded
    with zeros.

    :param file: path to the .ts file
    :return: (X, y) where X has shape (examples, dims, input_height) and y
        holds integer-encoded class codes
    """
    X_ts, y = load_from_tsfile_to_dataframe(file)
    max_len = global_vars.get('input_height')
    X = np.zeros((len(X_ts), len(X_ts.columns), max_len))
    for i in range(len(X_ts)):
        for col_idx, col in enumerate(X_ts.columns):
            # Zero-pad each per-dimension series up to max_len.
            X[i, col_idx] = np.pad(X_ts.iloc[i][col].values, pad_width=(0,max_len-len(X_ts.iloc[i][col].values)), mode='constant')
    return X, pd.Categorical(pd.Series(y)).codes
def set_global_vars_by_sktime(train_file, test_file):
    """Infer input dimensions from sktime train/test files and store them in global_vars.

    Sets 'input_height' to the longest 'dim_0' series across both files,
    'eeg_chans' to the number of dataframe columns (dimensions) and
    'n_classes' to the number of distinct training labels.
    """
    X_train_ts, y_train = load_from_tsfile_to_dataframe(train_file)
    X_test_ts, y_test = load_from_tsfile_to_dataframe(test_file)
    train_max_len = max([len(X_train_ts.iloc[i]['dim_0']) for i in range(len(X_train_ts))])
    test_max_len = max([len(X_test_ts.iloc[i]['dim_0']) for i in range(len(X_test_ts))])
    max_len = max(train_max_len, test_max_len)
    global_vars.set('input_height', max_len)
    global_vars.set('eeg_chans', len(X_train_ts.columns))
    global_vars.set('n_classes', len(np.unique(y_train)))
def set_global_vars_by_dataset(data):
    """Populate global_vars with the dataset's dimensions.

    Stores the channel count, the time-axis length, the width axis (4-D data
    only) and the number of distinct labels.
    """
    shape = data.X.shape
    global_vars.set('eeg_chans', shape[1])
    global_vars.set('input_height', shape[2])
    if len(shape) == 4:
        global_vars.set('input_width', shape[3])
    global_vars.set('n_classes', len(np.unique(data.y)))
def load_values_from_config(config_file, keys):
    """Load selected configuration values from a tab-separated config file.

    Each line has the form ``<key>\\t<value>`` (as produced by
    ``write_config``). Values are parsed with ``eval``, falling back to the
    raw string when they are not valid Python.

    :param config_file: path to the file produced by ``write_config``
    :param keys: iterable of keys to load into ``global_vars``
    """
    configuration = {}
    # Context manager so the handle is always closed (the original leaked it).
    with open(config_file, 'r') as f:
        for line in f:
            # partition() keeps any further tabs inside the value, and
            # rstrip('\n') -- unlike the original unconditional [:-1] --
            # does not chop a real character when the final line has no
            # trailing newline.
            key, _, value = line.partition('\t')
            configuration[key] = value.rstrip('\n')
    for key in keys:
        if key in configuration:
            try:
                # NOTE: eval of config text -- only use with trusted config files.
                global_vars.set(key, eval(configuration[key]))
            except (SyntaxError, NameError):
                # Unparsable (e.g. an unquoted string): keep the raw text.
                global_vars.set(key, configuration[key])
| 9,106 | 0 | 391 |
5c2a77eb4d2f7a165f41b1eba1961ca14e7b9aff | 1,726 | py | Python | tick/linear_model/tests/model_hinge_test.py | sumau/tick | 1b56924a35463e12f7775bc0aec182364f26f2c6 | [
"BSD-3-Clause"
] | 411 | 2017-03-30T15:22:05.000Z | 2022-03-27T01:58:34.000Z | tick/linear_model/tests/model_hinge_test.py | saurabhdash/tick | bbc561804eb1fdcb4c71b9e3e2d83a66e7b13a48 | [
"BSD-3-Clause"
] | 345 | 2017-04-13T14:53:20.000Z | 2022-03-26T00:46:22.000Z | tick/linear_model/tests/model_hinge_test.py | saurabhdash/tick | bbc561804eb1fdcb4c71b9e3e2d83a66e7b13a48 | [
"BSD-3-Clause"
] | 102 | 2017-04-25T11:47:53.000Z | 2022-02-15T11:45:49.000Z | # License: BSD 3 clause
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from tick.linear_model import SimuLogReg, ModelHinge
from tick.base_model.tests.generalized_linear_model import TestGLM
if __name__ == '__main__':
unittest.main()
| 33.192308 | 78 | 0.672654 | # License: BSD 3 clause
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from tick.linear_model import SimuLogReg, ModelHinge
from tick.base_model.tests.generalized_linear_model import TestGLM
class ModelHingeTest(object):
    # Test logic only -- mixed into the TestGLM-based classes below, which
    # supply ``self.dtype``, ``run_test_for_glm`` and
    # ``_test_glm_intercept_vs_hardcoded_intercept``.
    def test_ModelHinge(self):
        """...Numerical consistency check of loss and gradient for Hinge model
        """
        np.random.seed(12)  # fixed seed keeps the simulated data deterministic
        n_samples, n_features = 5000, 10
        w0 = np.random.randn(n_features)
        c0 = np.random.randn()
        # First check with intercept
        X, y = SimuLogReg(w0, c0, n_samples=n_samples, verbose=False,
                          dtype=self.dtype).simulate()
        X_spars = csr_matrix(X, dtype=self.dtype)
        model = ModelHinge(fit_intercept=True).fit(X, y)
        model_spars = ModelHinge(fit_intercept=True).fit(X_spars, y)
        # Dense and sparse inputs must yield consistent loss/gradient values.
        self.run_test_for_glm(model, model_spars)
        self._test_glm_intercept_vs_hardcoded_intercept(model)
        # Then check without intercept
        X, y = SimuLogReg(w0, None, n_samples=n_samples, verbose=False,
                          seed=2038, dtype=self.dtype).simulate()
        X_spars = csr_matrix(X)
        model = ModelHinge(fit_intercept=False).fit(X, y)
        model_spars = ModelHinge(fit_intercept=False).fit(X_spars, y)
        self.run_test_for_glm(model, model_spars)
class ModelHingeTestFloat32(TestGLM, ModelHingeTest):
    # Runs the shared ModelHinge checks with single-precision inputs.
    def __init__(self, *args, **kwargs):
        TestGLM.__init__(self, *args, dtype="float32", **kwargs)
class ModelHingeTestFloat64(TestGLM, ModelHingeTest):
    # Runs the shared ModelHinge checks with double-precision inputs.
    def __init__(self, *args, **kwargs):
        TestGLM.__init__(self, *args, dtype="float64", **kwargs)
if __name__ == '__main__':
unittest.main()
| 160 | 1,175 | 121 |
cabb93dc958813657f0ef4295e937bba72d8c4bf | 2,473 | py | Python | ui/ChannelVersionItem.py | game-platform-awaresome/XSdkTools | 2d5454f998014c130a28695dfcd9da155d20c9e9 | [
"MIT"
] | 2 | 2020-09-24T10:47:27.000Z | 2020-09-24T10:49:57.000Z | ui/ChannelVersionItem.py | game-platform-awaresome/XSdkTools | 2d5454f998014c130a28695dfcd9da155d20c9e9 | [
"MIT"
] | null | null | null | ui/ChannelVersionItem.py | game-platform-awaresome/XSdkTools | 2d5454f998014c130a28695dfcd9da155d20c9e9 | [
"MIT"
] | 4 | 2019-03-25T04:22:30.000Z | 2021-05-16T12:52:41.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ChannelVersionItem.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
import res_rc
| 32.116883 | 86 | 0.672463 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ChannelVersionItem.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # Fallback for PyQt builds without QString.fromUtf8 (API v2 /
        # Python 3): strings are already unicode, so pass them through.
        return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        # Qt exposes an application encoding constant: translate with it.
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Fallback when UnicodeUTF8 is unavailable (newer PyQt4 API):
        # translate without an explicit encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    # Auto-generated by pyuic4 from ChannelVersionItem.ui -- do not edit by
    # hand beyond comments; regeneration overwrites this file.
    def setupUi(self, Form):
        # Build the 498x42 row widget and apply its stylesheet.
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(498, 42)
        Form.setStyleSheet(_fromUtf8("QToolButton[objectName=\"closetoolButton\"] {\n"
        "border:0px;\n"
        "}\n"
        "QToolButton[objectName=\"closetoolButton\"]:hover {\n"
        "image:url(:/images/close_hover.png);\n"
        "}\n"
        "QToolButton[objectName=\"closetoolButton\"]:pressed {\n"
        "image:url(:/images/close_pressed.png);\n"
        "}\n"
        "\n"
        "QWidget[objectName=\"widget\"]{\n"
        "background-image: url(:/images/funcell_bg.png);\n"
        "}\n"
        "QPushButton[objectName=\"pushButton\"]{\n"
        "background-color: rgb(0, 170, 255);\n"
        "color: rgb(255, 255, 255);\n"
        "}\n"
        "QPushButton[objectName=\"pushButton_2\"]{\n"
        "background-color: rgb(0, 165, 0);\n"
        "color: rgb(255, 255, 255);\n"
        "}\n"
        "QCheckBox[objectName=\"checkBox\"]{\n"
        "color: rgb(255, 0, 0);\n"
        "}\n"
        "QWidget[objectName=\"Form\"]{\n"
        "background-color: rgb(255, 255, 255);\n"
        "}\n"
        ""))
        # Three fixed-position labels: time / version / description columns.
        self.label = QtGui.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(50, 15, 110, 12))
        self.label.setObjectName(_fromUtf8("label"))
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(200, 15, 110, 12))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.label_3 = QtGui.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(360, 15, 110, 12))
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # Label texts (Chinese): 时间 = time, 版本 = version, 描述 = description.
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.label.setText(_translate("Form", "时间", None))
        self.label_2.setText(_translate("Form", "版本", None))
        self.label_3.setText(_translate("Form", "描述", None))
import res_rc
| 1,931 | 1 | 154 |
38532ff4490c410439672db3e62f93714c2f46ba | 4,077 | py | Python | tools/collect_data.py | arora-anmol/producer-consumer | 92c15c1e6ef47e970becfb6a1f14ab0b7c4b83a4 | [
"MIT"
] | null | null | null | tools/collect_data.py | arora-anmol/producer-consumer | 92c15c1e6ef47e970becfb6a1f14ab0b7c4b83a4 | [
"MIT"
] | null | null | null | tools/collect_data.py | arora-anmol/producer-consumer | 92c15c1e6ef47e970becfb6a1f14ab0b7c4b83a4 | [
"MIT"
] | null | null | null | import subprocess
import re
import os
from math import sqrt
import sys
# SETTINGS
PROG = "./produce.out"
# FOR TESTING: reduce this number when debugging
X = 500 # number of times to repeat the test
# This data structure holds the raw data for each test run
times = []
DEBUG = True
def call_produce(program, cmd_str, num_repeat_calls):
'''
This function calls the program `PROG`, with the commands provided by `cmds`, `X` number of times.
The timing provided by the program (printed on the last line) will be saved in the `times` global variable.
@param `program` string to the program `produce` to run
@param `cmd_str` is a string containing the parameters to send to `program`
'''
print 'Calling "{} {}", {} times'.format(program, cmd_str, num_repeat_calls)
my_times = []
for i in xrange(num_repeat_calls):
output = subprocess.check_output('{} {}'.format(program, cmd_str), stderr=subprocess.STDOUT, shell=True)
matchObj = re.search(r'System execution time: ([0-9.]+) seconds', output)
if matchObj:
if DEBUG and i==0:
print ' > First Returned Time: {} sec'.format(matchObj.group(1))
my_times.append(1000.0 * float(matchObj.group(1)))
else:
print '\nError trying to find time for the following output:'
print output
quit(1)
if i % 10 == 0:
print '.',
sys.stdout.flush()
times.append({'cmd':cmd_str, 'times':my_times})
print ''
def generate_test_data():
'''
Calls the specific test cases asked for by the lab.
'''
test_cases = [
# N=100, B=4
{'N':100, 'B':4, 'P':1, 'C':1},
{'N':100, 'B':4, 'P':1, 'C':2},
{'N':100, 'B':4, 'P':1, 'C':3},
{'N':100, 'B':4, 'P':2, 'C':1},
{'N':100, 'B':4, 'P':3, 'C':1},
{'N':100, 'B':4, 'P':2, 'C':2},
{'N':100, 'B':4, 'P':3, 'C':3},
#############################
# N=100, B=8
{'N':100, 'B':8, 'P':1, 'C':1},
{'N':100, 'B':8, 'P':1, 'C':2},
{'N':100, 'B':8, 'P':1, 'C':3},
{'N':100, 'B':8, 'P':2, 'C':1},
{'N':100, 'B':8, 'P':3, 'C':1},
{'N':100, 'B':8, 'P':2, 'C':2},
{'N':100, 'B':8, 'P':3, 'C':3},
#############################
# N=398, B=8
{'N':398, 'B':8, 'P':1, 'C':1},
{'N':398, 'B':8, 'P':1, 'C':2},
{'N':398, 'B':8, 'P':1, 'C':3},
{'N':398, 'B':8, 'P':2, 'C':1},
{'N':398, 'B':8, 'P':3, 'C':1},
{'N':398, 'B':8, 'P':2, 'C':2},
{'N':398, 'B':8, 'P':3, 'C':3},
]
i = 1
for t in test_cases:
print 'Test Case: {}/{}'.format(i, len(test_cases))
i += 1
call_produce(PROG, '{} {} {} {}'.format(t['N'], t['B'], t['P'], t['C']), X)
print ''
def generate_stats_table():
'''
Converts the raw times in `times` into a text table
containing the average time and the standard deviation.
'''
with open('lab3-stats.csv', 'w') as file:
file.write("N,B,P,C,Average Time (ms),Standard Deviation (ms)\n")
for t in times:
avg = sum(t['times']) / float(len(t['times']))
std = sqrt(float(reduce(lambda x, y: x + y, map(lambda x: (x - avg) ** 2, t['times']))) / float(len(t['times'])))
k = t['cmd'].split()
file.write('{},{},{},{},{},{}\n'.format(k[0],k[1],k[2],k[3], avg, std))
print 'Written the statistics out to lab3-stats.csv'
def dump_raw_times():
'''
Writes the raw times to a csv file
'''
with open('lab3-times.csv', 'w') as file:
for k in times:
t = str(k['times'])
file.write('{},{}\n'.format(k['cmd'],t[1:-1]))
print 'Written the raw times out to lab3-times.csv'
if __name__ == '__main__':
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
main()
| 30.654135 | 125 | 0.498896 | import subprocess
import re
import os
from math import sqrt
import sys
# SETTINGS
PROG = "./produce.out"
# FOR TESTING: reduce this number when debugging
X = 500 # number of times to repeat the test
# This data structure holds the raw data for each test run
times = []
DEBUG = True
def call_produce(program, cmd_str, num_repeat_calls):
    '''
    This function calls the program `PROG`, with the commands provided by `cmds`, `X` number of times.
    The timing provided by the program (printed on the last line) will be saved in the `times` global variable.
    @param `program` string to the program `produce` to run
    @param `cmd_str` is a string containing the parameters to send to `program`
    @param `num_repeat_calls` number of times to invoke `program`
    '''
    print 'Calling "{} {}", {} times'.format(program, cmd_str, num_repeat_calls)
    my_times = []
    for i in xrange(num_repeat_calls):
        # Run the producer/consumer binary; capture stdout and stderr together.
        output = subprocess.check_output('{} {}'.format(program, cmd_str), stderr=subprocess.STDOUT, shell=True)
        # On success the binary reports "System execution time: <sec> seconds".
        matchObj = re.search(r'System execution time: ([0-9.]+) seconds', output)
        if matchObj:
            if DEBUG and i==0:
                print ' > First Returned Time: {} sec'.format(matchObj.group(1))
            my_times.append(1000.0 * float(matchObj.group(1)))  # store milliseconds
        else:
            print '\nError trying to find time for the following output:'
            print output
            quit(1)
        if i % 10 == 0:
            # Progress dot every 10 runs; flush so it shows immediately.
            print '.',
            sys.stdout.flush()
    times.append({'cmd':cmd_str, 'times':my_times})  # accumulate module-level results
    print ''
def generate_test_data():
    '''
    Calls the specific test cases asked for by the lab.
    Each case is a dict of N (items), B (buffer size), P (producers) and
    C (consumers); every case is repeated `X` times by call_produce.
    '''
    test_cases = [
        # N=100, B=4
        {'N':100, 'B':4, 'P':1, 'C':1},
        {'N':100, 'B':4, 'P':1, 'C':2},
        {'N':100, 'B':4, 'P':1, 'C':3},
        {'N':100, 'B':4, 'P':2, 'C':1},
        {'N':100, 'B':4, 'P':3, 'C':1},
        {'N':100, 'B':4, 'P':2, 'C':2},
        {'N':100, 'B':4, 'P':3, 'C':3},
        #############################
        # N=100, B=8
        {'N':100, 'B':8, 'P':1, 'C':1},
        {'N':100, 'B':8, 'P':1, 'C':2},
        {'N':100, 'B':8, 'P':1, 'C':3},
        {'N':100, 'B':8, 'P':2, 'C':1},
        {'N':100, 'B':8, 'P':3, 'C':1},
        {'N':100, 'B':8, 'P':2, 'C':2},
        {'N':100, 'B':8, 'P':3, 'C':3},
        #############################
        # N=398, B=8
        {'N':398, 'B':8, 'P':1, 'C':1},
        {'N':398, 'B':8, 'P':1, 'C':2},
        {'N':398, 'B':8, 'P':1, 'C':3},
        {'N':398, 'B':8, 'P':2, 'C':1},
        {'N':398, 'B':8, 'P':3, 'C':1},
        {'N':398, 'B':8, 'P':2, 'C':2},
        {'N':398, 'B':8, 'P':3, 'C':3},
    ]
    i = 1
    for t in test_cases:
        print 'Test Case: {}/{}'.format(i, len(test_cases))
        i += 1
        # Parameters are passed positionally as "N B P C".
        call_produce(PROG, '{} {} {} {}'.format(t['N'], t['B'], t['P'], t['C']), X)
    print ''
def generate_stats_table():
    '''
    Converts the raw times in `times` into a text table
    containing the average time and the standard deviation.
    '''
    with open('lab3-stats.csv', 'w') as file:
        file.write("N,B,P,C,Average Time (ms),Standard Deviation (ms)\n")
        for t in times:
            avg = sum(t['times']) / float(len(t['times']))
            # Population standard deviation, in milliseconds.
            std = sqrt(float(reduce(lambda x, y: x + y, map(lambda x: (x - avg) ** 2, t['times']))) / float(len(t['times'])))
            k = t['cmd'].split()  # cmd was built as "N B P C"
            file.write('{},{},{},{},{},{}\n'.format(k[0],k[1],k[2],k[3], avg, std))
    print 'Written the statistics out to lab3-stats.csv'
def dump_raw_times():
    '''
    Writes the raw times to a csv file
    '''
    with open('lab3-times.csv', 'w') as file:
        for k in times:
            t = str(k['times'])
            # t[1:-1] strips the surrounding [ ] so values become csv columns.
            file.write('{},{}\n'.format(k['cmd'],t[1:-1]))
    print 'Written the raw times out to lab3-times.csv'
def main():
    # Run the full experiment: collect raw timings, then emit both reports.
    generate_test_data()
    print '='*50
    generate_stats_table()
    dump_raw_times()
if __name__ == '__main__':
    # cd to this script's directory so the relative ./produce.out path and
    # the output csv files resolve regardless of the caller's cwd.
    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath)
    os.chdir(dname)
    main()
| 80 | 0 | 23 |
f63cabc66794453590aa81d01864709fb76f69b4 | 946 | py | Python | example/client.py | aaronluoq/python-flask | 74bfe8bcd00eee9ce75a15c1634fda4c5d5f26ca | [
"BSD-3-Clause"
] | 136 | 2016-08-24T17:57:45.000Z | 2022-03-17T03:43:19.000Z | example/client.py | aaronluoq/python-flask | 74bfe8bcd00eee9ce75a15c1634fda4c5d5f26ca | [
"BSD-3-Clause"
] | 43 | 2016-12-21T19:11:33.000Z | 2021-06-16T09:10:16.000Z | example/client.py | aaronluoq/python-flask | 74bfe8bcd00eee9ce75a15c1634fda4c5d5f26ca | [
"BSD-3-Clause"
] | 45 | 2016-09-04T03:23:25.000Z | 2022-03-12T20:38:18.000Z | import requests
import time
from opentracing_instrumentation.client_hooks import install_all_patches
from jaeger_client import Config
from os import getenv
# Hosts are configurable via environment variables so the example also runs
# inside docker-compose-style setups.
JAEGER_HOST = getenv('JAEGER_HOST', 'localhost')
WEBSERVER_HOST = getenv('WEBSERVER_HOST', 'localhost')
# Create configuration object with enabled logging and sampling of all requests.
config = Config(config={'sampler': {'type': 'const', 'param': 1},
                        'logging': True,
                        'local_agent': {'reporting_host': JAEGER_HOST}},
                service_name="jaeger_opentracing_example")
tracer = config.initialize_tracer()
# Automatically trace all requests made with 'requests' library.
install_all_patches()
url = "http://{}:5000/log".format(WEBSERVER_HOST)
# Make the actual request to webserver.
requests.get(url)
# allow tracer to flush the spans - https://github.com/jaegertracing/jaeger-client-python/issues/50
time.sleep(2)
tracer.close()
| 33.785714 | 99 | 0.729387 | import requests
import time
from opentracing_instrumentation.client_hooks import install_all_patches
from jaeger_client import Config
from os import getenv
JAEGER_HOST = getenv('JAEGER_HOST', 'localhost')
WEBSERVER_HOST = getenv('WEBSERVER_HOST', 'localhost')
# Create configuration object with enabled logging and sampling of all requests.
config = Config(config={'sampler': {'type': 'const', 'param': 1},
'logging': True,
'local_agent': {'reporting_host': JAEGER_HOST}},
service_name="jaeger_opentracing_example")
tracer = config.initialize_tracer()
# Automatically trace all requests made with 'requests' library.
install_all_patches()
url = "http://{}:5000/log".format(WEBSERVER_HOST)
# Make the actual request to webserver.
requests.get(url)
# allow tracer to flush the spans - https://github.com/jaegertracing/jaeger-client-python/issues/50
time.sleep(2)
tracer.close()
| 0 | 0 | 0 |
c3ced4f9f6cfa360c88c1c1d9ded14000e89bd11 | 1,137 | py | Python | python_mock/print_call.py | enamrik/python-mock | 817bf1ada9346445912e2cd2e2f65306e21cb8d8 | [
"MIT"
] | null | null | null | python_mock/print_call.py | enamrik/python-mock | 817bf1ada9346445912e2cd2e2f65306e21cb8d8 | [
"MIT"
] | null | null | null | python_mock/print_call.py | enamrik/python-mock | 817bf1ada9346445912e2cd2e2f65306e21cb8d8 | [
"MIT"
] | null | null | null | from typing import Any, List
| 36.677419 | 108 | 0.51803 | from typing import Any, List
def print_call(args: List[Any], kwargs: dict):
    """Render a recorded call's positional and keyword arguments as a
    human-readable, indented multi-line string.

    :param args: positional arguments of the call (``None`` treated as empty)
    :param kwargs: keyword arguments of the call (``None`` treated as empty)
    :return: string of the form ``'\\nargs:<...>\\nkwargs:<...>'``
    """
    if kwargs is None:
        kwargs = {}
    if args is None:
        args = []
    def _print_val(arg):
        # Matcher placeholders ({'__match__': ...}) render by their spec;
        # unrecognized payloads fall through to the generic dict rendering.
        if type(arg) == dict and '__match__' in arg:
            match_info = arg['__match__']
            if type(match_info) == str and match_info == '<Any>':
                return "<Any>"
            if type(match_info) == dict:
                return "{}".format(match_info)
            if callable(match_info):
                return "{}".format(match_info)
        if type(arg) == list:
            # BUG FIX: the original enumerated the outer ``args`` here instead
            # of the list actually being rendered, so nested lists (e.g. list
            # values inside kwargs) printed the wrong data.
            print_arg = lambda x: '\t{}: {}'.format(x[0], _print_val(x[1]))
            return '\n'.join(list(map(print_arg, enumerate(list(arg)))))
        if type(arg) == str:
            return arg.replace('\n', '\n\t ')
        if type(arg) == dict:
            return '\n\t '.join(list(map(lambda x: '{}: {}'.format(x[0], _print_val(x[1])), arg.items())))
        if type(arg) == tuple:
            return _print_val(list(arg))
        return '\n\t {}'.format(arg)
    return '\nargs:{}\nkwargs:{}'.format(_print_val(args), _print_val(kwargs))
| 1,084 | 0 | 23 |
dbc6bc86b5512dd0aa5c362462417049896f7956 | 695 | py | Python | keybr worldrecord.py | MarcPartensky/Python-2020 | 1a4ef2edfea6efb353249d5e32c06b230b293c62 | [
"MIT"
] | 1 | 2020-09-02T10:41:49.000Z | 2020-09-02T10:41:49.000Z | keybr worldrecord.py | MarcPartensky/Python-2020 | 1a4ef2edfea6efb353249d5e32c06b230b293c62 | [
"MIT"
] | null | null | null | keybr worldrecord.py | MarcPartensky/Python-2020 | 1a4ef2edfea6efb353249d5e32c06b230b293c62 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pyautogui
import time
driver = webdriver.Chrome()
driver.get('https://www.keybr.com/multiplayer')
# Busy-wait until the race countdown finishes ("GO!" is displayed).
while True:
    ticker = driver.find_element_by_class_name('Ticker')
    if ticker.text == "GO!":
        break
inp = driver.find_element_by_class_name('TextInput-fragment')
text = driver.find_element_by_xpath("//*[@type='text']")
print(text.send_keys('salut'))
time.sleep(1)
# Replay every character of the prompt as a pyautogui key press;
# '␣' and '↵' are keybr's on-screen glyphs for space and enter.
for e in inp.find_elements_by_tag_name('span'):
    print(e)
    if e.text=='␣':
        pyautogui.press(' ')
    elif e.text=='↵':
        pyautogui.press('enter')
    else:
        pyautogui.press(e.text)
    time.sleep(0.01)
| 23.166667 | 62 | 0.682014 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pyautogui
import time
driver = webdriver.Chrome()
driver.get('https://www.keybr.com/multiplayer')
while True:
ticker = driver.find_element_by_class_name('Ticker')
if ticker.text == "GO!":
break
inp = driver.find_element_by_class_name('TextInput-fragment')
text = driver.find_element_by_xpath("//*[@type='text']")
print(text.send_keys('salut'))
time.sleep(1)
for e in inp.find_elements_by_tag_name('span'):
print(e)
if e.text=='␣':
pyautogui.press(' ')
elif e.text=='↵':
pyautogui.press('enter')
else:
pyautogui.press(e.text)
time.sleep(0.01)
| 0 | 0 | 0 |
c2e4b3ca08f10bf7b9d3d1495957a84fd669d006 | 122 | py | Python | Lab3/gauss/__init__.py | neseleznev/urfu-CompExp | 33101e34c6df893b0b19517d694dc55dc7e98f6f | [
"MIT"
] | null | null | null | Lab3/gauss/__init__.py | neseleznev/urfu-CompExp | 33101e34c6df893b0b19517d694dc55dc7e98f6f | [
"MIT"
] | null | null | null | Lab3/gauss/__init__.py | neseleznev/urfu-CompExp | 33101e34c6df893b0b19517d694dc55dc7e98f6f | [
"MIT"
] | null | null | null | from .compact_gauss import compact_gauss_scheme
from .main_item_gauss import main_item_gauss_scheme
__author__ = 'Nikita'
| 30.5 | 51 | 0.868852 | from .compact_gauss import compact_gauss_scheme
from .main_item_gauss import main_item_gauss_scheme
__author__ = 'Nikita'
| 0 | 0 | 0 |
f83fd4e3a1be34b4e93415b72cff6896fdb1c569 | 1,874 | py | Python | flowcat/preprocessing/scalers.py | xiamaz/flowCat | 5fea92eff3112ea3bb669595b469735b2bfa3938 | [
"MIT"
] | 4 | 2020-03-06T14:06:12.000Z | 2021-06-25T15:03:54.000Z | flowcat/preprocessing/scalers.py | xiamaz/flowCat | 5fea92eff3112ea3bb669595b469735b2bfa3938 | [
"MIT"
] | 3 | 2020-03-25T10:54:52.000Z | 2020-11-26T19:06:23.000Z | flowcat/preprocessing/scalers.py | xiamaz/flowCat | 5fea92eff3112ea3bb669595b469735b2bfa3938 | [
"MIT"
] | 2 | 2020-04-14T11:26:25.000Z | 2021-04-02T19:25:52.000Z | import pandas as pd
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.base import TransformerMixin, BaseEstimator
from flowcat.types import fcsdata as fcs
from . import FCSDataMixin
class FCSMinMaxScaler(FCSDataMixin, TransformerMixin, BaseEstimator):
"""MinMaxScaling with adaptations for FCSData."""
def fit(self, X, *_):
"""Fit min max range to the given data."""
self._model = MinMaxScaler()
if self._fit_to_range:
data = X.ranges_array
else:
data = X.data
self._model.fit(data)
return self
def transform(self, X, *_):
"""Transform data to be 0 min and 1 max using the fitted values."""
X = X.copy()
X.data = self._model.transform(X.data)
X.update_range(self._model.transform(X.ranges_array))
return X
class FCSStandardScaler(FCSDataMixin, TransformerMixin, BaseEstimator):
"""Standard deviation scaling adapted for FCSData objects."""
def fit(self, X, *_):
"""Fit standard deviation to the given data."""
self._model = StandardScaler().fit(X.data)
return self
def transform(self, X, *_):
"""Transform data to be zero mean and unit standard deviation"""
X = X.copy()
X.data = self._model.transform(X.data)
X.update_range(self._model.transform(X.ranges_array))
return X
class RefitScaler(FCSDataMixin, TransformerMixin, BaseEstimator):
"""Always refit the containing scaler class."""
| 28.830769 | 75 | 0.649413 | import pandas as pd
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.base import TransformerMixin, BaseEstimator
from flowcat.types import fcsdata as fcs
from . import FCSDataMixin
class FCSMinMaxScaler(FCSDataMixin, TransformerMixin, BaseEstimator):
    """MinMaxScaling with adaptations for FCSData."""
    def __init__(self, fit_to_range=False):
        # Fitted sklearn MinMaxScaler; created lazily in fit().
        self._model = None
        # When True, fit on the channel ranges instead of the event data.
        self._fit_to_range = fit_to_range
    def fit(self, X, *_):
        """Fit min max range to the given data."""
        self._model = MinMaxScaler()
        if self._fit_to_range:
            data = X.ranges_array
        else:
            data = X.data
        self._model.fit(data)
        return self
    def transform(self, X, *_):
        """Transform data to be 0 min and 1 max using the fitted values."""
        X = X.copy()  # never mutate the caller's FCSData
        X.data = self._model.transform(X.data)
        # Keep the stored channel ranges consistent with the scaled data.
        X.update_range(self._model.transform(X.ranges_array))
        return X
class FCSStandardScaler(FCSDataMixin, TransformerMixin, BaseEstimator):
    """Standard deviation scaling adapted for FCSData objects."""
    def __init__(self):
        # Fitted sklearn StandardScaler; created in fit().
        self._model = None
    def fit(self, X, *_):
        """Fit standard deviation to the given data."""
        self._model = StandardScaler().fit(X.data)
        return self
    def transform(self, X, *_):
        """Transform data to be zero mean and unit standard deviation"""
        X = X.copy()  # never mutate the caller's FCSData
        X.data = self._model.transform(X.data)
        # Keep the stored channel ranges consistent with the scaled data.
        X.update_range(self._model.transform(X.ranges_array))
        return X
class RefitScaler(FCSDataMixin, TransformerMixin, BaseEstimator):
    """Always refit the containing scaler class."""
    def __init__(self, base):
        # base is a scaler *class* (not an instance); a fresh one is
        # instantiated on every transform call.
        self._base = base
    def fit(self, *_):
        # Fitting is a deliberate no-op: state is rebuilt per transform.
        return self
    def transform(self, X, *_):
        return self._base().fit_transform(X)
| 210 | 0 | 134 |
31d4a2f35b27a409db89cd230b0056bb208ad526 | 260 | py | Python | setup.py | jhunhwang/goldenretriever | 08df451c2d726678d91bab372936e95b6cf88732 | [
"Apache-2.0"
] | 8 | 2020-03-06T02:22:24.000Z | 2022-03-08T04:18:42.000Z | setup.py | jhunhwang/goldenretriever | 08df451c2d726678d91bab372936e95b6cf88732 | [
"Apache-2.0"
] | 7 | 2020-11-13T18:54:23.000Z | 2022-02-10T02:29:15.000Z | setup.py | jhunhwang/goldenretriever | 08df451c2d726678d91bab372936e95b6cf88732 | [
"Apache-2.0"
] | 3 | 2020-11-12T13:18:13.000Z | 2021-10-15T05:50:44.000Z | from setuptools import setup, find_packages
setup(name='Golden Retriever',
      version='0.1',
      description='Information retrieval using fine-tuned semantic similarity',
      author='AI Singapore',
      packages=find_packages(),  # auto-discover all packages in the repo
      zip_safe=False)  # package data must stay on disk, not inside a zipped egg
| 28.888889 | 79 | 0.711538 | from setuptools import setup, find_packages
setup(name='Golden Retriever',
version='0.1',
description='Information retrieval using fine-tuned semantic similarity',
author='AI Singapore',
packages=find_packages(),
zip_safe=False)
| 0 | 0 | 0 |
ed22fd349b855954eb1be52c233a04e3c1c71bff | 793 | py | Python | projects/website.py | kbrohkahn/kevinbrohkahn.com | 092457bea6207dcccd0f1c947c81097d9e5080a3 | [
"Apache-2.0"
] | null | null | null | projects/website.py | kbrohkahn/kevinbrohkahn.com | 092457bea6207dcccd0f1c947c81097d9e5080a3 | [
"Apache-2.0"
] | null | null | null | projects/website.py | kbrohkahn/kevinbrohkahn.com | 092457bea6207dcccd0f1c947c81097d9e5080a3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Emit the shared page header and navigation bar templates.
with open("../templates/header.html", "r") as header:
    print header.read()
with open("../templates/navbar.html", "r") as navbar:
    print navbar.read()
# Project-specific body for the kevin.broh-kahn.com entry.
print("""
<div class="row">
	<div class="col-xxs-6 col-xxs-offset-3 col-xs-4 col-sm-3 col-md-2 col-lg-1">
		<img class="img-responsive" alt="kevin.broh-kahn.com Icon" src="/assets/img/kevin.broh-kahn.com/icon.png">
	</div>
	<div class="col-xxs-12 col-xs-8 col-sm-9 col-md-10 col-lg-11">
		<h1>kevin.broh-kahn.com <small><a target="blank" href="https://github.com/kbrohkahn/kevin.broh-kahn.com">Github</a></small></h1>
		<div class="subheader">A site for displaying all of the projects and applications I have created.</div>
	</div>
</div>
""")
# Emit the shared footer template.
with open("../templates/footer.html", "r") as footer:
    print footer.read()
| 37.761905 | 130 | 0.679697 | #!/usr/bin/env python
with open("../templates/header.html", "r") as header:
print header.read()
with open("../templates/navbar.html", "r") as navbar:
print navbar.read()
print("""
<div class="row">
<div class="col-xxs-6 col-xxs-offset-3 col-xs-4 col-sm-3 col-md-2 col-lg-1">
<img class="img-responsive" alt="kevin.broh-kahn.com Icon" src="/assets/img/kevin.broh-kahn.com/icon.png">
</div>
<div class="col-xxs-12 col-xs-8 col-sm-9 col-md-10 col-lg-11">
<h1>kevin.broh-kahn.com <small><a target="blank" href="https://github.com/kbrohkahn/kevin.broh-kahn.com">Github</a></small></h1>
<div class="subheader">A site for displaying all of the projects and applications I have created.</div>
</div>
</div>
""")
with open("../templates/footer.html", "r") as footer:
print footer.read()
| 0 | 0 | 0 |
44dc27dfca78bb459b1f14bfdb1f264bfabad973 | 5,803 | py | Python | experiments/tomita_resample.py | ozturkosu/umps_code | 81130ac2207531776aee6ab0c71a535bf2218c92 | [
"MIT"
] | 11 | 2020-07-07T01:18:51.000Z | 2021-07-03T11:40:56.000Z | experiments/tomita_resample.py | jemisjoky/umps_code | 81130ac2207531776aee6ab0c71a535bf2218c92 | [
"MIT"
] | null | null | null | experiments/tomita_resample.py | jemisjoky/umps_code | 81130ac2207531776aee6ab0c71a535bf2218c92 | [
"MIT"
] | 2 | 2021-11-03T01:50:13.000Z | 2022-01-13T08:39:23.000Z | #!/usr/bin/env python3
import os
import sys
import pickle
from functools import partial
from string import ascii_lowercase
import jax
import torch
sys.path.append('..')
from ti_mps import TI_MPS
# Experiment configuration for resampling trained sequence models.
samp_lens = [16, 30] # What lengths we want to sample at
samp_size = 1000 # Number of samples to draw
dataset = 'tomita' # Dataset models were trained on
save_name = ".tomita_exp.record" # Where the record is saved
# Symbol alphabets per dataset; 'bos_eos' holds the begin/end-of-string
# markers appended for LSTM sampling.
ALPHABET = {'brackets': ['(', ')', '*'],
            'tomita': ['0', '1'],
            'bos_eos': ['^', '$'],
           }
alph = ALPHABET[dataset]
# Import the scorer matching the configured dataset.  NOTE(review): the
# 'brackets' branch binds `score_fun` while 'tomita' binds `tom_score`;
# the driver below only uses `tom_score`.
if dataset == 'brackets':
    from toy_datasets import score_brackets as score_fun
elif dataset == 'tomita':
    from toy_datasets import score_tomita as tom_score
def mps_sample_fun(rng_key, mps, target_lens, score_fun, ref_sets=None):
    """Draw samples from MPS model within JAX.

    Args:
        rng_key: JAX PRNG key (split per length in the free-sampling path).
        mps: trained MPS model to sample from.
        target_lens: lengths at which to draw and score samples.
        score_fun: maps a list of sampled strings to a fraction in [0, 1].
        ref_sets: optional mapping length -> reference string set; when
            given, the bidirectional "fill in the blanks" experiment runs
            instead of free sampling.

    Returns:
        Dict mapping each length to a correctness percentage (0-100).
        Example samples are collected in `examp_samps` but never returned.
    """
    from sampler import draw_samples, fill_in_blanks
    bi_exp = ref_sets is not None
    examp_samps = {}
    if bi_exp:
        corr_frac = {}
        for samp_l in target_lens:
            ref_s = ref_sets[samp_l]
            # NOTE(review): this branch looks unfinished -- `key` is used
            # below without being split from `rng_key` first, `to_string`
            # is not defined in this module, and the next line clobbers
            # the `ref_sets` parameter on the first iteration.
            ref_sets = to_string(ref_s, alph)
            samp_chars = fill_in_blanks(key, mps, alphabet=alph,
                                        ref_strset=ref_s)
            # TODO: Fold this code into fill_in_blanks
            # Generate validation strings with each character replaced by
            # suggested character from samp_chars
            samples = [s[:i] + c + s[i+1:] for s, cs in zip(ref_sets, samp_chars)
                                           for i, c in enumerate(cs)]
            corr_frac[samp_l] = 100 * score_fun(samples)
            examp_samps[samp_l] = samples[:10]
            print(f"Correct frac len={samp_l}: {corr_frac[samp_l]:.1f}%")
            print(f"Replacement examples: {samples[:10]}\n")
    else:
        corr_frac = {}
        for samp_l in target_lens:
            # Fresh subkey per length so draws are independent.
            rng_key, key = jax.random.split(rng_key)
            samples = draw_samples(key, mps, alphabet=alph,
                                   num_samps=samp_size, samp_len=samp_l)
            score = score_fun(samples)
            corr_frac[samp_l] = 100 * score
            examp_samps[samp_l] = samples[:10]
            print(f"Correct frac len={samp_l}: {100 * score:.1f}%")
            print(f"Example samples: {samples[:10]}\n")
    return corr_frac
def lstm_sample_fun(rng_key, lstm, target_lens, score_fun, ref_sets=None):
    """Draw samples from LSTM model within Pytorch.

    Args:
        rng_key: JAX PRNG key, split per length for sampling.
        lstm: trained LSTM model; `lstm.bi_dir` selects the
            (unimplemented) bidirectional path.
        target_lens: lengths at which to draw and score samples.
        score_fun: maps a list of sampled strings to a fraction in [0, 1].
        ref_sets: reference string sets per length (bidirectional path only).

    Returns:
        Dict mapping each length to a correctness percentage (0-100).
    """
    samp_mode = 'fixed'
    bi_exp = lstm.bi_dir
    # LSTM sampling works over the dataset alphabet plus BOS/EOS markers.
    this_alph = alph + ALPHABET['bos_eos']
    lstm = lstm.eval()
    examp_samps = {}
    if bi_exp:
        corr_frac = {}
        for samp_l in target_lens:
            ref_s = ref_sets[samp_l]
            rng_key, key = jax.random.split(rng_key)
            # TODO: Finish up better bidirectional sampling code, including
            # (a) deal with BOS/EOS, (b) properly put samp_chars in
            # ref_set strings
            # NOTE(review): everything after the raise below is dead code;
            # it also references the undefined `to_string` and reads
            # `samples` before assignment.
            raise NotImplementedError
            ref_sets = [s[1:-1] for s in to_string(ref_s, this_alph)]
            samp_chars = lstm.sample(key, alph,
                                     samp_mode='completion', ref_strset=ref_s)
            # BOS and EOS should never be sampled, so replace those with
            # incorrect strings
            samples = [')(' if ('^' in s or '$' in s) else s for s in samples]
            corr_frac[samp_l] = 100 * score_fun(samples)
            examp_samps[samp_l] = samples[:10]
            print(f"Correct frac len={samp_l}: {corr_frac[samp_l]:.1f}%")
            print(f"Replacement examples:{examp_samps[samp_l]}\n")
    else:
        corr_frac = {}
        for samp_l in target_lens:
            rng_key, key = jax.random.split(rng_key)
            samples = lstm.sample(key, this_alph, samp_mode=samp_mode,
                                  num_samps=samp_size, samp_len=samp_l)
            score = score_fun(samples)
            corr_frac[samp_l] = 100 * score
            examp_samps[samp_l] = samples[:10]
            print(f"Correct frac len={samp_l}: {100 * score:.1f}%")
            print(f"Example samples: {examp_samps[samp_l]}\n")
    return corr_frac
# Seed a single PRNG stream for all resampling below.
rng_key = jax.random.PRNGKey(0)
# Load the data record we're interested in.  A context manager fixes the
# original unclosed file handle from pickle.load(open(...)).
with open(save_name, 'rb') as record_file:
    full_record = pickle.load(record_file)
# Go through each experimental setting and resample with trained model
for setting, global_rec in full_record.items():
    # Get relevant data for this experimental setting
    print(setting)
    tom_num, _, _, model = setting[:4]
    assert model in ['mps', 'lstm']
    assert len(setting) in [4, 5]
    score_fun = partial(tom_score, tomita_num=tom_num)
    samp_fun = lstm_sample_fun if model == 'lstm' else mps_sample_fun
    best_model = global_rec['best_model']
    best_epoch = global_rec['best_epoch']
    local_rec = global_rec['local_recs'][best_epoch]
    # Figure out which lengths haven't been sampled yet
    these_lens = [l for l in samp_lens if f"corr_frac_{l}" not in local_rec]
    if not these_lens:
        continue
    # Perform the resampling and add results to local_rec
    rng_key, key = jax.random.split(rng_key)
    corr_frac = samp_fun(key, best_model, these_lens, score_fun)
    for s_len, score in corr_frac.items():
        lookup = f"corr_frac_{s_len}"
        if lookup in local_rec:
            print(f"Already have samples from len {s_len}")
            continue
        local_rec[lookup] = score
    # The original bare `print` was a Python-2 leftover that printed
    # nothing under Python 3; emit the intended blank separator line.
    print()
    # Put this back in full_record and save
    global_rec['local_recs'][best_epoch] = local_rec
    full_record[setting] = global_rec
pickle.dump(full_record, open(save_name, 'wb')) | 38.946309 | 81 | 0.611408 | #!/usr/bin/env python3
import os
import sys
import pickle
from functools import partial
from string import ascii_lowercase
import jax
import torch
sys.path.append('..')
from ti_mps import TI_MPS
# Experiment configuration for resampling trained sequence models.
samp_lens = [16, 30] # What lengths we want to sample at
samp_size = 1000 # Number of samples to draw
dataset = 'tomita' # Dataset models were trained on
save_name = ".tomita_exp.record" # Where the record is saved
# Symbol alphabets per dataset; 'bos_eos' holds the begin/end-of-string
# markers appended for LSTM sampling.
ALPHABET = {'brackets': ['(', ')', '*'],
            'tomita': ['0', '1'],
            'bos_eos': ['^', '$'],
           }
alph = ALPHABET[dataset]
# Import the scorer matching the configured dataset.  NOTE(review): the
# 'brackets' branch binds `score_fun` while 'tomita' binds `tom_score`;
# the driver below only uses `tom_score`.
if dataset == 'brackets':
    from toy_datasets import score_brackets as score_fun
elif dataset == 'tomita':
    from toy_datasets import score_tomita as tom_score
def is_lstm(model):
    """Return True when *model* is a PyTorch module (the LSTM case),
    False when it is a TI_MPS.  Any other type fails the sanity assert.
    """
    known_model_types = (torch.nn.Module, TI_MPS)
    assert isinstance(model, known_model_types)
    return isinstance(model, torch.nn.Module)
def mps_sample_fun(rng_key, mps, target_lens, score_fun, ref_sets=None):
    """Draw samples from MPS model within JAX.

    Args:
        rng_key: JAX PRNG key (split per length in the free-sampling path).
        mps: trained MPS model to sample from.
        target_lens: lengths at which to draw and score samples.
        score_fun: maps a list of sampled strings to a fraction in [0, 1].
        ref_sets: optional mapping length -> reference string set; when
            given, the bidirectional "fill in the blanks" experiment runs
            instead of free sampling.

    Returns:
        Dict mapping each length to a correctness percentage (0-100).
        Example samples are collected in `examp_samps` but never returned.
    """
    from sampler import draw_samples, fill_in_blanks
    bi_exp = ref_sets is not None
    examp_samps = {}
    if bi_exp:
        corr_frac = {}
        for samp_l in target_lens:
            ref_s = ref_sets[samp_l]
            # NOTE(review): this branch looks unfinished -- `key` is used
            # below without being split from `rng_key` first, `to_string`
            # is not defined in this module, and the next line clobbers
            # the `ref_sets` parameter on the first iteration.
            ref_sets = to_string(ref_s, alph)
            samp_chars = fill_in_blanks(key, mps, alphabet=alph,
                                        ref_strset=ref_s)
            # TODO: Fold this code into fill_in_blanks
            # Generate validation strings with each character replaced by
            # suggested character from samp_chars
            samples = [s[:i] + c + s[i+1:] for s, cs in zip(ref_sets, samp_chars)
                                           for i, c in enumerate(cs)]
            corr_frac[samp_l] = 100 * score_fun(samples)
            examp_samps[samp_l] = samples[:10]
            print(f"Correct frac len={samp_l}: {corr_frac[samp_l]:.1f}%")
            print(f"Replacement examples: {samples[:10]}\n")
    else:
        corr_frac = {}
        for samp_l in target_lens:
            # Fresh subkey per length so draws are independent.
            rng_key, key = jax.random.split(rng_key)
            samples = draw_samples(key, mps, alphabet=alph,
                                   num_samps=samp_size, samp_len=samp_l)
            score = score_fun(samples)
            corr_frac[samp_l] = 100 * score
            examp_samps[samp_l] = samples[:10]
            print(f"Correct frac len={samp_l}: {100 * score:.1f}%")
            print(f"Example samples: {samples[:10]}\n")
    return corr_frac
def lstm_sample_fun(rng_key, lstm, target_lens, score_fun, ref_sets=None):
    """Draw samples from LSTM model within Pytorch.

    Args:
        rng_key: JAX PRNG key, split per length for sampling.
        lstm: trained LSTM model; `lstm.bi_dir` selects the
            (unimplemented) bidirectional path.
        target_lens: lengths at which to draw and score samples.
        score_fun: maps a list of sampled strings to a fraction in [0, 1].
        ref_sets: reference string sets per length (bidirectional path only).

    Returns:
        Dict mapping each length to a correctness percentage (0-100).
    """
    samp_mode = 'fixed'
    bi_exp = lstm.bi_dir
    # LSTM sampling works over the dataset alphabet plus BOS/EOS markers.
    this_alph = alph + ALPHABET['bos_eos']
    lstm = lstm.eval()
    examp_samps = {}
    if bi_exp:
        corr_frac = {}
        for samp_l in target_lens:
            ref_s = ref_sets[samp_l]
            rng_key, key = jax.random.split(rng_key)
            # TODO: Finish up better bidirectional sampling code, including
            # (a) deal with BOS/EOS, (b) properly put samp_chars in
            # ref_set strings
            # NOTE(review): everything after the raise below is dead code;
            # it also references the undefined `to_string` and reads
            # `samples` before assignment.
            raise NotImplementedError
            ref_sets = [s[1:-1] for s in to_string(ref_s, this_alph)]
            samp_chars = lstm.sample(key, alph,
                                     samp_mode='completion', ref_strset=ref_s)
            # BOS and EOS should never be sampled, so replace those with
            # incorrect strings
            samples = [')(' if ('^' in s or '$' in s) else s for s in samples]
            corr_frac[samp_l] = 100 * score_fun(samples)
            examp_samps[samp_l] = samples[:10]
            print(f"Correct frac len={samp_l}: {corr_frac[samp_l]:.1f}%")
            print(f"Replacement examples:{examp_samps[samp_l]}\n")
    else:
        corr_frac = {}
        for samp_l in target_lens:
            rng_key, key = jax.random.split(rng_key)
            samples = lstm.sample(key, this_alph, samp_mode=samp_mode,
                                  num_samps=samp_size, samp_len=samp_l)
            score = score_fun(samples)
            corr_frac[samp_l] = 100 * score
            examp_samps[samp_l] = samples[:10]
            print(f"Correct frac len={samp_l}: {100 * score:.1f}%")
            print(f"Example samples: {examp_samps[samp_l]}\n")
    return corr_frac
# Seed a single PRNG stream for all resampling below.
rng_key = jax.random.PRNGKey(0)
# Load the data record we're interested in.  A context manager fixes the
# original unclosed file handle from pickle.load(open(...)).
with open(save_name, 'rb') as record_file:
    full_record = pickle.load(record_file)
# Go through each experimental setting and resample with trained model
for setting, global_rec in full_record.items():
    # Get relevant data for this experimental setting
    print(setting)
    tom_num, _, _, model = setting[:4]
    assert model in ['mps', 'lstm']
    assert len(setting) in [4, 5]
    score_fun = partial(tom_score, tomita_num=tom_num)
    samp_fun = lstm_sample_fun if model == 'lstm' else mps_sample_fun
    best_model = global_rec['best_model']
    best_epoch = global_rec['best_epoch']
    local_rec = global_rec['local_recs'][best_epoch]
    # Figure out which lengths haven't been sampled yet
    these_lens = [l for l in samp_lens if f"corr_frac_{l}" not in local_rec]
    if not these_lens:
        continue
    # Perform the resampling and add results to local_rec
    rng_key, key = jax.random.split(rng_key)
    corr_frac = samp_fun(key, best_model, these_lens, score_fun)
    for s_len, score in corr_frac.items():
        lookup = f"corr_frac_{s_len}"
        if lookup in local_rec:
            print(f"Already have samples from len {s_len}")
            continue
        local_rec[lookup] = score
    # The original bare `print` was a Python-2 leftover that printed
    # nothing under Python 3; emit the intended blank separator line.
    print()
    # Put this back in full_record and save
    global_rec['local_recs'][best_epoch] = local_rec
    full_record[setting] = global_rec
pickle.dump(full_record, open(save_name, 'wb')) | 100 | 0 | 23 |
d4172c18e680f02e7225ce6bdf18a4de6e46a0c0 | 963 | py | Python | examples/housing/tracing.py | rlnsanz/inspectional-rara-parakeet | 2c7919ed432616ec016a5afcd6718d16fa65e8af | [
"Apache-2.0"
] | null | null | null | examples/housing/tracing.py | rlnsanz/inspectional-rara-parakeet | 2c7919ed432616ec016a5afcd6718d16fa65e8af | [
"Apache-2.0"
] | null | null | null | examples/housing/tracing.py | rlnsanz/inspectional-rara-parakeet | 2c7919ed432616ec016a5afcd6718d16fa65e8af | [
"Apache-2.0"
] | 1 | 2021-06-25T16:06:59.000Z | 2021-06-25T16:06:59.000Z | """
import numpy as np # line_no: 1
import gadget
gadget.record('docs_training_py')
x = np.arange(5) # line_no: 2
s = x[0] # line_no: 3
for i in x: # line_no: 4
if i % 2 == 0: # line_no: 5
s += i # line_no: 6
print('done')
"""
import gadget as ln
# Hand-instrumented version of the tiny script shown in the module
# docstring: every statement is wrapped in a `gadget` tracing call that
# records the original source text and line number alongside execution.
with ln.tracking("docs_training_py3"):
    import numpy as np
    ln.importing(np, module="numpy", name="np", line_no=1)
    # NOTE(review): np.arange(5) is evaluated eagerly here; presumably
    # ln.call pairs the runtime value with its source text -- confirm
    # against the gadget API.
    x = ln.call(np.arange(5), args=(5, "5"), text="np.arange(5)", line_no=2).assign(
        target="x"
    )
    s = ln.assign(x[0], text="x[0]", target="s", line_no=3)
    for i in ln.loop_it.new(x, text="i in x", name="main_loop", line_no=4):
        if ln.pred.new(i % 2 == 0, text="i % 2 == 0", name="main_cond", line_no=5):
            s += ln.assign(i, target="s", text="i", mod="+=", line_no=6)
        ln.pred.pop()  # close the tracked conditional for this iteration
    ln.loop_it.pop()  # close the tracked loop
    # ln.call(eval(f'done'), args=('done', "'done'"), text="eval('done')")
| 27.514286 | 84 | 0.529595 | """
import numpy as np # line_no: 1
import gadget
gadget.record('docs_training_py')
x = np.arange(5) # line_no: 2
s = x[0] # line_no: 3
for i in x: # line_no: 4
if i % 2 == 0: # line_no: 5
s += i # line_no: 6
print('done')
"""
import gadget as ln
# Hand-instrumented version of the tiny script shown in the module
# docstring: every statement is wrapped in a `gadget` tracing call that
# records the original source text and line number alongside execution.
with ln.tracking("docs_training_py3"):
    import numpy as np
    ln.importing(np, module="numpy", name="np", line_no=1)
    # NOTE(review): np.arange(5) is evaluated eagerly here; presumably
    # ln.call pairs the runtime value with its source text -- confirm
    # against the gadget API.
    x = ln.call(np.arange(5), args=(5, "5"), text="np.arange(5)", line_no=2).assign(
        target="x"
    )
    s = ln.assign(x[0], text="x[0]", target="s", line_no=3)
    for i in ln.loop_it.new(x, text="i in x", name="main_loop", line_no=4):
        if ln.pred.new(i % 2 == 0, text="i % 2 == 0", name="main_cond", line_no=5):
            s += ln.assign(i, target="s", text="i", mod="+=", line_no=6)
        ln.pred.pop()  # close the tracked conditional for this iteration
    ln.loop_it.pop()  # close the tracked loop
    # ln.call(eval(f'done'), args=('done', "'done'"), text="eval('done')")
| 0 | 0 | 0 |
1990e5a8034f4590ccc82b1a639f39cfac674ea5 | 539 | py | Python | assets/handle-start-stop-status.py | ryantuck/gibson-presentation | 1d61701d15533c5fc69606848fd8779d50e2ac49 | [
"MIT"
] | null | null | null | assets/handle-start-stop-status.py | ryantuck/gibson-presentation | 1d61701d15533c5fc69606848fd8779d50e2ac49 | [
"MIT"
] | null | null | null | assets/handle-start-stop-status.py | ryantuck/gibson-presentation | 1d61701d15533c5fc69606848fd8779d50e2ac49 | [
"MIT"
] | null | null | null | # ... continued ...
# Presentation excerpt of a ChatOps command handler.  NOTE(review):
# `action`, `who`, `gibson`, `user_instance`, `start_message`, and
# `_return_payload` come from the elided enclosing function, so the bare
# `return` statements are only valid in that context -- this fragment is
# not runnable on its own.
if action == 'start':
    # Boot the user's instance and acknowledge in green.
    gibson.start(user_instance['instance_id'])
    return _return_payload(
        message=start_message.format(user=who),
        color='green',
    )
elif action == 'stop':
    # Stop the user's instance.
    gibson.stop(user_instance['instance_id'])
    return _return_payload(
        message='stopping instance for: {}'.format(who),
        color='green',
    )
elif action == 'status':
    # Read-only query: report the instance's current state in gray.
    return _return_payload(
        message='status for user {}: {}'.format(who, user_instance['state']),
        color='gray',
    )
| 24.5 | 77 | 0.608534 | # ... continued ...
# Presentation excerpt of a ChatOps command handler.  NOTE(review):
# `action`, `who`, `gibson`, `user_instance`, `start_message`, and
# `_return_payload` come from the elided enclosing function, so the bare
# `return` statements are only valid in that context -- this fragment is
# not runnable on its own.
if action == 'start':
    # Boot the user's instance and acknowledge in green.
    gibson.start(user_instance['instance_id'])
    return _return_payload(
        message=start_message.format(user=who),
        color='green',
    )
elif action == 'stop':
    # Stop the user's instance.
    gibson.stop(user_instance['instance_id'])
    return _return_payload(
        message='stopping instance for: {}'.format(who),
        color='green',
    )
elif action == 'status':
    # Read-only query: report the instance's current state in gray.
    return _return_payload(
        message='status for user {}: {}'.format(who, user_instance['state']),
        color='gray',
    )
| 0 | 0 | 0 |
6d20b20d69f8282a99430826508eb0cf3cb957fb | 280 | py | Python | nb.py | itdaniher/MPU9250 | 2524dda72c032238e026e0fa5e382e3b0c603d37 | [
"Apache-2.0"
] | null | null | null | nb.py | itdaniher/MPU9250 | 2524dda72c032238e026e0fa5e382e3b0c603d37 | [
"Apache-2.0"
] | null | null | null | nb.py | itdaniher/MPU9250 | 2524dda72c032238e026e0fa5e382e3b0c603d37 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from MPU9250 import MPU9250
from BMP280 import BMP280
m = MPU9250()
b = BMP280()
print(b.pressure(), 'Pa')


# Named function instead of a lambda bound to a name (PEP 8 E731).
def avg_x_accel(n):
    """Average *n* accelerometer readings, per axis.

    Each m.readAccel() call returns a dict of axis readings; the values
    are stacked into an (n, axes) array and averaged over the sample axis.
    """
    return np.array([list(m.readAccel().values()) for _ in range(n)]).mean(axis=0)


# Difference of two independent 10-sample averages -- a quick sensor
# noise/drift sanity check that should land near zero.
x = np.sum(avg_x_accel(10) - avg_x_accel(10))
print(x)
| 21.538462 | 95 | 0.7 | import numpy as np
from MPU9250 import MPU9250
from BMP280 import BMP280
m = MPU9250()
b = BMP280()
print(b.pressure(), 'Pa')


# Named function instead of a lambda bound to a name (PEP 8 E731).
def avg_x_accel(n):
    """Average *n* accelerometer readings, per axis.

    Each m.readAccel() call returns a dict of axis readings; the values
    are stacked into an (n, axes) array and averaged over the sample axis.
    """
    return np.array([list(m.readAccel().values()) for _ in range(n)]).mean(axis=0)


# Difference of two independent 10-sample averages -- a quick sensor
# noise/drift sanity check that should land near zero.
x = np.sum(avg_x_accel(10) - avg_x_accel(10))
print(x)
| 0 | 0 | 0 |
145a351f59c3a02b8714430d96061401b91abd88 | 2,659 | py | Python | source/camera/basic/capture_hdr_complete_settings.py | zivid/zivid-python-samples | 83cdfa39f93221ef1c523640495a9ccb356d3aed | [
"BSD-3-Clause"
] | 10 | 2020-12-03T22:59:39.000Z | 2022-03-27T07:31:42.000Z | source/camera/basic/capture_hdr_complete_settings.py | zivid/zivid-python-samples | 83cdfa39f93221ef1c523640495a9ccb356d3aed | [
"BSD-3-Clause"
] | 20 | 2021-01-04T08:30:56.000Z | 2022-03-08T11:25:55.000Z | source/camera/basic/capture_hdr_complete_settings.py | zivid/zivid-python-samples | 83cdfa39f93221ef1c523640495a9ccb356d3aed | [
"BSD-3-Clause"
] | 3 | 2021-04-21T01:42:58.000Z | 2021-06-17T08:13:25.000Z | """
This example shows how to capture point clouds, with color, from the Zivid camera.
For scenes with high dynamic range we combine multiple acquisitions to get an HDR point cloud. This example shows how
to fully configure settings for each acquisition. In general, capturing an HDR point cloud is a lot simpler than this.
The purpose of this example is to demonstrate how to configure all the settings.
"""
import datetime
import zivid
# Script entry point.  NOTE(review): `_main` is not defined in this
# excerpt -- the full sample defines it above this guard.
if __name__ == "__main__":
    _main()
| 34.532468 | 118 | 0.662655 | """
This example shows how to capture point clouds, with color, from the Zivid camera.
For scenes with high dynamic range we combine multiple acquisitions to get an HDR point cloud. This example shows how
to fully configure settings for each acquisition. In general, capturing an HDR point cloud is a lot simpler than this.
The purpose of this example is to demonstrate how to configure all the settings.
"""
import datetime
import zivid
def _main():
    """Connect to a Zivid camera, configure a three-acquisition HDR
    capture with fully explicit per-acquisition and processing settings,
    and save the captured frame to Frame.zdf.
    """
    app = zivid.Application()
    print("Connecting to camera")
    camera = app.connect_camera()
    print("Configuring acquisition settings different for all HDR acquisitions")
    # Three acquisitions with varying aperture/exposure/gain so the HDR
    # merge covers a wide dynamic range.
    settings = zivid.Settings(
        acquisitions=[
            zivid.Settings.Acquisition(
                aperture=8.0,
                exposure_time=datetime.timedelta(microseconds=10000),
                brightness=1.8,
                gain=1.0,
            ),
            zivid.Settings.Acquisition(
                aperture=4.0,
                exposure_time=datetime.timedelta(microseconds=10000),
                brightness=1.8,
                gain=1.0,
            ),
            zivid.Settings.Acquisition(
                aperture=4.0,
                exposure_time=datetime.timedelta(microseconds=40000),
                brightness=1.8,
                gain=2.0,
            ),
        ],
    )
    for acquisition in settings.acquisitions:
        print(acquisition)
    print("Configuring global processing settings")
    # Point-cloud filter configuration shared by all acquisitions.
    filters = settings.processing.filters
    filters.smoothing.gaussian.enabled = True
    filters.smoothing.gaussian.sigma = 1.5
    filters.noise.removal.enabled = True
    filters.noise.removal.threshold = 7.0
    filters.outlier.removal.enabled = True
    filters.outlier.removal.threshold = 5.0
    filters.reflection.removal.enabled = True
    filters.experimental.contrast_distortion.correction.enabled = True
    filters.experimental.contrast_distortion.correction.strength = 0.4
    filters.experimental.contrast_distortion.removal.enabled = False
    filters.experimental.contrast_distortion.removal.threshold = 0.5
    # Neutral color balance and gamma; tone mapping applied to HDR only.
    color = settings.processing.color
    color.balance.red = 1.0
    color.balance.blue = 1.0
    color.balance.green = 1.0
    color.gamma = 1.0
    settings.processing.color.experimental.tone_mapping.enabled = "hdrOnly"
    print(settings.processing)
    print("Capturing frame (HDR)")
    with camera.capture(settings) as frame:
        print("Complete settings used:")
        print(frame.settings)
        data_file = "Frame.zdf"
        print(f"Saving frame to file: {data_file}")
        frame.save(data_file)
if __name__ == "__main__":
    _main()
| 2,154 | 0 | 23 |
56dee3dcf7fa372c941594bc5b1813517e02b773 | 399 | py | Python | src/bpmn_python/graph/classes/activities/task_type.py | ToJestKrzysio/ProcessVisualization | 9a359a31816bf1be65e3684a571509e3a2c2c0ac | [
"MIT"
] | null | null | null | src/bpmn_python/graph/classes/activities/task_type.py | ToJestKrzysio/ProcessVisualization | 9a359a31816bf1be65e3684a571509e3a2c2c0ac | [
"MIT"
] | null | null | null | src/bpmn_python/graph/classes/activities/task_type.py | ToJestKrzysio/ProcessVisualization | 9a359a31816bf1be65e3684a571509e3a2c2c0ac | [
"MIT"
] | null | null | null | # coding=utf-8
"""
Class used for representing tTask of BPMN 2.0 graph
"""
import graph.classes.activities.activity_type as activity
class Task(activity.Activity):
    """BPMN 2.0 tTask element.

    A task is an atomic activity in a process flow; this class adds no
    fields beyond those inherited from Activity.
    """

    def __init__(self):
        """Create a Task, delegating all field setup to Activity."""
        super(Task, self).__init__()
| 22.166667 | 74 | 0.659148 | # coding=utf-8
"""
Class used for representing tTask of BPMN 2.0 graph
"""
import graph.classes.activities.activity_type as activity
class Task(activity.Activity):
    """BPMN 2.0 tTask element.

    A task is an atomic activity in a process flow; this class adds no
    fields beyond those inherited from Activity.
    """

    def __init__(self):
        """Create a Task, delegating all field setup to Activity."""
        super(Task, self).__init__()
| 0 | 0 | 0 |
72b3e3844a7f8ef1fd0375ef6727f587c90a6e6e | 3,392 | py | Python | Midterm Exam/Midterm P4.py | Karoline0097/MITx---6.00.1x-EdX22 | 6ff61f86429e1b72d9cfcd1a9082c66bb528034f | [
"MIT"
] | 1 | 2022-03-25T22:26:24.000Z | 2022-03-25T22:26:24.000Z | Midterm Exam/Midterm P4.py | Karoline0097/MITx---6.00.1x-EdX22 | 6ff61f86429e1b72d9cfcd1a9082c66bb528034f | [
"MIT"
] | null | null | null | Midterm Exam/Midterm P4.py | Karoline0097/MITx---6.00.1x-EdX22 | 6ff61f86429e1b72d9cfcd1a9082c66bb528034f | [
"MIT"
] | null | null | null | # Problem 4 MIT Midterm #
# Write a function called gcd
# that calculates the greatest common divisor of two positive integers.
# The gcd of two or more integers, when at least one of them is not zero,
# is the largest positive integer that divides the numbers without a remainder.
# 20 min until finished
# One way is recursively,
# where the greatest common denominator of a and b can be calculated as gcd(a, b) = gcd(b, a mod b).
# Hint: remember the mod symbol is % in Python. Do not import anything.
# For example, the greatest common divisor (gcd) between a = 20 and b = 12 is: 4
# gcd(20,12) is the same as gcd(12, 20 mod 12) = gcd(12,8)
# gcd(12,8) is the same as gcd(8, 12 mod 8) = gcd(8,4)
# gcd(8,4) is the same as gcd(4, 8 mod 4) = gcd(4,0)
# The gcd is found (and the gcd is equal to a) when we reach 0 for b.
def gcd(a, b):
    """Greatest common divisor of two integers.

    :param a: int
    :param b: int
    :return: the largest positive integer dividing both a and b.
        Sign handling mirrors the recursive original: two negative
        arguments are treated as positive, while a mixed pair containing
        a negative value yields 0.
    """
    # Normalize signs exactly as the recursive variant does.
    if a < 0 and b < 0:
        a, b = abs(a), abs(b)
    elif a < 0 or b < 0:
        return 0
    # Iterative Euclid: gcd(a, b) == gcd(b, a mod b) until b reaches 0,
    # at which point a holds the answer.
    while b != 0:
        a, b = b, a % b
    return a
# Demo calls covering positive, zero, and negative inputs.
print (gcd (20, 12))
print (gcd (12, 20))
print (gcd (0, 20))
print (gcd (-20, -12))
print (gcd (-12, -20))
print (gcd (0, -20))
# Other way is iteratively
def gcd_iter(a, b):
    """Greatest common divisor of two integers, by descending search.

    :param a: int
    :param b: int
    :return: the largest positive integer dividing both a and b.
        Sign handling mirrors the recursive variant: two negative
        arguments are treated as positive, while a mixed pair containing
        a negative value yields 0.
    """
    if a < 0 and b < 0:
        a, b = abs(a), abs(b)
    elif a < 0 or b < 0:
        return 0
    low, high = (a, b) if a <= b else (b, a)
    # When one operand is zero, the other is the gcd by definition.
    if low == 0:
        return high
    # Scan downward from the smaller operand; the first common divisor
    # encountered is the greatest.  The loop always returns by cand == 1.
    for cand in range(low, 0, -1):
        if high % cand == 0 and low % cand == 0:
            return cand
# Demo calls for the iterative variant.
print (gcd_iter (20, 12))
print (gcd_iter (12, 20))
print (gcd_iter (0, 20))
print (gcd_iter (-20, -12))
print (gcd_iter (-12, -20))
print (gcd_iter (0, -20)) | 30.285714 | 100 | 0.569575 | # Problem 4 MIT Midterm #
# Write a function called gcd
# that calculates the greatest common divisor of two positive integers.
# The gcd of two or more integers, when at least one of them is not zero,
# is the largest positive integer that divides the numbers without a remainder.
# 20 min until finished
# One way is recursively,
# where the greatest common denominator of a and b can be calculated as gcd(a, b) = gcd(b, a mod b).
# Hint: remember the mod symbol is % in Python. Do not import anything.
# For example, the greatest common divisor (gcd) between a = 20 and b = 12 is: 4
# gcd(20,12) is the same as gcd(12, 20 mod 12) = gcd(12,8)
# gcd(12,8) is the same as gcd(8, 12 mod 8) = gcd(8,4)
# gcd(8,4) is the same as gcd(4, 8 mod 4) = gcd(4,0)
# The gcd is found (and the gcd is equal to a) when we reach 0 for b.
def gcd(a, b):
    """Greatest common divisor of two integers.

    :param a: int
    :param b: int
    :return: the largest positive integer dividing both a and b.
        Sign handling mirrors the recursive original: two negative
        arguments are treated as positive, while a mixed pair containing
        a negative value yields 0.
    """
    # Normalize signs exactly as the recursive variant does.
    if a < 0 and b < 0:
        a, b = abs(a), abs(b)
    elif a < 0 or b < 0:
        return 0
    # Iterative Euclid: gcd(a, b) == gcd(b, a mod b) until b reaches 0,
    # at which point a holds the answer.
    while b != 0:
        a, b = b, a % b
    return a
# Demo calls covering positive, zero, and negative inputs.
print (gcd (20, 12))
print (gcd (12, 20))
print (gcd (0, 20))
print (gcd (-20, -12))
print (gcd (-12, -20))
print (gcd (0, -20))
# Other way is iteratively
def gcd_iter(a, b):
    """Greatest common divisor of two integers, by descending search.

    :param a: int
    :param b: int
    :return: the largest positive integer dividing both a and b.
        Sign handling mirrors the recursive variant: two negative
        arguments are treated as positive, while a mixed pair containing
        a negative value yields 0.
    """
    if a < 0 and b < 0:
        a, b = abs(a), abs(b)
    elif a < 0 or b < 0:
        return 0
    low, high = (a, b) if a <= b else (b, a)
    # When one operand is zero, the other is the gcd by definition.
    if low == 0:
        return high
    # Scan downward from the smaller operand; the first common divisor
    # encountered is the greatest.  The loop always returns by cand == 1.
    for cand in range(low, 0, -1):
        if high % cand == 0 and low % cand == 0:
            return cand
# Demo calls for the iterative variant.
print (gcd_iter (20, 12))
print (gcd_iter (12, 20))
print (gcd_iter (0, 20))
print (gcd_iter (-20, -12))
print (gcd_iter (-12, -20))
print (gcd_iter (0, -20)) | 0 | 0 | 0 |
cb6e51561b1b69a0be9fba956b115f616d09886f | 334 | py | Python | config.py | val09072010/libraryms | 5f5cd62e6eebafb1e942746a02822a7bd810c405 | [
"MIT"
] | null | null | null | config.py | val09072010/libraryms | 5f5cd62e6eebafb1e942746a02822a7bd810c405 | [
"MIT"
] | null | null | null | config.py | val09072010/libraryms | 5f5cd62e6eebafb1e942746a02822a7bd810c405 | [
"MIT"
] | 1 | 2018-07-19T05:12:45.000Z | 2018-07-19T05:12:45.000Z | # -*- coding: utf-8 -*-
import os
# Directory containing this config file, so the SQLite database and the
# migration repository resolve consistently regardless of the working
# directory the app is launched from.
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
SQLALCHEMY_TRACK_MODIFICATIONS = False
CSRF_ENABLED = True
# NOTE(review): secret key is hard-coded in source; in a real deployment
# it should come from the environment or a secrets store.
SECRET_KEY = 'ochen-secretnyj-klyuch'
| 30.363636 | 73 | 0.730539 | # -*- coding: utf-8 -*-
import os
# Directory containing this config file, so the SQLite database and the
# migration repository resolve consistently regardless of the working
# directory the app is launched from.
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
SQLALCHEMY_TRACK_MODIFICATIONS = False
CSRF_ENABLED = True
# NOTE(review): secret key is hard-coded in source; in a real deployment
# it should come from the environment or a secrets store.
SECRET_KEY = 'ochen-secretnyj-klyuch'
| 0 | 0 | 0 |
b6ce6a3353807b4b3cf8e40fb30166edd4b3e0dd | 1,219 | py | Python | Capitulo_08/exercise8_7.py | thiagosouzalink/my_codes-exercices-book-curso_intensivo_de_python | 841aa855a7450ad3d0ba65393ba0b6debcd6a770 | [
"MIT"
] | null | null | null | Capitulo_08/exercise8_7.py | thiagosouzalink/my_codes-exercices-book-curso_intensivo_de_python | 841aa855a7450ad3d0ba65393ba0b6debcd6a770 | [
"MIT"
] | null | null | null | Capitulo_08/exercise8_7.py | thiagosouzalink/my_codes-exercices-book-curso_intensivo_de_python | 841aa855a7450ad3d0ba65393ba0b6debcd6a770 | [
"MIT"
] | null | null | null | """
8.7 – Álbum: Escreva uma função chamada make_album() que construa um
dicionário descrevendo um álbum musical. A função deve aceitar o nome de um
artista e o título de um álbum e deve devolver um dicionário contendo essas
duas informações. Use a função para criar três dicionários que representem
álbuns diferentes. Apresente cada valor devolvido para mostrar que os
dicionários estão armazenando as informações do álbum corretamente.
Acrescente um parâmetro opcional em make_album() que permita armazenar
o número de faixas em um álbum. Se a linha que fizer a chamada incluir um
valor para o número de faixas, acrescente esse valor ao dicionário do álbum.
Faça pelo menos uma nova chamada da função incluindo o número de faixas
em um álbum.
"""
# Three albums built without a track count...
album1 = make_album('Arctic Monkeys', 'AM')
album2 = make_album('U2', 'The Joshua Tree')
album3 = make_album('Red Hot Chili Peppers', 'Californication')
# ...and one that also records the optional number of tracks.
album4 = make_album('The Strokes', 'Is This It', 11)
# NOTE(review): make_album is not defined in this excerpt; the full
# exercise solution defines it above these calls.
print(album1)
print(album2)
print(album3)
print(album4) | 38.09375 | 76 | 0.76128 | """
8.7 – Álbum: Escreva uma função chamada make_album() que construa um
dicionário descrevendo um álbum musical. A função deve aceitar o nome de um
artista e o título de um álbum e deve devolver um dicionário contendo essas
duas informações. Use a função para criar três dicionários que representem
álbuns diferentes. Apresente cada valor devolvido para mostrar que os
dicionários estão armazenando as informações do álbum corretamente.
Acrescente um parâmetro opcional em make_album() que permita armazenar
o número de faixas em um álbum. Se a linha que fizer a chamada incluir um
valor para o número de faixas, acrescente esse valor ao dicionário do álbum.
Faça pelo menos uma nova chamada da função incluindo o número de faixas
em um álbum.
"""
def make_album(name, title_album, number_tracks=0):
    """Build a dict describing a music album.

    :param name: artist name (stored under 'Nome')
    :param title_album: album title (stored under 'Título')
    :param number_tracks: optional track count; added under 'Faixas'
        only when truthy (non-zero)
    :return: the album dict
    """
    album = {'Nome': name, 'Título': title_album}
    if number_tracks:
        album['Faixas'] = number_tracks
    return album
# Three albums built without a track count...
album1 = make_album('Arctic Monkeys', 'AM')
album2 = make_album('U2', 'The Joshua Tree')
album3 = make_album('Red Hot Chili Peppers', 'Californication')
# ...and one that also records the optional number of tracks.
album4 = make_album('The Strokes', 'Is This It', 11)
print(album1)
print(album2)
print(album3)
print(album4) | 184 | 0 | 23 |
24fe7815d16c16a1a1eed0509d520d003faddbac | 752 | py | Python | scripts/clean_issues.py | ruben-iteng/faebryk | 58810da4cb24581f421c39784ccf61e1a4ea8ae5 | [
"MIT"
] | 7 | 2021-11-22T20:02:14.000Z | 2022-03-04T19:35:04.000Z | scripts/clean_issues.py | ruben-iteng/faebryk | 58810da4cb24581f421c39784ccf61e1a4ea8ae5 | [
"MIT"
] | 45 | 2021-11-22T20:24:40.000Z | 2022-03-25T11:01:28.000Z | scripts/clean_issues.py | ruben-iteng/faebryk | 58810da4cb24581f421c39784ccf61e1a4ea8ae5 | [
"MIT"
] | 3 | 2021-11-22T19:58:08.000Z | 2021-12-17T16:14:08.000Z | # This file is part of the faebryk project
# SPDX-License-Identifier: MIT
import csv
import subprocess
import re
import logging
logger = logging.getLogger("script")

# Expects a csv file in the format: issue_number,title
# Can be generated with gh issue list and some manual editing
# Open with newline='' as the csv module requires, so quoted fields
# containing embedded newlines are parsed correctly.
with open("issues.txt", 'r', newline='') as f:
    reader = csv.DictReader(f)
    rows = list(reader)

# issue number -> current title
issues = {
    row["issue"]: row["title"]
    for row in rows
}
# issue number -> title with any leading "[tag]" prefix stripped
new_titles = {
    issue: re.sub(r"^\[[^\]]*\][ :]*", "", title)
    for issue, title in issues.items()
}

for issue, title in issues.items():
    # Lazy %-style args: formatting only happens if the record is emitted.
    logger.info("%s->%s", title, new_titles[issue])
for issue, title in new_titles.items():
    subprocess.run(["gh", "issue", "edit", issue, "--title", title])
| 21.485714 | 68 | 0.658245 | # This file is part of the faebryk project
# SPDX-License-Identifier: MIT
import csv
import subprocess
import re
import logging
logger = logging.getLogger("script")

# Expects a csv file in the format: issue_number,title
# Can be generated with gh issue list and some manual editing
# Open with newline='' as the csv module requires, so quoted fields
# containing embedded newlines are parsed correctly.
with open("issues.txt", 'r', newline='') as f:
    reader = csv.DictReader(f)
    rows = list(reader)

# issue number -> current title
issues = {
    row["issue"]: row["title"]
    for row in rows
}
# issue number -> title with any leading "[tag]" prefix stripped
new_titles = {
    issue: re.sub(r"^\[[^\]]*\][ :]*", "", title)
    for issue, title in issues.items()
}

for issue, title in issues.items():
    # Lazy %-style args: formatting only happens if the record is emitted.
    logger.info("%s->%s", title, new_titles[issue])
for issue, title in new_titles.items():
    subprocess.run(["gh", "issue", "edit", issue, "--title", title])
| 0 | 0 | 0 |
aa4e84cbdc5bc1fb51ac4d4bbf0927467505eddb | 7,568 | py | Python | setup.py | intactio/line-bot-sdk-python | c9876c587c3a819c28ab412f7c971ca40f5a8895 | [
"Apache-2.0"
] | 1 | 2022-03-16T07:58:21.000Z | 2022-03-16T07:58:21.000Z | setup.py | intactio/line-bot-sdk-python | c9876c587c3a819c28ab412f7c971ca40f5a8895 | [
"Apache-2.0"
] | 1 | 2022-03-28T20:35:10.000Z | 2022-03-28T20:35:10.000Z | setup.py | intactio/line-bot-sdk-python | c9876c587c3a819c28ab412f7c971ca40f5a8895 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import sys
import subprocess
from setuptools import setup, Command
from setuptools.command.test import test as TestCommand
# Extract the package version from linebot/__about__.py without importing the
# package: scan line by line for the first `__version__ = "..."` assignment.
__version__ = ''
with open('linebot/__about__.py', 'r') as fd:
    reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
    for line in fd:
        m = reg.match(line)
        if m:
            __version__ = m.group(1)
            break
# Use the full README as the long description shown on PyPI.
with open('README.rst', 'r') as fd:
    long_description = fd.read()
# NOTE(review): _requirements, _requirements_test, PyTest and CodegenCommand
# are referenced below but not defined above in this copy of the file --
# presumably they were stripped out; confirm they exist before setup() runs.
setup(
    name="line-bot-sdk",
    version=__version__,
    author="RyosukeHasebe",
    author_email="hsb.1014@gmail.com",
    maintainer="RyosukeHasebe",
    maintainer_email="hsb.1014@gmail.com",
    url="https://github.com/line/line-bot-sdk-python",
    description="LINE Messaging API SDK for Python",
    long_description=long_description,
    license='Apache License 2.0',
    packages=[
        "linebot", "linebot.models"
    ],
    python_requires=">=3.6.0",
    install_requires=_requirements(),
    tests_require=_requirements_test(),
    cmdclass={
        'test': PyTest,
        'codegen': CodegenCommand
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: Apache Software License",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Topic :: Software Development"
    ]
)
| 36.560386 | 99 | 0.552061 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import sys
import subprocess
from setuptools import setup, Command
from setuptools.command.test import test as TestCommand
# Extract the package version from linebot/__about__.py without importing the
# package: scan line by line and stop at the first `__version__ = "..."` match.
__version__ = ''
with open('linebot/__about__.py', 'r') as fd:
    reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
    for line in fd:
        m = reg.match(line)
        if m:
            __version__ = m.group(1)
            break
def _requirements():
with open('requirements.txt', 'r') as fd:
return [name.strip() for name in fd.readlines()]
def _requirements_test():
with open('requirements-test.txt', 'r') as fd:
return [name.strip() for name in fd.readlines()]
class PyTest(TestCommand):
    """``python setup.py test`` command that delegates the run to pytest."""

    def finalize_options(self):
        """Finish option setup with an empty pytest argument list."""
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        """Invoke pytest and exit with its status code."""
        # Deferred import: pytest is only required (and installed) when the
        # test command actually runs.
        import pytest
        sys.exit(pytest.main(self.test_args))
class CodegenCommand(Command):
    """``python setup.py codegen`` command.

    Reads ``linebot/api.py`` and rewrites it, through a pipeline of textual
    ``re.sub`` passes, into the asynchronous client ``linebot/async_api.py``,
    then formats the generated file with ``black``. The passes are
    order-dependent: each one operates on the output of the previous one.
    """
    # This command takes no command-line options.
    user_options = []
    def initialize_options(self):
        """Required Command hook; nothing to initialize."""
        pass
    def finalize_options(self):
        """Required Command hook; nothing to finalize."""
        pass
    def run(self):
        """Generate ``linebot/async_api.py`` from ``linebot/api.py``."""
        basedir = os.path.abspath(os.path.dirname(__file__))
        # Warning banner prepended to the generated file.
        header = (
            "# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
            "#\n"
            "# *** DO NOT EDIT THIS FILE ***\n"
            "#\n"
            "# 1) Modify linebot/api.py\n"
            "# 2) Run `python setup.py codegen`\n"
            "#\n"
            "# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
            "\n"
        )
        with open(f"{basedir}/linebot/api.py", "r") as original:
            source = original.read()
        # NOTE(review): ``re`` is already imported at module level; this local
        # import is redundant.
        import re
        async_source = header + source
        # Make every method except __init__ asynchronous.
        async_source = re.sub(" def (?!__init__)", " async def ",
                              async_source)
        # Drop the synchronous http_client import line entirely.
        async_source = re.sub("from .http_client import HttpClient, RequestsHttpClient", "",
                              async_source)
        # Change the signature of the __init__.
        # self, channel_access_token
        async_source = re.sub(r"def __init__\(self, channel_access_token,",
                              "def __init__(self, channel_access_token, async_http_client,",
                              async_source)
        # Remove the synchronous timeout/http_client keyword parameters.
        async_source = re.sub(
            r",\s*timeout=HttpClient.DEFAULT_TIMEOUT, http_client=RequestsHttpClient",
            "", async_source)
        # Replace the synchronous client construction branch with a plain
        # assignment of the injected async client.
        async_source = re.sub(
            r"if http_client:\n"
            + r"\s*self.http_client = http_client\(timeout=timeout\)\n"
            + r"\s*else:\n"
            + r"\s*self.http_client = RequestsHttpClient\(timeout=timeout\)",
            "self.async_http_client = async_http_client", async_source)
        # Rewrite the __init__ docstring for the async signature.
        async_source = re.sub(
            r"\"\"\"__init__ method.*?\"\"\"\n",
            '"""__init__ method.' + "\n\n"
            + "        :param str channel_access_token: Your channel access token\n"
            + "        :param str endpoint: (optional) Default is https://api.line.me\n"
            + "        :param str data_endpoint: (optional) Default is https://api-data.line.me\n"
            + "\n\"\"\"\n"
            , async_source, flags=re.DOTALL)
        # Adjust the user-agent string and the module docstring.
        async_source = re.sub("'line-bot-sdk-python/'", '"line-bot-sdk-python-async/"',
                              async_source)
        async_source = re.sub('"""linebot.api module."""', '"""linebot.async_api module."""',
                              async_source)
        # Await the internal HTTP helpers and the client calls.
        async_source = re.sub(
            "self.(_get|_post|_delete|_put)", "await self.\\1", async_source
        )
        async_source = re.sub(
            "self.http_client.(get|post|delete|put)", "await self.async_http_client.\\1",
            async_source
        )
        async_source = re.sub(
            "response.json", "(await response.json)", async_source
        )
        # NOTE(review): the substitution above is applied a second time below;
        # on the second pass the pattern also matches inside the already
        # rewritten "(await response.json)" text -- verify the double
        # application is intended.
        async_source = re.sub(
            "response.json", "(await response.json)", async_source
        )
        # NOTE(review): the matching import line was already removed by an
        # earlier pass above, so this substitution appears to find nothing --
        # confirm the async import is introduced somewhere.
        async_source = re.sub(
            "from .http_client import HttpClient, RequestsHttpClient",
            "from .async_http_client import AsyncHttpClient, AiohttpAsyncHttpClient",
            async_source
        )
        # Swap remaining synchronous names/references for the async variants.
        async_source = re.sub(
            "linebot.http_client.RequestsHttpClient",
            "linebot.async_http_client.AiohttpAsyncHttpClient",
            async_source
        )
        async_source = re.sub(
            "HttpClient.DEFAULT_TIMEOUT", "AsyncHttpClient.DEFAULT_TIMEOUT", async_source
        )
        async_source = re.sub(
            "RequestsHttpClient", "AiohttpAsyncHttpClient", async_source
        )
        async_source = re.sub(
            "Default is self.http_client.timeout", "Default is self.async_http_client.timeout",
            async_source
        )
        async_source = re.sub(
            "self.__check_error", "await self.__check_error", async_source
        )
        # NOTE(review): duplicated substitution -- the second pass re-matches
        # the text already rewritten above; verify this is intended.
        async_source = re.sub(
            "self.__check_error", "await self.__check_error", async_source
        )
        async_source = re.sub(
            "class LineBotApi",
            "class AsyncLineBotApi", async_source
        )
        # Streaming is not supported by the async client: drop the argument.
        async_source = re.sub("stream=(stream|False|True), ", "", async_source)
        with open(f"{basedir}/linebot/async_api.py", "w") as output:
            output.write(async_source)
        # Normalize the generated file's formatting with black.
        subprocess.check_call(
            [sys.executable, "-m", "black", f"{basedir}/linebot/async_api.py"],
        )
# Use the full README as the long description shown on PyPI.
with open('README.rst', 'r') as fd:
    long_description = fd.read()
setup(
    name="line-bot-sdk",
    version=__version__,
    author="RyosukeHasebe",
    author_email="hsb.1014@gmail.com",
    maintainer="RyosukeHasebe",
    maintainer_email="hsb.1014@gmail.com",
    url="https://github.com/line/line-bot-sdk-python",
    description="LINE Messaging API SDK for Python",
    long_description=long_description,
    license='Apache License 2.0',
    packages=[
        "linebot", "linebot.models"
    ],
    python_requires=">=3.6.0",
    # Dependency lists are read from requirements*.txt at build time.
    install_requires=_requirements(),
    tests_require=_requirements_test(),
    # Custom commands: `setup.py test` (pytest) and `setup.py codegen`
    # (regenerates linebot/async_api.py).
    cmdclass={
        'test': PyTest,
        'codegen': CodegenCommand
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: Apache Software License",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Topic :: Software Development"
    ]
)
| 5,153 | 117 | 145 |
4ec4a66c1b9b047e0cf13082152155ca0580f248 | 163 | py | Python | python_bindings/pytest.py | zmcgrath96/nf_prefix_tree | ca49ca33e5ce99c7775d60aeb7c2952628e95886 | [
"MIT"
] | null | null | null | python_bindings/pytest.py | zmcgrath96/nf_prefix_tree | ca49ca33e5ce99c7775d60aeb7c2952628e95886 | [
"MIT"
] | null | null | null | python_bindings/pytest.py | zmcgrath96/nf_prefix_tree | ca49ca33e5ce99c7775d60aeb7c2952628e95886 | [
"MIT"
] | null | null | null | from nf_prefix_tree import PyPrefixTree
# Build a prefix tree and attach a payload key to each inserted sequence.
# 'ABCDE' and 'ABCXY' share the 'ABC' prefix; 'ZYAPW' starts a separate branch.
t = PyPrefixTree()
t.addSequence('ABCDE', 'key1')
t.addSequence('ABCXY', 'key2')
t.addSequence('ZYAPW', 'key3')
t.show() | 18.111111 | 39 | 0.717791 | from nf_prefix_tree import PyPrefixTree
t = PyPrefixTree()
t.addSequence('ABCDE', 'key1')
t.addSequence('ABCXY', 'key2')
t.addSequence('ZYAPW', 'key3')
t.show() | 0 | 0 | 0 |
a61542e53fb771207126b26591e7c32500b86e28 | 2,545 | py | Python | app.py | dimas-avila/youtube_downloader | e96bfce75b9e365e2100433f42ae28f4b197c69d | [
"MIT"
] | 1 | 2022-03-14T19:56:54.000Z | 2022-03-14T19:56:54.000Z | app.py | dimas-avila/youtube_downloader | e96bfce75b9e365e2100433f42ae28f4b197c69d | [
"MIT"
] | null | null | null | app.py | dimas-avila/youtube_downloader | e96bfce75b9e365e2100433f42ae28f4b197c69d | [
"MIT"
] | null | null | null | import downloader
import tkinter as tk
from tkinter import filedialog
import pathlib
import os
root = tk.Tk()
app = Application(master=root)
app.mainloop()
| 33.051948 | 95 | 0.603536 | import downloader
import tkinter as tk
from tkinter import filedialog
import pathlib
import os
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.master.geometry("500x250")
self.grid()
self.row = 0
self.quality = tk.StringVar(self, value="480p")
self.isAudio = tk.BooleanVar(self)
self.descargador = downloader.Downloader(
pathlib.Path().absolute().joinpath('descargas'))
self.create_widgets()
def create_widgets(self):
self.link_label = tk.Label(self)
self.link_label["text"] = "Introduce el link del vídeo"
self.link_label.grid(row=self.row, column=0)
self.link_entry = tk.Entry(self)
self.link_entry.grid(row=self.row, column=1)
self.updaterow()
media = {"Solo Audio": True, "Audio y Vídeo": False}
qualities = ["480p", "720p", "1080p"]
tk.Label(
self, text="¿Quieres solo Audio o Vídeo+Audio?").grid(row=self.row, column=0)
self.updaterow()
for (text, value) in media.items():
tk.Radiobutton(self, text=text, value=value,
variable=self.isAudio).grid(row=self.row, column=0)
self.updaterow()
tk.Label(self, text="¿Qué calidad deseas?").grid(
row=self.row, column=0)
self.updaterow()
for quality in qualities:
tk.Radiobutton(self, text=quality, value=quality,
variable=self.quality).grid(row=self.row, column=0)
self.updaterow()
self.descarga = tk.Button(self)
self.descarga["text"] = "Descargar"
self.descarga["command"] = self.descargar
self.descarga.grid(row=self.row, column=0)
self.abrir = tk.Button(self)
self.abrir["text"] = "Abrir"
self.abrir["command"] = self.abrir_fichero
self.abrir.grid(row=self.row, column=2)
def abrir_fichero(self):
file_path = tk.filedialog.askopenfilename(
initialdir=pathlib.Path().absolute().joinpath('descargas'))
os.system(f'start {file_path}')
def updaterow(self):
self.row += 1
def descargar(self):
print(
f'El link del vídeo es: {self.link_entry.get()} y la calidad {self.quality.get()}')
self.descargador.download(self.link_entry.get(
), self.isAudio.get(), quality=self.quality.get())
# Bootstrap: create the Tk root window, mount the Application frame on it,
# and enter the Tk event loop (blocks until the window is closed).
root = tk.Tk()
app = Application(master=root)
app.mainloop()
| 2,229 | 7 | 157 |
6cf0b5dd6bffac99344a730cb61bbf631ce0c317 | 247 | py | Python | src/sample_pck/entry_points.py | dataPuzzler/python_package_starter | dc3d5aaab59b0d4e66175b57284826f0e4582b5a | [
"BSD-3-Clause"
] | null | null | null | src/sample_pck/entry_points.py | dataPuzzler/python_package_starter | dc3d5aaab59b0d4e66175b57284826f0e4582b5a | [
"BSD-3-Clause"
] | null | null | null | src/sample_pck/entry_points.py | dataPuzzler/python_package_starter | dc3d5aaab59b0d4e66175b57284826f0e4582b5a | [
"BSD-3-Clause"
] | null | null | null | """
entry-point functions for the sample_pck module, as referenced in setup.cfg
"""
from .animals import Animal, create_jerry, create_tom | 24.7 | 76 | 0.712551 | """
entry-point functions for the sample_pck module, as referenced in setup.cfg
"""
from .animals import Animal, create_jerry, create_tom
def main() -> tuple:
    """Create the two demo animals and return them as a ``(tom, jerry)`` pair."""
    return (create_tom(Animal), create_jerry(Animal))
b893c8dd01e71515a8cddbd9f049bee2611e6e32 | 772 | py | Python | community/polls/forms.py | evinjaff/speedrunsunchained | 335a127e6deffc280009ca86f2c4dd148c1629f0 | [
"BSD-3-Clause-Clear"
] | 1 | 2022-03-21T03:01:14.000Z | 2022-03-21T03:01:14.000Z | community/polls/forms.py | evinjaff/speedrunsunchained | 335a127e6deffc280009ca86f2c4dd148c1629f0 | [
"BSD-3-Clause-Clear"
] | 13 | 2022-03-21T18:07:43.000Z | 2022-03-28T15:58:56.000Z | community/polls/forms.py | evinjaff/speedrunsunchained | 335a127e6deffc280009ca86f2c4dd148c1629f0 | [
"BSD-3-Clause-Clear"
] | null | null | null | from email.policy import default
from time import timezone
from django import forms
from django.utils import timezone
| 35.090909 | 68 | 0.762953 | from email.policy import default
from time import timezone
from django import forms
from django.utils import timezone
class AddGame(forms.Form):
    """Form for submitting a game: title, dates, console, genre and free-form tags."""
    game_title = forms.CharField(max_length=100)
    pub_date = forms.DateTimeField()
    # Stored as text, at most 5 characters (e.g. a four-digit year).
    year_published = forms.CharField(max_length=5)
    console = forms.CharField(max_length=100)
    genre = forms.CharField(max_length=100)
    # Optional multi-line tag blob rendered as a textarea.
    tagblob = forms.CharField(required=False, widget=forms.Textarea)
class AddChallenge(forms.Form):
    """Form for submitting a challenge.

    NOTE(review): the fields below are a verbatim copy of AddGame's
    (including ``game_title``); presumably challenge-specific fields were
    intended -- confirm before changing, since templates/views may bind to
    these exact field names.
    """
    game_title = forms.CharField(max_length=100)
    pub_date = forms.DateTimeField()
    # Stored as text, at most 5 characters (e.g. a four-digit year).
    year_published = forms.CharField(max_length=5)
    console = forms.CharField(max_length=100)
    genre = forms.CharField(max_length=100)
    # Optional multi-line tag blob rendered as a textarea.
    tagblob = forms.CharField(required=False, widget=forms.Textarea)
3c1ee97b46f138e474be8a6010eadb0598166fa0 | 3,652 | py | Python | run_PyTorch_cpu.py | swenkel/python-math-benchmark | adf95263f85dca78f1b13e5283fc7f16edb6bb8d | [
"Apache-2.0"
] | 2 | 2022-01-21T02:18:28.000Z | 2022-03-08T12:30:14.000Z | run_PyTorch_cpu.py | berhane/python-math-benchmark | adf95263f85dca78f1b13e5283fc7f16edb6bb8d | [
"Apache-2.0"
] | null | null | null | run_PyTorch_cpu.py | berhane/python-math-benchmark | adf95263f85dca78f1b13e5283fc7f16edb6bb8d | [
"Apache-2.0"
] | 1 | 2022-01-21T02:19:19.000Z | 2022-01-21T02:19:19.000Z | ################################################################################
# #
# Script to run PyTorch CPU benchmarks #
# #
# (c) Simon Wenkel, released under the Apache v2 license (see license file) #
# #
# #
################################################################################
################################################################################
# import libraries #
# #
import pickle
import time
from tqdm import tqdm
import torch
# #
################################################################################
################################################################################
# function dict #
# #
functions = {}
functions["sin"] = torch.sin
functions["cos"] = torch.cos
functions["tan"] = torch.tan
functions["asin"] = torch.asin
functions["acos"] = torch.acos
functions["atan"] = torch.atan
functions["exp"] = torch.exp
functions["sinh"] = torch.sinh
functions["cosh"] = torch.cosh
functions["tanh"] = torch.tanh
functions["abs"] = torch.abs
functions["ceil"] = torch.ceil
functions["floor"] = torch.floor
functions["sqrt"] = torch.sqrt
# #
################################################################################
################################################################################
# functions #
# #
# #
################################################################################
if __name__ == "__main__":
main()
| 37.265306 | 80 | 0.334885 | ################################################################################
# #
# Script to run PyTorch CPU benchmarks #
# #
# (c) Simon Wenkel, released under the Apache v2 license (see license file) #
# #
# #
################################################################################
################################################################################
# import libraries #
# #
import pickle
import time
from tqdm import tqdm
import torch
# #
################################################################################
################################################################################
# function dict #
# #
functions = {}
functions["sin"] = torch.sin
functions["cos"] = torch.cos
functions["tan"] = torch.tan
functions["asin"] = torch.asin
functions["acos"] = torch.acos
functions["atan"] = torch.atan
functions["exp"] = torch.exp
functions["sinh"] = torch.sinh
functions["cosh"] = torch.cosh
functions["tanh"] = torch.tanh
functions["abs"] = torch.abs
functions["ceil"] = torch.ceil
functions["floor"] = torch.floor
functions["sqrt"] = torch.sqrt
# #
################################################################################
################################################################################
# functions #
# #
def list_of_items(listSize:int,
                  functions:dict):
    """Time each function over a random 1-D tensor of ``listSize`` elements.

    Each callable is invoked 200 times on the same random tensor.

    :param listSize: number of elements in the random input tensor
    :param functions: mapping of name -> callable applied to the tensor
    :return: dict mapping function name -> {iteration index: elapsed seconds}
    """
    item_list = torch.rand(listSize)
    results = {}
    for name, fn in functions.items():
        # time.perf_counter() is the documented benchmarking clock
        # (monotonic, highest available resolution); the wall clock
        # time.time() used previously can jump under clock adjustment.
        timings = {}
        for i in range(200):
            start = time.perf_counter()
            fn(item_list)
            timings[i] = time.perf_counter() - start
        results[name] = timings
    return results
def array_of_items(arraySize:int,
                   functions:dict):
    """Time each function over a random ``arraySize`` x ``arraySize`` tensor.

    Each callable is invoked 200 times on the same random matrix.

    :param arraySize: edge length of the square random input tensor
    :param functions: mapping of name -> callable applied to the tensor
    :return: dict mapping function name -> {iteration index: elapsed seconds}
    """
    matrix = torch.rand((arraySize,arraySize))
    results = {}
    for name, fn in functions.items():
        # time.perf_counter() is the documented benchmarking clock
        # (monotonic, highest available resolution), unlike time.time().
        timings = {}
        for iteration in range(200):
            start = time.perf_counter()
            fn(matrix)
            timings[iteration] = time.perf_counter() - start
        results[name] = timings
    return results
def main():
    """Run the list and matrix benchmarks and pickle the combined results.

    Results are written to ./results/PyTorch_cpu.pkl as a dict keyed by
    "List_<size>" / "Matrix_<size>".
    """
    import os
    results = {}
    for size in tqdm([1,10,100,1000,10000,100000,1000000]):
        results["List_"+str(size)] = list_of_items(size, functions)
    for size in tqdm([1,10,100,1000,10000]):
        results["Matrix_"+str(size)] = array_of_items(size, functions)
    # Create the output directory up front so pickle cannot fail with
    # FileNotFoundError on a fresh checkout.
    os.makedirs("./results", exist_ok=True)
    # Use a context manager so the file handle is closed/flushed reliably
    # (the previous open() call was never closed explicitly).
    with open("./results/PyTorch_cpu.pkl", "wb") as fh:
        pickle.dump(results, fh)
# #
################################################################################
if __name__ == "__main__":
main()
| 1,165 | 0 | 68 |
db3c4207c50f48cdc28270d617ee480805604ad5 | 438 | py | Python | addons/hr_payroll_community/models/res_config_settings.py | gleis44/stellwerk | 1fc4145eac6bbb76134ef9ebb22f2441a69d093f | [
"MIT"
] | null | null | null | addons/hr_payroll_community/models/res_config_settings.py | gleis44/stellwerk | 1fc4145eac6bbb76134ef9ebb22f2441a69d093f | [
"MIT"
] | null | null | null | addons/hr_payroll_community/models/res_config_settings.py | gleis44/stellwerk | 1fc4145eac6bbb76134ef9ebb22f2441a69d093f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from odoo import fields, models
| 33.692308 | 75 | 0.767123 | # -*- coding: utf-8 -*-
from odoo import fields, models
class ResConfigSettings(models.TransientModel):
    """Settings extension adding accounting/payroll module toggles."""
    _inherit = 'res.config.settings'
    # Boolean checkboxes on the general-settings screen. NOTE(review): by
    # Odoo convention a ``module_``-prefixed boolean on res.config.settings
    # triggers installation of the named module when enabled -- presumably
    # that is the intent here; confirm against the settings view.
    module_account_accountant = fields.Boolean(string='Account Accountant')
    module_l10n_fr_hr_payroll = fields.Boolean(string='French Payroll')
    module_l10n_be_hr_payroll = fields.Boolean(string='Belgium Payroll')
    module_l10n_in_hr_payroll = fields.Boolean(string='Indian Payroll')
| 0 | 357 | 23 |
5f8303a364a058074c7edce225ce29a40b336c35 | 1,019 | py | Python | newprojtestserver.py | AistoBasBistoC/adeept_picarpro | 0a863a21bacbff20eab30e1921d04c82c99a7e36 | [
"MIT"
] | null | null | null | newprojtestserver.py | AistoBasBistoC/adeept_picarpro | 0a863a21bacbff20eab30e1921d04c82c99a7e36 | [
"MIT"
] | null | null | null | newprojtestserver.py | AistoBasBistoC/adeept_picarpro | 0a863a21bacbff20eab30e1921d04c82c99a7e36 | [
"MIT"
] | null | null | null | ################################
##Generated with a lot of love##
## with EasyPython ##
##Web site: easycoding.tn ##
################################
import RPi.GPIO as GPIO
from http.server import BaseHTTPRequestHandler, HTTPServer
GPIO.setmode(GPIO.BCM)
GPIO.setup(26, GPIO.OUT)
request = None
server_address_httpd = ('192.168.254.29',8080)
httpd = HTTPServer(server_address_httpd, RequestHandler_httpd)
print('Starting Server.....')
httpd.serve_forever()
| 28.305556 | 62 | 0.653582 | ################################
##Generated with a lot of love##
## with EasyPython ##
##Web site: easycoding.tn ##
################################
import RPi.GPIO as GPIO
from http.server import BaseHTTPRequestHandler, HTTPServer
GPIO.setmode(GPIO.BCM)
GPIO.setup(26, GPIO.OUT)
request = None
class RequestHandler_httpd(BaseHTTPRequestHandler):
    """HTTP handler that answers every GET with "Hello!" and drives GPIO 26.

    Requesting ``/on`` or ``/off`` switches the pin: LOW for "on", HIGH for
    "off" (active-low wiring, presumably a relay/LED tied to 3V3 -- confirm
    against the circuit).
    """
    def do_GET(self):
        """Send the fixed plain-text response, then act on the request path."""
        global request
        # 'utf' is a registered alias of the UTF-8 codec.
        messagetosend = bytes('Hello!',"utf")
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain')
        self.send_header('Content-Length', len(messagetosend))
        self.end_headers()
        self.wfile.write(messagetosend)
        # requestline looks like "GET /on HTTP/1.1": drop the 5-char "GET /"
        # prefix and the 9-char " HTTP/1.1" suffix, leaving just the path.
        request = self.requestline
        request = request[5 : int(len(request)-9)]
        print(request)
        if request == 'on':
            GPIO.output(26,False)
        if request == 'off':
            GPIO.output(26,True)
        return
httpd = HTTPServer(server_address_httpd, RequestHandler_httpd)
print('Starting Server.....')
httpd.serve_forever()
| 466 | 30 | 47 |
ef447ff1be3b358314419c8ad22e7d4ea1ad4fe3 | 7,762 | py | Python | helper/apptypes.py | wernerpaulin/PyProfGen | 443a2989e07789694b7a0797a3768921227bfeae | [
"MIT"
] | 7 | 2021-04-08T13:43:14.000Z | 2021-05-04T14:57:30.000Z | helper/apptypes.py | wernerpaulin/PyProfGen | 443a2989e07789694b7a0797a3768921227bfeae | [
"MIT"
] | null | null | null | helper/apptypes.py | wernerpaulin/PyProfGen | 443a2989e07789694b7a0797a3768921227bfeae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import json
from collections import namedtuple
import paho.mqtt.client as mqtt
import time
#sudo pip3 install paho-mqtt
#sudo apt-get install -y mosquitto mosquitto-clients
#sudo systemctl enable mosquitto.service
MQTT_ERR_SUCCESS = 0
class RTapp:
"Cyclic realtime app"
#when the client connects to the broker (again), send all parameters out to initialize the UI and also subscribe to topics
#when the UI connects to the broker send onConnect values to allow the UI to (re-)initalize itself
| 44.867052 | 185 | 0.618784 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import json
from collections import namedtuple
import paho.mqtt.client as mqtt
import time
#sudo pip3 install paho-mqtt
#sudo apt-get install -y mosquitto mosquitto-clients
#sudo systemctl enable mosquitto.service
MQTT_ERR_SUCCESS = 0
def PubSub_onConnect(client, userdata, flags, rc):
if (rc != 0):
print("MQTT: Error connecting with result code {0}".format(rc))
else:
userdata.onMqttBrokerConnected()
def PubSub_onMessage(client, userdata, msg):
#forward message to app instance in which the MQTT client lives
try:
userdata.onMqttMessageReceived(msg.topic, msg.payload.decode())
except Exception as e:
print("MQTT: unhandled topic <{0}> received with payload {1} and error: {3}".format(msg.topic, msg.payload.decode(), e))
class RTapp:
"Cyclic realtime app"
def __init__(self, cycleTime, appName):
self.cycleTime = cycleTime
self.appName = appName
self.mqttClient = {}
self.brokerIP = ""
self.brokerPort = 0
self.brokerKeepalive = 0
self.subscriptionList = dict() #[topic] = data object
self.publicationListCyclic = dict() #[topic] = data object
self.publicationListOnConnect = dict() #[topic] = data object
print("Init of RT app <{0}> done".format(self.appName))
def cyclic(self):
#walk through all topics registered for cyclic publishing
for topic in self.publicationListCyclic:
try:
sourceDataObj = self.publicationListCyclic[topic]
#self.subscriptionList[topic] ...source data instance its data need to be published
#key ...name of attribute in instance
#jsonObj[key] ...value to be read from attribute
self.mqttClient.publish(topic, json.dumps(sourceDataObj.__dict__))
except Exception as e:
print("MQTT: Error publishing topic <{0}>: {2}".format(topic, e))
def pubSubConnect(self, brokerIP, brokerPort, brokerKeepalive):
print("MQTT: connecting to broker with IP address <{0}> via port {1}".format(brokerIP, brokerPort))
#connect to MQTT broker
self.brokerIP = brokerIP
self.brokerPort = brokerPort
self.brokerKeepalive = brokerKeepalive
self.mqttClient = mqtt.Client(userdata=self)
self.mqttClient.on_connect = PubSub_onConnect
self.mqttClient.on_message = PubSub_onMessage
#connect to broker without exception in case the broker is not yet available or the network is not yet up
self.mqttSaveConnect()
#once the connected start the receive and send loop
self.mqttClient.loop_start() #non-blocking call with automatic reconnects
def mqttSaveConnect(self):
try:
self.mqttClient.connect(self.brokerIP, self.brokerPort, self.brokerKeepalive)
except Exception as e:
print("MQTT: Fundamental error: {0}".format(e))
print("MQTT: Trying to connect...")
time.sleep(1)
self.mqttSaveConnect()
def addSubscription(self, topic, destinationDataObj):
#MQTT
self.subscriptionList[topic] = destinationDataObj #register topic and the destination data object to which all receive data will be mapped
def addCyclicPublication(self, topic, sourceDataObj):
#MQTT
self.publicationListCyclic[topic] = sourceDataObj
def addOnConnectPublication(self, topic, sourceDataObj):
#MQTT
self.publicationListOnConnect[topic] = sourceDataObj
def onMqttMessageReceived(self, topic, payload):
try:
#print("MQTT: RT app <{0}> received topic <{1}> with payload {2}".format(self.appName, topic, payloadJSON))
#assume data is sent as JSON string: "{"id":"MC_MoveVelocity"}"
jsonObj = json.loads(payload)
#map json key values directly to class instance
for key in jsonObj:
#self.subscriptionList[topic] ...destination data instance
#key ...name of attribute in instance which should be written
#jsonObj[key] ...value to be written to attribute in instance
setattr(self.subscriptionList[topic], key, jsonObj[key])
except Exception as e:
print("MQTT: Error decoding received MQTT payload of RT app <{0}>, error: {1}".format(self.appName, e))
#when the client connects to the broker (again), send all parameters out to initialize the UI and also subscribe to topics
def onMqttBrokerConnected(self):
print("MQTT: RT App <{0}> connected to broker at: <{1}>".format(self.appName, self.brokerIP)) #this print() is necessary so that the following code is executed - no idea why?
#send parameters for subscribers to initalize their default data e.g. in UI
try:
print("MQTT: publishing all parameters of RT App <{0}> for subscribes to initalize their default data e.g. in UI".format(self.appName))
for topic in self.publicationListOnConnect:
try:
sourceDataObj = self.publicationListOnConnect[topic]
self.mqttClient.publish(topic, json.dumps(sourceDataObj.__dict__))
except Exception as e:
print("MQTT: Error publishing topic <{0}>: {1}".format(topic, e))
except Exception as e:
print("MQTT: Error publishing topic: <{0}>".format(e))
#subscribe to all topics the app wants to consume
try:
print("MQTT: subsribing to all topics the RT App <{0}> wants to consume".format(self.appName))
retSubscribe = MQTT_ERR_SUCCESS
mid = 0 #mid ...message id
for topic in self.subscriptionList:
try:
retSubscribe, mid = self.mqttClient.subscribe(topic)
if (retSubscribe != MQTT_ERR_SUCCESS):
print("MQTT: Bad return code when subscribing to topic <{0}>: {1}".format(topic, retSubscribe))
break
except Exception as e:
print("MQTT: Error subscribing to topic <{0}>: {1}".format(topic, e))
#subscription failed -> try again
if (retSubscribe != MQTT_ERR_SUCCESS):
print("MQTT: Trying to subscribe again...")
time.sleep(1)
self.onMqttBrokerConnected()
except Exception as e:
print("MQTT: Error subscribing to topic: <{0}>".format(e))
#when the UI connects to the broker send onConnect values to allow the UI to (re-)initalize itself
def onUserInterfaceConnected(self):
print("MQTT: UI requests a publish of on-connect parameters of RT App <{0}>".format(self.appName))
#send parameters for subscribees to initalize their default data e.g. in UI
try:
print("MQTT: publishing all parameters of RT App <{0}> for subscribes to initalize their default data e.g. in UI".format(self.appName))
for topic in self.publicationListOnConnect:
try:
sourceDataObj = self.publicationListOnConnect[topic]
self.mqttClient.publish(topic, json.dumps(sourceDataObj.__dict__))
except Exception as e:
print("MQTT: Error publishing topic <{0}>: {1}".format(topic, e))
except Exception as e:
print("MQTT: Error publishing topic: <{0}>".format(e))
| 6,848 | 0 | 333 |
4c3ad1d44bd93c66096d64a3e25a7f96248c9a10 | 1,744 | py | Python | ckan/lib/io.py | gg2/ckan | d61a533cc330b6050f4957573f58ec912695ed0a | [
"BSD-3-Clause"
] | 2,805 | 2015-01-02T18:13:15.000Z | 2022-03-31T03:35:01.000Z | ckan/lib/io.py | gg2/ckan | d61a533cc330b6050f4957573f58ec912695ed0a | [
"BSD-3-Clause"
] | 3,801 | 2015-01-02T11:05:36.000Z | 2022-03-31T19:24:37.000Z | ckan/lib/io.py | gg2/ckan | d61a533cc330b6050f4957573f58ec912695ed0a | [
"BSD-3-Clause"
] | 1,689 | 2015-01-02T19:46:43.000Z | 2022-03-28T14:59:43.000Z | # encoding: utf-8
u'''
Utility functions for I/O.
'''
import sys
import six
# Encoding used for filesystem paths; falls back to the default encoding
# when Python cannot determine the filesystem encoding.
_FILESYSTEM_ENCODING = str(
    sys.getfilesystemencoding() or sys.getdefaultencoding()
)


def encode_path(p):
    u'''
    Convert a Unicode path string to a byte string.

    Intended to be used for encoding paths that are known to be
    compatible with the filesystem, for example paths of existing files
    that were previously decoded using :py:func:`decode_path`. If you're
    dynamically constructing names for new files using unknown inputs
    then pass them through :py:func:`ckan.lib.munge.munge_filename`
    before encoding them.

    Raises a ``UnicodeEncodeError`` if the path cannot be encoded using
    the filesystem's encoding. That will never happen for paths returned
    by :py:func:`decode_path`.

    Raises a ``TypeError`` is the input is not a Unicode string.
    '''
    if not isinstance(p, str):
        raise TypeError(u'Can only encode unicode, not {}'.format(type(p)))
    # ``p`` is guaranteed to be ``str`` here, so the former
    # ``six.ensure_text(p)`` round-trip was a no-op; encode directly.
    return p.encode(_FILESYSTEM_ENCODING)


def decode_path(p):
    u'''
    Convert a byte path string to a Unicode string.

    Intended to be used for decoding byte paths to existing files as
    returned by some of Python's built-in I/O functions.

    Raises a ``UnicodeDecodeError`` if the path cannot be decoded using
    the filesystem's encoding. Assuming the path was returned by one of
    Python's I/O functions this means that the environment Python is
    running in is set up incorrectly.

    Raises a ``TypeError`` if the input is not a byte string.
    '''
    if not isinstance(p, bytes):
        raise TypeError(u'Can only decode str, not {}'.format(type(p)))
    # ``p`` is guaranteed to be ``bytes`` here, so the former
    # ``six.ensure_binary(p)`` round-trip was a no-op; decode directly.
    return p.decode(_FILESYSTEM_ENCODING)
| 31.142857 | 75 | 0.713876 | # encoding: utf-8
u'''
Utility functions for I/O.
'''
import sys
import six
_FILESYSTEM_ENCODING = str(
sys.getfilesystemencoding() or sys.getdefaultencoding()
)
def encode_path(p):
u'''
Convert a Unicode path string to a byte string.
Intended to be used for encoding paths that are known to be
compatible with the filesystem, for example paths of existing files
that were previously decoded using :py:func:`decode_path`. If you're
dynamically constructing names for new files using unknown inputs
then pass them through :py:func:`ckan.lib.munge.munge_filename`
before encoding them.
Raises a ``UnicodeEncodeError`` if the path cannot be encoded using
the filesystem's encoding. That will never happen for paths returned
by :py:func:`decode_path`.
Raises a ``TypeError`` is the input is not a Unicode string.
'''
if not isinstance(p, str):
raise TypeError(u'Can only encode unicode, not {}'.format(type(p)))
return six.ensure_text(p).encode(_FILESYSTEM_ENCODING)
def decode_path(p):
u'''
Convert a byte path string to a Unicode string.
Intended to be used for decoding byte paths to existing files as
returned by some of Python's built-in I/O functions.
Raises a ``UnicodeDecodeError`` if the path cannot be decoded using
the filesystem's encoding. Assuming the path was returned by one of
Python's I/O functions this means that the environment Python is
running in is set up incorrectly.
Raises a ``TypeError`` if the input is not a byte string.
'''
if not isinstance(p, bytes):
raise TypeError(u'Can only decode str, not {}'.format(type(p)))
return six.ensure_binary(p).decode(_FILESYSTEM_ENCODING)
| 0 | 0 | 0 |
1382f4c57e548338346af9acb45b72cc33a0979d | 667 | py | Python | dags/exercise4.py | tisako/airflow-training-skeleton | 5bebe784d69f115df352ad5185320653eaa78eee | [
"Apache-2.0"
] | null | null | null | dags/exercise4.py | tisako/airflow-training-skeleton | 5bebe784d69f115df352ad5185320653eaa78eee | [
"Apache-2.0"
] | null | null | null | dags/exercise4.py | tisako/airflow-training-skeleton | 5bebe784d69f115df352ad5185320653eaa78eee | [
"Apache-2.0"
] | null | null | null | from datetime import timedelta
import airflow
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
# Default arguments applied to every task in this DAG.
args = {
    'owner': 'Airflow',
    # Backfill window: the DAG is considered to have started 9 days ago.
    'start_date': airflow.utils.dates.days_ago(9),
}
with DAG(
    dag_id='exercise4',
    default_args=args,
    # timedelta accepts fractional hours: this runs every 2 hours 30 minutes.
    schedule_interval=timedelta(hours=2.5)
) as dag:
    task1 = DummyOperator(
        task_id='task1'
    )
    task2 = DummyOperator(
        task_id='task2'
    )
    task3 = DummyOperator(
        task_id='task3'
    )
    task4 = DummyOperator(
        task_id='task4'
    )
    task5 = DummyOperator(
        task_id='task5'
    )
    # Fan-out / fan-in: task2 feeds task3 and task4 in parallel; both must
    # finish before task5 runs.
    task1 >> task2 >> [task3, task4] >> task5
| 20.212121 | 58 | 0.623688 | from datetime import timedelta
import airflow
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
args = {
'owner': 'Airflow',
'start_date': airflow.utils.dates.days_ago(9),
}
with DAG(
dag_id='exercise4',
default_args=args,
schedule_interval=timedelta(hours=2.5)
) as dag:
task1 = DummyOperator(
task_id='task1'
)
task2 = DummyOperator(
task_id='task2'
)
task3 = DummyOperator(
task_id='task3'
)
task4 = DummyOperator(
task_id='task4'
)
task5 = DummyOperator(
task_id='task5'
)
task1 >> task2 >> [task3, task4] >> task5
| 0 | 0 | 0 |
9e5c7d9f8f958c4ec71e450dbcba738f978fbbf8 | 980 | py | Python | web_app/create_btc_wallet.py | pcsubirachs/pybit_wallet | 0fb6c12841c66546d0466602cbd46d28376eb16f | [
"MIT"
] | null | null | null | web_app/create_btc_wallet.py | pcsubirachs/pybit_wallet | 0fb6c12841c66546d0466602cbd46d28376eb16f | [
"MIT"
] | null | null | null | web_app/create_btc_wallet.py | pcsubirachs/pybit_wallet | 0fb6c12841c66546d0466602cbd46d28376eb16f | [
"MIT"
] | null | null | null | # create_btc_wallet.py
# pywallet implementation
from pywallet import wallet
# Testing
#print(w)
#print("Seed Phrase: ", seed)
#print("Private Key: ", priv_key)
#print("Public Key: ", pub_key)
#print("Address: ", address)
cw = create_wallet()
print(cw) | 23.902439 | 66 | 0.656122 | # create_btc_wallet.py
# pywallet implementation
from pywallet import wallet
def create_wallet():
    """Create a new BTC HD wallet and return its key material.

    Generates a fresh 12-word mnemonic seed, derives a BIP32 wallet from
    it via pywallet, and returns the externally useful fields.

    Returns:
        list: ``[seed, xprivate_key, xpublic_key, address]``.
    """
    # generate 12 word mnemonic seed
    seed = wallet.generate_mnemonic()
    # create bitcoin wallet in JSON format (one derived child key)
    w = wallet.create_wallet(network="BTC", seed=seed, children=1)
    # The wallet dict also carries 'coin', 'wif' and per-child derivation
    # data under w['children'][0]; they were previously read into unused
    # locals and are intentionally not returned -- read them from ``w``
    # directly if ever needed.
    return [w['seed'], w['xprivate_key'], w['xpublic_key'], w['address']]
# Testing
#print(w)
#print("Seed Phrase: ", seed)
#print("Private Key: ", priv_key)
#print("Public Key: ", pub_key)
#print("Address: ", address)
cw = create_wallet()
print(cw) | 700 | 0 | 23 |
4c3a955838bc1a131cd7eef019138c3f66abd94e | 14,639 | py | Python | fab_support/heroku.py | drummonds/fab_support | de2c9595e8cf499848f0cc5661e7f1d6465609a2 | [
"MIT"
] | 3 | 2018-10-31T19:04:08.000Z | 2019-06-05T09:17:36.000Z | fab_support/heroku.py | drummonds/fab_support | de2c9595e8cf499848f0cc5661e7f1d6465609a2 | [
"MIT"
] | 205 | 2018-03-04T10:12:14.000Z | 2022-03-28T15:24:10.000Z | fab_support/heroku.py | drummonds/fab_support | de2c9595e8cf499848f0cc5661e7f1d6465609a2 | [
"MIT"
] | null | null | null | import datetime as dt
from fabric.api import env, local, task, lcd, settings
import json
import time
from time import sleep
from .heroku_utils import first_colour_database
from .utils import repeat_run_local, FabricSupportException, wait_for_dyno_to_run
# Global environment variables See documentation
HEROKU_APP_NAME = "fab-support-app-test" # name of this stages Heroku app
HEROKU_PROD_APP_NAME = (
"fab-support-app-prod"
) # Name of Heroku app which is production, ie source of data
HEROKU_OLD_PROD_APP_NAME = (
"fab-support-app-old-prod"
) # Name of heroku app to save production to
PRODUCTION_URL = ""
HEROKU_POSTGRES_TYPE = "hobby-dev"
GIT_PUSH = "" # Default to false
GIT_PUSH_DIR = "." #
GIT_BRANCH = "master"
USES_CELERY = False
##################################################
# Local utilities
##################################################
def remove_unused_db():
    """List all databases in use for app, find the main one and remove all the others.

    Heroku attaches one config var per database, named
    ``HEROKU_POSTGRESQL_<COLOUR>...``, while ``DATABASE_URL`` holds the URL
    of the primary one. Every colour var whose URL differs from
    ``DATABASE_URL`` is destroyed.
    """
    data = json.loads(
        local(f"heroku config --json --app {HEROKU_APP_NAME}", capture=True)
    )
    for k, v in data.items():
        # noinspection SpellCheckingInspection
        if k.find("HEROKU_POSTGRESQL_") == 0:
            if v != data["DATABASE_URL"]:
                # Not the primary database -- safe to destroy.
                local(
                    f"heroku addons:destroy {k} --app {HEROKU_APP_NAME} --confirm {HEROKU_APP_NAME}"
                )
def default_db_colour(app_name):
    """Return the default database colour of the given Heroku application.

    Scans the app's config vars for a ``HEROKU_POSTGRESQL_*`` entry whose
    URL matches ``DATABASE_URL``. When no colour-named variable matches,
    the long-form ``DATABASE_URL`` value itself is returned instead.
    """
    config = json.loads(
        local("heroku config --json --app {0}".format(app_name), capture=True)
    )
    default_url = config["DATABASE_URL"]
    for name, url in config.items():
        if name.startswith("HEROKU_POSTGRESQL_") and url == default_url:
            return name
    # No colour database names found for this app -- fall back to the URL.
    return default_url
def set_heroku_environment_variables(stage):
    """Push the stage's configured environment variables to the Heroku app.

    Reads ``env['stages'][stage]['ENV']`` (a dict) and sets each entry as a
    Heroku config var on ``HEROKU_APP_NAME``, then fills in defaults for
    DJANGO_ALLOWED_HOSTS, DJANGO_SETTINGS_MODULE and PYTHONHASHSEED when the
    stage does not define them.

    :param stage: name of the stage in ``env['stages']``.
    """
    # TODO deal with no 'ENV'
    env_dict = env["stages"][stage]["ENV"]  # Should be a dictionary
    # Set all the variables you need
    for key, value in env_dict.items():
        local("heroku config:set {}={} --app {}".format(key, value, HEROKU_APP_NAME))
    # Setup defaults for some ENV variables if have not been setup
    if "DJANGO_ALLOWED_HOSTS" not in env_dict:
        allowed_hosts = f"{HEROKU_APP_NAME}.herokuapp.com"
        local(
            f'heroku config:set DJANGO_ALLOWED_HOSTS="{allowed_hosts}" --app {HEROKU_APP_NAME}'
        )
    if "DJANGO_SETTINGS_MODULE" not in env_dict:
        local(
            f"heroku config:set DJANGO_SETTINGS_MODULE=production --app {HEROKU_APP_NAME}"
        )
    if "PYTHONHASHSEED" not in env_dict:
        local(f"heroku config:set PYTHONHASHSEED=random --app {HEROKU_APP_NAME}")
def raw_update_app(stage):
    """Deploy the latest code to the stage's Heroku app and migrate its DB.

    Refreshes the config vars, points the local git remote at the app,
    pushes the configured branch (plain push, or a custom GIT_PUSH command
    run from GIT_PUSH_DIR for subtree deployments), scales a Celery worker
    when USES_CELERY is set, then runs Django migrations on the dyno.

    :param stage: name of the stage in ``env['stages']``.
    """
    # Put the heroku app in maintenance mode TODO
    set_heroku_environment_variables(stage)  # In case anything has changed
    # connect git to the correct remote repository
    local("heroku git:remote -a {}".format(HEROKU_APP_NAME))
    # Need to push the branch in git to the master branch in the remote heroku repository
    print(
        f"GIT_PUSH_DIR = {GIT_PUSH_DIR}, GIT_PUSH = {GIT_PUSH}, GIT_BRANCH = {GIT_BRANCH}"
    )
    if GIT_PUSH == "":  # empty GIT_PUSH: plain push of the configured branch
        local(f"git push heroku {GIT_BRANCH}:master")
    else:
        # Custom push command, typically a subtree deployment, e.g.:
        # 'GIT_PUSH': 'git subtree push --prefix tests/my_heroku_project heroku master',
        with lcd(GIT_PUSH_DIR):
            local(GIT_PUSH)
    # Don't need to scale workers down as not using eg heroku ps:scale worker=0
    if USES_CELERY:
        local(f"heroku ps:scale worker=1 -a {HEROKU_APP_NAME}")
    # Have used performance web=standard-1x and worker=standard-2x but adjusted app to used less memory
    # local(f'heroku ps:resize web=standard-1x -a {HEROKU_APP_NAME}') # Resize web to be compatible with performance workers
    # local(f'heroku ps:resize worker=standard-2x -a {HEROKU_APP_NAME}') # Resize workers
    # makemigrations should be run locally and the results checked into git
    local(
        "heroku run \"yes 'yes' | python manage.py migrate\""
    )  # Force deletion of stale content types
# #############
def _create_newbuild(stage):
    """Create the Heroku app, provision add-ons, deploy and sanity-check.

    Builds the app (EU region, Python buildpack), a Postgres database of
    HEROKU_POSTGRES_TYPE (waiting until it is ready), CloudAMQP and
    Papertrail add-ons, schedules nightly backups, then deploys via
    :func:`raw_update_app` and runs the Django deployment checks.
    It is safe to run and won't destroy any existing infrastructure."""
    local(
        f"heroku create {HEROKU_APP_NAME} --buildpack https://github.com/heroku/heroku-buildpack-python --region eu"
    )
    # This is where we create the database. The type of database can range from hobby-dev for small
    # free access to standard for production quality docs
    local(
        f"heroku addons:create heroku-postgresql:{HEROKU_POSTGRES_TYPE} --app {HEROKU_APP_NAME}"
    )
    local(f"heroku addons:create cloudamqp:lemur --app {HEROKU_APP_NAME}")
    local(f"heroku addons:create papertrail:choklad --app {HEROKU_APP_NAME}")
    # set database backup schedule
    repeat_run_local(
        f"heroku pg:wait --app {HEROKU_APP_NAME}"
    )  # It takes some time for DB so wait for it
    # When wait returns the database is not necessarily completely finished preparing itself. So the next
    # command could fail (and did on testing on v0.1.6) -- hence repeat_run_local retries it.
    repeat_run_local(f"heroku pg:backups:schedule --at 04:00 --app {HEROKU_APP_NAME}")
    # Already promoted as new local('heroku pg:promote DATABASE_URL --app my-app-prod')
    # Leaving out and aws and reddis
    raw_update_app(stage)
    wait_for_dyno_to_run(HEROKU_APP_NAME)
    local("heroku run python manage.py check --deploy")  # make sure all ok
    # Create superuser - the interactive command does not allow you to script the password
    # So this is a hack workaround.
    # Django 1 only
    # cmd = ('heroku run "echo \'from django.contrib.auth import get_user_model; User = get_user_model(); '
    #        + f'User.objects.filter(email="""{SUPERUSER_EMAIL}""", is_superuser=True).delete(); '
    #        + f'User.objects.create_superuser("""{SUPERUSER_NAME}""", """{SUPERUSER_EMAIL}""", """{SUPERUSER_PASSWORD}""")\' '
    #        + f' | python manage.py shell"')
    # local(cmd)
def _kill_app():
    """Destroy HEROKU_APP_NAME unconditionally (no safety checks); see :func:`kill_app`."""
    local(f"heroku destroy {HEROKU_APP_NAME} --confirm {HEROKU_APP_NAME}")
def kill_app(stage, safety_on=True):
    """Destroy the Heroku app for *stage* if it exists.

    Non-production apps are destroyed unconditionally. A production app
    (name ending in "prod") is only destroyed when the safety catch is
    explicitly released, i.e. ``fab the_stage kill_app:False``.

    Bug fix: the previous condition ``not (is_production() and not
    safety_on)`` was inverted -- it destroyed production while the safety
    was ON and refused to when the safety was OFF, contradicting the
    documented ``kill_app:False`` usage.

    :param stage: name of the stage in ``env['stages']``.
    :param safety_on: when True (default), production apps are protected.
    """
    get_global_environment_variables(stage)
    if HEROKU_APP_NAME in list_app_names():
        if not is_production() or not safety_on:
            _kill_app()
def build_uat():
    """Build a new uat environment (shorthand for ``build_app('uat')``)."""
    build_app("uat")
def _build_app(stage="uat"):
    """Build a test environment. Default is uat.
    So fab build_app is equivalent to fab build_app:uat and to fab build_app:stage=uat
    so can build a test branch with:
    fab build_app:stage=test

    Kills any existing app for the stage, rebuilds it from scratch, copies
    the production database in, then migrates it to the current schema.
    """
    try:
        _kill_app()
    # fabric's local() aborts with SystemExit on a failed shell command.
    except SystemExit:
        if stage != "prod":
            pass  # ignore errors in case original does not exist
        else:
            raise Exception(
                "Must stop if an error when deleting a production database as now the only working instance is UAT."
            )
    _create_newbuild(stage)
    _transfer_database_from_production(stage)
    # makemigrations should be run locally and the results checked into git
    # Need to migrate the old database schema from the master production database
    local(
        "heroku run \"yes 'yes' | python manage.py migrate\""
    )  # Force deletion of stale content types
def _create_new_db():
    """Provision an additional Postgres add-on for this app and wait for it.

    Returns the (name, colour) pair of the first colour-named database
    attached to the app, as produced by ``first_colour_database`` (callers
    unpack it as ``db_name, colour``).
    """
    # Fix: the command's output was previously captured into an unused
    # local ``m``; the stale "maintenance move" comment is also gone.
    local(
        f"heroku addons:create heroku-postgresql:{HEROKU_POSTGRES_TYPE} --app {HEROKU_APP_NAME}",
        capture=True,
    )
    # NOTE(review): no --app flag here -- relies on the git remote set by
    # 'heroku git:remote'; consider passing --app explicitly.
    repeat_run_local("heroku pg:wait")  # It takes some time for DB so wait for it
    # There should now be 2 databases attached to the app.
    return first_colour_database(app=HEROKU_APP_NAME)
def _transfer_database_from_production(stage="test", clean=True):
    """This is usually used for making a copy of the production database for a UAT staging
    or test environment. It can also be used to upgrade the production environment from one
    database plan to the next.

    Method: with the app in maintenance mode, create a fresh database,
    ``pg:copy`` production's DATABASE_URL into it, promote it to be the
    default, and (when *clean* is true) destroy the now-unused databases.

    :param stage: stage whose globals/DB plan should be used.
    :param clean: destroy superseded databases after the copy.
    """
    try:
        local("heroku maintenance:on --app {} ".format(HEROKU_APP_NAME))
        # db_name is unused below; only the colour config-var name is needed.
        db_name, colour = create_new_db(stage)
        # Don't need to scale workers down as not using eg heroku ps:scale worker=0
        local(
            f"heroku pg:copy {HEROKU_PROD_APP_NAME}::DATABASE_URL {colour} --app {HEROKU_APP_NAME} --confirm {HEROKU_APP_NAME}"
        )
        local(f"heroku pg:promote {colour}")
        if clean:
            remove_unused_db()
    finally:
        local("heroku maintenance:off --app {} ".format(HEROKU_APP_NAME))
def list_stages():
    """Print every configured stage and its optional comment.

    Reads ``env['stages']`` (a mapping of stage name -> stage dict). When
    the key is missing, scans ``env`` for a differently-cased 'stages' key
    to help diagnose the misconfiguration.

    This is put here to test the exact same code in django as in set_stages.
    In one it seems to work and another to fail.
    """
    try:
        stages = env["stages"]
        print("List of stages")
        print(stages)
        for stage_name, stage in stages.items():
            try:
                comment = stage["comment"]
            except KeyError:
                comment = ""
            print(f"{stage_name} - {comment}")
    except KeyError:
        # Bug fixes: iterating fabric's env yields keys only, so the old
        # ``for k, v in env`` unpack could not work; and the diagnostic
        # message lacked the f-prefix, printing a literal '{f}'.
        for k in env:
            if k.lower() == "stages":
                print(f"env['{k}'] has been set but should probably be 'stages'")
        print("env['stages'] has not been set.")
def _promote_to_prod():
    """
    Promotes a stage, typically uat, to production.
    Saves old production for safety by renaming it to HEROKU_OLD_PROD_APP_NAME.
    Should work if this is the first promotion ie no production database or if there is a production database.
    Sequence: detect whether production exists (maintenance:on fails if not),
    rename production -> old-prod, rename this stage -> production, fix up
    DJANGO_ALLOWED_HOSTS on both, and move the custom domain across.
    TODO require manual override if not uat
    TODO do not run if old_prod exists. Require manual deletion
    """
    # turn maintenance on
    local(f"heroku maintenance:on --app {HEROKU_APP_NAME}")
    production_exists = True
    with settings(abort_exception=FabricSupportException):
        try:
            local(f"heroku maintenance:on --app {HEROKU_PROD_APP_NAME}")
        except FabricSupportException:
            # Going to assume that there is no production
            production_exists = False
    try:
        if production_exists:
            local(
                f"heroku apps:rename {HEROKU_OLD_PROD_APP_NAME} --app {HEROKU_PROD_APP_NAME}"
            )  # Should fail if already an old_prod
        local(f"heroku apps:rename {HEROKU_PROD_APP_NAME} --app {HEROKU_APP_NAME}")
        if production_exists:
            # Having moved from production to old production need to update allowed hosts
            local(
                f'heroku config:set DJANGO_ALLOWED_HOSTS="{HEROKU_OLD_PROD_APP_NAME}.herokuapp.com" --app {HEROKU_OLD_PROD_APP_NAME}'
            )
            wait_for_dyno_to_run(HEROKU_OLD_PROD_APP_NAME)
        local(
            f'heroku config:set DJANGO_ALLOWED_HOSTS="{HEROKU_PROD_APP_NAME}.herokuapp.com" --app {HEROKU_PROD_APP_NAME}'
        )
        wait_for_dyno_to_run(HEROKU_PROD_APP_NAME)
        if PRODUCTION_URL:
            # Switch over domains
            local(f"heroku domains:clear --app {HEROKU_OLD_PROD_APP_NAME}")
            local(f"heroku domains:add {PRODUCTION_URL} --app {HEROKU_PROD_APP_NAME}")
    finally:
        local(f"heroku maintenance:off --app {HEROKU_PROD_APP_NAME} ")
        if (
            production_exists
        ):  # Then need to run maintenance off on what is now old production
            local(f"heroku maintenance:off --app {HEROKU_OLD_PROD_APP_NAME} ")
| 39.352151 | 133 | 0.662545 | import datetime as dt
from fabric.api import env, local, task, lcd, settings
import json
import time
from time import sleep
from .heroku_utils import first_colour_database
from .utils import repeat_run_local, FabricSupportException, wait_for_dyno_to_run
# Global environment variables See documentation
HEROKU_APP_NAME = "fab-support-app-test" # name of this stages Heroku app
HEROKU_PROD_APP_NAME = (
"fab-support-app-prod"
) # Name of Heroku app which is production, ie source of data
HEROKU_OLD_PROD_APP_NAME = (
"fab-support-app-old-prod"
) # Name of heroku app to save production to
PRODUCTION_URL = ""
HEROKU_POSTGRES_TYPE = "hobby-dev"
GIT_PUSH = "" # Default to false
GIT_PUSH_DIR = "." #
GIT_BRANCH = "master"
USES_CELERY = False
##################################################
# Local utilities
##################################################
def remove_unused_db():
"""List all databases in use for app, find the main one and remove all the others"""
data = json.loads(
local(f"heroku config --json --app {HEROKU_APP_NAME}", capture=True)
)
for k, v in data.items():
# noinspection SpellCheckingInspection
if k.find("HEROKU_POSTGRESQL_") == 0:
if v != data["DATABASE_URL"]:
local(
f"heroku addons:destroy {k} --app {HEROKU_APP_NAME} --confirm {HEROKU_APP_NAME}"
)
def default_db_colour(app_name):
"""Return the default database colour of heroku application"""
data = json.loads(
local("heroku config --json --app {0}".format(app_name), capture=True)
)
for k, v in data.items():
if k.find("HEROKU_POSTGRESQL_") == 0:
if v == data["DATABASE_URL"]:
return k
# if no colour found then try the long name in database_url
# raise Exception(f'No color database names found for app {app_name} - create an extra one and it should be ok.')
return data["DATABASE_URL"]
def set_heroku_environment_variables(stage):
"""This sets all the environment variables that a Django recipe needs."""
# TODO deal with no 'ENV'
env_dict = env["stages"][stage]["ENV"] # Should be a dictionary
# Set all the variables you need
for key, value in env_dict.items():
local("heroku config:set {}={} --app {}".format(key, value, HEROKU_APP_NAME))
# Setup defaults for some ENV variables if have not been setup
if "DJANGO_ALLOWED_HOSTS" not in env_dict:
allowed_hosts = f"{HEROKU_APP_NAME}.herokuapp.com"
local(
f'heroku config:set DJANGO_ALLOWED_HOSTS="{allowed_hosts}" --app {HEROKU_APP_NAME}'
)
if "DJANGO_SETTINGS_MODULE" not in env_dict:
local(
f"heroku config:set DJANGO_SETTINGS_MODULE=production --app {HEROKU_APP_NAME}"
)
if "PYTHONHASHSEED" not in env_dict:
local(f"heroku config:set PYTHONHASHSEED=random --app {HEROKU_APP_NAME}")
def raw_update_app(stage):
"""Update of app to latest version"""
# Put the heroku app in maintenance mode TODO
set_heroku_environment_variables(stage) # In case anything has changed
# connect git to the correct remote repository
local("heroku git:remote -a {}".format(HEROKU_APP_NAME))
# Need to push the branch in git to the master branch in the remote heroku repository
print(
f"GIT_PUSH_DIR = {GIT_PUSH_DIR}, GIT_PUSH = {GIT_PUSH}, GIT_BRANCH = {GIT_BRANCH}"
)
if GIT_PUSH == "": # test for special case probably deploying a subtree
local(f"git push heroku {GIT_BRANCH}:master")
else:
# The command will probably be like this:
# 'GIT_PUSH': 'git subtree push --prefix tests/my_heroku_project heroku master',
with lcd(GIT_PUSH_DIR):
local(GIT_PUSH)
# Don't need to scale workers down as not using eg heroku ps:scale worker=0
if USES_CELERY:
local(f"heroku ps:scale worker=1 -a {HEROKU_APP_NAME}")
# Have used performance web=standard-1x and worker=standard-2x but adjusted app to used less memory
# local(f'heroku ps:resize web=standard-1x -a {HEROKU_APP_NAME}') # Resize web to be compatible with performance workers
# local(f'heroku ps:resize worker=standard-2x -a {HEROKU_APP_NAME}') # Resize workers
# makemigrations should be run locally and the results checked into git
local(
"heroku run \"yes 'yes' | python manage.py migrate\""
) # Force deletion of stale content types
def install_heroku_plugins(plug_in_list):
    """Install any Heroku CLI plugins from *plug_in_list* that are missing.

    Parses ``heroku plugins --core`` output (one 'name version' pair per
    line) into a dict, then installs each requested plugin that is absent.
    Returns True when every plugin is installed (installs raise on failure).
    """
    # plugins doesn't support returning --json
    results = local(
        "heroku plugins --core", capture=True
    )  # returns string or string list
    result_list = results.split("\n")
    plugin_dict = {}
    for result in result_list:
        parts = result.split(" ")
        try:
            plugin_dict[parts[0]] = parts[1]
        except IndexError:
            # Line had no version column -- record the plugin name anyway.
            plugin_dict[parts[0]] = ""
    for plug_in in plug_in_list:
        if plug_in not in plugin_dict:
            local(
                f"heroku plugins:install {plug_in}"
            )  # installed in local toolbelt not on app
            # If it fails then it really is a failure not just it has already been installed.
    return True  # Got to end and all installed
    # print(f'|{results}|')
# #############
def _create_newbuild(stage):
"""This builds the database and waits for it be ready. It is is safe to run and won't
destroy any existing infrastructure."""
local(
f"heroku create {HEROKU_APP_NAME} --buildpack https://github.com/heroku/heroku-buildpack-python --region eu"
)
# This is where we create the database. The type of database can range from hobby-dev for small
# free access to standard for production quality docs
local(
f"heroku addons:create heroku-postgresql:{HEROKU_POSTGRES_TYPE} --app {HEROKU_APP_NAME}"
)
local(f"heroku addons:create cloudamqp:lemur --app {HEROKU_APP_NAME}")
local(f"heroku addons:create papertrail:choklad --app {HEROKU_APP_NAME}")
# set database backup schedule
repeat_run_local(
f"heroku pg:wait --app {HEROKU_APP_NAME}"
) # It takes some time for DB so wait for it
# When wait returns the database is not necessarily completely finished preparing itself. So the next
# command could fail (and did on testing on v0.1.6)
repeat_run_local(f"heroku pg:backups:schedule --at 04:00 --app {HEROKU_APP_NAME}")
# Already promoted as new local('heroku pg:promote DATABASE_URL --app my-app-prod')
# Leaving out and aws and reddis
raw_update_app(stage)
wait_for_dyno_to_run(HEROKU_APP_NAME)
local("heroku run python manage.py check --deploy") # make sure all ok
# Create superuser - the interactive command does not allow you to script the password
# So this is a hack workaround.
# Django 1 only
# cmd = ('heroku run "echo \'from django.contrib.auth import get_user_model; User = get_user_model(); '
# + f'User.objects.filter(email="""{SUPERUSER_EMAIL}""", is_superuser=True).delete(); '
# + f'User.objects.create_superuser("""{SUPERUSER_NAME}""", """{SUPERUSER_EMAIL}""", """{SUPERUSER_PASSWORD}""")\' '
# + f' | python manage.py shell"')
# local(cmd)
def get_global_environment_variables(stage):
    """Copy the stage's configuration into this module's globals.

    For each predefined setting name, looks it up in
    ``env['stages'][stage]`` and, when present, overwrites the module-level
    global of the same name; missing settings keep their module defaults.
    """
    # Get a number of predefined environment variables from the staging system variables
    # and turn them into globals for use in this script
    # TODO perhaps convert to another method of access
    for global_env in (
        "HEROKU_APP_NAME",
        "HEROKU_PROD_APP_NAME",
        "HEROKU_OLD_PROD_APP_NAME",
        "PRODUCTION_URL",
        "HEROKU_POSTGRES_TYPE",
        "USES_CELERY",
        "GIT_BRANCH",
        "GIT_PUSH",
        "GIT_PUSH_DIR",
        "DJANGO_SETTINGS_MODULE",
    ):
        try:
            globals()[global_env] = env["stages"][stage][global_env]
        except KeyError:
            # This global variable will use the default
            pass
def create_newbuild(stage):
    """Load the stage's globals, then build the app and its add-ons from scratch."""
    get_global_environment_variables(stage)
    _create_newbuild(stage)
def is_production():
    """Return True when the current stage's app name ends with 'prod'."""
    return HEROKU_APP_NAME.lower().endswith("prod")
def _kill_app():
"""see kill app"""
local(f"heroku destroy {HEROKU_APP_NAME} --confirm {HEROKU_APP_NAME}")
def list_app_names():
    """Return the name of every Heroku app visible to the current account."""
    apps = json.loads(local("heroku apps --json", capture=True))
    names = []
    for app in apps:
        names.append(app["name"])
    return names
def kill_app(stage, safety_on=True):
    """Destroy the Heroku app for *stage* if it exists.

    Non-production apps are destroyed unconditionally. A production app
    (name ending in "prod") is only destroyed when the safety catch is
    explicitly released, i.e. ``fab the_stage kill_app:False``.

    Bug fix: the previous condition ``not (is_production() and not
    safety_on)`` was inverted -- it destroyed production while the safety
    was ON and refused to when the safety was OFF, contradicting the
    documented ``kill_app:False`` usage.

    :param stage: name of the stage in ``env['stages']``.
    :param safety_on: when True (default), production apps are protected.
    """
    get_global_environment_variables(stage)
    if HEROKU_APP_NAME in list_app_names():
        if not is_production() or not safety_on:
            _kill_app()
def build_uat():
"""Build a new uat environments"""
build_app("uat")
def _build_app(stage="uat"):
"""Build a test environment. Default is uat.
So fab build_app is equivalent to fab build_app:uat and to fab build_app:stage=uat
so can build a test branch with:
fab build_app:stage=test"""
try:
_kill_app()
except SystemExit:
if stage != "prod":
pass # ignore errors in case original does not exist
else:
raise Exception(
"Must stop if an error when deleting a production database as now the only working instance is UAT."
)
_create_newbuild(stage)
_transfer_database_from_production(stage)
# makemigrations should be run locally and the results checked into git
# Need to migrate the old database schema from the master production database
local(
"heroku run \"yes 'yes' | python manage.py migrate\""
) # Force deletion of stale content types
def build_app(stage="uat"):
    """Load the stage's globals, run a full rebuild, and report how long it took."""
    start_time = time.time()
    get_global_environment_variables(stage)
    _build_app(stage)
    # Calculate time
    end_time = time.time()
    runtime = str(dt.timedelta(seconds=int(end_time - start_time)))
    print(f"Run time = {runtime} Completed at: {dt.datetime.now()}")
def _create_new_db():
"""Just creates an extra new database for this instance."""
# Put the heroku app in maintenance move
m = local(
f"heroku addons:create heroku-postgresql:{HEROKU_POSTGRES_TYPE} --app {HEROKU_APP_NAME}",
capture=True,
)
repeat_run_local("heroku pg:wait") # It takes some time for DB so wait for it
# There should now be 2 database
return first_colour_database(app=HEROKU_APP_NAME)
def create_new_db(stage="uat"):
    """Load the stage's globals, then provision an extra Postgres add-on.

    Returns the (name, colour) pair from :func:`_create_new_db`.
    """
    get_global_environment_variables(stage)
    return _create_new_db()
def _transfer_database_from_production(stage="test", clean=True):
"""This is usually used for making a copy of the production database for a UAT staging
or test environment. It can also be used to upgrade the production environment from one
database plan to the next.
Method:
"""
try:
local("heroku maintenance:on --app {} ".format(HEROKU_APP_NAME))
db_name, colour = create_new_db(stage) # colour is ?
# Don't need to scale workers down as not using eg heroku ps:scale worker=0
local(
f"heroku pg:copy {HEROKU_PROD_APP_NAME}::DATABASE_URL {colour} --app {HEROKU_APP_NAME} --confirm {HEROKU_APP_NAME}"
)
local(f"heroku pg:promote {colour}")
if clean:
remove_unused_db()
finally:
local("heroku maintenance:off --app {} ".format(HEROKU_APP_NAME))
def transfer_database_from_production(stage="test", clean=True):
    """Load the stage's globals, then copy the production database into this stage."""
    get_global_environment_variables(stage)
    _transfer_database_from_production(stage, clean)
def list_stages():
    """Print every configured stage and its optional comment.

    Reads ``env['stages']`` (a mapping of stage name -> stage dict). When
    the key is missing, scans ``env`` for a differently-cased 'stages' key
    to help diagnose the misconfiguration.

    This is put here to test the exact same code in django as in set_stages.
    In one it seems to work and another to fail.
    """
    try:
        stages = env["stages"]
        print("List of stages")
        print(stages)
        for stage_name, stage in stages.items():
            try:
                comment = stage["comment"]
            except KeyError:
                comment = ""
            print(f"{stage_name} - {comment}")
    except KeyError:
        # Bug fixes: iterating fabric's env yields keys only, so the old
        # ``for k, v in env`` unpack could not work; and the diagnostic
        # message lacked the f-prefix, printing a literal '{f}'.
        for k in env:
            if k.lower() == "stages":
                print(f"env['{k}'] has been set but should probably be 'stages'")
        print("env['stages'] has not been set.")
def _promote_to_prod():
"""
Promotes a stage typically, uat to production
Saves old production for safety
Should work if this is the first promotion ie no production database or if there is a production database.
TODO require manual override if not uat
TODO do not run if old_prod exists. Require manual deletion
"""
# turn maintenance on
local(f"heroku maintenance:on --app {HEROKU_APP_NAME}")
production_exists = True
with settings(abort_exception=FabricSupportException):
try:
local(f"heroku maintenance:on --app {HEROKU_PROD_APP_NAME}")
except FabricSupportException:
# Going to assume that there is no production
production_exists = False
try:
if production_exists:
local(
f"heroku apps:rename {HEROKU_OLD_PROD_APP_NAME} --app {HEROKU_PROD_APP_NAME}"
) # Should fail if already an old_prod
local(f"heroku apps:rename {HEROKU_PROD_APP_NAME} --app {HEROKU_APP_NAME}")
if production_exists:
# Having moved from production to old proudction need to update allowed hosts
local(
f'heroku config:set DJANGO_ALLOWED_HOSTS="{HEROKU_OLD_PROD_APP_NAME}.herokuapp.com" --app {HEROKU_OLD_PROD_APP_NAME}'
)
wait_for_dyno_to_run(HEROKU_OLD_PROD_APP_NAME)
local(
f'heroku config:set DJANGO_ALLOWED_HOSTS="{HEROKU_PROD_APP_NAME}.herokuapp.com" --app {HEROKU_PROD_APP_NAME}'
)
wait_for_dyno_to_run(HEROKU_PROD_APP_NAME)
if PRODUCTION_URL:
# Switch over domains
local(f"heroku domains:clear --app {HEROKU_OLD_PROD_APP_NAME}")
local(f"heroku domains:add {PRODUCTION_URL} --app {HEROKU_PROD_APP_NAME}")
finally:
local(f"heroku maintenance:off --app {HEROKU_PROD_APP_NAME} ")
if (
production_exists
): # Then need to run maintenance off on what is now old production
local(f"heroku maintenance:off --app {HEROKU_OLD_PROD_APP_NAME} ")
def promote_to_prod(stage="uat"):
    """Load the stage's globals, promote this stage to production, and time it."""
    get_global_environment_variables(stage)
    start_time = time.time()
    _promote_to_prod()
    end_time = time.time()
    runtime = str(dt.timedelta(seconds=int(end_time - start_time)))
    print(f"Run time = {runtime}")
| 2,499 | 0 | 207 |
7f4374b8e4ae801466cdd4e437daeacabc7f2b75 | 3,066 | py | Python | tools/medline-cli.py | pminervini/distant-supervision | 408f8ac493d1d369dd818148216120c87b4aa038 | [
"MIT"
] | null | null | null | tools/medline-cli.py | pminervini/distant-supervision | 408f8ac493d1d369dd818148216120c87b4aa038 | [
"MIT"
] | null | null | null | tools/medline-cli.py | pminervini/distant-supervision | 408f8ac493d1d369dd818148216120c87b4aa038 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import gzip
from xml.dom import minidom
from concurrent.futures import ProcessPoolExecutor
import multiprocessing
from tqdm import tqdm
import jsonlines
from typing import List, Dict, Any
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
main(sys.argv[1:])
| 33.326087 | 113 | 0.607958 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import gzip
from xml.dom import minidom
from concurrent.futures import ProcessPoolExecutor
import multiprocessing
from tqdm import tqdm
import jsonlines
from typing import List, Dict, Any
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
def parse(path: str) -> List[Dict[str, Any]]:
logger.debug(f'Processing {path} ..')
with gzip.open(path, 'r') as f:
content = f.read()
# return [content]
dom = minidom.parseString(content)
a_lst = dom.getElementsByTagName("Article")
res = []
for a in a_lst:
entry = {}
at = a.getElementsByTagName("ArticleTitle")[0]
at_text = at.firstChild
if at_text is not None:
if hasattr(at_text, 'data'):
entry['title'] = at_text.data
else:
print('at_text has no data', path)
ab_lst = a.getElementsByTagName("AbstractText")
abstract_lst = []
for ab in ab_lst:
ab_text = ab.firstChild
ab_label = ab.getAttribute('Label')
ab_nlm_category = ab.getAttribute('NlmCategory')
abstract = {}
if ab_text is not None:
if hasattr(ab_text, 'data'):
abstract['text'] = ab_text.data
else:
print('ab_text has no data', path)
if ab_label is not None and len(ab_label) > 0:
abstract['label'] = ab_label
if ab_nlm_category is not None and len(ab_nlm_category) > 0:
abstract['nlm_category'] = ab_nlm_category
if len(abstract) > 0:
abstract_lst += [abstract]
entry['abstract'] = abstract_lst
res += [entry]
return res
def main(argv):
parser = argparse.ArgumentParser('MEDLINE', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('paths', type=str, nargs='+', help='Paths')
parser.add_argument('--threads', '-t', type=int, default=multiprocessing.cpu_count(), help='Threads')
parser.add_argument('--jsonl', type=str, default='medline.jsonl', help='JSONL output')
parser.add_argument('--text', type=str, default=None, help='JSONL output')
args = parser.parse_args(argv)
jsonl_path = args.jsonl
text_path = args.text
with ProcessPoolExecutor(max_workers=args.threads) as e:
entry_lst = [entry for el in list(tqdm(e.map(parse, args.paths), total=len(args.paths))) for entry in el]
if jsonl_path is not None:
with jsonlines.open(jsonl_path, 'w') as f:
f.write_all(entry_lst)
if text_path is not None:
with open(text_path, 'w') as f:
for entry in entry_lst:
for abstract in entry['abstract']:
if 'text' in abstract:
f.write(str(bytes(abstract['text'], 'utf-8')) + '\n')
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
main(sys.argv[1:])
| 2,554 | 0 | 46 |
e28f288d8baf761d62a58045450c5d688f0b7d68 | 1,601 | py | Python | 892.surface-area-of-3-d-shapes.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 892.surface-area-of-3-d-shapes.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 892.surface-area-of-3-d-shapes.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=892 lang=python3
#
# [892] Surface Area of 3D Shapes
#
# https://leetcode.com/problems/surface-area-of-3d-shapes/description/
#
# algorithms
# Easy (57.01%)
# Likes: 209
# Dislikes: 270
# Total Accepted: 15.9K
# Total Submissions: 27.5K
# Testcase Example: '[[2]]'
#
# On a N * N grid, we place some 1 * 1 * 1 cubes.
#
# Each value v = grid[i][j] represents a tower of v cubes placed on top of grid
# cell (i, j).
#
# Return the total surface area of the resulting shapes.
#
#
#
#
#
#
#
#
#
#
#
#
#
# Example 1:
#
#
# Input: [[2]]
# Output: 10
#
#
#
# Example 2:
#
#
# Input: [[1,2],[3,4]]
# Output: 34
#
#
#
# Example 3:
#
#
# Input: [[1,0],[0,2]]
# Output: 16
#
#
#
# Example 4:
#
#
# Input: [[1,1,1],[1,0,1],[1,1,1]]
# Output: 32
#
#
#
# Example 5:
#
#
# Input: [[2,2,2],[2,1,2],[2,2,2]]
# Output: 46
#
#
#
#
# Note:
#
#
# 1 <= N <= 50
# 0 <= grid[i][j] <= 50
#
#
#
#
#
#
#
#
# @lc code=start
# @lc code=end
| 14.294643 | 79 | 0.445971 | #
# @lc app=leetcode id=892 lang=python3
#
# [892] Surface Area of 3D Shapes
#
# https://leetcode.com/problems/surface-area-of-3d-shapes/description/
#
# algorithms
# Easy (57.01%)
# Likes: 209
# Dislikes: 270
# Total Accepted: 15.9K
# Total Submissions: 27.5K
# Testcase Example: '[[2]]'
#
# On a N * N grid, we place some 1 * 1 * 1 cubes.
#
# Each value v = grid[i][j] represents a tower of v cubes placed on top of grid
# cell (i, j).
#
# Return the total surface area of the resulting shapes.
#
#
#
#
#
#
#
#
#
#
#
#
#
# Example 1:
#
#
# Input: [[2]]
# Output: 10
#
#
#
# Example 2:
#
#
# Input: [[1,2],[3,4]]
# Output: 34
#
#
#
# Example 3:
#
#
# Input: [[1,0],[0,2]]
# Output: 16
#
#
#
# Example 4:
#
#
# Input: [[1,1,1],[1,0,1],[1,1,1]]
# Output: 32
#
#
#
# Example 5:
#
#
# Input: [[2,2,2],[2,1,2],[2,2,2]]
# Output: 46
#
#
#
#
# Note:
#
#
# 1 <= N <= 50
# 0 <= grid[i][j] <= 50
#
#
#
#
#
#
#
#
# @lc code=start
class Solution:
    def surfaceArea(self, grid: List[List[int]]) -> int:
        """Return the total surface area of the 3D shapes described by grid.

        grid[i][j] is the height (number of stacked 1x1x1 cubes) of the
        tower on cell (i, j).  Every non-empty tower contributes a top and
        a bottom face, plus, towards each of its four neighbours, the part
        of its side that rises above that neighbour (or above the ground
        at the grid's edge).

        Unlike the previous padding-based approach, this implementation
        does not mutate the caller's ``grid``.
        """
        rows, cols = len(grid), len(grid[0])
        area = 0
        for i in range(rows):
            for j in range(cols):
                height = grid[i][j]
                if height > 0:
                    area += 2  # top and bottom faces
                # Exposed side area towards each 4-neighbour; cells outside
                # the grid count as height 0.
                for di, dj in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                    ni, nj = i + di, j + dj
                    neighbour = (grid[ni][nj]
                                 if 0 <= ni < rows and 0 <= nj < cols
                                 else 0)
                    area += max(height - neighbour, 0)
        return area
# @lc code=end
| 564 | -6 | 48 |
7319c5c39d36a15d33e3185a5140611895db3086 | 5,745 | py | Python | tensorflow_datasets/scripts/document_datasets.py | atksh/datasets | 814058b31ebd99e418114016d60ab4d6f8f82070 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/scripts/document_datasets.py | atksh/datasets | 814058b31ebd99e418114016d60ab4d6f8f82070 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/scripts/document_datasets.py | atksh/datasets | 814058b31ebd99e418114016d60ab4d6f8f82070 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to document datasets.
To test:
python -m tensorflow_datasets.scripts.document_datasets
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import app
from concurrent import futures
import mako.lookup
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.core.utils import py_utils
WORKER_COUNT_DATASETS = 200
WORKER_COUNT_CONFIGS = 50
BASE_URL = "https://github.com/tensorflow/datasets/tree/master/tensorflow_datasets"
# WmtTranslate: The raw wmt can only be instantiated with the config kwargs
# TODO(tfds): Document image_label_folder datasets in a separate section
BUILDER_BLACKLIST = ["wmt_translate"]
@py_utils.memoize()
def get_mako_template(tmpl_name):
"""Returns mako.lookup.Template object to use to render documentation.
Args:
tmpl_name: string, name of template to load.
Returns:
mako 'Template' instance that can be rendered.
"""
tmpl_path = py_utils.get_tfds_path("scripts/templates/%s.mako.md" % tmpl_name)
with tf.io.gfile.GFile(tmpl_path, "r") as tmpl_f:
tmpl_content = tmpl_f.read()
return mako.lookup.Template(tmpl_content, default_filters=["str", "trim"])
def document_single_builder(builder):
"""Doc string for a single builder, with or without configs."""
print("Document builder %s..." % builder.name)
get_config_builder = lambda config: tfds.builder(builder.name, config=config)
config_builders = []
if builder.builder_configs:
with futures.ThreadPoolExecutor(max_workers=WORKER_COUNT_CONFIGS) as tpool:
config_builders = list(
tpool.map(get_config_builder, builder.BUILDER_CONFIGS)
)
tmpl = get_mako_template("dataset")
out_str = tmpl.render_unicode(
builder=builder, config_builders=config_builders,
).strip()
schema_org_tmpl = get_mako_template("schema_org")
schema_org_out_str = schema_org_tmpl.render_unicode(
builder=builder, config_builders=config_builders,
).strip()
out_str = schema_org_out_str + "\n" + out_str
return out_str
def make_module_to_builder_dict(datasets=None):
"""Get all builders organized by module in nested dicts."""
# pylint: disable=g-long-lambda
# dict to hold tfds->image->mnist->[builders]
module_to_builder = collections.defaultdict(
lambda: collections.defaultdict(lambda: collections.defaultdict(list))
)
# pylint: enable=g-long-lambda
if not datasets:
datasets = [
name for name in tfds.list_builders() if name not in BUILDER_BLACKLIST
]
print("Creating the vanilla builders for %s datasets..." % len(datasets))
with futures.ThreadPoolExecutor(max_workers=WORKER_COUNT_DATASETS) as tpool:
builders = tpool.map(tfds.builder, datasets)
print("Vanilla builders built, constructing module_to_builder dict...")
for builder in builders:
module_name = builder.__class__.__module__
modules = module_name.split(".")
if "testing" in modules:
continue
current_mod_ctr = module_to_builder
for mod in modules:
current_mod_ctr = current_mod_ctr[mod]
current_mod_ctr.append(builder)
module_to_builder = module_to_builder["tensorflow_datasets"]
return module_to_builder
def dataset_docs_str(datasets=None):
"""Create dataset documentation string for given datasets.
Args:
datasets: list of datasets for which to create documentation.
If None, then all available datasets will be used.
Returns:
- overview document
- a dictionary of sections. Each dataset in a section is represented by a
tuple (dataset_name, is_manual_dataset, string describing the datasets
(in the MarkDown format))
"""
print("Retrieving the list of builders...")
module_to_builder = make_module_to_builder_dict(datasets)
sections = sorted(list(module_to_builder.keys()))
section_docs = collections.defaultdict(list)
for section in sections:
builders = tf.nest.flatten(module_to_builder[section])
builders = sorted(builders, key=lambda b: b.name)
unused_ = get_mako_template("dataset") # To warm cache.
with futures.ThreadPoolExecutor(max_workers=WORKER_COUNT_DATASETS) as tpool:
builder_docs = tpool.map(document_single_builder, builders)
builder_docs = [
(builder.name, builder.MANUAL_DOWNLOAD_INSTRUCTIONS, builder_doc)
for (builder, builder_doc) in zip(builders, builder_docs)
]
section_docs[section.capitalize()] = builder_docs
tmpl = get_mako_template("catalog_overview")
catalog_overview = tmpl.render_unicode().lstrip()
return [catalog_overview, section_docs]
if __name__ == "__main__":
app.run(main)
| 34.608434 | 84 | 0.723934 | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to document datasets.
To test:
python -m tensorflow_datasets.scripts.document_datasets
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import app
from concurrent import futures
import mako.lookup
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_datasets.core.utils import py_utils
WORKER_COUNT_DATASETS = 200
WORKER_COUNT_CONFIGS = 50
BASE_URL = "https://github.com/tensorflow/datasets/tree/master/tensorflow_datasets"
# WmtTranslate: The raw wmt can only be instantiated with the config kwargs
# TODO(tfds): Document image_label_folder datasets in a separate section
BUILDER_BLACKLIST = ["wmt_translate"]
@py_utils.memoize()
def get_mako_template(tmpl_name):
  """Load the named mako template used to render documentation.

  Args:
    tmpl_name: string, name of template to load.

  Returns:
    mako 'Template' instance that can be rendered.
  """
  path = py_utils.get_tfds_path("scripts/templates/%s.mako.md" % tmpl_name)
  with tf.io.gfile.GFile(path, "r") as f:
    content = f.read()
  return mako.lookup.Template(content, default_filters=["str", "trim"])
def cls_url(module_file):
  """Returns the repository URL for the given module file path.

  Args:
    module_file: string, filesystem path of a module source file (typically
      a class's `__module__` file), possibly a compiled ".pyc" file.

  Returns:
    string, URL under `BASE_URL` pointing at the corresponding source file.
  """
  # A ".pyc" path maps back to its ".py" source by dropping the trailing "c".
  if module_file.endswith("pyc"):
    module_file = module_file[:-1]
  path = os.path.relpath(module_file, py_utils.tfds_dir())
  return os.path.join(BASE_URL, path)
def document_single_builder(builder):
  """Build the documentation string for one builder, with or without configs.

  Config-specific builders (if any) are instantiated concurrently, then the
  "schema_org" and "dataset" mako templates are rendered and concatenated.
  """
  print("Document builder %s..." % builder.name)
  config_builders = []
  if builder.builder_configs:
    with futures.ThreadPoolExecutor(max_workers=WORKER_COUNT_CONFIGS) as tpool:
      config_builders = list(tpool.map(
          lambda config: tfds.builder(builder.name, config=config),
          builder.BUILDER_CONFIGS))
  render_kwargs = dict(builder=builder, config_builders=config_builders)
  dataset_doc = get_mako_template("dataset").render_unicode(
      **render_kwargs).strip()
  schema_org_doc = get_mako_template("schema_org").render_unicode(
      **render_kwargs).strip()
  return schema_org_doc + "\n" + dataset_doc
def make_module_to_builder_dict(datasets=None):
  """Organize all builders into nested dicts keyed by their module path."""
  # Shape: module_to_builder["image"]["mnist"] -> [builders], rooted below
  # the top-level "tensorflow_datasets" package.
  # pylint: disable=g-long-lambda
  module_to_builder = collections.defaultdict(
      lambda: collections.defaultdict(lambda: collections.defaultdict(list))
  )
  # pylint: enable=g-long-lambda
  if not datasets:
    datasets = [n for n in tfds.list_builders() if n not in BUILDER_BLACKLIST]
  print("Creating the vanilla builders for %s datasets..." % len(datasets))
  with futures.ThreadPoolExecutor(max_workers=WORKER_COUNT_DATASETS) as tpool:
    builders = tpool.map(tfds.builder, datasets)
  print("Vanilla builders built, constructing module_to_builder dict...")
  for builder in builders:
    module_parts = builder.__class__.__module__.split(".")
    if "testing" in module_parts:
      continue
    # Walk/create the nested dicts down to this builder's module.
    node = module_to_builder
    for part in module_parts:
      node = node[part]
    node.append(builder)
  return module_to_builder["tensorflow_datasets"]
def dataset_docs_str(datasets=None):
  """Create dataset documentation string for given datasets.

  Args:
    datasets: list of datasets for which to create documentation.
      If None, then all available datasets will be used.

  Returns:
    - overview document
    - a dictionary of sections. Each dataset in a section is represented by a
      tuple (dataset_name, is_manual_dataset, string describing the datasets
      (in the MarkDown format))
  """
  print("Retrieving the list of builders...")
  module_to_builder = make_module_to_builder_dict(datasets)
  section_docs = collections.defaultdict(list)
  for section in sorted(module_to_builder):
    section_builders = sorted(
        tf.nest.flatten(module_to_builder[section]), key=lambda b: b.name)
    get_mako_template("dataset")  # To warm cache.
    with futures.ThreadPoolExecutor(max_workers=WORKER_COUNT_DATASETS) as tpool:
      docs = tpool.map(document_single_builder, section_builders)
    section_docs[section.capitalize()] = [
        (b.name, b.MANUAL_DOWNLOAD_INSTRUCTIONS, doc)
        for b, doc in zip(section_builders, docs)
    ]
  overview = get_mako_template("catalog_overview").render_unicode().lstrip()
  return [overview, section_docs]
def main(_):
  """Entry point: print the generated dataset catalog documentation."""
  print(dataset_docs_str())
if __name__ == "__main__":
app.run(main)
| 201 | 0 | 46 |
52e4d662718a8f9f1b46b82953a87eb0d4aa9eb1 | 3,505 | py | Python | lib/model/train.py | kelvindecosta/heimdall | af3f69781619bcfe17c7a96f2a10f2a4b52ea8a2 | [
"MIT"
] | null | null | null | lib/model/train.py | kelvindecosta/heimdall | af3f69781619bcfe17c7a96f2a10f2a4b52ea8a2 | [
"MIT"
] | null | null | null | lib/model/train.py | kelvindecosta/heimdall | af3f69781619bcfe17c7a96f2a10f2a4b52ea8a2 | [
"MIT"
] | null | null | null | import segmentation_models_pytorch as smp
import shutil
import torch
from pathlib import Path
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from lib.dataset import DroneDeploySegmentationDataset as Dataset
from lib.config import (
BATCH_SIZES,
CONFIG_PATH,
CRITERION,
CRITERION_ARGS,
DEVICE,
EPOCHS,
METRIC,
METRIC_ARGS,
MODEL,
MODEL_ARGS,
OPTIMIZER,
OPTIMIZER_ARGS,
SCHEDULER,
SCHEDULER_ARGS,
TIMESTAMP,
)
__all__ = ["run"]
def run(**kwargs):
"""
Trains a model on the dataset.
Uses the following configuration settings:
- BATCH_SIZES: number of data points fed in a single optimization step
- CONFIG_PATH: path to configuration file
- CRITERION: loss function
- CRITERION_ARGS: arguments for criterion
- DEVICE: device upon which torch operations are run
- EPOCHS: number of iterations on the dataset
- METRIC: accuracy score
- METRIC_ARGS: arguments for metric
- MODEL: model architecture
- MODEL_ARGS: arguments for model
- OPTIMIZER: gradient descent and backpropagation optimizer
- OPTIMIZER_ARGS: arguments for optimizer
- SCHEDULER: learning rate scheduler
- SCHEDULER_ARGS: arguments for scheduler
- TIMESTAMP: time at run (unique identifier)
"""
# Create data loaders
data_loaders = {
"train": DataLoader(
Dataset(split="train"), batch_size=BATCH_SIZES["train"], shuffle=True,
),
"valid": DataLoader(
Dataset(split="valid"), batch_size=BATCH_SIZES["valid"], shuffle=False,
),
}
# Assign model, criterion, optimizer, scheduler and metrics
model = MODEL(**MODEL_ARGS)
criterion = CRITERION(**CRITERION_ARGS)
optimizer = OPTIMIZER(params=model.parameters(), **OPTIMIZER_ARGS)
scheduler = SCHEDULER(optimizer=optimizer, **SCHEDULER_ARGS)
metric = METRIC(**METRIC_ARGS)
# Create train and valid epoch executions
execution = {
"train": smp.utils.train.TrainEpoch(
model,
loss=criterion,
metrics=[metric],
optimizer=optimizer,
device=DEVICE,
verbose=True,
),
"valid": smp.utils.train.ValidEpoch(
model, loss=criterion, metrics=[metric], device=DEVICE, verbose=True,
),
}
# Create run directory
run_dir = Path("runs") / TIMESTAMP
run_dir.mkdir(parents=True, exist_ok=True)
# Copy current configuration settings
shutil.copy(str(CONFIG_PATH), str(run_dir / "config.yml"))
# Setup TensorBoard
writer = SummaryWriter(str(run_dir))
# Iterate over epochs
best_score = 0
for epoch in range(EPOCHS):
print(f"Epoch: {epoch+1}")
# Iterate over phases
for phase in ["train", "valid"]:
# Evaluate dataset
logs = execution[phase].run(data_loaders[phase])
# Write to TensorBoard
for scalar in logs:
writer.add_scalar(f"{phase} {scalar}", logs[scalar], epoch + 1)
# Save the model if it is the best one so far, based on the validation score
score = logs[metric.__name__]
if phase == "valid" and best_score < score:
best_score = score
torch.save(model, str(run_dir / "model.pth"))
# Notify scheduler every epoch
scheduler.step()
| 29.70339 | 88 | 0.632525 | import segmentation_models_pytorch as smp
import shutil
import torch
from pathlib import Path
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from lib.dataset import DroneDeploySegmentationDataset as Dataset
from lib.config import (
BATCH_SIZES,
CONFIG_PATH,
CRITERION,
CRITERION_ARGS,
DEVICE,
EPOCHS,
METRIC,
METRIC_ARGS,
MODEL,
MODEL_ARGS,
OPTIMIZER,
OPTIMIZER_ARGS,
SCHEDULER,
SCHEDULER_ARGS,
TIMESTAMP,
)
__all__ = ["run"]
def run(**kwargs):
    """
    Trains a model on the dataset.

    The whole run is driven by the configuration module:

    - BATCH_SIZES: number of data points fed in a single optimization step
    - CONFIG_PATH: path to configuration file
    - CRITERION / CRITERION_ARGS: loss function and its arguments
    - DEVICE: device upon which torch operations are run
    - EPOCHS: number of iterations on the dataset
    - METRIC / METRIC_ARGS: accuracy score and its arguments
    - MODEL / MODEL_ARGS: model architecture and its arguments
    - OPTIMIZER / OPTIMIZER_ARGS: optimizer and its arguments
    - SCHEDULER / SCHEDULER_ARGS: learning rate scheduler and its arguments
    - TIMESTAMP: time at run (unique identifier)

    The best model (by validation metric) is saved under ``runs/<TIMESTAMP>``
    together with a copy of the configuration and TensorBoard logs.
    """
    # Data loaders: only the training split is shuffled.
    loaders = {}
    for split, shuffle in (("train", True), ("valid", False)):
        loaders[split] = DataLoader(
            Dataset(split=split),
            batch_size=BATCH_SIZES[split],
            shuffle=shuffle,
        )

    # Instantiate the training components from the configuration.
    model = MODEL(**MODEL_ARGS)
    criterion = CRITERION(**CRITERION_ARGS)
    optimizer = OPTIMIZER(params=model.parameters(), **OPTIMIZER_ARGS)
    scheduler = SCHEDULER(optimizer=optimizer, **SCHEDULER_ARGS)
    metric = METRIC(**METRIC_ARGS)

    # One epoch runner per phase.
    runners = {
        "train": smp.utils.train.TrainEpoch(
            model,
            loss=criterion,
            metrics=[metric],
            optimizer=optimizer,
            device=DEVICE,
            verbose=True,
        ),
        "valid": smp.utils.train.ValidEpoch(
            model,
            loss=criterion,
            metrics=[metric],
            device=DEVICE,
            verbose=True,
        ),
    }

    # Prepare the run directory, snapshot the configuration, set up logging.
    run_dir = Path("runs") / TIMESTAMP
    run_dir.mkdir(parents=True, exist_ok=True)
    shutil.copy(str(CONFIG_PATH), str(run_dir / "config.yml"))
    writer = SummaryWriter(str(run_dir))

    best_score = 0
    for epoch in range(EPOCHS):
        print(f"Epoch: {epoch+1}")
        for phase in ["train", "valid"]:
            logs = runners[phase].run(loaders[phase])
            # Mirror every logged scalar to TensorBoard.
            for name, value in logs.items():
                writer.add_scalar(f"{phase} {name}", value, epoch + 1)
            # Checkpoint whenever the validation score improves.
            score = logs[metric.__name__]
            if phase == "valid" and best_score < score:
                best_score = score
                torch.save(model, str(run_dir / "model.pth"))
        # Advance the learning-rate schedule once per epoch.
        scheduler.step()
| 0 | 0 | 0 |
956f6bb2822a61e2af601a2b724a088566b856d4 | 29 | py | Python | tools/rcp/__main__.py | noahbkim/finances | 98e2d0cf1bcabcc9785177d36c581e6d3f7caba7 | [
"BSD-3-Clause"
] | null | null | null | tools/rcp/__main__.py | noahbkim/finances | 98e2d0cf1bcabcc9785177d36c581e6d3f7caba7 | [
"BSD-3-Clause"
] | null | null | null | tools/rcp/__main__.py | noahbkim/finances | 98e2d0cf1bcabcc9785177d36c581e6d3f7caba7 | [
"BSD-3-Clause"
] | 1 | 2019-02-14T08:09:46.000Z | 2019-02-14T08:09:46.000Z | from .rcp import main
main() | 9.666667 | 21 | 0.724138 | from .rcp import main
main() | 0 | 0 | 0 |
d45dd403fb3479360a3e6dcc42bea3d26dd8b86b | 2,272 | py | Python | tradefed_cluster/api.py | maksonlee/tradefed_cluster | d1153743ce8ddcad752443b23851015630862aea | [
"Apache-2.0"
] | null | null | null | tradefed_cluster/api.py | maksonlee/tradefed_cluster | d1153743ce8ddcad752443b23851015630862aea | [
"Apache-2.0"
] | null | null | null | tradefed_cluster/api.py | maksonlee/tradefed_cluster | d1153743ce8ddcad752443b23851015630862aea | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""an API module to serve TF Cluster APIs."""
import endpoints
from tradefed_cluster import cluster_api
from tradefed_cluster import cluster_device_api
from tradefed_cluster import cluster_host_api
from tradefed_cluster import command_attempt_api
from tradefed_cluster import command_event_api
from tradefed_cluster import command_task_api
from tradefed_cluster import coordinator_api
from tradefed_cluster import device_blocklist_api
from tradefed_cluster import device_snapshot_api
from tradefed_cluster import filter_hint_api
from tradefed_cluster import env_config
from tradefed_cluster import test_harness_image_api
from tradefed_cluster import host_event_api
from tradefed_cluster import lab_management_api
from tradefed_cluster import predefined_message_api
from tradefed_cluster import report_api
from tradefed_cluster import request_api
from tradefed_cluster import run_target_api
from tradefed_cluster import acl_check_api
API_HANDLERS = [
cluster_api.ClusterApi,
cluster_device_api.ClusterDeviceApi,
cluster_host_api.ClusterHostApi,
command_attempt_api.CommandAttemptApi,
command_event_api.CommandEventApi,
command_task_api.CommandTaskApi,
coordinator_api.CoordinatorApi,
device_blocklist_api.DeviceBlocklistApi,
device_snapshot_api.DeviceSnapshotApi,
filter_hint_api.FilterHintApi,
test_harness_image_api.TestHarnessImageApi,
host_event_api.HostEventApi,
lab_management_api.LabManagementApi,
predefined_message_api.PredefinedMessageApi,
report_api.ReportApi,
request_api.RequestApi,
run_target_api.RunTargetApi,
acl_check_api.AclApi,
] + env_config.CONFIG.extra_apis
APP = endpoints.api_server(API_HANDLERS)
| 37.245902 | 74 | 0.832746 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""an API module to serve TF Cluster APIs."""
import endpoints
from tradefed_cluster import cluster_api
from tradefed_cluster import cluster_device_api
from tradefed_cluster import cluster_host_api
from tradefed_cluster import command_attempt_api
from tradefed_cluster import command_event_api
from tradefed_cluster import command_task_api
from tradefed_cluster import coordinator_api
from tradefed_cluster import device_blocklist_api
from tradefed_cluster import device_snapshot_api
from tradefed_cluster import filter_hint_api
from tradefed_cluster import env_config
from tradefed_cluster import test_harness_image_api
from tradefed_cluster import host_event_api
from tradefed_cluster import lab_management_api
from tradefed_cluster import predefined_message_api
from tradefed_cluster import report_api
from tradefed_cluster import request_api
from tradefed_cluster import run_target_api
from tradefed_cluster import acl_check_api
# All API handler classes served by this module, plus any deployment-specific
# extras supplied through the environment configuration.
API_HANDLERS = [
    cluster_api.ClusterApi,
    cluster_device_api.ClusterDeviceApi,
    cluster_host_api.ClusterHostApi,
    command_attempt_api.CommandAttemptApi,
    command_event_api.CommandEventApi,
    command_task_api.CommandTaskApi,
    coordinator_api.CoordinatorApi,
    device_blocklist_api.DeviceBlocklistApi,
    device_snapshot_api.DeviceSnapshotApi,
    filter_hint_api.FilterHintApi,
    test_harness_image_api.TestHarnessImageApi,
    host_event_api.HostEventApi,
    lab_management_api.LabManagementApi,
    predefined_message_api.PredefinedMessageApi,
    report_api.ReportApi,
    request_api.RequestApi,
    run_target_api.RunTargetApi,
    acl_check_api.AclApi,
] + env_config.CONFIG.extra_apis
# The application object exposing all of the above handlers as one API server.
APP = endpoints.api_server(API_HANDLERS)
| 0 | 0 | 0 |
2671a750ee7ab12cb193704869904d96497ae9a3 | 975 | py | Python | tests/client/teams_test.py | carlmanaster/python-lokalise-api | ce4a43c5a7bf14f45a2432e096b1880ff28d6770 | [
"BSD-3-Clause"
] | 5 | 2020-09-09T15:22:34.000Z | 2021-12-07T12:24:26.000Z | tests/client/teams_test.py | carlmanaster/python-lokalise-api | ce4a43c5a7bf14f45a2432e096b1880ff28d6770 | [
"BSD-3-Clause"
] | 39 | 2020-12-08T16:56:06.000Z | 2022-03-28T15:18:52.000Z | tests/client/teams_test.py | carlmanaster/python-lokalise-api | ce4a43c5a7bf14f45a2432e096b1880ff28d6770 | [
"BSD-3-Clause"
] | 1 | 2021-03-25T02:55:49.000Z | 2021-03-25T02:55:49.000Z | """
Tests for the Teams endpoint.
"""
import pytest
@pytest.mark.vcr
def test_teams(client):
"""Tests fetching of all teams
"""
teams = client.teams()
team = teams.items[0]
assert team.team_id == 199048
assert team.name == "Sample"
assert team.created_at == "2019-12-25 13:50:00 (Etc/UTC)"
assert team.created_at_timestamp == 1577281800
assert team.plan == "Trial"
assert team.quota_usage['users'] == 1
assert team.quota_allowed['users'] == 999999999
@pytest.mark.vcr
def test_teams_pagination(client):
"""Tests fetching of all teams with pagination
"""
teams = client.teams({"page": 2, "limit": 3})
assert teams.items[0].team_id == 170312
assert teams.current_page == 2
assert teams.total_count == 4
assert teams.page_count == 2
assert teams.limit == 3
assert teams.is_last_page()
assert not teams.is_first_page()
assert not teams.has_next_page()
assert teams.has_prev_page()
| 25.657895 | 61 | 0.668718 | """
Tests for the Teams endpoint.
"""
import pytest
@pytest.mark.vcr
def test_teams(client):
    """Verifies that all teams can be fetched and parsed correctly."""
    first_team = client.teams().items[0]
    assert first_team.team_id == 199048
    assert first_team.name == "Sample"
    assert first_team.created_at == "2019-12-25 13:50:00 (Etc/UTC)"
    assert first_team.created_at_timestamp == 1577281800
    assert first_team.plan == "Trial"
    assert first_team.quota_usage['users'] == 1
    assert first_team.quota_allowed['users'] == 999999999
@pytest.mark.vcr
def test_teams_pagination(client):
    """Verifies fetching teams with pagination parameters."""
    page = client.teams({"page": 2, "limit": 3})
    assert page.items[0].team_id == 170312
    assert (page.current_page, page.total_count) == (2, 4)
    assert (page.page_count, page.limit) == (2, 3)
    assert page.is_last_page() and not page.is_first_page()
    assert not page.has_next_page() and page.has_prev_page()
| 0 | 0 | 0 |
0681764ad795b1fb284a31ab85ad4886247e1650 | 21,494 | py | Python | varconlib/fitting/model_fits.py | DBerke/alpha-var-code | 9b97622dfddd582e048e32d47afb29cf8ec14544 | [
"MIT"
] | null | null | null | varconlib/fitting/model_fits.py | DBerke/alpha-var-code | 9b97622dfddd582e048e32d47afb29cf8ec14544 | [
"MIT"
] | null | null | null | varconlib/fitting/model_fits.py | DBerke/alpha-var-code | 9b97622dfddd582e048e32d47afb29cf8ec14544 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 09:44:15 2019
@author: dberke
Code to define a class for a model fit to an absorption line.
"""
import matplotlib
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from scipy.optimize import curve_fit, OptimizeWarning
from tqdm import tqdm
import unyt as u
from varconlib.exceptions import PositiveAmplitudeError
from varconlib.fitting import gaussian, integrated_gaussian
from varconlib.miscellaneous import (shift_wavelength, velocity2wavelength,
wavelength2index, wavelength2velocity)
# This line prevents the wavelength formatting from being in the form of
# scientific notation.
matplotlib.rcParams['axes.formatter.useoffset'] = False
# Don't use TeX for font rendering, as these are just diagnostic plots and it
# slows everything way down.
matplotlib.rcParams['text.usetex'] = False
class GaussianFit(object):
"""A class to fit an absorption line and store information about the fit.
"""
    def __init__(self, transition, observation, order, radial_velocity=None,
                 close_up_plot_path=None, context_plot_path=None,
                 integrated=True, verbose=False):
        """Construct a fit to an absorption feature using a Gaussian or
        integrated Gaussian.

        Parameters
        ----------
        transition : `transition_line.Transition` object
            A `Transition` object representing the absorption feature to fit.
        observation : `obs2d.HARPSFile2DScience` object
            A `HARPSFile2DScience` object to find the absorption feature in.
        order : int
            The order in the e2ds file to fit the transition in. Zero-indexed,
            so ranging from [0-71].

        Optional
        --------
        radial_velocity : `unyt.unyt_quantity`
            A radial velocity (dimensions of length / time) for the object in
            the observation. Most of the time the radial velocity should be
            picked up from the observation itself, but for certain objects
            such as asteroids the supplied radial velocity may not be correct.
            In such cases, this parameter can be used to override the given
            radial velocity.
        close_up_plot_path : string or `pathlib.Path`
            The file name to save a close-up plot of the fit to.
        context_plot_path : string or `pathlib.Path`
            The file name to save a wider context plot (±25 km/s) around the
            fitted feature to.
        integrated : bool, Default : True
            Controls whether to attempt to fit a feature with an integrated
            Gaussian instead of a Gaussian.
        verbose : bool, Default : False
            Whether to print out extra diagnostic information while running
            the function.

        Raises
        ------
        PositiveAmplitudeError
            If the fitted Gaussian has a positive amplitude (i.e. the fit
            found an emission-like rather than absorption-like profile).
        RuntimeError
            Re-raised from `scipy.optimize.curve_fit` if the fit fails to
            converge (a diagnostic plot is saved first).
        """
        # Store the transition.
        self.transition = transition
        # Grab some observation-specific information from the observation.
        self.dateObs = observation.dateObs
        self.BERV = observation.BERV
        self.airmass = observation.airmass
        self.exptime = observation.exptime
        self.calibrationFile = observation.calibrationFile
        self.calibrationSource = observation.calibrationSource
        self.order = int(order)
        # Store the plot paths.
        self.close_up_plot_path = close_up_plot_path
        self.context_plot_path = context_plot_path
        # Define some useful numbers and variables.
        # The ranges in velocity space to search around to find the minimum of
        # an absorption line.
        search_range_vel = 5 * u.km / u.s
        # The range in velocity space to consider to find the continuum.
        continuum_range_vel = 25 * u.km / u.s
        # The number of pixels either side of the flux minimum to use in the
        # fit.
        pixel_range = 3
        # If no radial velocity is given, use the radial velocity from the
        # supplied observation. This is mostly for use with things like
        # asteroids that might not have a radial velocity assigned.
        if radial_velocity is None:
            radial_velocity = observation.radialVelocity
        # Shift the wavelength being searched for to correct for the radial
        # velocity of the star.
        nominal_wavelength = self.transition.wavelength.to(u.angstrom)
        self.correctedWavelength = shift_wavelength(nominal_wavelength,
                                                    radial_velocity)
        if verbose:
            tqdm.write('Given RV {:.2f}: line {:.3f} should be at {:.3f}'.
                       format(radial_velocity,
                              nominal_wavelength.to(u.angstrom),
                              self.correctedWavelength.to(u.angstrom)))
        # NOTE(review): assumes the observation arrays are indexed
        # [order, pixel] — confirm against obs2d.HARPSFile2DScience.
        self.baryArray = observation.barycentricArray[self.order]
        self.fluxArray = observation.photonFluxArray[self.order]
        self.errorArray = observation.errorArray[self.order]
        # Figure out the range in wavelength space to search around the nominal
        # wavelength for the flux minimum, as well as the range to take for
        # measuring the continuum.
        search_range = velocity2wavelength(search_range_vel,
                                           self.correctedWavelength)
        self.continuumRange = velocity2wavelength(continuum_range_vel,
                                                  self.correctedWavelength)
        low_search_index = wavelength2index(self.correctedWavelength -
                                            search_range,
                                            self.baryArray)
        high_search_index = wavelength2index(self.correctedWavelength +
                                             search_range,
                                             self.baryArray)
        self.lowContinuumIndex = wavelength2index(self.correctedWavelength
                                                  - self.continuumRange,
                                                  self.baryArray)
        self.highContinuumIndex = wavelength2index(self.correctedWavelength
                                                   + self.continuumRange,
                                                   self.baryArray)
        # The pixel of minimum flux within the search window is taken as the
        # centre of the absorption feature.
        self.centralIndex = low_search_index + \
            self.fluxArray[low_search_index:high_search_index].argmin()
        self.continuumLevel = self.fluxArray[self.lowContinuumIndex:
                                             self.highContinuumIndex].max()
        self.fluxMinimum = self.fluxArray[self.centralIndex]
        self.lowFitIndex = self.centralIndex - pixel_range
        self.highFitIndex = self.centralIndex + pixel_range + 1
        # Grab the wavelengths, fluxes, and errors from the region to be fit.
        self.wavelengths = self.baryArray[self.lowFitIndex:self.highFitIndex]
        self.fluxes = self.fluxArray[self.lowFitIndex:self.highFitIndex]
        self.errors = self.errorArray[self.lowFitIndex:self.highFitIndex]
        self.lineDepth = self.continuumLevel - self.fluxMinimum
        self.normalizedLineDepth = self.lineDepth / self.continuumLevel
        # Initial guess tuple: (amplitude, mean [Å], sigma [Å], baseline);
        # amplitude is negative because this is an absorption feature.
        self.initial_guess = (self.lineDepth * -1,
                              self.correctedWavelength.to(u.angstrom).value,
                              0.05,
                              self.continuumLevel)
        if verbose:
            tqdm.write('Attempting to fit line at {:.4f} with initial guess:'.
                       format(self.correctedWavelength))
        if verbose:
            tqdm.write('Initial parameters are:\n{}\n{}\n{}\n{}'.format(
                *self.initial_guess))
        # Do the fitting:
        try:
            if integrated:
                # Fit the flux integrated across each pixel, using the pixel
                # edge wavelengths rather than the pixel centres.
                wavelengths_lower = observation.pixelLowerArray
                wavelengths_upper = observation.pixelUpperArray
                pixel_edges_lower = wavelengths_lower[self.order,
                                                      self.lowFitIndex:
                                                      self.highFitIndex]
                pixel_edges_upper = wavelengths_upper[self.order,
                                                      self.lowFitIndex:
                                                      self.highFitIndex]
                self.popt, self.pcov = curve_fit(integrated_gaussian,
                                                 (pixel_edges_lower.value,
                                                  pixel_edges_upper.value),
                                                 self.fluxes,
                                                 sigma=self.errors,
                                                 absolute_sigma=True,
                                                 p0=self.initial_guess,
                                                 method='lm', maxfev=10000)
            else:
                self.popt, self.pcov = curve_fit(gaussian,
                                                 self.wavelengths.value,
                                                 self.fluxes,
                                                 sigma=self.errors,
                                                 absolute_sigma=True,
                                                 p0=self.initial_guess,
                                                 method='lm', maxfev=10000)
        except (OptimizeWarning, RuntimeError):
            # Save a diagnostic plot (without the failed fit) before
            # re-raising, to help debug the failure.
            print(self.continuumLevel)
            print(self.lineDepth)
            print(self.initial_guess)
            self.plotFit(close_up_plot_path, context_plot_path,
                         plot_fit=False, verbose=True)
            raise
        if verbose:
            print(self.popt)
            print(self.pcov)
        # Recover the fitted values for the parameters:
        self.amplitude = self.popt[0]
        self.mean = self.popt[1] * u.angstrom
        self.sigma = self.popt[2] * u.angstrom
        # An absorption feature must have a negative amplitude; a positive
        # one means the fit latched onto something else.
        if self.amplitude > 0:
            err_msg = ('Fit for'
                       f' {self.transition.wavelength.to(u.angstrom)}'
                       ' has a positive amplitude.')
            tqdm.write(err_msg)
            self.plotFit(close_up_plot_path, context_plot_path,
                         plot_fit=True, verbose=verbose)
            raise PositiveAmplitudeError(err_msg)
        # Find 1-σ errors from the covariance matrix:
        self.perr = np.sqrt(np.diag(self.pcov))
        self.amplitudeErr = self.perr[0]
        self.meanErr = self.perr[1] * u.angstrom
        self.meanErrVel = abs(wavelength2velocity(self.mean,
                                                  self.mean +
                                                  self.meanErr))
        self.sigmaErr = self.perr[2] * u.angstrom
        # NOTE(review): self.chiSquaredNu is a property defined elsewhere on
        # the class (not set in this method). Inflate the mean error by
        # sqrt(χ²_ν) for poor fits.
        if (self.chiSquaredNu > 1):
            self.meanErr *= np.sqrt(self.chiSquaredNu)
        if verbose:
            tqdm.write('χ^2_ν = {}'.format(self.chiSquaredNu))
        # Find the full width at half max.
        # 2.354820 ≈ 2 * sqrt(2 * ln(2)), the relationship of FWHM to the
        # standard deviation of a Gaussian.
        self.FWHM = 2.354820 * self.sigma
        self.FWHMErr = 2.354820 * self.sigmaErr
        self.velocityFWHM = wavelength2velocity(self.mean,
                                                self.mean +
                                                self.FWHM).to(u.km/u.s)
        self.velocityFWHMErr = wavelength2velocity(self.mean,
                                                   self.mean +
                                                   self.FWHMErr).to(u.km/u.s)
        # Compute the offset between the input wavelength and the wavelength
        # found in the fit.
        self.offset = self.correctedWavelength - self.mean
        self.offsetErr = self.meanErr
        self.velocityOffset = wavelength2velocity(self.correctedWavelength,
                                                  self.mean)
        self.velocityOffsetErr = wavelength2velocity(self.mean,
                                                     self.mean +
                                                     self.offsetErr)
        if verbose:
            print(self.continuumLevel)
            print(self.fluxMinimum)
            print(self.wavelengths)
@property
@property
@property
def getFitInformation(self):
"""Return a list of information about the fit which can be written as
a CSV file.
Returns
-------
list
A list containing the following information about the fit:
1. Observation date, in ISO format
2. The amplitude of the fit (in photons)
3. The error on the amplitude (in photons)
4. The mean of the fit (in Å)
5. The error on the mean (in Å)
6. The error on the mean (in m/s in velocity space)
7. The sigma of the fitted Gaussian (in Å)
8. The error on the sigma (in Å)
9. The offset from expected wavelength (in m/s)
10. The error on the offset (in m/s)
11. The FWHM (in velocity space)
12. The error on the FWHM (in m/s)
13. The chi-squared-nu value
14. The order the fit was made on (starting at 0, so in [0, 71].
15. The mean airmass of the observation.
"""
return [self.dateObs.isoformat(timespec='milliseconds'),
self.amplitude,
self.amplitudeErr,
self.mean.value,
self.meanErr.value,
self.meanErrVel.value,
self.sigma.value,
self.sigmaErr.value,
self.velocityOffset.to(u.m/u.s).value,
self.velocityOffsetErr.to(u.m/u.s).value,
self.velocityFWHM.to(u.m/u.s).value,
self.velocityFWHMErr.to(u.m/u.s).value,
self.chiSquaredNu,
self.order,
self.airmass]
def plotFit(self, close_up_plot_path=None,
context_plot_path=None,
plot_fit=True,
verbose=False):
"""Plot a graph of this fit.
This method will produce a 'close-up' plot of just the fitted region
itself, in order to check out the fit has worked out, and a wider
'context' plot of the area around the feature.
Optional
--------
close_up_plot_path : string or `pathlib.Path`
The file name to save a close-up plot of the fit to. If not given,
will default to using the value providing when initializing the
fit.
context_plot_path : string or `pathlib.Path`
The file name to save a wider context plot (±25 km/s) around the
fitted feature to. If not given, will default to using the value
provided when initializing the fit.
plot_fit : bool, Default : True
If *True*, plot the mean of the fit and the fitted Gaussian.
Otherwise, don't plot those two things. This allows creating plots
of failed fits to see the context of the data.
verbose : bool, Default : False
If *True*, the function will print out additional information as it
runs.
"""
edge_pixels = (509, 510, 1021, 1022, 1533, 1534, 2045, 2046,
2557, 2558, 3069, 3070, 3581, 3582)
# If no plot paths are given, assume we want to use the ones given
# when initializing the fit.
if close_up_plot_path is None:
close_up_plot_path = self.close_up_plot_path
if context_plot_path is None:
context_plot_path = self.context_plot_path
# Set up the figure.
fig = plt.figure(figsize=(7, 5), dpi=100, tight_layout=True)
gs = GridSpec(nrows=2, ncols=1, height_ratios=[4, 1], hspace=0)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1], sharex=ax1)
ax1.tick_params(axis='x', direction='in')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2.set_ylim(bottom=-3, top=3)
ax2.yaxis.set_major_locator(ticker.FixedLocator([-2, -1, 0, 1, 2]))
for pixel in edge_pixels:
ax1.axvline(x=self.baryArray[pixel-1], ymin=0, ymax=0.2,
color='LimeGreen',
linestyle='--')
ax1.set_ylabel('Flux (photo-electrons)')
ax2.set_xlabel('Wavelength ($\\AA$)')
ax2.set_ylabel('Residuals\n($\\sigma$)')
ax1.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.2f}'))
ax1.yaxis.set_major_formatter(ticker.StrMethodFormatter('{x:>7.1e}'))
plt.xticks(horizontalalignment='right')
ax1.set_xlim(left=self.correctedWavelength - self.continuumRange,
right=self.correctedWavelength + self.continuumRange)
# Set y-limits so a fit doesn't balloon the plot scale out.
ax1.set_ylim(top=self.continuumLevel * 1.25,
bottom=self.fluxMinimum * 0.93)
# Plot the expected and measured wavelengths.
ax1.axvline(self.correctedWavelength.to(u.angstrom),
color='LightSteelBlue', linestyle=':', alpha=0.8,
label=r'RV-corrected $\lambda=${:.3f}'.format(
self.correctedWavelength.to(u.angstrom)))
# Don't plot the mean if this is a failed fit.
if hasattr(self, 'mean') and hasattr(self, 'velocityOffset'):
ax1.axvline(self.mean.to(u.angstrom),
color='IndianRed', alpha=0.7,
label='Mean ({:.4f}, {:+.2f})'.
format(self.mean.to(u.angstrom),
self.velocityOffset.to(u.m/u.s)),
linestyle='-')
# Plot the actual data.
ax1.errorbar(self.baryArray[self.lowContinuumIndex - 1:
self.highContinuumIndex + 1],
self.fluxArray[self.lowContinuumIndex - 1:
self.highContinuumIndex + 1],
yerr=self.errorArray[self.lowContinuumIndex - 1:
self.highContinuumIndex + 1],
color='SandyBrown', ecolor='Sienna',
marker='o', markersize=5,
label='Flux', barsabove=True)
# Generate some x-values across the plot range.
x = np.linspace(self.baryArray[self.lowContinuumIndex].value,
self.baryArray[self.highContinuumIndex].value, 1000)
# Plot the initial guess for the gaussian.
ax1.plot(x, gaussian(x, *self.initial_guess),
color='SlateGray', label='Initial guess',
linestyle='--', alpha=0.5)
# Plot the fitted gaussian, unless this is a failed fit attempt.
if plot_fit:
ax1.plot(x, gaussian(x, *self.popt),
color='DarkGreen', alpha=0.5,
linestyle='-.',
label=r'Fit ($\chi^2_\nu=${:.3f}, $\sigma=${:.4f})'.
format(self.chiSquaredNu, self.sigma))
# Replace underscore in label so LaTeX won't crash on it.
ax1.legend(loc='upper center', framealpha=0.6, fontsize=9,
ncol=2,
title=self.label.replace('_', r'\_') if\
matplotlib.rcParams['text.usetex'] else self.label,
title_fontsize=10,
labelspacing=0.4)
# Add in some guidelines.
ax2.axhline(color='Gray', linestyle='-')
ax2.axhline(y=1, color='SkyBlue', linestyle='--')
ax2.axhline(y=-1, color='SkyBlue', linestyle='--')
ax2.axhline(y=2, color='LightSteelBlue', linestyle=':')
ax2.axhline(y=-2, color='LightSteelBlue', linestyle=':')
# Plot the residuals on the lower axis.
residuals = (self.fluxes - gaussian(self.wavelengths.value,
*self.popt)) / self.errors
ax2.plot(self.wavelengths, residuals, color='Navy', alpha=0.6,
linestyle='', marker='D', linewidth=1.5, markersize=5)
# Save the resultant plot.
fig.savefig(str(context_plot_path), format="png")
if verbose:
tqdm.write('Created wider context plot at {}'.format(
context_plot_path))
# Now create a close-in version to focus on the fit.
ax1.set_xlim(left=self.baryArray[self.lowFitIndex - 1],
right=self.baryArray[self.highFitIndex])
ax1.set_ylim(top=self.fluxes.max() * 1.15,
bottom=self.fluxes.min() * 0.95)
fig.savefig(str(close_up_plot_path), format="png")
if verbose:
tqdm.write('Created close up plot at {}'.format(
close_up_plot_path))
plt.close(fig)
| 44.135524 | 79 | 0.555457 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 09:44:15 2019
@author: dberke
Code to define a class for a model fit to an absorption line.
"""
import matplotlib
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from scipy.optimize import curve_fit, OptimizeWarning
from tqdm import tqdm
import unyt as u
from varconlib.exceptions import PositiveAmplitudeError
from varconlib.fitting import gaussian, integrated_gaussian
from varconlib.miscellaneous import (shift_wavelength, velocity2wavelength,
wavelength2index, wavelength2velocity)
# This line prevents the wavelength formatting from being in the form of
# scientific notation.
matplotlib.rcParams['axes.formatter.useoffset'] = False
# Don't use TeX for font rendering, as these are just diagnostic plots and it
# slows everything way down.
matplotlib.rcParams['text.usetex'] = False
class GaussianFit(object):
"""A class to fit an absorption line and store information about the fit.
"""
    def __init__(self, transition, observation, order, radial_velocity=None,
                 close_up_plot_path=None, context_plot_path=None,
                 integrated=True, verbose=False):
        """Construct a fit to an absorption feature using a Gaussian or
        integrated Gaussian.
        Parameters
        ----------
        transition : `transition_line.Transition` object
            A `Transition` object representing the absorption feature to fit.
        observation : `obs2d.HARPSFile2DScience` object
            A `HARPSFile2DScience` object to find the absorption feature in.
        order : int
            The order in the e2ds file to fit the transition in. Zero-indexed,
            so ranging from [0-71].
        Optional
        --------
        radial_velocity : `unyt.unyt_quantity`
            A radial velocity (dimensions of length / time) for the object in
            the observation. Most of the time the radial velocity should be
            picked up from the observation itself, but for certain objects
            such as asteroids the supplied radial velocity may not be correct.
            In such cases, this parameter can be used to override the given
            radial velocity.
        close_up_plot_path : string or `pathlib.Path`
            The file name to save a close-up plot of the fit to.
        context_plot_path : string or `pathlib.Path`
            The file name to save a wider context plot (±25 km/s) around the
            fitted feature to.
        integrated : bool, Default : True
            Controls whether to attempt to fit a feature with an integrated
            Gaussian instead of a Gaussian.
        verbose : bool, Default : False
            Whether to print out extra diagnostic information while running
            the function.
        """
        # Store the transition.
        self.transition = transition
        # Grab some observation-specific information from the observation.
        self.dateObs = observation.dateObs
        self.BERV = observation.BERV
        self.airmass = observation.airmass
        self.exptime = observation.exptime
        self.calibrationFile = observation.calibrationFile
        self.calibrationSource = observation.calibrationSource
        self.order = int(order)
        # Store the plot paths.
        self.close_up_plot_path = close_up_plot_path
        self.context_plot_path = context_plot_path
        # Define some useful numbers and variables.
        # The ranges in velocity space to search around to find the minimum of
        # an absorption line.
        search_range_vel = 5 * u.km / u.s
        # The range in velocity space to consider to find the continuum.
        continuum_range_vel = 25 * u.km / u.s
        # The number of pixels either side of the flux minimum to use in the
        # fit.
        pixel_range = 3
        # If no radial velocity is given, use the radial velocity from the
        # supplied observation. This is mostly for use with things like
        # asteroids that might not have a radial velocity assigned.
        if radial_velocity is None:
            radial_velocity = observation.radialVelocity
        # Shift the wavelength being searched for to correct for the radial
        # velocity of the star.
        nominal_wavelength = self.transition.wavelength.to(u.angstrom)
        self.correctedWavelength = shift_wavelength(nominal_wavelength,
                                                    radial_velocity)
        if verbose:
            tqdm.write('Given RV {:.2f}: line {:.3f} should be at {:.3f}'.
                       format(radial_velocity,
                              nominal_wavelength.to(u.angstrom),
                              self.correctedWavelength.to(u.angstrom)))
        # Per-order slices of the observation arrays.
        self.baryArray = observation.barycentricArray[self.order]
        self.fluxArray = observation.photonFluxArray[self.order]
        self.errorArray = observation.errorArray[self.order]
        # Figure out the range in wavelength space to search around the nominal
        # wavelength for the flux minimum, as well as the range to take for
        # measuring the continuum.
        search_range = velocity2wavelength(search_range_vel,
                                           self.correctedWavelength)
        self.continuumRange = velocity2wavelength(continuum_range_vel,
                                                  self.correctedWavelength)
        low_search_index = wavelength2index(self.correctedWavelength -
                                            search_range,
                                            self.baryArray)
        high_search_index = wavelength2index(self.correctedWavelength +
                                             search_range,
                                             self.baryArray)
        self.lowContinuumIndex = wavelength2index(self.correctedWavelength
                                                  - self.continuumRange,
                                                  self.baryArray)
        self.highContinuumIndex = wavelength2index(self.correctedWavelength
                                                   + self.continuumRange,
                                                   self.baryArray)
        # The line core is the flux minimum within the search window; the
        # continuum level is the flux maximum within the (wider) continuum
        # window.
        self.centralIndex = low_search_index + \
            self.fluxArray[low_search_index:high_search_index].argmin()
        self.continuumLevel = self.fluxArray[self.lowContinuumIndex:
                                             self.highContinuumIndex].max()
        self.fluxMinimum = self.fluxArray[self.centralIndex]
        self.lowFitIndex = self.centralIndex - pixel_range
        self.highFitIndex = self.centralIndex + pixel_range + 1
        # Grab the wavelengths, fluxes, and errors from the region to be fit.
        self.wavelengths = self.baryArray[self.lowFitIndex:self.highFitIndex]
        self.fluxes = self.fluxArray[self.lowFitIndex:self.highFitIndex]
        self.errors = self.errorArray[self.lowFitIndex:self.highFitIndex]
        self.lineDepth = self.continuumLevel - self.fluxMinimum
        self.normalizedLineDepth = self.lineDepth / self.continuumLevel
        # Initial parameter guess: (amplitude, mean [Å], sigma [Å],
        # continuum/baseline level) — matches the popt unpacking below.
        self.initial_guess = (self.lineDepth * -1,
                              self.correctedWavelength.to(u.angstrom).value,
                              0.05,
                              self.continuumLevel)
        if verbose:
            tqdm.write('Attempting to fit line at {:.4f} with initial guess:'.
                       format(self.correctedWavelength))
        if verbose:
            tqdm.write('Initial parameters are:\n{}\n{}\n{}\n{}'.format(
                *self.initial_guess))
        # Do the fitting:
        # NOTE(review): OptimizeWarning is a warning class; it is only
        # *raised* here if warnings are escalated to errors elsewhere —
        # confirm that callers configure this.
        try:
            if integrated:
                # Fit the integrated Gaussian using the per-pixel wavelength
                # bin edges instead of pixel-center wavelengths.
                wavelengths_lower = observation.pixelLowerArray
                wavelengths_upper = observation.pixelUpperArray
                pixel_edges_lower = wavelengths_lower[self.order,
                                                      self.lowFitIndex:
                                                      self.highFitIndex]
                pixel_edges_upper = wavelengths_upper[self.order,
                                                      self.lowFitIndex:
                                                      self.highFitIndex]
                self.popt, self.pcov = curve_fit(integrated_gaussian,
                                                 (pixel_edges_lower.value,
                                                  pixel_edges_upper.value),
                                                 self.fluxes,
                                                 sigma=self.errors,
                                                 absolute_sigma=True,
                                                 p0=self.initial_guess,
                                                 method='lm', maxfev=10000)
            else:
                self.popt, self.pcov = curve_fit(gaussian,
                                                 self.wavelengths.value,
                                                 self.fluxes,
                                                 sigma=self.errors,
                                                 absolute_sigma=True,
                                                 p0=self.initial_guess,
                                                 method='lm', maxfev=10000)
        except (OptimizeWarning, RuntimeError):
            print(self.continuumLevel)
            print(self.lineDepth)
            print(self.initial_guess)
            self.plotFit(close_up_plot_path, context_plot_path,
                         plot_fit=False, verbose=True)
            raise
        if verbose:
            print(self.popt)
            print(self.pcov)
        # Recover the fitted values for the parameters:
        self.amplitude = self.popt[0]
        self.mean = self.popt[1] * u.angstrom
        self.sigma = self.popt[2] * u.angstrom
        # An absorption feature must have a negative amplitude; a positive
        # one means the fit latched onto something else.
        if self.amplitude > 0:
            err_msg = ('Fit for'
                       f' {self.transition.wavelength.to(u.angstrom)}'
                       ' has a positive amplitude.')
            tqdm.write(err_msg)
            self.plotFit(close_up_plot_path, context_plot_path,
                         plot_fit=True, verbose=verbose)
            raise PositiveAmplitudeError(err_msg)
        # Find 1-σ errors from the covariance matrix:
        self.perr = np.sqrt(np.diag(self.pcov))
        self.amplitudeErr = self.perr[0]
        self.meanErr = self.perr[1] * u.angstrom
        # NOTE(review): meanErrVel is derived from the *unscaled* meanErr; the
        # χ²_ν inflation applied just below is not propagated into it —
        # confirm this asymmetry is intended.
        self.meanErrVel = abs(wavelength2velocity(self.mean,
                                                  self.mean +
                                                  self.meanErr))
        self.sigmaErr = self.perr[2] * u.angstrom
        # Inflate the mean error for poor fits (χ²_ν > 1).
        if (self.chiSquaredNu > 1):
            self.meanErr *= np.sqrt(self.chiSquaredNu)
        if verbose:
            tqdm.write('χ^2_ν = {}'.format(self.chiSquaredNu))
        # Find the full width at half max.
        # 2.354820 ≈ 2 * sqrt(2 * ln(2)), the relationship of FWHM to the
        # standard deviation of a Gaussian.
        self.FWHM = 2.354820 * self.sigma
        self.FWHMErr = 2.354820 * self.sigmaErr
        self.velocityFWHM = wavelength2velocity(self.mean,
                                                self.mean +
                                                self.FWHM).to(u.km/u.s)
        self.velocityFWHMErr = wavelength2velocity(self.mean,
                                                   self.mean +
                                                   self.FWHMErr).to(u.km/u.s)
        # Compute the offset between the input wavelength and the wavelength
        # found in the fit.
        self.offset = self.correctedWavelength - self.mean
        self.offsetErr = self.meanErr
        self.velocityOffset = wavelength2velocity(self.correctedWavelength,
                                                  self.mean)
        self.velocityOffsetErr = wavelength2velocity(self.mean,
                                                     self.mean +
                                                     self.offsetErr)
        if verbose:
            print(self.continuumLevel)
            print(self.fluxMinimum)
            print(self.wavelengths)
    @property
    def chiSquared(self):
        """The raw χ² statistic of the fit, computed lazily and cached.
        Compares the measured fluxes against the plain (non-integrated)
        Gaussian evaluated at the pixel-center wavelengths, weighted by the
        flux errors.
        NOTE(review): uses `gaussian` even when the fit was performed with
        `integrated_gaussian` — confirm that is intended.
        """
        # Cache on first access; the fitted parameters never change after
        # __init__, so the value need only be computed once per instance.
        if not hasattr(self, '_chiSquared'):
            residuals = self.fluxes - gaussian(self.wavelengths.value,
                                               *self.popt)
            self._chiSquared = sum((residuals / self.errors) ** 2)
        return self._chiSquared
    @property
    def chiSquaredNu(self):
        """The reduced χ² of the fit.
        The divisor is the number of degrees of freedom: 7 fitted pixels
        (pixel_range = 3 either side of the minimum, plus the minimum itself)
        minus 4 model parameters. NOTE(review): this hard-codes the value of
        ``pixel_range`` from __init__; keep the two in sync.
        """
        return self.chiSquared / 3  # ν = 7 (pixels) - 4 (params)
    @property
    def label(self):
        """str: '<transition label>_<order>' identifier, used in plot legends."""
        return self.transition.label + '_' + str(self.order)
    def getFitInformation(self):
        """Return a list of information about the fit which can be written as
        a CSV file.
        Returns
        -------
        list
            A list containing the following information about the fit:
            1. Observation date, in ISO format
            2. The amplitude of the fit (in photons)
            3. The error on the amplitude (in photons)
            4. The mean of the fit (in Å)
            5. The error on the mean (in Å)
            6. The error on the mean (in m/s in velocity space)
            7. The sigma of the fitted Gaussian (in Å)
            8. The error on the sigma (in Å)
            9. The offset from expected wavelength (in m/s)
            10. The error on the offset (in m/s)
            11. The FWHM (in velocity space)
            12. The error on the FWHM (in m/s)
            13. The chi-squared-nu value
            14. The order the fit was made on (starting at 0, so in [0, 71]).
            15. The mean airmass of the observation.
        """
        # Unit-bearing quantities are stripped to bare floats (.value) so the
        # row serializes cleanly to CSV; units are documented above.
        return [self.dateObs.isoformat(timespec='milliseconds'),
                self.amplitude,
                self.amplitudeErr,
                self.mean.value,
                self.meanErr.value,
                self.meanErrVel.value,
                self.sigma.value,
                self.sigmaErr.value,
                self.velocityOffset.to(u.m/u.s).value,
                self.velocityOffsetErr.to(u.m/u.s).value,
                self.velocityFWHM.to(u.m/u.s).value,
                self.velocityFWHMErr.to(u.m/u.s).value,
                self.chiSquaredNu,
                self.order,
                self.airmass]
def plotFit(self, close_up_plot_path=None,
context_plot_path=None,
plot_fit=True,
verbose=False):
"""Plot a graph of this fit.
This method will produce a 'close-up' plot of just the fitted region
itself, in order to check out the fit has worked out, and a wider
'context' plot of the area around the feature.
Optional
--------
close_up_plot_path : string or `pathlib.Path`
The file name to save a close-up plot of the fit to. If not given,
will default to using the value providing when initializing the
fit.
context_plot_path : string or `pathlib.Path`
The file name to save a wider context plot (±25 km/s) around the
fitted feature to. If not given, will default to using the value
provided when initializing the fit.
plot_fit : bool, Default : True
If *True*, plot the mean of the fit and the fitted Gaussian.
Otherwise, don't plot those two things. This allows creating plots
of failed fits to see the context of the data.
verbose : bool, Default : False
If *True*, the function will print out additional information as it
runs.
"""
edge_pixels = (509, 510, 1021, 1022, 1533, 1534, 2045, 2046,
2557, 2558, 3069, 3070, 3581, 3582)
# If no plot paths are given, assume we want to use the ones given
# when initializing the fit.
if close_up_plot_path is None:
close_up_plot_path = self.close_up_plot_path
if context_plot_path is None:
context_plot_path = self.context_plot_path
# Set up the figure.
fig = plt.figure(figsize=(7, 5), dpi=100, tight_layout=True)
gs = GridSpec(nrows=2, ncols=1, height_ratios=[4, 1], hspace=0)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1], sharex=ax1)
ax1.tick_params(axis='x', direction='in')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2.set_ylim(bottom=-3, top=3)
ax2.yaxis.set_major_locator(ticker.FixedLocator([-2, -1, 0, 1, 2]))
for pixel in edge_pixels:
ax1.axvline(x=self.baryArray[pixel-1], ymin=0, ymax=0.2,
color='LimeGreen',
linestyle='--')
ax1.set_ylabel('Flux (photo-electrons)')
ax2.set_xlabel('Wavelength ($\\AA$)')
ax2.set_ylabel('Residuals\n($\\sigma$)')
ax1.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.2f}'))
ax1.yaxis.set_major_formatter(ticker.StrMethodFormatter('{x:>7.1e}'))
plt.xticks(horizontalalignment='right')
ax1.set_xlim(left=self.correctedWavelength - self.continuumRange,
right=self.correctedWavelength + self.continuumRange)
# Set y-limits so a fit doesn't balloon the plot scale out.
ax1.set_ylim(top=self.continuumLevel * 1.25,
bottom=self.fluxMinimum * 0.93)
# Plot the expected and measured wavelengths.
ax1.axvline(self.correctedWavelength.to(u.angstrom),
color='LightSteelBlue', linestyle=':', alpha=0.8,
label=r'RV-corrected $\lambda=${:.3f}'.format(
self.correctedWavelength.to(u.angstrom)))
# Don't plot the mean if this is a failed fit.
if hasattr(self, 'mean') and hasattr(self, 'velocityOffset'):
ax1.axvline(self.mean.to(u.angstrom),
color='IndianRed', alpha=0.7,
label='Mean ({:.4f}, {:+.2f})'.
format(self.mean.to(u.angstrom),
self.velocityOffset.to(u.m/u.s)),
linestyle='-')
# Plot the actual data.
ax1.errorbar(self.baryArray[self.lowContinuumIndex - 1:
self.highContinuumIndex + 1],
self.fluxArray[self.lowContinuumIndex - 1:
self.highContinuumIndex + 1],
yerr=self.errorArray[self.lowContinuumIndex - 1:
self.highContinuumIndex + 1],
color='SandyBrown', ecolor='Sienna',
marker='o', markersize=5,
label='Flux', barsabove=True)
# Generate some x-values across the plot range.
x = np.linspace(self.baryArray[self.lowContinuumIndex].value,
self.baryArray[self.highContinuumIndex].value, 1000)
# Plot the initial guess for the gaussian.
ax1.plot(x, gaussian(x, *self.initial_guess),
color='SlateGray', label='Initial guess',
linestyle='--', alpha=0.5)
# Plot the fitted gaussian, unless this is a failed fit attempt.
if plot_fit:
ax1.plot(x, gaussian(x, *self.popt),
color='DarkGreen', alpha=0.5,
linestyle='-.',
label=r'Fit ($\chi^2_\nu=${:.3f}, $\sigma=${:.4f})'.
format(self.chiSquaredNu, self.sigma))
# Replace underscore in label so LaTeX won't crash on it.
ax1.legend(loc='upper center', framealpha=0.6, fontsize=9,
ncol=2,
title=self.label.replace('_', r'\_') if\
matplotlib.rcParams['text.usetex'] else self.label,
title_fontsize=10,
labelspacing=0.4)
# Add in some guidelines.
ax2.axhline(color='Gray', linestyle='-')
ax2.axhline(y=1, color='SkyBlue', linestyle='--')
ax2.axhline(y=-1, color='SkyBlue', linestyle='--')
ax2.axhline(y=2, color='LightSteelBlue', linestyle=':')
ax2.axhline(y=-2, color='LightSteelBlue', linestyle=':')
# Plot the residuals on the lower axis.
residuals = (self.fluxes - gaussian(self.wavelengths.value,
*self.popt)) / self.errors
ax2.plot(self.wavelengths, residuals, color='Navy', alpha=0.6,
linestyle='', marker='D', linewidth=1.5, markersize=5)
# Save the resultant plot.
fig.savefig(str(context_plot_path), format="png")
if verbose:
tqdm.write('Created wider context plot at {}'.format(
context_plot_path))
# Now create a close-in version to focus on the fit.
ax1.set_xlim(left=self.baryArray[self.lowFitIndex - 1],
right=self.baryArray[self.highFitIndex])
ax1.set_ylim(top=self.fluxes.max() * 1.15,
bottom=self.fluxes.min() * 0.95)
fig.savefig(str(close_up_plot_path), format="png")
if verbose:
tqdm.write('Created close up plot at {}'.format(
close_up_plot_path))
plt.close(fig)
| 399 | 0 | 78 |
2eeecc1ae92440be4d5d536b2ed2e1800ffff7f5 | 249 | py | Python | ml/frame/tensorflow/linear-regression.py | zqsheng/snippet | cb14300fc62c616d48e6552ad93c6d33b5e8c9a1 | [
"Apache-2.0"
] | 1 | 2018-09-10T11:31:33.000Z | 2018-09-10T11:31:33.000Z | ml/frame/tensorflow/linear-regression.py | zqsheng/snippet | cb14300fc62c616d48e6552ad93c6d33b5e8c9a1 | [
"Apache-2.0"
] | null | null | null | ml/frame/tensorflow/linear-regression.py | zqsheng/snippet | cb14300fc62c616d48e6552ad93c6d33b5e8c9a1 | [
"Apache-2.0"
] | null | null | null | import tensorflow as try:
import numpy as np
x_data = np.float32(np.random.rand(2,100))
y_data = np.dot([0.100,0.200],x_data) + 0.300
b = tf.Varible(tf.zerso([1]))
W = tf.Varible(tf.random_uniform([1,2],-1.0,1.0))
y = tf.matmul(W,x_data) + b
| 31.125 | 50 | 0.654618 | import tensorflow as try:
import numpy as np
x_data = np.float32(np.random.rand(2,100))
y_data = np.dot([0.100,0.200],x_data) + 0.300
b = tf.Varible(tf.zerso([1]))
W = tf.Varible(tf.random_uniform([1,2],-1.0,1.0))
y = tf.matmul(W,x_data) + b
| 0 | 0 | 0 |
1a47e16bbc1c9a27bb44a250b95b6dc46f70cbad | 2,814 | py | Python | scvae/analyses/metrics/summary.py | chgroenbech/deep-learning-for-single-cell-transcriptomics | d6148efabfb12eda8bd1b895e1bb72f592e39ab0 | [
"Apache-2.0"
] | 46 | 2019-06-05T14:17:12.000Z | 2022-02-02T22:15:52.000Z | scvae/analyses/metrics/summary.py | chgroenbech/deep-learning-for-single-cell-transcriptomics | d6148efabfb12eda8bd1b895e1bb72f592e39ab0 | [
"Apache-2.0"
] | 12 | 2019-07-17T05:24:15.000Z | 2021-08-17T23:02:06.000Z | scvae/analyses/metrics/summary.py | chgroenbech/deep-learning-for-single-cell-transcriptomics | d6148efabfb12eda8bd1b895e1bb72f592e39ab0 | [
"Apache-2.0"
] | 13 | 2017-03-03T02:56:20.000Z | 2019-04-17T18:13:42.000Z | # ======================================================================== #
#
# Copyright (c) 2017 - 2020 scVAE authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================== #
import numpy
from scvae.data.sparse import sparsity
from scvae.data.utilities import standard_deviation
MAXIMUM_NUMBER_OF_VALUES_FOR_NORMAL_STATISTICS_COMPUTATION = 5e8
| 29.93617 | 77 | 0.613717 | # ======================================================================== #
#
# Copyright (c) 2017 - 2020 scVAE authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================== #
import numpy
from scvae.data.sparse import sparsity
from scvae.data.utilities import standard_deviation
MAXIMUM_NUMBER_OF_VALUES_FOR_NORMAL_STATISTICS_COMPUTATION = 5e8
def summary_statistics(x, name="", tolerance=1e-3, skip_sparsity=False):
    """Compute summary statistics (mean, spread, range, sparsity) for ``x``.

    Parameters
    ----------
    x : array-like exposing ``size``, ``mean``, ``min`` and ``max``
        The values to summarise (e.g. a NumPy array).
    name : str, optional
        Label stored under the ``"name"`` key of the result.
    tolerance : float, optional
        Zero-threshold forwarded to :func:`sparsity`.
    skip_sparsity : bool, optional
        When true, ``"sparsity"`` is reported as NaN instead of computed.

    Returns
    -------
    dict
        Keys: ``name``, ``mean``, ``standard deviation``, ``minimum``,
        ``maximum``, ``dispersion`` (variance over mean) and ``sparsity``.
    """
    # Very large arrays are handed to the helpers in batches to bound memory
    # use; smaller ones are processed in one go (batch_size=None).
    batch_size = (1000 if x.size >
                  MAXIMUM_NUMBER_OF_VALUES_FOR_NORMAL_STATISTICS_COMPUTATION
                  else None)
    mean_value = x.mean()
    std_value = standard_deviation(x, ddof=1, batch_size=batch_size)
    sparsity_value = (numpy.nan if skip_sparsity
                      else sparsity(x, tolerance=tolerance,
                                    batch_size=batch_size))
    return {
        "name": name,
        "mean": mean_value,
        "standard deviation": std_value,
        "minimum": x.min(),
        "maximum": x.max(),
        "dispersion": std_value**2 / mean_value,
        "sparsity": sparsity_value
    }
def format_summary_statistics(statistics_sets, name="Data set"):
    """Render one or more statistics dictionaries as an aligned text table.

    Parameters
    ----------
    statistics_sets : dict or list of dict
        Dictionaries as produced by :func:`summary_statistics`; a single
        dictionary is accepted and treated as a one-element list.
    name : str, optional
        Heading for the first (name) column.

    Returns
    -------
    str
        A newline-joined table: one heading row plus one row per data set.
    """
    if not isinstance(statistics_sets, list):
        statistics_sets = [statistics_sets]
    # The first column is as wide as the longest data-set name (or heading).
    name_width = len(name)
    for entry in statistics_sets:
        name_width = max(name_width, len(entry["name"]))
    header_cells = [
        "{:{}}".format(name, name_width),
        " mean ", "std. dev. ", "dispersion",
        " minimum ", " maximum ", "sparsity"
    ]
    rows = [" ".join(header_cells)]
    for entry in statistics_sets:
        cells = [
            "{:{}}".format(entry["name"], name_width),
            "{:<9.5g}".format(entry["mean"]),
            "{:<9.5g}".format(entry["standard deviation"]),
            "{:<9.5g}".format(entry["dispersion"]),
            "{:<11.5g}".format(entry["minimum"]),
            "{:<11.5g}".format(entry["maximum"]),
            "{:<7.5g}".format(entry["sparsity"]),
        ]
        rows.append(" ".join(cells))
    return "\n".join(rows)
| 1,849 | 0 | 46 |
4719b21b2b436532c4686a4d45cf8eacaf7caa00 | 7,800 | py | Python | apiai_assistant/agent.py | toasterco/apiaiassistant | 7f3f0693c4c5aa9f1fd4486f85ebe05080505dc8 | [
"MIT"
] | 6 | 2017-08-10T16:08:03.000Z | 2018-08-03T23:36:20.000Z | apiai_assistant/agent.py | toasterco/apiaiassistant | 7f3f0693c4c5aa9f1fd4486f85ebe05080505dc8 | [
"MIT"
] | 1 | 2018-03-23T14:12:36.000Z | 2018-03-23T15:40:33.000Z | apiai_assistant/agent.py | toasterco/apiaiassistant | 7f3f0693c4c5aa9f1fd4486f85ebe05080505dc8 | [
"MIT"
] | null | null | null | """ API.ai Agent
This module provides a Agent class to be used within an Assistant class
implementation to be able to interact with the user through the agent """
from . import utils
from . import parser
from . import widgets
Status = utils.enum(
'OK',
'GenericError', 'InvalidData', 'NotImplemented',
'Aborted', 'AccessDenied')
""" :obj:`apiai_assistant.utils.enum`: statuses of the agent """
class Agent(object):
    """Drives the conversation with the user for one API.ai request.

    Exposes helpers to speak, ask, suggest and attach contexts, building up
    the response as it goes.

    Args:
        corpus (:obj:`apiai_assistant.corpus.Corpus`): Corpus to get the outputs from
        request (:obj:`dict`, optional): API.ai request
        ssml (boolean, optional, True): if True, will format speech to support SSML
    """
    SupportedPermissions = utils.enum(
        'NAME', 'COARSE_LOCATION', 'PRECISE_LOCATION')
    """ :obj:`apiai_assistant.utils.enum`: permissions supported by the agent """

    def tell(self, corpus_id, context=None):
        """Speak the corpus entry ``corpus_id`` and close the microphone.

        Args:
            corpus_id (str): ID of the output to tell
            context (:obj:`dict`, optional): format arguments for the output
        """
        speech = self.corpus[corpus_id]
        self.tell_raw(speech if context is None else speech.format(**context))

    def ask(self, corpus_id, context=None):
        """Speak the corpus entry ``corpus_id`` and keep the microphone open.

        Args:
            corpus_id (str): ID of the output to ask
            context (:obj:`dict`, optional): format arguments for the output
        """
        speech = self.corpus[corpus_id]
        self.ask_raw(speech if context is None else speech.format(**context))

    def suggest(self, corpus_id):
        """Show the suggestion chips registered under ``corpus_id``.

        Falls back to the corpus-wide default suggestions when the ID is
        unknown; does nothing when there is nothing to suggest.

        Args:
            corpus_id (str): ID of the suggestions to show
        """
        choices = self.corpus.get(corpus_id, self.corpus.suggestions)
        if not choices:
            return
        self.suggest_raw(choices)

    def tell_raw(self, speech, text=None):
        """Add a closing simple response built from raw speech/text.

        Args:
            speech (str): speech to tell
            text (str, optional): text to tell, if None, speech will be used
        """
        self.response.close_mic()
        self.show(widgets.SimpleResponseWidget(speech, text, ssml=self._ssml))

    def ask_raw(self, speech, text=None):
        """Add a mic-open simple response built from raw speech/text.

        Args:
            speech (str): speech to ask
            text (str, optional): text to ask, if None, speech will be used
        """
        self.response.open_mic()
        self.show(widgets.SimpleResponseWidget(speech, text, ssml=self._ssml))

    def suggest_raw(self, suggestions):
        """Add suggestion chips for the given value(s).

        Args:
            suggestions (:obj:`list`): suggestions
        """
        # A single (non-list) value is wrapped; the exact `type` comparison is
        # kept to preserve the original semantics for list subclasses.
        if type(suggestions) != list:
            suggestions = [suggestions]
        self.show(widgets.SuggestionsWidget(suggestions))

    def show(self, obj):
        """Render a rich-response widget and append it to the response."""
        self.response.add_message(obj.render())

    def add_context(self, context_name, parameters=None, lifespan=5):
        """Attach an API.ai context to the response.

        Args:
            context_name (str): name of the context to add
            parameters (:obj:`dict`, optional): parameters of the context
            lifespan (:obj:`int`, optional, 5): lifespan of the context
        """
        payload = {
            'name': context_name,
            'lifespan': lifespan,
            'parameters': parameters or {}
        }
        self.response.add_context(payload)
class Response(object):
""" Abstraction to build API.ai compatible responses """
PERMISSIONS = {
Agent.SupportedPermissions.NAME: 'NAME',
Agent.SupportedPermissions.COARSE_LOCATION: 'DEVICE_COARSE_LOCATION',
Agent.SupportedPermissions.PRECISE_LOCATION: 'DEVICE_PRECISE_LOCATION'
}
@property
| 29.323308 | 89 | 0.600256 | """ API.ai Agent
This module provides a Agent class to be used within an Assistant class
implementation to be able to interact with the user through the agent """
from . import utils
from . import parser
from . import widgets
Status = utils.enum(
'OK',
'GenericError', 'InvalidData', 'NotImplemented',
'Aborted', 'AccessDenied')
""" :obj:`apiai_assistant.utils.enum`: statuses of the agent """
class Agent(object):
    """
    Provides methods to instruct the agent on how to respond to user queries

    Args:
        corpus (:obj:`apiai_assistant.corpus.Corpus`): Corpus to get the outputs from
        request (:obj:`dict`, optional): API.ai request
        ssml (boolean, optional, True): if True, will format speech to support SSML
    """
    SupportedPermissions = utils.enum(
        'NAME', 'COARSE_LOCATION', 'PRECISE_LOCATION')
    """ :obj:`apiai_assistant.utils.enum`: permissions supported by the agent """

    def __init__(self, corpus=None, request=None, ssml=True, *args, **kwargs):
        self.code = Status.OK
        self.error_message = None
        self._ssml = ssml
        self.corpus = corpus
        self.parser = None
        if request:
            self.parser = parser.GoogleAssistantParser(request)
        self.response = Response()

    def __repr__(self):
        """Show the agent's status and, when not OK, its error message."""
        return '<Agent: ({}{})>'.format(
            Status.by_value.get(self.code),
            ' - {}'.format(self.error_message) if self.code != Status.OK else ''
        )

    def abort(self, reason):
        """Put the agent in the Aborted state and abort the response.

        Args:
            reason (str): human-readable reason for aborting
        """
        self.code = Status.Aborted
        self.error_message = reason
        self.response.abort(self.error_message, self.code)

    def error(self, error_message, code=Status.GenericError):
        """Put the agent in an error state and abort the response.

        Args:
            error_message (str): description of the error
            code (optional): error status, defaults to ``Status.GenericError``
        """
        self.code = code
        self.error_message = error_message
        # Pass the code through so the response carries the same status as
        # the agent (previously the response always fell back to the default
        # code, unlike abort() above).
        self.response.abort(error_message, code)

    def tell(self, corpus_id, context=None):
        """
        Looks for the output id in the corpus and formats with the context
        Args:
            corpus_id (str): ID of the output to tell
            context (:obj:`dict`, optional): context to format the output with
        """
        output = self.corpus[corpus_id]
        if context is not None:
            output = output.format(**context)
        self.tell_raw(output)

    def ask(self, corpus_id, context=None):
        """
        Looks for the output id in the corpus and formats with the context
        Args:
            corpus_id (str): ID of the output to ask
            context (:obj:`dict`, optional): context to format the output with
        """
        output = self.corpus[corpus_id]
        if context is not None:
            output = output.format(**context)
        self.ask_raw(output)

    def suggest(self, corpus_id):
        """
        Looks for the output id in the corpus to suggest
        Args:
            corpus_id (str): ID of the suggestions to show
        """
        suggestions = self.corpus.get(corpus_id, self.corpus.suggestions)
        if suggestions:
            self.suggest_raw(suggestions)

    def tell_raw(self, speech, text=None):
        """
        Tells the user by adding the speech and/or text to the response's messages
        Args:
            speech (str): speech to tell
            text (str, optional): text to tell, if None, speech will be used
        """
        self.response.close_mic()
        widget = widgets.SimpleResponseWidget(speech, text, ssml=self._ssml)
        self.show(widget)

    def ask_raw(self, speech, text=None):
        """
        Asks the user by adding the speech and/or text to the response's messages
        Args:
            speech (str): speech to ask
            text (str, optional): text to ask, if None, speech will be used
        """
        self.response.open_mic()
        widget = widgets.SimpleResponseWidget(speech, text, ssml=self._ssml)
        self.show(widget)

    def suggest_raw(self, suggestions):
        """
        Suggests the user by adding the suggestions to the response's messages
        Args:
            suggestions (:obj:`list`): suggestions
        """
        # isinstance instead of an exact type() comparison: idiomatic, and
        # also accepts list subclasses without re-wrapping them.
        if not isinstance(suggestions, list):
            suggestions = [suggestions]
        suggestion_widget = widgets.SuggestionsWidget(suggestions)
        self.show(suggestion_widget)

    def show(self, obj):
        """
        Renders a rich response widget and adds it to the response's messages
        """
        message = obj.render()
        self.response.add_message(message)

    def add_context(self, context_name, parameters=None, lifespan=5):
        """
        Adds a context to the response's contexts
        Args:
            context_name (str): name of the context to add
            parameters (:obj:`dict`, optional): parameters of the context
            lifespan (:obj:`int`, optional, 5): lifespan of the context
        """
        self.response.add_context({
            'name': context_name,
            'lifespan': lifespan,
            'parameters': parameters or {}
        })

    def ask_for_permissions(self, reason, permissions):
        """Queue a user-permission request on the response.

        Args:
            reason (str): user-facing reason for the request
            permissions (:obj:`list`): SupportedPermissions values
        """
        self.response.add_permission(reason, permissions)

    def ask_for_confirmation(self, corpus_id):
        """Ask a corpus question and suggest the corpus confirmation choices.

        Args:
            corpus_id (str): ID of the question to ask
        """
        self.ask(corpus_id)
        self.suggest_raw(self.corpus.get_confirmation())

    def ask_for_confirmation_raw(self, question):
        """Ask a literal question and suggest the corpus confirmation choices.

        Args:
            question (str): question to ask verbatim
        """
        self.ask_raw(question)
        self.suggest_raw(self.corpus.get_confirmation())
class Response(object):
    """ Abstraction to build API.ai compatible responses """

    # Translation table from agent permission enum values to the strings the
    # Actions on Google payload expects.
    PERMISSIONS = {
        Agent.SupportedPermissions.NAME: 'NAME',
        Agent.SupportedPermissions.COARSE_LOCATION: 'DEVICE_COARSE_LOCATION',
        Agent.SupportedPermissions.PRECISE_LOCATION: 'DEVICE_PRECISE_LOCATION'
    }

    def __init__(self):
        self.code = Status.OK
        self.error_message = None
        self.expect_user_response = False
        self._messages = [self.initial_message]
        self._permissions = []
        self._contexts = []

    @property
    def initial_message(self):
        """The empty first message every response starts with."""
        return {'type': 0, 'speech': ''}

    def abort(self, error_message, code=Status.GenericError):
        """Record an error status and message on the response."""
        self.code = code
        self.error_message = error_message

    def close_mic(self):
        """Mark that no user reply is expected."""
        self.expect_user_response = False

    def open_mic(self):
        """Mark that a user reply is expected."""
        self.expect_user_response = True

    def add_message(self, message, position=None):
        """Append a message, or insert it at ``position`` when given."""
        if position is None:
            self._messages.append(message)
        else:
            self._messages.insert(position, message)

    def add_context(self, context, position=None):
        """Append a context, or insert it at ``position`` when given."""
        if position is None:
            self._contexts.append(context)
        else:
            self._contexts.insert(position, context)

    def add_permission(self, reason, permissions):
        """Queue a (reason, translated permission names) request."""
        translated = [self.PERMISSIONS[p] for p in permissions]
        self._permissions.append((reason, translated))

    def to_dict(self):
        """Serialize the response into an API.ai compatible payload."""
        if self.code != Status.OK:
            return {'error': '400'}
        google_data = {'expectUserResponse': self.expect_user_response}
        payload = {
            'messages': self._messages,
            'data': {'google': google_data}
        }
        if self._contexts:
            payload['contextOut'] = self._contexts
        if self._permissions:
            # Only the first request's reason is surfaced; the permission
            # names from all requests are merged (deduplicated).
            reason = self._permissions[0][0]
            permissions = list(
                {p for _, perms in self._permissions for p in perms})
            google_data['systemIntent'] = {
                "intent": "actions.intent.PERMISSION",
                "data": {
                    "@type": "type.googleapis.com/google.actions.v2.PermissionValueSpec",
                    "optContext": reason,
                    "permissions": permissions
                }
            }
        return payload
| 2,915 | 0 | 431 |
68f7471865a941dae58f133032cb209c776bb294 | 1,090 | py | Python | user/tests.py | peterpalace/django-rest-boilerplate | b5611f017d736f8a837ddabdb98d5180f66efccf | [
"MIT"
] | null | null | null | user/tests.py | peterpalace/django-rest-boilerplate | b5611f017d736f8a837ddabdb98d5180f66efccf | [
"MIT"
] | null | null | null | user/tests.py | peterpalace/django-rest-boilerplate | b5611f017d736f8a837ddabdb98d5180f66efccf | [
"MIT"
] | null | null | null | from django.db import IntegrityError
from django.test import TestCase
from .models import Profile, User
class UserTest(TestCase):
""" Test module for Puppy model """
| 25.348837 | 63 | 0.568807 | from django.db import IntegrityError
from django.test import TestCase
from .models import Profile, User
class UserTest(TestCase):
    """ Tests for the custom User model and its auto-created Profile. """
    def setUp(self):
        # A fully specified user every test can look up by email.
        User.objects.create(
            email='mario.rossi@gmail.com',
            password="password",
            first_name="mario",
            last_name="rossi"
        )
    def test_user_create(self):
        # Creating a user without first/last name must raise IntegrityError.
        # NOTE(review): presumably a non-null constraint on the name fields —
        # confirm against the User model definition.
        self.assertRaises(
            IntegrityError,
            User.objects.create,
            email='lucia.rossi@gmail.com',
            password="password",
        )
    def test_profile_created(self):
        # A Profile should be attached automatically when a user is created.
        mario = User.objects.get(email="mario.rossi@gmail.com")
        self.assertEqual(
            mario.email, "mario.rossi@gmail.com"
        )
        self.assertIsNotNone(
            mario.profile
        )
    def test_profile_slug_created(self):
        # The profile slug should be populated and non-empty.
        mario = User.objects.get(email="mario.rossi@gmail.com")
        self.assertIsNotNone(
            mario.profile.slug
        )
        # NOTE(review): assertIsNot checks object identity, not equality, so
        # this can pass for an empty string built at runtime —
        # assertNotEqual is probably what was intended.
        self.assertIsNot(
            mario.profile.slug,
            ""
        )
| 810 | 0 | 108 |
e49d59ec8713105b07ff2892eb05d7a83f295de3 | 5,501 | py | Python | dearpygui_obj/window.py | Amorano/DearPyGui-Obj | 3b8ac1713c9b4ba126bb2b8b57ae2adfa54f2f7b | [
"MIT"
] | null | null | null | dearpygui_obj/window.py | Amorano/DearPyGui-Obj | 3b8ac1713c9b4ba126bb2b8b57ae2adfa54f2f7b | [
"MIT"
] | null | null | null | dearpygui_obj/window.py | Amorano/DearPyGui-Obj | 3b8ac1713c9b4ba126bb2b8b57ae2adfa54f2f7b | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import TYPE_CHECKING
import dearpygui.core as dpgcore
from dearpygui_obj import _register_item_type, wrap_callback
from dearpygui_obj.wrapper.widget import Widget, ItemWidget, ConfigProperty
if TYPE_CHECKING:
from typing import Optional, Tuple, Callable
from dearpygui_obj import PyGuiCallback
from dearpygui_obj.wrapper.widget import ItemConfigData
class MainWindow:
"""Container for static functions used to manipulate the main window.
Attempting to instantiate this class will raise a :class:`TypeError`.
"""
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
def set_primary_window(window: Optional[Window]) -> None:
"""Set a window as the primary window, or remove the primary window.
When a window is set as the primary window it will fill the entire viewport.
If any other window was already set as the primary window, it will be unset.
"""
if window is not None:
dpgcore.set_primary_window(window.id, True)
else:
dpgcore.set_primary_window('', False)
@staticmethod
def set_resize_callback(callback: Callable):
"""Set a callback for when the main viewport is resized."""
dpgcore.set_resize_callback(callback, handler='')
@staticmethod
def enable_docking(**kwargs):
"""Enable docking and set docking options.
Note:
Once docking is enabled, it cannot be disabled.
Keyword Arguments:
shift_only: if ``True``, hold down shift for docking.
If ``False``, dock by dragging window titlebars.
dock_space: if ``True``, windows will be able to dock
with the main window viewport.
"""
dpgcore.enable_docking(**kwargs)
@_register_item_type('mvAppItemType::Window')
class Window(Widget):
"""Creates a new window."""
label: str = ConfigProperty()
x_pos: int = ConfigProperty()
y_pos: int = ConfigProperty()
autosize: bool = ConfigProperty()
no_resize: bool = ConfigProperty()
no_title_bar: bool = ConfigProperty()
no_move: bool = ConfigProperty()
no_collapse: bool = ConfigProperty()
no_focus_on_appearing: bool = ConfigProperty()
no_bring_to_front_on_focus: bool = ConfigProperty()
no_close: bool = ConfigProperty()
no_background: bool = ConfigProperty()
show_menubar: bool = ConfigProperty(key='menubar')
#: Disable scrollbars (can still scroll with mouse or programmatically).
no_scrollbar: bool = ConfigProperty()
#: Allow horizontal scrollbar to appear.
horizontal_scrollbar: bool = ConfigProperty()
pos: Tuple[int, int]
@ConfigProperty()
def pos(self) -> Tuple[int, int]:
"""Get or set (x_pos, y_pos) as a tuple."""
config = self.get_config()
return config['x_pos'], config['y_pos']
@pos.getconfig
def __init__(self, label: str = None, *, name_id: str = None, **config):
"""
Parameters:
label: window label.
"""
super().__init__(label=label, name_id=name_id, **config)
## workaround for the fact that you can't set the on_close callback in DPG
_on_close_callback: Optional[Callable] = None
def on_close(self, callback: Optional[PyGuiCallback]) -> Callable:
"""Set on_close callback, can be used as a decorator."""
if callback is not None:
callback = wrap_callback(callback)
self._on_close_callback = callback
return callback
def resized(self, callback: PyGuiCallback) -> Callable:
"""Set resized callback, can be used as a decorator."""
dpgcore.set_resize_callback(wrap_callback(callback), handler=self.id)
return callback
## Menu Bars and Menus
@_register_item_type('mvAppItemType::MenuBar')
class MenuBar(Widget, ItemWidget):
"""A menu bar that can be added to a :class:`.Window`."""
__all__ = [
'MainWindow',
'Window',
'MenuBar',
] | 31.614943 | 84 | 0.663334 | from __future__ import annotations
from typing import TYPE_CHECKING
import dearpygui.core as dpgcore
from dearpygui_obj import _register_item_type, wrap_callback
from dearpygui_obj.wrapper.widget import Widget, ItemWidget, ConfigProperty
if TYPE_CHECKING:
from typing import Optional, Tuple, Callable
from dearpygui_obj import PyGuiCallback
from dearpygui_obj.wrapper.widget import ItemConfigData
class MainWindow:
    """Container for static functions used to manipulate the main window.
    Attempting to instantiate this class will raise a :class:`TypeError`.
    """
    def __new__(cls):
        raise TypeError('this class may not be instantiated')

    @staticmethod
    def set_title(title: str) -> None:
        """Set the main window's title text."""
        dpgcore.set_main_window_title(title)

    @staticmethod
    def set_pos(x: int, y: int) -> None:
        """Move the main window to (x, y) screen co-ordinates."""
        dpgcore.set_main_window_pos(x, y)

    @staticmethod
    def allow_resize(enabled: bool):
        """Enable or disable resizing of the main window."""
        dpgcore.set_main_window_resizable(enabled)

    @staticmethod
    def set_size(width: int, height: int):
        """Resize the main window."""
        dpgcore.set_main_window_size(width, height)

    @staticmethod
    def get_size() -> Tuple[int, int]:
        """Return the main window size as a (width, height) tuple."""
        return tuple(dpgcore.get_main_window_size())

    @staticmethod
    def set_primary_window(window: Optional[Window]) -> None:
        """Set a window as the primary window, or remove the primary window.
        When a window is set as the primary window it will fill the entire viewport.
        If any other window was already set as the primary window, it will be unset.
        """
        if window is None:
            dpgcore.set_primary_window('', False)
        else:
            dpgcore.set_primary_window(window.id, True)

    @staticmethod
    def set_resize_callback(callback: Callable):
        """Set a callback for when the main viewport is resized."""
        dpgcore.set_resize_callback(callback, handler='')

    @staticmethod
    def enable_docking(**kwargs):
        """Enable docking and set docking options.
        Note:
            Once docking is enabled, it cannot be disabled.
        Keyword Arguments:
            shift_only: if ``True``, hold down shift for docking.
                If ``False``, dock by dragging window titlebars.
            dock_space: if ``True``, windows will be able to dock
                with the main window viewport.
        """
        dpgcore.enable_docking(**kwargs)
@_register_item_type('mvAppItemType::Window')
class Window(Widget):
    """Creates a new window."""
    # NOTE(review): ConfigProperty appears to proxy a key of the underlying
    # DPG item configuration (see key='menubar' below) — confirm in
    # dearpygui_obj.wrapper.widget.
    label: str = ConfigProperty()
    x_pos: int = ConfigProperty()
    y_pos: int = ConfigProperty()
    autosize: bool = ConfigProperty()
    no_resize: bool = ConfigProperty()
    no_title_bar: bool = ConfigProperty()
    no_move: bool = ConfigProperty()
    no_collapse: bool = ConfigProperty()
    no_focus_on_appearing: bool = ConfigProperty()
    no_bring_to_front_on_focus: bool = ConfigProperty()
    no_close: bool = ConfigProperty()
    no_background: bool = ConfigProperty()
    show_menubar: bool = ConfigProperty(key='menubar')
    #: Disable scrollbars (can still scroll with mouse or programmatically).
    no_scrollbar: bool = ConfigProperty()
    #: Allow horizontal scrollbar to appear.
    horizontal_scrollbar: bool = ConfigProperty()
    pos: Tuple[int, int]
    @ConfigProperty()
    def pos(self) -> Tuple[int, int]:
        """Get or set (x_pos, y_pos) as a tuple."""
        config = self.get_config()
        return config['x_pos'], config['y_pos']
    @pos.getconfig
    def pos(self, value: Tuple[int, int]) -> ItemConfigData:
        """Translate an (x, y) tuple into the DPG config keys."""
        width, height = value
        return {'x_pos': width, 'y_pos' : height}
    def __init__(self, label: str = None, *, name_id: str = None, **config):
        """
        Parameters:
            label: window label.
        """
        super().__init__(label=label, name_id=name_id, **config)
    def _setup_add_widget(self, dpg_args) -> None:
        # Wire our own close handler so on_close() can be set after creation.
        dpgcore.add_window(self.id, on_close=self._on_close, **dpg_args)
    def __enter__(self) -> Window:
        # Context manager: widgets created inside the `with` are parented here.
        return self
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # Close the DPG container so subsequent items attach to the parent.
        dpgcore.end()
    ## workaround for the fact that you can't set the on_close callback in DPG
    _on_close_callback: Optional[Callable] = None
    def _on_close(self, sender, data) -> None:
        # Internal DPG hook; forwards to the user-supplied callback, if any.
        if self._on_close_callback is not None:
            self._on_close_callback(sender, data)
    def on_close(self, callback: Optional[PyGuiCallback]) -> Callable:
        """Set on_close callback, can be used as a decorator."""
        if callback is not None:
            callback = wrap_callback(callback)
        self._on_close_callback = callback
        return callback
    def resized(self, callback: PyGuiCallback) -> Callable:
        """Set resized callback, can be used as a decorator."""
        dpgcore.set_resize_callback(wrap_callback(callback), handler=self.id)
        return callback
## Menu Bars and Menus
@_register_item_type('mvAppItemType::MenuBar')
class MenuBar(Widget, ItemWidget):
    """A menu bar that can be added to a :class:`.Window`."""
    def __init__(self, *, name_id: str = None, **config):
        """
        Parameters:
            name_id: optional explicit widget id.
        """
        super().__init__(name_id=name_id, **config)
    def _setup_add_widget(self, dpg_args) -> None:
        # Create the underlying DPG menu bar item.
        dpgcore.add_menu_bar(self.id, **dpg_args)
    def __enter__(self) -> MenuBar:
        # Context manager: menus created inside the `with` become children.
        return self
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # Close the DPG container so following items attach to the parent.
        dpgcore.end()
__all__ = [
'MainWindow',
'Window',
'MenuBar',
] | 1,030 | 0 | 398 |
a1eb9067849ddd61a632ee6a8f3c26705651e8eb | 3,543 | py | Python | tensorflow/python/ipu/__init__.py | DebeshJha/tensorflow-1 | 2b5a225c49d25273532d11c424d37ce394d7579a | [
"Apache-2.0"
] | 2 | 2021-03-08T23:32:06.000Z | 2022-01-13T03:43:49.000Z | tensorflow/python/ipu/__init__.py | DebeshJha/tensorflow-1 | 2b5a225c49d25273532d11c424d37ce394d7579a | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ipu/__init__.py | DebeshJha/tensorflow-1 | 2b5a225c49d25273532d11c424d37ce394d7579a | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Operations and utilities related to the Graphcore IPU
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ipu.ops import all_to_all_op
from tensorflow.python.ipu.ops import all_to_all_op_grad
from tensorflow.python.ipu.ops import custom_ops
from tensorflow.python.ipu.ops import cross_replica_ops
from tensorflow.python.ipu.ops import embedding_ops
from tensorflow.python.ipu.ops import embedding_ops_grad
from tensorflow.python.ipu.ops import functional_ops
from tensorflow.python.ipu.ops import functional_ops_grad
from tensorflow.python.ipu.ops import internal_ops
from tensorflow.python.ipu.ops import internal_ops_grad
from tensorflow.python.ipu.ops import math_ops
from tensorflow.python.ipu.ops import nn_ops
from tensorflow.python.ipu.ops import nn_ops_grad
from tensorflow.python.ipu.ops import normalization_ops
from tensorflow.python.ipu.ops import normalization_ops_grad
from tensorflow.python.ipu.ops import pipelining_ops
from tensorflow.python.ipu.ops import pipelining_ops_grad
from tensorflow.python.ipu.ops import rand_ops
from tensorflow.python.ipu.ops import rand_ops_grad
from tensorflow.python.ipu.ops import reduce_scatter_op
from tensorflow.python.ipu.ops import replication_ops
from tensorflow.python.ipu.ops import rnn_ops
from tensorflow.python.ipu.ops import rnn_ops_grad
from tensorflow.python.ipu.ops import summary_ops
from tensorflow.python.ipu.ops.experimental import popfloat_cast_to_gfloat
from tensorflow.python.ipu import autoshard
from tensorflow.python.ipu import autoshard_cnn
from tensorflow.python.ipu import data
from tensorflow.python.ipu import dataset_benchmark
from tensorflow.python.ipu import ipu_compiler
from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import ipu_multi_worker_strategy
from tensorflow.python.ipu import ipu_outfeed_queue
from tensorflow.python.ipu import ipu_run_config
from tensorflow.python.ipu import ipu_session_run_hooks
from tensorflow.python.ipu import ipu_strategy
from tensorflow.python.ipu import loops
from tensorflow.python.ipu import scopes
from tensorflow.python.ipu import sharding
from tensorflow.python.ipu import utils
from tensorflow.python.ipu import ipu_estimator
from tensorflow.python.ipu import ipu_pipeline_estimator
from tensorflow.python.ipu import vertex_edsl
from tensorflow.python.ipu.keras import layers
from tensorflow.python.ipu.optimizers import cross_replica_optimizer
from tensorflow.python.ipu.optimizers import map_gradient_optimizer
from tensorflow.python.ipu.optimizers import sharded_optimizer
from tensorflow.python.ipu.optimizers import gradient_accumulation_optimizer
# Expose functional_ops.function as ipu.function
from tensorflow.python.ipu.ops.functional_ops import function
# pylint: enable=wildcard-import,unused-import
| 45.423077 | 79 | 0.815975 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Operations and utilities related to the Graphcore IPU
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ipu.ops import all_to_all_op
from tensorflow.python.ipu.ops import all_to_all_op_grad
from tensorflow.python.ipu.ops import custom_ops
from tensorflow.python.ipu.ops import cross_replica_ops
from tensorflow.python.ipu.ops import embedding_ops
from tensorflow.python.ipu.ops import embedding_ops_grad
from tensorflow.python.ipu.ops import functional_ops
from tensorflow.python.ipu.ops import functional_ops_grad
from tensorflow.python.ipu.ops import internal_ops
from tensorflow.python.ipu.ops import internal_ops_grad
from tensorflow.python.ipu.ops import math_ops
from tensorflow.python.ipu.ops import nn_ops
from tensorflow.python.ipu.ops import nn_ops_grad
from tensorflow.python.ipu.ops import normalization_ops
from tensorflow.python.ipu.ops import normalization_ops_grad
from tensorflow.python.ipu.ops import pipelining_ops
from tensorflow.python.ipu.ops import pipelining_ops_grad
from tensorflow.python.ipu.ops import rand_ops
from tensorflow.python.ipu.ops import rand_ops_grad
from tensorflow.python.ipu.ops import reduce_scatter_op
from tensorflow.python.ipu.ops import replication_ops
from tensorflow.python.ipu.ops import rnn_ops
from tensorflow.python.ipu.ops import rnn_ops_grad
from tensorflow.python.ipu.ops import summary_ops
from tensorflow.python.ipu.ops.experimental import popfloat_cast_to_gfloat
from tensorflow.python.ipu import autoshard
from tensorflow.python.ipu import autoshard_cnn
from tensorflow.python.ipu import data
from tensorflow.python.ipu import dataset_benchmark
from tensorflow.python.ipu import ipu_compiler
from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import ipu_multi_worker_strategy
from tensorflow.python.ipu import ipu_outfeed_queue
from tensorflow.python.ipu import ipu_run_config
from tensorflow.python.ipu import ipu_session_run_hooks
from tensorflow.python.ipu import ipu_strategy
from tensorflow.python.ipu import loops
from tensorflow.python.ipu import scopes
from tensorflow.python.ipu import sharding
from tensorflow.python.ipu import utils
from tensorflow.python.ipu import ipu_estimator
from tensorflow.python.ipu import ipu_pipeline_estimator
from tensorflow.python.ipu import vertex_edsl
from tensorflow.python.ipu.keras import layers
from tensorflow.python.ipu.optimizers import cross_replica_optimizer
from tensorflow.python.ipu.optimizers import map_gradient_optimizer
from tensorflow.python.ipu.optimizers import sharded_optimizer
from tensorflow.python.ipu.optimizers import gradient_accumulation_optimizer
# Expose functional_ops.function as ipu.function
from tensorflow.python.ipu.ops.functional_ops import function
# pylint: enable=wildcard-import,unused-import
| 0 | 0 | 0 |
c8cd0ca301fcc495d44e5f87e6900eb6cb14c9d9 | 5,174 | py | Python | tingbot/tingapp.py | gynsolomon/tingbot-python | ac9fe234dcbcf2b07066b05257580d4effe8ef2f | [
"BSD-2-Clause"
] | 19 | 2015-11-04T01:34:19.000Z | 2020-04-25T23:51:49.000Z | tingbot/tingapp.py | gynsolomon/tingbot-python | ac9fe234dcbcf2b07066b05257580d4effe8ef2f | [
"BSD-2-Clause"
] | 58 | 2015-11-05T15:41:19.000Z | 2020-11-08T17:24:03.000Z | tingbot/tingapp.py | gynsolomon/tingbot-python | ac9fe234dcbcf2b07066b05257580d4effe8ef2f | [
"BSD-2-Clause"
] | 13 | 2015-11-05T00:45:32.000Z | 2021-12-07T17:20:17.000Z | import collections
import json
import os
import sys
import hashlib
import logging
from .utils import cached_property, get_resource
from .graphics import Image
class SettingsDict(collections.MutableMapping):
'''
Represents the tingapp.settings dict-like object.
The settings are loaded from three files in the app bundle
- default_settings.json
This file contains default settings as defined by the app creator
- settings.json
This file contains settings as set by a user when installing the app
(via Tide, for example)
- local_settings.json
This file contains settings written by the app itself.
Settings can be overridden by later files.
Changes are always saved to the local_settings.json file.
'''
app = TingApp()
| 29.067416 | 125 | 0.610166 | import collections
import json
import os
import sys
import hashlib
import logging
from .utils import cached_property, get_resource
from .graphics import Image
def load_json(filename):
    """Load a JSON file and return its contents as a dict.

    Returns:
        dict: the parsed file, or {} if the file is missing/unreadable.

    Raises:
        ValueError: if the file is not valid JSON, or if its top-level
            value is not a dictionary.
    """
    try:
        with open(filename, 'r') as fp:
            result = json.load(fp)
    except ValueError:
        raise ValueError('Failed to load %s because it\'s not a valid JSON file' % filename)
    except IOError:
        # either non-existent file or empty filename
        return {}
    # Checked *outside* the try block: previously this ValueError was raised
    # inside it and immediately caught by the `except ValueError` above,
    # replacing it with the wrong "not a valid JSON file" message.
    if not isinstance(result, dict):
        raise ValueError('Failed to load %s because it should contain a dictionary object, not an array.' % filename)
    return result
def save_json(filename, obj):
    """Serialize *obj* as JSON and write it to *filename* (overwriting)."""
    with open(filename, 'w') as fp:
        fp.write(json.dumps(obj))
# NOTE(review): collections.MutableMapping is the legacy alias of
# collections.abc.MutableMapping; the alias was removed in Python 3.10 —
# confirm the target interpreter version.
class SettingsDict(collections.MutableMapping):
    '''
    Represents the tingapp.settings dict-like object.
    The settings are loaded from three files in the app bundle
    - default_settings.json
    This file contains default settings as defined by the app creator
    - settings.json
    This file contains settings as set by a user when installing the app
    (via Tide, for example)
    - local_settings.json
    This file contains settings written by the app itself.
    Settings can be overridden by later files.
    Changes are always saved to the local_settings.json file.
    '''
    def __init__(self, path):
        #note we do NOT initialise self.dct or self.local_settings here - this ensures we
        #raise an error in the event that they are accessed before self.load
        self.loaded = False
        self.path = path
    def __contains__(self, item):
        # All accessors lazy-load on first use so construction never touches disk.
        if not self.loaded:
            self.load()
        return item in self.dct
    def __len__(self):
        if not self.loaded:
            self.load()
        return len(self.dct)
    def __getitem__(self, key):
        if not self.loaded:
            self.load()
        return self.dct[key]
    def __setitem__(self, key, value):
        if not self.loaded:
            self.load()
        # Writes go to both the merged view and the local overrides, and are
        # persisted immediately.
        self.dct[key] = value
        self.local_settings[key] = value
        self.save()
    def __delitem__(self, key):
        if not self.loaded:
            self.load()
        # NOTE(review): this removes only the local override; self.dct is not
        # updated and save() is not called, so the key still appears present
        # and the deletion is not persisted until the next __setitem__ —
        # confirm this is intended.
        del self.local_settings[key]
    def __iter__(self):
        if not self.loaded:
            self.load()
        return iter(self.dct)
    def load(self):
        # Layered merge: defaults, then install-time settings, then local
        # overrides — later files win.
        self.dct = load_json(os.path.join(self.path, 'default_settings.json'))
        self.dct.update(load_json(os.path.join(self.path, 'settings.json')))
        self.local_settings = load_json(os.path.join(self.path, 'local_settings.json'))
        self.dct.update(self.local_settings)
        self.loaded = True
    def save(self):
        # Only app-written (local) settings are ever persisted.
        save_json(os.path.join(self.path, 'local_settings.json'), self.local_settings)
def generic_icon(name):
    """Generate a deterministic fallback icon for an app with no icon.png.

    The background colour is picked from a fixed palette by hashing the app
    name (same name always yields the same colour) and the name's first
    letter is drawn on top.

    Args:
        name (str): app name; assumed non-empty (its first character is drawn)
            — TODO confirm callers never pass ''.

    Returns:
        Image: a 96x96 icon image
    """
    # encode() so this also works on Python 3, where hashlib.md5 requires a
    # bytes-like object (passing the str directly raises TypeError there).
    name_hash = int(hashlib.md5(name.encode('utf-8')).hexdigest(), 16)
    color_options = [
        'blue', 'teal', 'green', 'olive', 'yellow', 'orange', 'red',
        'fuchsia', 'purple', 'maroon'
    ]
    color = color_options[name_hash % len(color_options)]
    letter = name[0].lower()
    icon = Image(size=(96, 96))
    icon.fill(color=color)
    image = get_resource('default-icon-texture-96.png')
    icon.image(image)
    font = get_resource('MiniSet2.ttf')
    # Nudge the glyph vertically so ascenders/descenders look centred.
    descenders = ['g', 'p', 'q', 'y']
    ascenders = ['b', 'd', 'f', 'h', 'k', 'l', 't']
    y_offset = 0
    if letter in descenders:
        y_offset -= 8
    if letter in ascenders:
        y_offset += 6
    icon.text(letter,
              xy=(52, 41 + y_offset),
              color='white',
              font=font,
              font_size=70)
    # they're a little large compared to the real icons, let's size them down a bit
    resized_icon = Image(size=(96, 96))
    resized_icon.image(icon, scale=0.9)
    return resized_icon
class TingApp(object):
    """Inspects a tingapp bundle: its metadata (app.tbinfo), settings and icon."""

    def __init__(self, path=None):
        """path is the root path of the app you want to inspect
        if path is None, then will let you inspect the current app"""
        if path is None:
            path = os.path.dirname(os.path.abspath(sys.argv[0]))
        self.path = path
        self.settings = SettingsDict(path)

    @cached_property
    def info(self):
        # dict: contents of the bundle's app.tbinfo ({} if the file is missing).
        return load_json(os.path.join(self.path, 'app.tbinfo'))

    @property
    def name(self):
        # Display name from app.tbinfo, falling back to the folder name.
        if 'name' in self.info and self.info['name'] != '':
            return self.info['name']
        else:
            return os.path.basename(self.path)

    @cached_property
    def icon(self):
        # The bundle's 96x96 icon; auto-generated or resized when needed.
        icon_path = os.path.join(self.path, 'icon.png')
        if not os.path.isfile(icon_path):
            return generic_icon(self.name)
        try:
            icon = Image.load(icon_path)
        except Exception:
            # was a bare `except:`, which also swallowed KeyboardInterrupt /
            # SystemExit; a corrupt image file should not mask those.
            logging.exception('Failed to load icon at %s', icon_path)
            return generic_icon(self.name)
        if icon.size != (96, 96):
            # resize the icon by redrawing in the correct size
            resized_icon = Image(size=(96, 96))
            resized_icon.image(icon, scale='shrinkToFit')
            return resized_icon
        return icon
# Module-level singleton describing the currently running app.
app = TingApp()
| 3,551 | 468 | 334 |
19cfd2639394b73206505038a38d56e68dd70cd3 | 1,605 | py | Python | Line drawing algorithm/Bresenham_straightline_algorithm.py | shuvankarroy/graphics_python | e32749c91550836d7340a12a7eb1f3e3e90fd321 | [
"MIT"
] | null | null | null | Line drawing algorithm/Bresenham_straightline_algorithm.py | shuvankarroy/graphics_python | e32749c91550836d7340a12a7eb1f3e3e90fd321 | [
"MIT"
] | null | null | null | Line drawing algorithm/Bresenham_straightline_algorithm.py | shuvankarroy/graphics_python | e32749c91550836d7340a12a7eb1f3e3e90fd321 | [
"MIT"
] | null | null | null | # Algorithm to draw a straight line using Bresenham's algorithm
# Draw a straight line using Bresenham's algorithm.
# NOTE: as written this handles only lines with inclination <= 45 degrees
# stepped left-to-right; other octants are not mapped.
from graphics import *
import time
import ctypes

# Query the Windows API for the screen size, leaving a 100px margin.
user32 = ctypes.windll.user32
scrnWidth, scrnHeight = (user32.GetSystemMetrics(0)-100), (user32.GetSystemMetrics(1)-100)

print("Straight line drawing using Bresenham's algorithm : ")
start = tuple(int(x.strip()) for x in input("Enter starting co-ordinate of the straight line (x,y) : ").split(','))
# Fixed copy-paste bug: this prompt previously asked for the *starting*
# co-ordinate a second time.
end = tuple(int(x.strip()) for x in input("Enter ending co-ordinate of the straight line (x,y) : ").split(','))

win = GraphWin('Bresenham\'s Straight Line', scrnWidth, scrnHeight)

# Title banner at the top of the window.
message = Text(Point(win.getWidth()/2, 30), 'Straight line drawing using Bresenham\'s algorithm : ')
message.setTextColor('red')
message.setStyle('italic')
message.setSize(20)
message.draw(win)

# Footer telling the user how to close the window.
message = Text(Point(win.getWidth()/2, win.getHeight()-20), 'Click on the window to close')
message.setTextColor('red')
message.setStyle('italic')
message.setSize(20)
message.draw(win)

x1,y1 = start
x2,y2 = end

# Plot the first pixel, then step along x, using the integer decision
# parameter p to decide at each step whether y also advances.
pt = Point(x1,y1)
x_new,y_new = x1,y1
pt.draw(win)
delta_x = abs(x2 - x1)
delta_y = abs(y2 - y1)
p = 2 * delta_y - delta_x  # initial decision parameter
i = 1
while i <= delta_x:
    time.sleep(0.1)  # slow down so the plotting is visible
    if p < 0:
        # Horizontal step: only x advances.
        x_new += 1
        pt = Point(x_new,y_new)
        pt.draw(win)
        p += 2*delta_y
    else:
        # Diagonal step: both x and y advance.
        x_new += 1
        y_new += 1
        pt = Point(x_new,y_new)
        pt.draw(win)
        p = p + 2*delta_y - 2*delta_x
    i += 1
win.getMouse()
win.close()
| 27.20339 | 116 | 0.643614 | # Algorithm to draw a straight line using Bresenham's algorithm
# Draw a straight line using Bresenham's algorithm.
# NOTE: as written this handles only lines with inclination <= 45 degrees
# stepped left-to-right; other octants are not mapped.
from graphics import *
import time
import ctypes

# Query the Windows API for the screen size, leaving a 100px margin.
user32 = ctypes.windll.user32
scrnWidth, scrnHeight = (user32.GetSystemMetrics(0)-100), (user32.GetSystemMetrics(1)-100)

print("Straight line drawing using Bresenham's algorithm : ")
start = tuple(int(x.strip()) for x in input("Enter starting co-ordinate of the straight line (x,y) : ").split(','))
# Fixed copy-paste bug: this prompt previously asked for the *starting*
# co-ordinate a second time.
end = tuple(int(x.strip()) for x in input("Enter ending co-ordinate of the straight line (x,y) : ").split(','))

win = GraphWin('Bresenham\'s Straight Line', scrnWidth, scrnHeight)

# Title banner at the top of the window.
message = Text(Point(win.getWidth()/2, 30), 'Straight line drawing using Bresenham\'s algorithm : ')
message.setTextColor('red')
message.setStyle('italic')
message.setSize(20)
message.draw(win)

# Footer telling the user how to close the window.
message = Text(Point(win.getWidth()/2, win.getHeight()-20), 'Click on the window to close')
message.setTextColor('red')
message.setStyle('italic')
message.setSize(20)
message.draw(win)

x1,y1 = start
x2,y2 = end

# Plot the first pixel, then step along x, using the integer decision
# parameter p to decide at each step whether y also advances.
pt = Point(x1,y1)
x_new,y_new = x1,y1
pt.draw(win)
delta_x = abs(x2 - x1)
delta_y = abs(y2 - y1)
p = 2 * delta_y - delta_x  # initial decision parameter
i = 1
while i <= delta_x:
    time.sleep(0.1)  # slow down so the plotting is visible
    if p < 0:
        # Horizontal step: only x advances.
        x_new += 1
        pt = Point(x_new,y_new)
        pt.draw(win)
        p += 2*delta_y
    else:
        # Diagonal step: both x and y advance.
        x_new += 1
        y_new += 1
        pt = Point(x_new,y_new)
        pt.draw(win)
        p = p + 2*delta_y - 2*delta_x
    i += 1
win.getMouse()
win.close()
| 0 | 0 | 0 |
6865502dfb12453a7c664da7a679fbf4159aa717 | 477 | py | Python | src/features/test_outlier_correction.py | KennedyMurphy/elomerchant | b6561de4e13a0bcfcda72bb99cec722a58e8f09e | [
"FTL"
] | null | null | null | src/features/test_outlier_correction.py | KennedyMurphy/elomerchant | b6561de4e13a0bcfcda72bb99cec722a58e8f09e | [
"FTL"
] | 12 | 2019-12-26T17:02:54.000Z | 2022-03-21T22:16:55.000Z | src/features/test_outlier_correction.py | KennedyMurphy/elomerchant | b6561de4e13a0bcfcda72bb99cec722a58e8f09e | [
"FTL"
] | null | null | null | import unittest
import pandas as pd
import numpy as np
import src.features.outlier_correction as oc | 26.5 | 63 | 0.677149 | import unittest
import pandas as pd
import numpy as np
import src.features.outlier_correction as oc
class TestFlagNormalOutliers(unittest.TestCase):
def setUp(self):
self.series = pd.Series(np.random.normal(0, 0.1, 1000))
self.series.loc[500] = -5
self.series.loc[42] = 5
def test_flag(self):
outliers = oc.flag_normal_outliers(self.series, 5)
self.assertTrue(outliers.loc[500])
self.assertTrue(outliers.loc[42]) | 270 | 27 | 81 |
fe25df49c039924110d36cd8241d90927630908c | 603 | py | Python | analytics/pyspark/src/main/python/geowave_pyspark/__init__.py | MC-JY/geowave | 73fa7ceca05ba572774377a89e10abbe9ee874fb | [
"Apache-2.0"
] | null | null | null | analytics/pyspark/src/main/python/geowave_pyspark/__init__.py | MC-JY/geowave | 73fa7ceca05ba572774377a89e10abbe9ee874fb | [
"Apache-2.0"
] | null | null | null | analytics/pyspark/src/main/python/geowave_pyspark/__init__.py | MC-JY/geowave | 73fa7ceca05ba572774377a89e10abbe9ee874fb | [
"Apache-2.0"
] | null | null | null | ###############################################################################
# Copyright (c) 2013-2022 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional
# information regarding copyright ownership.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License,
# Version 2.0 which accompanies this distribution and is available at
# http://www.apache.org/licenses/LICENSE-2.0.txt
##############################################################################
import types | 54.818182 | 79 | 0.577114 | ###############################################################################
# Copyright (c) 2013-2022 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional
# information regarding copyright ownership.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License,
# Version 2.0 which accompanies this distribution and is available at
# http://www.apache.org/licenses/LICENSE-2.0.txt
##############################################################################
import types | 0 | 0 | 0 |
3bd3e618b436ec6dc4278aee16965510d37cf51c | 6,273 | py | Python | CIFAR10/src/trainer.py | AnesBenmerzoug/Machine-Learning-Projects | c52b3f55968c042a20299473fb124b75cc410ce0 | [
"MIT"
] | 1 | 2020-05-02T18:50:11.000Z | 2020-05-02T18:50:11.000Z | CIFAR10/src/trainer.py | AnesBenmerzoug/Machine-Learning-Projects | c52b3f55968c042a20299473fb124b75cc410ce0 | [
"MIT"
] | null | null | null | CIFAR10/src/trainer.py | AnesBenmerzoug/Machine-Learning-Projects | c52b3f55968c042a20299473fb124b75cc410ce0 | [
"MIT"
] | null | null | null | import torch
import numpy as np
import torch.optim as optim
from torch.nn import NLLLoss
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler
from torch.nn.utils import clip_grad_norm
from torchvision.datasets import CIFAR10
from torchvision.transforms import transforms
from src.model import CIFAR10_Network
| 34.657459 | 88 | 0.569903 | import torch
import numpy as np
import torch.optim as optim
from torch.nn import NLLLoss
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler
from torch.nn.utils import clip_grad_norm
from torchvision.datasets import CIFAR10
from torchvision.transforms import transforms
from src.model import CIFAR10_Network
class CIFAR10Trainer:
def __init__(self, parameters):
self.params = parameters
# Transform applied to each image
transform = transforms.ToTensor()
# Initialize datasets
self.trainset = CIFAR10(
root=self.params.dataset_dir, train=True, download=True, transform=transform
)
self.testset = CIFAR10(
root=self.params.dataset_dir,
train=False,
download=True,
transform=transform,
)
# Initialize loaders
self.trainloader = DataLoader(
self.trainset,
batch_size=self.params.batch_size,
shuffle=False,
num_workers=self.params.num_workers,
sampler=RandomSampler(self.trainset),
)
self.testloader = DataLoader(
self.testset,
batch_size=self.params.batch_size,
shuffle=False,
num_workers=self.params.num_workers,
)
# Checking for GPU
self.use_gpu = self.params.use_gpu and torch.cuda.is_available()
self.device = torch.device("cuda:0" if self.use_gpu else "cpu")
# Initialize model
self.model = CIFAR10_Network(self.params)
self.model.to(self.device)
print(self.model)
print("Number of parameters = {}".format(self.model.num_parameters()))
# Setup optimizer
self.optimizer = self.optimizer_select()
# Criterion
self.criterion = NLLLoss()
def train_model(self):
max_accuracy = None
best_model = None
avg_losses = np.zeros(self.params.num_epochs)
for epoch in range(self.params.num_epochs):
try:
print("Epoch {}".format(epoch + 1))
print("Learning Rate= {}".format(self.optimizer.param_groups[0]["lr"]))
# Set mode to training
self.model.train()
# Go through the training set
avg_losses[epoch] = self.train_epoch()
print("Average loss= {}".format(avg_losses[epoch]))
# Switch to eval and go through the test set
self.model.eval()
# Go through the test set
test_accuracy = self.test_epoch()
print(
"In Epoch {}, Obtained Accuracy {:.2f}".format(
epoch + 1, test_accuracy
)
)
if max_accuracy is None or max_accuracy < test_accuracy:
max_accuracy = test_accuracy
best_model = self.model.state_dict()
except KeyboardInterrupt:
print("Training was interrupted")
break
# Saving trained model
self.save_model(best_model)
return avg_losses
def train_epoch(self):
losses = 0.0
for batch_index, (data) in enumerate(self.trainloader, 1):
if batch_index % 200 == 0:
print("Step {}".format(batch_index))
print("Average Loss so far: {}".format(losses / batch_index))
# Split data tuple
inputs, labels = data
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Main Model Forward Step
output = self.model(inputs)
# Loss Computation
loss = self.criterion(output, labels)
inf = float("inf")
if loss.data.item() == inf or loss.data.item() == -inf:
print("Warning, received inf loss. Skipping it")
elif loss.data.item() != loss.data.item():
print("Warning, received nan loss.")
else:
losses = losses + loss.data.item()
# Zero the optimizer gradient
self.optimizer.zero_grad()
# Backward step
loss.backward()
# Clip gradients
clip_grad_norm(self.model.parameters(), self.params.max_norm)
# Weight Update
self.optimizer.step()
if self.use_gpu is True:
torch.cuda.synchronize()
del inputs, labels, data, loss, output
# Compute the average loss for this epoch
avg_loss = losses / len(self.trainloader)
return avg_loss
def test_epoch(self):
correct = 0
total = 0
for data in self.testloader:
# Split data tuple
inputs, labels = data
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Forward step
outputs = self.model(inputs)
_, predicted = torch.max(outputs.data, dim=1)
total += labels.size(0)
correct += torch.sum(predicted == labels.data)
del outputs, inputs, labels, data
total_accuracy = correct * 1.0 / total * 100.0
return total_accuracy
def save_model(self, model_parameters):
self.model.load_state_dict(model_parameters)
torch.save(
self.serialize(), self.params.model_dir / "trained_model.pt",
)
def serialize(self):
model_is_cuda = next(self.model.parameters()).is_cuda
model = self.model.cpu() if model_is_cuda else self.model
package = {
"state_dict": model.state_dict(),
"optim_dict": self.optimizer.state_dict(),
}
return package
def optimizer_select(self):
if self.params.optimizer == "Adam":
return optim.Adam(self.model.parameters(), lr=self.params.learning_rate)
elif self.params.optimizer == "SGD":
return optim.SGD(
self.model.parameters(),
lr=self.params.learning_rate,
momentum=self.params.momentum,
nesterov=self.params.nesterov,
)
else:
raise NotImplementedError
| 5,714 | 0 | 211 |
dee639241bbc86ec9f0a3ad511c414f90479cc1c | 3,602 | py | Python | recipes/Python/579043_Printing_an_ASCII_table_to_PDF/recipe-579043.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/579043_Printing_an_ASCII_table_to_PDF/recipe-579043.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/579043_Printing_an_ASCII_table_to_PDF/recipe-579043.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | # ASCIITableToPDF.py
# Author: Vasudev Ram - http://www.dancingbison.com
# Demo program to show how to generate an ASCII table as PDF,
# using the xtopdf toolkit for PDF creation from Python.
# Generates a PDF file with information about the
# first 32 ASCII codes, i.e. the control characters.
# Based on the ASCII Code table at http://www.ascii-code.com/
import sys
from PDFWriter import PDFWriter
# Define the header information.
column_names = ['DEC', 'OCT', 'HEX', 'BIN', 'Symbol', 'Description']
column_widths = [4, 6, 4, 10, 7, 20]
# Define the ASCII control character information.
ascii_control_characters = \
"""
0 000 00 00000000 NUL � Null char
1 001 01 00000001 SOH Start of Heading
2 002 02 00000010 STX Start of Text
3 003 03 00000011 ETX End of Text
4 004 04 00000100 EOT End of Transmission
5 005 05 00000101 ENQ Enquiry
6 006 06 00000110 ACK Acknowledgment
7 007 07 00000111 BEL Bell
8 010 08 00001000 BS Back Space
9 011 09 00001001 HT Horizontal Tab
10 012 0A 00001010 LF
Line Feed
11 013 0B 00001011 VT Vertical Tab
12 014 0C 00001100 FF Form Feed
13 015 0D 00001101 CR
Carriage Return
14 016 0E 00001110 SO Shift Out / X-On
15 017 0F 00001111 SI Shift In / X-Off
16 020 10 00010000 DLE Data Line Escape
17 021 11 00010001 DC1 Device Control 1 (oft. XON)
18 022 12 00010010 DC2 Device Control 2
19 023 13 00010011 DC3 Device Control 3 (oft. XOFF)
20 024 14 00010100 DC4 Device Control 4
21 025 15 00010101 NAK Negative Acknowledgement
22 026 16 00010110 SYN Synchronous Idle
23 027 17 00010111 ETB End of Transmit Block
24 030 18 00011000 CAN Cancel
25 031 19 00011001 EM End of Medium
26 032 1A 00011010 SUB Substitute
27 033 1B 00011011 ESC Escape
28 034 1C 00011100 FS File Separator
29 035 1D 00011101 GS Group Separator
30 036 1E 00011110 RS Record Separator
31 037 1F 00011111 US Unit Separator
"""
# Create and set some of the fields of a PDFWriter instance.
pw = PDFWriter("ASCII-Table.pdf")
pw.setFont("Courier", 12)
pw.setHeader("ASCII Control Characters - 0 to 31")
pw.setFooter("Generated by xtopdf: http://slid.es/vasudevram/xtopdf")
# Write the column headings to the output.
column_headings = [ str(val).ljust(column_widths[idx]) \
for idx, val in enumerate(column_names) ]
pw.writeLine(' '.join(column_headings))
# Split the string into lines, omitting the first and last empty lines.
for line in ascii_control_characters.split('\n')[1:-1]:
# Split the line into space-delimited fields.
lis = line.split()
# Join the words of the Description back into one field,
# since it was split due to having internal spaces.
lis2 = lis[0:5] + [' '.join(lis[6:])]
# Write the column data to the output.
lis3 = [ str(val).ljust(column_widths[idx]) \
for idx, val in enumerate(lis2) ]
pw.writeLine(' '.join(lis3))
pw.close()
| 43.926829 | 76 | 0.578567 | # ASCIITableToPDF.py
# Author: Vasudev Ram - http://www.dancingbison.com
# Demo program to show how to generate an ASCII table as PDF,
# using the xtopdf toolkit for PDF creation from Python.
# Generates a PDF file with information about the
# first 32 ASCII codes, i.e. the control characters.
# Based on the ASCII Code table at http://www.ascii-code.com/
import sys
from PDFWriter import PDFWriter
# Define the header information.
column_names = ['DEC', 'OCT', 'HEX', 'BIN', 'Symbol', 'Description']
column_widths = [4, 6, 4, 10, 7, 20]
# Define the ASCII control character information.
ascii_control_characters = \
"""
0 000 00 00000000 NUL � Null char
1 001 01 00000001 SOH Start of Heading
2 002 02 00000010 STX Start of Text
3 003 03 00000011 ETX End of Text
4 004 04 00000100 EOT End of Transmission
5 005 05 00000101 ENQ Enquiry
6 006 06 00000110 ACK Acknowledgment
7 007 07 00000111 BEL Bell
8 010 08 00001000 BS Back Space
9 011 09 00001001 HT Horizontal Tab
10 012 0A 00001010 LF
Line Feed
11 013 0B 00001011 VT Vertical Tab
12 014 0C 00001100 FF Form Feed
13 015 0D 00001101 CR
Carriage Return
14 016 0E 00001110 SO Shift Out / X-On
15 017 0F 00001111 SI Shift In / X-Off
16 020 10 00010000 DLE Data Line Escape
17 021 11 00010001 DC1 Device Control 1 (oft. XON)
18 022 12 00010010 DC2 Device Control 2
19 023 13 00010011 DC3 Device Control 3 (oft. XOFF)
20 024 14 00010100 DC4 Device Control 4
21 025 15 00010101 NAK Negative Acknowledgement
22 026 16 00010110 SYN Synchronous Idle
23 027 17 00010111 ETB End of Transmit Block
24 030 18 00011000 CAN Cancel
25 031 19 00011001 EM End of Medium
26 032 1A 00011010 SUB Substitute
27 033 1B 00011011 ESC Escape
28 034 1C 00011100 FS File Separator
29 035 1D 00011101 GS Group Separator
30 036 1E 00011110 RS Record Separator
31 037 1F 00011111 US Unit Separator
"""
# Create and set some of the fields of a PDFWriter instance.
pw = PDFWriter("ASCII-Table.pdf")
pw.setFont("Courier", 12)
pw.setHeader("ASCII Control Characters - 0 to 31")
pw.setFooter("Generated by xtopdf: http://slid.es/vasudevram/xtopdf")
# Write the column headings to the output.
column_headings = [ str(val).ljust(column_widths[idx]) \
for idx, val in enumerate(column_names) ]
pw.writeLine(' '.join(column_headings))
# Split the string into lines, omitting the first and last empty lines.
for line in ascii_control_characters.split('\n')[1:-1]:
# Split the line into space-delimited fields.
lis = line.split()
# Join the words of the Description back into one field,
# since it was split due to having internal spaces.
lis2 = lis[0:5] + [' '.join(lis[6:])]
# Write the column data to the output.
lis3 = [ str(val).ljust(column_widths[idx]) \
for idx, val in enumerate(lis2) ]
pw.writeLine(' '.join(lis3))
pw.close()
| 0 | 0 | 0 |
ccc92edca48018b9ec4cdeb6f3f532f0068d1134 | 4,726 | py | Python | game_object.py | anthopark/Face-Breakout | be075220f2037b9f07ea9c951a667d10ec14425f | [
"MIT"
] | 1 | 2020-02-18T12:10:21.000Z | 2020-02-18T12:10:21.000Z | game_object.py | anthopark/Face-Breakout | be075220f2037b9f07ea9c951a667d10ec14425f | [
"MIT"
] | null | null | null | game_object.py | anthopark/Face-Breakout | be075220f2037b9f07ea9c951a667d10ec14425f | [
"MIT"
] | null | null | null | """This is game module
It helps control game status and contains some helper functions
"""
import pygame
import brick
from constants import *
import random
import time
# pygame will search the system for font with similar name
fontName = pygame.font.match_font('arial')
def initSound():
"""load various sound effects"""
WALLOPENSND = pygame.mixer.Sound(path.join(SNDDIR, 'wallopen.wav'))
BALLFALLSND = pygame.mixer.Sound(path.join(SNDDIR, 'ballfalling.wav'))
PADDLECOLSND = pygame.mixer.Sound(path.join(SNDDIR, 'paddlecollide.wav'))
SINGCOLSNDS = []
SINGCOLSNDSLIST = ['singlebrickcol1.wav', 'singlebrickcol2.wav', 'singlebrickcol3.wav', 'singlebrickcol4.wav']
for snd in SINGCOLSNDSLIST:
SINGCOLSNDS.append(pygame.mixer.Sound(path.join(SNDDIR, snd)))
MULTICOLSNDS = []
MULTICOLSNDSLIST = ['multibrickcol1.wav', 'multibrickcol2.wav']
for snd in MULTICOLSNDSLIST:
MULTICOLSNDS.append(pygame.mixer.Sound(path.join(SNDDIR, snd)))
return WALLOPENSND, BALLFALLSND, PADDLECOLSND, SINGCOLSNDS, MULTICOLSNDS
def drawText(surface, text, size, x, y):
"""size: font size. x, y: location."""
font = pygame.font.Font(fontName, size)
# False - alias / True - Anti-aliased(look smoother and nice)
text_surface = font.render(text, True, GREEN)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
surface.blit(text_surface, text_rect)
def createBricks(brickGroup, allSprites):
"""Create and place brick objects
"""
brickPlaceY = 0
for i in range(6):
if i % 2:
brickPlaceX = 100
else:
brickPlaceX = 50
for j in range(6):
brickImg = pygame.image.load(path.join(IMGDIR, random.choice(BRICKIMGLIST)))
brickObj = brick.Brick(brickImg, brickPlaceX, brickPlaceY)
allSprites.add(brickObj)
brickGroup.add(brickObj)
brickPlaceX += 100
if not i % 2:
brickImg = pygame.image.load(path.join(IMGDIR, random.choice(BRICKIMGLIST)))
brickObj = brick.Brick(brickImg, brickPlaceX, brickPlaceY)
allSprites.add(brickObj)
brickGroup.add(brickObj)
brickPlaceY += 30
| 30.688312 | 114 | 0.628015 | """This is game module
It helps control game status and contains some helper functions
"""
import pygame
import brick
from constants import *
import random
import time
class Game(object):
def __init__(self):
self.initialScreen = True
self.standByStatus = True
self.running = True
self.lives = 3
def runningStatus(self):
return self.running
def setRunningStatus(self, val):
assert type(val) == bool, 'incorrect type of parameter "val"'
self.running = val
def die(self):
"""if ball is dropped decrement the life"""
self.lives -= 1
if self.lives == 0:
self.initialScreen = True
def getStandByMode(self):
return self.standByStatus
def setStandByMode(self, val):
assert type(val) == bool, 'incorrect type of parameter "val"'
self.standByStatus = val
def getInitialScreenStatus(self):
return self.initialScreen
def setInitialScreenStatus(self, val):
assert type(val) == bool, 'incorrect type of parameter "val"'
self.initialScreen = val
# pygame will search the system for font with similar name
fontName = pygame.font.match_font('arial')
def initSound():
"""load various sound effects"""
WALLOPENSND = pygame.mixer.Sound(path.join(SNDDIR, 'wallopen.wav'))
BALLFALLSND = pygame.mixer.Sound(path.join(SNDDIR, 'ballfalling.wav'))
PADDLECOLSND = pygame.mixer.Sound(path.join(SNDDIR, 'paddlecollide.wav'))
SINGCOLSNDS = []
SINGCOLSNDSLIST = ['singlebrickcol1.wav', 'singlebrickcol2.wav', 'singlebrickcol3.wav', 'singlebrickcol4.wav']
for snd in SINGCOLSNDSLIST:
SINGCOLSNDS.append(pygame.mixer.Sound(path.join(SNDDIR, snd)))
MULTICOLSNDS = []
MULTICOLSNDSLIST = ['multibrickcol1.wav', 'multibrickcol2.wav']
for snd in MULTICOLSNDSLIST:
MULTICOLSNDS.append(pygame.mixer.Sound(path.join(SNDDIR, snd)))
return WALLOPENSND, BALLFALLSND, PADDLECOLSND, SINGCOLSNDS, MULTICOLSNDS
def drawText(surface, text, size, x, y):
"""size: font size. x, y: location."""
font = pygame.font.Font(fontName, size)
# False - alias / True - Anti-aliased(look smoother and nice)
text_surface = font.render(text, True, GREEN)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
surface.blit(text_surface, text_rect)
def showInitialScreen(game, surface, clock):
surface.blit(LEFTBRICKIMG, (0, 0))
surface.blit(RIGHTBRICKIMG, (700, 0))
surface.blit(CENTERBRICKIMG, (100, 0))
drawText(surface, "FACE BREAKOUT!", 80, WIDTH//2, HEIGHT//4 - 40)
drawText(surface, "Space to shoot the ball, Move face to control paddle", 34, WIDTH//2, HEIGHT//2+15)
drawText(surface, "Press any key to play", 30, WIDTH//2, (HEIGHT*3/4) + 10)
pygame.display.flip()
waiting = True
quit = False
while waiting:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
waiting = False
quit = True
break
elif event.type == pygame.KEYUP:
waiting = False
if quit:
pygame.quit()
def openUpScreen(surface, clock, snd):
height = 0
ySpeed = 8
snd.play()
while height > -HEIGHT:
clock.tick(FPS)
surface.fill(BLACK)
surface.blit(LEFTBRICKIMG, (0, 0))
surface.blit(RIGHTBRICKIMG, (700, 0))
surface.blit(CENTERBRICKIMG, (100, height))
height -= ySpeed
pygame.display.flip()
def closeDownScreen(surface, clock, snd):
height = -HEIGHT
ySpeed = 8
snd.play()
while height < 0:
clock.tick(FPS)
surface.fill(BLACK)
surface.blit(LEFTBRICKIMG, (0, 0))
surface.blit(RIGHTBRICKIMG, (700, 0))
surface.blit(CENTERBRICKIMG, (100, height))
height += ySpeed
pygame.display.flip()
def createBricks(brickGroup, allSprites):
"""Create and place brick objects
"""
brickPlaceY = 0
for i in range(6):
if i % 2:
brickPlaceX = 100
else:
brickPlaceX = 50
for j in range(6):
brickImg = pygame.image.load(path.join(IMGDIR, random.choice(BRICKIMGLIST)))
brickObj = brick.Brick(brickImg, brickPlaceX, brickPlaceY)
allSprites.add(brickObj)
brickGroup.add(brickObj)
brickPlaceX += 100
if not i % 2:
brickImg = pygame.image.load(path.join(IMGDIR, random.choice(BRICKIMGLIST)))
brickObj = brick.Brick(brickImg, brickPlaceX, brickPlaceY)
allSprites.add(brickObj)
brickGroup.add(brickObj)
brickPlaceY += 30
| 2,036 | 348 | 92 |
287c5b1a417db68a52874d5ed7fd3ca89e612f78 | 16,327 | py | Python | mindinsight/backend/debugger/debugger_api.py | mindspore-ai/mindinsight | 8c57fdd62eb7f8653662be2208633386ac82e8d7 | [
"Apache-2.0"
] | 216 | 2020-03-28T02:11:56.000Z | 2022-03-31T06:20:09.000Z | mindinsight/backend/debugger/debugger_api.py | mindspore-ai/mindinsight | 8c57fdd62eb7f8653662be2208633386ac82e8d7 | [
"Apache-2.0"
] | 13 | 2020-03-31T03:00:12.000Z | 2021-01-03T13:01:06.000Z | mindinsight/backend/debugger/debugger_api.py | mindspore-ai/mindinsight | 8c57fdd62eb7f8653662be2208633386ac82e8d7 | [
"Apache-2.0"
] | 21 | 2020-03-28T02:41:06.000Z | 2021-11-24T12:20:25.000Z | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Debugger restful api."""
import json
import weakref
from urllib.parse import unquote
from flask import Blueprint, jsonify, request, Response
from mindinsight.conf import settings
from mindinsight.debugger.session_manager import SessionManager
from mindinsight.utils.exceptions import ParamMissError, ParamValueError, ParamTypeError
# All debugger endpoints below are registered on this blueprint under the
# common URL prefix, e.g. "<url_path_prefix><api_prefix>/debugger/...".
BLUEPRINT = Blueprint("debugger", __name__,
                      url_prefix=settings.URL_PATH_PREFIX + settings.API_PREFIX)
def _unquote_param(param):
"""
Decode parameter value.
Args:
param (str): Encoded param value.
Returns:
str, decoded param value.
"""
if isinstance(param, str):
try:
param = unquote(param, errors='strict')
except UnicodeDecodeError:
raise ParamValueError('Unquote error with strict mode.')
return param
def _read_post_request(post_request):
"""
Extract the body of post request.
Args:
post_request (object): The post request.
Returns:
dict, the deserialized body of request.
"""
body = post_request.stream.read()
try:
body = json.loads(body if body else "{}")
except Exception:
raise ParamValueError("Json data parse failed.")
return body
def to_int(param, param_name):
    """
    Convert a request parameter to ``int``.

    Args:
        param (Any): Value to convert.
        param_name (str): Parameter name used in the error message.

    Returns:
        int, the converted value.

    Raises:
        ParamTypeError: If the value cannot be converted to an integer.
    """
    try:
        return int(param)
    except ValueError:
        raise ParamTypeError(param_name, 'Integer')
def _wrap_reply(func, *args, **kwargs):
    """Invoke ``func`` with the given arguments and serialize its reply as JSON."""
    return jsonify(func(*args, **kwargs))
@BLUEPRINT.route("/debugger/sessions/<session_id>/poll-data", methods=["GET"])
def poll_data(session_id):
    """
    Long-poll for data updates to be shown on the UI.

    Returns:
        str, the updated data.

    Examples:
        >>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/poll-data?pos=xx
    """
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.poll_data, request.args.get('pos'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/search", methods=["GET"])
def search(session_id):
    """
    Search nodes in specified watchpoint.

    Returns:
        str, the required data.

    Examples:
        >>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/search?name=mock_name&watch_point_id=1
    """
    args = request.args
    # Collect every filter criterion from the query string in one place.
    filter_condition = {
        'name': args.get('name'),
        'graph_name': args.get('graph_name'),
        'watch_point_id': to_int(args.get('watch_point_id', 0), 'watch_point_id'),
        'node_category': args.get('node_category'),
        'rank_id': to_int(args.get('rank_id', 0), 'rank_id'),
        'stack_pattern': _unquote_param(args.get('stack_info_key_word')),
    }
    return _wrap_reply(_session_manager.get_session(session_id).search, filter_condition)
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-comparisons", methods=["GET"])
def tensor_comparisons(session_id):
    """
    Get tensor comparisons between current and previous step.

    Returns:
        str, the required data.

    Examples:
        >>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-comparisons
    """
    args = request.args
    session = _session_manager.get_session(session_id)
    return _wrap_reply(
        session.tensor_comparisons,
        args.get('name'),
        _unquote_param(args.get('shape')),
        args.get('detail', 'data'),
        args.get('tolerance', '0'),
        to_int(args.get('rank_id', 0), 'rank_id'),
        args.get('graph_name', ''))
@BLUEPRINT.route("/debugger/sessions/<session_id>/retrieve", methods=["POST"])
def retrieve(session_id):
    """
    Retrieve data according to mode and params.

    Returns:
        str, the required data.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/retrieve
    """
    body = _read_post_request(request)
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.retrieve, body.get('mode'), body.get('params'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-history", methods=["POST"])
def retrieve_tensor_history(session_id):
    """
    Retrieve the value history of the given tensor.

    Returns:
        str, the required data.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-history
    """
    body = _read_post_request(request)
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.retrieve_tensor_history,
                       body.get('name'),
                       body.get('graph_name'),
                       to_int(body.get('rank_id', 0), 'rank_id'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensors", methods=["GET"])
def retrieve_tensor_value(session_id):
    """
    Retrieve tensor value according to name and shape.

    Returns:
        str, the required data.

    Examples:
        >>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensors?name=tensor_name&detail=data&shape=[1,1,:,:]
    """
    args = request.args
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.retrieve_tensor_value,
                       args.get('name'),
                       args.get('detail'),
                       _unquote_param(args.get('shape')),
                       args.get('graph_name'),
                       args.get('prev') == 'true',
                       to_int(args.get('rank_id', 0), 'rank_id'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/create-watchpoint", methods=["POST"])
def create_watchpoint(session_id):
    """
    Create watchpoint.

    Returns:
        str, watchpoint id.

    Raises:
        MindInsightException: If method fails to be called.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/create-watchpoint
    """
    session = _session_manager.get_session(session_id)
    params = _read_post_request(request)
    # The front end sends the condition under the key 'condition'; the
    # session layer expects it as 'watch_condition'.
    params['watch_condition'] = params.pop('condition', None)
    return _wrap_reply(session.create_watchpoint, params)
@BLUEPRINT.route("/debugger/sessions/<session_id>/update-watchpoint", methods=["POST"])
def update_watchpoint(session_id):
    """
    Update watchpoint.

    Returns:
        str, reply message.

    Raises:
        MindInsightException: If method fails to be called.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/update-watchpoint
    """
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.update_watchpoint, _read_post_request(request))
@BLUEPRINT.route("/debugger/sessions/<session_id>/delete-watchpoint", methods=["POST"])
def delete_watchpoint(session_id):
    """
    Delete watchpoint.

    Returns:
        str, reply message.

    Raises:
        MindInsightException: If method fails to be called.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/delete-watchpoint
    """
    body = _read_post_request(request)
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.delete_watchpoint, body.get('watch_point_id'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/control", methods=["POST"])
def control(session_id):
    """
    Handle a control request for the debugger session.

    Returns:
        str, reply message.

    Raises:
        MindInsightException: If method fails to be called.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/control
    """
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.control, _read_post_request(request))
@BLUEPRINT.route("/debugger/sessions/<session_id>/recheck", methods=["POST"])
def recheck(session_id):
    """
    Recheck all watchpoints for the session.

    Returns:
        str, reply message.

    Raises:
        MindInsightException: If method fails to be called.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/recheck
    """
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.recheck)
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-graphs", methods=["GET"])
def retrieve_tensor_graph(session_id):
    """
    Retrieve the graph around the given tensor.

    Returns:
        str, the required data.

    Examples:
        >>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-graphs?tensor_name=xxx&graph_name=xxx
    """
    args = request.args
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.retrieve_tensor_graph,
                       args.get('tensor_name'),
                       args.get('graph_name'),
                       to_int(args.get('rank_id', 0), 'rank_id'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-hits", methods=["GET"])
def retrieve_tensor_hits(session_id):
    """
    Retrieve watchpoint hits for the given tensor.

    Returns:
        str, the required data.

    Examples:
        >>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-hits?tensor_name=xxx&graph_name=xxx
    """
    args = request.args
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.retrieve_tensor_hits,
                       args.get('tensor_name'),
                       args.get('graph_name'),
                       to_int(args.get('rank_id', 0), 'rank_id'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/search-watchpoint-hits", methods=["POST"])
def search_watchpoint_hits(session_id):
    """
    Search watchpoint hits by group condition.

    Returns:
        str, the required data.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/search-watchpoint-hits
    """
    body = _read_post_request(request)
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.search_watchpoint_hits, body.get('group_condition'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/condition-collections", methods=["GET"])
def get_condition_collections(session_id):
    """Get the watchpoint condition collections supported by the session."""
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.get_condition_collections)
@BLUEPRINT.route("/debugger/sessions/<session_id>/set-recommended-watch-points", methods=["POST"])
def set_recommended_watch_points(session_id):
    """
    Enable or disable the recommended watch points for the session.

    Raises:
        ParamMissError: If the request body lacks the 'requestBody' field.
    """
    request_body = _read_post_request(request).get('requestBody')
    if request_body is None:
        raise ParamMissError('requestBody')
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.set_recommended_watch_points,
                       request_body.get('set_recommended'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-files/load", methods=["POST"])
def load(session_id):
    """
    Load the tensor file for the given tensor so it can be downloaded later.

    Returns:
        str, the required data.

    Examples:
        >>> GET http://xxx/v1/mindinsight/debugger/sessions/xxxx/tensor-files/load
    """
    body = _read_post_request(request)
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.load,
                       body.get('name'),
                       body.get('prev') == 'true',
                       body.get('graph_name'),
                       to_int(body.get('rank_id', 0), 'rank_id'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-files/download", methods=["GET"])
def download(session_id):
    """
    Download a previously loaded tensor file as a streamed attachment.

    Returns:
        Response, an 'application/octet-stream' response streaming the file.

    Examples:
        >>> GET http://xxx/v1/mindinsight/debugger/sessions/xxx/tensor-files/download?name=name&graph_name=xxx&prev=xxx
    """
    name = request.args.get('name')
    graph_name = request.args.get('graph_name')
    rank_id = to_int(request.args.get('rank_id', 0), 'rank_id')
    prev = bool(request.args.get('prev') == 'true')
    file_name, file_path, clean_func = _session_manager.get_session(session_id).download(name, prev, graph_name,
                                                                                         rank_id)

    # BUGFIX: this generator was missing, leaving `file_send` undefined below.
    # Stream the file in 50 MB chunks so large tensors never sit in memory whole.
    def file_send():
        with open(file_path, 'rb') as fb:
            while True:
                data = fb.read(50 * 1024 * 1024)
                if not data:
                    break
                yield data

    response = Response(file_send(), content_type='application/octet-stream')
    response.headers["Content-disposition"] = 'attachment; filename=%s' % file_name
    # Clean up the temporary file once the response object is garbage-collected.
    weakref.finalize(response, clean_func,)
    return response
@BLUEPRINT.route("/debugger/sessions", methods=["POST"])
def create_session():
    """
    Return the session id for the dump dir, creating the session if needed.

    Returns:
        str, session id.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions
    """
    body = _read_post_request(request)
    return _wrap_reply(_session_manager.create_session,
                       body.get('session_type'),
                       body.get('dump_dir'))
@BLUEPRINT.route("/debugger/sessions", methods=["GET"])
def get_train_jobs():
    """
    List the currently active debugger sessions.

    Examples:
        >>> GET http://xxxx/v1/mindinsight/debugger/sessions
    """
    return _wrap_reply(_session_manager.get_train_jobs)
@BLUEPRINT.route("/debugger/sessions/<session_id>/delete", methods=["POST"])
def delete_session(session_id):
    """
    Delete the session identified by session_id.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxx/delete
    """
    return _wrap_reply(_session_manager.delete_session, session_id)
@BLUEPRINT.route("/debugger/sessions/<session_id>/stacks", methods=["GET"])
def get_stack_infos(session_id):
    """
    Query stack infos matching a key word, with limit/offset paging.

    Examples:
        >>> GET /v1/mindsight/debugger/sessions/<session_id>/stacks?key_word=xxx&offset=0
    """
    key_word = _unquote_param(request.args.get('key_word'))
    # Use to_int (ParamTypeError) instead of bare int() (uncaught ValueError)
    # for consistency with every other handler in this blueprint.
    limit = to_int(request.args.get('limit', 10), 'limit')
    offset = to_int(request.args.get('offset', 0), 'offset')
    filter_condition = {
        'pattern': key_word,
        'limit': limit,
        'offset': offset
    }
    reply = _wrap_reply(_session_manager.get_session(session_id).get_stack_infos, filter_condition)
    return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/ranks/<rank_id>/graph-runs", methods=["GET"])
def get_graph_runs(session_id, rank_id):
    """
    Get graph runs for one rank of a session.

    Examples:
        >>> GET /v1/mindsight/debugger/sessions/<session_id>/ranks/<rank_id>/graph-runs
    """
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.get_graph_runs, to_int(rank_id, 'rank_id'))
# Module-level singleton shared by every route handler in this blueprint.
_session_manager = SessionManager.get_instance()
def init_module(app):
    """
    Init module entry: register the debugger blueprint on the application.

    Args:
        app (Flask): The application obj.
    """
    app.register_blueprint(BLUEPRINT)
| 30.922348 | 119 | 0.673853 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Debugger restful api."""
import json
import weakref
from urllib.parse import unquote
from flask import Blueprint, jsonify, request, Response
from mindinsight.conf import settings
from mindinsight.debugger.session_manager import SessionManager
from mindinsight.utils.exceptions import ParamMissError, ParamValueError, ParamTypeError
BLUEPRINT = Blueprint("debugger", __name__,
url_prefix=settings.URL_PATH_PREFIX + settings.API_PREFIX)
def _unquote_param(param):
"""
Decode parameter value.
Args:
param (str): Encoded param value.
Returns:
str, decoded param value.
"""
if isinstance(param, str):
try:
param = unquote(param, errors='strict')
except UnicodeDecodeError:
raise ParamValueError('Unquote error with strict mode.')
return param
def _read_post_request(post_request):
"""
Extract the body of post request.
Args:
post_request (object): The post request.
Returns:
dict, the deserialized body of request.
"""
body = post_request.stream.read()
try:
body = json.loads(body if body else "{}")
except Exception:
raise ParamValueError("Json data parse failed.")
return body
def to_int(param, param_name):
    """
    Convert `param` to int.

    Raises:
        ParamTypeError: If the value cannot be parsed as an integer.
    """
    try:
        return int(param)
    except ValueError:
        raise ParamTypeError(param_name, 'Integer')
def _wrap_reply(func, *args, **kwargs):
    """Call `func` with the given arguments and serialize its result as a JSON response."""
    reply = func(*args, **kwargs)
    return jsonify(reply)
@BLUEPRINT.route("/debugger/sessions/<session_id>/poll-data", methods=["GET"])
def poll_data(session_id):
    """
    Poll for data updated since position `pos` to refresh the UI.

    Returns:
        str, the updated data.

    Examples:
        >>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/poll-data?pos=xx
    """
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.poll_data, request.args.get('pos'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/search", methods=["GET"])
def search(session_id):
    """
    Search nodes in specified watchpoint.

    Returns:
        str, the required data.

    Examples:
        >>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/search?name=mock_name&watch_point_id=1
    """
    args = request.args
    filter_condition = {
        'name': args.get('name'),
        'graph_name': args.get('graph_name'),
        'watch_point_id': to_int(args.get('watch_point_id', 0), 'watch_point_id'),
        'node_category': args.get('node_category'),
        'rank_id': to_int(args.get('rank_id', 0), 'rank_id'),
        'stack_pattern': _unquote_param(args.get('stack_info_key_word')),
    }
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.search, filter_condition)
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-comparisons", methods=["GET"])
def tensor_comparisons(session_id):
    """
    Get tensor comparisons.

    Returns:
        str, the required data.

    Examples:
        >>> Get http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-comparisons
    """
    args = request.args
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.tensor_comparisons,
                       args.get('name'),
                       _unquote_param(args.get('shape')),
                       args.get('detail', 'data'),
                       args.get('tolerance', '0'),
                       to_int(args.get('rank_id', 0), 'rank_id'),
                       args.get('graph_name', ''))
@BLUEPRINT.route("/debugger/sessions/<session_id>/retrieve", methods=["POST"])
def retrieve(session_id):
    """
    Retrieve data according to mode and params.

    Returns:
        str, the required data.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/retrieve
    """
    body = _read_post_request(request)
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.retrieve, body.get('mode'), body.get('params'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-history", methods=["POST"])
def retrieve_tensor_history(session_id):
    """
    Retrieve the value history of a named tensor.

    Returns:
        str, the required data.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-history
    """
    body = _read_post_request(request)
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.retrieve_tensor_history,
                       body.get('name'),
                       body.get('graph_name'),
                       to_int(body.get('rank_id', 0), 'rank_id'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensors", methods=["GET"])
def retrieve_tensor_value(session_id):
    """
    Retrieve tensor value according to name and shape.

    Returns:
        str, the required data.

    Examples:
        >>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensors?name=tensor_name&detail=data&shape=[1,1,:,:]
    """
    args = request.args
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.retrieve_tensor_value,
                       args.get('name'),
                       args.get('detail'),
                       _unquote_param(args.get('shape')),
                       args.get('graph_name'),
                       args.get('prev') == 'true',
                       to_int(args.get('rank_id', 0), 'rank_id'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/create-watchpoint", methods=["POST"])
def create_watchpoint(session_id):
    """
    Create watchpoint.

    Returns:
        str, watchpoint id.

    Raises:
        MindInsightException: If method fails to be called.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/create-watchpoint
    """
    params = _read_post_request(request)
    # The session API expects 'watch_condition' rather than the wire key 'condition'.
    params['watch_condition'] = params.pop('condition', None)
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.create_watchpoint, params)
@BLUEPRINT.route("/debugger/sessions/<session_id>/update-watchpoint", methods=["POST"])
def update_watchpoint(session_id):
    """
    Update watchpoint.

    Returns:
        str, reply message.

    Raises:
        MindInsightException: If method fails to be called.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/update-watchpoint
    """
    params = _read_post_request(request)
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.update_watchpoint, params)
@BLUEPRINT.route("/debugger/sessions/<session_id>/delete-watchpoint", methods=["POST"])
def delete_watchpoint(session_id):
    """
    Delete watchpoint.

    Returns:
        str, reply message.

    Raises:
        MindInsightException: If method fails to be called.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/delete-watchpoint
    """
    watch_point_id = _read_post_request(request).get('watch_point_id')
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.delete_watchpoint, watch_point_id)
@BLUEPRINT.route("/debugger/sessions/<session_id>/control", methods=["POST"])
def control(session_id):
    """
    Control request.

    Returns:
        str, reply message.

    Raises:
        MindInsightException: If method fails to be called.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/control
    """
    params = _read_post_request(request)
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.control, params)
@BLUEPRINT.route("/debugger/sessions/<session_id>/recheck", methods=["POST"])
def recheck(session_id):
    """
    Recheck request.

    Returns:
        str, reply message.

    Raises:
        MindInsightException: If method fails to be called.

    Examples:
        >>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/recheck
    """
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.recheck)
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-graphs", methods=["GET"])
def retrieve_tensor_graph(session_id):
    """
    Retrieve the graph around a named tensor.

    Returns:
        str, the required data.

    Examples:
        >>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-graphs?tensor_name=xxx&graph_name=xxx
    """
    args = request.args
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.retrieve_tensor_graph,
                       args.get('tensor_name'),
                       args.get('graph_name'),
                       to_int(args.get('rank_id', 0), 'rank_id'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-hits", methods=["GET"])
def retrieve_tensor_hits(session_id):
    """
    Retrieve watchpoint hits of a named tensor.

    Returns:
        str, the required data.

    Examples:
        >>> GET http://xxxx/v1/mindinsight/debugger/sessions/xxxx/tensor-hits?tensor_name=xxx&graph_name=xxx
    """
    args = request.args
    session = _session_manager.get_session(session_id)
    return _wrap_reply(session.retrieve_tensor_hits,
                       args.get('tensor_name'),
                       args.get('graph_name'),
                       to_int(args.get('rank_id', 0), 'rank_id'))
@BLUEPRINT.route("/debugger/sessions/<session_id>/search-watchpoint-hits", methods=["POST"])
def search_watchpoint_hits(session_id):
"""
Search watchpoint hits by group condition.
Returns:
str, the required data.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/sessions/xxxx/search-watchpoint-hits
"""
body = _read_post_request(request)
group_condition = body.get('group_condition')
reply = _wrap_reply(_session_manager.get_session(session_id).search_watchpoint_hits, group_condition)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/condition-collections", methods=["GET"])
def get_condition_collections(session_id):
"""Get condition collections."""
reply = _wrap_reply(_session_manager.get_session(session_id).get_condition_collections)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/set-recommended-watch-points", methods=["POST"])
def set_recommended_watch_points(session_id):
"""Set recommended watch points."""
body = _read_post_request(request)
request_body = body.get('requestBody')
if request_body is None:
raise ParamMissError('requestBody')
set_recommended = request_body.get('set_recommended')
reply = _wrap_reply(_session_manager.get_session(session_id).set_recommended_watch_points, set_recommended)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-files/load", methods=["POST"])
def load(session_id):
"""
Retrieve tensor value according to name and shape.
Returns:
str, the required data.
Examples:
>>> GET http://xxx/v1/mindinsight/debugger/sessions/xxxx/tensor-files/load
"""
body = _read_post_request(request)
name = body.get('name')
graph_name = body.get('graph_name')
rank_id = to_int(body.get('rank_id', 0), 'rank_id')
prev = bool(body.get('prev') == 'true')
reply = _wrap_reply(_session_manager.get_session(session_id).load, name, prev, graph_name, rank_id)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/tensor-files/download", methods=["GET"])
def download(session_id):
"""
Retrieve tensor value according to name and shape.
Returns:
str, the required data.
Examples:
>>> GET http://xxx/v1/mindinsight/debugger/sessions/xxx/tensor-files/download?name=name&graph_name=xxx&prev=xxx
"""
name = request.args.get('name')
graph_name = request.args.get('graph_name')
rank_id = to_int(request.args.get('rank_id', 0), 'rank_id')
prev = bool(request.args.get('prev') == 'true')
file_name, file_path, clean_func = _session_manager.get_session(session_id).download(name, prev, graph_name,
rank_id)
def file_send():
with open(file_path, 'rb') as fb:
while True:
data = fb.read(50 * 1024 * 1024)
if not data:
break
yield data
response = Response(file_send(), content_type='application/octet-stream')
response.headers["Content-disposition"] = 'attachment; filename=%s' % file_name
weakref.finalize(response, clean_func,)
return response
@BLUEPRINT.route("/debugger/sessions", methods=["POST"])
def create_session():
"""
Get session id if session exist, else create a session.
Returns:
str, session id.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/sessions
"""
body = _read_post_request(request)
summary_dir = body.get('dump_dir')
session_type = body.get('session_type')
reply = _wrap_reply(_session_manager.create_session, session_type, summary_dir)
return reply
@BLUEPRINT.route("/debugger/sessions", methods=["GET"])
def get_train_jobs():
"""
Check the current active sessions.
Examples:
>>> GET http://xxxx/v1/mindinsight/debugger/sessions
"""
reply = _wrap_reply(_session_manager.get_train_jobs)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/delete", methods=["POST"])
def delete_session(session_id):
"""
Delete session by session id.
Examples:
>>> POST http://xxxx/v1/mindinsight/debugger/xxx/delete-session
"""
reply = _wrap_reply(_session_manager.delete_session, session_id)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/stacks", methods=["GET"])
def get_stack_infos(session_id):
"""
Get stack infos.
Examples:
>>> GET /v1/mindsight/debugger/sessions/<session_id>/stacks?key_word=xxx&offset=0
"""
key_word = _unquote_param(request.args.get('key_word'))
limit = int(request.args.get('limit', 10))
offset = int(request.args.get('offset', 0))
filter_condition = {
'pattern': key_word,
'limit': limit,
'offset': offset
}
reply = _wrap_reply(_session_manager.get_session(session_id).get_stack_infos, filter_condition)
return reply
@BLUEPRINT.route("/debugger/sessions/<session_id>/ranks/<rank_id>/graph-runs", methods=["GET"])
def get_graph_runs(session_id, rank_id):
"""
Get graph runs.
Examples:
>>> GET /v1/mindsight/debugger/sessions/<session_id>/ranks/<rank_id>/graph-runs
"""
session = _session_manager.get_session(session_id)
rank_id = to_int(rank_id, 'rank_id')
reply = _wrap_reply(session.get_graph_runs, rank_id)
return reply
_session_manager = SessionManager.get_instance()
def init_module(app):
"""
Init module entry.
Args:
app (Flask): The application obj.
"""
app.register_blueprint(BLUEPRINT)
| 192 | 0 | 27 |
5f60b556afcf62edbbd2b9abfbe75e3d7cbeb098 | 593 | py | Python | src/Saleem/cp_about.py | networkdynamics/PuckIt | 08542c324440919960198eae4ca8855f2ac43134 | [
"Apache-2.0"
] | 1 | 2018-05-16T20:58:32.000Z | 2018-05-16T20:58:32.000Z | src/Saleem/cp_about.py | networkdynamics/PuckIt | 08542c324440919960198eae4ca8855f2ac43134 | [
"Apache-2.0"
] | null | null | null | src/Saleem/cp_about.py | networkdynamics/PuckIt | 08542c324440919960198eae4ca8855f2ac43134 | [
"Apache-2.0"
] | null | null | null | #Imports
import os
from shutil import copyfile

# Source dump of per-subreddit metadata and the destination sample tree.
data_path = '/home/ndg/projects/shared_datasets/PuckIt/FACITdata'
out_path = '/home/ndg/projects/shared_datasets/PuckIt/sample2/data'
sub_file = '/home/ndg/users/hsalee/PuckIt/resources/large_sample.txt'

# Read subreddit names (one per line) and sort them case-insensitively.
with open(sub_file, 'r') as fin:
    all_subs = fin.readlines()
all_subs = [x.strip() for x in all_subs]
all_subs = sorted(all_subs, key=lambda s: s.lower())

# Copy each subreddit's metadata file into its per-subreddit folder.
for sub in all_subs:
    # BUGFIX: `print sub` is Python-2-only syntax; parenthesized form works on 2 and 3.
    print(sub)
    src_file = os.path.join(data_path, sub + '.json')
    dst_file = os.path.join(out_path, sub, 'about.json')
    copyfile(src_file, dst_file)
| 25.782609 | 69 | 0.728499 | #Imports
import os
from shutil import copyfile
data_path = '/home/ndg/projects/shared_datasets/PuckIt/FACITdata'
out_path = '/home/ndg/projects/shared_datasets/PuckIt/sample2/data'
sub_file = '/home/ndg/users/hsalee/PuckIt/resources/large_sample.txt'
with open(sub_file, 'r') as fin:
all_subs = fin.readlines()
all_subs = [x.strip() for x in all_subs]
all_subs = sorted(all_subs, key=lambda s: s.lower())
for sub in all_subs:
print sub
src_file = os.path.join(data_path, sub+'.json')
dst_file = os.path.join(out_path, sub, 'about.json')
copyfile(src_file, dst_file)
| 0 | 0 | 0 |
adcd6a253117e46ca0bc032ee344ea7f5b1b7f05 | 37 | py | Python | util/__init__.py | AlexMabry/aoc20 | ce3fa021134b91b96d50b2f73be66a7516f9809e | [
"MIT"
] | null | null | null | util/__init__.py | AlexMabry/aoc20 | ce3fa021134b91b96d50b2f73be66a7516f9809e | [
"MIT"
] | null | null | null | util/__init__.py | AlexMabry/aoc20 | ce3fa021134b91b96d50b2f73be66a7516f9809e | [
"MIT"
] | null | null | null | from .decode_list import decode_list
| 18.5 | 36 | 0.864865 | from .decode_list import decode_list
| 0 | 0 | 0 |
ac00c03f8eeda54d771af763cbd67d327c9206b6 | 3,506 | py | Python | src/evaluate.py | chunplusplus/crnn-pytorch | d9d185ec8b7f4a344cf7ab0f457783e579eef704 | [
"MIT"
] | 114 | 2020-10-17T08:00:47.000Z | 2022-03-14T17:36:04.000Z | src/evaluate.py | chunplusplus/crnn-pytorch | d9d185ec8b7f4a344cf7ab0f457783e579eef704 | [
"MIT"
] | 6 | 2021-08-22T13:04:41.000Z | 2022-02-14T03:25:16.000Z | src/evaluate.py | chunplusplus/crnn-pytorch | d9d185ec8b7f4a344cf7ab0f457783e579eef704 | [
"MIT"
] | 35 | 2020-10-17T11:44:18.000Z | 2022-03-18T04:28:53.000Z | import torch
from torch.utils.data import DataLoader
from torch.nn import CTCLoss
from tqdm import tqdm
from dataset import Synth90kDataset, synth90k_collate_fn
from model import CRNN
from ctc_decoder import ctc_decode
from config import evaluate_config as config
torch.backends.cudnn.enabled = False
if __name__ == '__main__':
main()
| 32.462963 | 89 | 0.6332 | import torch
from torch.utils.data import DataLoader
from torch.nn import CTCLoss
from tqdm import tqdm
from dataset import Synth90kDataset, synth90k_collate_fn
from model import CRNN
from ctc_decoder import ctc_decode
from config import evaluate_config as config
torch.backends.cudnn.enabled = False
def evaluate(crnn, dataloader, criterion,
             max_iter=None, decode_method='beam_search', beam_size=10):
    """Run CRNN inference over a dataloader and accumulate CTC loss and accuracy.

    Args:
        crnn: CRNN model to evaluate (put into eval mode here).
        dataloader: yields (images, targets, target_lengths) batches; `targets`
            is the concatenation of all label sequences in the batch, sliced
            back apart below using `target_lengths`.
        criterion: CTC loss called as (log_probs, targets, input_lengths, target_lengths).
        max_iter (int, optional): if given, stop after this many batches.
        decode_method (str): decoding strategy forwarded to `ctc_decode`.
        beam_size (int): beam width forwarded to `ctc_decode`.

    Returns:
        dict with 'loss' (total loss / sample count), 'acc' (exact-match
        sequence accuracy) and 'wrong_cases' (list of (real, pred) pairs).
    """
    crnn.eval()
    tot_count = 0
    tot_loss = 0
    tot_correct = 0
    wrong_cases = []
    pbar_total = max_iter if max_iter else len(dataloader)
    pbar = tqdm(total=pbar_total, desc="Evaluate")
    with torch.no_grad():
        for i, data in enumerate(dataloader):
            if max_iter and i >= max_iter:
                break
            # Infer the device from the model parameters so data follows the model.
            device = 'cuda' if next(crnn.parameters()).is_cuda else 'cpu'
            images, targets, target_lengths = [d.to(device) for d in data]
            logits = crnn(images)
            log_probs = torch.nn.functional.log_softmax(logits, dim=2)
            batch_size = images.size(0)
            # Every sample shares the same input length: the model's sequence dim.
            input_lengths = torch.LongTensor([logits.size(0)] * batch_size)
            loss = criterion(log_probs, targets, input_lengths, target_lengths)
            preds = ctc_decode(log_probs, method=decode_method, beam_size=beam_size)
            reals = targets.cpu().numpy().tolist()
            target_lengths = target_lengths.cpu().numpy().tolist()
            tot_count += batch_size
            tot_loss += loss.item()
            # Slice the flat target list back into per-sample label sequences.
            target_length_counter = 0
            for pred, target_length in zip(preds, target_lengths):
                real = reals[target_length_counter:target_length_counter + target_length]
                target_length_counter += target_length
                if pred == real:
                    tot_correct += 1
                else:
                    wrong_cases.append((real, pred))
            pbar.update(1)
    pbar.close()
    evaluation = {
        'loss': tot_loss / tot_count,
        'acc': tot_correct / tot_count,
        'wrong_cases': wrong_cases
    }
    return evaluation
def main():
    """Load the CRNN checkpoint named in the config and evaluate it on the test split."""
    eval_batch_size = config['eval_batch_size']
    cpu_workers = config['cpu_workers']
    reload_checkpoint = config['reload_checkpoint']
    img_height = config['img_height']
    img_width = config['img_width']
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'device: {device}')
    test_dataset = Synth90kDataset(root_dir=config['data_dir'], mode='test',
                                   img_height=img_height, img_width=img_width)
    test_loader = DataLoader(
        dataset=test_dataset,
        batch_size=eval_batch_size,
        shuffle=False,
        num_workers=cpu_workers,
        collate_fn=synth90k_collate_fn)
    # +1 for the CTC blank label, which is not part of the character map.
    num_class = len(Synth90kDataset.LABEL2CHAR) + 1
    crnn = CRNN(1, img_height, img_width, num_class,
                map_to_seq_hidden=config['map_to_seq_hidden'],
                rnn_hidden=config['rnn_hidden'],
                leaky_relu=config['leaky_relu'])
    crnn.load_state_dict(torch.load(reload_checkpoint, map_location=device))
    crnn.to(device)
    # 'sum' reduction so evaluate() can divide by the sample count itself.
    criterion = CTCLoss(reduction='sum')
    criterion.to(device)
    evaluation = evaluate(crnn, test_loader, criterion,
                          decode_method=config['decode_method'],
                          beam_size=config['beam_size'])
    print('test_evaluation: loss={loss}, acc={acc}'.format(**evaluation))
if __name__ == '__main__':
    main()
| 3,115 | 0 | 46 |
157319d0d3fd45bdc9cba48e1268e8bb8ad5bec4 | 2,364 | py | Python | ml_project/src/models/train_model.py | made-ml-in-prod-2021/illumaria | e93a2818910ecbb0545097ee39c2c5e7fa37bfaa | [
"MIT"
] | null | null | null | ml_project/src/models/train_model.py | made-ml-in-prod-2021/illumaria | e93a2818910ecbb0545097ee39c2c5e7fa37bfaa | [
"MIT"
] | 3 | 2021-03-31T14:33:41.000Z | 2021-06-22T07:56:21.000Z | ml_project/src/models/train_model.py | made-ml-in-prod-2021/illumaria | e93a2818910ecbb0545097ee39c2c5e7fa37bfaa | [
"MIT"
] | 3 | 2021-03-31T15:57:16.000Z | 2021-06-21T19:27:40.000Z | import logging
import pickle
from typing import Dict, Optional, Union, NoReturn
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.pipeline import Pipeline
from src.entities.train_params import TrainParams
logger = logging.getLogger(__name__)
ClassificationModel = Union[RandomForestClassifier, LogisticRegression]
def train_model(
    features: pd.DataFrame, target: pd.Series, train_params: TrainParams
) -> ClassificationModel:
    """
    Train the model.

    :param features: features to train on
    :param target: target labels
    :param train_params: training parameters
    :return: trained model class
    :raises NotImplementedError: if ``train_params.model_type`` is not supported
    """
    if train_params.model_type == "RandomForestClassifier":
        model = RandomForestClassifier(
            n_estimators=100, random_state=train_params.random_state
        )
    elif train_params.model_type == "LogisticRegression":
        model = LogisticRegression(
            solver="liblinear", random_state=train_params.random_state
        )
    else:
        # Name the offending value so configuration errors are easy to diagnose.
        raise NotImplementedError(
            f"Unsupported model type: {train_params.model_type!r}"
        )
    model.fit(features, target)
    logger.info("Model successfully fitted.")
    return model
def evaluate_model(predicts: np.ndarray, target: pd.Series) -> Dict[str, float]:
    """
    Evaluate predictions against the ground truth.

    :param predicts: predicted labels
    :param target: target labels
    :return: a dict of type {'metric': value} with accuracy and ROC AUC
    """
    accuracy = accuracy_score(target, predicts)
    roc_auc = roc_auc_score(target, predicts)
    metrics = {"accuracy": accuracy, "roc_auc": roc_auc}
    logger.info(f"Metrics are: {metrics}")
    return metrics
def serialize_model(model: ClassificationModel, path: str, transformer: Optional[ColumnTransformer] = None) -> str:
    """
    Save a (transformer, model) pipeline to a pickle file.

    :param model: the model to save
    :param path: the file to save to
    :param transformer: the feature transformer to bundle with the model
    :return: the path to the saved file
    """
    # BUGFIX: the return annotation was `NoReturn`, which means "never returns";
    # this function returns the path, so the correct annotation is `str`.
    pipeline = Pipeline((
        [
            ("transformer", transformer),
            ("model", model),
        ]
    ))
    with open(path, "wb") as fout:
        pickle.dump(pipeline, fout)
    logger.info(f"Pipeline saved to {path}")
    return path
| 30.307692 | 120 | 0.697124 | import logging
import pickle
from typing import Dict, Optional, Union, NoReturn
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.pipeline import Pipeline
from src.entities.train_params import TrainParams
logger = logging.getLogger(__name__)
ClassificationModel = Union[RandomForestClassifier, LogisticRegression]
def train_model(
features: pd.DataFrame, target: pd.Series, train_params: TrainParams
) -> ClassificationModel:
"""
Train the model.
:param features: features to train on
:param target: target labels
:param train_params: training parameters
:return: trained model class
"""
if train_params.model_type == "RandomForestClassifier":
model = RandomForestClassifier(
n_estimators=100, random_state=train_params.random_state
)
elif train_params.model_type == "LogisticRegression":
model = LogisticRegression(
solver="liblinear", random_state=train_params.random_state
)
else:
raise NotImplementedError()
model.fit(features, target)
logger.info("Model successfully fitted.")
return model
def evaluate_model(predicts: np.ndarray, target: pd.Series) -> Dict[str, float]:
"""
Evaluate model and return the metrics.
:param predicts: predicted labels
:param target: target labels
:return: a dict of type {'metric': value}
"""
metrics = {
"accuracy": accuracy_score(target, predicts),
"roc_auc": roc_auc_score(target, predicts),
}
logger.info(f"Metrics are: {metrics}")
return metrics
def serialize_model(model: ClassificationModel, path: str, transformer: Optional[ColumnTransformer] = None) -> NoReturn:
"""
Save model to pickle file.
:param transformer: the transformer to save
:param model: the model to save
:param path: the file to save to
:return: the path to saved file
"""
pipeline = Pipeline((
[
("transformer", transformer),
("model", model),
]
))
with open(path, "wb") as fout:
pickle.dump(pipeline, fout)
logger.info(f"Pipeline saved to {path}")
return path
| 0 | 0 | 0 |
f1e8e4d8c206f17147678886472f3d334eb0a335 | 310 | py | Python | catalog/bindings/gmd/md_georectified.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/md_georectified.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/md_georectified.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from bindings.gmd.md_georectified_type import MdGeorectifiedType
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
| 25.833333 | 64 | 0.758065 | from dataclasses import dataclass
from bindings.gmd.md_georectified_type import MdGeorectifiedType
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
class MdGeorectified(MdGeorectifiedType):
    """Concrete ``MD_Georectified`` element bound to the ISO 19139 gmd namespace."""
    class Meta:
        # XML element name and namespace used by the xsdata (de)serializer.
        name = "MD_Georectified"
        namespace = "http://www.isotc211.org/2005/gmd"
| 0 | 124 | 22 |
2741be49bfb071866523babe3a4ce3dc70e7559f | 3,087 | py | Python | src/abaqus/Region/Stringer.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | 7 | 2022-01-21T09:15:45.000Z | 2022-02-15T09:31:58.000Z | src/abaqus/Region/Stringer.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | src/abaqus/Region/Stringer.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | from ..BasicGeometry.Edge import Edge
from ..BasicGeometry.EdgeArray import EdgeArray
from ..Mesh.MeshEdge import MeshEdge
from ..Mesh.MeshElementArray import MeshElementArray
class Stringer:
    """The Stringer object stores information on stringer reinforcements created on entities.

    Attributes
    ----------
    elements: MeshElementArray
        A :py:class:`~abaqus.Mesh.MeshElementArray.MeshElementArray` object.
    edges: EdgeArray
        An :py:class:`~abaqus.BasicGeometry.EdgeArray.EdgeArray` object.

    Notes
    -----
    This object can be accessed by:

    .. code-block:: python

        import part
        mdb.models[name].parts[name].stringers[name]
        import assembly
        mdb.models[name].rootAssembly.allInstances[name].stringers[name]
        mdb.models[name].rootAssembly.instances[name].stringers[name]
        mdb.models[name].rootAssembly.stringers[name]
    """

    # A MeshElementArray object.
    elements: MeshElementArray = MeshElementArray([])

    # An EdgeArray object.
    edges: EdgeArray = EdgeArray([])

    # NOTE(review): the methods below are interface stubs (`pass` bodies) — the
    # real work happens inside Abaqus; only the signatures and docs matter here.
    def __init__(self, name: str, edges: tuple[Edge, ...] = (), elementEdges: tuple[MeshEdge, ...] = ()):
        """This method creates a stringer from a sequence of objects in a model database. At least
        one of the optional arguments needs to be specified.

        Notes
        -----
        This function can be accessed by:

        .. code-block:: python

            mdb.models[name].parts[*name*].Stringer

        Parameters
        ----------
        name
            A String specifying the repository key. The default value is an empty string.
        edges
            A sequence of Edge objects specifying the edges on which stringers should be created.
            Applicable to three and two dimensional parts.
        elementEdges
            A sequence of MeshEdge objects specifying the mesh edges on which stringers should be
            created. Applicable to three and two dimensional parts.

        Returns
        -------
            A Stringer object.
        """
        pass

    def EditStringer(self, name: str, edges: tuple[Edge, ...] = (), elementEdges: tuple[MeshEdge, ...] = ()):
        """This method modifies underlying entities of the selected stringer. At least one of the
        optional arguments needs to be specified.

        Notes
        -----
        This function can be accessed by:

        .. code-block:: python

            mdb.models[name].parts[*name*].Stringer

        Parameters
        ----------
        name
            A String specifying the repository key. The default value is an empty string.
        edges
            A sequence of Edge objects specifying the edges on which stringers should be created.
            Applicable to three and two dimensional parts.
        elementEdges
            A sequence of MeshEdge objects specifying the mesh edges on which stringers should be
            created. Applicable to three and two dimensional parts.

        Returns
        -------
            A Stringer object.
        """
        pass
| 32.494737 | 99 | 0.628118 | from ..BasicGeometry.Edge import Edge
from ..BasicGeometry.EdgeArray import EdgeArray
from ..Mesh.MeshEdge import MeshEdge
from ..Mesh.MeshElementArray import MeshElementArray
class Stringer:
"""The Stringer object stores information on stringer reinforcements created on entities.
Attributes
----------
elements: MeshElementArray
A :py:class:`~abaqus.Mesh.MeshElementArray.MeshElementArray` object.
edges: EdgeArray
An :py:class:`~abaqus.BasicGeometry.EdgeArray.EdgeArray` object.
Notes
-----
This object can be accessed by:
.. code-block:: python
import part
mdb.models[name].parts[name].stringers[name]
import assembly
mdb.models[name].rootAssembly.allInstances[name].stringers[name]
mdb.models[name].rootAssembly.instances[name].stringers[name]
mdb.models[name].rootAssembly.stringers[name]
"""
# A MeshElementArray object.
elements: MeshElementArray = MeshElementArray([])
# An EdgeArray object.
edges: EdgeArray = EdgeArray([])
def __init__(self, name: str, edges: tuple[Edge] = (), elementEdges: tuple[MeshEdge] = ()):
"""This method creates a stringer from a sequence of objects in a model database. At least
one of the optional arguments needs to be specified.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].parts[*name*].Stringer
Parameters
----------
name
A String specifying the repository key. The default value is an empty string.
edges
A sequence of Edge objects specifying the edges on which stringers should be created.
Applicable to three and two dimensional parts.
elementEdges
A sequence of MeshEdge objects specifying the mesh edges on which stringers should be
created. Applicable to three and two dimensional parts.
Returns
-------
A Stringer object.
"""
pass
def EditStringer(self, name: str, edges: tuple[Edge] = (), elementEdges: tuple[MeshEdge] = ()):
"""This method modifies underlying entities of the selected stringer. At least one of the
optional arguments needs to be specified.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].parts[*name*].Stringer
Parameters
----------
name
A String specifying the repository key. The default value is an empty string.
edges
A sequence of Edge objects specifying the edges on which stringers should be created.
Applicable to three and two dimensional parts.
elementEdges
A sequence of MeshEdge objects specifying the mesh edges on which stringers should be
created. Applicable to three and two dimensional parts.
Returns
-------
A Stringer object.
"""
pass
| 0 | 0 | 0 |
59d26d71f071d667a63f64f727be0051ab2ef45a | 1,009 | py | Python | gooddata-sdk/gooddata_sdk/catalog/permissions/service.py | hkad98/gooddata-python-sdk | 64942080ecb44c2d8e914e57f7a591daa6cca205 | [
"MIT"
] | null | null | null | gooddata-sdk/gooddata_sdk/catalog/permissions/service.py | hkad98/gooddata-python-sdk | 64942080ecb44c2d8e914e57f7a591daa6cca205 | [
"MIT"
] | null | null | null | gooddata-sdk/gooddata_sdk/catalog/permissions/service.py | hkad98/gooddata-python-sdk | 64942080ecb44c2d8e914e57f7a591daa6cca205 | [
"MIT"
] | null | null | null | # (C) 2022 GoodData Corporation
from gooddata_sdk import GoodDataApiClient
from gooddata_sdk.catalog.catalog_service_base import CatalogServiceBase
from gooddata_sdk.catalog.permissions.declarative_model.permission import CatalogDeclarativeWorkspacePermissions
| 50.45 | 120 | 0.822597 | # (C) 2022 GoodData Corporation
from gooddata_sdk import GoodDataApiClient
from gooddata_sdk.catalog.catalog_service_base import CatalogServiceBase
from gooddata_sdk.catalog.permissions.declarative_model.permission import CatalogDeclarativeWorkspacePermissions
class CatalogPermissionService(CatalogServiceBase):
def __init__(self, api_client: GoodDataApiClient) -> None:
super(CatalogPermissionService, self).__init__(api_client)
def get_declarative_permissions(self, workspace_id: str) -> CatalogDeclarativeWorkspacePermissions:
return CatalogDeclarativeWorkspacePermissions.from_api(self._layout_api.get_workspace_permissions(workspace_id))
def set_declarative_permissions(
self, workspace_id: str, declarative_workspace_permissions: CatalogDeclarativeWorkspacePermissions
) -> None:
self._layout_api.set_workspace_permissions(
workspace_id=workspace_id, declarative_workspace_permissions=declarative_workspace_permissions.to_api()
)
| 614 | 30 | 103 |
3322e8378b7b5144d527a23ae37d3377dbc68769 | 1,551 | py | Python | testslide/lib.py | oxo42/TestSlide | 584ffe03c7968ca2af9df847985a61fa7daed670 | [
"MIT"
] | null | null | null | testslide/lib.py | oxo42/TestSlide | 584ffe03c7968ca2af9df847985a61fa7daed670 | [
"MIT"
] | null | null | null | testslide/lib.py | oxo42/TestSlide | 584ffe03c7968ca2af9df847985a61fa7daed670 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import typeguard
| 33.717391 | 80 | 0.661509 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import typeguard
def _bail_if_private(candidate: str, allow_private: False):
if (
candidate.startswith("_")
and not allow_private
and not (candidate.startswith("__") and candidate.endswith("__"))
):
raise ValueError(
f"It's disencouraged to patch/mock private interfaces.\n"
"This would result in way too coupled tests and implementation. "
"Please consider using patterns like dependency injection instead. "
"If you really need to do this use the allow_private=True argument."
)
def _validate_function_signature(argspec: inspect.FullArgSpec, args, kwargs):
type_errs = []
for idx in range(0, len(args)):
if argspec.args:
arg = argspec.args[idx]
try:
__validate_argument_type(argspec.annotations, arg, args[idx])
except TypeError as te:
type_errs.append(te)
for k, v in kwargs.items():
try:
__validate_argument_type(argspec.annotations, k, v)
except TypeError as te:
type_errs.append(te)
return type_errs
def __validate_argument_type(annotations, argname, value):
type_information = annotations.get(argname)
if type_information:
typeguard.check_type(argname, value, type_information)
| 1,246 | 0 | 69 |
841346b433c957571d99bb0450139a2882c9666f | 3,854 | py | Python | Game.py | ai006/SuperMario_Python | a1562c8a61ca2de66b54850de72773a435c27447 | [
"MIT"
] | 3 | 2019-01-30T04:41:21.000Z | 2019-09-26T13:13:17.000Z | Game.py | ai006/SuperMario_Python | a1562c8a61ca2de66b54850de72773a435c27447 | [
"MIT"
] | null | null | null | Game.py | ai006/SuperMario_Python | a1562c8a61ca2de66b54850de72773a435c27447 | [
"MIT"
] | null | null | null | import pygame
import time
import Sprite
from pygame.locals import*
from time import sleep
from Sprite import Sprite
from myVegeta import Vegeta
from myTube import Tube
from myGoomba import Goomba
from myFireball import Fireball
print("Use the arrow keys to move. Press Esc to quit.")
pygame.init()
m = Model()
v = View(m)
c = Controller(m)
while c.keep_going:
c.update()
m.update()
v.update()
sleep(0.04)
print("Goodbye") | 31.590164 | 142 | 0.543332 | import pygame
import time
import Sprite
from pygame.locals import*
from time import sleep
from Sprite import Sprite
from myVegeta import Vegeta
from myTube import Tube
from myGoomba import Goomba
from myFireball import Fireball
class Model():
def __init__(self):
self.dest_x = 0
self.dest_y = 0
self.frame_count = 0
self.sprites = []
self.vegeta = Vegeta(self)
self.sprites.append(self.vegeta)
self.tubes = Tube(self,250,300)
self.sprites.append(self.tubes)
self.tubes = Tube(self,600,300)
self.sprites.append(self.tubes)
self.goomba = Goomba(self)
self.sprites.append(self.goomba)
def addFireball(self):
self.fireball = Fireball(self,self.vegeta.x,self.vegeta.y)
self.sprites.append(self.fireball)
def update(self):
if self.rect.left < self.dest_x:
self.rect.left += 1
if self.rect.left > self.dest_x:
self.rect.left -= 1
if self.rect.top < self.dest_y:
self.rect.top += 1
if self.rect.top > self.dest_y:
self.rect.top -= 1
if self.frame_count > 3:
self.frame_count = 0
for i in self.sprites:
i.update()
if i.isGoomba():
if i.fc > 5 and i.hit:
self.sprites.remove(i)
if i.isFireball():
if i.touched:
self.sprites.remove(i)
def set_dest(self, pos):
self.dest_x = pos[0]
self.dest_y = pos[1]
class View():
def __init__(self, model):
screen_size = (800,600)
self.screen = pygame.display.set_mode(screen_size, 32)
self.model = model
self.vegeta_image = [pygame.image.load("S0.png"),pygame.image.load("S2.png"),pygame.image.load("S3.png"),pygame.image.load("S4.png")]
self.model.rect = self.vegeta_image[0].get_rect()
def update(self):
self.screen.fill([0,200,100])
#self.screen.blit(self.vegeta_image[self.model.frame_count], (self.model.vegeta.x,self.model.vegeta.y))
for i in self.model.sprites:
i.drawSelf(self)
pygame.display.flip()
class Controller():
def __init__(self, model):
self.model = model
self.keep_going = True
def update(self):
self.model.vegeta.prev_x = self.model.vegeta.x
self.model.vegeta.prev_y = self.model.vegeta.y
for event in pygame.event.get():
if event.type == QUIT:
self.keep_going = False
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
self.keep_going = False
elif event.type == pygame.MOUSEBUTTONUP:
self.model.set_dest(pygame.mouse.get_pos())
keys = pygame.key.get_pressed()
if keys[K_LEFT]:
self.model.vegeta.x -= 5
self.model.frame_count+= 1
if keys[K_RIGHT]:
self.model.vegeta.x += 5
self.model.frame_count+= 1
#Vegeta.hi(self)
#Vegeta.hey(self)
if keys[K_UP]:
self.model.dest_y -= 1
if keys[K_DOWN]:
self.model.dest_y += 1
self.model.vegeta.vert_velociy -= 18
if keys[K_SPACE] and self.model.vegeta.onGround < 3:
self.model.vegeta.jump()
if keys[K_LCTRL]:
self.model.addFireball()
print("Use the arrow keys to move. Press Esc to quit.")
pygame.init()
m = Model()
v = View(m)
c = Controller(m)
while c.keep_going:
c.update()
m.update()
v.update()
sleep(0.04)
print("Goodbye") | 3,050 | -17 | 318 |
55f49209365e323276fb704103cc729bf1b7ab7e | 11,487 | py | Python | code/louvre/basic_tokenizer.py | yeonsw/weakly_supervised_multi_hop_retriever | 6208832fd643a5e4c6c109ec4141e7a6ace72232 | [
"MIT"
] | 9 | 2021-06-18T08:36:55.000Z | 2021-11-14T15:03:28.000Z | code/louvre/basic_tokenizer.py | yeonsw/weakly_supervised_multi_hop_retriever | 6208832fd643a5e4c6c109ec4141e7a6ace72232 | [
"MIT"
] | null | null | null | code/louvre/basic_tokenizer.py | yeonsw/weakly_supervised_multi_hop_retriever | 6208832fd643a5e4c6c109ec4141e7a6ace72232 | [
"MIT"
] | 2 | 2021-06-19T05:10:58.000Z | 2021-06-19T12:01:44.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Base tokenizer/tokens classes and utilities."""
import copy
class Tokens(object):
"""A class to represent a list of tokenized text."""
TEXT = 0
TEXT_WS = 1
SPAN = 2
POS = 3
LEMMA = 4
NER = 5
def __len__(self):
"""The number of tokens."""
return len(self.data)
def slice(self, i=None, j=None):
"""Return a view of the list of tokens from [i, j)."""
new_tokens = copy.copy(self)
new_tokens.data = self.data[i: j]
return new_tokens
def untokenize(self):
"""Returns the original text (with whitespace reinserted)."""
return ''.join([t[self.TEXT_WS] for t in self.data]).strip()
def words(self, uncased=False):
"""Returns a list of the text of each token
Args:
uncased: lower cases text
"""
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data]
def offsets(self):
"""Returns a list of [start, end) character offsets of each token."""
return [t[self.SPAN] for t in self.data]
def pos(self):
"""Returns a list of part-of-speech tags of each token.
Returns None if this annotation was not included.
"""
if 'pos' not in self.annotators:
return None
return [t[self.POS] for t in self.data]
def lemmas(self):
"""Returns a list of the lemmatized text of each token.
Returns None if this annotation was not included.
"""
if 'lemma' not in self.annotators:
return None
return [t[self.LEMMA] for t in self.data]
def entities(self):
"""Returns a list of named-entity-recognition tags of each token.
Returns None if this annotation was not included.
"""
if 'ner' not in self.annotators:
return None
return [t[self.NER] for t in self.data]
def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
"""Returns a list of all ngrams from length 1 to n.
Args:
n: upper limit of ngram length
uncased: lower cases text
filter_fn: user function that takes in an ngram list and returns
True or False to keep or not keep the ngram
as_string: return the ngram as a string vs list
"""
words = self.words(uncased)
ngrams = [(s, e + 1)
for s in range(len(words))
for e in range(s, min(s + n, len(words)))
if not _skip(words[s:e + 1])]
# Concatenate into strings
if as_strings:
ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]
return ngrams
def entity_groups(self):
"""Group consecutive entity tokens with the same NER tag."""
entities = self.entities()
if not entities:
return None
non_ent = self.opts.get('non_ent', 'O')
groups = []
idx = 0
while idx < len(entities):
ner_tag = entities[idx]
# Check for entity tag
if ner_tag != non_ent:
# Chomp the sequence
start = idx
while (idx < len(entities) and entities[idx] == ner_tag):
idx += 1
groups.append((self.slice(start, idx).untokenize(), ner_tag))
else:
idx += 1
return groups
class Tokenizer(object):
"""Base tokenizer class.
Tokenizers implement tokenize, which should return a Tokens class.
"""
import regex
import logging
logger = logging.getLogger(__name__)
STOPWORDS = {
'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their',
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',
'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through',
'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor',
'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can',
'will', 'just', 'don', 'should', 'now', 'd', 'll', 'm', 'o', 're', 've',
'y', 'ain', 'aren', 'couldn', 'didn', 'doesn', 'hadn', 'hasn', 'haven',
'isn', 'ma', 'mightn', 'mustn', 'needn', 'shan', 'shouldn', 'wasn', 'weren',
'won', 'wouldn', "'ll", "'re", "'ve", "n't", "'s", "'d", "'m", "''", "``"
}
import unicodedata
def normalize(text):
"""Resolve different type of unicode encodings."""
return unicodedata.normalize('NFD', text)
def filter_word(text):
"""Take out english stopwords, punctuation, and compound endings."""
text = normalize(text)
if regex.match(r'^\p{P}+$', text):
return True
if text.lower() in STOPWORDS:
return True
return False
def filter_ngram(gram, mode='any'):
"""Decide whether to keep or discard an n-gram.
Args:
gram: list of tokens (length N)
mode: Option to throw out ngram if
'any': any single token passes filter_word
'all': all tokens pass filter_word
'ends': book-ended by filterable tokens
"""
filtered = [filter_word(w) for w in gram]
if mode == 'any':
return any(filtered)
elif mode == 'all':
return all(filtered)
elif mode == 'ends':
return filtered[0] or filtered[-1]
else:
raise ValueError('Invalid mode: %s' % mode)
| 34.1875 | 80 | 0.523635 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Base tokenizer/tokens classes and utilities."""
import copy
class Tokens(object):
"""A class to represent a list of tokenized text."""
TEXT = 0
TEXT_WS = 1
SPAN = 2
POS = 3
LEMMA = 4
NER = 5
def __init__(self, data, annotators, opts=None):
self.data = data
self.annotators = annotators
self.opts = opts or {}
def __len__(self):
"""The number of tokens."""
return len(self.data)
def slice(self, i=None, j=None):
"""Return a view of the list of tokens from [i, j)."""
new_tokens = copy.copy(self)
new_tokens.data = self.data[i: j]
return new_tokens
def untokenize(self):
"""Returns the original text (with whitespace reinserted)."""
return ''.join([t[self.TEXT_WS] for t in self.data]).strip()
def words(self, uncased=False):
"""Returns a list of the text of each token
Args:
uncased: lower cases text
"""
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data]
def offsets(self):
"""Returns a list of [start, end) character offsets of each token."""
return [t[self.SPAN] for t in self.data]
def pos(self):
"""Returns a list of part-of-speech tags of each token.
Returns None if this annotation was not included.
"""
if 'pos' not in self.annotators:
return None
return [t[self.POS] for t in self.data]
def lemmas(self):
"""Returns a list of the lemmatized text of each token.
Returns None if this annotation was not included.
"""
if 'lemma' not in self.annotators:
return None
return [t[self.LEMMA] for t in self.data]
def entities(self):
"""Returns a list of named-entity-recognition tags of each token.
Returns None if this annotation was not included.
"""
if 'ner' not in self.annotators:
return None
return [t[self.NER] for t in self.data]
def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
"""Returns a list of all ngrams from length 1 to n.
Args:
n: upper limit of ngram length
uncased: lower cases text
filter_fn: user function that takes in an ngram list and returns
True or False to keep or not keep the ngram
as_string: return the ngram as a string vs list
"""
def _skip(gram):
if not filter_fn:
return False
return filter_fn(gram)
words = self.words(uncased)
ngrams = [(s, e + 1)
for s in range(len(words))
for e in range(s, min(s + n, len(words)))
if not _skip(words[s:e + 1])]
# Concatenate into strings
if as_strings:
ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]
return ngrams
def entity_groups(self):
"""Group consecutive entity tokens with the same NER tag."""
entities = self.entities()
if not entities:
return None
non_ent = self.opts.get('non_ent', 'O')
groups = []
idx = 0
while idx < len(entities):
ner_tag = entities[idx]
# Check for entity tag
if ner_tag != non_ent:
# Chomp the sequence
start = idx
while (idx < len(entities) and entities[idx] == ner_tag):
idx += 1
groups.append((self.slice(start, idx).untokenize(), ner_tag))
else:
idx += 1
return groups
class Tokenizer(object):
"""Base tokenizer class.
Tokenizers implement tokenize, which should return a Tokens class.
"""
def tokenize(self, text):
raise NotImplementedError
def shutdown(self):
pass
def __del__(self):
self.shutdown()
import regex
import logging
logger = logging.getLogger(__name__)
class RegexpTokenizer(Tokenizer):
DIGIT = r'\p{Nd}+([:\.\,]\p{Nd}+)*'
TITLE = (r'(dr|esq|hon|jr|mr|mrs|ms|prof|rev|sr|st|rt|messrs|mmes|msgr)'
r'\.(?=\p{Z})')
ABBRV = r'([\p{L}]\.){2,}(?=\p{Z}|$)'
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]++'
HYPHEN = r'{A}([-\u058A\u2010\u2011]{A})+'.format(A=ALPHA_NUM)
NEGATION = r"((?!n't)[\p{L}\p{N}\p{M}])++(?=n't)|n't"
CONTRACTION1 = r"can(?=not\b)"
CONTRACTION2 = r"'([tsdm]|re|ll|ve)\b"
START_DQUOTE = r'(?<=[\p{Z}\(\[{<]|^)(``|["\u0093\u201C\u00AB])(?!\p{Z})'
START_SQUOTE = r'(?<=[\p{Z}\(\[{<]|^)[\'\u0091\u2018\u201B\u2039](?!\p{Z})'
END_DQUOTE = r'(?<!\p{Z})(\'\'|["\u0094\u201D\u00BB])'
END_SQUOTE = r'(?<!\p{Z})[\'\u0092\u2019\u203A]'
DASH = r'--|[\u0096\u0097\u2013\u2014\u2015]'
ELLIPSES = r'\.\.\.|\u2026'
PUNCT = r'\p{P}'
NON_WS = r'[^\p{Z}\p{C}]'
def __init__(self, **kwargs):
"""
Args:
annotators: None or empty set (only tokenizes).
substitutions: if true, normalizes some token types (e.g. quotes).
"""
self._regexp = regex.compile(
'(?P<digit>%s)|(?P<title>%s)|(?P<abbr>%s)|(?P<neg>%s)|(?P<hyph>%s)|'
'(?P<contr1>%s)|(?P<alphanum>%s)|(?P<contr2>%s)|(?P<sdquote>%s)|'
'(?P<edquote>%s)|(?P<ssquote>%s)|(?P<esquote>%s)|(?P<dash>%s)|'
'(?<ellipses>%s)|(?P<punct>%s)|(?P<nonws>%s)' %
(self.DIGIT, self.TITLE, self.ABBRV, self.NEGATION, self.HYPHEN,
self.CONTRACTION1, self.ALPHA_NUM, self.CONTRACTION2,
self.START_DQUOTE, self.END_DQUOTE, self.START_SQUOTE,
self.END_SQUOTE, self.DASH, self.ELLIPSES, self.PUNCT,
self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
if len(kwargs.get('annotators', {})) > 0:
logger.warning('%s only tokenizes! Skipping annotators: %s' %
(type(self).__name__, kwargs.get('annotators')))
self.annotators = set()
self.substitutions = kwargs.get('substitutions', True)
def tokenize(self, text):
data = []
matches = [m for m in self._regexp.finditer(text)]
for i in range(len(matches)):
# Get text
token = matches[i].group()
# Make normalizations for special token types
if self.substitutions:
groups = matches[i].groupdict()
if groups['sdquote']:
token = "``"
elif groups['edquote']:
token = "''"
elif groups['ssquote']:
token = "`"
elif groups['esquote']:
token = "'"
elif groups['dash']:
token = '--'
elif groups['ellipses']:
token = '...'
# Get whitespace
span = matches[i].span()
start_ws = span[0]
if i + 1 < len(matches):
end_ws = matches[i + 1].span()[0]
else:
end_ws = span[1]
# Format data
data.append((
token,
text[start_ws: end_ws],
span,
))
return Tokens(data, self.annotators)
class SimpleTokenizer(Tokenizer):
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
NON_WS = r'[^\p{Z}\p{C}]'
def __init__(self, **kwargs):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
if len(kwargs.get('annotators', {})) > 0:
logger.warning('%s only tokenizes! Skipping annotators: %s' %
(type(self).__name__, kwargs.get('annotators')))
self.annotators = set()
def tokenize(self, text):
data = []
matches = [m for m in self._regexp.finditer(text)]
for i in range(len(matches)):
# Get text
token = matches[i].group()
# Get whitespace
span = matches[i].span()
start_ws = span[0]
if i + 1 < len(matches):
end_ws = matches[i + 1].span()[0]
else:
end_ws = span[1]
# Format data
data.append((
token,
text[start_ws: end_ws],
span,
))
return Tokens(data, self.annotators)
STOPWORDS = {
'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their',
'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',
'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through',
'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor',
'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can',
'will', 'just', 'don', 'should', 'now', 'd', 'll', 'm', 'o', 're', 've',
'y', 'ain', 'aren', 'couldn', 'didn', 'doesn', 'hadn', 'hasn', 'haven',
'isn', 'ma', 'mightn', 'mustn', 'needn', 'shan', 'shouldn', 'wasn', 'weren',
'won', 'wouldn', "'ll", "'re", "'ve", "n't", "'s", "'d", "'m", "''", "``"
}
import unicodedata
def normalize(text):
"""Resolve different type of unicode encodings."""
return unicodedata.normalize('NFD', text)
def filter_word(text):
"""Take out english stopwords, punctuation, and compound endings."""
text = normalize(text)
if regex.match(r'^\p{P}+$', text):
return True
if text.lower() in STOPWORDS:
return True
return False
def filter_ngram(gram, mode='any'):
"""Decide whether to keep or discard an n-gram.
Args:
gram: list of tokens (length N)
mode: Option to throw out ngram if
'any': any single token passes filter_word
'all': all tokens pass filter_word
'ends': book-ended by filterable tokens
"""
filtered = [filter_word(w) for w in gram]
if mode == 'any':
return any(filtered)
elif mode == 'all':
return all(filtered)
elif mode == 'ends':
return filtered[0] or filtered[-1]
else:
raise ValueError('Invalid mode: %s' % mode)
| 2,084 | 2,744 | 184 |
d6b7783665c8b28aef89592f9ff18cbfa86c4a7e | 684 | py | Python | setup.py | Valkatraz/scons | 5e70c65f633dcecc035751c9f0c6f894088df8a0 | [
"MIT"
] | null | null | null | setup.py | Valkatraz/scons | 5e70c65f633dcecc035751c9f0c6f894088df8a0 | [
"MIT"
] | 3 | 2019-01-15T20:40:02.000Z | 2021-02-13T03:16:34.000Z | setup.py | Valkatraz/scons | 5e70c65f633dcecc035751c9f0c6f894088df8a0 | [
"MIT"
] | null | null | null | import fnmatch
from setuptools import setup
from setuptools.command.build_py import build_py as build_py_orig
exclude = ['*Tests']
setup(
cmdclass={
'build_py': build_py,
}
) | 27.36 | 83 | 0.654971 | import fnmatch
from setuptools import setup
from setuptools.command.build_py import build_py as build_py_orig
exclude = ['*Tests']
class build_py(build_py_orig):
def find_package_modules(self, package, package_dir):
"""
Custom module to find package modules.
It will strip out any modules which match the glob patters in exclude above
"""
modules = super().find_package_modules(package, package_dir)
return [(pkg, mod, file, ) for (pkg, mod, file, ) in modules
if not any(fnmatch.fnmatchcase(mod, pat=pattern)
for pattern in exclude)]
setup(
cmdclass={
'build_py': build_py,
}
) | 0 | 467 | 23 |
0083ef62389d5b3e89320c8519707483347c6a65 | 3,690 | py | Python | etc/replay_data.py | wimax-grapl/grapl | be0a49a83f62b84a10182c383d12f911cc555b24 | [
"Apache-2.0"
] | null | null | null | etc/replay_data.py | wimax-grapl/grapl | be0a49a83f62b84a10182c383d12f911cc555b24 | [
"Apache-2.0"
] | null | null | null | etc/replay_data.py | wimax-grapl/grapl | be0a49a83f62b84a10182c383d12f911cc555b24 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import os
from typing import Any, Iterator
from datetime import datetime
import boto3
from mypy_boto3_s3.client import S3Client
from mypy_boto3_sqs.client import SQSClient
IS_LOCAL = bool(os.environ.get("IS_LOCAL", False))
if __name__ == "__main__":
args = parse_args()
if args.bucket_prefix is None:
raise Exception("Provide bucket prefix as first argument")
else:
if args.bucket_prefix == "local-grapl":
IS_LOCAL = True
main(args.bucket_prefix)
| 27.744361 | 80 | 0.513279 | import argparse
import json
import os
from typing import Any, Iterator
from datetime import datetime
import boto3
from mypy_boto3_s3.client import S3Client
from mypy_boto3_sqs.client import SQSClient
IS_LOCAL = bool(os.environ.get("IS_LOCAL", False))
def into_sqs_message(bucket: str, key: str, region: str) -> str:
return json.dumps(
{
"Records": [
{
"eventTime": datetime.utcnow().isoformat(),
"awsRegion": region,
"principalId": {
"principalId": None,
},
"requestParameters": {
"sourceIpAddress": None,
},
"responseElements": {},
"s3": {
"schemaVersion": None,
"configurationId": None,
"bucket": {
"name": bucket,
"ownerIdentity": {
"principalId": None,
},
},
"object": {
"key": key,
"size": 0,
"urlDecodedKey": None,
"versionId": None,
"eTag": None,
"sequencer": None,
},
},
}
]
}
)
def send_s3_event(
sqs_client: SQSClient,
queue_url: str,
output_bucket: str,
output_path: str,
):
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=into_sqs_message(
bucket=output_bucket,
key=output_path,
region=sqs_client.meta.region_name,
),
)
def list_objects(client: S3Client, bucket: str) -> Iterator[str]:
for page in client.get_paginator("list_objects_v2").paginate(Bucket=bucket):
for entry in page["Contents"]:
yield entry["Key"]
def get_sqs_client() -> SQSClient:
if IS_LOCAL:
return boto3.client(
"sqs",
endpoint_url="http://localhost:9324",
region_name="us-east-1",
aws_access_key_id="dummy_cred_aws_access_key_id",
aws_secret_access_key="dummy_cred_aws_secret_access_key",
)
else:
return boto3.client("sqs")
def get_s3_client() -> S3Client:
if IS_LOCAL:
return boto3.client(
"s3",
endpoint_url="http://localhost:9000",
aws_access_key_id="minioadmin",
aws_secret_access_key="minioadmin",
)
else:
return boto3.client("s3")
def main(bucket_prefix: str) -> None:
s3, sqs = get_s3_client(), get_sqs_client()
queue_name = bucket_prefix + "-graph-merger-queue"
queue_url = sqs.get_queue_url(QueueName=queue_name)["QueueUrl"]
bucket = bucket_prefix + "-subgraphs-generated-bucket"
for key in list_objects(s3, bucket):
send_s3_event(
sqs,
queue_url,
bucket,
key,
)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Replay graph-merger events")
parser.add_argument("--bucket_prefix", dest="bucket_prefix", required=True)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
if args.bucket_prefix is None:
raise Exception("Provide bucket prefix as first argument")
else:
if args.bucket_prefix == "local-grapl":
IS_LOCAL = True
main(args.bucket_prefix)
| 2,991 | 0 | 161 |
0e2409e384c0960502ae313bde1b331f94863a4a | 460 | py | Python | source/register/register.py | debemdeboas/virtual-machine | 31d43fc75d3242cf884d2fb9a38d686b960e0576 | [
"MIT"
] | 2 | 2021-03-13T20:49:34.000Z | 2021-03-14T12:53:59.000Z | source/register/register.py | debemdeboas/virtual-machine | 31d43fc75d3242cf884d2fb9a38d686b960e0576 | [
"MIT"
] | 27 | 2021-03-16T22:33:48.000Z | 2021-07-08T22:29:51.000Z | source/register/register.py | debemdeboas/virtual-machine | 31d43fc75d3242cf884d2fb9a38d686b960e0576 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
| 18.4 | 48 | 0.641304 | from abc import ABC, abstractmethod
class IRegister(ABC):
@property
@abstractmethod
def value(self) -> int: ...
@value.setter
@abstractmethod
def value(self, val: int): ...
class Register(IRegister):
def __init__(self, value=0):
self._value = value
@property
def value(self): return self._value
@value.setter
def value(self, val): self._value = int(val)
def __str__(self): return str(self.value)
| 107 | 269 | 46 |
68ac9dfa660f648a4b7904668fe991e0f7d4dd1f | 234 | py | Python | Algorithms/Baekjoon/num_11053.py | endol007/TIL | a86c683149b263aaffffc5f3d22f76d848d829e8 | [
"MIT"
] | null | null | null | Algorithms/Baekjoon/num_11053.py | endol007/TIL | a86c683149b263aaffffc5f3d22f76d848d829e8 | [
"MIT"
] | null | null | null | Algorithms/Baekjoon/num_11053.py | endol007/TIL | a86c683149b263aaffffc5f3d22f76d848d829e8 | [
"MIT"
] | null | null | null | import sys
t = int(input())
num = list(map(int, sys.stdin.readline().split()))
dp = [1 for _ in range(t)]
for i in range(t):
for j in range(i):
if num[i] > num[j]:
dp[i] = max(dp[i], dp[j] + 1)
print(max(dp)) | 21.272727 | 50 | 0.529915 | import sys
t = int(input())
num = list(map(int, sys.stdin.readline().split()))
dp = [1 for _ in range(t)]
for i in range(t):
for j in range(i):
if num[i] > num[j]:
dp[i] = max(dp[i], dp[j] + 1)
print(max(dp)) | 0 | 0 | 0 |
b5094df2b5512caf70d4a53b214327998004c250 | 25,414 | py | Python | AMBER/amber/architect/manager.py | rtu715/NAS-Bench-360 | d075006848c664371855c34082b0a00cda62be67 | [
"MIT"
] | 10 | 2021-06-15T17:48:34.000Z | 2022-02-23T18:34:28.000Z | AMBER/amber/architect/manager.py | rtu715/NAS-Bench-360 | d075006848c664371855c34082b0a00cda62be67 | [
"MIT"
] | 1 | 2021-11-12T15:12:38.000Z | 2021-11-12T19:38:00.000Z | AMBER/amber/architect/manager.py | rtu715/NAS-Bench-360 | d075006848c664371855c34082b0a00cda62be67 | [
"MIT"
] | 1 | 2021-11-15T04:07:17.000Z | 2021-11-15T04:07:17.000Z | # -*- coding: UTF-8 -*-
"""Manager class for streamlining downstream build and evaluation given an architecture.
Manager is the class that takes in architecture designs from an architecture search/optimization algorithm, then
interacts with ``amber.modeler`` to build and train the model according to architecture, and finally calls
``amber.architect.rewards`` to evaluate the trained model rewards to feedback the architecture designer.
"""
import gc
import os, sys
import warnings
import numpy as np
import tensorflow.keras as keras
from ..utils import corrected_tf as tf
import tensorflow as tf2
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.models import Model
import time
from datetime import datetime
from collections import defaultdict
from .commonOps import unpack_data
from .store import get_store_fn
__all__ = [
'BaseNetworkManager',
'NetworkManager',
'GeneralManager',
'DistributedGeneralManager'
]
class GeneralManager(BaseNetworkManager):
"""Manager creates child networks, train them on a dataset, and retrieve rewards.
Parameters
----------
train_data : tuple, string or generator
Training data to be fed to ``keras.models.Model.fit``.
validation_data : tuple, string, or generator
Validation data. The data format is understood similarly to train_data.
model_fn : amber.modeler
A callable function to build and implement child models given an architecture sequence.
reward_fn : amber.architect.rewards
A callable function to evaluate the rewards on a trained model and the validation dataset.
store_fn : amber.architect.store
A callable function to store necessary information (such as predictions, model architectures, and a variety of
plots etc.) for the given child model.
working_dir : str
File path for working directory.
save_full_model : bool
If true, save the full model beside the model weights. Default is False.
epochs : int
The total number of epochs to train the child model.
child_batchsize : int
The batch size for training the child model.
fit_kwargs : dict or None
Keyword arguments for model.fit
predict_kwargs : dict or None
Keyword arguments for model.predict
evaluate_kwargs : dict or None
Keyword arguments for model.evaluate
verbose : bool or int
Verbose level. 0=non-verbose, 1=verbose, 2=less verbose.
kwargs : dict
Other keyword arguments parsed.
Attributes
----------
train_data : tuple or generator
The unpacked training data
validation_data : tuple or generator
The unpacked validation data
model_fn : amber.modeler
Reference to the callable function to build and implement child models given an architecture sequence.
reward_fn : amber.architect.rewards
Reference to the callable function to evaluate the rewards on a trained model and the validation dataset.
store_fn : amber.architect.store
Reference to the callable function to store necessary information (such as predictions, model architectures, and a variety of
plots etc.) for the given child model.
working_dir : str
File path to working directory
verbose : bool or int
Verbose level
TODO
------
- Refactor the rest of attributes as private.
- Update the description of ``train_data`` and ``validation_data`` to more flexible unpacking, once it's added::
If it's tuple, expects it to be a tuple of numpy.array of
(x,y); if it's string, expects it to be the file path to a compiled training data; if it's a generator, expects
it yield a batch of training features and samples.
"""
def get_rewards(self, trial, model_arc, **kwargs):
"""The reward getter for a given model architecture
Parameters
----------
trial : int
An integer number indicating the trial for this architecture
model_arc : list
The list of architecture sequence
Returns
-------
this_reward : float
The reward signal as determined by ``reward_fn(model, val_data)``
loss_and_metrics : dict
A dictionary of auxillary information for this model, such as loss, and other metrics (as in ``tf.keras.metrics``)
"""
# print('-'*80, model_arc, '-'*80)
train_graph = tf.Graph()
train_sess = tf.Session(graph=train_graph)
with train_graph.as_default(), train_sess.as_default():
try:
K.set_session(train_sess)
except RuntimeError: # keras 2.3.1 `set_session` not available for tf2.0
assert keras.__version__ > '2.2.5'
pass
model = self.model_fn(model_arc) # a compiled keras Model
if model is None:
assert hasattr(self.reward_fn, "min"), "model_fn of type %s returned a non-valid model, but the given " \
"reward_fn of type %s does not have .min() method" % (type(
self.model_fn), type(self.reward_fn))
hist = None
this_reward, loss_and_metrics, reward_metrics = self.reward_fn.min(data=self.validation_data)
loss = loss_and_metrics.pop(0)
loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i] for i in
range(len(loss_and_metrics))}
loss_and_metrics['loss'] = loss
if reward_metrics:
loss_and_metrics.update(reward_metrics)
else:
# train the model using Keras methods
if self.verbose:
print(" Trial %i: Start training model..." % trial)
train_x, train_y = unpack_data(self.train_data)
hist = model.fit(x=train_x,
y=train_y,
batch_size=self.batchsize if train_y is not None else None,
epochs=self.epochs,
verbose=self.verbose,
#shuffle=True,
validation_data=self.validation_data,
callbacks=[ModelCheckpoint(os.path.join(self.working_dir, 'temp_network.h5'),
monitor='val_loss', verbose=self.verbose,
save_best_only=True),
EarlyStopping(monitor='val_loss', patience=self.fit_kwargs.pop("earlystop_patience", 5), verbose=self.verbose)],
**self.fit_kwargs
)
# load best performance epoch in this training session
# in corner cases, the optimization might fail and no temp_network
# would be created
if os.path.isfile((os.path.join(self.working_dir, 'temp_network.h5'))):
model.load_weights(os.path.join(self.working_dir, 'temp_network.h5'))
else:
model.save_weights((os.path.join(self.working_dir, 'temp_network.h5')))
# evaluate the model by `reward_fn`
this_reward, loss_and_metrics, reward_metrics = \
self.reward_fn(model, self.validation_data,
session=train_sess,
graph=train_graph)
loss = loss_and_metrics.pop(0)
loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i] for i in
range(len(loss_and_metrics))}
loss_and_metrics['loss'] = loss
if reward_metrics:
loss_and_metrics.update(reward_metrics)
# do any post processing,
# e.g. save child net, plot training history, plot scattered prediction.
if self.store_fn:
val_pred = model.predict(self.validation_data, verbose=self.verbose, **self.predict_kwargs)
self.store_fn(
trial=trial,
model=model,
hist=hist,
data=self.validation_data,
pred=val_pred,
loss_and_metrics=loss_and_metrics,
working_dir=self.working_dir,
save_full_model=self.save_full_model,
knowledge_func=self.reward_fn.knowledge_function
)
# clean up resources and GPU memory
del model
del hist
gc.collect()
return this_reward, loss_and_metrics
class DistributedGeneralManager(GeneralManager):
"""Distributed manager will place all tensors of any child models to a pre-assigned GPU device
"""
class EnasManager(GeneralManager):
"""A specialized manager for Efficient Neural Architecture Search (ENAS).
Because
Parameters
----------
session : tensorflow.Session or None
The tensorflow session that the manager will be parsed to modelers. By default it's None, which will then get the
Session from the modeler.
train_data : tuple, string or generator
Training data to be fed to ``keras.models.Model.fit``.
validation_data : tuple, string, or generator
Validation data. The data format is understood similarly to train_data.
model_fn : amber.modeler
A callable function to build and implement child models given an architecture sequence. Must be a model_fn that
is compatible with ENAS parameter sharing.
reward_fn : amber.architect.rewards
A callable function to evaluate the rewards on a trained model and the validation dataset.
store_fn : amber.architect.store
A callable function to store necessary information (such as predictions, model architectures, and a variety of
plots etc.) for the given child model.
working_dir : str
File path for working directory.
Attributes
----------
model : amber.modeler.child
The child DAG that is connected to ``controller.sample_arc`` as the input architecture sequence, which
will activate a randomly sampled subgraph within child DAG. Because it's hard-wired to the sampled architecture
in controller, using this model to train and predict will also have the inherent stochastic behaviour that is
linked to controller.
See Also
--------
amber.modeler.child : AMBER wrapped-up version of child models that is intended to have similar interface and
methods as the ``keras.models.Model`` API.
train_data : tuple or generator
The unpacked training data
validation_data : tuple or generator
The unpacked validation data
model_fn : amber.modeler
Reference to the callable function to build and implement child models given an architecture sequence.
reward_fn : amber.architect.rewards
Reference to the callable function to evaluate the rewards on a trained model and the validation dataset.
store_fn : amber.architect.store
Reference to the callable function to store necessary information (such as predictions, model architectures, and a variety of
plots etc.) for the given child model.
disable_controller : bool
If true, will randomly return a reward by uniformly sampling in the interval [0,1]. Default is False.
working_dir : str
File path to working directory
verbose : bool or int
Verbose level
"""
    def get_rewards(self, trial, model_arc=None, nsteps=None):
        """The reward getter for a given model architecture.

        Because Enas will train child model by random sampling an architecture to activate for each mini-batch,
        there will not be any rewards evaluation in the Manager anymore.
        However, we can still use `get_rewards` as a proxy to train child models.

        Parameters
        ----------
        trial : int
            An integer number indicating the trial for this architecture
        model_arc : list or None
            The list of architecture sequence. If is None (as by default), will return the child DAG with architecture
            connected directly to ``controller.sample_arc`` tensors.
        nsteps : int
            Optional, if specified, train model nsteps of batches instead of a whole epoch

        Returns
        -------
        this_reward : float
            The reward signal as determined by ``reward_fn(model, val_data)``
        loss_and_metrics : dict
            A dictionary of auxillary information for this model, such as loss, and other metrics (as in ``tf.keras.metrics``)
        """
        # lazily build the parameter-sharing child DAG on first use
        if self.model is None:
            self.model = self.model_fn()
        if model_arc is None:
            # Training mode: no fixed architecture, so no reward is computed.
            # unpack the dataset
            X_val, y_val = self.validation_data[0:2]
            X_train, y_train = self.train_data
            # train the model using EnasModel methods
            if self.verbose:
                print(" Trial %i: Start training model with sample_arc..." % trial)
            hist = self.model.fit(X_train, y_train,
                                  batch_size=self.batchsize,
                                  nsteps=nsteps,
                                  epochs=self.epochs,
                                  verbose=self.verbose,
                                  # comment out because of temporary
                                  # incompatibility with tf.data.Dataset
                                  # validation_data=(X_val, y_val),
                                  )
            # do any post processing,
            # e.g. save child net, plot training history, plot scattered prediction.
            if self.store_fn:
                val_pred = self.model.predict(X_val, verbose=self.verbose)
                self.store_fn(
                    trial=trial,
                    model=self.model,
                    hist=hist,
                    data=self.validation_data,
                    pred=val_pred,
                    loss_and_metrics=None,
                    working_dir=self.working_dir,
                    save_full_model=self.save_full_model,
                    knowledge_func=self.reward_fn.knowledge_function
                )
            # no reward/metrics in training mode by design
            return None, None
        else:
            # Evaluation mode: fix the architecture and score it with reward_fn.
            model = self.model_fn(model_arc)
            this_reward, loss_and_metrics, reward_metrics = \
                self.reward_fn(model, self.validation_data,
                               session=self.session)
            loss = loss_and_metrics.pop(0)
            # map positional metric values back to their configured names
            loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i] for i in
                                range(len(loss_and_metrics))}
            loss_and_metrics['loss'] = loss
            if reward_metrics:
                loss_and_metrics.update(reward_metrics)
            # enable this to overwrite a random reward when disable controller
            if self.disable_controller:
                this_reward = np.random.uniform(0, 1)
            # end
            return this_reward, loss_and_metrics
| 43.591767 | 156 | 0.586173 | # -*- coding: UTF-8 -*-
"""Manager class for streamlining downstream build and evaluation given an architecture.
Manager is the class that takes in architecture designs from an architecture search/optimization algorithm, then
interacts with ``amber.modeler`` to build and train the model according to architecture, and finally calls
``amber.architect.rewards`` to evaluate the trained model rewards to feedback the architecture designer.
"""
import gc
import os, sys
import warnings
import numpy as np
import tensorflow.keras as keras
from ..utils import corrected_tf as tf
import tensorflow as tf2
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.models import Model
import time
from datetime import datetime
from collections import defaultdict
from .commonOps import unpack_data
from .store import get_store_fn
__all__ = [
'BaseNetworkManager',
'NetworkManager',
'GeneralManager',
'DistributedGeneralManager'
]
class BaseNetworkManager:
    """Abstract interface for managers that train child models and report rewards."""

    def __init__(self, *args, **kwargs):
        """No-op constructor; concrete managers hold the real state."""
        pass

    def get_rewards(self, trial, model_arc):
        """Train/evaluate ``model_arc`` for ``trial``; must be overridden."""
        raise NotImplementedError("Abstract method.")
class GeneralManager(BaseNetworkManager):
"""Manager creates child networks, train them on a dataset, and retrieve rewards.
Parameters
----------
train_data : tuple, string or generator
Training data to be fed to ``keras.models.Model.fit``.
validation_data : tuple, string, or generator
Validation data. The data format is understood similarly to train_data.
model_fn : amber.modeler
A callable function to build and implement child models given an architecture sequence.
reward_fn : amber.architect.rewards
A callable function to evaluate the rewards on a trained model and the validation dataset.
store_fn : amber.architect.store
A callable function to store necessary information (such as predictions, model architectures, and a variety of
plots etc.) for the given child model.
working_dir : str
File path for working directory.
save_full_model : bool
If true, save the full model beside the model weights. Default is False.
epochs : int
The total number of epochs to train the child model.
child_batchsize : int
The batch size for training the child model.
fit_kwargs : dict or None
Keyword arguments for model.fit
predict_kwargs : dict or None
Keyword arguments for model.predict
evaluate_kwargs : dict or None
Keyword arguments for model.evaluate
verbose : bool or int
Verbose level. 0=non-verbose, 1=verbose, 2=less verbose.
kwargs : dict
Other keyword arguments parsed.
Attributes
----------
train_data : tuple or generator
The unpacked training data
validation_data : tuple or generator
The unpacked validation data
model_fn : amber.modeler
Reference to the callable function to build and implement child models given an architecture sequence.
reward_fn : amber.architect.rewards
Reference to the callable function to evaluate the rewards on a trained model and the validation dataset.
store_fn : amber.architect.store
Reference to the callable function to store necessary information (such as predictions, model architectures, and a variety of
plots etc.) for the given child model.
working_dir : str
File path to working directory
verbose : bool or int
Verbose level
TODO
------
- Refactor the rest of attributes as private.
- Update the description of ``train_data`` and ``validation_data`` to more flexible unpacking, once it's added::
If it's tuple, expects it to be a tuple of numpy.array of
(x,y); if it's string, expects it to be the file path to a compiled training data; if it's a generator, expects
it yield a batch of training features and samples.
"""
    def __init__(self,
                 train_data,
                 validation_data,
                 model_fn,
                 reward_fn,
                 store_fn,
                 working_dir='.',
                 save_full_model=False,
                 epochs=5,
                 child_batchsize=128,
                 verbose=0,
                 fit_kwargs=None,
                 predict_kwargs=None,
                 evaluate_kwargs=None,
                 **kwargs):
        """Set up the manager; see the class docstring for parameter details."""
        super(GeneralManager, self).__init__(**kwargs)
        self.train_data = train_data
        self.validation_data = validation_data
        self.working_dir = working_dir
        # normalize optional kwarg dicts so downstream code can always **-unpack
        self.fit_kwargs = fit_kwargs or {}
        self.predict_kwargs = predict_kwargs or {}
        self.evaluate_kwargs = evaluate_kwargs or {}
        # Remove the patience setting from fit_kwargs so it is not forwarded to
        # model.fit as an unknown keyword; stored for building EarlyStopping.
        self._earlystop_patience = self.fit_kwargs.pop("earlystop_patience",5)
        if not os.path.exists(self.working_dir):
            os.makedirs(self.working_dir)
        # compile settings: explicit kwarg wins, else taken from the model builder
        self.model_compile_dict = kwargs.pop("model_compile_dict", None)
        if self.model_compile_dict is None:
            self.model_compile_dict = model_fn.model_compile_dict
        # added 2020.5.19: parse model_space to manager for compatibility with newer versions of controllers
        self.model_space = kwargs.pop("model_space", None)
        self.save_full_model = save_full_model
        self.epochs = epochs
        self.batchsize = child_batchsize
        self.verbose = verbose
        self.model_fn = model_fn
        self.reward_fn = reward_fn
        # resolve the store_fn spec (e.g. a name) into a callable, or None
        self.store_fn = get_store_fn(store_fn)
def get_rewards(self, trial, model_arc, **kwargs):
"""The reward getter for a given model architecture
Parameters
----------
trial : int
An integer number indicating the trial for this architecture
model_arc : list
The list of architecture sequence
Returns
-------
this_reward : float
The reward signal as determined by ``reward_fn(model, val_data)``
loss_and_metrics : dict
A dictionary of auxillary information for this model, such as loss, and other metrics (as in ``tf.keras.metrics``)
"""
# print('-'*80, model_arc, '-'*80)
train_graph = tf.Graph()
train_sess = tf.Session(graph=train_graph)
with train_graph.as_default(), train_sess.as_default():
try:
K.set_session(train_sess)
except RuntimeError: # keras 2.3.1 `set_session` not available for tf2.0
assert keras.__version__ > '2.2.5'
pass
model = self.model_fn(model_arc) # a compiled keras Model
if model is None:
assert hasattr(self.reward_fn, "min"), "model_fn of type %s returned a non-valid model, but the given " \
"reward_fn of type %s does not have .min() method" % (type(
self.model_fn), type(self.reward_fn))
hist = None
this_reward, loss_and_metrics, reward_metrics = self.reward_fn.min(data=self.validation_data)
loss = loss_and_metrics.pop(0)
loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i] for i in
range(len(loss_and_metrics))}
loss_and_metrics['loss'] = loss
if reward_metrics:
loss_and_metrics.update(reward_metrics)
else:
# train the model using Keras methods
if self.verbose:
print(" Trial %i: Start training model..." % trial)
train_x, train_y = unpack_data(self.train_data)
hist = model.fit(x=train_x,
y=train_y,
batch_size=self.batchsize if train_y is not None else None,
epochs=self.epochs,
verbose=self.verbose,
#shuffle=True,
validation_data=self.validation_data,
callbacks=[ModelCheckpoint(os.path.join(self.working_dir, 'temp_network.h5'),
monitor='val_loss', verbose=self.verbose,
save_best_only=True),
EarlyStopping(monitor='val_loss', patience=self.fit_kwargs.pop("earlystop_patience", 5), verbose=self.verbose)],
**self.fit_kwargs
)
# load best performance epoch in this training session
# in corner cases, the optimization might fail and no temp_network
# would be created
if os.path.isfile((os.path.join(self.working_dir, 'temp_network.h5'))):
model.load_weights(os.path.join(self.working_dir, 'temp_network.h5'))
else:
model.save_weights((os.path.join(self.working_dir, 'temp_network.h5')))
# evaluate the model by `reward_fn`
this_reward, loss_and_metrics, reward_metrics = \
self.reward_fn(model, self.validation_data,
session=train_sess,
graph=train_graph)
loss = loss_and_metrics.pop(0)
loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i] for i in
range(len(loss_and_metrics))}
loss_and_metrics['loss'] = loss
if reward_metrics:
loss_and_metrics.update(reward_metrics)
# do any post processing,
# e.g. save child net, plot training history, plot scattered prediction.
if self.store_fn:
val_pred = model.predict(self.validation_data, verbose=self.verbose, **self.predict_kwargs)
self.store_fn(
trial=trial,
model=model,
hist=hist,
data=self.validation_data,
pred=val_pred,
loss_and_metrics=loss_and_metrics,
working_dir=self.working_dir,
save_full_model=self.save_full_model,
knowledge_func=self.reward_fn.knowledge_function
)
# clean up resources and GPU memory
del model
del hist
gc.collect()
return this_reward, loss_and_metrics
class DistributedGeneralManager(GeneralManager):
"""Distributed manager will place all tensors of any child models to a pre-assigned GPU device
"""
    def __init__(self, devices, train_data_kwargs, validate_data_kwargs, do_resample=False, *args, **kwargs):
        """Set up a device-pinned manager.

        Parameters
        ----------
        devices : list or None
            A single-element list of device strings, or None to auto-assign an
            idle GPU on the first get_rewards call.
        train_data_kwargs : dict or None
            Keyword arguments forwarded to unpack_data when opening train data.
        validate_data_kwargs : dict or None
            Keyword arguments forwarded when calling the validation_data callable.
        do_resample : bool
            If True, re-use the recorded reward when an architecture repeats.
        """
        self.devices = devices
        super().__init__(*args, **kwargs)
        assert devices is None or len(self.devices) == 1, "Only supports one GPU device currently"
        # For keeping & closing file connection at multi-processing
        self.train_data_kwargs = train_data_kwargs or {}
        self.validate_data_kwargs = validate_data_kwargs or {}
        # data handles are opened lazily in get_rewards and torn down by close_handler
        self.train_x = None
        self.train_y = None
        self.file_connected = False
        # For resampling; TODO: how to implement a Bayesian version of this?
        self.arc_records = defaultdict(dict)
        self.do_resample = do_resample
def close_handler(self):
if self.file_connected:
self.train_x.close()
if self.train_y:
self.train_y.close()
self._validation_data_gen.close()
self.train_x = None
self.train_y = None
self.file_connected = False
    def get_rewards(self, trial, model_arc, remap_device=None, **kwargs):
        """Train and score ``model_arc`` on this manager's pinned device.

        Differences from GeneralManager.get_rewards: all tensors are placed on
        one GPU (auto-assigned when ``self.devices`` is None), train/validation
        file handles are opened once and cached on the instance, and previously
        seen architectures can be re-scored from ``self.arc_records`` when
        ``do_resample`` is True. Progress/timing is logged to stderr.
        """
        # TODO: use tensorflow distributed strategy
        #strategy = tf2.distribute.MirroredStrategy(devices=self.devices)
        #print('Number of devices: {} - {}'.format(strategy.num_replicas_in_sync, self.devices))
        #with strategy.scope():
        pid = os.getpid()
        sys.stderr.write("[%s][%s] Preprocessing.."%(pid, datetime.now().strftime("%H:%M:%S") ))
        start_time = time.time()
        train_graph = tf.Graph()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        train_sess = tf.Session(graph=train_graph, config=config)
        # remap device will overwrite the manager device
        if remap_device is not None:
            target_device = remap_device
        elif self.devices is None:
            # no device configured: pick an idle GPU once and remember it
            from ..utils.gpu_query import get_idle_gpus
            idle_gpus = get_idle_gpus()
            target_device = idle_gpus[0]
            target_device = "/device:GPU:%i"%target_device
            self.devices = [target_device]
            sys.stderr.write("[%s] Auto-assign device: %s" % (pid, target_device) )
        else:
            target_device = self.devices[0]
        with train_graph.as_default(), train_sess.as_default():
            with tf.device(target_device):
                try:
                    K.set_session(train_sess)
                except RuntimeError:  # keras 2.3.1 `set_session` not available for tf2.0
                    pass
                model = self.model_fn(model_arc)  # a compiled keras Model
                # unpack the dataset; handles stay open until close_handler()
                if not self.file_connected:
                    X_train, y_train = unpack_data(self.train_data, callable_kwargs=self.train_data_kwargs)
                    self.train_x = X_train
                    self.train_y = y_train
                    assert callable(self.validation_data), "Expect validation_data to be callable, got %s" % type(self.validation_data)
                    self._validation_data_gen = self.validation_data(**self.validate_data_kwargs)
                    self.file_connected = True
                elapse_time = time.time() - start_time
                sys.stderr.write(" %.3f sec\n"%elapse_time)
                model_arc_ = tuple(model_arc)
                if model_arc_ in self.arc_records and self.do_resample is True:
                    # architecture seen before: reuse the recorded outcome
                    this_reward = self.arc_records[model_arc_]['reward']
                    old_trial = self.arc_records[model_arc_]['trial']
                    loss_and_metrics = self.arc_records[model_arc_]['loss_and_metrics']
                    sys.stderr.write("[%s][%s] Trial %i: Re-sampled from history %i\n" % (pid, datetime.now().strftime("%H:%M:%S"), trial, old_trial))
                else:
                    # train the model using Keras methods
                    start_time = time.time()
                    sys.stderr.write("[%s][%s] Trial %i: Start training model.." % (pid, datetime.now().strftime("%H:%M:%S"), trial))
                    hist = model.fit(self.train_x, self.train_y,
                                     batch_size=self.batchsize,
                                     epochs=self.epochs,
                                     verbose=self.verbose,
                                     validation_data=self._validation_data_gen,
                                     callbacks=[ModelCheckpoint(os.path.join(self.working_dir, 'temp_network.h5'),
                                                                monitor='val_loss', verbose=self.verbose,
                                                                save_best_only=True),
                                                EarlyStopping(monitor='val_loss', patience=self._earlystop_patience, verbose=self.verbose)],
                                     **self.fit_kwargs
                                     )
                    # load best performance epoch in this training session
                    model.load_weights(os.path.join(self.working_dir, 'temp_network.h5'))
                    elapse_time = time.time() - start_time
                    sys.stderr.write(" %.3f sec\n"%elapse_time)
                    start_time = time.time()
                    sys.stderr.write("[%s] Postprocessing.."% pid )
                    # evaluate the model by `reward_fn`
                    this_reward, loss_and_metrics, reward_metrics = \
                        self.reward_fn(model, self._validation_data_gen,
                                       session=train_sess,
                                       )
                    loss = loss_and_metrics.pop(0)
                    # map positional metric values back to their configured names
                    loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i] for i in
                                        range(len(loss_and_metrics))}
                    loss_and_metrics['loss'] = loss
                    if reward_metrics:
                        loss_and_metrics.update(reward_metrics)
                    # do any post processing,
                    # e.g. save child net, plot training history, plot scattered prediction.
                    if self.store_fn:
                        # NOTE(review): predict is fed self.validation_data (a callable)
                        # while fit/reward use self._validation_data_gen -- looks
                        # inconsistent; confirm intended behavior before relying on it.
                        val_pred = model.predict(self.validation_data, verbose=self.verbose)
                        self.store_fn(
                            trial=trial,
                            model=model,
                            hist=hist,
                            data=self._validation_data_gen,
                            pred=val_pred,
                            loss_and_metrics=loss_and_metrics,
                            working_dir=self.working_dir,
                            save_full_model=self.save_full_model,
                            knowledge_func=self.reward_fn.knowledge_function
                        )
                    elapse_time = time.time() - start_time
                    sys.stderr.write(" %.3f sec\n"%elapse_time)
                # store the rewards in records
                self.arc_records[model_arc_]['trial'] = trial
                self.arc_records[model_arc_]['reward'] = this_reward
                self.arc_records[model_arc_]['loss_and_metrics'] = loss_and_metrics
        # clean up resources and GPU memory
        start_time = time.time()
        sys.stderr.write("[%s] Cleaning up.."%pid)
        try:
            # hist/model may be unbound when the resample branch was taken
            del train_sess
            del train_graph
            del model
            del hist
        except UnboundLocalError:
            pass
        gc.collect()
        elapse_time = time.time() - start_time
        sys.stderr.write(" %.3f sec\n"%elapse_time)
        return this_reward, loss_and_metrics
class EnasManager(GeneralManager):
"""A specialized manager for Efficient Neural Architecture Search (ENAS).
Because
Parameters
----------
session : tensorflow.Session or None
The tensorflow session that the manager will be parsed to modelers. By default it's None, which will then get the
Session from the modeler.
train_data : tuple, string or generator
Training data to be fed to ``keras.models.Model.fit``.
validation_data : tuple, string, or generator
Validation data. The data format is understood similarly to train_data.
model_fn : amber.modeler
A callable function to build and implement child models given an architecture sequence. Must be a model_fn that
is compatible with ENAS parameter sharing.
reward_fn : amber.architect.rewards
A callable function to evaluate the rewards on a trained model and the validation dataset.
store_fn : amber.architect.store
A callable function to store necessary information (such as predictions, model architectures, and a variety of
plots etc.) for the given child model.
working_dir : str
File path for working directory.
Attributes
----------
model : amber.modeler.child
The child DAG that is connected to ``controller.sample_arc`` as the input architecture sequence, which
will activate a randomly sampled subgraph within child DAG. Because it's hard-wired to the sampled architecture
in controller, using this model to train and predict will also have the inherent stochastic behaviour that is
linked to controller.
See Also
--------
amber.modeler.child : AMBER wrapped-up version of child models that is intended to have similar interface and
methods as the ``keras.models.Model`` API.
train_data : tuple or generator
The unpacked training data
validation_data : tuple or generator
The unpacked validation data
model_fn : amber.modeler
Reference to the callable function to build and implement child models given an architecture sequence.
reward_fn : amber.architect.rewards
Reference to the callable function to evaluate the rewards on a trained model and the validation dataset.
store_fn : amber.architect.store
Reference to the callable function to store necessary information (such as predictions, model architectures, and a variety of
plots etc.) for the given child model.
disable_controller : bool
If true, will randomly return a reward by uniformly sampling in the interval [0,1]. Default is False.
working_dir : str
File path to working directory
verbose : bool or int
Verbose level
"""
    def __init__(self, session=None, *args, **kwargs):
        """Set up the ENAS manager; see the class docstring for parameters."""
        super().__init__(*args, **kwargs)
        if session is None:
            # fall back to the session owned by the model builder
            self.session = self.model_fn.session
        else:
            self.session = session
        # the parameter-sharing child DAG is built lazily on first get_rewards call
        self.model = None
        # Popping after super().__init__ still works: `kwargs` is a local dict,
        # so the flag is extracted here regardless of the super call.
        self.disable_controller = kwargs.pop("disable_controller", False)
    def get_rewards(self, trial, model_arc=None, nsteps=None):
        """The reward getter for a given model architecture.

        Because Enas will train child model by random sampling an architecture to activate for each mini-batch,
        there will not be any rewards evaluation in the Manager anymore.
        However, we can still use `get_rewards` as a proxy to train child models.

        Parameters
        ----------
        trial : int
            An integer number indicating the trial for this architecture
        model_arc : list or None
            The list of architecture sequence. If is None (as by default), will return the child DAG with architecture
            connected directly to ``controller.sample_arc`` tensors.
        nsteps : int
            Optional, if specified, train model nsteps of batches instead of a whole epoch

        Returns
        -------
        this_reward : float
            The reward signal as determined by ``reward_fn(model, val_data)``
        loss_and_metrics : dict
            A dictionary of auxillary information for this model, such as loss, and other metrics (as in ``tf.keras.metrics``)
        """
        # lazily build the parameter-sharing child DAG on first use
        if self.model is None:
            self.model = self.model_fn()
        if model_arc is None:
            # Training mode: no fixed architecture, so no reward is computed.
            # unpack the dataset
            X_val, y_val = self.validation_data[0:2]
            X_train, y_train = self.train_data
            # train the model using EnasModel methods
            if self.verbose:
                print(" Trial %i: Start training model with sample_arc..." % trial)
            hist = self.model.fit(X_train, y_train,
                                  batch_size=self.batchsize,
                                  nsteps=nsteps,
                                  epochs=self.epochs,
                                  verbose=self.verbose,
                                  # comment out because of temporary
                                  # incompatibility with tf.data.Dataset
                                  # validation_data=(X_val, y_val),
                                  )
            # do any post processing,
            # e.g. save child net, plot training history, plot scattered prediction.
            if self.store_fn:
                val_pred = self.model.predict(X_val, verbose=self.verbose)
                self.store_fn(
                    trial=trial,
                    model=self.model,
                    hist=hist,
                    data=self.validation_data,
                    pred=val_pred,
                    loss_and_metrics=None,
                    working_dir=self.working_dir,
                    save_full_model=self.save_full_model,
                    knowledge_func=self.reward_fn.knowledge_function
                )
            # no reward/metrics in training mode by design
            return None, None
        else:
            # Evaluation mode: fix the architecture and score it with reward_fn.
            model = self.model_fn(model_arc)
            this_reward, loss_and_metrics, reward_metrics = \
                self.reward_fn(model, self.validation_data,
                               session=self.session)
            loss = loss_and_metrics.pop(0)
            # map positional metric values back to their configured names
            loss_and_metrics = {str(self.model_compile_dict['metrics'][i]): loss_and_metrics[i] for i in
                                range(len(loss_and_metrics))}
            loss_and_metrics['loss'] = loss
            if reward_metrics:
                loss_and_metrics.update(reward_metrics)
            # enable this to overwrite a random reward when disable controller
            if self.disable_controller:
                this_reward = np.random.uniform(0, 1)
            # end
            return this_reward, loss_and_metrics
| 9,594 | 4 | 209 |
40a0cb0772418674a27ad3f05ce6d3ea2bb8100b | 705 | py | Python | Globals/Globals.py | PoweredByME/SSVEP_FYP | 6839be6a4aeddfa1b29c587b23d64c95a90810f8 | [
"MIT"
] | null | null | null | Globals/Globals.py | PoweredByME/SSVEP_FYP | 6839be6a4aeddfa1b29c587b23d64c95a90810f8 | [
"MIT"
] | null | null | null | Globals/Globals.py | PoweredByME/SSVEP_FYP | 6839be6a4aeddfa1b29c587b23d64c95a90810f8 | [
"MIT"
] | null | null | null | """
    This script contains all the global variables
which are used in the code.
Most of the variables are settings upon which the
code works.
"""
# Source of the EEG data: either "offline" (pre-recorded files) or "online".
DATA_SOURCE = "offline"
# The sampling rate of the recorded EEG, in Hz.
DATA_SAMPLING_FREQ = 256.0
# The time in seconds for which the data is stored in the buffer.
DATA_MAX_BUFFER_TIME_SEC = 0.25
# Print the data file when it is opened. Use this for debugging.
SHOW_DATA_WHEN_FILE_OPENED = False
# The number of columns which are extra, i.e. other than the EEG data itself.
DATA_FRAME_APPENDAGE = 3
# Directory holding the pre-recorded datasets.
OFFLINE_DATA_PATH = "DataSets/SRC"
OFFLINE_DATASET_FILE_TYPE = ".mat"; | 41.470588 | 112 | 0.70922 | """
    This script contains all the global variables
which are used in the code.
Most of the variables are settings upon which the
code works.
"""
DATA_SOURCE = "offline"; # can be either "offline" or "online".
DATA_SAMPLING_FREQ = 256.0; # the sampling rate of the recorded EEG, in Hz.
DATA_MAX_BUFFER_TIME_SEC = 0.25; # The time in seconds for which the data is stored in the buffer.
SHOW_DATA_WHEN_FILE_OPENED = False # print the data file when it is opened. Use this for debugging.
DATA_FRAME_APPENDAGE = 3; # the number of columns which are extra, i.e. other than the EEG data itself.
# Location and file extension of the pre-recorded datasets.
OFFLINE_DATA_PATH = "DataSets/SRC";
OFFLINE_DATASET_FILE_TYPE = ".mat";
d2a2418e84585d499ca9dad4f673626405f3dcd6 | 3,518 | py | Python | src/data/CMNIST.py | mfederici/dsit | 7f26f7ce93edb2075fba4aa965aa1ad9bf773aa5 | [
"MIT"
] | 17 | 2021-11-02T17:51:02.000Z | 2022-02-21T02:48:56.000Z | src/data/CMNIST.py | mfederici/dsit | 7f26f7ce93edb2075fba4aa965aa1ad9bf773aa5 | [
"MIT"
] | null | null | null | src/data/CMNIST.py | mfederici/dsit | 7f26f7ce93edb2075fba4aa965aa1ad9bf773aa5 | [
"MIT"
] | null | null | null | import torch
import torchvision
from torch.utils.data import Dataset, Subset
from torchvision.transforms import ToTensor
from src.data.cmnist_dist import make_joint_distribution, CMNIST_NAME, CMNIST_VERSIONS
# Flattened input size: two color channels of 28x28 pixels.
CMNIST_SIZE = 28 ** 2 * 2
# Tensor layout of one colored example: (channels, height, width).
CMNIST_SHAPE = [2, 28, 28]
# Number of target classes (binary label y).
CMNIST_N_CLASSES = 2
# Number of environments (binary environment e).
CMNIST_N_ENVS = 2
# Names of the available dataset splits.
MNIST_TRAIN = 'train'
MNIST_VALID = 'valid'
MNIST_TEST = 'test'
MNIST_TRAIN_VALID = 'train+valid'
# Splits that are carved out of the torchvision training set.
MNIST_TRAIN_SPLITS = [MNIST_TRAIN, MNIST_VALID, MNIST_TRAIN_VALID]
MNIST_SPLITS = MNIST_TRAIN_SPLITS + [MNIST_TEST]
# Number of examples reserved for training; the remainder of the
# torchvision training set is used for validation.
MNIST_TRAIN_EXAMPLES = 50000
# Wrapper for the torchvision MNIST dataset with validation split
# Implementation of the CMNIST, d-CMNIST and y-CMNIST datasets for pytorch
| 34.490196 | 86 | 0.596362 | import torch
import torchvision
from torch.utils.data import Dataset, Subset
from torchvision.transforms import ToTensor
from src.data.cmnist_dist import make_joint_distribution, CMNIST_NAME, CMNIST_VERSIONS
# Flattened input size: two color channels of 28x28 pixels.
CMNIST_SIZE = 28 ** 2 * 2
# Tensor layout of one colored example: (channels, height, width).
CMNIST_SHAPE = [2, 28, 28]
# Number of target classes (binary label y).
CMNIST_N_CLASSES = 2
# Number of environments (binary environment e).
CMNIST_N_ENVS = 2
# Names of the available dataset splits.
MNIST_TRAIN = 'train'
MNIST_VALID = 'valid'
MNIST_TEST = 'test'
MNIST_TRAIN_VALID = 'train+valid'
# Splits that are carved out of the torchvision training set.
MNIST_TRAIN_SPLITS = [MNIST_TRAIN, MNIST_VALID, MNIST_TRAIN_VALID]
MNIST_SPLITS = MNIST_TRAIN_SPLITS + [MNIST_TEST]
# Number of examples reserved for training; the remainder of the
# torchvision training set is used for validation.
MNIST_TRAIN_EXAMPLES = 50000
# Wrapper for the torchvision MNIST dataset with validation split
class MNIST(Dataset):
    """Wrapper around the torchvision MNIST dataset that adds a validation split.

    The torchvision training set is divided into a training subset (the first
    MNIST_TRAIN_EXAMPLES examples) and a validation subset (the remainder).
    Examples are returned as dictionaries {'x': FloatTensor[1, 28, 28],
    'y': LongTensor[1]}.

    Args:
        root: root directory forwarded to torchvision.datasets.MNIST.
        split: one of 'train', 'valid', 'train+valid' or 'test'.
        keep_in_memory: if True, every example is cached on first access in
            pre-allocated tensors.
        device: device on which the in-memory cache is allocated.
        **params: extra keyword arguments forwarded to torchvision.

    Raises:
        ValueError: if `split` is not one of the supported split names.
    """

    def __init__(self, root, split, keep_in_memory=False, device='cpu', **params):
        super(MNIST, self).__init__()
        dataset = torchvision.datasets.MNIST(root=root,
                                             train=split in MNIST_TRAIN_SPLITS,
                                             transform=ToTensor(), **params)
        if split == MNIST_TRAIN:
            dataset = Subset(dataset, range(MNIST_TRAIN_EXAMPLES))
        elif split == MNIST_VALID:
            dataset = Subset(dataset, range(MNIST_TRAIN_EXAMPLES, len(dataset)))
        elif not (split == MNIST_TEST) and not (split == MNIST_TRAIN_VALID):
            # ValueError subclasses Exception, so callers catching the old
            # generic Exception still work.
            raise ValueError('The possible splits are %s' % ', '.join(MNIST_SPLITS))
        self.dataset = dataset
        self.keep_in_memory = keep_in_memory
        if keep_in_memory:
            # Pre-allocated per-field tensors: cached examples are written
            # in place instead of growing Python containers.
            cache = {
                'x': torch.FloatTensor(len(dataset), 1, 28, 28).to(device),
                'y': torch.LongTensor(len(dataset)).to(device)
            }
        else:
            cache = None
        self.cache = cache
        self.device = device
        # Set of already-cached indices; a set (rather than the previous
        # list) keeps the membership test in __getitem__ O(1) instead of
        # O(n) once most of the dataset has been visited.
        self.stored_ids = set()

    def __getitem__(self, index):
        """Return {'x': FloatTensor[1, 28, 28], 'y': LongTensor[1]} for `index`."""
        if not self.keep_in_memory or index not in self.stored_ids:
            x, y = self.dataset[index]
            if self.keep_in_memory:
                self.cache['x'][index] = x.to(self.device)
                self.cache['y'][index] = y
                self.stored_ids.add(index)
        else:
            x = self.cache['x'][index]
            # .item() extracts a plain int; building a LongTensor from a list
            # containing a 0-dim tensor is deprecated in recent torch.
            y = self.cache['y'][index].item()
        return {'x': x, 'y': torch.LongTensor([y])}

    def __len__(self):
        return len(self.dataset)
# Implementation of the CMNIST, d-CMNIST and y-CMNIST datasets for pytorch
class CMNIST(MNIST):
    """Colored-MNIST variants (CMNIST, d-CMNIST, y-CMNIST) for pytorch.

    Each grayscale digit is turned into a two-channel (colored) image; the
    color c, label y and environment e are sampled from the version-specific
    joint distribution conditioned on the underlying digit d and on t.

    Args:
        root: MNIST root directory (see MNIST).
        version: one of CMNIST_VERSIONS (defaults to CMNIST_NAME).
        sample_once: if True, (c, y, e) are sampled only on the first access
            of each index and the resulting item is reused afterwards.
        t: value (0 or 1) on which the 't' variable is conditioned.
        **params: forwarded to MNIST.__init__.
    """

    def __init__(self, root, version=CMNIST_NAME, sample_once=False, t=1, **params):
        super(CMNIST, self).__init__(root=root, **params)
        assert version in CMNIST_VERSIONS
        assert t in [0, 1]
        self.dist = make_joint_distribution(version).condition_on('t', t)
        self.sample_once = sample_once
        self.sampled_data = {}

    def __getitem__(self, index):
        """Return {'x': FloatTensor[2, 28, 28], 'y': sampled label, 'e': environment}."""
        # BUG FIX: a cached entry must be returned immediately. Previously it
        # fell through into the sampling code below, which re-sampled
        # (conditioned on the already-sampled label instead of the digit) and
        # concatenated a second empty channel onto the already two-channel
        # image, corrupting every repeated access when sample_once=True.
        if index in self.sampled_data:
            return self.sampled_data[index]
        data = super(CMNIST, self).__getitem__(index)
        x = data['x']
        d = data['y']
        # sample from p(e,y,c|d) to determine color, label and environment
        sample = self.dist.condition_on('d', d).sample()
        # Concatenate an empty channel (red)
        x = torch.cat([x, x * 0], 0)
        # If the color is 1, make the empty channel the first (green)
        if sample['c'] == 1:
            x = torch.roll(x, 1, 0)
        data = {'x': x, 'y': sample['y'], 'e': sample['e']}
        if self.sample_once:
            self.sampled_data[index] = data
        return data
| 2,650 | -1 | 177 |