Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | type | length / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 .. 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 .. 209 |
| max_stars_repo_name | string | length 5 .. 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 .. 10 |
| max_stars_count | int64 ⌀ | 1 .. 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 .. 209 |
| max_issues_repo_name | string | length 5 .. 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 .. 10 |
| max_issues_count | int64 ⌀ | 1 .. 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 .. 209 |
| max_forks_repo_name | string | length 5 .. 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 .. 10 |
| max_forks_count | int64 ⌀ | 1 .. 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 .. 1.02M |
| avg_line_length | float64 | 1.07 .. 66.1k |
| max_line_length | int64 | 4 .. 266k |
| alphanum_fraction | float64 | 0.01 .. 1 |
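The columns above follow the usual code-corpus layout: one source file per row, plus star, issue and fork metadata for the repository it came from. As a minimal sketch (not part of the dataset), assuming the rows below were exported to a hypothetical `files.parquet`, they could be filtered on these columns with pandas:

```python
import pandas as pd

# Hypothetical export of the rows below; only the column names come from the schema above.
df = pd.read_parquet("files.parquet")

# Keep small Python files from repositories with at least one star.
subset = df[
    (df["lang"] == "Python")
    & (df["size"] < 10_000)
    & (df["max_stars_count"].fillna(0) >= 1)
]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "avg_line_length"]].head())
```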
hexsha: 934e01680c989249b519bc742c606a505e712464 | size: 813 | ext: py | lang: Python
max_stars:  src/my_blog/autenticacao/migrations/0001_initial.py @ luizsla/django-studies (8140428d2d64b86d7d5b44abcc79de59b2554846) | licenses: ["MIT"] | count: null | events: null .. null
max_issues: src/my_blog/autenticacao/migrations/0001_initial.py @ luizsla/django-studies (8140428d2d64b86d7d5b44abcc79de59b2554846) | licenses: ["MIT"] | count: 6 | events: 2021-03-30T13:53:29.000Z .. 2022-03-12T00:39:13.000Z
max_forks:  src/my_blog/autenticacao/migrations/0001_initial.py @ luizsla/django-studies (8140428d2d64b86d7d5b44abcc79de59b2554846) | licenses: ["MIT"] | count: null | events: null .. null
content:
# Generated by Django 3.0.6 on 2020-07-01 20:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_pic', models.ImageField(upload_to='user/profile')),
('description', models.TextField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
avg_line_length: 30.111111 | max_line_length: 121 | alphanum_fraction: 0.634686
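The migration above creates a one-to-one `Profile` model. For orientation, this is a sketch of the `models.py` definition such a migration would typically be generated from; the field names come from the migration itself, everything else is assumed rather than taken from the repository:

```python
# Sketch of the model behind the 0001_initial migration above (assumed, not from the repo).
from django.conf import settings
from django.db import models


class Profile(models.Model):
    profile_pic = models.ImageField(upload_to="user/profile")  # ImageField requires Pillow
    description = models.TextField()
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
```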
hexsha: d52ce0f0e95eca4851d8db8f945e3eb1d1ced8e1 | size: 1,526 | ext: py | lang: Python
max_stars:  tests/basics/MainPrograms.py @ RESP3CT88/Nuitka (0fcc25d9f00c4fc78c79a863c4b7987f573962e1) | licenses: ["Apache-2.0"] | count: 5,421 | events: 2018-09-24T08:04:06.000Z .. 2022-03-31T20:02:37.000Z
max_issues: tests/basics/MainPrograms.py @ ztessler/Nuitka (04c9a5471b702a0e5f28398f2661c93b83ab0d1a) | licenses: ["Apache-2.0"] | count: 1,348 | events: 2018-09-22T13:41:00.000Z .. 2022-03-31T22:33:40.000Z
max_forks:  tests/basics/MainPrograms.py @ ztessler/Nuitka (04c9a5471b702a0e5f28398f2661c93b83ab0d1a) | licenses: ["Apache-2.0"] | count: 396 | events: 2018-09-28T15:37:03.000Z .. 2022-03-29T10:52:09.000Z
content:
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
print("Module name is", __name__)
class SomeClass:
pass
print("Class inside main module names its module as", repr(SomeClass.__module__))
if __name__ == "__main__":
print("Executed as __main__:")
import sys, os
# The sys.argv[0] might contain ".exe", ".py" or no suffix at all.
# Remove it, so the "diff" output is more acceptable.
args = sys.argv[:]
args[0] = (
os.path.basename(args[0])
.replace(".exe", ".py")
.replace(".bin", ".py")
.replace(".py", "")
)
print("Arguments were (stripped argv[0] suffix):", repr(args))
# Output the flags, so we can test if we are compatible with these too.
print("The sys.flags are:", sys.flags)
avg_line_length: 31.791667 | max_line_length: 81 | alphanum_fraction: 0.669069
hexsha: 908b904eaa061ac9f507952279425598cccbfa64 | size: 11,755 | ext: py | lang: Python
max_stars:  src/experiment.py @ RobbinBouwmeester/LIT (0516a69fbf1b8e9976524e0c243f82de041df544) | licenses: ["Apache-2.0"] | count: null | events: null .. null
max_issues: src/experiment.py @ RobbinBouwmeester/LIT (0516a69fbf1b8e9976524e0c243f82de041df544) | licenses: ["Apache-2.0"] | count: null | events: null .. null
max_forks:  src/experiment.py @ RobbinBouwmeester/LIT (0516a69fbf1b8e9976524e0c243f82de041df544) | licenses: ["Apache-2.0"] | count: null | events: null .. null
content:
from pyteomics import mzml,mgf
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from search_engine import MS1_extracter
import numpy as np
import copy
from operator import itemgetter  # used when re-sorting m/z and intensity arrays in Spectrum
class Experiment():
def __init__(self,filename,delta_precursor=1,delta_rt=1,mgf=False):
self.filename = filename
self.ms1 = []
self.ms2 = []
self.scan_to_spectrum = {}
self.ms2_to_ms1 = {}
self.delta_precursor = delta_precursor
self.delta_rt = delta_rt
self.prec_ms2 = {}
self.rt_ms2 = {}
if not mgf: self.read_mzml(self.filename)
if mgf: self.read_mgf(self.filename)
def __str__(self):
ret_str = ""
ret_str += "Number of ms1: %s\n" % (len(self.ms1))
ret_str += "Number of ms2: %s\n" % (len(self.ms2))
return(ret_str)
def read_mgf(self,filename):
infile = mgf.read(filename)
for scan in infile:
spec_obj = Spectrum(scan["m/z array"],
scan["intensity array"],
2,
1,
float(scan["params"]["rtinseconds"]),
float(scan["params"]["rtinseconds"]),
1,
False,
True)
scan_id = scan["params"]["title"].split("_")[-2]
self.scan_to_spectrum[scan_id] = spec_obj
if 2 == 1:
self.ms1.append(scan_id)
if 2 == 2:
self.ms2.append(scan_id)
prec_scan_id = scan_id
spec_obj.set_prec_mass(scan["params"]["pepmass"][0])
prec_mass_round = int(round(spec_obj.prec_mass / self.delta_precursor))
rt_time_round = int(round(spec_obj.scan_start_time / self.delta_rt))
            if prec_mass_round in self.prec_ms2:  # dict.has_key() is Python 2 only
self.prec_ms2[prec_mass_round].append(spec_obj)
else:
self.prec_ms2[prec_mass_round] = [spec_obj]
            if rt_time_round in self.rt_ms2:  # dict.has_key() is Python 2 only
self.rt_ms2[rt_time_round].append(spec_obj)
else:
self.rt_ms2[rt_time_round] = [spec_obj]
self.ms2_to_ms1[scan_id] = prec_scan_id
def read_mzml(self,filename,use_prec_max_intens=False,isolation_window_bounds=0.49,filter_top=5,windowed_mode=True,window_size=50):
infile = mzml.read(filename)
for scan in infile:
spec_obj = Spectrum(scan["m/z array"],
scan["intensity array"],
scan["ms level"],
scan["total ion current"],
scan["scanList"]["scan"][0]["scan start time"],
scan["base peak intensity"],
"positive scan" in scan.keys(),
"negative scan" in scan.keys())
scan_id = scan["id"].split("scan=")[1].split(" ")[0]
if scan["ms level"] == 1:
#print(list(spec_obj.mz_array))
#print(len(spec_obj.mz_array))
#print(scan_id)
#raw_input("ms1")
self.scan_to_spectrum[scan_id] = spec_obj
self.ms1.append(scan_id)
infile = mzml.read(filename)
for scan in infile:
spec_obj = Spectrum(scan["m/z array"],
scan["intensity array"],
scan["ms level"],
scan["total ion current"],
scan["scanList"]["scan"][0]["scan start time"],
scan["base peak intensity"],
"positive scan" in scan.keys(),
"negative scan" in scan.keys())
scan_id = scan["id"].split("scan=")[1].split(" ")[0]
if scan["ms level"] == 2:
if "function=" in scan["id"]:
prec_scan_id = scan_id
scan_id = scan_id+"_"+scan["id"].split("function=")[1].split(" ")[0]
else:
self.scan_to_spectrum[scan_id] = spec_obj
prec_scan_id = scan["precursorList"]["precursor"][0]["spectrumRef"].split("scan=")[1]
self.ms2_to_ms1[scan_id] = prec_scan_id
#print(list(spec_obj.mz_array))
#print(len(spec_obj.mz_array))
#print(scan_id)
#raw_input("ms2")
self.scan_to_spectrum[scan_id] = spec_obj
try: self.scan_to_spectrum[self.ms2_to_ms1[scan_id]]
except KeyError: continue
self.ms2.append(scan_id)
spec_obj.filter_top_peaks(min_intensity=True)
if use_prec_max_intens:
spec_obj.set_prec_mass(scan["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]["selected ion m/z"])
prec_mz = self.scan_to_spectrum[scan_id].prec_mass
lower_bound = prec_mz-isolation_window_bounds
upper_bound = prec_mz+isolation_window_bounds
retr_indexes = np.where(np.logical_and(self.scan_to_spectrum[self.ms2_to_ms1[scan_id]].mz_array > lower_bound, self.scan_to_spectrum[self.ms2_to_ms1[scan_id]].mz_array < upper_bound))[0]
max_intensity_index = int(np.argmax(self.scan_to_spectrum[self.ms2_to_ms1[scan_id]].intensity_array[retr_indexes]))
isolated_precursor = self.scan_to_spectrum[self.ms2_to_ms1[scan_id]].mz_array[retr_indexes][max_intensity_index]
self.scan_to_spectrum[scan_id].set_prec_mass(float(isolated_precursor))
else:
spec_obj.set_prec_mass(scan["precursorList"]["precursor"][0]["selectedIonList"]["selectedIon"][0]["selected ion m/z"])
prec_mass_round = int(round(spec_obj.prec_mass / self.delta_precursor))
rt_time_round = int(round(spec_obj.scan_start_time / self.delta_rt))
if prec_mass_round in self.prec_ms2.keys():
self.prec_ms2[prec_mass_round].append(spec_obj)
else:
self.prec_ms2[prec_mass_round] = [spec_obj]
if rt_time_round in self.rt_ms2.keys():
self.rt_ms2[rt_time_round].append(spec_obj)
else:
self.rt_ms2[rt_time_round] = [spec_obj]
#if use_prec_max_intens:
# for scan in self.ms2:
# prec_mz = self.scan_to_spectrum[scan].prec_mass
# lower_bound = prec_mz-isolation_window_bounds
# upper_bound = prec_mz+isolation_window_bounds
# retr_indexes = np.where(np.logical_and(self.scan_to_spectrum[self.ms2_to_ms1[scan_id]].mz_array > lower_bound, self.scan_to_spectrum[self.ms2_to_ms1[scan_id]].mz_array < upper_bound))[0]
# max_intensity_index = int(np.argmax(self.scan_to_spectrum[self.ms2_to_ms1[scan_id]].intensity_array[retr_indexes]))
# isolated_precursor = self.scan_to_spectrum[self.ms2_to_ms1[scan_id]].mz_array[retr_indexes][max_intensity_index]
# self.scan_to_spectrum[scan].set_prec_mass(isolated_precursor)
def get_XIC(self,mass,ppm=20,search_engine=False,positive_mode=True,negative_mode=True):
if not search_engine:
search_engine = MS1_extracter(ppm=ppm)
ret_list = []
for ms1_scan in self.ms1:
ms1_spectrum = self.scan_to_spectrum[ms1_scan]
if not ms1_spectrum.positive_scan == positive_mode: continue
if not ms1_spectrum.negative_scan == negative_mode: continue
ret_list.append([ms1_spectrum.scan_start_time,search_engine.retrieve_intensity(zip(ms1_spectrum.mz_array,ms1_spectrum.intensity_array),mass)[1]])
return(ret_list)
class Spectrum():
def __init__(self,
mz_array,
intensity_array,
ms_level,
total_ion_current,
scan_start_time,
base_peak_intensity,
positive_scan,
negative_scan,
prec_mass=0.0,
ion_injection_time=0.0,
top_peaks=100):
self.mz_array = mz_array
self.intensity_array = intensity_array
self.ms_level = ms_level
self.total_ion_current = total_ion_current
self.scan_start_time = scan_start_time
self.ion_injection_time = ion_injection_time
self.prec_mass = prec_mass
self.base_peak_intensity = base_peak_intensity
self.positive_scan = positive_scan
self.negative_scan = negative_scan
        # Make sure the m/z array is sorted (keeping intensities aligned); later lookups rely on sorted m/z.
if not all(a <= b for a, b in zip(self.mz_array[:-1],self.mz_array[1:])):
self.intensity_array = [ia for ma,ia in sorted(zip(self.mz_array,self.intensity_array),key=itemgetter(0))]
self.mz_array = sorted(self.mz_array)
if not positive_scan and not negative_scan:
positive_scan = True
negative_scan = True
def set_prec_mass(self,prec_mass):
self.prec_mass = prec_mass
# TODO add Minimal intensity!
def filter_top_peaks(self,min_intensity=False,min_perc=False,windowed_mode=False,intensity_threshold=10.0,top=10,window_size=100,add_dummy_peak=False):
"""
Filter in multiple ways on the intensity of peaks.
Parameters
----------
mz_list : list
The m/z values of a spectrum in a list; equal length to the intensity list
intensity_list : list
The intensity values of a spectrum in a list; equal length to the m/z list
min_perc : bool
Flag to use a minimal percentage intensity to filter peaks
windowed_mode : bool
Flag to use windowed mode to return the top intensity peaks
top : int
The top intensity peaks to filter (in windowed mode it will return the top peaks within the window)
window_size : int
The size of the window in windowed mode
add_dummy_peak : bool
Flag to add a dummy peak at 0.0 m/z
Returns
-------
list
the filtered m/z values from the spectrum
list
the filtered intensity values from the spectrum
"""
gr_intensity_list = []
gr_mz_list = []
#In the case of minimal percentage... calculate perc intensity and filter
if min_perc:
for i,mz in zip(self.intensity_array,self.mz_array):
if i > min_perc:
gr_intensity_list.append(i)
gr_mz_list.append(mz)
#In the case of windowed mode... iterate over the possible windows and intensity values; take the top per window
if windowed_mode:
start_index = 0
for w in range(window_size,int(max(self.mz_array)),window_size):
temp_mz = []
temp_intens = []
temp_start_index = 0
#Iterate over all m/z values and see if they fall within the window
for mz,intens in zip(self.mz_array[start_index:],self.intensity_array[start_index:]):
if mz > w and mz <= w+window_size:
temp_start_index += 1
temp_mz.append(mz)
temp_intens.append(intens)
if mz > w+window_size:
break
#Next window ignore all these lower values
start_index = temp_start_index
#Use all if there are less peaks than the top number of peaks it should select
if len(temp_mz) <= top:
gr_mz_list.extend(temp_mz)
gr_intensity_list.extend(temp_intens)
continue
#Get the indexes of the top peaks
idxs = np.sort(np.argpartition(np.array(temp_intens), -top)[-top:])
gr_mz_list.extend([temp_mz[idx] for idx in idxs])
gr_intensity_list.extend([temp_intens[idx] for idx in idxs])
if min_intensity:
for i,mz in zip(self.intensity_array,self.mz_array):
if i > intensity_threshold:
gr_intensity_list.append(i)
gr_mz_list.append(mz)
#If not windowed, min perc or min intensity use a simple top peaks
if not windowed_mode and not min_perc and not min_intensity:
if len(self.intensity_array) > top:
#Get the indexes of the top peaks
                idxs = np.sort(np.argpartition(np.array(self.intensity_array), -top)[-top:])
gr_mz_list = [self.mz_array[idx] for idx in idxs]
gr_intensity_list = [self.intensity_array[idx] for idx in idxs]
else:
#If there are less peaks than top peaks; return all
gr_mz_list = self.mz_array
gr_intensity_list = self.intensity_array
#If needed add a dummy peak; this is important later since I want to take into account immonium ions and small fragments
if add_dummy_peak:
gr_mz_list.insert(0,0.0)
gr_intensity_list.insert(0,1.0)
self.mz_array = gr_mz_list
self.intensity_array = gr_intensity_list
"""
def filter_top_peaks(self,top=1000):
if len(self.intensity_array) > top:
#Get the indexes of the top peaks
idxs = np.sort(np.argpartition(np.array(self.intensity_array), -top)[-top:])
gr_mz_list = [self.mz_array[idx] for idx in idxs]
gr_intensity_list = [self.intensity_array[idx] for idx in idxs]
else:
#If there are less peaks than top peaks; return all
gr_mz_list = self.mz_array
gr_intensity_list = self.intensity_array
self.mz_array = gr_mz_list
self.intensity_array = gr_intensity_list
"""
if __name__ == "__main__":
exp = Experiment("23Mar17_HaCaT_ox15h_1.mzML")
for scan_num in exp.ms2:
print(exp.scan_to_spectrum[scan_num].prec_mass)
avg_line_length: 35.194611 | max_line_length: 191 | alphanum_fraction: 0.704977
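The `filter_top_peaks` docstring above describes keeping only the most intense peaks per m/z window. As a standalone illustration of that windowed top-N idea (made-up peak values, and not a reproduction of the class's exact behaviour):

```python
import numpy as np

# Made-up peaks for illustration: m/z values and their intensities.
mz = np.array([101.2, 120.5, 180.3, 205.1, 240.9, 260.4, 310.7])
intensity = np.array([5.0, 80.0, 15.0, 200.0, 40.0, 90.0, 10.0])


def top_n_per_window(mz, intensity, top=2, window_size=100):
    """Keep the `top` most intense peaks inside each m/z window."""
    kept_mz, kept_intensity = [], []
    for start in range(0, int(mz.max()) + window_size, window_size):
        idx = np.where((mz >= start) & (mz < start + window_size))[0]
        if idx.size == 0:
            continue
        # Indices of the `top` largest intensities inside this window, returned in m/z order.
        best = np.sort(idx[np.argsort(intensity[idx])[-top:]])
        kept_mz.extend(mz[best])
        kept_intensity.extend(intensity[best])
    return np.array(kept_mz), np.array(kept_intensity)


print(top_n_per_window(mz, intensity))
```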
hexsha: 6e5ecee2efce9062b3251b6995c3c11ea7a5ca28 | size: 11,158 | ext: py | lang: Python
max_stars:  tests/test_mousebind.py @ tiosgz/herbstluftwm (2f337ab3d73431b02f31e7d3cfee3a60fe77cdb9) | licenses: ["BSD-2-Clause-FreeBSD"] | count: null | events: null .. null
max_issues: tests/test_mousebind.py @ tiosgz/herbstluftwm (2f337ab3d73431b02f31e7d3cfee3a60fe77cdb9) | licenses: ["BSD-2-Clause-FreeBSD"] | count: null | events: null .. null
max_forks:  tests/test_mousebind.py @ tiosgz/herbstluftwm (2f337ab3d73431b02f31e7d3cfee3a60fe77cdb9) | licenses: ["BSD-2-Clause-FreeBSD"] | count: null | events: null .. null
content:
import pytest
import re
import math
# Note: For unknown reasons, mouse buttons 4 and 5 (scroll wheel) do not work
# in Xvfb when running tests in the CI. Therefore, we maintain two lists of
# buttons:
MOUSE_BUTTONS_THAT_EXIST = [1, 2, 3, 4, 5]
MOUSE_BUTTONS_THAT_WORK = [1, 2, 3]
@pytest.mark.parametrize('method', ['-F', '--all'])
def test_mouseunbind_all(hlwm, method, mouse):
hlwm.create_client()
hlwm.call('mousebind Button1 call quit')
unbind = hlwm.call(['mouseunbind', method])
assert unbind.stdout == ''
# TODO: assert hlwm.call('list_mousebind').stdout == ''
mouse.click('1') # verify that binding got ungrabbed
def test_mousebind_empty_command(hlwm):
call = hlwm.call_xfail('mousebind Button3 call')
call.expect_stderr('mousebind: not enough arguments')
def test_mousebind_unknown_button(hlwm):
call = hlwm.call_xfail('mousebind Button42 call quit')
call.expect_stderr('Unknown mouse button "Button42"')
def test_mousebind_unknown_action(hlwm):
call = hlwm.call_xfail('mousebind Button1 get schwifty')
call.expect_stderr('Unknown mouse action "get"')
@pytest.mark.parametrize('button', MOUSE_BUTTONS_THAT_WORK)
def test_trigger_mouse_binding_without_modifier(hlwm, mouse, button):
hlwm.call('new_attr string my_press')
hlwm.call(f'mousebind Button{button} call set_attr my_press yup')
client_id, _ = hlwm.create_client()
mouse.click(str(button), client_id)
assert hlwm.get_attr('my_press') == 'yup'
@pytest.mark.parametrize('button', MOUSE_BUTTONS_THAT_WORK)
def test_trigger_mouse_binding_with_modifier(hlwm, keyboard, mouse, button):
hlwm.call('new_attr string my_press')
hlwm.call(f'mousebind Mod1-Button{button} call set_attr my_press yup')
hlwm.call(f'mousebind Button{button} call remove_attr my_press') # canary bind (should not trigger)
client_id, _ = hlwm.create_client()
keyboard.down('Alt')
mouse.click(str(button), client_id)
keyboard.up('Alt')
assert hlwm.get_attr('my_press') == 'yup'
def test_overlapping_bindings_most_recent_one_counts(hlwm, mouse):
hlwm.call('new_attr string my_press')
hlwm.call('mousebind Button2 call set_attr my_press firstbind')
hlwm.call('mousebind Button2 call set_attr my_press secondbind')
client_id, _ = hlwm.create_client()
mouse.click('2', client_id)
assert hlwm.get_attr('my_press') == 'secondbind'
@pytest.mark.parametrize('prefix', ['', 'Mod1+'])
def test_complete_mousebind_offers_all_mods_and_buttons(hlwm, prefix):
complete = hlwm.complete(['mousebind', prefix], partial=True, position=1)
buttons = sum(([f'Button{i}', f'B{i}'] for i in MOUSE_BUTTONS_THAT_EXIST), [])
mods = ['Alt', 'Control', 'Ctrl', 'Mod1', 'Mod2', 'Mod3', 'Mod4', 'Mod5', 'Shift', 'Super']
if prefix == 'Mod1+':
mods = [m for m in mods if m not in ['Mod1', 'Alt']]
assert sorted(c[:-1] for c in complete) == sorted(prefix + i for i in mods + buttons)
def test_complete_mousebind_after_button_offers_action(hlwm):
complete = hlwm.complete('mousebind B3', partial=False, position=2)
assert set(complete) == {'move', 'resize', 'zoom', 'call'}
def test_complete_mousebind_with_call_action_offers_all_commands(hlwm):
complete = hlwm.complete('mousebind B1 call', position=3)
assert complete == hlwm.complete('', position=0)
def test_complete_mousebind_validates_all_button(hlwm):
# Note: This might seem like a stupid test, but previous implementations
# ignored the invalid first modifier.
complete = hlwm.complete('mousebind Moo+Mo', partial=True, position=1)
assert complete == []
# we had a race condition here, so increase the likelihood
# that we really fixed it:
@pytest.mark.parametrize('repeat', list(range(0, 100)))
def test_drag_move(hlwm, x11, mouse, repeat):
hlwm.call('set_attr tags.focus.floating on')
client, winid = x11.create_client()
x, y = x11.get_absolute_top_left(client)
mouse.move_into(winid, wait=True)
hlwm.call(['drag', winid, 'move'])
mouse.move_relative(12, 15)
hlwm.call('true') # sync
assert x11.get_absolute_top_left(client) == (x + 12, y + 15)
def test_drag_no_frame_splits(hlwm):
winid, _ = hlwm.create_client()
hlwm.call_xfail(['drag', winid, 'resize']) \
.expect_stderr('No neighbour frame')
def test_mouse_drag_no_frame_splits(hlwm, hlwm_process, mouse):
hlwm.call('mousebind B1 resize')
winid, _ = hlwm.create_client()
with hlwm_process.wait_stderr_match('No neighbour frame'):
# we do not wait because it clashes with the running
# wait_stderr_match() context here
mouse.click('1', winid, wait=False)
def test_drag_invisible_client(hlwm):
# check that we can't resize clients that are on a tag
# that is not shown
hlwm.call('add t')
hlwm.call('set_attr tags.by-name.t.floating on')
# invisible win
kid, _ = hlwm.create_client()
# got a tag of his own
hlwm.call('move t')
# where he'll never be known
hlwm.call_xfail(['drag', kid, 'resize']) \
.expect_stderr('cannot drag invisible client')
# inward he's grown :-)
def test_drag_minimized_client(hlwm):
winid, _ = hlwm.create_client()
hlwm.call(f'set_attr clients.{winid}.minimized on')
hlwm.call_xfail(['drag', winid, 'resize']) \
.expect_stderr('cannot drag invisible client')
def test_drag_resize_tiled_client(hlwm, mouse):
winid, _ = hlwm.create_client()
layout = '(split horizontal:{}:1 (clients max:0) (clients max:0 {}))'
hlwm.call(['load', layout.format('0.5', winid)])
# Just positioning the mouse pointer, no need to wait for hlwm
mouse.move_into(winid, x=10, y=30, wait=False)
hlwm.call(['drag', winid, 'resize'])
assert hlwm.get_attr('clients.dragged.winid') == winid
mouse.move_relative(200, 300)
monitor_width = int(hlwm.call('monitor_rect').stdout.split(' ')[2])
layout_str = hlwm.call('dump').stdout
layout_ma = re.match(layout.replace('(', r'\(')
.replace(')', r'\)')
.format('([^:]*)', '.*'), layout_str)
expected = 0.5 + 200 / monitor_width
actual = float(layout_ma.group(1))
assert math.isclose(actual, expected, abs_tol=0.01)
@pytest.mark.parametrize('live_update', [True, False])
def test_drag_resize_floating_client(hlwm, x11, mouse, live_update):
hlwm.call(['set', 'update_dragged_clients', hlwm.bool(live_update)])
client, winid = x11.create_client(geometry=(50, 50, 300, 200))
hlwm.call(f'set_attr clients.{winid}.floating true')
geom_before = client.get_geometry()
x_before, y_before = x11.get_absolute_top_left(client)
assert (geom_before.width, geom_before.height) == (300, 200)
# move cursor to the top left corner, so we change the
# window position and the size (and the bottom right corner is fixed)
# Just positioning the mouse pointer, no need to wait for hlwm
mouse.move_into(winid, x=0, y=0, wait=False)
hlwm.call(['drag', winid, 'resize'])
assert hlwm.get_attr('clients.dragged.winid') == winid
mouse.move_relative(100, 120)
final_size = (geom_before.width - 100, geom_before.height - 120)
# check geometry during drag
x11.display.sync()
geom_after = client.get_geometry()
x_after, y_after = x11.get_absolute_top_left(client)
assert (x_after, y_after) == (x_before + 100, y_before + 120)
expected_size = (geom_before.width, geom_before.height)
if live_update:
expected_size = final_size
assert (geom_after.width, geom_after.height) == expected_size
# stop drag and check final size
mouse.click('1', wait=True)
geom_after = client.get_geometry()
assert (geom_after.width, geom_after.height) == final_size
def test_drag_zoom_floating_client(hlwm, x11, mouse):
client, winid = x11.create_client(geometry=(50, 50, 300, 200))
hlwm.call(f'set_attr clients.{winid}.floating true')
geom_before = client.get_geometry()
assert (geom_before.width, geom_before.height) == (300, 200)
x_before, y_before = x11.get_absolute_top_left(client)
center_before = (x_before + geom_before.width / 2, y_before + geom_before.height / 2)
# Just positioning the mouse pointer, no need to wait for hlwm
mouse.move_into(winid, x=0, y=0, wait=False)
hlwm.call(['drag', winid, 'zoom'])
assert hlwm.get_attr('clients.dragged.winid') == winid
mouse.move_relative(100, -30)
final_size = (geom_before.width - (100 * 2), geom_before.height + (30 * 2))
# stop drag and check final size and client center
mouse.click('1', wait=True)
geom_after = client.get_geometry()
assert (geom_after.width, geom_after.height) == final_size
x_after, y_after = x11.get_absolute_top_left(client)
center_after = (x_after + geom_after.width / 2, y_after + geom_after.height / 2)
assert center_before == center_after
# we had a race condition here, so increase the likelihood
# that we really fixed it:
@pytest.mark.parametrize('repeat', list(range(0, 100)))
def test_move_client_via_decoration(hlwm, x11, mouse, repeat):
hlwm.call('attr theme.padding_top 20')
client, winid = x11.create_client(geometry=(50, 50, 300, 200))
hlwm.call(f'set_attr clients.{winid}.floating true')
size_before = client.get_geometry()
x_before, y_before = x11.get_absolute_top_left(client)
mouse.move_to(x_before + 10, y_before - 10) # a bit into the padding
mouse.mouse_press('1')
assert hlwm.get_attr('clients.dragged.winid') == winid
mouse.move_relative(130, 110)
expected_position = (x_before + 130, y_before + 110)
mouse.mouse_release('1')
x11.display.sync()
assert 'dragged' not in hlwm.list_children('clients')
# the size didn't change
size_after = client.get_geometry()
assert (size_before.width, size_before.height) \
== (size_after.width, size_after.height)
# but the location
assert expected_position == x11.get_absolute_top_left(client)
# we had a race condition here, so increase the likelihood
# that we really fixed it:
@pytest.mark.parametrize('repeat', list(range(0, 100)))
def test_resize_client_via_decoration(hlwm, x11, mouse, repeat):
hlwm.call('attr theme.border_width 20')
client, winid = x11.create_client(geometry=(50, 50, 300, 200))
hlwm.call(f'set_attr clients.{winid}.floating true')
size_before = client.get_geometry()
x_before, y_before = x11.get_absolute_top_left(client)
mouse.move_to(x_before + 10, y_before - 10) # a bit into the window border
mouse.mouse_press('1')
assert hlwm.get_attr('clients.dragged.winid') == winid
mouse.move_relative(80, 70)
expected_position = (x_before + 80, y_before + 70)
expected_size = (size_before.width - 80, size_before.height - 70)
mouse.mouse_release('1')
# the size changed
x11.display.sync()
assert 'dragged' not in hlwm.list_children('clients')
size_after = client.get_geometry()
assert expected_size == (size_after.width, size_after.height)
# and also the location
assert expected_position == x11.get_absolute_top_left(client)
avg_line_length: 37.442953 | max_line_length: 104 | alphanum_fraction: 0.697975
hexsha: 4a5d2877020d204b5c83f6160fbafb13115d2cd6 | size: 4,544 | ext: py | lang: Python
max_stars:  src/pfun/aio_trampoline.py @ suned/pfun (46c460646487abfef897bd9627891f6cf7870774) | licenses: ["MIT"] | count: 126 | events: 2019-09-16T15:28:20.000Z .. 2022-03-20T10:57:53.000Z
max_issues: src/pfun/aio_trampoline.py @ suned/pfun (46c460646487abfef897bd9627891f6cf7870774) | licenses: ["MIT"] | count: 54 | events: 2019-09-30T08:44:01.000Z .. 2022-03-20T11:10:00.000Z
max_forks:  src/pfun/aio_trampoline.py @ suned/pfun (46c460646487abfef897bd9627891f6cf7870774) | licenses: ["MIT"] | count: 11 | events: 2020-01-02T08:32:46.000Z .. 2022-03-20T11:10:24.000Z
content:
from abc import ABC, abstractmethod
from asyncio import iscoroutine
from typing import (Awaitable, Callable, Generic, Iterable, List, TypeVar,
Union, cast)
from .immutable import Immutable
from .monad import Monad
A = TypeVar('A', covariant=True)
B = TypeVar('B')
C = TypeVar('C')
class Trampoline(Immutable, Monad, Generic[A], ABC):
"""
    Base class for Trampolines. Useful for writing stack-safe
    recursive functions.
"""
@abstractmethod
async def _resume(self) -> 'Trampoline[A]':
pass
@abstractmethod
async def _handle_cont(
self, cont: Callable[[A], 'Trampoline[B]']
) -> 'Trampoline[B]':
pass
@property
def _is_done(self) -> bool:
return isinstance(self, Done)
def and_then(self, f: Callable[[A], 'Trampoline[B]']) -> 'Trampoline[B]':
"""
Apply ``f`` to the value wrapped by this trampoline.
:param f: function to apply the value in this trampoline
:return: Result of applying ``f`` to the value wrapped by \
this trampoline
"""
return AndThen(self, f)
def map(self, f: Callable[[A], B]) -> 'Trampoline[B]':
"""
Map ``f`` over the value wrapped by this trampoline.
:param f: function to wrap over this trampoline
:return: new trampoline wrapping the result of ``f``
"""
return self.and_then(lambda a: Done(f(a))) # type: ignore
async def run(self) -> A:
"""
Interpret a structure of trampolines to produce a result
        :return: result of interpreting this structure of \
trampolines
"""
trampoline = self
while not trampoline._is_done:
trampoline = await trampoline._resume()
return cast(Done[A], trampoline).a
class Done(Trampoline[A]):
"""
Represents the result of a recursive computation.
"""
a: A
async def _resume(self) -> Trampoline[A]:
return self
async def _handle_cont(
self,
cont: Callable[[A], Union[Awaitable[Trampoline[B]], Trampoline[B]]]
) -> Trampoline[B]:
result = cont(self.a)
if iscoroutine(result):
return await result # type: ignore
return result # type: ignore
class Call(Trampoline[A]):
"""
Represents a recursive call.
"""
thunk: Callable[[], Awaitable[Trampoline[A]]]
async def _handle_cont(self, cont: Callable[[A], Trampoline[B]]
) -> Trampoline[B]:
trampoline = await self.thunk()
return trampoline.and_then(cont)
async def _resume(self) -> Trampoline[A]:
return await self.thunk()
class AndThen(Generic[A, B], Trampoline[B]):
"""
Represents monadic bind for trampolines as a class to avoid
deep recursive calls to ``Trampoline.run`` during interpretation.
"""
sub: Trampoline[A]
cont: Callable[[A], Union[Trampoline[B], Awaitable[Trampoline[B]]]]
async def _handle_cont(self, cont: Callable[[B], Trampoline[C]]
) -> Trampoline[C]:
return self.sub.and_then(self.cont).and_then(cont) # type: ignore
async def _resume(self) -> Trampoline[B]:
return await self.sub._handle_cont(self.cont) # type: ignore
def and_then( # type: ignore
self, f: Callable[[A], Trampoline[B]]
) -> Trampoline[B]:
def cont(x):
async def thunk():
t = self.cont(x)
if iscoroutine(t):
t = await t
return t.and_then(f)
return Call(thunk)
return AndThen(self.sub, cont)
def sequence(iterable: Iterable[Trampoline[B]]) -> Trampoline[Iterable[B]]:
"""
Evaluate each :class:`Trampoline` in `iterable` from left to right
and collect the results
:example:
    >>> import asyncio
    >>> asyncio.run(sequence([Done(v) for v in range(3)]).run())
    (0, 1, 2)
:param iterable: The iterable to collect results from
:returns: ``Trampoline`` of collected results
"""
def combine(rs: Trampoline[List[B]],
t: Trampoline[B]) -> Trampoline[List[B]]:
return rs.and_then(
lambda xs: t.map(
lambda x: (xs.append(x), xs)[1] # type: ignore
)
)
result: Trampoline[List[B]] = Done([])
for trampoline in iterable:
result = combine(result, trampoline)
return result.map(tuple)
__all__ = ['Trampoline', 'Done', 'sequence', 'Call', 'AndThen']
avg_line_length: 28.759494 | max_line_length: 77 | alphanum_fraction: 0.588468
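The `Trampoline` classes above replace deep recursion with an iterative `run` loop. A small usage sketch, assuming the module is importable as `pfun.aio_trampoline` (the path recorded in this row); the `countdown` function is made up for the example:

```python
import asyncio

# Assumes the module above is installed/importable under this path.
from pfun.aio_trampoline import Call, Done, Trampoline


def countdown(n: int) -> Trampoline[int]:
    # Each step returns a Call wrapping the next step, so no Python stack frames pile up.
    if n == 0:
        return Done(0)

    async def thunk() -> Trampoline[int]:
        return countdown(n - 1)

    return Call(thunk)


# Far deeper than the default recursion limit, yet interpreted iteratively by run().
print(asyncio.run(countdown(100_000).run()))
```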
hexsha: 854ea68c5cd426cf2e360918b78d6df59187692e | size: 1,139 | ext: py | lang: Python
max_stars:  shepherd/protos/text_pb2.py @ pioneers/Shepherd (c76b70ca6cbe0db9e45bd55e1f87691a1f5cc3cd) | licenses: ["Apache-2.0"] | count: 1 | events: 2021-05-26T00:14:38.000Z .. 2021-05-26T00:14:38.000Z
max_issues: shepherd/protos/text_pb2.py @ pioneers/shepherd (c76b70ca6cbe0db9e45bd55e1f87691a1f5cc3cd) | licenses: ["Apache-2.0"] | count: 38 | events: 2020-06-17T05:57:30.000Z .. 2022-01-27T04:20:18.000Z
max_forks:  shepherd/protos/text_pb2.py @ pioneers/Shepherd (c76b70ca6cbe0db9e45bd55e1f87691a1f5cc3cd) | licenses: ["Apache-2.0"] | count: 1 | events: 2021-06-17T18:47:12.000Z .. 2021-06-17T18:47:12.000Z
content:
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: text.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\ntext.proto\"\x17\n\x04Text\x12\x0f\n\x07payload\x18\x01 \x03(\tB\x02H\x03\x62\x06proto3')
_TEXT = DESCRIPTOR.message_types_by_name['Text']
Text = _reflection.GeneratedProtocolMessageType('Text', (_message.Message,), {
'DESCRIPTOR' : _TEXT,
'__module__' : 'text_pb2'
# @@protoc_insertion_point(class_scope:Text)
})
_sym_db.RegisterMessage(Text)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'H\003'
_TEXT._serialized_start=14
_TEXT._serialized_end=37
# @@protoc_insertion_point(module_scope)
avg_line_length: 31.638889 | max_line_length: 153 | alphanum_fraction: 0.785777
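The generated module above defines a single `Text` message whose `payload` field is a repeated string. A minimal round-trip sketch, assuming the generated `text_pb2.py` and the protobuf runtime are importable:

```python
# Assumes text_pb2.py (the generated module above) is on the import path.
from text_pb2 import Text

msg = Text(payload=["hello", "shepherd"])  # repeated string field from text.proto
data = msg.SerializeToString()             # wire-format bytes

decoded = Text.FromString(data)
print(list(decoded.payload))               # ['hello', 'shepherd']
```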
hexsha: cdb9be1c239aef0572d33d6a107967ee79d4af5b | size: 5,446 | ext: py | lang: Python
max_stars:  test/functional/receivedby.py @ PERSHYANCOIN/PERSHYANCOIN (bbadf90495732ecdbf5ab9a27e84e1dbdaff117d) | licenses: ["MIT"] | count: 1 | events: 2018-02-21T07:10:01.000Z .. 2018-02-21T07:10:01.000Z
max_issues: test/functional/receivedby.py @ pershyancoin/pershyancoin (bbadf90495732ecdbf5ab9a27e84e1dbdaff117d) | licenses: ["MIT"] | count: 2 | events: 2018-02-12T22:00:38.000Z .. 2018-02-12T22:01:03.000Z
max_forks:  test/functional/receivedby.py @ PERSHYANCOIN/PERSHYANCOIN (bbadf90495732ecdbf5ab9a27e84e1dbdaff117d) | licenses: ["MIT"] | count: null | events: null .. null
content:
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Pershyancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listreceivedbyaddress RPC."""
from decimal import Decimal
from test_framework.test_framework import PershyancoinTestFramework
from test_framework.util import (assert_array_result,
assert_equal,
assert_raises_rpc_error,
)
class ReceivedByTest(PershyancoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
# Generate block to get out of IBD
self.nodes[0].generate(1)
self.log.info("listreceivedbyaddress Test")
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
        # Check that it is not listed in listreceivedbyaddress because it has 0 confirmations
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address": addr},
{},
True)
        # Bury Tx under 10 blocks so it will be returned by listreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address": addr},
{"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
        # With min confirmations < 10
assert_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address": addr},
{"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
        # With min confirmations > 10, should not find Tx
assert_array_result(self.nodes[1].listreceivedbyaddress(11), {"address": addr}, {}, True)
# Empty Tx
addr = self.nodes[1].getnewaddress()
assert_array_result(self.nodes[1].listreceivedbyaddress(0, True),
{"address": addr},
{"address": addr, "account": "", "amount": 0, "confirmations": 0, "txids": []})
self.log.info("getreceivedbyaddress Test")
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# Check balance is 0 because of 0 confirmations
balance = self.nodes[1].getreceivedbyaddress(addr)
assert_equal(balance, Decimal("0.0"))
# Check balance is 0.1
balance = self.nodes[1].getreceivedbyaddress(addr, 0)
assert_equal(balance, Decimal("0.1"))
        # Bury Tx under 10 blocks so it will be returned by the default getreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
assert_equal(balance, Decimal("0.1"))
# Trying to getreceivedby for an address the wallet doesn't own should return an error
assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr)
self.log.info("listreceivedbyaccount + getreceivedbyaccount Test")
# set pre-state
addrArr = self.nodes[1].getnewaddress()
account = self.nodes[1].getaccount(addrArr)
received_by_account_json = [r for r in self.nodes[1].listreceivedbyaccount() if r["account"] == account][0]
balance_by_account = self.nodes[1].getreceivedbyaccount(account)
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# listreceivedbyaccount should return received_by_account_json because of 0 confirmations
assert_array_result(self.nodes[1].listreceivedbyaccount(),
{"account": account},
received_by_account_json)
        # getreceivedbyaccount should return the same balance because of 0 confirmations
balance = self.nodes[1].getreceivedbyaccount(account)
assert_equal(balance, balance_by_account)
self.nodes[1].generate(10)
self.sync_all()
# listreceivedbyaccount should return updated account balance
assert_array_result(self.nodes[1].listreceivedbyaccount(),
{"account": account},
{"account": received_by_account_json["account"], "amount": (received_by_account_json["amount"] + Decimal("0.1"))})
        # getreceivedbyaccount should return the updated balance
balance = self.nodes[1].getreceivedbyaccount(account)
assert_equal(balance, balance_by_account + Decimal("0.1"))
# Create a new account named "mynewaccount" that has a 0 balance
self.nodes[1].getaccountaddress("mynewaccount")
received_by_account_json = [r for r in self.nodes[1].listreceivedbyaccount(0, True) if r["account"] == "mynewaccount"][0]
# Test includeempty of listreceivedbyaccount
assert_equal(received_by_account_json["amount"], Decimal("0.0"))
# Test getreceivedbyaccount for 0 amount accounts
balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
assert_equal(balance, Decimal("0.0"))
if __name__ == '__main__':
ReceivedByTest().main()
avg_line_length: 45.008264 | max_line_length: 142 | alphanum_fraction: 0.625964
hexsha: ad6c6f70baf71916d5d89b05d88137afacfd7a4a | size: 2,617 | ext: py | lang: Python
max_stars:  testcube/urls.py @ tobyqin/testcube (0a3e9a4ad61bd97ae46f878e188936f4725e1e49) | licenses: ["MIT"] | count: 28 | events: 2017-06-28T08:39:01.000Z .. 2022-01-24T11:47:41.000Z
max_issues: testcube/urls.py @ gunesmes/testcube (008740c278ad82e8b33cad7c069e3e8e086b389c) | licenses: ["MIT"] | count: 60 | events: 2017-06-06T04:06:46.000Z .. 2019-06-21T08:53:14.000Z
max_forks:  testcube/urls.py @ gunesmes/testcube (008740c278ad82e8b33cad7c069e3e8e086b389c) | licenses: ["MIT"] | count: 10 | events: 2017-06-06T02:14:08.000Z .. 2022-02-07T14:43:28.000Z
content:
"""testcube URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework import routers
from .core import views
from .core.api import api_registration as core_api_registration
from .core.api import client_auth
from .runner.api import api_registration as runner_api_registration
from .users import views as user_views
admin.site.site_header = 'TestCube Administration'
admin.site.site_title = admin.site.site_header
router = routers.DefaultRouter()
core_api_registration(router)
runner_api_registration(router)
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include(router.urls), name='api'),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^client-register', client_auth.register, name='client_register'),
url('^signin$', user_views.signin, name='signin'),
url('^signup$', user_views.signup, name='signup'),
url('^signout$', user_views.signout, name='signout'),
url('^reset$', user_views.reset_password, name='reset_password'),
url('^profile$', user_views.user_profile, name='user_profile'),
url(r'^$', views.index, name='index'),
url(r'^welcome$', views.welcome, name='welcome'),
url(r'^docs/(?P<name>.+)$', views.document, name='docs'),
url(r'^runs$', views.runs, name='runs'),
url(r'^runs/(\d+)$', views.run_detail, name='run_detail'),
url(r'^testcases$', views.cases, name='testcases'),
url(r'^testcases/(\d+)', views.case_detail, name='testcase_detail'),
url(r'^results/(\d+)$', views.result_detail, name='result_detail'),
url(r'^results/(\d+)/reset$', views.result_reset, name='result_reset'),
url(r'^results/(\d+)/analysis$', views.result_analysis, name='result_analysis'),
url(r'^results$', views.results, name='results'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
avg_line_length: 40.890625 | max_line_length: 84 | alphanum_fraction: 0.710737
hexsha: 450116e83605118ec9192e31900610715c7a4afe | size: 1,084 | ext: py | lang: Python
max_stars:  glossy-gorillas/core/factories/trader.py @ fliepeltje/summer-code-jam-2020 (de1287b643b610d9c5df49778bfbeae5dd115df1) | licenses: ["MIT"] | count: 40 | events: 2020-08-02T07:38:22.000Z .. 2021-07-26T01:46:50.000Z
max_issues: glossy-gorillas/core/factories/trader.py @ fliepeltje/summer-code-jam-2020 (de1287b643b610d9c5df49778bfbeae5dd115df1) | licenses: ["MIT"] | count: 134 | events: 2020-07-31T12:15:45.000Z .. 2020-12-13T04:42:19.000Z
max_forks:  glossy-gorillas/core/factories/trader.py @ fliepeltje/summer-code-jam-2020 (de1287b643b610d9c5df49778bfbeae5dd115df1) | licenses: ["MIT"] | count: 101 | events: 2020-07-31T12:00:47.000Z .. 2021-11-01T09:06:58.000Z
content:
import factory
from django.contrib.auth.models import User
from core.models import Trader, InventoryRecord, QuantityType as QT
from core.factories.product import ProductFactory
class UserFactory(factory.DjangoModelFactory):
username = factory.Sequence(lambda n: f"teabay_user_{n+1}")
first_name = factory.Faker("first_name")
last_name = factory.Faker("last_name")
    email = factory.LazyAttribute(
        # factory.Faker("domain_name") is a declaration object and is not evaluated inside
        # an f-string, so use a fixed domain instead.
        lambda obj: f"{obj.first_name}.{obj.last_name}@example.com".lower()
    )
class Meta:
model = User
class TraderFactory(factory.DjangoModelFactory):
user = factory.SubFactory(UserFactory)
description = factory.Faker("catch_phrase")
class Meta:
model = Trader
class InventoryRecordFactory(factory.DjangoModelFactory):
owner = factory.SubFactory(TraderFactory)
quantity = factory.Faker("pyint")
quantity_type = factory.Iterator(
[QT.COUNT.value, QT.WEIGHT_G.value, QT.WEIGHT_KG.value]
)
product = factory.SubFactory(ProductFactory)
class Meta:
model = InventoryRecord
avg_line_length: 29.297297 | max_line_length: 94 | alphanum_fraction: 0.72417
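The factories above chain User -> Trader -> InventoryRecord through `SubFactory`. A usage sketch, which assumes a configured Django test environment (settings plus a test database), since the factories save model instances:

```python
# Sketch only: run inside a Django test case or pytest-django, not as a bare script.
from core.factories.trader import TraderFactory, InventoryRecordFactory

trader = TraderFactory()                       # also creates the related User via SubFactory
record = InventoryRecordFactory(owner=trader)  # reuse the trader instead of creating another

print(record.owner.user.username, record.quantity, record.quantity_type)
```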
hexsha: cc670a08f3a53b36ed67ccd4823f1cd82b90d3ee | size: 639 | ext: py | lang: Python
max_stars:  geoevents/taggit/tests/forms.py @ ngageoint/geoevents (11d11b97033403f81934270c10d655b8e699c410) | licenses: ["MIT"] | count: 25 | events: 2015-01-06T15:37:31.000Z .. 2020-12-10T19:05:22.000Z
max_issues: geoevents/taggit/tests/forms.py @ ngageoint/geoevents (11d11b97033403f81934270c10d655b8e699c410) | licenses: ["MIT"] | count: 2 | events: 2015-01-31T02:36:58.000Z .. 2015-02-01T00:11:15.000Z
max_forks:  geoevents/taggit/tests/forms.py @ ngageoint/geoevents (11d11b97033403f81934270c10d655b8e699c410) | licenses: ["MIT"] | count: 5 | events: 2016-01-01T15:04:49.000Z .. 2019-05-30T23:34:30.000Z
content:
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from django import forms
from taggit.tests.models import Food, DirectFood, CustomPKFood, OfficialFood
class FoodForm(forms.ModelForm):
class Meta:
model = Food
class DirectFoodForm(forms.ModelForm):
class Meta:
model = DirectFood
class CustomPKFoodForm(forms.ModelForm):
class Meta:
model = CustomPKFood
class OfficialFoodForm(forms.ModelForm):
class Meta:
model = OfficialFood
avg_line_length: 26.625 | max_line_length: 104 | alphanum_fraction: 0.735524
hexsha: 17ba785e8e96e79cd1c9f27627dad3dff9f74f3d | size: 22,970 | ext: py | lang: Python
max_stars:  corehq/apps/commtrack/tests/test_xml.py @ SEL-Columbia/commcare-hq (992ee34a679c37f063f86200e6df5a197d5e3ff6) | licenses: ["BSD-3-Clause"] | count: 1 | events: 2015-02-10T23:26:39.000Z .. 2015-02-10T23:26:39.000Z
max_issues: corehq/apps/commtrack/tests/test_xml.py @ SEL-Columbia/commcare-hq (992ee34a679c37f063f86200e6df5a197d5e3ff6) | licenses: ["BSD-3-Clause"] | count: null | events: null .. null
max_forks:  corehq/apps/commtrack/tests/test_xml.py @ SEL-Columbia/commcare-hq (992ee34a679c37f063f86200e6df5a197d5e3ff6) | licenses: ["BSD-3-Clause"] | count: null | events: null .. null
content:
from decimal import Decimal
from django.test.utils import override_settings
from lxml import etree
import os
import random
import uuid
from datetime import datetime
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.xml import V2
from casexml.apps.phone.restore import RestoreConfig
from casexml.apps.phone.tests.utils import synclog_id_from_restore_payload
from corehq.apps.commtrack.models import ConsumptionConfig, StockRestoreConfig, RequisitionCase, Product, StockState
from corehq.apps.consumption.shortcuts import set_default_monthly_consumption_for_domain
from couchforms.models import XFormInstance
from dimagi.utils.parsing import json_format_datetime
from casexml.apps.stock import const as stockconst
from casexml.apps.stock.models import StockReport, StockTransaction
from corehq.apps.commtrack import const
from corehq.apps.commtrack.tests.util import CommTrackTest, get_ota_balance_xml, FIXED_USER, extract_balance_xml
from casexml.apps.case.tests.util import check_xml_line_by_line, check_user_has_case
from corehq.apps.hqcase.utils import get_cases_in_domain
from corehq.apps.receiverwrapper import submit_form_locally
from corehq.apps.commtrack.tests.util import make_loc, make_supply_point
from corehq.apps.commtrack.const import DAYS_IN_MONTH
from corehq.apps.commtrack.requisitions import get_notification_message
from corehq.apps.commtrack.tests.data.balances import (
balance_ota_block,
submission_wrap,
balance_submission,
transfer_dest_only,
transfer_source_only,
transfer_both,
balance_first,
transfer_first,
create_requisition_xml,
create_fulfillment_xml,
create_received_xml,
receipts_enumerated,
balance_enumerated,
products_xml, long_date)
class CommTrackOTATest(CommTrackTest):
user_definitions = [FIXED_USER]
def setUp(self):
super(CommTrackOTATest, self).setUp()
self.user = self.users[0]
def test_ota_blank_balances(self):
user = self.user
self.assertFalse(get_ota_balance_xml(user))
def test_ota_basic(self):
user = self.user
amounts = [(p._id, i*10) for i, p in enumerate(self.products)]
report = _report_soh(amounts, self.sp._id, 'stock')
check_xml_line_by_line(
self,
balance_ota_block(
self.sp,
'stock',
amounts,
datestring=json_format_datetime(report.date),
),
get_ota_balance_xml(user)[0],
)
def test_ota_multiple_stocks(self):
user = self.user
date = datetime.utcnow()
report = StockReport.objects.create(form_id=uuid.uuid4().hex, date=date,
type=stockconst.REPORT_TYPE_BALANCE)
amounts = [(p._id, i*10) for i, p in enumerate(self.products)]
section_ids = sorted(('stock', 'losses', 'consumption'))
for section_id in section_ids:
_report_soh(amounts, self.sp._id, section_id, report=report)
balance_blocks = get_ota_balance_xml(user)
self.assertEqual(3, len(balance_blocks))
for i, section_id in enumerate(section_ids):
check_xml_line_by_line(
self,
balance_ota_block(
self.sp,
section_id,
amounts,
datestring=json_format_datetime(date),
),
balance_blocks[i],
)
def test_ota_consumption(self):
self.ct_settings.consumption_config = ConsumptionConfig(
min_transactions=0,
min_window=0,
optimal_window=60,
)
self.ct_settings.ota_restore_config = StockRestoreConfig(
section_to_consumption_types={'stock': 'consumption'}
)
set_default_monthly_consumption_for_domain(self.domain.name, 5 * DAYS_IN_MONTH)
amounts = [(p._id, i*10) for i, p in enumerate(self.products)]
report = _report_soh(amounts, self.sp._id, 'stock')
balance_blocks = _get_ota_balance_blocks(self.ct_settings, self.user)
self.assertEqual(2, len(balance_blocks))
stock_block, consumption_block = balance_blocks
check_xml_line_by_line(
self,
balance_ota_block(
self.sp,
'stock',
amounts,
datestring=json_format_datetime(report.date),
),
stock_block,
)
check_xml_line_by_line(
self,
balance_ota_block(
self.sp,
'consumption',
[(p._id, 150) for p in self.products],
datestring=json_format_datetime(report.date),
),
consumption_block,
)
def test_force_consumption(self):
self.ct_settings.consumption_config = ConsumptionConfig(
min_transactions=0,
min_window=0,
optimal_window=60,
)
self.ct_settings.ota_restore_config = StockRestoreConfig(
section_to_consumption_types={'stock': 'consumption'},
)
set_default_monthly_consumption_for_domain(self.domain.name, 5)
balance_blocks = _get_ota_balance_blocks(self.ct_settings, self.user)
self.assertEqual(0, len(balance_blocks))
# self.ct_settings.ota_restore_config.use_dynamic_product_list = True
self.ct_settings.ota_restore_config.force_consumption_case_types = [const.SUPPLY_POINT_CASE_TYPE]
balance_blocks = _get_ota_balance_blocks(self.ct_settings, self.user)
# with no data, there should be no consumption block
self.assertEqual(0, len(balance_blocks))
self.ct_settings.ota_restore_config.use_dynamic_product_list = True
balance_blocks = _get_ota_balance_blocks(self.ct_settings, self.user)
self.assertEqual(1, len(balance_blocks))
[balance_block] = balance_blocks
element = etree.fromstring(balance_block)
self.assertEqual(3, len([child for child in element]))
class CommTrackSubmissionTest(CommTrackTest):
user_definitions = [FIXED_USER]
def setUp(self):
super(CommTrackSubmissionTest, self).setUp()
self.user = self.users[0]
loc2 = make_loc('loc1')
self.sp2 = make_supply_point(self.domain.name, loc2)
@override_settings(CASEXML_FORCE_DOMAIN_CHECK=False)
def submit_xml_form(self, xml_method, **submit_extras):
instance_id = uuid.uuid4().hex
instance = submission_wrap(
instance_id,
self.products,
self.user,
self.sp,
self.sp2,
xml_method,
)
submit_form_locally(
instance=instance,
domain=self.domain.name,
**submit_extras
)
return instance_id
def check_stock_models(self, case, product_id, expected_soh, expected_qty, section_id):
if not isinstance(expected_qty, Decimal):
expected_qty = Decimal(str(expected_qty))
if not isinstance(expected_soh, Decimal):
expected_soh = Decimal(str(expected_soh))
latest_trans = StockTransaction.latest(case._id, section_id, product_id)
self.assertIsNotNone(latest_trans)
self.assertEqual(section_id, latest_trans.section_id)
self.assertEqual(expected_soh, latest_trans.stock_on_hand)
self.assertEqual(expected_qty, latest_trans.quantity)
def check_product_stock(self, supply_point, product_id, expected_soh, expected_qty, section_id='stock'):
self.check_stock_models(supply_point, product_id, expected_soh, expected_qty, section_id)
class CommTrackBalanceTransferTest(CommTrackSubmissionTest):
def test_balance_submit(self):
amounts = [(p._id, float(i*10)) for i, p in enumerate(self.products)]
self.submit_xml_form(balance_submission(amounts))
for product, amt in amounts:
self.check_product_stock(self.sp, product, amt, 0)
def test_balance_enumerated(self):
amounts = [(p._id, float(i*10)) for i, p in enumerate(self.products)]
self.submit_xml_form(balance_enumerated(amounts))
for product, amt in amounts:
self.check_product_stock(self.sp, product, amt, 0)
def test_balance_consumption(self):
initial = float(100)
initial_amounts = [(p._id, initial) for p in self.products]
self.submit_xml_form(balance_submission(initial_amounts))
final_amounts = [(p._id, float(50 - 10*i)) for i, p in enumerate(self.products)]
self.submit_xml_form(balance_submission(final_amounts))
for product, amt in final_amounts:
self.check_product_stock(self.sp, product, amt, 0)
inferred = amt - initial
inferred_txn = StockTransaction.objects.get(case_id=self.sp._id, product_id=product,
subtype=stockconst.TRANSACTION_SUBTYPE_INFERRED)
self.assertEqual(Decimal(str(inferred)), inferred_txn.quantity)
self.assertEqual(Decimal(str(amt)), inferred_txn.stock_on_hand)
self.assertEqual(stockconst.TRANSACTION_TYPE_CONSUMPTION, inferred_txn.type)
def test_balance_submit_multiple_stocks(self):
def _random_amounts():
return [(p._id, float(random.randint(0, 100))) for i, p in enumerate(self.products)]
section_ids = ('stock', 'losses', 'consumption')
stock_amounts = [(id, _random_amounts()) for id in section_ids]
for section_id, amounts in stock_amounts:
self.submit_xml_form(balance_submission(amounts, section_id=section_id))
for section_id, amounts in stock_amounts:
for product, amt in amounts:
self.check_product_stock(self.sp, product, amt, 0, section_id)
def test_transfer_dest_only(self):
amounts = [(p._id, float(i*10)) for i, p in enumerate(self.products)]
self.submit_xml_form(transfer_dest_only(amounts))
for product, amt in amounts:
self.check_product_stock(self.sp, product, amt, amt)
def test_transfer_source_only(self):
initial = float(100)
initial_amounts = [(p._id, initial) for p in self.products]
self.submit_xml_form(balance_submission(initial_amounts))
deductions = [(p._id, float(50 - 10*i)) for i, p in enumerate(self.products)]
self.submit_xml_form(transfer_source_only(deductions))
for product, amt in deductions:
self.check_product_stock(self.sp, product, initial-amt, -amt)
def test_transfer_both(self):
initial = float(100)
initial_amounts = [(p._id, initial) for p in self.products]
self.submit_xml_form(balance_submission(initial_amounts))
transfers = [(p._id, float(50 - 10*i)) for i, p in enumerate(self.products)]
self.submit_xml_form(transfer_both(transfers))
for product, amt in transfers:
self.check_product_stock(self.sp, product, initial-amt, -amt)
self.check_product_stock(self.sp2, product, amt, amt)
def test_transfer_enumerated(self):
initial = float(100)
initial_amounts = [(p._id, initial) for p in self.products]
self.submit_xml_form(balance_submission(initial_amounts))
receipts = [(p._id, float(50 - 10*i)) for i, p in enumerate(self.products)]
self.submit_xml_form(receipts_enumerated(receipts))
for product, amt in receipts:
self.check_product_stock(self.sp, product, initial + amt, amt)
def test_balance_first_doc_order(self):
initial = float(100)
balance_amounts = [(p._id, initial) for p in self.products]
transfers = [(p._id, float(50 - 10*i)) for i, p in enumerate(self.products)]
self.submit_xml_form(balance_first(balance_amounts, transfers))
for product, amt in transfers:
self.check_product_stock(self.sp, product, initial + amt, amt)
def test_transfer_first_doc_order(self):
# first set to 100
initial = float(100)
initial_amounts = [(p._id, initial) for p in self.products]
self.submit_xml_form(balance_submission(initial_amounts))
# then mark some receipts
transfers = [(p._id, float(50 - 10*i)) for i, p in enumerate(self.products)]
# then set to 50
final = float(50)
balance_amounts = [(p._id, final) for p in self.products]
self.submit_xml_form(transfer_first(transfers, balance_amounts))
for product, amt in transfers:
self.check_product_stock(self.sp, product, final, 0)
class BugSubmissionsTest(CommTrackSubmissionTest):
def test_device_report_submissions_ignored(self):
"""
submit a device report with a stock block and make sure it doesn't
get processed
"""
self.assertEqual(0, StockTransaction.objects.count())
fpath = os.path.join(os.path.dirname(__file__), 'data', 'xml', 'device_log.xml')
with open(fpath) as f:
form = f.read()
amounts = [(p._id, 10) for p in self.products]
product_block = products_xml(amounts)
form = form.format(
form_id=uuid.uuid4().hex,
user_id=self.user._id,
date=long_date(),
sp_id=self.sp._id,
product_block=product_block
)
submit_form_locally(
instance=form,
domain=self.domain.name,
)
self.assertEqual(0, StockTransaction.objects.count())
class CommTrackRequisitionTest(CommTrackSubmissionTest):
def setUp(self):
self.requisitions_enabled = True
super(CommTrackRequisitionTest, self).setUp()
def expected_notification_message(self, req, amounts):
summary = sorted(
['%s:%d' % (str(Product.get(p).code), amt) for p, amt in amounts]
)
return const.notification_template(req.get_next_action().action).format(
name='Unknown', # TODO currently not storing requester
summary=' '.join(summary),
loc=self.sp.location.site_code,
keyword=req.get_next_action().keyword
)
def test_create_fulfill_and_receive_requisition(self):
amounts = [(p._id, 50.0 + float(i*10)) for i, p in enumerate(self.products)]
# ----------------
# Create a request
# ----------------
self.submit_xml_form(create_requisition_xml(amounts))
req_cases = list(get_cases_in_domain(self.domain.name, type=const.REQUISITION_CASE_TYPE))
self.assertEqual(1, len(req_cases))
req = RequisitionCase.get(req_cases[0]._id)
[index] = req.indices
self.assertEqual(req.requisition_status, 'requested')
self.assertEqual(const.SUPPLY_POINT_CASE_TYPE, index.referenced_type)
self.assertEqual(self.sp._id, index.referenced_id)
self.assertEqual('parent_id', index.identifier)
# TODO: these types of tests probably belong elsewhere
self.assertEqual(req.get_next_action().keyword, 'fulfill')
self.assertEqual(req.get_location()._id, self.sp.location._id)
self.assertEqual(len(RequisitionCase.open_for_location(
self.domain.name,
self.sp.location._id
)), 1)
self.assertEqual(
get_notification_message(
req.get_next_action(),
[req]
),
self.expected_notification_message(req, amounts)
)
for product, amt in amounts:
self.check_stock_models(req, product, amt, 0, 'ct-requested')
# ----------------
# Mark it fulfilled
# -----------------
self.submit_xml_form(create_fulfillment_xml(req, amounts))
req = RequisitionCase.get(req._id)
self.assertEqual(req.requisition_status, 'fulfilled')
self.assertEqual(req.get_next_action().keyword, 'rec')
self.assertEqual(
get_notification_message(
req.get_next_action(),
[req]
),
self.expected_notification_message(req, amounts)
)
for product, amt in amounts:
# we are expecting two separate blocks to have come with the same
# values
self.check_stock_models(req, product, amt, amt, 'stock')
self.check_stock_models(req, product, amt, 0, 'ct-fulfilled')
# ----------------
# Mark it received
# ----------------
self.submit_xml_form(create_received_xml(req, amounts))
req = RequisitionCase.get(req._id)
self.assertEqual(req.requisition_status, 'received')
self.assertIsNone(req.get_next_action())
self.assertEqual(len(RequisitionCase.open_for_location(
self.domain.name,
self.sp.location._id
)), 0)
for product, amt in amounts:
self.check_stock_models(req, product, 0, -amt, 'stock')
self.check_stock_models(self.sp, product, amt, amt, 'stock')
class CommTrackSyncTest(CommTrackSubmissionTest):
def setUp(self):
super(CommTrackSyncTest, self).setUp()
# reused stuff
self.casexml_user = self.user.to_casexml_user()
self.sp_block = CaseBlock(
case_id=self.sp._id,
version=V2,
).as_xml()
# bootstrap ota stuff
self.ct_settings.consumption_config = ConsumptionConfig(
min_transactions=0,
min_window=0,
optimal_window=60,
)
self.ct_settings.ota_restore_config = StockRestoreConfig(
section_to_consumption_types={'stock': 'consumption'}
)
set_default_monthly_consumption_for_domain(self.domain.name, 5)
self.ota_settings = self.ct_settings.get_ota_restore_settings()
# get initial restore token
restore_config = RestoreConfig(
self.casexml_user,
version=V2,
stock_settings=self.ota_settings,
)
self.sync_log_id = synclog_id_from_restore_payload(restore_config.get_payload())
def testStockSyncToken(self):
# first restore should not have the updated case
check_user_has_case(self, self.casexml_user, self.sp_block, should_have=False,
restore_id=self.sync_log_id, version=V2)
# submit with token
amounts = [(p._id, float(i*10)) for i, p in enumerate(self.products)]
self.submit_xml_form(balance_submission(amounts), last_sync_token=self.sync_log_id)
# now restore should have the case
check_user_has_case(self, self.casexml_user, self.sp_block, should_have=True,
restore_id=self.sync_log_id, version=V2, line_by_line=False)
class CommTrackArchiveSubmissionTest(CommTrackSubmissionTest):
def testArchiveLastForm(self):
initial_amounts = [(p._id, float(100)) for p in self.products]
self.submit_xml_form(balance_submission(initial_amounts))
        final_amounts = [(p._id, float(50)) for p in self.products]
second_form_id = self.submit_xml_form(balance_submission(final_amounts))
def _assert_initial_state():
self.assertEqual(1, StockReport.objects.filter(form_id=second_form_id).count())
# 6 = 3 stockonhand and 3 inferred consumption txns
self.assertEqual(6, StockTransaction.objects.filter(report__form_id=second_form_id).count())
self.assertEqual(3, StockState.objects.filter(case_id=self.sp._id).count())
for state in StockState.objects.filter(case_id=self.sp._id):
self.assertEqual(Decimal(50), state.stock_on_hand)
self.assertIsNotNone(state.daily_consumption)
# check initial setup
_assert_initial_state()
# archive and confirm commtrack data is deleted
form = XFormInstance.get(second_form_id)
form.archive()
self.assertEqual(0, StockReport.objects.filter(form_id=second_form_id).count())
self.assertEqual(0, StockTransaction.objects.filter(report__form_id=second_form_id).count())
self.assertEqual(3, StockState.objects.filter(case_id=self.sp._id).count())
for state in StockState.objects.filter(case_id=self.sp._id):
# balance should be reverted to 100 in the StockState
self.assertEqual(Decimal(100), state.stock_on_hand)
# consumption should be none since there will only be 1 data point
self.assertIsNone(state.daily_consumption)
# unarchive and confirm commtrack data is restored
form.unarchive()
_assert_initial_state()
def testArchiveOnlyForm(self):
# check no data in stock states
self.assertEqual(0, StockState.objects.filter(case_id=self.sp._id).count())
initial_amounts = [(p._id, float(100)) for p in self.products]
form_id = self.submit_xml_form(balance_submission(initial_amounts))
# check that we made stuff
def _assert_initial_state():
self.assertEqual(1, StockReport.objects.filter(form_id=form_id).count())
self.assertEqual(3, StockTransaction.objects.filter(report__form_id=form_id).count())
self.assertEqual(3, StockState.objects.filter(case_id=self.sp._id).count())
for state in StockState.objects.filter(case_id=self.sp._id):
self.assertEqual(Decimal(100), state.stock_on_hand)
_assert_initial_state()
# archive and confirm commtrack data is cleared
form = XFormInstance.get(form_id)
form.archive()
self.assertEqual(0, StockReport.objects.filter(form_id=form_id).count())
self.assertEqual(0, StockTransaction.objects.filter(report__form_id=form_id).count())
self.assertEqual(0, StockState.objects.filter(case_id=self.sp._id).count())
# unarchive and confirm commtrack data is restored
form.unarchive()
_assert_initial_state()
def _report_soh(amounts, case_id, section_id='stock', report=None):
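    # Test helper: create a balance-type StockReport (unless one is supplied)
    # and record a stock-on-hand transaction for each (product_id, amount).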
if report is None:
report = StockReport.objects.create(
form_id=uuid.uuid4().hex,
date=datetime.utcnow(),
type=stockconst.REPORT_TYPE_BALANCE,
)
for product_id, amount in amounts:
StockTransaction.objects.create(
report=report,
section_id=section_id,
case_id=case_id,
product_id=product_id,
stock_on_hand=amount,
quantity=0,
type=stockconst.TRANSACTION_TYPE_STOCKONHAND,
)
return report
def _get_ota_balance_blocks(ct_settings, user):
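    # Test helper: generate an OTA restore payload for the user and return the
    # balance XML blocks extracted from it.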
ota_settings = ct_settings.get_ota_restore_settings()
restore_config = RestoreConfig(
user.to_casexml_user(),
version=V2,
stock_settings=ota_settings,
)
return extract_balance_xml(restore_config.get_payload())
| 40.654867
| 116
| 0.657423
|
78bf93374fcc95e78fba6cdd501cc5368c079e0a
| 2,262
|
py
|
Python
|
venv/Lib/site-packages/qtpy/tests/test_qtlocation.py
|
BoxicaLion/BasicMathFormulas
|
4d9782f2c0c75ecccf4c0ea995f324f93e4fb6e2
|
[
"MIT"
] | 1,520
|
2015-01-06T15:55:15.000Z
|
2022-03-31T21:50:23.000Z
|
qtpy/tests/test_qtlocation.py
|
phil65/qtpy
|
a228a667829be5822cf810fb06b53bf03a2b7b39
|
[
"MIT"
] | 546
|
2015-01-02T07:59:42.000Z
|
2022-03-31T12:51:29.000Z
|
qtpy/tests/test_qtlocation.py
|
phil65/qtpy
|
a228a667829be5822cf810fb06b53bf03a2b7b39
|
[
"MIT"
] | 328
|
2015-01-06T15:55:21.000Z
|
2022-03-28T22:07:25.000Z
|
from __future__ import absolute_import
import pytest
from qtpy import PYQT5, PYSIDE2
@pytest.mark.skipif(not (PYQT5 or PYSIDE2), reason="Only available in Qt5 bindings")
def test_qtlocation():
"""Test the qtpy.QtLocation namespace"""
from qtpy import QtLocation
assert QtLocation.QGeoCodeReply is not None
assert QtLocation.QGeoCodingManager is not None
assert QtLocation.QGeoCodingManagerEngine is not None
assert QtLocation.QGeoManeuver is not None
assert QtLocation.QGeoRoute is not None
assert QtLocation.QGeoRouteReply is not None
assert QtLocation.QGeoRouteRequest is not None
assert QtLocation.QGeoRouteSegment is not None
assert QtLocation.QGeoRoutingManager is not None
assert QtLocation.QGeoRoutingManagerEngine is not None
assert QtLocation.QGeoServiceProvider is not None
#assert QtLocation.QGeoServiceProviderFactory is not None
assert QtLocation.QPlace is not None
assert QtLocation.QPlaceAttribute is not None
assert QtLocation.QPlaceCategory is not None
assert QtLocation.QPlaceContactDetail is not None
assert QtLocation.QPlaceContent is not None
assert QtLocation.QPlaceContentReply is not None
assert QtLocation.QPlaceContentRequest is not None
assert QtLocation.QPlaceDetailsReply is not None
assert QtLocation.QPlaceEditorial is not None
assert QtLocation.QPlaceIcon is not None
assert QtLocation.QPlaceIdReply is not None
assert QtLocation.QPlaceImage is not None
assert QtLocation.QPlaceManager is not None
assert QtLocation.QPlaceManagerEngine is not None
assert QtLocation.QPlaceMatchReply is not None
assert QtLocation.QPlaceMatchRequest is not None
assert QtLocation.QPlaceProposedSearchResult is not None
assert QtLocation.QPlaceRatings is not None
assert QtLocation.QPlaceReply is not None
assert QtLocation.QPlaceResult is not None
assert QtLocation.QPlaceReview is not None
assert QtLocation.QPlaceSearchReply is not None
assert QtLocation.QPlaceSearchRequest is not None
assert QtLocation.QPlaceSearchResult is not None
assert QtLocation.QPlaceSearchSuggestionReply is not None
assert QtLocation.QPlaceSupplier is not None
assert QtLocation.QPlaceUser is not None
| 46.163265
| 84
| 0.799735
|
431d15cfe82c7eb45d49c4dd3bad452700397dfa
| 506
|
py
|
Python
|
Lib/site-packages/plotly/validators/histogram/marker/_reversescale.py
|
tytanya/my-first-blog
|
2b40adb0816c3546e90ad6ca1e7fb50d924c1536
|
[
"bzip2-1.0.6"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/histogram/marker/_reversescale.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/histogram/marker/_reversescale.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name='reversescale',
parent_name='histogram.marker',
**kwargs
):
super(ReversescaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 26.631579
| 75
| 0.618577
|
c4a5ed74cbb767745cd033ab8a465df783ed3f9a
| 11,678
|
py
|
Python
|
nodeum_sdk/models/task_source_up.py
|
nodeum-io/nodeum-sdk-python
|
205536491bff507dea7be44af46202c17e7121d9
|
[
"MIT"
] | null | null | null |
nodeum_sdk/models/task_source_up.py
|
nodeum-io/nodeum-sdk-python
|
205536491bff507dea7be44af46202c17e7121d9
|
[
"MIT"
] | null | null | null |
nodeum_sdk/models/task_source_up.py
|
nodeum-io/nodeum-sdk-python
|
205536491bff507dea7be44af46202c17e7121d9
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Nodeum API
    The Nodeum API makes it easy to tap into the digital data mesh that runs
    across your organisation. Make requests to our API endpoints and we’ll give
    you everything you need to interconnect your business workflows with your
    storage.

    All production API requests are made to: http://nodeumhostname/api/
    The current production version of the API is v1.

    **REST**
    The Nodeum API is a RESTful API. This means that the API is designed to
    allow you to get, create, update, & delete objects with the HTTP verbs GET,
    POST, PUT, PATCH, & DELETE.

    **JSON**
    The Nodeum API speaks exclusively in JSON. This means that you should
    always set the Content-Type header to application/json to ensure that your
    requests are properly accepted and processed by the API.

    **Authentication**
    All API calls require user-password authentication.

    **Cross-Origin Resource Sharing**
    The Nodeum API supports CORS for communicating from Javascript for these
    endpoints. You will need to specify an Origin URI when creating your
    application to allow for CORS to be whitelisted for your domain.

    **Pagination**
    Some endpoints such as File Listing return a potentially lengthy array of
    objects. In order to keep the response sizes manageable the API will take
    advantage of pagination. Pagination is a mechanism for returning a subset
    of the results for a request and allowing for subsequent requests to “page”
    through the rest of the results until the end is reached. Paginated
    endpoints follow a standard interface that accepts two query parameters,
    limit and offset, and return a payload that follows a standard form. These
    parameter names and their behavior are borrowed from the SQL LIMIT and
    OFFSET keywords.

    **Versioning**
    The Nodeum API is constantly being worked on to add features, make
    improvements, and fix bugs. This means that you should expect changes to be
    introduced and documented. However, there are some changes or additions
    that are considered backwards-compatible and your applications should be
    flexible enough to handle them. These include:
    - Adding new endpoints to the API
    - Adding new attributes to the response of an existing endpoint
    - Changing the order of attributes of responses (JSON by definition is an
      object of unordered key/value pairs)

    **Filter parameters**
    When browsing a list of items, multiple filter parameters may be applied.
    Some operators can be added to the value as a prefix:
    - `=` value is equal. Default operator, may be omitted
    - `!=` value is different
    - `>` greater than
    - `>=` greater than or equal
    - `<` lower than
    - `<=` lower than or equal
    - `><` included in list, items should be separated by `|`
    - `!><` not included in list, items should be separated by `|`
    - `~` pattern matching, may include `%` (any characters) and `_` (one character)
    - `!~` pattern not matching, may include `%` (any characters) and `_` (one character)
The version of the OpenAPI document: 2.1.0
Contact: info@nodeum.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from nodeum_sdk.configuration import Configuration
class TaskSourceUp(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'file_id': 'int',
'file_path': 'str',
'range': 'list[int]',
'import_file_id': 'int',
'import_file_path': 'str',
'tape_id': 'int',
'tape_barcode': 'str',
'pool_id': 'int',
'pool_name': 'str'
}
attribute_map = {
'file_id': 'file_id',
'file_path': 'file_path',
'range': 'range',
'import_file_id': 'import_file_id',
'import_file_path': 'import_file_path',
'tape_id': 'tape_id',
'tape_barcode': 'tape_barcode',
'pool_id': 'pool_id',
'pool_name': 'pool_name'
}
def __init__(self, file_id=None, file_path=None, range=None, import_file_id=None, import_file_path=None, tape_id=None, tape_barcode=None, pool_id=None, pool_name=None, local_vars_configuration=None): # noqa: E501
"""TaskSourceUp - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._file_id = None
self._file_path = None
self._range = None
self._import_file_id = None
self._import_file_path = None
self._tape_id = None
self._tape_barcode = None
self._pool_id = None
self._pool_name = None
self.discriminator = None
if file_id is not None:
self.file_id = file_id
if file_path is not None:
self.file_path = file_path
if range is not None:
self.range = range
if import_file_id is not None:
self.import_file_id = import_file_id
if import_file_path is not None:
self.import_file_path = import_file_path
if tape_id is not None:
self.tape_id = tape_id
if tape_barcode is not None:
self.tape_barcode = tape_barcode
if pool_id is not None:
self.pool_id = pool_id
if pool_name is not None:
self.pool_name = pool_name
@property
def file_id(self):
"""Gets the file_id of this TaskSourceUp. # noqa: E501
:return: The file_id of this TaskSourceUp. # noqa: E501
:rtype: int
"""
return self._file_id
@file_id.setter
def file_id(self, file_id):
"""Sets the file_id of this TaskSourceUp.
:param file_id: The file_id of this TaskSourceUp. # noqa: E501
:type: int
"""
self._file_id = file_id
@property
def file_path(self):
"""Gets the file_path of this TaskSourceUp. # noqa: E501
:return: The file_path of this TaskSourceUp. # noqa: E501
:rtype: str
"""
return self._file_path
@file_path.setter
def file_path(self, file_path):
"""Sets the file_path of this TaskSourceUp.
:param file_path: The file_path of this TaskSourceUp. # noqa: E501
:type: str
"""
self._file_path = file_path
@property
def range(self):
"""Gets the range of this TaskSourceUp. # noqa: E501
:return: The range of this TaskSourceUp. # noqa: E501
:rtype: list[int]
"""
return self._range
@range.setter
def range(self, range):
"""Sets the range of this TaskSourceUp.
:param range: The range of this TaskSourceUp. # noqa: E501
:type: list[int]
"""
self._range = range
@property
def import_file_id(self):
"""Gets the import_file_id of this TaskSourceUp. # noqa: E501
:return: The import_file_id of this TaskSourceUp. # noqa: E501
:rtype: int
"""
return self._import_file_id
@import_file_id.setter
def import_file_id(self, import_file_id):
"""Sets the import_file_id of this TaskSourceUp.
:param import_file_id: The import_file_id of this TaskSourceUp. # noqa: E501
:type: int
"""
self._import_file_id = import_file_id
@property
def import_file_path(self):
"""Gets the import_file_path of this TaskSourceUp. # noqa: E501
:return: The import_file_path of this TaskSourceUp. # noqa: E501
:rtype: str
"""
return self._import_file_path
@import_file_path.setter
def import_file_path(self, import_file_path):
"""Sets the import_file_path of this TaskSourceUp.
:param import_file_path: The import_file_path of this TaskSourceUp. # noqa: E501
:type: str
"""
self._import_file_path = import_file_path
@property
def tape_id(self):
"""Gets the tape_id of this TaskSourceUp. # noqa: E501
:return: The tape_id of this TaskSourceUp. # noqa: E501
:rtype: int
"""
return self._tape_id
@tape_id.setter
def tape_id(self, tape_id):
"""Sets the tape_id of this TaskSourceUp.
:param tape_id: The tape_id of this TaskSourceUp. # noqa: E501
:type: int
"""
self._tape_id = tape_id
@property
def tape_barcode(self):
"""Gets the tape_barcode of this TaskSourceUp. # noqa: E501
:return: The tape_barcode of this TaskSourceUp. # noqa: E501
:rtype: str
"""
return self._tape_barcode
@tape_barcode.setter
def tape_barcode(self, tape_barcode):
"""Sets the tape_barcode of this TaskSourceUp.
:param tape_barcode: The tape_barcode of this TaskSourceUp. # noqa: E501
:type: str
"""
self._tape_barcode = tape_barcode
@property
def pool_id(self):
"""Gets the pool_id of this TaskSourceUp. # noqa: E501
:return: The pool_id of this TaskSourceUp. # noqa: E501
:rtype: int
"""
return self._pool_id
@pool_id.setter
def pool_id(self, pool_id):
"""Sets the pool_id of this TaskSourceUp.
:param pool_id: The pool_id of this TaskSourceUp. # noqa: E501
:type: int
"""
self._pool_id = pool_id
@property
def pool_name(self):
"""Gets the pool_name of this TaskSourceUp. # noqa: E501
:return: The pool_name of this TaskSourceUp. # noqa: E501
:rtype: str
"""
return self._pool_name
@pool_name.setter
def pool_name(self, pool_name):
"""Sets the pool_name of this TaskSourceUp.
:param pool_name: The pool_name of this TaskSourceUp. # noqa: E501
:type: str
"""
self._pool_name = pool_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TaskSourceUp):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TaskSourceUp):
return True
return self.to_dict() != other.to_dict()
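# Illustrative usage sketch (not part of the generated SDK); the field values
# below are hypothetical. Guarded so it only runs when executed directly.
if __name__ == "__main__":
    source = TaskSourceUp(file_id=42, file_path="/mnt/archive/report.pdf")
    print(source.to_dict())
    print(source)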
| 35.387879
| 2,886
| 0.631615
|
dcce2b8ecfa02b997eb3e1edc98ba6c8f90deefc
| 727
|
py
|
Python
|
MachineLearning/hw2_q2/Step 1/hw2.py
|
SeanSyue/SklearnReferences
|
a2770a7108947877e772f3525bc915c5de4114bb
|
[
"MIT"
] | null | null | null |
MachineLearning/hw2_q2/Step 1/hw2.py
|
SeanSyue/SklearnReferences
|
a2770a7108947877e772f3525bc915c5de4114bb
|
[
"MIT"
] | null | null | null |
MachineLearning/hw2_q2/Step 1/hw2.py
|
SeanSyue/SklearnReferences
|
a2770a7108947877e772f3525bc915c5de4114bb
|
[
"MIT"
] | null | null | null |
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
##Read csv file retrieved from "https://www.kaggle.com/c/spooky-author-identification"
csv = pd.read_csv("C:/Users/Sean/Downloads/sample_submission/sample_submission.csv")
##Train the decision tree
tree = DecisionTreeClassifier(criterion='entropy', max_depth=5)
tree_result = tree.fit(csv[['MWS', 'EAP', 'HPL']], csv[['id']])
print(tree_result)
##Compare feature importances across columns
tree.feature_importances_.tolist()
df = pd.DataFrame({'items': ['MWS', 'EAP', 'HPL'], 'feature_importances_': tree.feature_importances_.tolist()})
df = df.sort_values(by=['feature_importances_'], ascending=True).reset_index(drop=True)
print(df)
| 42.764706
| 112
| 0.748281
|
c3c45ccb00c5af4e0c2ad0e8ce5ea2892d21c0ab
| 1,960
|
py
|
Python
|
examples/augmentations.py
|
simleo/pyecvl
|
c044dc2ddf9bb69e93ffe06113de9365dc84e168
|
[
"MIT"
] | 2
|
2020-04-29T13:17:15.000Z
|
2021-01-07T19:13:14.000Z
|
examples/augmentations.py
|
simleo/pyecvl
|
c044dc2ddf9bb69e93ffe06113de9365dc84e168
|
[
"MIT"
] | 19
|
2020-01-16T11:55:07.000Z
|
2022-02-28T11:27:40.000Z
|
examples/augmentations.py
|
deephealthproject/pyecvl
|
3fb256a77ab6d7ff62219044d54b51d84471db6e
|
[
"MIT"
] | 2
|
2020-01-20T13:47:05.000Z
|
2020-02-27T11:13:32.000Z
|
# Copyright (c) 2019-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""\
Reads an image, applies augmentations and writes results.
"""
import argparse
import os
import sys
import pyecvl.ecvl as ecvl
def main(args):
if not ecvl.ECVL_EDDL:
print("No EDDL support - quitting")
sys.exit(0)
head, ext = os.path.splitext(os.path.basename(args.in_fn))
img = ecvl.ImRead(args.in_fn)
c = ecvl.SequentialAugmentationContainer([ecvl.AugFlip(0.5)])
da = ecvl.DatasetAugmentations([c, None, None])
# some output images should be flipped
for i in range(10):
da.Apply(ecvl.SplitType.training, img)
out_fn = "%s_flip_%d%s" % (head, i, ext)
print("writing", out_fn)
ecvl.ImWrite(out_fn, img)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("in_fn", metavar="INPUT_PATH")
main(parser.parse_args(sys.argv[1:]))
| 37.692308
| 79
| 0.729592
|
f8050d9c4283bfdf08f0c0a367086c6b43081de1
| 4,392
|
py
|
Python
|
networking_bgpvpn/tests/unit/extensions/test_bgpvpn_vni.py
|
sapcc/networking-bgpvpn
|
902f817dc49bb19dfbda51ba85b3d0702542e859
|
[
"Apache-2.0"
] | null | null | null |
networking_bgpvpn/tests/unit/extensions/test_bgpvpn_vni.py
|
sapcc/networking-bgpvpn
|
902f817dc49bb19dfbda51ba85b3d0702542e859
|
[
"Apache-2.0"
] | null | null | null |
networking_bgpvpn/tests/unit/extensions/test_bgpvpn_vni.py
|
sapcc/networking-bgpvpn
|
902f817dc49bb19dfbda51ba85b3d0702542e859
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import mock
from oslo_utils import uuidutils
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.extensions import base as test_extensions_base
from neutron_lib.api.definitions import bgpvpn as bgpvpn_def
from neutron_lib.api.definitions import bgpvpn_vni as bgpvpn_vni_def
from webob import exc
from networking_bgpvpn.neutron.extensions import bgpvpn
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
BGPVPN_PREFIX = 'bgpvpn'
BGPVPN_URI = BGPVPN_PREFIX + '/' + 'bgpvpns'
BGPVPN_PLUGIN_BASE_NAME = (
bgpvpn.BGPVPNPluginBase.__module__ + '.' +
bgpvpn.BGPVPNPluginBase.__name__)
class BgpvpnVniTestExtensionManager(object):
def get_resources(self):
bgpvpn_def.RESOURCE_ATTRIBUTE_MAP[bgpvpn_def.COLLECTION_NAME].update(
bgpvpn_vni_def.RESOURCE_ATTRIBUTE_MAP[bgpvpn_def.COLLECTION_NAME])
return bgpvpn.Bgpvpn.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class BgpvpnVniExtensionTestCase(test_extensions_base.ExtensionTestCase):
fmt = 'json'
def setUp(self):
super(BgpvpnVniExtensionTestCase, self).setUp()
plural_mappings = {'bgpvpn': 'bgpvpns'}
self.setup_extension(
BGPVPN_PLUGIN_BASE_NAME,
bgpvpn_def.ALIAS,
BgpvpnVniTestExtensionManager(),
BGPVPN_PREFIX,
plural_mappings=plural_mappings,
translate_resource_name=True)
self.instance = self.plugin.return_value
def test_bgpvpn_create(self):
bgpvpn_id = _uuid()
data = {
'bgpvpn': {'name': 'bgpvpn1',
'type': 'l3',
'route_targets': ['1234:56'],
'vni': 1000,
'tenant_id': _uuid()}
}
expected_ret_val = copy.copy(data['bgpvpn'])
expected_ret_val['import_targets'] = []
expected_ret_val['export_targets'] = []
expected_ret_val['route_distinguishers'] = []
expected_ret_val['vni'] = 1000
expected_call_args = copy.copy(expected_ret_val)
expected_ret_val.update({'id': bgpvpn_id})
self.instance.create_bgpvpn.return_value = expected_ret_val
res = self.api.post(_get_path(BGPVPN_URI, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
self.assertTrue(self.instance.create_bgpvpn.called)
self.assertDictSupersetOf(
expected_call_args,
self.instance.create_bgpvpn.call_args[1]['bgpvpn']['bgpvpn'])
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('bgpvpn', res)
self.assertDictSupersetOf(expected_ret_val, res['bgpvpn'])
def test_bgpvpn_get(self):
bgpvpn_id = _uuid()
return_value = {'name': 'bgpvpn1',
'type': 'l3',
'route_targets': ['1234:56'],
'tenant_id': _uuid(),
'vni': 1000,
'id': bgpvpn_id}
self.instance.get_bgpvpn.return_value = return_value
res = self.api.get(_get_path(BGPVPN_URI,
id=bgpvpn_id,
fmt=self.fmt))
self.instance.get_bgpvpn.assert_called_with(
mock.ANY, bgpvpn_id, fields=mock.ANY
)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('bgpvpn', res)
self.assertEqual(res['bgpvpn'], return_value)
def test_bgpvpn_delete(self):
self._test_entity_delete('bgpvpn')
| 35.707317
| 78
| 0.643215
|
b7754813fb4035c4bfd1d0f6e35d307f87785f66
| 20,563
|
py
|
Python
|
app.py
|
Architkapoor13/EZY-BUY-SELL-E-Commerce
|
835defcc668a397a8c384ddf05f04856137bb78b
|
[
"MIT"
] | 3
|
2021-04-25T00:11:16.000Z
|
2022-03-07T16:05:32.000Z
|
app.py
|
Architkapoor13/EZY-BUY-SELL-E-Commerce
|
835defcc668a397a8c384ddf05f04856137bb78b
|
[
"MIT"
] | null | null | null |
app.py
|
Architkapoor13/EZY-BUY-SELL-E-Commerce
|
835defcc668a397a8c384ddf05f04856137bb78b
|
[
"MIT"
] | null | null | null |
import os
from cs50 import SQL
from flask import Flask, flash, jsonify, redirect, render_template, request, session, url_for
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from werkzeug.utils import secure_filename
from helpers import apology, login_required, lookup, usd
from itsdangerous import URLSafeTimedSerializer, SignatureExpired
from flask_mail import Mail, Message
# Configure application
app = Flask(__name__)
app.config.update(
DEBUG=True,
#EMAIL SETTINGS
MAIL_SERVER='smtp.gmail.com',
MAIL_PORT=465,
MAIL_USE_SSL=True,
MAIL_USERNAME = 'anything@gmail.com',
MAIL_PASSWORD = 'anything'
)
mail = Mail(app)
# mail.init_app(app)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# Custom filter
# app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///buy&sell.db")
# Make sure API key is set
# if not os.environ.get("API_KEY"):
# raise RuntimeError("API_KEY not set")
@app.route("/")
@login_required
def index():
"""Show Different Items on Sale"""
user_id = session["user_id"]
propic = db.execute("SELECT profilepicpath FROM profilepicture WHERE id=:id", id=user_id)
username = db.execute("SELECT username FROM users WHERE id=:id", id=user_id)
return render_template("index.html", propic=propic[0]["profilepicpath"], username=username[0]["username"])
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""buy items"""
user_id = session["user_id"]
if request.method == "GET":
propic = db.execute("SELECT profilepicpath FROM profilepicture WHERE id=:id", id=user_id)
username= db.execute("SELECT username FROM users WHERE id=:id", id=user_id)
# saleitems = db.execute("SELECT imagename, name, discription, price FROM buydisplayimage JOIN sdetails ON buydisplayimage.code = sdetails.code")
saleitems = db.execute(
"SELECT buydisplayimage.bimagename, sdetails.name, sdetails.discription, sdetails.price, sdetails.code, udetails.fullname, udetails.mobilenumber, udetails.email FROM sdetails "
"JOIN buydisplayimage ON sdetails.code=buydisplayimage.code "
"JOIN users ON sdetails.id=users.id "
"JOIN udetails ON users.username = udetails.username "
# "JOIN images ON sdetails.code=images.code"
)
images = db.execute("SELECT * FROM images")
return render_template("buy.html", saleitems=saleitems, propic=propic[0]["profilepicpath"], username=username[0]["username"], images=images)
else:
itemtype = request.form.get("itemtype")
itemname = request.form.get("itemname")
propic = db.execute("SELECT profilepicpath FROM profilepicture WHERE id=:id", id=user_id)
username = db.execute("SELECT username FROM users WHERE id=:id", id=user_id)
saleitems = db.execute(
"SELECT buydisplayimage.bimagename, sdetails.name, sdetails.discription, sdetails.price, sdetails.code, udetails.fullname, udetails.mobilenumber, udetails.email FROM sdetails "
"JOIN buydisplayimage ON sdetails.code=buydisplayimage.code "
"JOIN users ON sdetails.id=users.id "
"JOIN udetails ON users.username = udetails.username "
# "JOIN images ON sdetails.code=images.code "
"WHERE sdetails.itemname=:itemname", itemname=itemname)
images = db.execute("SELECT * FROM images")
# images = db.execute("SELECT ")
# saleitems = db.execute("SELECT buydisplayimage.imagename, sdetails.name, sdetails.discription, sdetails.price FROM buydisplayimage JOIN sdetails ON buydisplayimage.code = sdetails.code WHERE sdetails.itemname=:itemname", itemname=itemname)
# saleitems = db.execute("SELECT * FROM sdetails WHERE itemname=:itemname AND type=:itemtype", itemname=itemname,
# itemtype=itemtype)
return render_template("buy.html", saleitems=saleitems, propic=propic[0]["profilepicpath"], images=images, username=username[0]["username"])
# return apology("TODO")
@app.route("/sellingorders")
@login_required
def history():
"""Show history of transactions"""
if request.method == "GET":
user_id = session["user_id"]
details = db.execute("SELECT buydisplayimage.bimagename, sdetails.name, sdetails.itemname, sdetails.price, sdetails.code FROM sdetails "
"JOIN buydisplayimage ON sdetails.code=buydisplayimage.code WHERE id = :user_id", user_id=user_id)
return render_template("orders.html", details=details)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
flash("must provide username")
return render_template("login.html")
# return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
flash("Password not provided!")
return render_template("login.html")
username = request.form.get("username")
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
flash("invalid username and/or password")
return render_template("login.html")
# return apology("invalid username and/or password", 403)
username_c = db.execute("SELECT confirmed FROM udetails WHERE username=:username", username=username)
if username_c[0]["confirmed"] == 1:
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
username = request.form.get("username")
username_c = db.execute("SELECT confirmed FROM udetails WHERE username=:username", username=username)
# Redirect user to home page
flash("login Successfull")
return render_template("index.html")
if username_c[0]["confirmed"] == 0:
flash("Your email has not yet verified, please verify your email to continue")
return render_template("login.html")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
return apology("TODO")
s = URLSafeTimedSerializer("thisisasecret")
@app.route("/register", methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
else:
fullname = request.form.get("fullname")
if not fullname:
flash("Please fill all the fields")
return redirect("/register")
phonenumber = request.form.get("phonenumber")
email = request.form.get("mail")
address = request.form.get("address")
username = request.form.get("username")
if not username:
flash("Please fill all the fields")
return redirect("/register")
# return apology("please provide username")
password = request.form.get("password")
if not password:
flash("Please fill all the fields")
return redirect("/register")
# return apology("Please provide a password")
confirm = request.form.get("confirmation")
if not confirm or not password == confirm:
flash("Passwords do not match!")
return redirect("/register")
# return apology("Passwords do not match!")
if len(password) < 8:
flash("Password must be 8 chracters long!")
return redirect("/register")
rows = db.execute("SELECT * FROM users WHERE username = :username", username=username)
coloumns = db.execute("SELECT * FROM udetails WHERE email = :email", email=email)
if len(rows) != 1 and len(coloumns) != 1:
hash_p = generate_password_hash(password)
db.execute("INSERT INTO users (username, hash) VALUES (:username, :hash_p)", username=username,
hash_p=hash_p)
db.execute("INSERT INTO udetails (fullname, mobilenumber,email, address, username, password) VALUES (:fullname, :phonenumber, :email, :address, :username, :hash_p)",
fullname=fullname, phonenumber=phonenumber , email=email, address=address, username=username, hash_p=hash_p)
id = db.execute("SELECT id FROM users WHERE username=:username", username=username)
db.execute("INSERT INTO profilepicture (id) VALUES (:id)", id=id[0]["id"])
token = s.dumps(email, salt="email-confirm")
db.execute("UPDATE udetails SET token=:token WHERE username=:username", token=token, username=username)
msg = Message("Account-Activation",
sender="ezybuysell.noreply@gmail.com",
recipients=[email])
link = url_for("confirm_email", token=token, _external=True)
# msg.body = f"your link is {link}"
msg.body = f"Welcome to EZY-BUY-SELL, we are delightful to have you on our website! \r\n\n To Activate your account click on the link below: \r\n\n {link}"
mail.send(msg)
print("mail sent!")
flash("An email regarding the activation of your account has been sent on your registered email, activate your account to login your account. ")
return render_template("login.html")
else:
flash("Username/email already exists")
return render_template("register.html")
# return apology("Username already exists")
@app.route("/confirm/<token>")
def confirm_email(token):
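    # Verify the emailed token (valid for one hour), mark the matching account
    # as confirmed, and clear the stored token before redirecting to login.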
if request.method == "GET":
try:
email = s.loads(token, salt="email-confirm", max_age=3600)
except SignatureExpired:
return "token expired!"
db.execute("UPDATE udetails SET confirmed=1 WHERE token=:token", token=token)
new_token="0"
db.execute("UPDATE udetails SET token=:new_token WHERE token=:token", new_token=new_token, token=token)
return redirect("/login")
app.config["IMAGE_UPLOADS"] = "C:/Users/ARCHIT/Desktop/cs50final/static"
app.config["ALLOWED_IMAGE_EXTENSIONS"] = ["PNG", "JPG", "JPEG", "GIF"]
def allowed_ext(filename):
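    # Return True only when the filename contains a dot and its extension is
    # listed in ALLOWED_IMAGE_EXTENSIONS (checked case-insensitively).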
if not "." in filename:
return False
ext = filename.rsplit(".", 1)[1]
if ext.upper() in app.config["ALLOWED_IMAGE_EXTENSIONS"]:
return True
else:
return False
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell an Item"""
user_id = session["user_id"]
if request.method == "GET":
propic = db.execute("SELECT profilepicpath FROM profilepicture WHERE id=:id", id=user_id)
username = db.execute("SELECT username FROM users WHERE id=:id", id=user_id)
return render_template("sell.html", propic=propic[0]["profilepicpath"], username=username[0]["username"])
else:
propic = db.execute("SELECT profilepicpath FROM profilepicture WHERE id=:id", id=user_id)
iname = request.form.get("status")
type = request.form.get("source")
price = request.form.get("price")
discription = request.form.get("discription")
name = request.form.get("name")
if not iname or not type or not price or not discription or not name:
flash("Please fill all the fields!")
return render_template("sell.html", propic=propic[0]["profilepicpath"])
# return apology("Please fill all the fields!")
if 'files[]' not in request.files:
flash("no files selected")
return render_template("sell.html")
# return apology("no files selected")
db.execute("INSERT INTO sdetails (id, name, itemname, type, price, discription) VALUES (:user_id, :name, :itemname, :type, :price, :discription)",
user_id=user_id, name=name, itemname=iname, type=type, price=price, discription=discription)
files = request.files.getlist("files[]")
code = db.execute("SELECT code FROM sdetails WHERE id=:id AND name=:name AND itemname=:itemname AND type=:type AND price=:price AND discription=:discription",
id=user_id, name=name, itemname=iname, type=type, price=price, discription=discription)
number = 1
for file in files:
filename = secure_filename(file.filename)
if filename == "":
flash("No image selected")
return render_template("sell.html", propic=propic[0]["profilepicpath"])
if number == 1:
file.save(os.path.join(app.config['IMAGE_UPLOADS'], filename))
path = "../static/" + filename
db.execute("INSERT INTO buydisplayimage VALUES (:code, :imagename)", code=code[0]["code"], imagename=path)
db.execute("INSERT INTO images VALUES (:code, :imagename)", code=code[0]["code"], imagename=path)
print("images uploaded successfully")
number = number + 1
else:
file.save(os.path.join(app.config['IMAGE_UPLOADS'], filename))
path= "../static/" + filename
db.execute("INSERT INTO images VALUES (:code, :imagename)", code=code[0]["code"], imagename=path)
print("images uploaded successfully")
flash("Selling order placed!")
return render_template("index.html")
@app.route("/account", methods=["GET", "POST"])
def account():
user_id = session["user_id"]
if request.method == "GET":
details = db.execute("SELECT fullname, mobilenumber, email, address, username FROM udetails WHERE username = (SELECT username FROM users WHERE id = :user_id)", user_id= user_id)
name = details[0]["fullname"]
email = details[0]["email"]
address = details[0]["address"]
username = details[0]["username"]
phonenumber = details[0]["mobilenumber"]
propic = db.execute("SELECT profilepicpath FROM profilepicture WHERE id=:id", id=user_id)
# filename = "noimage.png"
return render_template("account.html", propic=propic[0]["profilepicpath"], name=name, email=email, username=username, address=address, phonenumber=phonenumber)
if 'dp' not in request.files:
flash("No image selected")
return redirect("/account")
# return apology("no image Selected")
propic = request.files["dp"]
if not propic:
flash("No image selected")
return redirect("/account")
# return apology("No file selected")
# flash("No file selected!")
# return redirect('/account')
filename = secure_filename(propic.filename)
if filename == "":
flash("No image selected")
return render_template("/account")
path = "../static/" + filename
propic.save(os.path.join(app.config['IMAGE_UPLOADS'], filename))
db.execute("UPDATE profilepicture SET profilepicpath=:filename WHERE id=:user_id", filename=path, user_id=user_id)
print("image uploaded")
return redirect("/account")
@app.route("/edit", methods=["GET", "POST"])
def edit():
if request.method == "POST":
code = request.form.get("code")
saleitems = db.execute(
"SELECT buydisplayimage.bimagename, sdetails.name, sdetails.discription, sdetails.price, sdetails.code, udetails.username, udetails.mobilenumber, udetails.email FROM sdetails "
"JOIN buydisplayimage ON sdetails.code=buydisplayimage.code "
"JOIN users ON sdetails.id=users.id "
"JOIN udetails ON users.username = udetails.username "
# "JOIN images ON sdetails.code=images.code "
"WHERE sdetails.code=:code", code=code)
images = db.execute("SELECT imagename FROM images WHERE code=:code", code=code)
return render_template("edit.html", saleitems=saleitems,code=code, images=images)
@app.route("/editorders", methods=["GET", "POST"])
def editorders():
if request.method == "POST":
name = request.form.get("name")
discription = request.form.get("discription")
price = request.form.get("price")
code = request.form.get("code")
if name:
db.execute("UPDATE sdetails SET name=:name WHERE code=:code", name=name, code=code)
if discription:
db.execute("UPDATE sdetails SET discription=:discription WHERE code=:code", discription=discription, code=code)
if price:
db.execute("UPDATE sdetails SET price=:price WHERE code=:code", price=price, code=code)
flash("Order updated!")
return render_template("index.html")
@app.route("/delete", methods=["GET", "POST"])
def delete():
if request.method == "POST":
code = request.form.get("code")
db.execute("DELETE FROM sdetails WHERE code=:code", code=code)
db.execute("DELETE FROM images WHERE code=:code", code=code)
db.execute("DELETE FROM buydisplayimage WHERE code=:code", code=code)
flash("Deleted successfully!")
return render_template("index.html")
@app.route("/forgetp", methods=["GET", "POST"])
def forgetp():
if request.method == "GET":
return render_template("forgetp.html")
else:
email = request.form.get("email")
# token = s.dumps(email, salt="password-forget")
msg = Message("Reset-Password",
sender="ezybuysell.noreply@gmail.com",
recipients=[email])
link = url_for("forget_password", email=email, _external=True)
# msg.body = f"your link is {link}"
msg.body = f"Welcome to EZY-BUY-SELL, Forgotten password? \r\n\n No worries! Click on the link below to reset your password: \r\n\n {link}"
mail.send(msg)
print("mail sent!")
flash(
"An email regarding the resetting your password has been sent on your registered email, you can change your password through that mail ")
return render_template("login.html")
# else:
# flash("Username already exists")
# return render_template("register.html")
# # return apology("Username already exists")
@app.route("/forgetp/<email>", methods=["GET", "POST"])
def forget_password(email):
if request.method == "GET":
return render_template("newpassword.html", email=email)
else:
new_password = request.form.get("newpassword")
cnew_password = request.form.get("cnewpassword")
hash_p = generate_password_hash(new_password)
db.execute("UPDATE udetails SET password=:password WHERE email=:email", password=hash_p, email=email)
db.execute("UPDATE users SET hash=:password WHERE username=(SELECT username FROM udetails WHERE email=:email)", password=hash_p, email=email)
# db.execute("UPDATE udetails SET token=:new_token WHERE token=:token", new_token=new_token, token=token)
flash("New password registered Successfully!")
return redirect("/login")
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
# Listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
| 43.938034
| 249
| 0.649808
|
e7bcf39a37a94fbcc1f06f478b46b28c60ba0c4b
| 2,363
|
py
|
Python
|
ptpython/entry_points/run_ptipython.py
|
vxgmichel/ptpython
|
89017ba158ed1d95319233fa5aedf3931c3b8b77
|
[
"BSD-3-Clause"
] | 1
|
2020-03-12T06:46:32.000Z
|
2020-03-12T06:46:32.000Z
|
ptpython/entry_points/run_ptipython.py
|
vxgmichel/ptpython
|
89017ba158ed1d95319233fa5aedf3931c3b8b77
|
[
"BSD-3-Clause"
] | null | null | null |
ptpython/entry_points/run_ptipython.py
|
vxgmichel/ptpython
|
89017ba158ed1d95319233fa5aedf3931c3b8b77
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
from .run_ptpython import create_parser, get_config_and_history_file
def run(user_ns=None):
a = create_parser().parse_args()
config_file, history_file = get_config_and_history_file(a)
# If IPython is not available, show message and exit here with error status
# code.
try:
import IPython
except ImportError:
print("IPython not found. Please install IPython (pip install ipython).")
sys.exit(1)
else:
from ptpython.ipython import embed
from ptpython.repl import run_config, enable_deprecation_warnings
# Add the current directory to `sys.path`.
if sys.path[0] != "":
sys.path.insert(0, "")
# When a file has been given, run that, otherwise start the shell.
if a.args and not a.interactive:
sys.argv = a.args
path = a.args[0]
with open(path, "rb") as f:
code = compile(f.read(), path, "exec")
exec(code, {})
else:
enable_deprecation_warnings()
# Create an empty namespace for this interactive shell. (If we don't do
# that, all the variables from this function will become available in
# the IPython shell.)
if user_ns is None:
user_ns = {}
# Startup path
startup_paths = []
if "PYTHONSTARTUP" in os.environ:
startup_paths.append(os.environ["PYTHONSTARTUP"])
# --interactive
if a.interactive:
startup_paths.append(a.args[0])
sys.argv = a.args
# exec scripts from startup paths
for path in startup_paths:
if os.path.exists(path):
with open(path, "rb") as f:
code = compile(f.read(), path, "exec")
exec(code, user_ns, user_ns)
else:
print("File not found: {}\n\n".format(path))
sys.exit(1)
# Apply config file
def configure(repl):
if os.path.exists(config_file):
run_config(repl, config_file)
# Run interactive shell.
embed(
vi_mode=a.vi,
history_filename=history_file,
configure=configure,
user_ns=user_ns,
title="IPython REPL (ptipython)",
)
if __name__ == "__main__":
run()
| 29.17284
| 81
| 0.578079
|
80ed5ee378535cc4ee77be814e666111fab97c25
| 444
|
py
|
Python
|
ecommerce/exceptions.py
|
mitodl/mit-xpro
|
981d6c87d963837f0b9ccdd996067fe81394dba4
|
[
"BSD-3-Clause"
] | 32
|
2016-03-25T01:03:13.000Z
|
2022-01-15T19:35:42.000Z
|
ecommerce/exceptions.py
|
mitodl/mit-xpro
|
981d6c87d963837f0b9ccdd996067fe81394dba4
|
[
"BSD-3-Clause"
] | 4,858
|
2016-03-03T13:48:30.000Z
|
2022-03-29T22:09:51.000Z
|
ecommerce/exceptions.py
|
umarmughal824/micromasters
|
ea92d3bcea9be4601150fc497302ddacc1161622
|
[
"BSD-3-Clause"
] | 20
|
2016-08-18T22:07:44.000Z
|
2021-11-15T13:35:35.000Z
|
"""
Exceptions for ecommerce
"""
class EcommerceException(Exception):
"""
General exception regarding ecommerce
"""
class EcommerceEdxApiException(Exception):
"""
Exception regarding edx_api_client
"""
class EcommerceModelException(Exception):
"""
Exception regarding ecommerce models
"""
class ParseException(Exception):
"""
Exception regarding parsing CyberSource reference numbers
"""
| 15.857143
| 61
| 0.693694
|
11d01787ef9b8d38605583892d2e3f3484a769c8
| 14,054
|
py
|
Python
|
python/pytagcloudLocal/__init__.py
|
jstraub/dpMM
|
538c432d5f98c040d5c1adb072e545e38f97fc69
|
[
"MIT-feh"
] | 11
|
2015-04-27T15:14:01.000Z
|
2021-11-18T00:19:18.000Z
|
python/pytagcloudLocal/__init__.py
|
jstraub/dpMM
|
538c432d5f98c040d5c1adb072e545e38f97fc69
|
[
"MIT-feh"
] | null | null | null |
python/pytagcloudLocal/__init__.py
|
jstraub/dpMM
|
538c432d5f98c040d5c1adb072e545e38f97fc69
|
[
"MIT-feh"
] | 6
|
2015-07-02T12:46:20.000Z
|
2022-03-30T04:39:30.000Z
|
# -*- coding: utf-8 -*-
from copy import copy
from math import sin, cos, ceil
from pygame import transform, font, mask, Surface, Rect, SRCALPHA, draw
from pygame.sprite import Group, Sprite, collide_mask
from random import randint, choice
import colorsys
import math
import os
import pygame
import simplejson
TAG_PADDING = 5
STEP_SIZE = 2 # relative to base step size of each spiral function
RADIUS = 1
ECCENTRICITY = 1.5
LOWER_START = 0.45
UPPER_START = 0.55
FONT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fonts')
DEFAULT_FONT = 'Droid Sans'
DEFAULT_PALETTE = 'default'
FONT_CACHE = simplejson.load(open(os.path.join(FONT_DIR, 'fonts.json'), 'r'))
pygame.init()
convsurf = Surface((2 * TAG_PADDING, 2 * TAG_PADDING))
convsurf.fill((255, 0, 255))
convsurf.set_colorkey((255, 0, 255))
draw.circle(convsurf, (0, 0, 0), (TAG_PADDING, TAG_PADDING), TAG_PADDING)
CONVMASK = mask.from_surface(convsurf)
LAYOUT_HORIZONTAL = 0
LAYOUT_VERTICAL = 1
LAYOUT_MOST_HORIZONTAL = 2
LAYOUT_MOST_VERTICAL = 3
LAYOUT_MIX = 4
LAYOUTS = (
LAYOUT_HORIZONTAL,
LAYOUT_VERTICAL,
LAYOUT_MOST_HORIZONTAL,
LAYOUT_MOST_VERTICAL,
LAYOUT_MIX
)
LAST_COLLISON_HIT = None
class Tag(Sprite):
"""
Font tag sprite. Blit the font to a surface to correct the font padding
"""
def __init__(self, tag, initial_position, fontname=DEFAULT_FONT):
Sprite.__init__(self)
self.tag = copy(tag)
self.rotation = 0
# print FONT_DIR,fontname
self.font_spec = load_font(fontname)
self.font = font.Font(os.path.join(FONT_DIR,
self.font_spec['ttf']),
self.tag['size'])
fonter = self.font.render(tag['tag'], True, tag['color'])
frect = fonter.get_bounding_rect()
frect.x = -frect.x
frect.y = -frect.y
self.fontoffset = (-frect.x, -frect.y)
font_sf = Surface((frect.width, frect.height), pygame.SRCALPHA, 32)
font_sf.blit(fonter, frect)
self.image = font_sf
self.rect = font_sf.get_rect()
self.rect.width += TAG_PADDING
self.rect.height += TAG_PADDING
self.rect.x = initial_position[0]
self.rect.y = initial_position[1]
self._update_mask()
def _update_mask(self):
self.mask = mask.from_surface(self.image)
self.mask = self.mask.convolve(CONVMASK, None, (TAG_PADDING, TAG_PADDING))
def flip(self):
angle = 90 if self.rotation == 0 else - 90
self.rotate(angle)
def rotate(self, angle):
pos = (self.rect.x, self.rect.y)
self.image = transform.rotate(self.image, angle)
self.rect = self.image.get_rect()
self.rect.x, self.rect.y = pos
self._update_mask()
def update_fontsize(self):
self.font = font.Font(os.path.join(FONT_DIR, self.font_spec['ttf']),
self.tag['size'])
def load_font(name):
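    # Look up a font specification by name in FONT_CACHE; raise AttributeError
    # listing the valid names when the font is unknown.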
for font in FONT_CACHE:
if font['name'] == name:
return font
raise AttributeError('Invalid font name. Should be one of %s' %
", ".join([f['name'] for f in FONT_CACHE]))
def defscale(count, mincount, maxcount, minsize, maxsize):
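    # Default scaling: map a tag count to a font size between minsize and
    # maxsize along a sub-linear (exponent 0.8) curve; if all counts are
    # equal, the midpoint size is returned.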
if maxcount == mincount:
return int((maxsize - minsize) / 2.0 + minsize)
return int(minsize + (maxsize - minsize) *
(count * 1.0 / (maxcount - mincount)) ** 0.8)
def make_tags(wordcounts, minsize=3, maxsize=36, colors=None, scalef=defscale):
"""
sizes and colors tags
wordcounts is a list of tuples(tags, count). (e.g. how often the
word appears in a text)
the tags are assigned sizes between minsize and maxsize, the function used
    is determined by scalef (default: a sub-linear power curve, exponent 0.8)
color is either chosen from colors (list of rgb tuples) if provided or random
"""
counts = [tag[1] for tag in wordcounts]
if not len(counts):
return []
maxcount = max(counts)
mincount = min(counts)
tags = []
for word_count in wordcounts:
color = choice(colors) if colors else (randint(10, 220), randint(10, 220),
randint(10, 220))
tags.append({'color': color, 'size': scalef(word_count[1], mincount,
maxcount, minsize, maxsize),
'tag': word_count[0]})
return tags
def _do_collide(sprite, group):
"""
Use mask based collision detection
"""
global LAST_COLLISON_HIT
# Test if we still collide with the last hit
if LAST_COLLISON_HIT and collide_mask(sprite, LAST_COLLISON_HIT):
return True
for sp in group:
if collide_mask(sprite, sp):
LAST_COLLISON_HIT = sp
return True
return False
def _get_tags_bounding(tag_store):
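    # Return the union of all tag rects; an empty store yields a zero-sized Rect.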
if not len(tag_store):
return Rect(0,0,0,0)
rects = [tag.rect for tag in tag_store]
return rects[0].unionall(rects[1:])
def _get_group_bounding(tag_store, sizeRect):
if not isinstance(sizeRect, pygame.Rect):
sizeRect = Rect(0, 0, sizeRect[0], sizeRect[1])
if tag_store:
rects = [tag.rect for tag in tag_store]
union = rects[0].unionall(rects[1:])
if sizeRect.contains(union):
return union
return sizeRect
def _archimedean_spiral(reverse):
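    # Generator yielding successive (x, y) offsets along an Archimedean
    # spiral; `reverse` flips the winding direction.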
DEFAULT_STEP = 0.05 # radians
t = 0
r = 1
if reverse:
r = -1
while True:
t += DEFAULT_STEP * STEP_SIZE * r
yield (ECCENTRICITY * RADIUS * t * cos(t), RADIUS * t * sin(t))
def _rectangular_spiral(reverse):
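    # Generator yielding (dx, dy) offsets tracing an expanding rectangular
    # spiral in steps of STEP_SIZE * DEFAULT_STEP pixels.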
DEFAULT_STEP = 3 # px
directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]
if reverse:
directions.reverse()
direction = directions[0]
spl = 1
dx = dy = 0
while True:
for step in range(spl * 2):
if step == spl:
direction = directions[(spl - 1) % 4]
dx += direction[0] * STEP_SIZE * DEFAULT_STEP
dy += direction[1] * STEP_SIZE * DEFAULT_STEP
yield dx, dy
spl += 1
def _search_place(current_tag, tag_store, canvas, spiral, ratio):
"""
Start a spiral search with random direction.
Resize the canvas if the spiral exceeds the bounding rectangle
"""
reverse = choice((0, 1))
start_x = current_tag.rect.x
start_y = current_tag.rect.y
min_dist = None
opt_x = opt_y = 0
current_bounding = _get_tags_bounding(tag_store)
cx = current_bounding.w / 2.0
cy = current_bounding.h / 2.0
for dx, dy in spiral(reverse):
current_tag.rect.x = start_x + dx
current_tag.rect.y = start_y + dy
if not _do_collide(current_tag, tag_store):
if canvas.contains(current_tag.rect):
tag_store.add(current_tag)
return
else:
# get the distance from center
current_dist = (abs(cx - current_tag.rect.x) ** 2 +
abs(cy - current_tag.rect.y) ** 2) ** 0.5
if not min_dist or current_dist < min_dist:
opt_x = current_tag.rect.x
opt_y = current_tag.rect.y
min_dist = current_dist
# only add tag if the spiral covered the canvas boundaries
if abs(dx) > canvas.width / 2.0 and abs(dy) > canvas.height / 2.0:
current_tag.rect.x = opt_x
current_tag.rect.y = opt_y
tag_store.add(current_tag)
new_bounding = current_bounding.union(current_tag.rect)
delta_x = delta_y = 0.0
if new_bounding.w > canvas.width:
delta_x = new_bounding.w - canvas.width
canvas.width = new_bounding.w
delta_y = ratio * new_bounding.w - canvas.height
canvas.height = ratio * new_bounding.w
if new_bounding.h > canvas.height:
delta_y = new_bounding.h - canvas.height
canvas.height = new_bounding.h
canvas.width = new_bounding.h / ratio
delta_x = canvas.width - canvas.width
# realign
for tag in tag_store:
tag.rect.x += delta_x / 2.0
tag.rect.y += delta_y / 2.0
canvas = _get_tags_bounding(tag_store)
return
def _draw_cloud(
tag_list,
layout=LAYOUT_MIX,
size=(500,500),
fontname=DEFAULT_FONT,
rectangular=False):
# sort the tags by size and word length
tag_list.sort(key=lambda tag: len(tag['tag']))
tag_list.sort(key=lambda tag: tag['size'])
tag_list.reverse()
# create the tag space
tag_sprites = []
area = 0
for tag in tag_list:
# print tag, fontname
tag_sprite = Tag(tag, (0, 0), fontname=fontname)
area += tag_sprite.mask.count()
tag_sprites.append(tag_sprite)
canvas = Rect(0, 0, 0, 0)
ratio = float(size[1]) / size[0]
if rectangular:
spiral = _rectangular_spiral
else:
spiral = _archimedean_spiral
aligned_tags = Group()
for tag_sprite in tag_sprites:
angle = 0
if layout == LAYOUT_MIX and randint(0, 2) == 0:
angle = 90
elif layout == LAYOUT_VERTICAL:
angle = 90
tag_sprite.rotate(angle)
xpos = canvas.width - tag_sprite.rect.width
if xpos < 0: xpos = 0
xpos = randint(int(xpos * LOWER_START) , int(xpos * UPPER_START))
tag_sprite.rect.x = xpos
ypos = canvas.height - tag_sprite.rect.height
if ypos < 0: ypos = 0
ypos = randint(int(ypos * LOWER_START), int(ypos * UPPER_START))
tag_sprite.rect.y = ypos
_search_place(tag_sprite, aligned_tags, canvas, spiral, ratio)
canvas = _get_tags_bounding(aligned_tags)
# resize cloud
zoom = min(float(size[0]) / canvas.w, float(size[1]) / canvas.h)
for tag in aligned_tags:
tag.rect.x *= zoom
tag.rect.y *= zoom
tag.rect.width *= zoom
tag.rect.height *= zoom
tag.tag['size'] = int(tag.tag['size'] * zoom)
tag.update_fontsize()
canvas = _get_tags_bounding(aligned_tags)
return canvas, aligned_tags
def create_tag_image(
tags,
output,
size=(500,500),
background=(255, 255, 255),
layout=LAYOUT_MIX,
fontname=DEFAULT_FONT,
rectangular=False):
"""
Create a png tag cloud image
"""
if not len(tags):
return
sizeRect, tag_store = _draw_cloud(tags,
layout,
size=size,
fontname=fontname,
rectangular=rectangular)
image_surface = Surface((sizeRect.w, sizeRect.h), SRCALPHA, 32)
image_surface.fill(background)
for tag in tag_store:
image_surface.blit(tag.image, tag.rect)
pygame.image.save(image_surface, output)
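# Hedged usage sketch (illustrative only; the file name and tag values are
# hypothetical, and each tag dict is assumed to carry the 'tag', 'size' and
# 'color' keys used by the Tag sprites above):
#
#   tags = [{'tag': 'python', 'size': 40, 'color': (120, 30, 30)},
#           {'tag': 'cloud', 'size': 25, 'color': (30, 90, 30)}]
#   create_tag_image(tags, 'cloud.png', size=(600, 400), layout=LAYOUT_MIX)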
def create_html_data(tags,
size=(500,500),
layout=LAYOUT_MIX,
fontname=DEFAULT_FONT,
rectangular=False):
"""
Create data structures to be used for HTML tag clouds.
"""
if not len(tags):
return
sizeRect, tag_store = _draw_cloud(tags,
layout,
size=size,
fontname=fontname,
rectangular=rectangular)
tag_store = sorted(tag_store, key=lambda tag: tag.tag['size'])
tag_store.reverse()
data = {
'css': {},
'links': []
}
color_map = {}
for color_index, tag in enumerate(tags):
        if tag['color'] not in color_map:
color_name = "c%d" % color_index
hslcolor = colorsys.rgb_to_hls(tag['color'][0] / 255.0,
tag['color'][1] / 255.0,
tag['color'][2] / 255.0)
lighter = hslcolor[1] * 1.4
if lighter > 1: lighter = 1
light = colorsys.hls_to_rgb(hslcolor[0], lighter, hslcolor[2])
data['css'][color_name] = ('#%02x%02x%02x' % tag['color'],
'#%02x%02x%02x' % (light[0] * 255,
light[1] * 255,
light[2] * 255))
color_map[tag['color']] = color_name
for stag in tag_store:
        line_offset = stag.font.get_linesize() - (stag.font.get_ascent() + abs(stag.font.get_descent()) - stag.rect.height) - 4
tag = {
'tag': stag.tag['tag'],
'cls': color_map[stag.tag['color']],
'top': stag.rect.y - sizeRect.y,
'left': stag.rect.x - sizeRect.x,
'size': int(stag.tag['size'] * 0.85),
'height': int(stag.rect.height * 1.19) + 4,
'width': stag.rect.width,
'lh': line_offset
}
data['links'].append(tag)
data['size'] = (sizeRect.w, sizeRect.h * 1.15)
return data
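# Hedged sketch (hypothetical rendering loop, not part of this module): the
# returned dict can be turned into HTML by iterating over data['links'] and
# emitting absolutely positioned <span> elements, with data['css'] providing
# the per-colour classes and data['size'] the container dimensions:
#
#   data = create_html_data(tags, size=(600, 400))
#   spans = ['<span class="%s" style="top:%dpx;left:%dpx;font-size:%dpx">%s</span>'
#            % (t['cls'], t['top'], t['left'], t['size'], t['tag'])
#            for t in data['links']]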
| 33.86506
| 128
| 0.534866
|
bb07f9bab1819334214ddcaefcab3a4f097f81ca
| 199
|
py
|
Python
|
examples/another_sync_example.py
|
Bs0Dd/KunAPIPy
|
1e729b150ba787c84fb37b7351c2da2264977440
|
[
"Unlicense"
] | 6
|
2020-04-21T11:15:52.000Z
|
2022-02-05T06:42:07.000Z
|
examples/another_sync_example.py
|
Bs0Dd/KunAPIPy
|
1e729b150ba787c84fb37b7351c2da2264977440
|
[
"Unlicense"
] | 2
|
2022-02-02T16:28:27.000Z
|
2022-02-25T14:28:20.000Z
|
examples/another_sync_example.py
|
Bs0Dd/KunAPIPy
|
1e729b150ba787c84fb37b7351c2da2264977440
|
[
"Unlicense"
] | null | null | null |
from kunapipy.kundelik import kundelik
login = "login"
password = "password"
with kundelik.KunAPI(login=login, password=password) as dn:
print(dn.get_classmates())
print(dn.get_context())
| 19.9
| 59
| 0.738693
|
dabd44e515abfdda403b5fb8cfb9c0415d08e385
| 32,277
|
py
|
Python
|
dafi/random_field/field.py
|
cmichelenstrofer/DAFI
|
926c1fc009fbb92294b9f4c565ef2fc91979a8fa
|
[
"Apache-2.0"
] | 24
|
2020-03-03T14:33:09.000Z
|
2022-03-14T00:11:26.000Z
|
dafi/random_field/field.py
|
cmichelenstrofer/DAFI
|
926c1fc009fbb92294b9f4c565ef2fc91979a8fa
|
[
"Apache-2.0"
] | null | null | null |
dafi/random_field/field.py
|
cmichelenstrofer/DAFI
|
926c1fc009fbb92294b9f4c565ef2fc91979a8fa
|
[
"Apache-2.0"
] | 19
|
2020-03-03T14:33:42.000Z
|
2022-03-28T11:42:34.000Z
|
""" Random fields representation and manipulation.
These functions can be called directly from ``dafi.random_field``, e.g.
.. code-block:: python
>>> dafi.random_field.calc_kl_modes(*args)
"""
# standard library imports
import warnings
# third party imports
import numpy as np
from scipy import sparse as sp
from scipy.sparse import linalg as splinalg
from scipy import interpolate
from scipy import spatial
# KL decomposition
def calc_kl_modes(cov, nmodes=None, weight_field=None, eps=1e-8,
normalize=True):
""" Calculate the first N Karhunen-Loève modes for a covariance
field.
Converts the covariance to a sparse matrix if it is not one yet.
Parameters
----------
cov : ndarray
Covariance matrix. Can be ndarray, matrix, or scipy sparse
matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
nmodes : int
Number of KL modes to obtain.
weight_field : ndarray
Weight (e.g. cell volume) associated with each state.
Default ones (1). *dtype=float*, *ndim=1*, *shape=(nstate)*
eps : float
Small quantity to add to the diagonal of the covariance matrix
for numerical stability.
normalize : bool
Whether to normalize (norm = 1) the KL modes.
Returns
-------
eig_vals : ndarray
Eigenvalue associated with each mode.
*dtype=float*, *ndim=1*, *shape=(nmodes)*
kl_modes : ndarray
KL modes (eigenvectors).
*dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
"""
# convert to sparse matrix
cov = sp.csc_matrix(cov)
# default values
nstate = cov.shape[0]
if nmodes is None:
nmodes = nstate-1
weight_field = _preprocess_field(weight_field, nstate, 1.0)
# add small value to diagonal
cov = cov + sp.eye(cov.shape[0], format='csc')*eps
weight_field = np.squeeze(weight_field)
weight_vec = np.atleast_2d(weight_field)
weight_mat = np.sqrt(np.dot(weight_vec.T, weight_vec))
cov_weighted = cov.multiply(weight_mat)
# perform the eig-decomposition
eig_vals, eig_vecs = sp.linalg.eigsh(cov_weighted, k=nmodes)
# sort the eig-value and eig-vectors in a descending order
ascending_order = eig_vals.argsort()
descending_order = ascending_order[::-1]
eig_vals = eig_vals[descending_order]
eig_vecs = eig_vecs[:, descending_order]
# normalized KL modes
weight_diag = np.diag(np.sqrt(weight_field))
kl_modes = np.dot(np.linalg.inv(weight_diag), eig_vecs) # normalized
    # check if negative eigenvalues
    neg_eigv = False
    for imode in np.arange(nmodes):
        if eig_vals[imode] < 0:
            neg_eigv = True
            warn_message = f'Negative eigenvalue for mode {imode}.'
            warnings.warn(warn_message)
            kl_modes[:, imode] *= 0.
    if neg_eigv:
        warn_message = 'Some modes have negative eigenvalues. ' + \
            'The number of KL modes might be too large. ' + \
            "Alternatively, use a larger value for 'eps'."
        warnings.warn(warn_message)
# weight by appropriate variance
if not normalize:
kl_modes = scale_kl_modes(eig_vals, kl_modes)
return eig_vals, kl_modes
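# Hedged usage sketch (synthetic inputs, not part of the original module):
# build a small squared-exponential covariance and extract its leading modes.
#
#   x = np.linspace(0., 1., 50)
#   cov = np.exp(-0.5 * (x[:, None] - x[None, :])**2 / 0.1**2)
#   eig_vals, kl_modes = calc_kl_modes(cov, nmodes=5)
#   # kl_modes.shape == (50, 5); eig_vals are sorted in descending order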
def calc_kl_modes_coverage(cov, coverage, weight_field=None, eps=1e-8,
max_modes=None, normalize=True):
""" Calculate all KL modes and return only those required to achieve
a certain coverage of the variance.
Parameters
----------
cov : ndarray
Covariance matrix. Can be ndarray, matrix, or scipy sparse
matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
coverage : float
Desired percentage coverage of the variance. Value between 0-1.
weight_field : ndarray
Weight (e.g. cell volume) associated with each state.
Default ones (1). *dtype=float*, *ndim=1*, *shape=(nstate)*
eps : float
Small quantity to add to the diagonal of the covariance matrix
for numerical stability.
    max_modes : int
        Maximum number of modes to compute. Default is all (nstate - 1).
    normalize : bool
        Whether to normalize (norm = 1) the KL modes.
Returns
-------
eig_vals : ndarray
Eigenvalue associated with each mode. For the first N modes such
that the desired coverage of the variance is achieved.
*dtype=float*, *ndim=1*, *shape=(N)*
kl_modes : ndarray
first N KL modes (eigenvectors) such that the desired coverage
of the variance is achieved.
*dtype=float*, *ndim=2*, *shape=(nstate, N)*
"""
# convert to sparse matrix
cov = sp.csc_matrix(cov)
# default values
nstate = cov.shape[0]
weight_field = _preprocess_field(weight_field, nstate, 1.0)
if max_modes is None:
max_modes = nstate - 1
# get the first max_modes KL modes
eig_vals, kl_modes = calc_kl_modes(
cov, max_modes, weight_field, eps, normalize)
# return only those KL modes required for desired coverage
    cumulative_variance = kl_coverage(cov, eig_vals, weight_field)
    coverage_index = np.argmax(cumulative_variance >= coverage)
if coverage_index == 0:
coverage_index = max_modes
return eig_vals[:coverage_index], kl_modes[:, :coverage_index]
def scale_kl_modes(eig_vals, kl_modes_norm):
""" Weight the KL modes by the appropriate variance.
Parameters
----------
eig_vals : ndarray
Eigenvalue associated with each mode.
*dtype=float*, *ndim=1*, *shape=(nmodes)*
kl_modes_norm : ndarray
Normalized (norm = 1) KL modes (eigenvectors).
*dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
Returns
-------
kl_modes_weighted : ndarray
KL modes with correct magnitude.
*dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
"""
nmodes = len(eig_vals)
kl_modes_weighted = kl_modes_norm.copy()
for imode in np.arange(nmodes):
kl_modes_weighted[:, imode] *= np.sqrt(eig_vals[imode])
return kl_modes_weighted
def kl_coverage(cov, eig_vals, weight_field=None):
""" Calculate the percentage of the covariance covered by the the
first N KL modes for N from 1-nmodes.
Parameters
----------
cov : ndarray
Covariance matrix. Can be ndarray, matrix, or scipy sparse
matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
eig_vals : ndarray
Eigenvalues associated with each mode.
*dtype=float*, *ndim=1*, *shape=(nmodes)*
weight_field : ndarray
Weight (e.g. cell volume) associated with each state.
*dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
coverage: ndarray
Cumulative variance coverage of the first N modes. Each value
is 0-1 and increasing.
*dtype=float*, *ndim=1*, *shape=(nmodes)*
"""
    # make sparse if it's not already
cov = sp.csc_matrix(cov)
# default values
nstate = cov.shape[0]
weight_field = _preprocess_field(weight_field, nstate, 1.0)
# calculate coverage
weight_vec = np.atleast_2d(weight_field)
weight_mat = np.sqrt(np.dot(weight_vec.T, weight_vec))
cov_weighted = cov.multiply(weight_mat)
cov_trace = np.sum(cov_weighted.diagonal())
return np.cumsum(eig_vals) / cov_trace
def reconstruct_kl(modes, coeffs, mean=None):
""" Reconstruct a field using KL modes and given coefficients.
Can create multiple fields by providing two dimensional array of
coefficients.
Parameters
----------
modes : ndarray
KL modes. *dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
coeffs : ndarray
Array of coefficients.
*dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
mean : ndarray
Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
fields : ndarray
Reconstructed fields.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
"""
# number of modes, samples, and states
if len(coeffs.shape) == 1:
coeffs = np.expand_dims(coeffs, 1)
nmodes, nsamps = coeffs.shape
nstate = modes.shape[0]
# mean vector
mean = _preprocess_field(mean, nstate, 0.0)
mean = np.expand_dims(np.squeeze(mean), axis=1)
# create samples
fields = np.tile(mean, [nsamps])
for imode in range(nmodes):
vec1 = np.atleast_2d(coeffs[imode, :])
vec2 = np.atleast_2d(modes[:, imode])
fields += np.dot(vec1.T, vec2).T
return fields
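# Hedged usage sketch (continues the calc_kl_modes example above): draw
# standard-normal coefficients and rebuild sample fields from the scaled
# KL modes.
#
#   coeffs = np.random.normal(0, 1, [kl_modes.shape[1], 10])
#   fields = reconstruct_kl(scale_kl_modes(eig_vals, kl_modes), coeffs)
#   # fields.shape == (50, 10); each column is one reconstructed field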
def project_kl(field, modes, weight_field=None, mean=None):
""" Project a field onto a set of modes.
Parameters
----------
field : ndarray
Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
modes : ndarray
KL modes. *dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
weight_field : ndarray
Weight (e.g. cell volume) associated with each state.
*dtype=float*, *ndim=1*, *shape=(nstate)*
mean : ndarray
Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
coeffs : ndarray
Projection magnitude.
*dtype=float*, *ndim=1*, *shape=(nmodes)*
"""
nstate, nmode = modes.shape
mean = _preprocess_field(mean, nstate, 0.0)
coeffs = []
for imode in range(nmode):
mode = modes[:, imode]
coeffs.append(projection_magnitude(field-mean, mode, weight_field))
return np.array(coeffs)
def _preprocess_field(field, nstate, default):
"""Pre-process provided weight field. """
# default value
if field is None:
field = np.ones(nstate)*default
# constant value
if len(np.atleast_1d(np.squeeze(np.array(field)))) == 1:
field = np.ones(nstate)*field
return field
# linear algebra on scalar fields
def integral(field, weight_field):
""" Calculate the integral of a field.
Parameters
----------
field : ndarray
Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
weight_field : ndarray
Cell volumes. *dtype=float*, *ndim=1*, *shape=(ncells)*
Returns
-------
field_integral : float
The integral of the field over the domain.
"""
field = np.squeeze(field)
assert field.ndim == 1
nstate = len(field)
weight_field = _preprocess_field(weight_field, nstate, 1.0)
return np.sum(field * weight_field)
def inner_product(field_1, field_2, weight_field):
""" Calculate the inner product between two fields.
The two fields share the same weights.
Parameters
----------
field_1 : ndarray
One scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
field_2 : ndarray
Another scalar field.
*dtype=float*, *ndim=1*, *shape=(ncells)*
weight_field : ndarray
Cell volumes. *dtype=float*, *ndim=1*, *shape=(ncells)*
Returns
-------
product : float
The inner product between the two fields.
"""
return integral(field_1 * field_2, weight_field)
def norm(field, weight_field):
""" Calculate the L2-norm of a field.
Parameters
----------
field : ndarray
Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
weight_field : ndarray
Cell volumes. *dtype=float*, *ndim=1*, *shape=(ncells)*
Returns
-------
field_norm : float
The norm of the field.
"""
return np.sqrt(inner_product(field, field, weight_field))
def unit_field(field, weight_field):
""" Calculate the unit field (norm = 1) in same direction.
Parameters
----------
field : ndarray
Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
weight_field : ndarray
Cell volumes. *dtype=float*, *ndim=1*, *shape=(ncells)*
Returns
-------
field_normed : ndarray
Normalized (norm = 1) scalar field.
*dtype=float*, *ndim=1*, *shape=(ncells)*
"""
return field / norm(field, weight_field)
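# Hedged sketch of the scalar-field helpers above on a uniform 1-D mesh
# (synthetic values, not part of the original module):
#
#   vol = np.full(50, np.pi / 50)                     # uniform cell volumes on [0, pi]
#   f = np.sin(np.linspace(0., np.pi, 50))
#   integral(f, vol)                                  # roughly 2.0
#   norm(f, vol)                                      # roughly sqrt(pi / 2)
#   projection_magnitude(f, unit_field(f, vol), vol)  # equals norm(f, vol)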
def projection_magnitude(field_1, field_2, weight_field):
""" Get magnitude of projection of field_1 onto field_2.
The two fields share the same weights.
Parameters
----------
field_1 : ndarray
Scalar field being projected.
*dtype=float*, *ndim=1*, *shape=(ncells)*
field_2 : ndarray
Scalar field used for projection direction.
*dtype=float*, *ndim=1*, *shape=(ncells)*
weight_field : ndarray
Cell volumes.
*dtype=float*, *ndim=1*, *shape=(ncells)*
Returns
-------
magnitude : float
magnitude of the projected field.
"""
magnitude = inner_product(field_1, field_2, weight_field) / \
(norm(field_2, weight_field)**2)
return magnitude
def projection(field_1, field_2, weight_field):
""" Project field_1 onto field_2.
The two fields share the same weights.
Parameters
----------
field_1 : ndarray
Scalar field being projected.
*dtype=float*, *ndim=1*, *shape=(ncells)*
field_2 : ndarray
Scalar field used for projection direction.
*dtype=float*, *ndim=1*, *shape=(ncells)*
weight_field : ndarray
Cell volumes.
*dtype=float*, *ndim=1*, *shape=(ncells)*
Returns
-------
projected_field : ndarray
Projected field.
*dtype=float*, *ndim=1*, *shape=(ncells)*
"""
magnitude = projection_magnitude(field_1, field_2, weight_field)
direction = unit_field(field_2, weight_field)
return magnitude*direction
# interpolation
def interpolate_field_rbf(data, coords, kernel, length_scale):
""" Interpolate data using a radial basis function (RBF) to create a
field from sparse specifications.
This is used for instance to specify a variance field based on
expert knowledge.
Parameters
----------
data : ndarray
Sparse data to create interpolation from. For an NxM array, the
number of data points is N, the number of dimensions
(coordinates) is M-1, and the Mth column is the data value.
*dtype=float*, *ndim=2*, *shape=(N, M)*
coords : ndarray
Coordinates of the cell centers of the full discretized field.
The RBF will be evaluated at these points.
*dtype=float*, *ndim=2*, *shape=(ncells, M-1)*
kernel : str
Kernel (function) of the RBF. See *'function'* input of
`scipy.interpolate.Rbf`_ for list of options.
length_scale : float
Length scale parameter (epsilon in `scipy.interpolate.Rbf`_)
in the RBF kernel.
Returns
-------
field : ndarray
Full field. *dtype=float*, *ndim=1*, *shape=(ncells)*
"""
args1 = []
args2 = []
ncoord = coords.shape[1]
    for icoord in range(ncoord):
        args1.append(data[:, icoord])
        args2.append(coords[:, icoord])
    # the last column of 'data' holds the values to interpolate
    args1.append(data[:, -1])
    interp_func = interpolate.Rbf(
        *args1, function=kernel, epsilon=length_scale)
return interp_func(*args2)
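# Hedged usage sketch (synthetic data): three sparse (x, y, value) points are
# interpolated onto a 10x10 grid of cell centres with a Gaussian RBF kernel.
#
#   data = np.array([[0.0, 0.0, 1.0],
#                    [1.0, 0.0, 2.0],
#                    [0.5, 1.0, 3.0]])
#   xx, yy = np.meshgrid(np.linspace(0, 1, 10), np.linspace(0, 1, 10))
#   grid = np.stack([xx.ravel(), yy.ravel()], axis=1)
#   var_field = interpolate_field_rbf(data, grid, 'gaussian', 0.5)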
def inverse_distance_weights(coords, connectivity, points, tol=1e-6):
""" Create linear interpolation matrix (observation operatror H).
"""
# get host cell (cell centre closest to point)
tree = spatial.cKDTree(coords)
distances, indexes = tree.query(list(points))
npoints = points.shape[0]
ncells = coords.shape[0]
# calculate weights
mat = sp.lil_matrix((npoints, ncells))
for i in range(npoints):
id = indexes[i]
if distances[i] < tol:
# if location is cell centre
mat[i, id] = 1.0
else:
point = np.expand_dims(np.squeeze(points[i, :]), 0)
neighbours = coords[connectivity[id], :]
dist = spatial.distance.cdist(point, neighbours)
weight = 1 / dist
wsum = np.sum(weight) + 1 / distances[i]
weight /= wsum
# host cell
mat[i, id] = (1 / distances[i]) / wsum
# neighbour cells
mat[i, connectivity[id]] = weight
return sp.csc_matrix(mat)
# Gaussian process: generate samples
def gp_samples_cholesky(cov, nsamples, mean=None, eps=1e-8):
""" Generate samples of a Gaussian Process using Cholesky
decomposition.
Parameters
----------
cov : ndarray
Covariance matrix. Can be ndarray, matrix, or scipy sparse
matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
nsamples : int
Number of samples to generate.
mean : ndarray
Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
eps : float
Small quantity to add to the diagonal of the covariance matrix
for numerical stability.
Returns
-------
samples : ndarray
Matrix of samples.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
"""
    # make sparse if it's not already
cov = sp.csc_matrix(cov)
nstate = cov.shape[0]
# add small value to diagonal
cov = cov + sp.eye(nstate, format='csc')*eps
# mean vector
mean = _preprocess_field(mean, nstate, 0.0)
mean = np.expand_dims(np.squeeze(mean), axis=1)
# Create samples using Cholesky Decomposition
L = sparse_cholesky(cov)
a = np.random.normal(size=(nstate, nsamples))
perturb = L.dot(a)
return mean + perturb
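# Hedged usage sketch (reusing the synthetic covariance from the
# calc_kl_modes example above):
#
#   samples = gp_samples_cholesky(cov, nsamples=100, mean=np.zeros(50))
#   # samples.shape == (50, 100); each column is one Gaussian-process draw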
def sparse_cholesky(cov):
""" Compute the Cholesky decomposition for a sparse (scipy) matrix.
Adapted from `gist.github.com/omitakahiro`_.
Parameters
----------
cov : ndarray
Covariance matrix. Can be ndarray, matrix, or scipy sparse
matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
Returns
-------
lower: scipy.sparse.csc_matrix
Lower triangular Cholesky factor of the covariance matrix.
"""
# convert to sparse matrix
cov = sp.csc_matrix(cov)
# LU decomposition
LU = splinalg.splu(cov, diag_pivot_thresh=0)
# check the matrix is positive definite.
n = cov.shape[0]
posd = (LU.perm_r == np.arange(n)).all() and (LU.U.diagonal() > 0).all()
if not posd:
raise ValueError('The matrix is not positive definite')
return LU.L.dot(sp.diags(LU.U.diagonal()**0.5))
def gp_samples_kl(cov, nsamples, weight_field, nmodes=None, mean=None,
eps=1e-8):
""" Generate samples of a Gaussian Process using KL decomposition.
Parameters
----------
cov : ndarray
Covariance matrix. Can be ndarray, matrix, or scipy sparse
matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
nsamples : int
Number of samples to generate.
weight_field : ndarray
Weight (e.g. cell volume) associated with each state.
*dtype=float*, *ndim=1*, *shape=(nstate)*
nmodes : int
Number of modes to use when generating samples. *'None'* to use
all modes.
mean : ndarray
Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
eps : float
Small quantity to add to the diagonal of the covariance matrix
for numerical stability.
Returns
-------
samples : ndarray
Matrix of samples.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
"""
# KL decomposition
eigv, modes = calc_kl_modes(cov, nmodes, weight_field, eps, False)
if nmodes is None:
nmodes = len(eigv)
# create samples
coeffs = np.random.normal(0, 1, [nmodes, nsamples])
return reconstruct_kl(modes, coeffs, mean)
def gp_samples_klmodes(modes, nsamples, mean=None):
""" Generate samples of a Gaussian Process using the given KL
modes.
Parameters
----------
modes : ndarray
KL modes. *dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
nsamples : int
Number of samples to generate.
mean : ndarray
Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
samples : ndarray
Matrix of samples.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
"""
# create samples
nmodes = modes.shape[1]
coeffs = np.random.normal(0, 1, [nmodes, nsamples])
return reconstruct_kl(modes, coeffs, mean)
def gp_samples_kl_coverage(cov, nsamples, weight_field, coverage=0.99,
max_modes=None, mean=None, eps=1e-8):
""" Generate samples of a Gaussian Process using KL decomposition.
    Only the first N modes required to get the desired variance coverage
are used.
Parameters
----------
cov : ndarray
Covariance matrix. Can be ndarray, matrix, or scipy sparse
matrix. *dtype=float*, *ndim=2*, *shape=(nstate, nstate)*
nsamples : int
Number of samples to generate.
weight_field : ndarray
Weight (e.g. cell volume) associated with each state.
*dtype=float*, *ndim=1*, *shape=(nstate)*
coverage : float
Desired percentage coverage of the variance. Value between 0-1.
max_modes : int
Maximum number of modes used. This is the number of modes that
is calculated. If less are needed to achieve the desired
coverage the additional ones are discarded.
mean : ndarray
Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
eps : float
Small quantity to add to the diagonal of the covariance matrix
for numerical stability.
Returns
-------
samples : ndarray
Matrix of samples.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
nmodes : int
Number of modes used to achieve the requested coverage.
"""
# KL decomposition
eigv, klmodes = calc_kl_modes_coverage(
cov, coverage, weight_field, eps, max_modes, False)
nmodes = len(eigv)
# create samples
coeffs = np.random.normal(0, 1, [nmodes, nsamples])
return reconstruct_kl(klmodes, coeffs, mean), nmodes
def gp_sqrexp_samples(nsamples, coords, stddev, length_scales, mean=None,
weight_field=None, max_modes=None):
""" Generate samples from a Gaussian Process with square exponential
correlation kernel.
    This is a convenience function for new users or simple cases.
    It creates the covariance matrix, does the KL decomposition, keeps
    the required modes for 99% coverage, and creates the samples.
Parameters
----------
nsamples : int
Number of samples to generate.
coords : ndarray
Array of coordinates. Each row correspond to a different point
and the number of columns is the number of physical dimensions
(e.g. 3 for (x,y,z)).
*dtype=float*, *ndim=2*, *shape=(npoints, ndims)*
stddev : ndarray
Standard deviation of each state. Alternatively, provide a float
for a constant standard deviation.
*dtype=float*, *ndim=1*, *shape=(nstate)*
length_scales : list
Length scale for each physical dimensions. List length is ndims.
Each entry is either a one dimensional ndarray of length nstate
(length scale field) or a float (constant length scale).
mean : ndarray
Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
weight_field : ndarray
Weight (e.g. cell volume) associated with each state.
*dtype=float*, *ndim=1*, *shape=(nstate)*
max_modes : int
Maximum number of modes used. This is the number of modes that
is calculated. If less are needed to achieve 99% coverage the
additional ones are discarded.
"""
from dafi.random_field.covariance import generate_cov
cov = generate_cov(
'sqrexp', stddev, coords=coords, length_scales=length_scales)
samples, _ = gp_samples_kl_coverage(
cov, nsamples, weight_field, 0.99, max_modes, mean)
return samples
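# Hedged usage sketch (1-D coordinates; the standard deviation and length
# scale below are made-up values, not package defaults):
#
#   coords = np.linspace(0., 1., 50)[:, None]
#   samples = gp_sqrexp_samples(
#       nsamples=20, coords=coords, stddev=1.0, length_scales=[0.1])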
# Random field class
class GaussianProcess(object):
""" Gaussian process class.
Also allows for the creation of a function of a Gaussian process.
E.g. see *'Lognormal'* class.
"""
def __init__(self, klmodes, mean=None, weights=None, func=None,
funcinv=None):
""" Initialize Gaussian process class.
Parameters
----------
klmodes : ndarray
KL modes (eigenvectors).
*dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
mean : ndarray
Mean vector. Default zero (0).
*dtype=float*, *ndim=1*, *shape=(nstate)*
weights : ndarray
Weight (e.g. cell volume) associated with each state.
Default ones (1). *dtype=float*, *ndim=1*, *shape=(nstate)*
func: function
Function to create a random process that is a function of
a Gaussian process. Default is identity function (GP).
funcinv: function
Inverse of func.
"""
nstate = klmodes.shape[0]
self.klmodes = klmodes
self.ncell, self.nmodes = self.klmodes.shape
self.mean = _preprocess_field(mean, self.ncell, 0.0)
        self.weights = _preprocess_field(weights, nstate, 1.0)
def func_identity(x):
return x
if func is None:
func = func_identity
if funcinv is None:
funcinv = func_identity
self.func = func
self.funcinv = funcinv
def sample_coeffs(self, nsamples):
""" Create Karhunen-Loève (KL) coefficents for random samples.
Parameters
----------
nsamples : int
Number of samples for which to generate KL coefficients.
Returns
-------
        coeffs : ndarray
            Matrix of sampled KL coefficients for the Gaussian process.
            *dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
"""
return np.random.normal(0, 1, [self.nmodes, nsamples])
def sample_gp(self, nsamples, mean=None):
""" Generate samples of the Gaussian process.
Parameters
----------
nsamples : int
Number of samples to generate.
mean : ndarray
Mean vector. If *None*, self.mean is used.
*dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
samples : ndarray
Sample fields from Gaussian process.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
        coeffs : ndarray
            Matrix of sampled KL coefficients for the Gaussian process.
            *dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
"""
if mean is None:
mean = self.mean
coeffs = self.sample_coeffs(nsamples)
return reconstruct_kl(self.klmodes, coeffs, mean), coeffs
def sample_func(self, nsamples, mean=None):
""" Generate samples of the function of the Gaussian process.
Parameters
----------
nsamples : int
Number of samples to generate.
mean : ndarray
Mean vector. If *None*, self.mean is used.
*dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
        samples : ndarray
            Sample fields from the function of the Gaussian process.
            *dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
        coeffs : ndarray
            Matrix of sampled KL coefficients for the underlying Gaussian
            process. *dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
"""
if mean is None:
mean = self.mean
coeffs = self.sample_coeffs(nsamples)
samps_gp = reconstruct_kl(self.klmodes, coeffs, mean)
return self.func(samps_gp), coeffs
def reconstruct_gp(self, coeffs, mean=None):
""" Reconstruct the Gaussian process field from given
KL coefficients.
Parameters
----------
coeffs : ndarray
Array of KL coefficients.
*dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
mean : ndarray
Mean vector. If *None*, self.mean is used.
*dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
fields : ndarray
Reconstructed fields.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
"""
if mean is None:
mean = self.mean
return reconstruct_kl(self.klmodes, coeffs, mean)
def reconstruct_func(self, coeffs, mean=None):
""" Reconstruct the function of the Gaussian process field
from given KL coefficients.
Parameters
----------
coeffs : ndarray
Array of KL coefficients.
*dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
mean : ndarray
Mean vector. If *None*, self.mean is used.
*dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
fields : ndarray
Reconstructed fields.
*dtype=float*, *ndim=2*, *shape=(nstate, nsamples)*
"""
if mean is None:
mean = self.mean
val_gp = reconstruct_kl(self.klmodes, coeffs, mean)
return self.func(val_gp)
def pdf(self, coeffs):
""" Probaility density function (PDF).
PDF(x) where x is a field (point in sample space) specified by
KL coeffiecients.
Parameters
----------
coeffs : ndarray
Array of KL coefficients.
*dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
Returns
-------
pdf : ndarray
Value of the PDF function for the given point in the sample space.
"""
        return np.exp(self.logpdf(coeffs))
def logpdf(self, coeffs):
""" Logarithm of the probability density function.
log(PDF(x)) where x is a field (point in sample space)
        specified by KL coefficients.
Parameters
----------
coeffs : ndarray
Array of KL coefficients.
*dtype=float*, *ndim=2*, *shape=(nmodes, nsamples)*
Returns
-------
logpdf : ndarray
Logarithm of the value of the PDF function for the given
point in the sample space.
"""
if len(coeffs.shape) == 1:
coeffs = np.expand_dims(coeffs, 1)
norm_coeff = np.linalg.norm(coeffs, axis=0)
const = np.log((2*np.pi)**(-self.ncell/2))
return const + -0.5*norm_coeff**2
def project_gp_field(self, field, mean=None):
""" Project a field onto the KL modes.
Parameters
----------
field : ndarray
Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
mean : ndarray
Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
coeffs : ndarray
Projection magnitude.
*dtype=float*, *ndim=1*, *shape=(nmodes)*
"""
return project_kl(field, self.klmodes, self.weights, mean)
def project_func_field(self, field, mean=None):
""" Project a field from the function of the Gaussian process
onto the KL modes.
Parameters
----------
field : ndarray
Scalar field. *dtype=float*, *ndim=1*, *shape=(ncells)*
mean : ndarray
Mean vector. *dtype=float*, *ndim=1*, *shape=(nstate)*
Returns
-------
coeffs : ndarray
Projection magnitude.
*dtype=float*, *ndim=1*, *shape=(nmodes)*
"""
field = self.funcinv(field)
        # a provided mean is given in the function's space, so map it back to
        # the underlying Gaussian process before projecting; if None,
        # project_kl falls back to a zero mean in GP space
        if mean is not None:
            mean = self.funcinv(mean)
return project_kl(field, self.klmodes, self.weights, mean)
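# Hedged usage sketch of the GaussianProcess class (reusing eig_vals and
# kl_modes from the calc_kl_modes example above; everything else is
# illustrative):
#
#   gp = GaussianProcess(scale_kl_modes(eig_vals, kl_modes))
#   fields, coeffs = gp.sample_gp(nsamples=10)
#   recovered = gp.project_gp_field(fields[:, 0])   # approximately coeffs[:, 0]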
class LogNormal(GaussianProcess):
""" Log-normal process class. """
def __init__(self, klmodes_gp, median=1.0, weights=None):
""" Initialize log-normal process class.
Parameters
----------
klmodes_gp : ndarray
KL modes (eigenvectors) of the underlying Gaussian process.
*dtype=float*, *ndim=2*, *shape=(nstate, nmodes)*
median : ndarray
Median vector. Default one (1).
*dtype=float*, *ndim=1*, *shape=(nstate)*
weights : ndarray
Weight (e.g. cell volume) associated with each state.
Default ones (1). *dtype=float*, *ndim=1*, *shape=(nstate)*
"""
nstate = klmodes_gp.shape[0]
median = _preprocess_field(median, nstate, 1.0)
self.median_func = np.expand_dims(np.squeeze(median), 1)
def func(x):
return self.median_func * np.exp(x)
def funcinv(y):
return np.log(y / self.median_func)
mean = 0.0
super(self.__class__, self).__init__(
klmodes_gp, mean, weights, func, funcinv)
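# Hedged usage sketch: a log-normal process with a constant median of 1.0,
# built on the same KL modes as in the examples above (illustrative only):
#
#   lognorm = LogNormal(scale_kl_modes(eig_vals, kl_modes), median=1.0)
#   fields, coeffs = lognorm.sample_func(nsamples=10)   # strictly positive fields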
| 31.55132
| 79
| 0.608886
|
36711536321b96febc7e357452abee76ddf967f2
| 2,872
|
py
|
Python
|
examples/01_plotting/plot_overlay.py
|
ctw/nilearn
|
932eee9c69cd8fbf40ee6af5cee77f8f93b25da3
|
[
"BSD-2-Clause"
] | 827
|
2015-01-30T23:11:42.000Z
|
2022-03-29T21:21:05.000Z
|
examples/01_plotting/plot_overlay.py
|
ctw/nilearn
|
932eee9c69cd8fbf40ee6af5cee77f8f93b25da3
|
[
"BSD-2-Clause"
] | 2,845
|
2015-01-04T22:14:41.000Z
|
2022-03-31T20:28:09.000Z
|
examples/01_plotting/plot_overlay.py
|
ctw/nilearn
|
932eee9c69cd8fbf40ee6af5cee77f8f93b25da3
|
[
"BSD-2-Clause"
] | 484
|
2015-02-03T10:58:19.000Z
|
2022-03-29T21:57:16.000Z
|
"""
Visualizing a probabilistic atlas: the default mode in the MSDL atlas
=====================================================================
Visualizing a probabilistic atlas requires visualizing the different
maps that compose it.
Here we represent the nodes constituting the default mode network in the
`MSDL atlas
<https://team.inria.fr/parietal/18-2/spatial_patterns/spatial-patterns-in-resting-state/>`_.
The tools that we need to leverage are:
* :func:`nilearn.image.index_img` to retrieve the various maps composing
the atlas
* Adding overlays on an existing brain display, to plot each of these
maps
Alternatively, :func:`nilearn.plotting.plot_prob_atlas` allows plotting the maps
in one step, with less control over the plot (see below).
"""
############################################################################
# Fetching probabilistic atlas - MSDL atlas
# -----------------------------------------
from nilearn import datasets
atlas_data = datasets.fetch_atlas_msdl()
atlas_filename = atlas_data.maps
#############################################################################
# Visualizing a probabilistic atlas with plot_stat_map and add_overlay object
# ---------------------------------------------------------------------------
from nilearn import plotting, image
# First plot the map for the PCC: index 4 in the atlas
display = plotting.plot_stat_map(image.index_img(atlas_filename, 4),
colorbar=False,
title="DMN nodes in MSDL atlas")
# Now add as an overlay the maps for the ACC and the left and right
# parietal nodes
display.add_overlay(image.index_img(atlas_filename, 5),
cmap=plotting.cm.black_blue)
display.add_overlay(image.index_img(atlas_filename, 6),
cmap=plotting.cm.black_green)
display.add_overlay(image.index_img(atlas_filename, 3),
cmap=plotting.cm.black_pink)
plotting.show()
###############################################################################
# Visualizing a probabilistic atlas with plot_prob_atlas
# ======================================================
#
# Alternatively, we can create a new 4D image by selecting the 3rd, 4th, 5th and 6th (zero-based) probabilistic maps from the atlas
# via :func:`nilearn.image.index_img` and use :func:`nilearn.plotting.plot_prob_atlas` (added in version 0.2)
# to plot the selected nodes in one step.
#
# Unlike :func:`nilearn.plotting.plot_stat_map` this works with 4D images
dmn_nodes = image.index_img(atlas_filename, [3, 4, 5, 6])
# Note that dmn_node is now a 4D image
print(dmn_nodes.shape)
####################################
display = plotting.plot_prob_atlas(dmn_nodes,
cut_coords=(0, -55, 29),
title="DMN nodes in MSDL atlas")
plotting.show()
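###############################################################################
# Saving the figure
# -----------------
#
# Rather than (or in addition to) showing the plots interactively, the
# display objects returned above can be written to disk, for instance
# (the file name is arbitrary)::
#
#     display.savefig('dmn_nodes_msdl.png')
#     display.close()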
| 39.342466
| 126
| 0.588788
|
3ed999f20ceaec0707c6bc0cbe46a3fc10b04d26
| 3,820
|
py
|
Python
|
src/aux.py
|
MartaLopesGomes/KEGGScraper
|
94f9b63b89f0b45c9e3bf39b6d2e825637b3ca00
|
[
"MIT"
] | null | null | null |
src/aux.py
|
MartaLopesGomes/KEGGScraper
|
94f9b63b89f0b45c9e3bf39b6d2e825637b3ca00
|
[
"MIT"
] | null | null | null |
src/aux.py
|
MartaLopesGomes/KEGGScraper
|
94f9b63b89f0b45c9e3bf39b6d2e825637b3ca00
|
[
"MIT"
] | 1
|
2020-03-17T06:29:20.000Z
|
2020-03-17T06:29:20.000Z
|
# -*- coding: utf-8 -*-
"""
"""
from bs4 import BeautifulSoup as bs
def parser_ids(file):
res = []
with open(file, 'r') as f:
lines = f.readlines()
for line in lines:
kID = line.strip().upper()
if len(kID) > 0:
res.append(kID)
return res
def check_ec(ecs):
ec_list = [x.lower() for x in ecs]
if ec_list[0][0:2] == 'ec':
ec_list = [x.replace('ec', '').strip() for x in ec_list]
ec_list = [x.replace(' ', '') for x in ec_list]
return ec_list
def make_blocks(url_list):
# How many url each chunk should have
n = 100
res = [url_list[i * n:(i + 1) * n] for i in range((len(url_list) + n - 1) // n)]
return res
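# Hedged sketch (illustrative values): make_blocks simply chunks a list of
# URLs into groups of 100 so that they can be fetched batch by batch, e.g.
#
#   urls = ['http://example.org/%d' % i for i in range(250)]
#   blocks = make_blocks(urls)   # 3 chunks: 100 + 100 + 50 URLs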
# API links
def get_orthology_ids_url_from_map(pathway_id):
URL = 'http://www.genome.jp'
FUN = '/dbget-bin/get_linkdb?-t+orthology+path:'
return URL + FUN + pathway_id
def get_gene_ids_url(orthology_id):
URL = 'http://www.genome.jp'
FUN = '/dbget-bin/get_linkdb?-t+genes+ko:'
return URL + FUN + orthology_id
def get_orthology_url_from_ec(ec):
URL = 'https://www.genome.jp'
FUN = '/dbget-bin/get_linkdb?-t+orthology+ec:' # ec:1.3.1.25'
return URL + FUN + ec
def get_orthology_url_from_rn(rn):
URL = 'https://www.genome.jp'
FUN = '/dbget-bin/get_linkdb?-t+orthology+rn:'
return URL + FUN + rn
def get_ko_url(ko):
URL = 'https://www.genome.jp'
FUN = '/dbget-bin/www_bget?ko:'
return URL + FUN + ko
def get_ec_url(ec):
URL = 'https://www.genome.jp'
FUN = '/dbget-bin/www_bget?ec:'
return URL + FUN + ec
def get_fastaProt_url(prot_id):
URL = 'http://www.genome.jp'
FUN = '/dbget-bin/www_bget?-f+-n+a+'
return URL + FUN + prot_id
def get_fasta_url(gene_id):
URL = 'http://www.genome.jp'
FUN = '/dbget-bin/www_bget?-f+-n+n+'
return URL + FUN + gene_id
def get_ec_url_from_ko(ko):
URL = 'https://www.genome.jp'
FUN = '/dbget-bin/get_linkdb?-t+enzyme+ko:'
return URL + FUN + ko
def get_rn_url_from_ko(ko):
URL = 'https://www.genome.jp'
FUN = '/dbget-bin/get_linkdb?-t+rn+ko:'
return URL + FUN + ko
def get_ec_url_from_rn(rn):
URL = 'https://www.genome.jp'
FUN = '/dbget-bin/get_linkdb?-t+enzyme+rn:'
return URL + FUN + rn
def get_ids(response):
try:
html = response.text
b = bs(html, features="html.parser")
links = b.find_all('a')
valid_link = lambda x: 'www_bget' in x.get('href')
links = filter(valid_link, links)
lista = [link.text for link in links]
return lista
except AttributeError:
html = response.read()
b = bs(html, features="html.parser")
links = b.find_all('a')
valid_link = lambda x: 'www_bget' in x.get('href')
links = filter(valid_link, links)
lista = [link.text for link in links]
return lista
else:
return None
def get_fastaProt(response):
try:
html = bs(response.text, features="html.parser")
return html.pre.text
except:
return None
def get_ko_name(response):
html = response.read()
b = bs(html, features="html.parser")
rows = b.findAll("tr")
for r in rows:
lines = r.find("nobr")
if lines:
n = lines.text
if n == 'Definition':
definition = r.find("td").text
return definition
def get_ec_names(response):
names = []
html = response.read()
b = bs(html, features="html.parser")
rows = b.findAll("tr")
for r in rows:
lines = r.find("nobr")
if lines:
n = lines.text
if n == 'Name':
cells = r.findAll("td")
for cell in cells:
names = [x.strip() for x in cell.text.split(';')]
return names
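# Hedged end-to-end sketch (assumes the 'requests' package, which this module
# does not import itself; the EC number is only an example):
#
#   import requests
#   url = get_orthology_url_from_ec('1.3.1.25')
#   ko_ids = get_ids(requests.get(url))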
| 24.025157
| 84
| 0.575916
|
ae8d2ac4c388f16af66f26814c4770f3ba5a8bcf
| 259
|
py
|
Python
|
old/pepper1/src/libpepper/vals/control_flow/__init__.py
|
andybalaam/pepper
|
f0d12379c7c6e2d82003c0c1d6130bfac685e1c3
|
[
"MIT"
] | 2
|
2018-12-19T16:32:46.000Z
|
2019-01-10T09:00:33.000Z
|
old/pepper1/src/libpepper/vals/control_flow/__init__.py
|
andybalaam/pepper
|
f0d12379c7c6e2d82003c0c1d6130bfac685e1c3
|
[
"MIT"
] | null | null | null |
old/pepper1/src/libpepper/vals/control_flow/__init__.py
|
andybalaam/pepper
|
f0d12379c7c6e2d82003c0c1d6130bfac685e1c3
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2012 Andy Balaam and The Pepper Developers
# Released under the MIT License. See the file COPYING.txt for details.
from pepfor import PepFor
from peprange import PepRange
from pepwhile import PepWhile
from pepcodeblock import PepCodeBlock
| 25.9
| 72
| 0.810811
|
1fc0b9300e5455af40af83a938635562940d24aa
| 1,311
|
py
|
Python
|
interview/wepay/binary-tree-level-order-print.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 10
|
2019-09-15T00:23:57.000Z
|
2022-01-05T12:53:42.000Z
|
interview/wepay/binary-tree-level-order-print.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 3
|
2021-06-30T00:39:26.000Z
|
2021-08-01T07:13:59.000Z
|
interview/wepay/binary-tree-level-order-print.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 6
|
2020-02-08T02:55:22.000Z
|
2022-01-02T22:48:18.000Z
|
"""
1
2 3
4 5
print out should be
1
2 3
. 4 . 5
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from typing import List
from collections import deque
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
queue = deque([root])
res = []
depth = self.get_depth(root)
curr_depth = 0
while queue and curr_depth < depth:
size = len(queue)
level = []
for _ in range(size):
node = queue.popleft()
if node is not None:
level.append(str(node.val))
queue.append(node.left)
queue.append(node.right)
else:
level.append(".")
curr_depth += 1
res.append(level)
# for level in res:
# print(level)
return res
def get_depth(self, root):
if not root:
return 0
left = self.get_depth(root.left)
right = self.get_depth(root.right)
return max(left, right) + 1
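# Hedged usage sketch: with the (commented-out) TreeNode class above, the tree
# from the docstring could be built and printed like this:
#
#   root = TreeNode(1,
#                   TreeNode(2, None, TreeNode(4)),
#                   TreeNode(3, None, TreeNode(5)))
#   for level in Solution().levelOrder(root):
#       print(" ".join(level))
#   # -> "1", "2 3", ". 4 . 5"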
| 22.220339
| 60
| 0.45614
|
0f10290691c418f80ff42cdb7ca66a15b2def894
| 481
|
py
|
Python
|
backend/config/mirror.py
|
sartography/star-drive
|
c0f33378d42913c3e677e07f74eb46d7b2b82a0a
|
[
"MIT"
] | null | null | null |
backend/config/mirror.py
|
sartography/star-drive
|
c0f33378d42913c3e677e07f74eb46d7b2b82a0a
|
[
"MIT"
] | 368
|
2018-12-18T14:43:20.000Z
|
2022-03-02T02:54:18.000Z
|
backend/config/mirror.py
|
sartography/star-drive
|
c0f33378d42913c3e677e07f74eb46d7b2b82a0a
|
[
"MIT"
] | 2
|
2019-10-02T03:06:06.000Z
|
2020-10-05T16:53:48.000Z
|
MIRRORING = True
SERVER_NAME = "localhost:5001"
MASTER_URL = "http://localhost:5000"
MASTER_EMAIL = "daniel.h.funk@gmail.com"
MASTER_PASS = "dfunk7"
SQLALCHEMY_DATABASE_URI = "postgresql://ed_user:ed_pass@localhost/stardrive_mirror"
# Elastic Search
ELASTIC_SEARCH = {
"index_prefix": "stardrive_mirror",
"hosts": ["localhost"],
"port": 9200,
"timeout": 20,
"verify_certs": False,
"use_ssl": False,
"http_auth_user": "",
"http_auth_pass": ""
}
| 20.913043
| 83
| 0.679834
|
0c36e7dcdb51e8a31b837592e6156e115cfbc5f8
| 4,941
|
py
|
Python
|
docs/conf.py
|
pyenergyplus/witheppy
|
ea9d21976fc018261aa5f8464125df4bf866171a
|
[
"MIT"
] | 8
|
2018-12-12T23:00:44.000Z
|
2021-12-12T05:41:45.000Z
|
docs/conf.py
|
pyenergyplus/witheppy
|
ea9d21976fc018261aa5f8464125df4bf866171a
|
[
"MIT"
] | 27
|
2018-10-18T10:31:27.000Z
|
2021-12-15T05:56:21.000Z
|
docs/conf.py
|
pyenergyplus/witheppy
|
ea9d21976fc018261aa5f8464125df4bf866171a
|
[
"MIT"
] | 2
|
2018-10-15T15:36:02.000Z
|
2020-12-30T00:17:02.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# witheppy documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import witheppy
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"nbsphinx",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"witheppy"
copyright = u"2018-2021, Santosh Philip"
author = u"Santosh Philip"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = witheppy.__version__
# The full version, including alpha/beta/rc tags.
release = witheppy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "witheppydoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"witheppy.tex",
u"witheppy Documentation",
u"Santosh Philip",
"manual",
),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "witheppy", u"witheppy Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"witheppy",
u"witheppy Documentation",
author,
"witheppy",
"One line description of project.",
"Miscellaneous",
),
]
| 29.586826
| 78
| 0.67456
|
388c11b0ac8cee5f8d5a9a41acd7a56fdf35ace4
| 2,081
|
py
|
Python
|
starboard/starboard_entry.py
|
glryanon/Trusty-cogs
|
1d5056ad166a7e7ee5039baa31748b1995ae81f6
|
[
"MIT"
] | 1
|
2020-12-28T15:58:16.000Z
|
2020-12-28T15:58:16.000Z
|
starboard/starboard_entry.py
|
glryanon/Trusty-cogs
|
1d5056ad166a7e7ee5039baa31748b1995ae81f6
|
[
"MIT"
] | null | null | null |
starboard/starboard_entry.py
|
glryanon/Trusty-cogs
|
1d5056ad166a7e7ee5039baa31748b1995ae81f6
|
[
"MIT"
] | 1
|
2020-07-27T00:04:45.000Z
|
2020-07-27T00:04:45.000Z
|
from typing import Optional
class StarboardEntry:
    def __init__(
        self,
        name: str,
        channel: int,
        emoji: str,
        colour: str = "user",
        enabled: bool = True,
        selfstar: bool = False,
        blacklist_role: Optional[list] = None,
        whitelist_role: Optional[list] = None,
        messages: Optional[list] = None,
        blacklist_channel: Optional[list] = None,
        whitelist_channel: Optional[list] = None,
        threshold: int = 1,
    ):
        super().__init__()
        self.name = name
        self.channel = channel
        self.emoji = emoji
        self.colour = colour
        self.enabled = enabled
        self.selfstar = selfstar
        # use fresh lists so instances never share a mutable default argument
        self.blacklist_role = blacklist_role if blacklist_role is not None else []
        self.whitelist_role = whitelist_role if whitelist_role is not None else []
        self.messages = messages if messages is not None else []
        self.blacklist_channel = blacklist_channel if blacklist_channel is not None else []
        self.whitelist_channel = whitelist_channel if whitelist_channel is not None else []
        self.threshold = threshold
def to_json(self) -> dict:
return {
"name": self.name,
"enabled": self.enabled,
"channel": self.channel,
"emoji": self.emoji,
"colour": self.colour,
"selfstar": self.selfstar,
"blacklist_role": self.blacklist_role,
"whitelist_role": self.whitelist_role,
"messages": self.messages,
"blacklist_channel": self.blacklist_channel,
"whitelist_channel": self.whitelist_channel,
"threshold": self.threshold,
}
@classmethod
def from_json(cls, data: dict):
colour = "user"
selfstar = False
if "selfstar" in data:
selfstar = data["selfstar"]
if "colour" in data:
colour = data["colour"]
return cls(
data["name"],
data["channel"],
data["emoji"],
colour,
data["enabled"],
selfstar,
data["blacklist_role"],
data["whitelist_role"],
data["messages"],
data["blacklist_channel"],
data["whitelist_channel"],
data["threshold"],
)
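# Hedged round-trip sketch (the values are made up):
#
#   entry = StarboardEntry("stars", 1234567890, "\N{WHITE MEDIUM STAR}", threshold=3)
#   restored = StarboardEntry.from_json(entry.to_json())
#   assert restored.to_json() == entry.to_json()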
| 29.728571
| 56
| 0.524748
|
e2c307238258053dd5f23f0c8f7c7f3954207bac
| 18,061
|
py
|
Python
|
nevergrad/functions/base.py
|
mathuvu/nevergrad
|
8e116190a8a29c238e655d728fc4816f7b4e0415
|
[
"MIT"
] | null | null | null |
nevergrad/functions/base.py
|
mathuvu/nevergrad
|
8e116190a8a29c238e655d728fc4816f7b4e0415
|
[
"MIT"
] | null | null | null |
nevergrad/functions/base.py
|
mathuvu/nevergrad
|
8e116190a8a29c238e655d728fc4816f7b4e0415
|
[
"MIT"
] | null | null | null |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
from pathlib import Path
import numbers
import numpy as np
import nevergrad.common.typing as tp
from nevergrad.common import errors
from nevergrad.common.errors import ( # pylint: disable=unused-import
UnsupportedExperiment as UnsupportedExperiment,
)
from nevergrad.parametrization import parameter as p
from nevergrad.optimization import multiobjective as mobj
EF = tp.TypeVar("EF", bound="ExperimentFunction")
ME = tp.TypeVar("ME", bound="MultiExperiment")
def _reset_copy(obj: p.Parameter) -> p.Parameter:
"""Copy a parameter and resets its random state to obtain variability"""
out = obj.copy()
out._set_random_state(None) # propagates None to sub-parameters
return out
# pylint: disable=too-many-instance-attributes
class ExperimentFunction:
"""Combines a function and its parametrization for running experiments (see benchmark subpackage)
Parameters
----------
function: callable
the callable to convert
parametrization: Parameter
the parametrization of the function
Notes
-----
- you can redefine custom "evaluation_function" and "compute_pseudotime" for custom behaviors in experiments
- the bool/int/str/float init arguments are added as descriptors for the experiment which will serve in
      defining test cases. You can add more through "add_descriptors".
    - Make sure the "copy()" method works (provides a new copy of the function *and* its parametrization)
if you subclass ExperimentFunction since it is intensively used in benchmarks.
By default, this will create a new instance using the same init arguments as your current instance
(they were recorded through "__new__"'s magic) and apply the additional descriptors you may have added,
as well as propagate the new parametrization *if it has a different name as the current one*.
"""
def __new__(cls: tp.Type[EF], *args: tp.Any, **kwargs: tp.Any) -> EF:
"""Identifies initialization parameters during initialization and store them"""
inst = object.__new__(cls)
sig = inspect.signature(cls.__init__)
callargs: tp.Dict[str, tp.Any] = {}
try:
boundargs = sig.bind(inst, *args, **kwargs)
except TypeError:
            pass  # either a problem which will be caught later or an unpickling
else:
boundargs.apply_defaults() # make sure we get the default non-provided arguments
callargs = dict(boundargs.arguments)
callargs.pop("self")
inst._auto_init = callargs
inst._descriptors = {
x: y for x, y in callargs.items() if isinstance(y, (str, tuple, int, float, bool))
}
inst._descriptors["function_class"] = cls.__name__
return inst # type: ignore
def __init__(
self: EF,
function: tp.Callable[..., tp.Loss],
parametrization: p.Parameter,
) -> None:
assert callable(function)
self._auto_init: tp.Dict[str, tp.Any] # filled by __new__
self._descriptors: tp.Dict[str, tp.Any] # filled by __new__
self._parametrization: p.Parameter
self.parametrization = parametrization
# force random state initialization
self.multiobjective_upper_bounds: tp.Optional[np.ndarray] = None
self.__function = function # __ to prevent overrides
# if this is not a function bound to this very instance, add the function/callable name to the descriptors
if not hasattr(function, "__self__") or function.__self__ != self: # type: ignore
name = function.__name__ if hasattr(function, "__name__") else function.__class__.__name__
self._descriptors.update(name=name)
if len(self.parametrization.name) > 24:
raise RuntimeError(
f"For the sake of benchmarking, please rename the current parametrization:\n{self.parametrization!r}\n"
"to a shorter name. This way it will be more readable in the experiments.\n"
'Eg: parametrization.set_name("") to just ignore it\n'
"CAUTION: Make sure you set different names for different parametrization configurations if you want it "
"to be used in order to differentiate between benchmarks cases."
)
@property
def dimension(self) -> int:
return self._parametrization.dimension
@property
def parametrization(self) -> p.Parameter:
return self._parametrization
@parametrization.setter
def parametrization(self, parametrization: p.Parameter) -> None:
self._parametrization = parametrization
self._parametrization.freeze()
# pylint: disable=pointless-statement
self._parametrization.random_state # force initialization for synchronization of random state
# # TODO investigate why this synchronization is needed
@property
def function(self) -> tp.Callable[..., tp.Loss]:
return self.__function
def __call__(self, *args: tp.Any, **kwargs: tp.Any) -> tp.Loss:
"""Call the function directly (equivalent to parametrized_function.function(*args, **kwargs))"""
return self.function(*args, **kwargs)
@property
def descriptors(self) -> tp.Dict[str, tp.Any]:
"""Description of the function parameterization, as a dict. This base class implementation provides function_class,
noise_level, transform and dimension
"""
desc = dict(self._descriptors) # Avoid external modification
desc.update(parametrization=self.parametrization.name, dimension=self.dimension)
return desc
def add_descriptors(self, **kwargs: tp.Optional[tp.Hashable]) -> None:
self._descriptors.update(kwargs)
def __repr__(self) -> str:
"""Shows the function name and its summary"""
params = [f"{x}={repr(y)}" for x, y in sorted(self._descriptors.items())]
return "Instance of {}({})".format(self.__class__.__name__, ", ".join(params))
def equivalent_to(self, other: tp.Any) -> bool:
"""Check that two instances where initialized with same settings.
This is not meant to be used to check if functions are exactly equal
(initialization may hold some randomness)
This is only useful for unit testing.
(may need to be overloaded to make faster if tests are getting slow)
"""
if other.__class__ != self.__class__:
return False
return (
bool(self._descriptors == other._descriptors)
and self.parametrization.name == other.parametrization.name
)
def _internal_copy(self: EF) -> EF:
"""This is "black magic" which creates a new instance using the same init parameters
that you provided and which were recorded through the __new__ method of ExperimentFunction
"""
# auto_init is automatically filled by __new__, aka when creating the instance
output: EF = self.__class__(
**{x: _reset_copy(y) if isinstance(y, p.Parameter) else y for x, y in self._auto_init.items()}
)
return output
def copy(self: EF) -> EF:
"""Provides a new equivalent instance of the class, possibly with
different random initialization, to provide different equivalent test cases
when using different seeds.
This also checks that parametrization and descriptors are correct.
You should preferably override _internal_copy
"""
# add descriptors present in self but not added by initialization
# (they must have been added manually)
output = self._internal_copy()
keys = set(output.descriptors)
output.add_descriptors(**{x: y for x, y in self.descriptors.items() if x not in keys})
        # parametrization may have been overridden, so let's always update it
# Caution: only if names differ!
if output.parametrization.name != self.parametrization.name:
output.parametrization = _reset_copy(self.parametrization)
# then if there are still differences, something went wrong
if not output.equivalent_to(self):
raise errors.ExperimentFunctionCopyError(
f"Copy of\n{self}\nwith descriptors:\n{self._descriptors}\nreturned non-equivalent\n"
f"{output}\nwith descriptors\n{output._descriptors}.\n\n"
"This means that the auto-copy behavior of ExperimentFunction does not work.\n"
"You may want to implement your own copy method, or check implementation of "
"ExperimentFunction.__new__ and copy to better understand what happens"
)
# propagate other useful information # TODO a bit hacky
output.parametrization._constraint_checkers = self.parametrization._constraint_checkers
output.multiobjective_upper_bounds = (
self.multiobjective_upper_bounds
) # TODO not sure why this is needed
return output
def compute_pseudotime( # pylint: disable=unused-argument
self, input_parameter: tp.Any, loss: tp.Loss
) -> float:
"""Computes a pseudotime used during benchmarks for mocking parallelization in a reproducible way.
By default, each call takes 1 unit of pseudotime, but this can be modified by overriding this
function and the pseudo time can be a function of the function inputs and output.
Note: This replaces get_postponing_delay which has been aggressively deprecated
Parameters
----------
input_parameter: Any
the input that was provided to the actual function
value: float
the output of the actual function
Returns
-------
float
the pseudo computation time of the call to the actual function
"""
return 1.0
def evaluation_function(self, *recommendations: p.Parameter) -> float:
"""Provides the evaluation crieterion for the experiment.
In case of mono-objective, it defers to evaluation_function
Otherwise, it uses the hypervolume.
This function can be overriden to provide custom behaviors.
Parameters
----------
*pareto: Parameter
pareto front provided by the optimizer
"""
if self.multiobjective_upper_bounds is None: # singleobjective case
assert len(recommendations) == 1
output = self.function(*recommendations[0].args, **recommendations[0].kwargs)
assert isinstance(
output, numbers.Number
), f"evaluation_function can only be called on singleobjective experiments (output={output}) function={self.function}."
return output # type: ignore
# multiobjective case
hypervolume = mobj.HypervolumePareto(
upper_bounds=self.multiobjective_upper_bounds, seed=self.parametrization.random_state
)
for candidate in recommendations:
hypervolume.add(candidate)
return -hypervolume.best_volume
def update_leaderboard(identifier: str, loss: float, array: np.ndarray, verbose: bool = True) -> None:
"""Handy function for storing best results for challenging functions (eg.: Photonics)
The best results are kept in a file that is automatically updated with new data.
This may require installing nevergrad in dev mode.
Parameters
----------
identifier: str
the identifier of the problem
loss: float
the new loss, if better than the one in the file, the file will be updated
array: np.ndarray
the array corresponding to the loss
verbose: bool
whether to also print a message if the leaderboard was updated
"""
# pylint: disable=import-outside-toplevel
import pandas as pd # lazy import, to avoid requiring pandas just to use an ExperimentFunction
loss = np.round(loss, decimals=12) # this is probably already too precise for the machine
filepath = Path(__file__).with_name("leaderboard.csv")
bests = pd.DataFrame(columns=["loss", "array"])
if filepath.exists():
bests = pd.read_csv(filepath, index_col=0)
if identifier not in bests.index:
bests.loc[identifier, :] = (float("inf"), "")
try:
if not bests.loc[identifier, "loss"] < loss: # works for nan
bests.loc[identifier, "loss"] = loss
string = "[" + ",".join(str(x) for x in array.ravel()) + "]"
bests.loc[identifier, "array"] = string
bests = bests.loc[sorted(x for x in bests.index), :]
bests.to_csv(filepath)
if verbose:
print(f"New best value for {identifier}: {loss}\nwith: {string[:80]}")
except Exception: # pylint: disable=broad-except
pass # better to avoid raising errors for this convenience feature
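# Illustrative usage sketch (not part of the original module); the problem name and
# candidate array below are hypothetical placeholders.
def _example_update_leaderboard() -> None:  # pragma: no cover - documentation sketch
    candidate = np.random.RandomState(0).normal(size=16)
    update_leaderboard("photonics_bragg_16", loss=0.1234, array=candidate, verbose=True)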
class ArrayExperimentFunction(ExperimentFunction):
"""Combines a function and its parametrization for running experiments (see benchmark subpackage).
Extends ExperimentFunction, in the special case of an array, by allowing the creation of symmetries
of a single function. We can create ArrayExperimentFunction(callable, symmetry=i) for i in range(0, 2**d)
when the callable works on R^d.
Works only if there are no constraints.
Parameters
----------
function: callable
the callable to convert
parametrization: Parameter
the parametrization of the function
symmetry: int
number parametrizing how we symmetrize the function.
"""
def __init__(
self, function: tp.Callable[..., tp.Loss], parametrization: p.Parameter, symmetry: int = 0
) -> None:
"""Adds a "symmetry" parameter, which allows the creation of many symmetries of a given function.
symmetry: an int, 0 by default.
if not zero, a symmetrization is applied to the input; each of the 2^d possible values
for symmetry % 2^d gives one different function.
Makes sense if and only if (1) the input is a single ndarray (2) the domains are symmetric."""
self._inner_function = function
super().__init__(self.symmetrized_function, parametrization)
assert isinstance(
parametrization, p.Array
), f"{type(parametrization)} is not p.Array; {parametrization}."
assert (parametrization.bounds[0] is None) == (parametrization.bounds[1] is None)
assert len(parametrization._constraint_checkers) == 0
assert symmetry >= 0
assert symmetry < 2**self.dimension
# The number 11111111111111111111111 is prime (using a prime is overkill, but it works).
symmetry = (symmetry * 11111111111111111111111) % (2**self.dimension)
if symmetry != 0:
self._function = self.symmetrized_function
self.threshold_coefficients = np.zeros(self.dimension)
self.slope_coefficients = np.ones(self.dimension)
for i in range(self.dimension): # pylint: disable=consider-using-enumerate
if symmetry % 2 == 1:
if self.parametrization.bounds[0] is not None and self.parametrization.bounds[1] is not None: # type: ignore
middle = (self.parametrization.bounds[0][0] + self.parametrization.bounds[1][0]) / 2.0 # type: ignore
else:
middle = 0.0
self.threshold_coefficients[i] = 2.0 * middle # Otherwise we keep 0.
self.slope_coefficients[i] = -1.0 # Otherwise we keep 1.
symmetry = symmetry // 2
else:
self._function = function
self.threshold_coefficients = np.zeros(self.dimension)
self.slope_coefficients = np.ones(self.dimension)
def symmetrized_function(self, x: np.ndarray) -> tp.Loss:
assert isinstance(x, np.ndarray), "symmetry != 0 works only when the input is an array."
assert len(x.shape) == 1, "only one-dimensional arrays for now."
return self._inner_function(self.threshold_coefficients + self.slope_coefficients * x) # type: ignore
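# Illustrative sketch (not part of the original file): the symmetrization above is the
# affine map x -> threshold_coefficients + slope_coefficients * x; a flipped coordinate
# with bounds [lo, hi] is reflected around the midpoint, x_i -> (lo + hi) - x_i.
# The bounds and point below are hypothetical.
def _example_reflection() -> np.ndarray:  # pragma: no cover - documentation sketch
    lo, hi = -1.0, 3.0
    threshold = np.array([0.0, lo + hi])  # second coordinate is flipped
    slope = np.array([1.0, -1.0])
    x = np.array([0.5, 0.0])
    return threshold + slope * x  # -> array([0.5, 2.0]): 0.0 reflected around the midpoint 1.0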
class MultiExperiment(ExperimentFunction):
"""Pack several mono-objective experiments into a multiobjective experiment
Parameters
----------
experiments: iterable of ExperimentFunction
Notes
-----
- packing of multiobjective experiments is not supported.
- parametrization must match between all functions (only their name is checked at initialization)
- there is no descriptor for the packed functions, except the name (concatenation of packed function names).
"""
def __init__(
self,
experiments: tp.Iterable[ExperimentFunction],
upper_bounds: tp.ArrayLike,
) -> None:
xps = list(experiments)
assert xps
assert len(xps) == len({id(xp) for xp in xps}), "All experiments must be different instances"
assert all(
xp.multiobjective_upper_bounds is None for xp in xps
), "Packing multiobjective xps is not supported."
assert all(
xps[0].parametrization.name == xp.parametrization.name for xp in xps[1:]
), "Parametrization do not match"
super().__init__(self._multi_func, xps[0].parametrization)
self.multiobjective_upper_bounds = np.array(upper_bounds)
self._descriptors.update(name=",".join(xp._descriptors.get("name", "#unknown#") for xp in xps))
self._experiments = xps
def _multi_func(self, *args: tp.Any, **kwargs: tp.Any) -> np.ndarray:
outputs = [f(*args, **kwargs) for f in self._experiments]
return np.array(outputs)
def _internal_copy(self) -> "MultiExperiment":
assert self.multiobjective_upper_bounds is not None
return MultiExperiment([f.copy() for f in self._experiments], self.multiobjective_upper_bounds)
| 46.790155
| 131
| 0.665633
|
bfde06c5cc36c0bb7e4ce523a7c5f573da81586c
| 4,482
|
py
|
Python
|
mitmproxy/protocol/http_replay.py
|
jvillacorta/mitmproxy
|
3aa2d59f627e0fc95167fb76ffbe84330e3a5cc5
|
[
"MIT"
] | 1
|
2018-03-31T17:16:07.000Z
|
2018-03-31T17:16:07.000Z
|
mitmproxy/protocol/http_replay.py
|
jvillacorta/mitmproxy
|
3aa2d59f627e0fc95167fb76ffbe84330e3a5cc5
|
[
"MIT"
] | null | null | null |
mitmproxy/protocol/http_replay.py
|
jvillacorta/mitmproxy
|
3aa2d59f627e0fc95167fb76ffbe84330e3a5cc5
|
[
"MIT"
] | 4
|
2018-04-18T13:17:01.000Z
|
2021-02-21T17:08:33.000Z
|
from __future__ import absolute_import, print_function, division
import traceback
import netlib.exceptions
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import models
from netlib.http import http1
from netlib import basethread
# TODO: Doesn't really belong into mitmproxy.protocol...
class RequestReplayThread(basethread.BaseThread):
name = "RequestReplayThread"
def __init__(self, config, flow, event_queue, should_exit):
"""
event_queue can be a queue or None, if no scripthooks should be
processed.
"""
self.config, self.flow = config, flow
if event_queue:
self.channel = controller.Channel(event_queue, should_exit)
else:
self.channel = None
super(RequestReplayThread, self).__init__(
"RequestReplay (%s)" % flow.request.url
)
def run(self):
r = self.flow.request
first_line_format_backup = r.first_line_format
try:
self.flow.response = None
# If we have a channel, run script hooks.
if self.channel:
request_reply = self.channel.ask("request", self.flow)
if isinstance(request_reply, models.HTTPResponse):
self.flow.response = request_reply
if not self.flow.response:
# In all modes, we directly connect to the server displayed
if self.config.options.mode == "upstream":
server_address = self.config.upstream_server.address
server = models.ServerConnection(server_address, (self.config.options.listen_host, 0))
server.connect()
if r.scheme == "https":
connect_request = models.make_connect_request((r.data.host, r.port))
server.wfile.write(http1.assemble_request(connect_request))
server.wfile.flush()
resp = http1.read_response(
server.rfile,
connect_request,
body_size_limit=self.config.options.body_size_limit
)
if resp.status_code != 200:
raise exceptions.ReplayException("Upstream server refuses CONNECT request")
server.establish_ssl(
self.config.clientcerts,
sni=self.flow.server_conn.sni
)
r.first_line_format = "relative"
else:
r.first_line_format = "absolute"
else:
server_address = (r.host, r.port)
server = models.ServerConnection(server_address, (self.config.options.listen_host, 0))
server.connect()
if r.scheme == "https":
server.establish_ssl(
self.config.clientcerts,
sni=self.flow.server_conn.sni
)
r.first_line_format = "relative"
server.wfile.write(http1.assemble_request(r))
server.wfile.flush()
self.flow.server_conn = server
self.flow.response = models.HTTPResponse.wrap(http1.read_response(
server.rfile,
r,
body_size_limit=self.config.options.body_size_limit
))
if self.channel:
response_reply = self.channel.ask("response", self.flow)
if response_reply == exceptions.Kill:
raise exceptions.Kill()
except (exceptions.ReplayException, netlib.exceptions.NetlibException) as e:
self.flow.error = models.Error(str(e))
if self.channel:
self.channel.ask("error", self.flow)
except exceptions.Kill:
# Kill should only be raised if there's a channel in the
# first place.
from ..proxy.root_context import Log
self.channel.tell("log", Log("Connection killed", "info"))
except Exception:
from ..proxy.root_context import Log
self.channel.tell("log", Log(traceback.format_exc(), "error"))
finally:
r.first_line_format = first_line_format_backup
| 42.283019
| 106
| 0.550424
|
4f219e99578270fa19c1394d075fae20ade11680
| 2,375
|
py
|
Python
|
aiven/service/postgres.py
|
abn/aiven-monitor-http
|
a519aa8b17ecd9f645243dda8f6e32690b93741e
|
[
"Apache-2.0"
] | 1
|
2020-04-26T13:56:26.000Z
|
2020-04-26T13:56:26.000Z
|
aiven/service/postgres.py
|
abn/aiven-monitor-http
|
a519aa8b17ecd9f645243dda8f6e32690b93741e
|
[
"Apache-2.0"
] | null | null | null |
aiven/service/postgres.py
|
abn/aiven-monitor-http
|
a519aa8b17ecd9f645243dda8f6e32690b93741e
|
[
"Apache-2.0"
] | 2
|
2020-07-10T13:48:39.000Z
|
2021-05-24T10:09:24.000Z
|
import logging
import os
import ssl
from contextlib import asynccontextmanager
from dataclasses import dataclass, field
from typing import AsyncGenerator, List, Optional
import asyncpg
import ujson
logger = logging.getLogger(__name__)
@dataclass
class PostgresManager:
url: str = field(
default=os.environ.get(
"POSTGRES_URL", "postgresql://aiven:aiven@127.0.0.1:5432/aiven"
)
)
ssl_cafile: Optional[str] = field(default=os.environ.get("POSTGRES_CAFILE"))
ssl_context: Optional[ssl.SSLContext] = field(default=None, init=False, repr=False)
_pool: Optional[asyncpg.pool.Pool] = field(default=None, repr=False)
def __post_init__(self):
if self.ssl_cafile:
self.ssl_context = ssl.create_default_context(
purpose=ssl.Purpose.SERVER_AUTH, cafile=self.ssl_cafile
)
async def close(self):
if self._pool is not None:
await self._pool.close()
self._pool = None
async def init(self) -> None:
if self._pool is None:
async def init_connection(conn):
await conn.set_type_codec(
"jsonb",
encoder=ujson.dumps,
decoder=ujson.loads,
schema="pg_catalog",
)
self._pool = await asyncpg.create_pool(
dsn=self.url,
ssl=self.ssl_context,
min_size=1,
max_size=3,
init=init_connection,
)
@asynccontextmanager
async def connection(self, warning_msg: str = None) -> AsyncGenerator:
await self.init()
try:
async with self._pool.acquire() as connection:
yield connection
except (ValueError, AttributeError, TypeError) as e:
logger.warning(e)
if warning_msg:
logger.warning(warning_msg)
except Exception as e:
logger.exception(e)
async def execute(self, *args, **kwargs) -> List[asyncpg.Record]:
"""
Helper method to execute an SQL query and fetch the results within a transaction.
"""
async with self.connection() as connection: # type: asyncpg.Connection
async with connection.transaction():
return await connection.fetch(*args, **kwargs)
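# Illustrative usage sketch (not part of the original module). The DSN and query below
# are hypothetical; by default PostgresManager reads POSTGRES_URL and POSTGRES_CAFILE
# from the environment.
async def _example_usage() -> None:  # pragma: no cover - documentation sketch
    manager = PostgresManager(url="postgresql://aiven:aiven@127.0.0.1:5432/aiven")
    rows = await manager.execute("SELECT 1 AS answer")  # runs inside a transaction
    logger.info("fetched %d row(s)", len(rows))
    await manager.close()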
| 31.666667
| 87
| 0.596632
|
d4cb1bb632f63b7e6bd47776bdc7088cae09ef1c
| 430
|
py
|
Python
|
apps/accounts/migrations/0003_alter_profile_allow_nsfw.py
|
Visualway/Vitary
|
c7db9a25837fa7390b2177b9db48e73c6f1ab3c8
|
[
"BSD-3-Clause"
] | 4
|
2021-12-24T16:07:44.000Z
|
2022-03-04T02:30:20.000Z
|
apps/accounts/migrations/0003_alter_profile_allow_nsfw.py
|
Visualway/Vitary
|
c7db9a25837fa7390b2177b9db48e73c6f1ab3c8
|
[
"BSD-3-Clause"
] | 4
|
2021-12-30T13:32:56.000Z
|
2022-03-15T03:58:48.000Z
|
apps/accounts/migrations/0003_alter_profile_allow_nsfw.py
|
Visualway/Vitary
|
c7db9a25837fa7390b2177b9db48e73c6f1ab3c8
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-05-04 06:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_profile_allow_nsfw'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='allow_nsfw',
field=models.BooleanField(default=False, verbose_name='Allow NSFW Content'),
),
]
| 22.631579
| 88
| 0.623256
|
740864a7334ef28bda4562198f520548c049c6f8
| 5,872
|
py
|
Python
|
docs/conf.py
|
jbellevi/lumopt
|
c350a1a03b7e4e4df2af3435dadac4624258da29
|
[
"MIT"
] | 101
|
2018-09-25T01:46:01.000Z
|
2022-03-22T23:15:13.000Z
|
docs/conf.py
|
irdiez/lumopt_04_04_2020
|
c350a1a03b7e4e4df2af3435dadac4624258da29
|
[
"MIT"
] | 13
|
2018-10-16T00:04:57.000Z
|
2022-02-18T06:28:28.000Z
|
docs/conf.py
|
irdiez/lumopt_04_04_2020
|
c350a1a03b7e4e4df2af3435dadac4624258da29
|
[
"MIT"
] | 62
|
2018-09-30T00:59:47.000Z
|
2022-03-08T16:27:31.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
print(os.path.abspath('..'))
import matplotlib
matplotlib.use('Agg')
# -- Project information -----------------------------------------------------
project = u'LumOpt'
copyright = u'2018, Christopher Lalau-Keraly'
author = u'Christopher Lalau-Keraly'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.7.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
#'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'LumOptdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'LumOpt.tex', u'LumOpt Documentation',
u'Christopher Lalau-Keraly', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'LumOpt', u' Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'LumOpt', u'LumOpt Documentation',
author, 'LumOpt', 'Continuous adjoint optimization wrapper for Lumerical.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
autoclass_content = 'both'
html_show_sourcelink = False
#
import sys
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['lumapi']#,'matplotlib','matplotlib.pyplot','matplotlib.animation']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
| 30.42487
| 83
| 0.652759
|
0f86be2feb31a6a5eb3eceb86c70f94bc8511842
| 1,143
|
py
|
Python
|
tools/view_npy.py
|
rehohoho/coiltraine
|
c3ce77d13fdf8f96eed3de0dae4c01be65cb5656
|
[
"MIT"
] | 204
|
2019-01-28T13:31:53.000Z
|
2022-03-23T23:57:18.000Z
|
tools/view_npy.py
|
rehohoho/coiltraine
|
c3ce77d13fdf8f96eed3de0dae4c01be65cb5656
|
[
"MIT"
] | 39
|
2019-02-02T22:14:14.000Z
|
2022-01-30T08:21:51.000Z
|
tools/view_npy.py
|
rehohoho/coiltraine
|
c3ce77d13fdf8f96eed3de0dae4c01be65cb5656
|
[
"MIT"
] | 64
|
2019-02-24T10:26:04.000Z
|
2022-03-04T12:49:59.000Z
|
import argparse
from PIL import Image
import numpy as np
import os
from shutil import copyfile
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='NPY viewer')
# parser.add_argument('model', type=str, help='Path to model definition json. Model weights should be on the same path.')
parser.add_argument('-f', '--file', default="")
test_images_write_path = 'tools/_test_images_'
args = parser.parse_args()
preload_name = args.file
sensor_data_names, measurements = np.load(os.path.join('../_preloads', preload_name + '.npy'))
if not os.path.exists(test_images_write_path + preload_name):
os.mkdir(test_images_write_path + preload_name)
for i in range(len(measurements)):
img_path = os.path.join(os.environ["COIL_DATASET_PATH"], preload_name, # Make this preload name better
sensor_data_names[i].split('/')[-2],
sensor_data_names[i].split('/')[-1])
copyfile(img_path,
os.path.join(test_images_write_path + preload_name, str(i) + '.png'))
print(' image ', i)
| 30.078947
| 125
| 0.649169
|
eff06a2e0b1e9e169a80801fdc11248190795b2a
| 1,992
|
py
|
Python
|
exercicios_python/Exercicio_068.py
|
GabsOrtega/logica-python
|
6f4e752d0796c9bf70be8f7108bc3bd49d877709
|
[
"MIT"
] | null | null | null |
exercicios_python/Exercicio_068.py
|
GabsOrtega/logica-python
|
6f4e752d0796c9bf70be8f7108bc3bd49d877709
|
[
"MIT"
] | null | null | null |
exercicios_python/Exercicio_068.py
|
GabsOrtega/logica-python
|
6f4e752d0796c9bf70be8f7108bc3bd49d877709
|
[
"MIT"
] | null | null | null |
from random import randint
from time import sleep
num = soma = venceu = 0
cond = ''
print('='*20)
print('PAR OU ÍMPAR')
print('='*20)
while True:
num = int(input('Digite o valor desejado: '))
cond = str(input('Você quer par ou impar? [P/I]').upper().strip()[0])
computador = randint(0, 10)
soma = num + computador
print('='*20)
if cond == 'P':
print(f'Você escolheu PAR e o valor {num}')
print(f'Computador escolheu IMPAR e o valor {computador}')
print('PROCESSANDO...')
print('='*20)
sleep(2)
print(f'A soma é igual a {soma}')
if soma % 2 == 0:
print('-' * 20)
print('DEU PAR')
print('Você venceu!')
print('Computador perdeu!')
venceu += 1
print('-' * 20)
else:
print('-' * 20)
print('DEU IMPAR')
print('Computador venceu!')
print('Você perdeu!')
print('-' * 20)
break
if cond == 'I':
print('=-'*20)
print(f'Você escolheu Impar e o valor {num}')
print(f'Computador escolheu par e o valor {computador}')
print('PROCESSANDO...')
print('=-' * 20)
sleep(2)
print(f'A soma é igual a {soma}')
if soma % 2 == 0:
print('-'*20)
print('Deu PAR')
print('Você perdeu!')
print('Computador ganhou!')
print('-' * 20)
break
else:
print('-' * 20)
print('Deu IMPAR')
print('Você venceu!')
print('Computador perdeu!')
print('-' * 20)
venceu += 1
elif cond != 'P' and cond != 'I':
print('=-' * 20)
print('Apenas PAR/IMPAR [P/I] são aceitos!')
print('=-' * 20)
print('=-'*20)
print(f'GAME OVER! Você venceu {venceu} vezes')
print('Finalizando programa...')
print('=-'*20)
| 29.731343
| 74
| 0.461847
|
38cf9ce3f7b0a16fcec9fe642139d032ad5ce3ab
| 5,617
|
py
|
Python
|
asim/models/MXGSEventReport.py
|
garethcmurphy/mysite
|
21a8247dbed3d3196ab22c5cdd10b0c79dccb991
|
[
"MIT"
] | null | null | null |
asim/models/MXGSEventReport.py
|
garethcmurphy/mysite
|
21a8247dbed3d3196ab22c5cdd10b0c79dccb991
|
[
"MIT"
] | null | null | null |
asim/models/MXGSEventReport.py
|
garethcmurphy/mysite
|
21a8247dbed3d3196ab22c5cdd10b0c79dccb991
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils import timezone
from django.contrib.postgres.fields import ArrayField
import datetime
from django.contrib import admin
class MXGSEventReport (models.Model):
packet_length = models.IntegerField('Packet length')
INFORMATION = "INF"
ADVISORY = "ADV"
CAUTION = "CAU"
WARNING = "WAR"
EMERGENCY = "EME"
EVENT_SEVERITY_CHOICES = (
(INFORMATION, "Information"),
(ADVISORY, "Advisory"),
(CAUTION, "Caution"),
(WARNING, "Warning"),
(EMERGENCY, "Emergency")
)
event_severity = models.CharField(max_length=3,
choices=EVENT_SEVERITY_CHOICES,
default=INFORMATION)
EVENT_30000 = "30000"
EVENT_30010 = "30010"
EVENT_30090 = "30090"
EVENT_30100 = "30100"
EVENT_30110 = "30110"
EVENT_30120 = "30120"
EVENT_30130 = "30130"
EVENT_30140 = "30140"
EVENT_30160 = "30160"
EVENT_40000 = "40000"
EVENT_40010 = "40010"
EVENT_40020 = "40020"
EVENT_40030 = "40030"
EVENT_40040 = "40040"
EVENT_40050 = "40050"
EVENT_40090 = "40090"
EVENT_40100 = "40100"
EVENT_40110 = "40110"
EVENT_40120 = "40120"
EVENT_40150 = "40150"
EVENT_40160 = "40160"
EVENT_40170 = "40170"
EVENT_40180 = "40180"
EVENT_40190 = "40190"
EVENT_40200 = "40200"
EVENT_40210 = "40210"
EVENT_40260 = "40260"
EVENT_40270 = "40270"
EVENT_40280 = "40280"
EVENT_40290 = "40290"
EVENT_ID_CHOICES = (
(EVENT_30000, "Software change, severity 0"),
(EVENT_30010, "Submode change, severity 0"),
(EVENT_30090, "Science observation with priority 3 discarded due to data collection buffer being full, severity 1"),
(EVENT_30100, "Science observation with priority 2 discarded due to data collection buffer being full, severity 2"),
(EVENT_30110, "Science observation with priority 1 discarded due to data collection buffer being full, severity 3"),
(EVENT_30120, "Science observation transferred from data collection buffer to science downlink buffer, severity 0"),
(EVENT_30130, "Science downlink buffer full, data transfer function is suspended, severity 1"),
(EVENT_30140, "Science downlink buffer has free space after being full, data transfer function is resumed, severity 0"),
(EVENT_30160, "Trigger occurred in TGF search window, trigger is sent to MMIA, severity 0"),
(EVENT_40000, "Validation of parameters of a configuration table failed, severity 3"),
(EVENT_40010, "LHP Startup heaters powered on, severity 0"),
(EVENT_40020, "LHP Startup heaters powered off, severity 0"),
(EVENT_40030, "Front-End Ring Buffer FIFO, implemented in the DPU FPGA for incoming Detector Event Data, becomes full, severity 3"),
(EVENT_40040, "Front-End Ring Buffer FIFO, implemented in the DPU FPGA for incoming Detector Event Data, has free space available again immediately after being full, severity 2"),
(EVENT_40050, "trigger occurs in one or more TGF search windows, or when a trigger is received from MMIA, severity 0"),
(EVENT_40090, "successful transfer of a TGF Observation to the Data Collection Buffer, severity 1"),
(EVENT_40100, "ratemeter readings of LED Accepted Counts and HED Accepted Counts are available for background rate calculations and adjustment of trigger thresholds, severity 0"),
(EVENT_40110, "successful transfer of a Background Data Observation to the Data Collection Bufferi, severity 0"),
(EVENT_40120, "successful transfer of a Sampled Detector Events Observation to the Data Collection Bufferi, severity 0"),
(EVENT_40150, "successful transfer of a Pulse-Height Spectrum to the Data Collection Bufferi, severity 0"),
(EVENT_40160, "background rate estimate for the next 1-second period has been successfully calculated for both detector planes, severity 0"),
(EVENT_40170, "one or more of the TGF Trigger Thresholds is changed. Note that it is not transmitted at 1-second intervals, but only when there is a change in one or more thresholds, severity 1"),
(EVENT_40180, "Data Reduction Factor (Q) commanded to the DAUs is changed, severity 1"),
(EVENT_40190, "DPU starts to acquire high-time-resolution histograms directly from the DAUs in Auroral Capture Submode, severity 0"),
(EVENT_40200, "DPU ends acquisition of high-time-resolution histograms in Auroral Capture Submode, severity 0"),
(EVENT_40210, "successful transfer of an Auroral Capture Observation to the Data Collection Buffer, severity 0"),
(EVENT_40260, "loading configuration data to the DAU and/or PSU has failed, severity 3"),
(EVENT_40270, " loading configuration data to the DAU and/or PSU has been successful, severity 0"),
(EVENT_40280, " accepted counts ratemeter acquired directly from the DAU is too high and would result in an algorithm overflow, severity 3"),
(EVENT_40290, "one or more software task scheduling errors are detected, severity 3"),
)
event_id = models.CharField(max_length=5,
choices=EVENT_ID_CHOICES,
default=EVENT_30000)
event_id = models.IntegerField()
utc_year = models.IntegerField('UTC year')
utc_msec = models.IntegerField('UTC msec')
utc_seconds = models.IntegerField('UTC seconds')
| 56.737374
| 204
| 0.680612
|
bc4ada34d6493d74e12bd34eb5f6e18dc0f48d47
| 1,788
|
py
|
Python
|
data_tools/text/binary_classification.py
|
lopez86/DataTools
|
573419f3a40ddeb5e9eaf5ced8ea8dbf41c8a65e
|
[
"MIT"
] | null | null | null |
data_tools/text/binary_classification.py
|
lopez86/DataTools
|
573419f3a40ddeb5e9eaf5ced8ea8dbf41c8a65e
|
[
"MIT"
] | null | null | null |
data_tools/text/binary_classification.py
|
lopez86/DataTools
|
573419f3a40ddeb5e9eaf5ced8ea8dbf41c8a65e
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from ..tf.model_layers import (
concatenate, dense_layer, dropout, global_max_pool, simple_text_inputs,
text_conv_layer, text_embedding_layer,
)
def binary_text_classification_cnn(
sentence_length,
vocab_size,
embedding_size,
convolutions,
dense_size,
do_keep_frac=0.7,
optimizer=tf.train.AdamOptimizer()
):
graph = tf.Graph()
with graph.as_default():
x, y, is_train = simple_text_inputs(sentence_length, 1)
embeddings = text_embedding_layer(x, vocab_size, embedding_size)
max_pools = []
for conv_size, n_filters in convolutions.items():
scope = 'conv{}'.format(conv_size)
activations = text_conv_layer(
embeddings, conv_size, n_filters,
name_scope=scope, var_scope=scope
)
max_pools.append(global_max_pool(activations, scope))
concatenated = concatenate(max_pools, name_scope='concat')
dropped_out = dropout(
concatenated, is_train, name_scope='dropout', keep_prob=do_keep_frac
)
activations = dense_layer(
dropped_out, dense_size,
name_scope='dense', variable_scope='dense'
)
output = dense_layer(
activations, 1, name_scope='out_dense', variable_scope='out_dense',
activation=tf.nn.sigmoid
)
with tf.name_scope('output'):
predictions = tf.identity(output, name='predictions')
loss = tf.identity(
tf.losses.log_loss(y, predictions),
name='loss'
)
training_op = optimizer.minimize(loss, name='training')
return graph
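# Illustrative usage sketch (not part of the original module). The sizes below are
# hypothetical; the returned tf.Graph is a TF1-style graph, so it would be executed
# with a tf.Session built on top of it.
def _example_build_graph():  # pragma: no cover - documentation sketch
    graph = binary_text_classification_cnn(
        sentence_length=100,
        vocab_size=20000,
        embedding_size=128,
        convolutions={3: 64, 4: 64, 5: 64},  # filter width -> number of filters
        dense_size=256,
    )
    return graph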
| 35.058824
| 81
| 0.599553
|
1f040de9d9be3fd82dbf02e98c96455aa8cda294
| 50
|
py
|
Python
|
app/web/views/__init__.py
|
saury2013/online_book
|
53cf56b6a8e088011224559e90be4d23b2f604f9
|
[
"MIT"
] | null | null | null |
app/web/views/__init__.py
|
saury2013/online_book
|
53cf56b6a8e088011224559e90be4d23b2f604f9
|
[
"MIT"
] | 5
|
2021-03-18T20:34:55.000Z
|
2022-03-11T23:24:30.000Z
|
app/web/views/__init__.py
|
saury2013/online_book
|
53cf56b6a8e088011224559e90be4d23b2f604f9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import app.web.views.home
| 16.666667
| 25
| 0.62
|
a3e4bf84475c2b99406b685f732ef21ad14b10a2
| 3,216
|
py
|
Python
|
TermTk/TTkWidgets/TTkModelView/tree.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | 1
|
2022-02-28T16:33:25.000Z
|
2022-02-28T16:33:25.000Z
|
TermTk/TTkWidgets/TTkModelView/tree.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | null | null | null |
TermTk/TTkWidgets/TTkModelView/tree.py
|
UltraStudioLTD/pyTermTk
|
a1e96b0e7f43906b9fda0b16f19f427919a055c2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 Eugenio Parodi <ceccopierangiolieugenio AT googlemail DOT com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from TermTk.TTkCore.constant import TTkK
from TermTk.TTkCore.signal import pyTTkSlot, pyTTkSignal
from TermTk.TTkWidgets.TTkModelView import treewidget
from TermTk.TTkWidgets.TTkModelView.treewidget import TTkTreeWidget
from TermTk.TTkAbstract.abstractscrollarea import TTkAbstractScrollArea
class TTkTree(TTkAbstractScrollArea):
__slots__ = (
"_treeView",
# Forwarded Signals
"itemActivated",
"itemChanged",
"itemClicked",
"itemExpanded",
"itemCollapsed",
"itemDoubleClicked",
# Forwarded Methods
"setAlignment",
"setHeader",
"setHeaderLabels",
"setColumnSize",
"setColumnColors",
"appendItem",
"addTopLevelItem",
"clear",
)
def __init__(self, *args, **kwargs):
TTkAbstractScrollArea.__init__(self, *args, **kwargs)
self._name = kwargs.get("name", "TTkTree")
if "parent" in kwargs:
kwargs.pop("parent")
self._treeView = kwargs.get("treeWidget", TTkTreeWidget(*args, **kwargs))
self.setViewport(self._treeView)
self.setFocusPolicy(TTkK.ClickFocus)
# Forward the signal
self.itemActivated = self._treeView.itemActivated
self.itemChanged = self._treeView.itemChanged
self.itemClicked = self._treeView.itemClicked
self.itemExpanded = self._treeView.itemExpanded
self.itemCollapsed = self._treeView.itemCollapsed
self.itemDoubleClicked = self._treeView.itemDoubleClicked
# Forwarded Methods
# self.setAlignment = self._treeView.setAlignment
# self.setHeader = self._treeView.setHeader
self.setHeaderLabels = self._treeView.setHeaderLabels
# self.setColumnSize = self._treeView.setColumnSize
# self.setColumnColors = self._treeView.setColumnColors
# self.appendItem = self._treeView.appendItem
self.addTopLevelItem = self._treeView.addTopLevelItem
self.clear = self._treeView.clear
| 40.708861
| 83
| 0.711443
|
e3c45322b349ce7533e6ee1496b5825733595cf5
| 4,394
|
py
|
Python
|
20.py
|
xeno14/advent_of_code2018
|
87d0f4dc76ca9cd82d68618255709e446cf09b37
|
[
"MIT"
] | null | null | null |
20.py
|
xeno14/advent_of_code2018
|
87d0f4dc76ca9cd82d68618255709e446cf09b37
|
[
"MIT"
] | null | null | null |
20.py
|
xeno14/advent_of_code2018
|
87d0f4dc76ca9cd82d68618255709e446cf09b37
|
[
"MIT"
] | null | null | null |
class Node:
def __init__(self, val=""):
self.parent = None
self.children = []
self.val = val
self.next = None
def append(self, e):
e.parent = self
self.children.append(e)
def print(self, level=0):
print("{}{}".format(" "*(2*level), self.val))
for c in self.children:
c.print(level + 1)
if self.next is not None:
self.next.print(level)
def cleanup(self):
"""Remove empty nodes
"""
children = [c for c in self.children if c.val != ""]
self.children = children
for c in self.children:
c.cleanup()
return self
def parse(s: str) -> Node:
root = Node()
node = Node()
root.append(node)
for i in range(len(s)):
c = s[i]
if c == "(":
child = Node()
node.append(child)
node = child
elif c == ")":
nxt = Node()
node = node.parent
nxt.parent = node.parent
node.next = nxt
node = nxt
elif c == "|":
child = Node()
node.parent.append(child)
node = child
else:
node.val += c
return root.cleanup()
class DistanceMap:
"""map x,y -> distance
"""
INF = (1 << 20)
def __init__(self):
self.map = dict()
self.set(0, 0, 0)
def set(self, x, y, d):
if x not in self.map:
self.map[x] = dict()
if y not in self.map[x]:
self.map[x][y] = DistanceMap.INF
self.map[x][y] = min(self.map[x][y], d)
def get(self, x, y):
if x not in self.map:
return DistanceMap.INF
if y not in self.map[x]:
return DistanceMap.INF
return self.map[x][y]
def find_max(self):
res = -1
for v in self.map.values():
res = max(res, max(v.values()))
return res
def as_grid(self):
xs = list(self.map.keys())
ys = [y for x in xs for y in self.map[x]]
xmin = min(xs)
xmax = max(xs)
ymin = min(ys)
ymax = max(ys)
nx = xmax - xmin + 1
ny = ymax - ymin + 1
import numpy as np
grid = np.zeros((nx, ny), dtype=np.int)
for x in xs:
for y in ys:
i = x - xmin
j = y - ymin
grid[i, j] = self.get(x, y)
return grid
def print(self):
grid = self.as_grid()
grid[grid == self.INF] = -1
print(grid.T[::-1])
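# Illustrative sketch (not part of the original solution): DistanceMap keeps the
# smallest known door count per room and only ever lowers it.
def _example_distance_map() -> int:  # pragma: no cover - documentation sketch
    dist = DistanceMap()
    dist.set(0, 1, 1)  # one door north of the start
    dist.set(0, 1, 5)  # a longer route to the same room is ignored (min() in set)
    return dist.find_max()  # -> 1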
DELTA_POS = dict(N=(0, 1), E=(1, 0), W=(-1, 0), S=(0, -1))
def build_maze(root: Node, pos: tuple, dist: DistanceMap):
node = root
for c in node.val:
delta = DELTA_POS[c]
x = pos[0]
y = pos[1]
nx = x + delta[0]
ny = y + delta[1]
d = dist.get(x, y)
dist.set(nx, ny, d+1)
pos = (nx, ny)
for child in node.children:
build_maze(child, pos, dist)
if node.next is not None:
build_maze(node.next, pos, dist)
def solve(regex: str, verbose=False):
"""solve part1
"""
if regex.startswith("^"):
regex = regex[1:]
if regex.endswith("$"):
regex = regex[:-1]
root = parse(regex)
dist = DistanceMap()
pos = (0,0)
build_maze(root, pos, dist)
if verbose:
print(regex)
root.print()
dist.print()
return dist.as_grid().max()
def solve2(regex: str):
"""solve part2
"""
if regex.startswith("^"):
regex = regex[1:]
if regex.endswith("$"):
regex = regex[:-1]
root = parse(regex)
dist = DistanceMap()
pos = (0,0)
build_maze(root, pos, dist)
grid = dist.as_grid()
return sum(grid.flatten() >= 1000)
def main():
assert solve("^WNE$", True) == 3
assert solve("^ENWWW(NEEE|SSE(EE|N))$", True) == 10
assert solve("^ENNWSWW(NEWS|)SSSEEN(WNSE|)EE(SWEN|)NNN$", True) == 18
assert solve("^ESSWWN(E|NNENN(EESS(WNSE|)SSS|WWWSSSSE(SW|NNNE)))$", True) == 23
assert solve("^WSSEESWWWNW(S|NENNEEEENN(ESSSSW(NWSW|SSEN)|WSWWN(E|WWS(E|SS))))$", True) == 31
with open("input/20.txt") as f:
regex = f.read().strip()
ans1 = solve(regex, verbose=False)
ans2 = solve2(regex)
print("part1 =", ans1)
print("part2 =", ans2)
if __name__ == '__main__':
main()
| 23.623656
| 97
| 0.491807
|
2f8aed7f846bb03fea2da1dd51c95ba46bd4fc9b
| 2,331
|
py
|
Python
|
asdac/optimizer/copy_pasta.py
|
Akuli/asda
|
041719849cb488c3ad625a851705677a9c4a1553
|
[
"MIT"
] | 5
|
2018-12-01T22:41:42.000Z
|
2019-07-27T19:18:59.000Z
|
asdac/optimizer/copy_pasta.py
|
Akuli/asda
|
041719849cb488c3ad625a851705677a9c4a1553
|
[
"MIT"
] | 7
|
2019-03-03T20:33:29.000Z
|
2019-07-12T15:49:15.000Z
|
asdac/optimizer/copy_pasta.py
|
Akuli/asda
|
041719849cb488c3ad625a851705677a9c4a1553
|
[
"MIT"
] | 1
|
2019-07-07T22:54:51.000Z
|
2019-07-07T22:54:51.000Z
|
# if nodes c and d are "similar", this optimizes e.g. this...
#
# ... ...
# | |
# a b
# | |
# c d
# \ /
# e
#
# ...to this:
#
# ... ...
# | |
# a b
# \ /
# c
# |
# e
#
# currently this doesn't work without an 'e' node, but i think that's actually
# good, because then the "optimization" would add jumps to the opcode
import itertools
from asdac import decision_tree
def _nodes_are_similar(a: decision_tree.Node, b: decision_tree.Node):
def they_are(klass):
return isinstance(a, klass) and isinstance(b, klass)
if they_are(decision_tree.GetBuiltinVar):
return a.varname == b.varname
if (
they_are(decision_tree.SetLocalVar) or
they_are(decision_tree.GetLocalVar)):
return a.var == b.var
if they_are(decision_tree.PopOne):
return a.is_popping_a_dummy == b.is_popping_a_dummy
if (
they_are(decision_tree.Plus) or
they_are(decision_tree.Times) or
they_are(decision_tree.StoreReturnValue) or
they_are(decision_tree.CreateBox) or
they_are(decision_tree.SetToBox) or
they_are(decision_tree.UnBox)):
return True
if they_are(decision_tree.GetAttr):
return a.tybe is b.tybe and a.attrname is b.attrname
if they_are(decision_tree.StrConstant):
return a.python_string == b.python_string
if they_are(decision_tree.IntConstant):
return a.python_int == b.python_int
if they_are(decision_tree.CallFunction):
return (a.how_many_args == b.how_many_args and
a.is_returning == b.is_returning)
if they_are(decision_tree.StrJoin):
return a.how_many_strings == b.how_many_strings
return False
def optimize_similar_nodes(start_node, all_nodes, createfunc_node):
for node in all_nodes:
jumped_from = (
ref.objekt for ref in node.jumped_from
if isinstance(ref.objekt, decision_tree.PassThroughNode)
)
for a, b in itertools.combinations(jumped_from, 2):
assert a.next_node is node
assert b.next_node is node
if _nodes_are_similar(a, b):
decision_tree.replace_node(a, b)
# TODO: is it safe to optimize more than one a,b pair at once?
return True
return False
| 29.1375
| 78
| 0.635779
|
c7ed25d809aa42dc84d4c3d80b4d519ef4d80d48
| 1,338
|
py
|
Python
|
examples/experiments/version_checking_.py
|
drammock/expyfun
|
b92bf5291318ee4cb1692e7bcb9757a422f48304
|
[
"BSD-3-Clause"
] | 7
|
2015-09-27T23:54:07.000Z
|
2022-01-17T01:12:12.000Z
|
examples/experiments/version_checking_.py
|
drammock/expyfun
|
b92bf5291318ee4cb1692e7bcb9757a422f48304
|
[
"BSD-3-Clause"
] | 218
|
2015-02-17T20:29:31.000Z
|
2022-02-28T20:55:24.000Z
|
examples/experiments/version_checking_.py
|
drammock/expyfun
|
b92bf5291318ee4cb1692e7bcb9757a422f48304
|
[
"BSD-3-Clause"
] | 19
|
2015-02-19T18:43:43.000Z
|
2021-11-12T23:13:12.000Z
|
"""
==========================
Version checking functions
==========================
This demonstrates how the version checking functions work.
"""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import tempfile
from expyfun import download_version, run_subprocess
print(__doc__)
# Let's say we want to fix our experiment to use a specific version of
# expyfun. First we'd want to install that version (referenced by the
# commit number) to the proper directory. Here we'll use a temporary
# directory so we don't break any other code examples, but usually you'd
# want to do it in the experiment directory:
temp_dir = tempfile.mkdtemp()
download_version('c18133c', temp_dir)
# Now we would normally need to restart Python so the next ``import expyfun``
# call imported the proper version. We'd want to add an ``assert_version``
# call to the top of our script We can simulate that here just by
# launching a new Python instance in the ``temp_dir`` and using our assertion
# function:
cmd = """
from expyfun import assert_version
assert_version('c18133c')
"""
try:
run_subprocess(['python', '-c', cmd], cwd=temp_dir)
except Exception as exp:
print('Failure: {0}'.format(exp))
else:
print('Success!')
# Try modifying the commit number to something invalid, and you should
# see a failure.
| 28.468085
| 77
| 0.714499
|
881efd49efe4b3a3f842571475e5d49d9fd943eb
| 9,864
|
py
|
Python
|
pan_cnc/lib/db_utils.py
|
PaloAltoNetworks/pan-cnc
|
610d1aa8366d30314e3475cc3dd23449ce3bca00
|
[
"Apache-2.0"
] | 3
|
2019-03-13T14:59:59.000Z
|
2020-04-26T06:30:16.000Z
|
pan_cnc/lib/db_utils.py
|
PaloAltoNetworks/pan-cnc
|
610d1aa8366d30314e3475cc3dd23449ce3bca00
|
[
"Apache-2.0"
] | 29
|
2019-02-05T00:01:32.000Z
|
2021-03-22T14:10:07.000Z
|
pan_cnc/lib/db_utils.py
|
PaloAltoNetworks/pan-cnc
|
610d1aa8366d30314e3475cc3dd23449ce3bca00
|
[
"Apache-2.0"
] | 2
|
2019-08-31T13:54:53.000Z
|
2020-11-18T16:27:11.000Z
|
import json
import os
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from skilletlib import SkilletLoader
from cnc.models import RepositoryDetails
from cnc.models import Skillet
from pan_cnc.lib import cnc_utils
from pan_cnc.lib.exceptions import DuplicateSkilletException
def initialize_default_repositories(app_name) -> None:
"""
Find any configured repositories in the application configuration
and build db records for their respective skillets.
Called from the WelcomeView to ensure all default skillets are found and indexed
:return: None
"""
app_config = cnc_utils.get_app_config(app_name)
if not app_config:
return
if 'repositories' not in app_config or not isinstance(app_config['repositories'], list):
return
for r in app_config.get('repositories', []):
repo_details = dict()
repo_details.update(r)
initialize_repo(repo_details)
def initialize_repo(repo_detail: dict) -> list:
"""
Initialize a git repository object using the supplied repositories details dictionary object
:param repo_detail:
:return: list of Skillets found in that repository
"""
repo_name = repo_detail.get('name', '')
(repository_object, created) = RepositoryDetails.objects.get_or_create(
name=repo_name,
defaults={'url': repo_detail.get('url', ''),
'details_json': json.dumps(repo_detail)
}
)
if created:
print(f'Indexing new repository object: {repository_object.name}')
return refresh_skillets_from_repo(repo_name)
return load_skillets_from_repo(repo_name)
def load_skillets_from_repo(repo_name: str) -> list:
"""
returns a list of skillets from the repository as found in the db
:param repo_name: name of the repository to search
:return: list of skillet dictionary objects
"""
all_skillets = list()
try:
repo_object = RepositoryDetails.objects.get(name=repo_name)
repo_skillet_qs = repo_object.skillet_set.all()
for skillet in repo_skillet_qs:
all_skillets.append(json.loads(skillet.skillet_json))
return all_skillets
except ObjectDoesNotExist:
return all_skillets
except ValueError:
return all_skillets
def update_skillet_cache() -> None:
"""
Updates the 'all_snippets' key in the cnc cache. This gets called whenever a repository is initialized or updated
to ensure the legacy cache is always kept up to date
:return: None
"""
all_skillets = load_all_skillets(refresh=True)
# FIXME - this can and will break if ever more than one app tries to do this...
app_name = get_default_app_name()
# ensure everything gets removed!
cnc_utils.clear_long_term_cache(app_name)
cnc_utils.set_long_term_cached_value(app_name, 'all_snippets', all_skillets, -1)
# db_utils.load_all_skillets saves all_skillets under the 'cnc' app name, ensure this is updated here as well...
cnc_utils.set_long_term_cached_value('cnc', 'all_snippets', all_skillets, -1)
# remove it all!
def get_repository_details(repository_name: str) -> (dict, None):
"""
returns the details dict as loaded from the database record for this db
:param repository_name: name of the repository to find and return
:return: loaded dict or None if not found
"""
if RepositoryDetails.objects.filter(name=repository_name).exists():
try:
repo_db_record = RepositoryDetails.objects.get(name=repository_name)
return json.loads(repo_db_record.details_json)
except ValueError as ve:
print(ve)
return None
else:
return None
def update_repository_details(repo_name: str, repo_detail: dict) -> None:
"""
Update the repository details json object on the db record
:param repo_name: name of the repository object to update
:param repo_detail: dictionary of repository details includes branches, url, name, commits, etc
:return: None
"""
try:
repo_db_record = RepositoryDetails.objects.get(name=repo_name)
except ObjectDoesNotExist as odne:
print(f'Could not update non-existent db record for {repo_name}')
print(odne)
return None
try:
repo_db_record.details_json = json.dumps(repo_detail)
except ValueError as ve:
print(f'Could not update db record with malformed json: {ve}')
return None
repo_db_record.save()
def refresh_skillets_from_repo(repo_name: str) -> list:
all_skillets = list()
user_dir = os.path.expanduser('~/.pan_cnc')
app_name = get_default_app_name()
snippets_dir = os.path.join(user_dir, app_name, 'repositories')
repo_dir = os.path.join(snippets_dir, repo_name)
if not os.path.exists(repo_dir):
print(f'Repository {repo_dir} does not exist!')
return all_skillets
try:
repo_object = RepositoryDetails.objects.get(name=repo_name)
sl = SkilletLoader()
found_skillets = sl.load_all_skillets_from_dir(repo_dir)
for skillet_object in found_skillets:
skillet_name = skillet_object.name
(skillet_record, created) = Skillet.objects.get_or_create(
name=skillet_name,
defaults={
'skillet_json': json.dumps(skillet_object.skillet_dict),
'repository_id': repo_object.id,
}
)
if not created:
if skillet_record.repository_id == repo_object.id:
# check if skillet contents have been updated
found_skillet_json = json.dumps(skillet_object.skillet_dict)
if skillet_record.skillet_json != found_skillet_json:
skillet_record.skillet_json = found_skillet_json
skillet_record.save()
else:
print(f'Found existing skillet from another repository: {skillet_name}!!')
raise DuplicateSkilletException(
f'Refusing to import duplicate Skillet: {skillet_name}'
)
for db_skillet in repo_object.skillet_set.all():
found = False
for found_skillet in found_skillets:
if db_skillet.name == found_skillet.name:
found = True
continue
if not found:
db_skillet.delete()
update_skillet_cache()
return load_skillets_from_repo(repo_name)
except ObjectDoesNotExist:
return all_skillets
def refresh_skillets_from_all_repos() -> None:
"""
Finds all previously indexed repositories and re-indexes all skillets found there-in
This gets call from the /clear_context menu item to ensure all local changes are found and up to date
:return: None
"""
all_repos = RepositoryDetails.objects.all()
for repository in all_repos:
refresh_skillets_from_repo(repository.name)
def load_skillet_by_name(skillet_name: str) -> (dict, None):
try:
skillet = Skillet.objects.get(name=skillet_name)
return json.loads(skillet.skillet_json)
except ObjectDoesNotExist:
return None
except ValueError:
print('Could not parse Skillet metadata in load_skillet_by_name')
return None
def load_all_skillet_label_values(label_name):
labels_list = list()
skillets = load_all_skillets()
for skillet in skillets:
if 'labels' not in skillet:
continue
labels = skillet.get('labels', [])
for label_key in labels:
if label_key == label_name:
if type(labels[label_name]) is str:
label_value = labels[label_name]
if label_value not in labels_list:
labels_list.append(label_value)
elif type(labels[label_name]) is list:
for label_list_value in labels[label_name]:
if label_list_value not in labels_list:
labels_list.append(label_list_value)
return labels_list
def load_all_skillets(refresh=False) -> list:
"""
Returns a list of skillet dictionaries
:param refresh: Boolean flag whether to use the cache or force a cache refresh
:return: skillet dictionaries
"""
if refresh is False:
cached_skillets = cnc_utils.get_long_term_cached_value('cnc', 'all_snippets')
if cached_skillets is not None:
return cached_skillets
skillet_dicts = list()
skillets = Skillet.objects.all()
for skillet in skillets:
skillet_dicts.append(json.loads(skillet.skillet_json))
cnc_utils.set_long_term_cached_value('cnc', 'all_snippets', skillet_dicts, -1)
return skillet_dicts
def load_skillets_with_label(label_name, label_value):
filtered_skillets = list()
all_skillets = load_all_skillets()
for skillet in all_skillets:
if 'labels' in skillet and label_name in skillet['labels']:
if type(skillet['labels'][label_name]) is str:
if skillet['labels'][label_name] == label_value:
filtered_skillets.append(skillet)
elif type(skillet['labels'][label_name]) is list:
for label_list_value in skillet['labels'][label_name]:
if label_list_value == label_value:
filtered_skillets.append(skillet)
return filtered_skillets
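# Illustrative sketch (not part of the original module): the filter above expects skillet
# dictionaries shaped roughly like this hypothetical example, where a label value may be
# either a single string or a list of strings.
_EXAMPLE_SKILLET = {
    'name': 'example_skillet',
    'labels': {'collection': ['Example Collection'], 'order': '10'},
}
# load_skillets_with_label('collection', 'Example Collection') would keep this skillet.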
def get_default_app_name():
if len(settings.INSTALLED_APPS_CONFIG) != 1:
raise Exception('Cannot get default app configuration, please specify the app you need')
for k, v in settings.INSTALLED_APPS_CONFIG.items():
return k
| 32.662252
| 117
| 0.66322
|
70feca84ba5828dc64c0a3e2ad429bc825224d58
| 929
|
py
|
Python
|
manage.py
|
blowUA/mezz
|
caf909ad6dd48a61e735bbff7203573f0a61c0d7
|
[
"MIT"
] | 209
|
2015-02-06T02:24:22.000Z
|
2022-03-07T23:39:28.000Z
|
manage.py
|
blowUA/mezz
|
caf909ad6dd48a61e735bbff7203573f0a61c0d7
|
[
"MIT"
] | 193
|
2015-01-01T13:41:44.000Z
|
2020-10-02T18:41:06.000Z
|
manage.py
|
blowUA/mezz
|
caf909ad6dd48a61e735bbff7203573f0a61c0d7
|
[
"MIT"
] | 92
|
2015-03-04T11:13:55.000Z
|
2020-10-23T06:46:42.000Z
|
#!/usr/bin/env python
from __future__ import absolute_import, unicode_literals
import os
import sys
# Corrects some pathing issues in various contexts, such as cron jobs,
# and the project layout still being in Django 1.3 format.
from settings import PROJECT_ROOT, PROJECT_DIRNAME
sys.path.append(os.path.abspath(os.path.join(PROJECT_ROOT, "..")))
# Add the site ID CLI arg to the environment, which allows for the site
# used in any site related queries to be manually set for management
# commands.
for i, arg in enumerate(sys.argv):
if arg.startswith("--site"):
os.environ["MEZZANINE_SITE_ID"] = arg.split("=")[1]
sys.argv.pop(i)
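# For example (illustrative, not part of the original file):
#   python manage.py shell --site=2
# sets MEZZANINE_SITE_ID=2 before Django parses the remaining arguments.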
# Run Django.
if __name__ == "__main__":
settings_module = "%s.settings" % PROJECT_DIRNAME
os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_module)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 32.034483
| 71
| 0.74704
|
5e2e612d8f7d17695653a4ebd0713f8992acd6d2
| 2,567
|
py
|
Python
|
hw/ip/rom_ctrl/util/gen_vivado_mem_image.py
|
asb/opentitan
|
af68ff5041b10c81e97adc075a4d042f8ac7ab20
|
[
"Apache-2.0"
] | 2
|
2019-11-21T14:05:14.000Z
|
2020-07-10T12:40:54.000Z
|
hw/ip/rom_ctrl/util/gen_vivado_mem_image.py
|
asb/opentitan
|
af68ff5041b10c81e97adc075a4d042f8ac7ab20
|
[
"Apache-2.0"
] | 9
|
2019-11-08T00:20:21.000Z
|
2021-09-25T03:56:33.000Z
|
hw/ip/rom_ctrl/util/gen_vivado_mem_image.py
|
asb/opentitan
|
af68ff5041b10c81e97adc075a4d042f8ac7ab20
|
[
"Apache-2.0"
] | 1
|
2019-11-19T05:28:23.000Z
|
2019-11-19T05:28:23.000Z
|
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
'''Script for generating a splicable Vivado ROM image
This script takes a .vmem file as input and converts that into a format usable
by Vivado for splicing FPGA bitstreams via updatemem. For details on the
required file format, refer to UG898 (Chapter 7, "Using UpdateMEM to Update BIT
files with MMI and ELF Data"):
https://www.xilinx.com/support/documentation/sw_manuals/xilinx2020_2/ug898-vivado-embedded-design.pdf#page=165
Typical usage:
>>> ./gen_vivado_mem_image.py boot_rom.scr.32.vmem boot_rom.updatemem.mem
'''
import argparse
import sys
import math
import re
from mem import MemFile
def swap_bytes(width: int, orig: int, swap_nibbles: bool) -> int:
num_bytes = math.ceil(width / 8)
swapped = 0
for i in range(num_bytes):
byte_value = ((orig >> (i * 8)) & 0xFF)
if swap_nibbles:
byte_value = ((byte_value << 4) | (byte_value >> 4)) & 0xFF
swapped |= (byte_value << ((num_bytes - i - 1) * 8))
return swapped
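# Worked example (added for illustration, not part of the original script): byte order is
# reversed, and with swap_nibbles the two nibbles inside each byte are swapped as well.
def _swap_bytes_examples() -> None:  # pragma: no cover - documentation sketch
    assert swap_bytes(32, 0x12345678, swap_nibbles=False) == 0x78563412
    assert swap_bytes(32, 0x12345678, swap_nibbles=True) == 0x87654321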
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=argparse.FileType('rb'))
parser.add_argument('outfile', type=argparse.FileType('w'))
parser.add_argument('--swap-nibbles', dest='swap_nibbles', action='store_true')
args = parser.parse_args()
# Extract width from ROM file name.
match = re.search(r'([0-9]+).vmem', args.infile.name)
if not match:
raise ValueError('Cannot extract ROM word width from file name ' +
args.infile.name)
else:
width = int(match.group(1))
# Load the input vmem file.
vmem = MemFile.load_vmem(width, args.infile)
# OpenTitan vmem files should always contain one single contiguous chunk.
assert len(vmem.chunks) == 1
# Loop over all words, and:
# 1) Generate the address,
# 2) convert the endianness, and
# 3) write this to the output file.
addr_chars = 8
word_chars = math.ceil(width / 4)
for idx, word in enumerate(vmem.chunks[0].words):
# Generate the address.
addr = idx * math.ceil(width / 8)
# Convert endianness.
data = swap_bytes(width, word, args.swap_nibbles)
# Write to file.
toks = [f'@{addr:0{addr_chars}X}']
toks.append(f'{data:0{word_chars}X}')
args.outfile.write(' '.join(toks) + '\n')
return 0
if __name__ == '__main__':
sys.exit(main())
| 32.0875
| 110
| 0.661862
|
fe8b9714b2463db9fa34d02994fdcbfb35eb1cf4
| 13,066
|
py
|
Python
|
cirq/interop/quirk/cells/arithmetic_cells.py
|
BillGatesNephew/Cirq
|
fda14a5f6c65356dfabf8a5bcd599bf57e542041
|
[
"Apache-2.0"
] | null | null | null |
cirq/interop/quirk/cells/arithmetic_cells.py
|
BillGatesNephew/Cirq
|
fda14a5f6c65356dfabf8a5bcd599bf57e542041
|
[
"Apache-2.0"
] | null | null | null |
cirq/interop/quirk/cells/arithmetic_cells.py
|
BillGatesNephew/Cirq
|
fda14a5f6c65356dfabf8a5bcd599bf57e542041
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import (
Callable,
Optional,
Union,
Iterable,
Sequence,
Iterator,
Tuple,
Any,
cast,
List,
Dict,
TYPE_CHECKING,
)
from cirq import ops, value
from cirq.interop.quirk.cells.cell import Cell, CellMaker, CELL_SIZES
if TYPE_CHECKING:
import cirq
@value.value_equality
class QuirkArithmeticOperation(ops.ArithmeticOperation):
"""Applies arithmetic to a target and some inputs.
Implements Quirk-specific implicit effects like assuming that the presence
of an 'r' input implies modular arithmetic.
In Quirk, modular operations have no effect on values larger than the
modulus. This convention is used because unitarity forces *some* convention
on out-of-range values (they cannot simply disappear or raise exceptions),
and the simplest is to do nothing. This call handles ensuring that happens,
and ensuring the new target register value is normalized modulo the modulus.
"""
def __init__(
self,
identifier: str,
target: Sequence['cirq.Qid'],
inputs: Sequence[Union[Sequence['cirq.Qid'], int]],
):
"""
Args:
identifier: The quirk identifier string for this operation.
target: The target qubit register.
inputs: Qubit registers (or classical constants) that
determine what happens to the target.
"""
self.identifier = identifier
self.target: Tuple['cirq.Qid', ...] = tuple(target)
self.inputs: Tuple[Union[Sequence['cirq.Qid'], int], ...] = tuple(
e if isinstance(e, int) else tuple(e) for e in inputs
)
for input_register in self.inputs:
if isinstance(input_register, int):
continue
if set(self.target) & set(input_register):
raise ValueError(f'Overlapping registers: ' f'{self.target} {self.inputs}')
if self.operation.is_modular:
r = inputs[-1]
if isinstance(r, int):
over = r > 1 << len(target)
else:
over = len(cast(Sequence, r)) > len(target)
if over:
raise ValueError(
'Target too small for modulus.\n' f'Target: {target}\n' f'Modulus: {r}'
)
@property
def operation(self) -> '_QuirkArithmeticCallable':
return ARITHMETIC_OP_TABLE[self.identifier]
def _value_equality_values_(self) -> Any:
return self.identifier, self.target, self.inputs
def registers(self) -> Sequence[Union[int, Sequence['cirq.Qid']]]:
return [self.target, *self.inputs]
def with_registers(
self, *new_registers: Union[int, Sequence['cirq.Qid']]
) -> 'QuirkArithmeticOperation':
if len(new_registers) != len(self.inputs) + 1:
raise ValueError(
'Wrong number of registers.\n'
f'New registers: {repr(new_registers)}\n'
f'Operation: {repr(self)}'
)
if isinstance(new_registers[0], int):
raise ValueError(
'The first register is the mutable target. '
'It must be a list of qubits, not the constant '
f'{new_registers[0]}.'
)
return QuirkArithmeticOperation(self.identifier, new_registers[0], new_registers[1:])
def apply(self, *registers: int) -> Union[int, Iterable[int]]:
return self.operation(*registers)
def _circuit_diagram_info_(self, args: 'cirq.CircuitDiagramInfoArgs') -> List[str]:
lettered_args = list(zip(self.operation.letters, self.inputs))
result: List[str] = []
# Target register labels.
consts = ''.join(
f',{letter}={reg}' for letter, reg in lettered_args if isinstance(reg, int)
)
result.append(f'Quirk({self.identifier}{consts})')
result.extend(f'#{i}' for i in range(2, len(self.target) + 1))
# Input register labels.
for letter, reg in lettered_args:
if not isinstance(reg, int):
result.extend(f'{letter.upper()}{i}' for i in range(len(cast(Sequence, reg))))
return result
def __repr__(self) -> str:
return (
'cirq.interop.quirk.QuirkArithmeticOperation(\n'
f' {repr(self.identifier)},\n'
f' target={repr(self.target)},\n'
f' inputs={_indented_list_lines_repr(self.inputs)},\n'
')'
)
_IntsToIntCallable = Union[
Callable[[int], int],
Callable[[int, int], int],
Callable[[int, int, int], int],
Callable[[int, int, int, int], int],
]
class _QuirkArithmeticCallable:
"""A callable with parameter-name-dependent behavior."""
def __init__(self, func: _IntsToIntCallable):
"""
Args:
func: Maps target int to its output value based on other input ints.
"""
self.func = func
# The lambda parameter names indicate the input letter to match.
letters: List[str] = list(inspect.signature(self.func).parameters)
# The target is always first, and should be ignored.
assert letters and letters[0] == 'x'
self.letters = tuple(letters[1:])
# The last argument is the modulus r for modular arithmetic.
self.is_modular = letters[-1] == 'r'
def __call__(self, *args, **kwargs):
assert not kwargs
if self.is_modular:
if args[0] >= args[-1]:
return args[0]
result = self.func(*args)
if self.is_modular:
result %= args[-1]
return result
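# Illustrative sketch of the modular convention described above; the callable
# here is a throwaway demo and is not registered in ARITHMETIC_OP_TABLE.
def _demo_modular_convention() -> None:
    plus_a_mod_r = _QuirkArithmeticCallable(lambda x, a, r: x + a)
    # In-range target: (2 + 3) mod 5 == 0.
    assert plus_a_mod_r(2, 3, 5) == 0
    # Out-of-range targets (x >= r) pass through unchanged, preserving unitarity.
    assert plus_a_mod_r(7, 3, 5) == 7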
@value.value_equality
class ArithmeticCell(Cell):
def __init__(
self,
identifier: str,
target: Sequence['cirq.Qid'],
inputs: Sequence[Union[None, Sequence['cirq.Qid'], int]],
):
self.identifier = identifier
self.target = tuple(target)
self.inputs = tuple(inputs)
def gate_count(self) -> int:
return 1
def _value_equality_values_(self) -> Any:
return self.identifier, self.target, self.inputs
def __repr__(self) -> str:
return (
f'cirq.interop.quirk.cells.arithmetic_cells.ArithmeticCell('
f'\n {self.identifier!r},'
f'\n {self.target!r},'
f'\n {self.inputs!r})'
)
def with_line_qubits_mapped_to(self, qubits: List['cirq.Qid']) -> 'Cell':
return ArithmeticCell(
identifier=self.identifier,
target=Cell._replace_qubits(self.target, qubits),
inputs=[
e if e is None or isinstance(e, int) else Cell._replace_qubits(e, qubits)
for e in self.inputs
],
)
@property
def operation(self):
return ARITHMETIC_OP_TABLE[self.identifier]
def with_input(
self, letter: str, register: Union[Sequence['cirq.Qid'], int]
) -> 'ArithmeticCell':
new_inputs = [
reg if letter != reg_letter else register
for reg, reg_letter in zip(self.inputs, self.operation.letters)
]
return ArithmeticCell(self.identifier, self.target, new_inputs)
def operations(self) -> 'cirq.OP_TREE':
missing_inputs = [
letter for reg, letter in zip(self.inputs, self.operation.letters) if reg is None
]
if missing_inputs:
raise ValueError(f'Missing input: {sorted(missing_inputs)}')
return QuirkArithmeticOperation(
self.identifier,
self.target,
cast(Sequence[Union[Sequence['cirq.Qid'], int]], self.inputs),
)
def _indented_list_lines_repr(items: Sequence[Any]) -> str:
block = '\n'.join([repr(op) + ',' for op in items])
indented = ' ' + '\n '.join(block.split('\n'))
return '[\n{}\n ]'.format(indented)
def _generate_helper() -> Iterator[CellMaker]:
# Comparisons.
yield _arithmetic_gate("^A<B", 1, lambda x, a, b: x ^ int(a < b))
yield _arithmetic_gate("^A>B", 1, lambda x, a, b: x ^ int(a > b))
yield _arithmetic_gate("^A<=B", 1, lambda x, a, b: x ^ int(a <= b))
yield _arithmetic_gate("^A>=B", 1, lambda x, a, b: x ^ int(a >= b))
yield _arithmetic_gate("^A=B", 1, lambda x, a, b: x ^ int(a == b))
yield _arithmetic_gate("^A!=B", 1, lambda x, a, b: x ^ int(a != b))
# Addition.
yield from _arithmetic_family("inc", lambda x: x + 1)
yield from _arithmetic_family("dec", lambda x: x - 1)
yield from _arithmetic_family("+=A", lambda x, a: x + a)
yield from _arithmetic_family("-=A", lambda x, a: x - a)
# Multiply-accumulate.
yield from _arithmetic_family("+=AA", lambda x, a: x + a * a)
yield from _arithmetic_family("-=AA", lambda x, a: x - a * a)
yield from _arithmetic_family("+=AB", lambda x, a, b: x + a * b)
yield from _arithmetic_family("-=AB", lambda x, a, b: x - a * b)
# Misc.
yield from _arithmetic_family("+cntA", lambda x, a: x + _popcnt(a))
yield from _arithmetic_family("-cntA", lambda x, a: x - _popcnt(a))
yield from _arithmetic_family("^=A", lambda x, a: x ^ a)
yield from _arithmetic_family("Flip<A", lambda x, a: a - x - 1 if x < a else x)
# Multiplication.
yield from _arithmetic_family("*A", lambda x, a: x * a if a & 1 else x)
yield from _size_dependent_arithmetic_family(
"/A", lambda n: lambda x, a: x * _mod_inv_else_1(a, 1 << n)
)
# Modular addition.
yield from _arithmetic_family("incmodR", lambda x, r: x + 1)
yield from _arithmetic_family("decmodR", lambda x, r: x - 1)
yield from _arithmetic_family("+AmodR", lambda x, a, r: x + a)
yield from _arithmetic_family("-AmodR", lambda x, a, r: x - a)
# Modular multiply-accumulate.
yield from _arithmetic_family("+ABmodR", lambda x, a, b, r: x + a * b)
yield from _arithmetic_family("-ABmodR", lambda x, a, b, r: x - a * b)
# Modular multiply.
yield from _arithmetic_family("*AmodR", lambda x, a, r: x * _invertible_else_1(a, r))
yield from _arithmetic_family("/AmodR", lambda x, a, r: x * _mod_inv_else_1(a, r))
yield from _arithmetic_family(
"*BToAmodR", lambda x, a, b, r: x * pow(_invertible_else_1(b, r), a, r)
)
yield from _arithmetic_family(
"/BToAmodR", lambda x, a, b, r: x * pow(_mod_inv_else_1(b, r), a, r)
)
def _extended_gcd(a: int, b: int) -> Tuple[int, int, int]:
if a == 0:
return b, 0, 1
gcd, y, x = _extended_gcd(b % a, a)
return gcd, x - (b // a) * y, y
def _invertible_else_1(a: int, m: int) -> int:
"""Returns `a` if `a` has a multiplicative inverse, else 1."""
i = _mod_inv_else_1(a, m)
return a if i != 1 else i
def _mod_inv_else_1(a: int, m: int) -> int:
"""Returns `a**-1` if `a` has a multiplicative inverse, else 1."""
if m == 0:
return 1
gcd, x, _ = _extended_gcd(a % m, m)
if gcd != 1:
return 1
return x % m
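# Illustrative sketch with assumed small numbers: worked values for the
# modular-inverse helpers defined above.
def _demo_mod_inverse() -> None:
    # 3 * 5 == 15 == 1 (mod 7), so 3 is invertible mod 7 with inverse 5.
    assert _mod_inv_else_1(3, 7) == 5
    assert _invertible_else_1(3, 7) == 3
    # gcd(2, 6) != 1, so 2 has no inverse mod 6 and both helpers fall back to 1.
    assert _mod_inv_else_1(2, 6) == 1
    assert _invertible_else_1(2, 6) == 1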
def _popcnt(a: int) -> int:
"""Returns the Hamming weight of the given non-negative integer."""
t = 0
while a > 0:
a &= a - 1
t += 1
return t
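# Illustrative sketch: _popcnt clears the lowest set bit on each pass, so an
# assumed input of 0b1011 finishes in three iterations.
def _demo_popcnt() -> None:
    assert _popcnt(0b1011) == 3
    assert _popcnt(0) == 0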
def _arithmetic_family(identifier_prefix: str, func: _IntsToIntCallable) -> Iterator[CellMaker]:
yield from _size_dependent_arithmetic_family(identifier_prefix, size_to_func=lambda _: func)
def _size_dependent_arithmetic_family(
identifier_prefix: str,
size_to_func: Callable[[int], _IntsToIntCallable],
) -> Iterator[CellMaker]:
for i in CELL_SIZES:
yield _arithmetic_gate(identifier_prefix + str(i), size=i, func=size_to_func(i))
def _arithmetic_gate(identifier: str, size: int, func: _IntsToIntCallable) -> CellMaker:
operation = _QuirkArithmeticCallable(func)
assert identifier not in ARITHMETIC_OP_TABLE
ARITHMETIC_OP_TABLE[identifier] = operation
return CellMaker(
identifier=identifier,
size=size,
maker=lambda args: ArithmeticCell(
identifier=identifier, target=args.qubits, inputs=[None] * len(operation.letters)
),
)
ARITHMETIC_OP_TABLE: Dict[str, _QuirkArithmeticCallable] = {}
# Caching is necessary in order to avoid overwriting entries in the table.
_cached_cells: Optional[Tuple[CellMaker, ...]] = None
def generate_all_arithmetic_cell_makers() -> Iterable[CellMaker]:
global _cached_cells
if _cached_cells is None:
_cached_cells = tuple(_generate_helper())
return _cached_cells
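# Illustrative sketch: repeated calls reuse the cached tuple, which is what
# keeps ARITHMETIC_OP_TABLE from being repopulated (and its uniqueness assert
# from re-firing) on a second call.
def _demo_cell_maker_cache() -> None:
    first = generate_all_arithmetic_cell_makers()
    second = generate_all_arithmetic_cell_makers()
    assert first is second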
| 34.657825
| 96
| 0.61595
|
6a5b45a8d88cf31d625465e56eeef77bf441964b
| 423
|
py
|
Python
|
care/facility/migrations/0278_asset_not_working_reason.py
|
arpancodes/care
|
e4f295c29f70c7a356c0ef25f7326579992ef0cb
|
[
"MIT"
] | 189
|
2020-03-17T17:18:58.000Z
|
2022-02-22T09:49:45.000Z
|
care/facility/migrations/0278_asset_not_working_reason.py
|
arpancodes/care
|
e4f295c29f70c7a356c0ef25f7326579992ef0cb
|
[
"MIT"
] | 598
|
2020-03-19T21:22:09.000Z
|
2022-03-30T05:08:37.000Z
|
care/facility/migrations/0278_asset_not_working_reason.py
|
arpancodes/care
|
e4f295c29f70c7a356c0ef25f7326579992ef0cb
|
[
"MIT"
] | 159
|
2020-03-19T18:45:56.000Z
|
2022-03-17T13:23:12.000Z
|
# Generated by Django 2.2.11 on 2021-10-12 08:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facility', '0277_merge_20211011_2103'),
]
operations = [
migrations.AddField(
model_name='asset',
name='not_working_reason',
field=models.CharField(blank=True, max_length=1024, null=True),
),
]
| 22.263158
| 75
| 0.619385
|
3587575503d33a355081b85b32d67cceb6335b66
| 13,605
|
py
|
Python
|
pandas/core/arrays/integer.py
|
dycloud-chan/pandas
|
39ccb352b53e9b5b694c4f7f044774f9c3677e98
|
[
"BSD-3-Clause"
] | 3
|
2018-04-24T13:31:51.000Z
|
2019-07-09T07:31:43.000Z
|
pandas/core/arrays/integer.py
|
dycloud-chan/pandas
|
39ccb352b53e9b5b694c4f7f044774f9c3677e98
|
[
"BSD-3-Clause"
] | 4
|
2019-12-14T16:32:46.000Z
|
2022-02-12T00:32:28.000Z
|
pandas/core/arrays/integer.py
|
dycloud-chan/pandas
|
39ccb352b53e9b5b694c4f7f044774f9c3677e98
|
[
"BSD-3-Clause"
] | 5
|
2018-04-24T13:31:56.000Z
|
2021-10-21T05:06:23.000Z
|
from __future__ import annotations
from typing import overload
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
from pandas._typing import (
ArrayLike,
AstypeArg,
Dtype,
DtypeObj,
npt,
)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.base import (
ExtensionDtype,
register_extension_dtype,
)
from pandas.core.dtypes.common import (
is_bool_dtype,
is_datetime64_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_string_dtype,
pandas_dtype,
)
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.masked import BaseMaskedDtype
from pandas.core.arrays.numeric import (
NumericArray,
NumericDtype,
)
from pandas.core.tools.numeric import to_numeric
class _IntegerDtype(NumericDtype):
"""
An ExtensionDtype to hold a single size & kind of integer dtype.
These specific implementations are subclasses of the non-public
_IntegerDtype. For example we have Int8Dtype to represent signed int 8s.
The attributes name & type are set when these subclasses are created.
"""
def __repr__(self) -> str:
sign = "U" if self.is_unsigned_integer else ""
return f"{sign}Int{8 * self.itemsize}Dtype()"
@cache_readonly
def is_signed_integer(self) -> bool:
return self.kind == "i"
@cache_readonly
def is_unsigned_integer(self) -> bool:
return self.kind == "u"
@property
def _is_numeric(self) -> bool:
return True
@classmethod
def construct_array_type(cls) -> type[IntegerArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return IntegerArray
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
# we only handle nullable EA dtypes and numeric numpy dtypes
if not all(
isinstance(t, BaseMaskedDtype)
or (
isinstance(t, np.dtype)
and (np.issubdtype(t, np.number) or np.issubdtype(t, np.bool_))
)
for t in dtypes
):
return None
np_dtype = np.find_common_type(
# error: List comprehension has incompatible type List[Union[Any,
# dtype, ExtensionDtype]]; expected List[Union[dtype, None, type,
# _SupportsDtype, str, Tuple[Any, Union[int, Sequence[int]]],
# List[Any], _DtypeDict, Tuple[Any, Any]]]
[
t.numpy_dtype # type: ignore[misc]
if isinstance(t, BaseMaskedDtype)
else t
for t in dtypes
],
[],
)
if np.issubdtype(np_dtype, np.integer):
return INT_STR_TO_DTYPE[str(np_dtype)]
elif np.issubdtype(np_dtype, np.floating):
from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE
return FLOAT_STR_TO_DTYPE[str(np_dtype)]
return None
def safe_cast(values, dtype, copy: bool):
"""
Safely cast the values to the dtype if they
are equivalent, meaning floats must be equivalent to the
ints.
"""
try:
return values.astype(dtype, casting="safe", copy=copy)
except TypeError as err:
casted = values.astype(dtype, copy=copy)
if (casted == values).all():
return casted
raise TypeError(
f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
) from err
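# Illustrative sketch (assumed inputs, not part of pandas): how safe_cast()
# treats equivalent versus lossy floats.
def _demo_safe_cast() -> None:
    # Whole-number floats cast cleanly to the requested integer dtype.
    out = safe_cast(np.array([1.0, 2.0]), np.int64, copy=False)
    assert list(out) == [1, 2] and out.dtype == np.int64
    # A lossy value such as 1.5 raises instead of silently truncating.
    try:
        safe_cast(np.array([1.5]), np.int64, copy=False)
    except TypeError:
        pass
    else:
        raise AssertionError("expected TypeError for non-equivalent floats")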
def coerce_to_array(
values, dtype, mask=None, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask.
Parameters
----------
values : 1D list-like
dtype : integer dtype
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
# if values is integer numpy array, preserve its dtype
if dtype is None and hasattr(values, "dtype"):
if is_integer_dtype(values.dtype):
dtype = values.dtype
if dtype is not None:
if isinstance(dtype, str) and (
dtype.startswith("Int") or dtype.startswith("UInt")
):
# Avoid DeprecationWarning from NumPy about np.dtype("Int64")
# https://github.com/numpy/numpy/pull/7476
dtype = dtype.lower()
if not issubclass(type(dtype), _IntegerDtype):
try:
dtype = INT_STR_TO_DTYPE[str(np.dtype(dtype))]
except KeyError as err:
raise ValueError(f"invalid dtype specified {dtype}") from err
if isinstance(values, IntegerArray):
values, mask = values._data, values._mask
if dtype is not None:
values = values.astype(dtype.numpy_dtype, copy=False)
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
values = np.array(values, copy=copy)
inferred_type = None
if is_object_dtype(values) or is_string_dtype(values):
inferred_type = lib.infer_dtype(values, skipna=True)
if inferred_type == "empty":
pass
elif inferred_type not in [
"floating",
"integer",
"mixed-integer",
"integer-na",
"mixed-integer-float",
"string",
"unicode",
]:
raise TypeError(f"{values.dtype} cannot be converted to an IntegerDtype")
elif is_bool_dtype(values) and is_integer_dtype(dtype):
values = np.array(values, dtype=int, copy=copy)
elif not (is_integer_dtype(values) or is_float_dtype(values)):
raise TypeError(f"{values.dtype} cannot be converted to an IntegerDtype")
if values.ndim != 1:
raise TypeError("values must be a 1D list-like")
if mask is None:
mask = libmissing.is_numeric_na(values)
else:
assert len(mask) == len(values)
if mask.ndim != 1:
raise TypeError("mask must be a 1D list-like")
# infer dtype if needed
if dtype is None:
dtype = np.dtype("int64")
else:
dtype = dtype.type
# if we are float, let's make sure that we can
# safely cast
# we copy as need to coerce here
if mask.any():
values = values.copy()
values[mask] = 1
if inferred_type in ("string", "unicode"):
# casts from str are always safe since they raise
# a ValueError if the str cannot be parsed into an int
values = values.astype(dtype, copy=copy)
else:
values = safe_cast(values, dtype, copy=False)
return values, mask
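# Illustrative sketch (assumed inputs, not part of pandas): coercing a list
# that contains a missing value into the (values, mask) pair described above.
def _demo_coerce_to_array() -> None:
    values, mask = coerce_to_array([1, 2, None], dtype="Int64")
    # The payload is a plain int64 ndarray; the NA is recorded only in the mask.
    assert values.dtype == np.int64
    assert list(mask) == [False, False, True]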
class IntegerArray(NumericArray):
"""
Array of integer (optional missing) values.
.. versionchanged:: 1.0.0
Now uses :attr:`pandas.NA` as the missing value rather
than :attr:`numpy.nan`.
.. warning::
IntegerArray is currently experimental, and its API or internal
implementation may change without warning.
We represent an IntegerArray with 2 numpy arrays:
- data: contains a numpy integer array of the appropriate dtype
- mask: a boolean array holding a mask on the data, True is missing
To construct an IntegerArray from generic array-like input, use
:func:`pandas.array` with one of the integer dtypes (see examples).
See :ref:`integer_na` for more.
Parameters
----------
values : numpy.ndarray
A 1-d integer-dtype array.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values.
copy : bool, default False
Whether to copy the `values` and `mask`.
Attributes
----------
None
Methods
-------
None
Returns
-------
IntegerArray
Examples
--------
Create an IntegerArray with :func:`pandas.array`.
>>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
>>> int_array
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: Int32
String aliases for the dtypes are also available. They are capitalized.
>>> pd.array([1, None, 3], dtype='Int32')
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: Int32
>>> pd.array([1, None, 3], dtype='UInt16')
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: UInt16
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = 1
# Fill values used for any/all
_truthy_value = 1
_falsey_value = 0
@cache_readonly
def dtype(self) -> _IntegerDtype:
return INT_STR_TO_DTYPE[str(self._data.dtype)]
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype.kind in ["i", "u"]):
raise TypeError(
"values should be integer numpy array. Use "
"the 'pd.array' function instead"
)
super().__init__(values, mask, copy=copy)
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
) -> IntegerArray:
values, mask = coerce_to_array(scalars, dtype=dtype, copy=copy)
return IntegerArray(values, mask)
@classmethod
def _from_sequence_of_strings(
cls, strings, *, dtype: Dtype | None = None, copy: bool = False
) -> IntegerArray:
scalars = to_numeric(strings, errors="raise")
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
def _coerce_to_array(self, value) -> tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value, dtype=self.dtype)
@overload
def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
...
@overload
def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
...
@overload
def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
...
def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
"""
Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
ndarray or ExtensionArray
NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
if incompatible type with an IntegerDtype, equivalent of same_kind
casting
"""
dtype = pandas_dtype(dtype)
if isinstance(dtype, ExtensionDtype):
return super().astype(dtype, copy=copy)
na_value: float | np.datetime64 | lib.NoDefault
# coerce
if is_float_dtype(dtype):
# In astype, we consider dtype=float to also mean na_value=np.nan
na_value = np.nan
elif is_datetime64_dtype(dtype):
na_value = np.datetime64("NaT")
else:
na_value = lib.no_default
return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)
def _values_for_argsort(self) -> np.ndarray:
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort : Return the indices that would sort this array.
"""
data = self._data.copy()
if self._mask.any():
data[self._mask] = data.min() - 1
return data
_dtype_docstring = """
An ExtensionDtype for {dtype} integer data.
.. versionchanged:: 1.0.0
Now uses :attr:`pandas.NA` as its missing value,
rather than :attr:`numpy.nan`.
Attributes
----------
None
Methods
-------
None
"""
# create the Dtype
@register_extension_dtype
class Int8Dtype(_IntegerDtype):
type = np.int8
name = "Int8"
__doc__ = _dtype_docstring.format(dtype="int8")
@register_extension_dtype
class Int16Dtype(_IntegerDtype):
type = np.int16
name = "Int16"
__doc__ = _dtype_docstring.format(dtype="int16")
@register_extension_dtype
class Int32Dtype(_IntegerDtype):
type = np.int32
name = "Int32"
__doc__ = _dtype_docstring.format(dtype="int32")
@register_extension_dtype
class Int64Dtype(_IntegerDtype):
type = np.int64
name = "Int64"
__doc__ = _dtype_docstring.format(dtype="int64")
@register_extension_dtype
class UInt8Dtype(_IntegerDtype):
type = np.uint8
name = "UInt8"
__doc__ = _dtype_docstring.format(dtype="uint8")
@register_extension_dtype
class UInt16Dtype(_IntegerDtype):
type = np.uint16
name = "UInt16"
__doc__ = _dtype_docstring.format(dtype="uint16")
@register_extension_dtype
class UInt32Dtype(_IntegerDtype):
type = np.uint32
name = "UInt32"
__doc__ = _dtype_docstring.format(dtype="uint32")
@register_extension_dtype
class UInt64Dtype(_IntegerDtype):
type = np.uint64
name = "UInt64"
__doc__ = _dtype_docstring.format(dtype="uint64")
INT_STR_TO_DTYPE: dict[str, _IntegerDtype] = {
"int8": Int8Dtype(),
"int16": Int16Dtype(),
"int32": Int32Dtype(),
"int64": Int64Dtype(),
"uint8": UInt8Dtype(),
"uint16": UInt16Dtype(),
"uint32": UInt32Dtype(),
"uint64": UInt64Dtype(),
}
| 27.319277
| 85
| 0.616538
|
5643dcbf19a216b5183595dc75eb51b7aa440352
| 19,753
|
py
|
Python
|
ca_timers.py
|
wadeguthrie/combat-accountant
|
e6a2140baafe71191cf68e01390b1f91446e17aa
|
[
"Apache-2.0"
] | null | null | null |
ca_timers.py
|
wadeguthrie/combat-accountant
|
e6a2140baafe71191cf68e01390b1f91446e17aa
|
[
"Apache-2.0"
] | 58
|
2019-10-16T21:48:37.000Z
|
2021-09-08T22:33:22.000Z
|
ca_timers.py
|
wadeguthrie/combat-accountant
|
e6a2140baafe71191cf68e01390b1f91446e17aa
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python
import copy
import curses
class Timer(object):
'''
    Embodies a timer that counts down with fight rounds. There is optional
    text associated with it (shown while the timer is counting down), and
    there are optional actions for when the timer actually reaches zero. This
    object is built around data that is intended to be kept in the Game File
    data file, but that is not strictly required for it to work.
'''
round_count_string = '%d Rnds: ' # assume rounds takes same space as '%d'
len_timer_leader = len(round_count_string)
# time removed from an announcement timer so that it'll go off at the
# beginning of a round rather than the end
announcement_margin = 0.1
def __init__(self,
details # dict from the Game File, contains timer's info
):
self.details = details # This needs to actually be from the Game File
self.__complete_me()
def decrement(self):
self.details['rounds'] -= 1
def fire(self,
owner, # ThingsInFight object to receive timer action
window_manager # GmWindowManager object -- for display
):
''' Fires the timer. '''
# If there's a timer to be added back into the list, this will be
# used to return it
result = None
if 'state' in self.details['actions']:
owner.details['state'] = self.details['actions']['state']
if 'announcement' in self.details['actions']:
window_manager.display_window(
('Timer Fired for %s' % self.details['parent-name']),
[[{'text': self.details['actions']['announcement'],
'mode': curses.A_NORMAL}]])
if 'timer' in self.details['actions']:
result = self.details['actions']['timer']
# TODO (eventually): implement this (haven't yet figured out where
# |ruleset| is coming from)
#if 'action' in self.details['actions']:
# ruleset.do_action(owner,
# self.details['actions']['action'],
# None #fight_handler
# )
return result
def from_pieces(self,
pieces, # { 'parent-name': <text>, string describing the
# thing calling the timer
# 'rounds': <number>, rounds until timer
# fires (3.0 rounds
# fires at end of 3
# rounds; 2.9 rounds
# fires at beginning of
# 3 rounds).
# 'string': <text> or [<text>, <text>, ...],
# string to display (in
# fighter's notes) while
# timer is running
# 'actions': {'state': <text>,
# string describing new
# state of fighter (see
# Fighter.conscious_map)
# 'announcement': <text>},
# string to display (in
# its own window) when
# timer fires
# }
):
'''
Creates a new timer from scratch (rather than from data that's already
in the Game File).
'''
self.details = copy.deepcopy(pieces)
self.__complete_me()
def get_description(self):
'''
Returns a long description of the timer to show the user.
'''
result = [] # List of strings, one per line of output
this_line = []
rounds = self.details['rounds']
if ('announcement' in self.details['actions'] and
self.details['actions']['announcement'] is not None):
# Add back in the little bit we shave off of an announcement so
# that the timer will announce as the creature's round starts
# rather than at the end.
rounds += Timer.announcement_margin
        round_count_string = Timer.round_count_string % rounds
this_line.append(round_count_string)
if 'announcement' in self.details['actions']:
this_line.append('[%s]' % self.details['actions']['announcement'])
result.append(''.join(this_line))
this_line = []
if ('state' in self.details['actions'] and
self.details['actions']['state'] is not None):
this_line.append('<%s>' % self.details['actions']['state'])
result.append(''.join(this_line))
this_line = []
if ('string' in self.details and self.details['string'] is not None
and len(self.details['string']) > 0):
if type(self.details['string']) is list:
for substring in self.details['string']:
this_line.append('%s' % (substring))
result.append(''.join(this_line))
this_line = []
else:
this_line.append('%s' % (self.details['string']))
result.append(''.join(this_line))
this_line = []
if len(this_line) > 0:
this_line.append('<<UNNAMED TIMER>>')
result.append(''.join(this_line))
return result
def get_one_line_description(self):
'''
        Returns a short description of the timer to show the user.
'''
this_line = []
rounds = self.details['rounds']
if ('announcement' in self.details['actions'] and
self.details['actions']['announcement'] is not None):
# Add back in the little bit we shave off of an announcement so
# that the timer will announce as the creature's round starts
# rather than at the end.
rounds += Timer.announcement_margin
        round_count_string = Timer.round_count_string % rounds
this_line.append(round_count_string)
needs_headline = True
if 'announcement' in self.details['actions']:
this_line.append('[%s]' %
self.details['actions']['announcement'][0])
needs_headline = False
if ('string' in self.details and self.details['string'] is not None
and len(self.details['string']) > 0):
if type(self.details['string']) is list:
this_line.append('%s' % (self.details['string'][0]))
else:
this_line.append('%s' % (self.details['string']))
needs_headline = False
if needs_headline:
this_line.append('<<UNNAMED TIMER>>')
return ' '.join(this_line)
def mark_owner_as_busy(self,
is_busy=True):
self.details['busy'] = is_busy
def __complete_me(self):
'''
Fills-in any missing parts of the timer.
'''
if self.details is None:
self.details = {}
if 'parent-name' not in self.details:
self.details['parent-name'] = '<< Unknown Parent >>'
if 'busy' not in self.details:
self.mark_owner_as_busy(is_busy=False)
if 'rounds' not in self.details:
self.details['rounds'] = 1
if 'actions' not in self.details:
self.details['actions'] = {}
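# Illustrative sketch: building a Timer from pieces and counting it down. The
# field values are demo assumptions, not data from any real Game File.
def _demo_timer_countdown():
    timer = Timer(None)
    timer.from_pieces({'parent-name': 'Manny',
                       'rounds': 2,
                       'string': 'Aiming',
                       'actions': {'announcement': 'Shot lined up'}})
    timer.decrement()   # 1 round left
    timer.decrement()   # 0 rounds left; a Timers list would now fire it
    return timer.details['rounds']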
class TimersWidget(object):
'''
    Consolidated GUI widget for creating timers.
'''
def __init__(self,
timers, # Timers object
window_manager # GmWindowManager object for menus and error
# reporting
):
self.__timers = timers
self.__window_manager = window_manager
def make_timer(self,
timer_recipient_name # string
):
'''
Makes a timer object and adds it to the Timers list.
Returns: nothing
'''
timer_dict = self.make_timer_dict(timer_recipient_name)
if timer_dict is not None:
timer_obj = Timer(None)
timer_obj.from_pieces(timer_dict)
self.__timers.add(timer_obj)
def make_timer_dict(self,
timer_recipient_name, # string
):
'''
Builds the data dictionary describing a new timer. Asks all the
questions necessary to provide the dict with the data it needs.
Returns: the dict for the new timer.
'''
# How long is the timer?
title = 'Rounds To Wait...'
height = 1
width = len(title)
adj = self.__window_manager.input_box_number(height, width, title)
if adj is None:
return None
rounds = adj
if rounds <= 0:
return None
# What does the timer do?
keep_asking_menu = [('yes', True), ('no', False)]
param = {'announcement': None,
'continuous_message': None,
'busy': None,
'state': None}
actions_menu = [('message (continuous)',
{'doit': self.__continuous_message_action,
'param': param}),
('announcement',
{'doit': self.__announcement_action,
'param': param}),
('mark busy',
{'doit': self.__mark_busy_action,
'param': param}),
# ('state change',
# {'doit': self.__new_state_action,
# 'param': param})
]
keep_asking = True
which_action = 0
while keep_asking:
result, which_action = self.__window_manager.menu('Timer Action',
actions_menu,
which_action)
if result is None:
return None
keep_asking, ignore = self.__window_manager.menu(
'Pick More Actions', keep_asking_menu)
if keep_asking is None:
keep_asking = True
# Install the timer.
if 'announcement' in param and param['announcement'] is not None:
# Shave a little off the time so that the timer will announce
            # as its round starts rather than at the end.
rounds -= Timer.announcement_margin
timer_dict = {'parent-name': timer_recipient_name,
'rounds': rounds,
'string': param['continuous_message'],
'actions': {}}
if param['announcement'] is not None:
timer_dict['actions']['announcement'] = param['announcement']
if param['state'] is not None:
timer_dict['actions']['state'] = param['state']
if param['busy'] is not None:
timer_dict['busy'] = param['busy']
return timer_dict
# Private and Protected Methods
def __announcement_action(self,
param # dict passed by the menu handler --
# contains the announcement text
# associated with the timer
):
'''
Handler for the timer's 'what do I do with this timer' entry.
Sets the timer up to display a window containing text when the timer
fires.
Returns: True -- just so it's not None
'''
title = 'Enter the Announcement'
height = 1
width = (curses.COLS - 4) - Timer.len_timer_leader
announcement = self.__window_manager.input_box(height, width, title)
if announcement is not None and len(announcement) <= 0:
announcement = None
param['announcement'] = announcement
return True
def __continuous_message_action(self,
param # dict passed by the menu
# handler -- contains the text
# associated with the timer
):
'''
Handler for the timer's 'what do I do with this timer' entry.
Sets the timer up to display the provided text when the timer is
displayed.
Returns: True -- just so it's not None
'''
title = 'Enter the Continuous Message'
height = 1
width = (curses.COLS - 4) - Timer.len_timer_leader
string = self.__window_manager.input_box(height, width, title)
if string is not None and len(string) <= 0:
string = None
param['continuous_message'] = string
return True
def __mark_busy_action(self,
param # dict passed by the menu handler --
# contains the announcement text
# associated with the timer
):
'''
Handler for the timer's 'what do I do with this timer' entry.
Sets the timer up to mark the player as busy.
Returns: True -- just so it's not None
'''
param['busy'] = True
return True
# def __new_state_action(self,
# param # dict passed by the menu handler --
# # contains the destination state of the
# # Fighter associated with the timer
# ):
# '''
# Handler for the timer's 'what do I do with this timer' entry.
    #     Sets the timer up to change the consciousness state of its associated
# object when the timer fires.
# Returns: True -- just so it's not None
# '''
# state_menu = [(x, x) for x in Fighter.conscious_map.keys()
# if x != 'fight']
# state_menu = sorted(state_menu, key=lambda x: x[0].upper())
# state, ignore = self.__window_manager.menu('Which State', state_menu)
# if state is not None:
# param['state'] = state
# return True
class Timers(object):
'''
Keeps a list of timers. There are two parallel lists: 'data' keeps the
actual data (it's a pointer to the spot in the Game File where the ultimate
data is stored) while 'obj' keeps Timer objects.
'''
def __init__(self,
timer_details, # List from Game File containing timers
owner, # ThingsInFight object to receive timer
# actions.
window_manager # For displaying error messages
):
# data and obj are parallel arrays. 'data' is just like it is in the
# Game File (and, in fact, should point to the Game File data) and
# 'obj' is the Timer object from that data.
self.__timers = {'data': timer_details,
'obj': []}
self.__owner = owner
for timer_data in timer_details:
timer_obj = Timer(timer_data)
self.__timers['obj'].append(timer_obj)
self.__window_manager = window_manager
def add(self,
timer # Timer object
):
'''
Adds a timer to this list's timers.
Returns the timer right back, again.
'''
self.__timers['data'].append(timer.details)
self.__timers['obj'].append(timer)
return timer
def clear_all(self):
''' Removes all of this list's timers. '''
# I think I have to pop the timer data, individually, because setting
# ['data'] to [] would just remove our pointer to the Game File data
# without modifying the Game File data.
while len(self.__timers['data']) > 0:
self.__timers['data'].pop()
self.__timers['obj'] = []
def decrement_all(self):
''' Decrements all timers. '''
for timer_obj in self.__timers['obj']:
timer_obj.decrement()
def get_all(self):
''' Returns a complete list of this list's Timer objects. '''
return self.__timers['obj']
def is_busy(self):
'''Returns 'True' if a current timer has the owner marked as busy.'''
for timer_obj in self.__timers['obj']:
if timer_obj.details['busy']:
return True
return False
def found_timer_string(self,
string
):
'''Returns 'True' if a current timer has string matching parameter.'''
for timer_obj in self.__timers['obj']:
if ('string' in timer_obj.details and
                    timer_obj.details['string'] == string):
return True
return False
def remove_expired_keep_dying(self):
'''
Removes expired timers BUT KEEPS the timers that are dying this
round. Call this at the beginning of the round. Standard timers that
die this round are kept so that they're shown.
Returns nothing.
'''
remove_these = []
for index, timer in enumerate(self.__timers['obj']):
if timer.details['rounds'] < 0: # keeps timers dying this round
remove_these.insert(0, index) # largest indexes last
for index in remove_these:
self.__fire_timer(self.__timers['obj'][index])
self.remove_timer_by_index(index)
def remove_expired_kill_dying(self):
'''
Removes expired timers AND REMOVES the timers that are dying this
round. Call this at the end of the round to scrape off the timers
that expire this round.
Returns nothing.
'''
remove_these = []
for index, timer in enumerate(self.__timers['obj']):
if timer.details['rounds'] <= 0: # kills timers dying this round
remove_these.insert(0, index) # largest indexes last
for index in remove_these:
self.__fire_timer(self.__timers['obj'][index])
self.remove_timer_by_index(index)
def remove_timer_by_index(self,
index # Index of the timer to be removed
):
'''
Removes a timer from the timer list.
Returns nothing.
'''
del self.__timers['data'][index]
del self.__timers['obj'][index]
#
# Private methods
#
def __fire_timer(self,
timer # Timer object
):
'''
Has the timer do whatever it does when it fires (i.e., when its time
runs out).
Returns nothing.
'''
new_timer = timer.fire(self.__owner, self.__window_manager)
if new_timer is not None:
self.add(Timer(new_timer))
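# Illustrative sketch: the 'data' list handed to Timers stays in sync with its
# Timer objects because both views share the same per-timer dicts. The inputs
# are demo assumptions; window_manager stands in for a real GmWindowManager.
def _demo_timers_shared_data(window_manager):
    game_file_timers = []   # stands in for the list stored in the Game File
    timers = Timers(game_file_timers, None, window_manager)
    timer = Timer(None)
    timer.from_pieces({'parent-name': 'Manny', 'rounds': 1,
                       'string': 'Reloading', 'actions': {}})
    timers.add(timer)
    return len(game_file_timers)   # 1 -- the same dict the Timer object wraps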
| 37.840996
| 79
| 0.512023
|
d815f2c7ca91ab3e6124c889fe39b6fc81676b64
| 4,919
|
py
|
Python
|
asposeimagingcloud/models/requests/get_image_features_request.py
|
aspose-imaging-cloud/aspose-imaging-cloud-python
|
9280a4a1aa415cb569ec26a05792b33186d31a85
|
[
"MIT"
] | 1
|
2022-01-14T10:06:26.000Z
|
2022-01-14T10:06:26.000Z
|
asposeimagingcloud/models/requests/get_image_features_request.py
|
aspose-imaging-cloud/aspose-imaging-cloud-python
|
9280a4a1aa415cb569ec26a05792b33186d31a85
|
[
"MIT"
] | 3
|
2019-07-17T15:01:31.000Z
|
2020-12-29T09:16:10.000Z
|
asposeimagingcloud/models/requests/get_image_features_request.py
|
aspose-imaging-cloud/aspose-imaging-cloud-python
|
9280a4a1aa415cb569ec26a05792b33186d31a85
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="get_image_features_request.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
from asposeimagingcloud.models.requests.imaging_request import ImagingRequest
from asposeimagingcloud.models.requests.http_request import HttpRequest
class GetImageFeaturesRequest(ImagingRequest):
"""
Request model for get_image_features operation.
Initializes a new instance.
    :param search_context_id: The search context identifier.
    :param image_id: The image identifier.
    :param folder: The folder.
    :param storage: The storage.
"""
def __init__(self, search_context_id, image_id, folder=None, storage=None):
ImagingRequest.__init__(self)
self.search_context_id = search_context_id
self.image_id = image_id
self.folder = folder
self.storage = storage
def to_http_info(self, config):
"""
Prepares initial info for HTTP request
:param config: Imaging API configuration
:type: asposeimagingcloud.Configuration
:return: http_request configured http request
:rtype: Configuration.models.requests.HttpRequest
"""
# verify the required parameter 'search_context_id' is set
if self.search_context_id is None:
raise ValueError("Missing the required parameter `search_context_id` when calling `get_image_features`")
# verify the required parameter 'image_id' is set
if self.image_id is None:
raise ValueError("Missing the required parameter `image_id` when calling `get_image_features`")
collection_formats = {}
path = '/imaging/ai/imageSearch/{searchContextId}/features'
path_params = {}
if self.search_context_id is not None:
path_params[self._lowercase_first_letter('searchContextId')] = self.search_context_id
query_params = []
if self._lowercase_first_letter('imageId') in path:
path = path.replace('{' + self._lowercase_first_letter('imageId' + '}'), self.image_id if self.image_id is not None else '')
else:
if self.image_id is not None:
query_params.append((self._lowercase_first_letter('imageId'), self.image_id))
if self._lowercase_first_letter('folder') in path:
path = path.replace('{' + self._lowercase_first_letter('folder' + '}'), self.folder if self.folder is not None else '')
else:
if self.folder is not None:
query_params.append((self._lowercase_first_letter('folder'), self.folder))
if self._lowercase_first_letter('storage') in path:
path = path.replace('{' + self._lowercase_first_letter('storage' + '}'), self.storage if self.storage is not None else '')
else:
if self.storage is not None:
query_params.append((self._lowercase_first_letter('storage'), self.storage))
header_params = {}
form_params = []
local_var_files = []
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = 'multipart/form-data' if form_params else self._select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['JWT']
return HttpRequest(path, path_params, query_params, header_params, form_params, body_params, local_var_files,
collection_formats, auth_settings)
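# Illustrative sketch: typical construction of the request and conversion to
# HTTP info. 'api_config' is an assumed, already-configured asposeimagingcloud
# Configuration instance, and the argument values are made-up demo data.
def _demo_build_request(api_config):
    request = GetImageFeaturesRequest(search_context_id='ctx-1',
                                      image_id='sample.jpg',
                                      folder='images',
                                      storage='MyStorage')
    return request.to_http_info(api_config)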
| 45.546296
| 136
| 0.665379
|
fde5051fafa2e5cf1c382214f2bae13adffa4bb5
| 813
|
py
|
Python
|
src/deepfaceannotator/mappers/gender_2_mappers.py
|
StefanieStoppel/DeepfaceAnnotator
|
ef09c2bd6be3eb158f83f3c28973088b10244ac3
|
[
"MIT"
] | null | null | null |
src/deepfaceannotator/mappers/gender_2_mappers.py
|
StefanieStoppel/DeepfaceAnnotator
|
ef09c2bd6be3eb158f83f3c28973088b10244ac3
|
[
"MIT"
] | null | null | null |
src/deepfaceannotator/mappers/gender_2_mappers.py
|
StefanieStoppel/DeepfaceAnnotator
|
ef09c2bd6be3eb158f83f3c28973088b10244ac3
|
[
"MIT"
] | null | null | null |
class Gender2Mapper:
GENDER_LABELS_DEEPFACE = ['Woman', 'Man']
def __init__(self, analysis_dict: dict):
self._genders = analysis_dict["gender"]
self._most_likely_gender = analysis_dict["dominant_gender"]
@property
def probability_female(self):
return self._genders[self.GENDER_LABELS_DEEPFACE[0]]
@property
def probability_male(self):
return self._genders[self.GENDER_LABELS_DEEPFACE[1]]
@property
def values(self):
return self._genders.values()
@property
def keys(self):
return self._genders.keys()
@property
def items(self):
return self._genders.items()
@property
def most_likely_gender(self):
return 'Female' if self._most_likely_gender == self.GENDER_LABELS_DEEPFACE[0] else 'Male'
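# Illustrative sketch: mapping one deepface analysis result. The probabilities
# below are made-up demo values, not real model output.
def _demo_gender_mapping():
    analysis = {'gender': {'Woman': 96.5, 'Man': 3.5},
                'dominant_gender': 'Woman'}
    mapper = Gender2Mapper(analysis)
    return mapper.most_likely_gender, mapper.probability_female  # ('Female', 96.5)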
| 26.225806
| 97
| 0.676507
|
04f818e6a064feb5328b9623732ee846dc0f1267
| 1,315
|
py
|
Python
|
examples/computer_vision/mmdetection_pytorch/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py
|
RAbraham/determined
|
1161b667ed6d0242f70f9f15d58600f910c8d7f9
|
[
"Apache-2.0"
] | 1
|
2021-03-29T13:39:45.000Z
|
2021-03-29T13:39:45.000Z
|
examples/computer_vision/mmdetection_pytorch/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py
|
RAbraham/determined
|
1161b667ed6d0242f70f9f15d58600f910c8d7f9
|
[
"Apache-2.0"
] | null | null | null |
examples/computer_vision/mmdetection_pytorch/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py
|
RAbraham/determined
|
1161b667ed6d0242f70f9f15d58600f910c8d7f9
|
[
"Apache-2.0"
] | null | null | null |
_base_ = "./cascade_rcnn_r50_fpn_1x_coco.py"
model = dict(
pretrained="open-mmlab://detectron2/resnet50_caffe",
backbone=dict(norm_cfg=dict(requires_grad=False), style="caffe"),
)
# use caffe img_norm
img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type="LoadImageFromFile"),
dict(type="LoadAnnotations", with_bbox=True),
dict(type="Resize", img_scale=(1333, 800), keep_ratio=True),
dict(type="RandomFlip", flip_ratio=0.5),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size_divisor=32),
dict(type="DefaultFormatBundle"),
dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels"]),
]
test_pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size_divisor=32),
dict(type="ImageToTensor", keys=["img"]),
dict(type="Collect", keys=["img"]),
],
),
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline),
)
| 32.073171
| 88
| 0.629658
|
637e3f3ce052f74bca339bb26b434b3f98e6a67f
| 509
|
py
|
Python
|
solutions/python3/945.py
|
sm2774us/amazon_interview_prep_2021
|
f580080e4a6b712b0b295bb429bf676eb15668de
|
[
"MIT"
] | 42
|
2020-08-02T07:03:49.000Z
|
2022-03-26T07:50:15.000Z
|
solutions/python3/945.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | null | null | null |
solutions/python3/945.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | 40
|
2020-02-08T02:50:24.000Z
|
2022-03-26T15:38:10.000Z
|
import heapq
class Solution:
def minIncrementForUnique(self, A):
st, used, move = set(A), set(), 0
heapq.heapify(A)
empty = [i for i in range(80000) if i not in st][::-1] if A else []
while A:
num = heapq.heappop(A)
if num not in used:
used.add(num)
else:
while empty[-1] < num:
empty.pop()
move += empty[-1] - num
heapq.heappush(A, empty.pop())
return move
| 33.933333
| 76
| 0.451866
|
74d4795327c05d008aef2e600e08a900f640bd62
| 1,443
|
py
|
Python
|
442burn.py
|
SteveMacenski/Orbital-Mechanics-Tools
|
4110ab3514654af830dadd1804cf002ebe6a8ac8
|
[
"MIT"
] | null | null | null |
442burn.py
|
SteveMacenski/Orbital-Mechanics-Tools
|
4110ab3514654af830dadd1804cf002ebe6a8ac8
|
[
"MIT"
] | null | null | null |
442burn.py
|
SteveMacenski/Orbital-Mechanics-Tools
|
4110ab3514654af830dadd1804cf002ebe6a8ac8
|
[
"MIT"
] | null | null | null |
# Senior design, finds delta V from Trans-Mars Injection Orbit to Mars Orbital R
import math
def brake_dv(r_mars_orbit,delta_i1,delta_i2):
#inputs r of mars orbit, and 2 inclination changes
mu_sun = 1.327235e20; #m^2/s^2
mu_earth = 3.986e14;
mu_mars = 4.2828e13;
r_mars =227.94e9; #m
r_earth = 149.6e9; #m
delta_i1 = math.radians(delta_i1);
delta_i2 = math.radians(delta_i2);
aMTO = 0.5*(r_earth + r_mars);
r_SOIm = 0.576e9; #m
#V at mars for mars transfer orbit
v_SOIm = -math.sqrt(mu_sun*(2/r_mars - 1/aMTO)) + math.sqrt(mu_sun/r_mars);
a_MTOm = 1/(2/r_SOIm - v_SOIm*v_SOIm/mu_mars);
#V mars transfer at mars
v_rend = math.sqrt(mu_mars*(2/r_mars_orbit - 1/a_MTOm));
#V at final orbit
v_LMO = math.sqrt(mu_mars/r_mars_orbit); #V mars low orbit
#DV for inclination change
delta_v_inclination1 = 2*v_LMO*math.sin(delta_i1/2);
delta_v_inclination2 = 2*v_LMO*math.sin(delta_i2/2);
    print("[delta v tot for incline 1 km/s, delta v tot for incline 2 km/s]")
return [(v_rend-v_LMO)/1000+delta_v_inclination1/1000, \
(v_rend-v_LMO)/1000+delta_v_inclination2/1000]; #delta V total
if __name__ == "__main__":
    Isp = 318
    delta_V = brake_dv(11000e3, 0, 20)
    mass_ratio1 = math.exp(delta_V[0] * 1000 / 9.81 / Isp)
    mass_ratio2 = math.exp(delta_V[1] * 1000 / 9.81 / Isp)
    print(delta_V, 1 - 1. / mass_ratio1, 1 - 1. / mass_ratio2)
| 31.369565
| 80
| 0.663895
|
5ba26c65921e521fb6b0feaae0c219db9e18fd9d
| 3,210
|
py
|
Python
|
scripts/prod_to_local.py
|
ephes/python-podcast
|
af08970b6de318984ff1f46a19692775c8aa2d71
|
[
"BSD-3-Clause"
] | 1
|
2018-11-16T08:35:44.000Z
|
2018-11-16T08:35:44.000Z
|
scripts/prod_to_local.py
|
ephes/python-podcast
|
af08970b6de318984ff1f46a19692775c8aa2d71
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/prod_to_local.py
|
ephes/python-podcast
|
af08970b6de318984ff1f46a19692775c8aa2d71
|
[
"BSD-3-Clause"
] | 1
|
2018-11-17T14:56:18.000Z
|
2018-11-17T14:56:18.000Z
|
import os
from subprocess import check_output
import paramiko
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
# get backup dump from production
host = os.environ.get("PRODUCTION_HOST")
username = os.environ.get("PRODUCTION_USERNAME")
db_name = os.environ.get("DATABASE_NAME")
db_user = os.environ.get("POSTGRES_USER")
client = paramiko.SSHClient()
client.load_system_host_keys()
client.connect(host, username=username)
ssh_stdin, ssh_stdout, ssh_stderr = client.exec_command("cd site;python3 scripts/prod_backup.py")
stdout_lines = ssh_stdout.read().decode("utf-8").split("\n")
print(stdout_lines)
remote_path = None
for line in stdout_lines:
if "scp" in line:
remote_path = line.split()[1].split(":")[-1]
print(remote_path)
file_name = os.path.basename(remote_path)
local_path = os.path.join("backups", file_name)
if not os.path.exists("backups"):
os.mkdir("backups")
sftp = client.open_sftp()
sftp.get(remote_path, "backups/{}".format(file_name))
# recreate local docker environment from production
docker_postgres_cmd = "docker-compose -f local.yml up -d postgres"
detach_postgres_out = check_output(docker_postgres_cmd, shell=True)
print(detach_postgres_out)
# restore backup dump to database
docker_id_cmd = 'docker ps | grep postgres | cut -d " " -f 1'
postgres_id = (check_output(docker_id_cmd, shell=True)
.decode('utf-8')
.replace("\n", ""))
print(postgres_id)
backup_copy_cmd = "docker cp {} {}:/backups".format(local_path, postgres_id)
print(backup_copy_cmd)
result = check_output(backup_copy_cmd, shell=True)
print(result)
docker_down = "docker-compose -f local.yml down"
result = check_output(docker_down, shell=True)
print(result)
start_postgres_cmd = "docker-compose -f local.yml run --rm django ./manage.py"
result = check_output(start_postgres_cmd, shell=True)
print(result)
restore_cmd = "docker-compose -f local.yml run --rm postgres restore {}".format(file_name)
print(restore_cmd)
result = check_output(restore_cmd, shell=True)
print(result)
# remove stale media files
#delete_stale_cmd = "docker-compose -f local.yml run --rm django ./manage.py s3_stale --delete"
#delete_stale_cmd = "docker-compose -f local.yml run --rm django ./manage.py s3_stale"
#print(delete_stale_cmd)
#result = check_output(delete_stale_cmd, shell=True)
#print(result)
# get new media files from s3
#backup_s3_cmd = "docker-compose -f local.yml run --rm django ./manage.py s3_backup"
#print(backup_s3_cmd)
#result = check_output(backup_s3_cmd, shell=True)
#print(result)
# recreate local db from production
#dropdb_cmd = f"dropdb {db_name}"
#print(dropdb_cmd)
#result = check_output(dropdb_cmd, shell=True)
#print(result)
try:
createdb_cmd = f"createuser {db_user}"
print(createdb_cmd)
result = check_output(createdb_cmd, shell=True)
print(result)
except Exception:
pass
try:
createdb_cmd = f"createdb {db_name}"
print(createdb_cmd)
result = check_output(createdb_cmd, shell=True)
print(result)
except Exception:
pass
local_restore_cmd = f"gunzip -c {local_path} | psql {db_name} -U {db_name}"
print(local_restore_cmd)
result = check_output(local_restore_cmd, shell=True)
print(result)
| 30.865385
| 97
| 0.75109
|
b17aacf2968d4c0695d8302dff9573c76e58fe44
| 5,501
|
py
|
Python
|
doc/conf.py
|
RamonPujol/OTSun
|
0a587980b8465bcc886811de246718e08e6dab06
|
[
"MIT"
] | 5
|
2019-02-28T10:29:51.000Z
|
2022-02-09T10:31:20.000Z
|
doc/conf.py
|
RamonPujol/OTSun
|
0a587980b8465bcc886811de246718e08e6dab06
|
[
"MIT"
] | 4
|
2017-02-07T11:10:24.000Z
|
2018-06-21T15:33:23.000Z
|
doc/conf.py
|
RamonPujol/OTSun
|
0a587980b8465bcc886811de246718e08e6dab06
|
[
"MIT"
] | 6
|
2019-02-28T10:29:56.000Z
|
2022-03-28T16:58:40.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('/usr/lib/freecad-0.16/lib'))
# -- Project information -----------------------------------------------------
project = u'OTSun'
copyright = u'2019, Gabriel Cardona & Ramon Pujol'
author = u'Gabriel Cardona & Ramon Pujol'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'otsundoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'otsun.tex', u'otsun Documentation',
u'BC', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'otsun', u'otsun Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'otsun', u'otsun Documentation',
author, 'otsun', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
autodoc_default_options = {
'member-order': 'bysource',
}
autodoc_mock_imports = ["autologging", "FreeCAD", "Part"]
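# Illustrative sketch, not part of the original OTSun conf.py: how this
# configuration might be exercised programmatically. It assumes Sphinx >= 1.7
# is installed and that '_build/html' (a hypothetical choice) is an acceptable
# output directory; normally one would simply run `sphinx-build` instead.
if __name__ == '__main__':
    from sphinx.cmd.build import build_main
    build_main(['-b', 'html', '.', '_build/html'])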
| 29.575269
| 79
| 0.6477
|
06f5a41f7fcd40952ef1ebf3859fff01572ae66f
| 7,419
|
py
|
Python
|
setup.py
|
squarefk/test_actions
|
dd3b0305c49b577102786eb1c24c590ef160bc30
|
[
"MIT"
] | null | null | null |
setup.py
|
squarefk/test_actions
|
dd3b0305c49b577102786eb1c24c590ef160bc30
|
[
"MIT"
] | null | null | null |
setup.py
|
squarefk/test_actions
|
dd3b0305c49b577102786eb1c24c590ef160bc30
|
[
"MIT"
] | null | null | null |
import glob
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
from setuptools.command.build_py import build_py
from setuptools.command.egg_info import egg_info
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Topic :: Software Development :: Compilers',
'Topic :: Multimedia :: Graphics',
'Topic :: Games/Entertainment :: Simulation',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
]
project_name = os.getenv('PROJECT_NAME', 'taichi')
TI_VERSION_MAJOR = 0
TI_VERSION_MINOR = 7
TI_VERSION_PATCH = 27
version = f'{TI_VERSION_MAJOR}.{TI_VERSION_MINOR}.{TI_VERSION_PATCH}'
data_files = glob.glob('python/lib/*')
print(data_files)
packages = find_packages('python')
print(packages)
# Our python package root dir is python/
package_dir = 'python'
def get_python_executable():
return sys.executable.replace('\\', '/')
def get_os_name():
name = platform.platform()
# in python 3.8, platform.platform() uses mac_ver() on macOS
# it will return 'macOS-XXXX' instead of 'Darwin-XXXX'
if name.lower().startswith('darwin') or name.lower().startswith('macos'):
return 'osx'
elif name.lower().startswith('windows'):
return 'win'
elif name.lower().startswith('linux'):
return 'linux'
assert False, "Unknown platform name %s" % name
def remove_tmp(taichi_dir):
shutil.rmtree(os.path.join(taichi_dir, 'assets'), ignore_errors=True)
shutil.rmtree(os.path.join(taichi_dir, 'examples'), ignore_errors=True)
shutil.rmtree(os.path.join(taichi_dir, 'tests'), ignore_errors=True)
class CMakeExtension(Extension):
def __init__(self, name):
Extension.__init__(self, name, sources=[])
class EggInfo(egg_info):
def run(self):
taichi_dir = os.path.join(package_dir, 'taichi')
remove_tmp(taichi_dir)
shutil.rmtree('build', ignore_errors=True)
shutil.copytree('tests/python', os.path.join(taichi_dir, 'tests'))
shutil.copytree('examples', os.path.join(taichi_dir, 'examples'))
shutil.copytree('external/assets', os.path.join(taichi_dir, 'assets'))
egg_info.run(self)
# python setup.py build runs the following commands in order:
# python setup.py build_py
# python setup.py build_ext
class BuildPy(build_py):
def run(self):
build_py.run(self)
taichi_dir = os.path.join(package_dir, 'taichi')
remove_tmp(taichi_dir)
class CMakeBuild(build_ext):
def parse_cmake_args_from_env(self):
# Source: TAICHI_CMAKE_ARGS=... python setup.py ...
cmake_args = os.getenv('TAICHI_CMAKE_ARGS', '')
return cmake_args.strip().split()
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
# CMakeLists.txt is in the same directory as this setup.py file
cmake_list_dir = os.path.abspath(os.path.dirname(__file__))
self.build_temp = os.path.join(cmake_list_dir, 'build')
build_directory = os.path.abspath(self.build_temp)
cmake_args = self.parse_cmake_args_from_env()
cmake_args += [
f'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={build_directory}',
f'-DPYTHON_EXECUTABLE={get_python_executable()}',
f'-DTI_VERSION_MAJOR={TI_VERSION_MAJOR}',
f'-DTI_VERSION_MINOR={TI_VERSION_MINOR}',
f'-DTI_VERSION_PATCH={TI_VERSION_PATCH}',
]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
# Assuming Makefiles
build_args += ['--', f'-j{multiprocessing.cpu_count()}']
self.build_args = build_args
env = os.environ.copy()
os.makedirs(self.build_temp, exist_ok=True)
print('-' * 10, 'Running CMake prepare', '-' * 40)
subprocess.check_call(['cmake', cmake_list_dir] + cmake_args,
cwd=self.build_temp,
env=env)
print('-' * 10, 'Building extensions', '-' * 40)
cmake_cmd = ['cmake', '--build', '.'] + self.build_args
subprocess.check_call(cmake_cmd, cwd=self.build_temp)
self.prepare_package()
def prepare_package(self):
# We need to make sure these additional files are ready for
# - develop mode: must exist in local python/taichi/lib/ folder
# - install mode: must exist in self.build_lib/taichi/lib
taichi_lib_dir = 'taichi/lib'
for target in (
os.path.join(package_dir, taichi_lib_dir),
os.path.join(self.build_lib, taichi_lib_dir),
):
shutil.rmtree(target, ignore_errors=True)
os.makedirs(target)
if get_os_name() == 'linux':
shutil.copy(os.path.join(self.build_temp, 'libtaichi_core.so'),
os.path.join(target, 'taichi_core.so'))
elif get_os_name() == 'osx':
shutil.copy(
os.path.join(self.build_temp, 'libtaichi_core.dylib'),
os.path.join(target, 'taichi_core.so'))
else:
shutil.copy('../runtimes/RelWithDebInfo/taichi_core.dll',
os.path.join(target, 'taichi_core.pyd'))
if get_os_name() != 'osx':
libdevice_path = 'external/cuda_libdevice/slim_libdevice.10.bc'
print("copying libdevice:", libdevice_path)
assert os.path.exists(libdevice_path)
shutil.copy(libdevice_path,
os.path.join(target, 'slim_libdevice.10.bc'))
llvm_runtime_dir = 'taichi/runtime/llvm'
for f in os.listdir(llvm_runtime_dir):
if f.startswith('runtime_') and f.endswith('.bc'):
print(f"Fetching runtime file {f} to {target} folder")
shutil.copy(os.path.join(llvm_runtime_dir, f), target)
setup(name=project_name,
packages=packages,
package_dir={"": package_dir},
version=version,
description='The Taichi Programming Language',
author='Taichi developers',
author_email='yuanmhu@gmail.com',
url='https://github.com/taichi-dev/taichi',
install_requires=[
'numpy',
'pybind11>=2.5.0',
'sourceinspect>=0.0.4',
'colorama',
'astor',
],
data_files=[('lib', data_files)],
keywords=['graphics', 'simulation'],
license='MIT',
include_package_data=True,
entry_points={
'console_scripts': [
'ti=taichi.main:main',
],
},
classifiers=classifiers,
ext_modules=[CMakeExtension('taichi_core')],
cmdclass=dict(egg_info=EggInfo, build_py=BuildPy, build_ext=CMakeBuild),
has_ext_modules=lambda: True)
| 34.995283
| 79
| 0.622591
|
d4e658cbd17a6728fa98cc52688f77fdc0dc3993
| 20,120
|
py
|
Python
|
NEMbox/api.py
|
wangjianyuan10/musicbox
|
f182053b07badc5d34190aeea85ff38d364a164e
|
[
"MIT"
] | 2
|
2020-03-21T15:20:28.000Z
|
2020-04-16T07:22:46.000Z
|
NEMbox/api.py
|
e71828/musicbox
|
f182053b07badc5d34190aeea85ff38d364a164e
|
[
"MIT"
] | null | null | null |
NEMbox/api.py
|
e71828/musicbox
|
f182053b07badc5d34190aeea85ff38d364a164e
|
[
"MIT"
] | 1
|
2020-06-10T09:22:38.000Z
|
2020-06-10T09:22:38.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: omi
# @Date: 2014-08-24 21:51:57
'''
NetEase Cloud Music API
'''
from __future__ import (
print_function, unicode_literals, division, absolute_import
)
import json
from collections import OrderedDict
from http.cookiejar import LWPCookieJar
from http.cookiejar import Cookie
import platform
import time
import requests
import requests_cache
from .config import Config
from .const import Constant
from .storage import Storage
from .encrypt import encrypted_request
from . import logger
requests_cache.install_cache(Constant.cache_path, expire_after=3600)
log = logger.getLogger(__name__)
# Toplist (song chart) playlist ids
TOP_LIST_ALL = {
0: ['云音乐新歌榜', '3779629'],
1: ['云音乐热歌榜', '3778678'],
2: ['网易原创歌曲榜', '2884035'],
3: ['云音乐飙升榜', '19723756'],
4: ['云音乐电音榜', '10520166'],
5: ['UK排行榜周榜', '180106'],
6: ['美国Billboard周榜', '60198'],
7: ['KTV嗨榜', '21845217'],
8: ['iTunes榜', '11641012'],
9: ['Hit FM Top榜', '120001'],
10: ['日本Oricon周榜', '60131'],
11: ['韩国Melon排行榜周榜', '3733003'],
12: ['韩国Mnet排行榜周榜', '60255'],
13: ['韩国Melon原声周榜', '46772709'],
14: ['中国TOP排行榜(港台榜)', '112504'],
15: ['中国TOP排行榜(内地榜)', '64016'],
16: ['香港电台中文歌曲龙虎榜', '10169002'],
17: ['华语金曲榜', '4395559'],
18: ['中国嘻哈榜', '1899724'],
19: ['法国 NRJ EuroHot 30周榜', '27135204'],
20: ['台湾Hito排行榜', '112463'],
21: ['Beatport全球电子舞曲榜', '3812895'],
22: ['云音乐ACG音乐榜', '71385702'],
23: ['云音乐嘻哈榜', '991319590']
}
PLAYLIST_CLASSES = OrderedDict([
('语种', ['华语', '欧美', '日语', '韩语', '粤语', '小语种']),
('风格', ['流行', '摇滚', '民谣', '电子', '舞曲', '说唱', '轻音乐', '爵士', '乡村', 'R&B/Soul', '古典', '民族',
'英伦', '金属', '朋克', '蓝调', '雷鬼', '世界音乐', '拉丁', '另类/独立', 'New Age', '古风', '后摇', 'Bossa Nova']),
('场景', ['清晨', '夜晚', '学习', '工作', '午休', '下午茶',
'地铁', '驾车', '运动', '旅行', '散步', '酒吧']),
('情感', ['怀旧', '清新', '浪漫', '性感', '伤感', '治愈',
'放松', '孤独', '感动', '兴奋', '快乐', '安静', '思念']),
('主题', ['影视原声', 'ACG', '儿童', '校园', '游戏', '70后', '80后', '90后',
'网络歌曲', 'KTV', '经典', '翻唱', '吉他', '钢琴', '器乐', '榜单', '00后'])
])
DEFAULT_TIMEOUT = 10
BASE_URL = 'http://music.163.com'
class Parse(object):
@classmethod
def _song_url_by_id(cls, sid):
# 128k
url = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(sid)
quality = 'LD 128k'
return url, quality
@classmethod
def song_url(cls, song):
if 'url' in song:
# songs_url resp
url = song['url']
if url is None:
return Parse._song_url_by_id(song['id'])
br = song['br']
if br >= 320000:
quality = 'HD'
elif br >= 192000:
quality = 'MD'
else:
quality = 'LD'
return url, '{} {}k'.format(quality, br // 1000)
else:
# songs_detail resp
return Parse._song_url_by_id(song['id'])
@classmethod
def song_album(cls, song):
        # Handle both the new and the old API response formats
if 'al' in song:
if song['al'] is not None:
album_name = song['al']['name']
album_id = song['al']['id']
else:
album_name = '未知专辑'
album_id = ''
elif 'album' in song:
if song['album'] is not None:
album_name = song['album']['name']
album_id = song['album']['id']
else:
album_name = '未知专辑'
album_id = ''
else:
raise ValueError
return album_name, album_id
@classmethod
def song_artist(cls, song):
artist = ''
        # Handle both the new and the old API response formats
if 'ar' in song:
artist = ', '.join([a['name']
for a in song['ar'] if a['name'] is not None])
            # For some cloud-drive tracks the 'name' field inside 'ar' is None,
            # but an extra 'pc' field is present instead, e.g.:
# {'name': '简单爱', 'id': 31393663, 'pst': 0, 't': 1, 'ar': [{'id': 0, 'name': None, 'tns': [], 'alias': []}],
# 'alia': [], 'pop': 0.0, 'st': 0, 'rt': None, 'fee': 0, 'v': 5, 'crbt': None, 'cf': None,
# 'al': {'id': 0, 'name': None, 'picUrl': None, 'tns': [], 'pic': 0}, 'dt': 273000, 'h': None, 'm': None,
# 'l': {'br': 193000, 'fid': 0, 'size': 6559659, 'vd': 0.0}, 'a': None, 'cd': None, 'no': 0, 'rtUrl': None,
# 'ftype': 0, 'rtUrls': [], 'djId': 0, 'copyright': 0, 's_id': 0, 'rtype': 0, 'rurl': None, 'mst': 9,
# 'cp': 0, 'mv': 0, 'publishTime': 0,
# 'pc': {'nickname': '', 'br': 192, 'fn': '简单爱.mp3', 'cid': '', 'uid': 41533322, 'alb': 'The One 演唱会',
# 'sn': '简单爱', 'version': 2, 'ar': '周杰伦'}, 'url': None, 'br': 0}
if artist == '' and 'pc' in song:
artist = '未知艺术家' if song['pc']['ar'] is None else song['pc']['ar']
elif 'artists' in song:
artist = ', '.join([a['name'] for a in song['artists']])
else:
artist = '未知艺术家'
return artist
@classmethod
def songs(cls, songs):
song_info_list = []
for song in songs:
url, quality = Parse.song_url(song)
if not url:
continue
album_name, album_id = Parse.song_album(song)
song_info = {
'song_id': song['id'],
'artist': Parse.song_artist(song),
'song_name': song['name'],
'album_name': album_name,
'album_id': album_id,
'mp3_url': url,
'quality': quality,
'expires': song['expires'],
'get_time': song['get_time']
}
song_info_list.append(song_info)
return song_info_list
@classmethod
def artists(cls, artists):
return [{
'artist_id': artist['id'],
'artists_name': artist['name'],
'alias': ''.join(artist['alias'])
} for artist in artists]
@classmethod
def albums(cls, albums):
return [{
'album_id': album['id'],
'albums_name': album['name'],
'artists_name': album['artist']['name']
} for album in albums]
@classmethod
def playlists(cls, playlists):
return [{
'playlist_id': pl['id'],
'playlist_name': pl['name'],
'creator_name': pl['creator']['nickname']
} for pl in playlists]
class NetEase(object):
def __init__(self):
self.header = {
'Accept': '*/*',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4',
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded',
'Host': 'music.163.com',
'Referer': 'http://music.163.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
}
self.storage = Storage()
cookie_jar = LWPCookieJar(self.storage.cookie_path)
cookie_jar.load()
self.session = requests.Session()
self.session.cookies = cookie_jar
for cookie in cookie_jar:
if cookie.is_expired():
cookie_jar.clear()
self.storage.database['user'] = {
'username': '',
'password': '',
'user_id': '',
'nickname': '',
}
self.storage.save()
break
@property
def toplists(self):
return [l[0] for l in TOP_LIST_ALL.values()]
def logout(self):
self.session.cookies.clear()
self.storage.database['user'] = {
'username': '',
'password': '',
'user_id': '',
'nickname': '',
}
self.session.cookies.save()
self.storage.save()
def _raw_request(self, method, endpoint, data=None):
if method == 'GET':
resp = self.session.get(
endpoint, params=data, headers=self.header, timeout=DEFAULT_TIMEOUT
)
elif method == 'POST':
resp = self.session.post(
endpoint, data=data, headers=self.header, timeout=DEFAULT_TIMEOUT
)
return resp
    # Build a Cookie object for the session cookie jar
def make_cookie(self, name, value):
return Cookie(
version=0,
name=name,
value=value,
port=None,
port_specified=False,
domain="music.163.com",
domain_specified=True,
domain_initial_dot=False,
path="/",
path_specified=True,
secure=False,
expires=None,
discard=False,
comment=None,
comment_url=None,
rest=None
)
    def request(self, method, path, params=None, default=None, custom_cookies=None):
        # use None defaults so the dicts are not shared between calls
        params = params if params is not None else {}
        default = default if default is not None else {'code': -1}
        custom_cookies = custom_cookies if custom_cookies is not None else {}
        endpoint = '{}{}'.format(BASE_URL, path)
csrf_token = ''
for cookie in self.session.cookies:
if cookie.name == '__csrf':
csrf_token = cookie.value
break
params.update({'csrf_token': csrf_token})
data = default
for key, value in custom_cookies.items():
cookie = self.make_cookie(key, value)
self.session.cookies.set_cookie(cookie)
params = encrypted_request(params)
try:
resp = self._raw_request(method, endpoint, params)
data = resp.json()
except requests.exceptions.RequestException as e:
log.error(e)
except ValueError as e:
log.error('Path: {}, response: {}'.format(path, resp.text[:200]))
finally:
return data
def login(self, username, password):
self.session.cookies.load()
if username.isdigit():
path = '/weapi/login/cellphone'
params = dict(
phone=username,
password=password,
rememberLogin='true',
)
else:
# magic token for login
# see https://github.com/Binaryify/NeteaseCloudMusicApi/blob/master/router/login.js#L15
client_token = '1_jVUMqWEPke0/1/Vu56xCmJpo5vP1grjn_SOVVDzOc78w8OKLVZ2JH7IfkjSXqgfmh'
path = '/weapi/login'
params = dict(
username=username,
password=password,
rememberLogin='true',
clientToken=client_token
)
data = self.request('POST', path, params)
self.session.cookies.save()
return data
    # Daily check-in
def daily_task(self, is_mobile=True):
path = '/weapi/point/dailyTask'
params = dict(type=0 if is_mobile else 1)
return self.request('POST', path, params)
    # User playlists
def user_playlist(self, uid, offset=0, limit=50):
path = '/weapi/user/playlist'
params = dict(
uid=uid,
offset=offset,
limit=limit,
csrf_token=''
)
return self.request('POST', path, params).get('playlist', [])
    # Daily recommended playlists
def recommend_resource(self):
path = '/weapi/v1/discovery/recommend/resource'
return self.request('POST', path).get('recommend', [])
    # Daily recommended songs
def recommend_playlist(self, total=True, offset=0, limit=20):
path = '/weapi/v1/discovery/recommend/songs' # NOQA
params = dict(
total=total,
offset=offset,
limit=limit,
csrf_token=''
)
return self.request('POST', path, params).get('recommend', [])
    # Personal FM
def personal_fm(self):
path = '/weapi/v1/radio/get'
return self.request('POST', path).get('data', [])
# like
def fm_like(self, songid, like=True, time=25, alg='itembased'):
path = '/weapi/radio/like'
params = dict(
alg=alg,
trackId=songid,
like='true' if like else 'false',
time=time
)
return self.request('POST', path, params)['code'] == 200
# FM trash
def fm_trash(self, songid, time=25, alg='RT'):
path = '/weapi/radio/trash/add'
params = dict(
songId=songid,
alg=alg,
time=time,
)
return self.request('POST', path, params)['code'] == 200
    # Search songs(1), artists(100), albums(10), playlists(1000), users(1002) -- see stype
def search(self, keywords, stype=1, offset=0, total='true', limit=50):
path = '/weapi/search/get'
params = dict(
s=keywords,
type=stype,
offset=offset,
total=total,
limit=limit
)
return self.request('POST', path, params).get('result', {})
    # Newly released albums
def new_albums(self, offset=0, limit=50):
path = '/weapi/album/new'
params = dict(
area='ALL',
offset=offset,
total=True,
limit=limit,
)
return self.request('POST', path, params).get('albums', [])
    # Curated playlists, hot||new http://music.163.com/#/discover/playlist/
def top_playlists(self, category='全部', order='hot', offset=0, limit=50):
path = '/weapi/playlist/list'
params = dict(
cat=category,
order=order,
offset=offset,
total='true',
limit=limit
)
return self.request('POST', path, params).get('playlists', [])
def playlist_catelogs(self):
path = '/weapi/playlist/catalogue'
return self.request('POST', path)
    # Playlist detail
def playlist_detail(self, playlist_id):
path = '/weapi/v3/playlist/detail'
params = dict(
id=playlist_id,
total='true',
limit=1000,
n=1000,
offest=0
)
        # add an 'os' field to the request cookies
custom_cookies = dict(
os=platform.system()
)
return self.request('POST', path, params, {'code': -1}, custom_cookies).get('playlist', {}).get('tracks', [])
    # Top artists http://music.163.com/#/discover/artist/
def top_artists(self, offset=0, limit=100):
path = '/weapi/artist/top'
params = dict(
offset=offset,
total=True,
limit=limit
)
return self.request('POST', path, params).get('artists', [])
    # Top songs http://music.163.com/discover/toplist?id=
def top_songlist(self, idx=0, offset=0, limit=100):
playlist_id = TOP_LIST_ALL[idx][1]
return self.playlist_detail(playlist_id)
    # Songs of a given artist
def artists(self, artist_id):
path = '/weapi/v1/artist/{}'.format(artist_id)
return self.request('POST', path).get('hotSongs', [])
def get_artist_album(self, artist_id, offset=0, limit=50):
path = '/weapi/artist/albums/{}'.format(artist_id)
params = dict(
offset=offset,
total=True,
limit=limit
)
return self.request('POST', path, params).get('hotAlbums', [])
# album id --> song id set
def album(self, album_id):
path = '/weapi/v1/album/{}'.format(album_id)
return self.request('POST', path).get('songs', [])
def song_comments(self, music_id, offset=0, total='false', limit=100):
path = '/weapi/v1/resource/comments/R_SO_4_{}/'.format(music_id)
params = dict(
rid=music_id,
offset=offset,
total=total,
limit=limit
)
return self.request('POST', path, params)
# song ids --> song urls ( details )
def songs_detail(self, ids):
path = '/weapi/v3/song/detail'
params = dict(
c=json.dumps([{'id': _id} for _id in ids]),
ids=json.dumps(ids),
)
return self.request('POST', path, params).get('songs', [])
def songs_url(self, ids):
quality = Config().get('music_quality')
rate_map = {
0: 320000,
1: 192000,
2: 128000
}
path = '/weapi/song/enhance/player/url'
params = dict(
ids=ids,
br=rate_map[quality]
)
return self.request('POST', path, params).get('data', [])
# lyric http://music.163.com/api/song/lyric?os=osx&id= &lv=-1&kv=-1&tv=-1
def song_lyric(self, music_id):
path = '/weapi/song/lyric'
params = dict(
os='osx',
id=music_id,
lv=-1,
kv=-1,
tv=-1
)
lyric = self.request('POST', path, params).get(
'lrc', {}).get('lyric', [])
if not lyric:
return []
else:
return lyric.split('\n')
def song_tlyric(self, music_id):
path = '/weapi/song/lyric'
params = dict(
os='osx',
id=music_id,
lv=-1,
kv=-1,
tv=-1
)
lyric = self.request('POST', path, params).get(
'tlyric', {}).get('lyric', [])
if not lyric:
return []
else:
return lyric.split('\n')
    # Hottest today(0), hottest this week(10), hottest of all time(20), newest programs(30)
def djchannels(self, offset=0, limit=50):
path = '/weapi/djradio/hot/v1'
params = dict(
limit=limit,
offset=offset
)
channels = self.request('POST', path, params).get('djRadios', [])
return channels
def djprograms(self, radio_id, asc=False, offset=0, limit=50):
path = '/weapi/dj/program/byradio'
params = dict(
asc=asc,
radioId=radio_id,
offset=offset,
limit=limit
)
programs = self.request('POST', path, params).get('programs', [])
return [p['mainSong'] for p in programs]
    # Fetch the latest released version from PyPI
def get_version(self):
action = 'https://pypi.org/pypi/macmusicbox/json'
try:
return requests.get(action).json()
except requests.exceptions.RequestException as e:
log.error(e)
return {}
def dig_info(self, data, dig_type):
if not data:
return []
if dig_type == 'songs' or dig_type == 'fmsongs':
urls = self.songs_url([s['id'] for s in data])
timestamp = time.time()
            # The id order of the urls returned by the api does not match the
            # id order of data, so build an id -> index cache over the returned
            # urls to look up each id's url while keeping data's order intact.
url_id_index = {}
for index, url in enumerate(urls):
url_id_index[url['id']] = index
for s in data:
url_index = url_id_index.get(s['id'])
if url_index is None:
log.error("can't get song url, id: %s", s['id'])
continue
s['url'] = urls[url_index]['url']
s['br'] = urls[url_index]['br']
s['expires'] = urls[url_index]['expi']
s['get_time'] = timestamp
return Parse.songs(data)
elif dig_type == 'refresh_urls':
urls_info = self.songs_url(data)
timestamp = time.time()
songs = []
for url_info in urls_info:
song = {}
song['song_id'] = url_info['id']
song['mp3_url'] = url_info['url']
song['expires'] = url_info['expi']
song['get_time'] = timestamp
songs.append(song)
return songs
elif dig_type == 'artists':
return Parse.artists(data)
elif dig_type == 'albums':
return Parse.albums(data)
elif dig_type == 'playlists' or dig_type == 'top_playlists':
return Parse.playlists(data)
elif dig_type == 'playlist_classes':
return list(PLAYLIST_CLASSES.keys())
elif dig_type == 'playlist_class_detail':
return PLAYLIST_CLASSES[data]
else:
raise ValueError('Invalid dig type')
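# Illustrative sketch, not part of the original module: a minimal use of the
# wrapper above. It assumes a cookie file already exists at Storage().cookie_path
# (NetEase.__init__ loads it unconditionally) and that the NetEase endpoints are
# reachable; the search keyword is arbitrary.
if __name__ == '__main__':
    netease = NetEase()
    result = netease.search('hello', stype=1, limit=5)
    for song in result.get('songs', []):
        print(song['id'], song['name'])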
| 32.140575
| 150
| 0.505716
|
d65c3318009b39e8f9f4e8fee874e2eb9c6c8dfc
| 4,371
|
py
|
Python
|
sigal/utils.py
|
fidergo-stephane-gourichon/sigal
|
b1f2e947700e618425e170e8758b1fbb82c91acb
|
[
"MIT"
] | null | null | null |
sigal/utils.py
|
fidergo-stephane-gourichon/sigal
|
b1f2e947700e618425e170e8758b1fbb82c91acb
|
[
"MIT"
] | null | null | null |
sigal/utils.py
|
fidergo-stephane-gourichon/sigal
|
b1f2e947700e618425e170e8758b1fbb82c91acb
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2011-2020 - Simon Conseil
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import shutil
from urllib.parse import quote
from markdown import Markdown
from markupsafe import Markup
VIDEO_MIMES = {'.mp4': 'video/mp4',
'.webm': 'video/webm',
'.ogv': 'video/ogg'}
MD = None
class Devnull:
"""'Black hole' for output that should not be printed"""
def write(self, *_):
pass
def flush(self, *_):
pass
def copy(src, dst, symlink=False, rellink=False):
"""Copy or symlink the file."""
func = os.symlink if symlink else shutil.copy2
if symlink and os.path.lexists(dst):
os.remove(dst)
if rellink: # relative symlink from dst
func(os.path.relpath(src, os.path.dirname(dst)), dst)
else:
try:
func(src, dst)
except PermissionError:
# this can happen if the file is not writable, so we try to remove
# it first
os.remove(dst)
func(src, dst)
def check_or_create_dir(path):
"Create the directory if it does not exist"
if not os.path.isdir(path):
os.makedirs(path)
def url_from_path(path):
"""Transform path to url, converting backslashes to slashes if needed."""
if os.sep != '/':
path = '/'.join(path.split(os.sep))
return quote(path)
def read_markdown(filename):
"""Reads markdown file, converts output and fetches title and meta-data for
further processing.
"""
global MD
    # Use the utf-8-sig codec to strip the BOM if present. This has to happen
    # here, before the text is fed to the markdown parser (which expects plain
    # utf-8).
with open(filename, encoding='utf-8-sig') as f:
text = f.read()
if MD is None:
MD = Markdown(extensions=['markdown.extensions.meta',
'markdown.extensions.tables'],
output_format='html5')
else:
MD.reset()
# When https://github.com/Python-Markdown/markdown/pull/672
# will be available, this can be removed.
MD.Meta = {}
# Mark HTML with Markup to prevent jinja2 autoescaping
output = {'description': Markup(MD.convert(text))}
try:
meta = MD.Meta.copy()
except AttributeError:
pass
else:
output['meta'] = meta
try:
output['title'] = MD.Meta['title'][0]
except KeyError:
pass
return output
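def _demo_read_markdown(path='album/index.md'):
    """Illustrative sketch, not part of the original module.

    For a file whose first lines are Markdown metadata, e.g.::

        Title: Holidays

        Some *album* description.

    read_markdown() returns roughly
    {'title': 'Holidays', 'meta': {'title': ['Holidays']},
     'description': Markup('<p>Some <em>album</em> description.</p>')}.
    The path above is hypothetical.
    """
    return read_markdown(path)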
def is_valid_html5_video(ext):
"""Checks if ext is a supported HTML5 video."""
return ext in VIDEO_MIMES.keys()
def get_mime(ext):
"""Returns mime type for extension."""
return VIDEO_MIMES[ext]
class cached_property:
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source:
https://github.com/pydanny/cached-property (BSD Licensed)
https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
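# Illustrative sketch, not part of the original module: typical use of the
# cached_property helper above. The _Album class is hypothetical.
if __name__ == '__main__':
    class _Album:
        def __init__(self, paths):
            self.paths = paths

        @cached_property
        def size(self):
            print('computing size once')
            return len(self.paths)

    album = _Album(['a.jpg', 'b.jpg'])
    print(album.size)  # prints "computing size once", then 2
    print(album.size)  # served from the instance __dict__, just 2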
| 30.354167
| 90
| 0.655914
|
52486be89738795f95cf21160c2e62f61cb1a1a0
| 4,542
|
py
|
Python
|
setup.py
|
th2-net/th2-grpc-codec
|
dd6d67b4bfc165589569b3924e54ea77ad4dc64c
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
th2-net/th2-grpc-codec
|
dd6d67b4bfc165589569b3924e54ea77ad4dc64c
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
th2-net/th2-grpc-codec
|
dd6d67b4bfc165589569b3924e54ea77ad4dc64c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020-2020 Exactpro (Exactpro Systems Limited)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from distutils.cmd import Command
from distutils.dir_util import copy_tree
from distutils.sysconfig import get_python_lib
from pathlib import Path
from shutil import rmtree
from pkg_resources import resource_filename
from setuptools import setup, find_packages
from setuptools.command.sdist import sdist
class ProtoGenerator(Command):
description = 'build protobuf modules'
user_options = [('strict-mode', 's', 'exit with non-zero value if the proto compiling fails')]
def initialize_options(self):
self.strict_mode = False
def finalize_options(self):
pass
def run(self):
proto_path = os.path.abspath('src/main/proto')
gen_path = os.path.abspath('src/gen/main/python')
if not os.path.exists(gen_path):
os.makedirs(gen_path)
proto_files = []
for root, _, files in os.walk(proto_path):
for filename in files:
if filename.endswith('.proto'):
proto_files.append(os.path.abspath(os.path.join(root, filename)))
protos = [('grpc_tools', '_proto')]
protos_include = [f'--proto_path={proto_path}'] + \
[f'--proto_path={resource_filename(x[0], x[1])}' for x in protos] + \
[f'--proto_path={get_python_lib()}']
from grpc_tools import protoc
for proto_file in proto_files:
command = ['grpc_tools.protoc'] + \
protos_include + \
[f'--python_out={gen_path}', f'--grpc_python_out={gen_path}'] + \
[f'--mypy_out={gen_path}'] + \
[proto_file]
if protoc.main(command) != 0:
if self.strict_mode:
raise Exception(f'error: {command} failed')
class CustomDist(sdist):
def run(self):
copy_tree(f'src/main/proto/{package_name}', package_name)
copy_tree(f'src/gen/main/python/{package_name}', package_name)
copy_tree(f'src/gen/main/services/python/{package_name}', package_name)
Path(f'{package_name}/__init__.py').touch()
Path(f'{package_name}/py.typed').touch()
def make_packages(root_dir):
for path in Path(root_dir).iterdir():
if path.is_dir():
path.joinpath('__init__.py').touch()
make_packages(path)
make_packages(package_name)
self.distribution.packages = [''] + find_packages(include=[package_name, f'{package_name}.*'])
self.distribution.package_data = {'': ['package_info.json'],
**dict.fromkeys(self.distribution.packages[1:],
['*.proto', 'py.typed', '*.pyi'])}
sdist.run(self)
rmtree(package_name, ignore_errors=True)
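# Illustrative note, not part of the original file: the minimal shape of the
# package_info.json read just below; both values here are hypothetical.
#
#     {
#         "package_name": "th2-grpc-codec",
#         "package_version": "0.1.0"
#     }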
with open('package_info.json', 'r') as file:
package_info = json.load(file)
package_name = package_info['package_name'].replace('-', '_')
package_version = package_info['package_version']
with open('README.md', 'r') as file:
long_description = file.read()
packages = [''] + find_packages(include=[package_name, f'{package_name}.*'])
package_data = {'': ['package_info.json'],
**dict.fromkeys(packages[1:], ['*.proto', 'py.typed', '*.pyi'])}
setup(
name=package_name,
version=package_version,
description=package_name,
long_description=long_description,
long_description_content_type='text/markdown',
author='TH2-devs',
author_email='th2-devs@exactprosystems.com',
url='https://github.com/th2-net/th2-grpc-codec',
license='Apache License 2.0',
python_requires='>=3.7',
install_requires=[
'th2-grpc-common~=3.9.0'
],
packages=packages,
package_data=package_data,
cmdclass={
'generate': ProtoGenerator,
'sdist': CustomDist
}
)
| 34.150376
| 102
| 0.623514
|
5d4864ce0459ae34c162523f8bc60c58ed9716b2
| 1,755
|
py
|
Python
|
training_dataset/vid/visual.py
|
Anonymous502/siamfda-for-eccv
|
72dff5c174b7ebe30c59a6e21bb6f06fdb06c3fb
|
[
"Apache-2.0"
] | 216
|
2020-03-17T03:29:15.000Z
|
2022-03-25T13:51:37.000Z
|
training_dataset/vid/visual.py
|
Anonymous502/siamfda-for-eccv
|
72dff5c174b7ebe30c59a6e21bb6f06fdb06c3fb
|
[
"Apache-2.0"
] | 64
|
2020-04-20T01:17:06.000Z
|
2022-01-05T07:08:33.000Z
|
training_dataset/vid/visual.py
|
Anonymous502/siamfda-for-eccv
|
72dff5c174b7ebe30c59a6e21bb6f06fdb06c3fb
|
[
"Apache-2.0"
] | 52
|
2020-05-09T12:43:33.000Z
|
2022-03-23T11:38:38.000Z
|
from os.path import join
from os import listdir
import cv2
import numpy as np
import glob
import xml.etree.ElementTree as ET
visual = True
color_bar = np.random.randint(0, 255, (90, 3))
VID_base_path = './ILSVRC2015'
ann_base_path = join(VID_base_path, 'Annotations/VID/train/')
img_base_path = join(VID_base_path, 'Data/VID/train/')
sub_sets = sorted({'a', 'b', 'c', 'd', 'e'})
for sub_set in sub_sets:
sub_set_base_path = join(ann_base_path, sub_set)
videos = sorted(listdir(sub_set_base_path))
for vi, video in enumerate(videos):
print('subset: {} video id: {:04d} / {:04d}'.format(sub_set, vi, len(videos)))
video_base_path = join(sub_set_base_path, video)
xmls = sorted(glob.glob(join(video_base_path, '*.xml')))
for xml in xmls:
f = dict()
xmltree = ET.parse(xml)
size = xmltree.findall('size')[0]
frame_sz = [int(it.text) for it in size]
objects = xmltree.findall('object')
if visual:
im = cv2.imread(xml.replace('xml', 'JPEG').replace('Annotations', 'Data'))
for object_iter in objects:
trackid = int(object_iter.find('trackid').text)
bndbox = object_iter.find('bndbox')
bbox = [int(bndbox.find('xmin').text), int(bndbox.find('ymin').text),
int(bndbox.find('xmax').text), int(bndbox.find('ymax').text)]
if visual:
pt1 = (int(bbox[0]), int(bbox[1]))
pt2 = (int(bbox[2]), int(bbox[3]))
cv2.rectangle(im, pt1, pt2, color_bar[trackid].tolist(), 3)
if visual:
cv2.imshow('img', im)
cv2.waitKey(1)
print('done!')
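# Illustrative note, not part of the original script: the directory layout it
# expects under VID_base_path, as implied by the path joins above. The 'a'..'e'
# sub-folders are whatever the dataset was arranged into locally.
#
#     ILSVRC2015/Annotations/VID/train/<a..e>/<video>/<frame>.xml
#     ILSVRC2015/Data/VID/train/<a..e>/<video>/<frame>.JPEG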
| 39
| 90
| 0.57208
|
162936782968539bedef75865beddd3aaadb4ab3
| 24,699
|
py
|
Python
|
mmhuman3d/core/visualization/visualize_keypoints2d.py
|
ttxskk/mmhuman3d
|
f6d39e24a2d5cc216448fc3bd82832ff45eee436
|
[
"Apache-2.0"
] | null | null | null |
mmhuman3d/core/visualization/visualize_keypoints2d.py
|
ttxskk/mmhuman3d
|
f6d39e24a2d5cc216448fc3bd82832ff45eee436
|
[
"Apache-2.0"
] | null | null | null |
mmhuman3d/core/visualization/visualize_keypoints2d.py
|
ttxskk/mmhuman3d
|
f6d39e24a2d5cc216448fc3bd82832ff45eee436
|
[
"Apache-2.0"
] | null | null | null |
import glob
import os
import os.path as osp
import shutil
import warnings
from pathlib import Path
from typing import Iterable, List, Optional, Tuple, Union
import cv2
import numpy as np
from tqdm import tqdm
from mmhuman3d.core.conventions.keypoints_mapping import KEYPOINTS_FACTORY
from mmhuman3d.core.conventions.keypoints_mapping.human_data import (
HUMAN_DATA_LIMBS_INDEX,
HUMAN_DATA_PALETTE,
)
from mmhuman3d.utils.ffmpeg_utils import images_to_video, video_to_images
from mmhuman3d.utils.keypoint_utils import get_different_colors, search_limbs
from mmhuman3d.utils.path_utils import (
Existence,
check_input_path,
check_path_existence,
check_path_suffix,
prepare_output_path,
)
def _plot_kp2d_frame(kp2d_person: np.ndarray,
canvas: np.ndarray,
limbs: Union[list, dict,
np.ndarray] = HUMAN_DATA_LIMBS_INDEX,
palette: Optional[Union[dict, np.ndarray]] = None,
draw_bbox: bool = False,
with_number: bool = False,
font_size: Union[float, int] = 0.5,
disable_limbs: bool = False) -> np.ndarray:
"""Plot a single frame(array) with keypoints, limbs, bbox, index.
Args:
kp2d_person (np.ndarray): `np.ndarray` shape of (J * 2).
canvas (np.ndarray): cv2 image, (H * W * 3) array.
limbs (Union[list, dict, np.ndarray], optional): limbs in form of
`dict` or 2-dimensional `list` or `np.ndarray` of shape
(num_limb, 2).
`dict` is used mainly for function `visualize_kp2d`, you can also
get the limbs by function `search_limbs`.
Defaults to `HUMAN_DATA_LIMBS_INDEX`.
palette (Optional[Union[dict, np.ndarray, list]], optional):
Pass an (1, 3) `np.ndarray` or `list` [B, G, R] if want the whole
limbs and keypoints will be in same color.
Pass `None` to use our colorful palette.
Pass an (num_limb, 3) `np.ndarray` to get each limb your specific
color.
`dict` is used mainly for function `visualize_kp2d`, you can also
get the palette by function `search_limbs`.
Defaults to `HUMAN_DATA_PALETTE`.
        draw_bbox (bool, optional): whether to draw bounding boxes.
            Defaults to False.
        with_number (bool, optional): whether to draw index numbers.
            Defaults to False.
        font_size (Union[float, int], optional): the font size of the index.
            Defaults to 0.5.
        disable_limbs (bool, optional): whether to disable drawing limbs.
            Defaults to False.
Returns:
np.ndarray: opencv image of shape (H * W * 3).
"""
# slice the kp2d array
kp2d_person = kp2d_person.copy()
if kp2d_person.shape[-1] >= 3:
        kp2d_person = kp2d_person[..., :2]
        warnings.warn(
            'The input array has more than 2-dimensional coordinates, will '
            'keep only the first two columns of the last axis. The new '
            f'array shape: {kp2d_person.shape}')
if kp2d_person.ndim == 3 and kp2d_person.shape[0] == 1:
kp2d_person = kp2d_person[0]
assert kp2d_person.ndim == 2 and kp2d_person.shape[
-1] == 2, f'Wrong input array shape {kp2d_person.shape}, \
should be (num_kp, 2)'
if draw_bbox:
bbox = _get_bbox(kp2d_person, canvas, expand=True)
else:
bbox = None
# determine the limb connections and palette
if not disable_limbs:
if isinstance(limbs, list):
limbs = {'body': limbs}
elif isinstance(limbs, np.ndarray):
limbs = {'body': limbs.reshape(-1, 2).astype(np.int32).tolist()}
else:
assert set(limbs.keys()).issubset(HUMAN_DATA_LIMBS_INDEX)
if palette is None:
palette = {'body': None}
elif isinstance(palette, dict):
assert set(palette.keys()) == set(limbs.keys())
else:
limbs = {'body': None}
# draw by part to specify the thickness and color
for part_name, part_limbs in limbs.items():
# scatter_points_index means the limb end points
if not disable_limbs:
scatter_points_index = list(
set(np.array([part_limbs]).reshape(-1).tolist()))
else:
scatter_points_index = list(range(len(kp2d_person)))
if isinstance(palette, dict) and part_name == 'body':
thickness = 2
radius = 3
color = get_different_colors(len(scatter_points_index))
elif disable_limbs and palette is None:
radius = 2
color = get_different_colors(len(scatter_points_index))
else:
thickness = 2
radius = 2
if isinstance(palette, np.ndarray):
color = palette.astype(np.int32)
elif isinstance(palette, dict):
color = np.array(palette[part_name]).astype(np.int32)
elif isinstance(palette, list):
color = np.array(palette).reshape(-1, 3).astype(np.int32)
if not disable_limbs:
for limb_index, limb in enumerate(part_limbs):
limb_index = min(limb_index, len(color) - 1)
cv2.line(
canvas,
tuple(kp2d_person[limb[0]].astype(np.int32)),
tuple(kp2d_person[limb[1]].astype(np.int32)),
color=tuple(color[limb_index].tolist()),
thickness=thickness)
# draw the points inside the image region
for index in scatter_points_index:
x, y = kp2d_person[index, :2]
if np.isnan(x) or np.isnan(y):
continue
if 0 <= x < canvas.shape[1] and 0 <= y < canvas.shape[0]:
if disable_limbs:
point_color = color[index].tolist()
else:
point_color = color[min(color.shape[0] - 1,
len(scatter_points_index) -
1)].tolist()
cv2.circle(
canvas, (int(x), int(y)),
radius,
point_color,
thickness=-1)
if with_number:
cv2.putText(
canvas, str(index), (int(x), int(y)),
cv2.FONT_HERSHEY_SIMPLEX, font_size,
np.array([255, 255, 255]).astype(np.int32).tolist(), 2)
# draw the bboxes
if bbox is not None:
bbox = bbox.astype(np.int32)
cv2.rectangle(canvas, (bbox[0], bbox[2]), (bbox[1], bbox[3]),
(0, 255, 255), 1)
return canvas
def _get_bbox(keypoint_np: np.ndarray,
img_mat: Optional[np.ndarray] = None,
expand: bool = False):
"""get bbox of kp2d."""
x_max = np.max(keypoint_np[:, 0])
x_min = np.min(keypoint_np[:, 0])
y_max = np.max(keypoint_np[:, 1])
y_min = np.min(keypoint_np[:, 1])
if expand and img_mat is not None:
x_expand = (x_max - x_min) * 0.1
y_expand = (y_max - y_min) * 0.1
x_min = max(0, x_min - x_expand)
x_max = min(img_mat.shape[1], x_max + x_expand)
y_min = max(0, y_min - y_expand)
y_max = min(img_mat.shape[0], y_max + y_expand)
return np.asarray([x_min, x_max, y_min, y_max])
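# Illustrative sketch, not part of the original module: _get_bbox on a tiny
# keypoint array; with expand=False no image is needed.
#
#     _get_bbox(np.array([[10., 20.], [30., 60.]]))
#     -> array([10., 30., 20., 60.])   # [x_min, x_max, y_min, y_max]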
def _prepare_limb_palette(limbs,
palette,
pop_parts,
data_source,
mask,
search_limbs_func=search_limbs):
"""Prepare limbs and their palette for plotting.
Args:
limbs (Union[np.ndarray, List[int]]):
The preset limbs. This option is for free skeletons like BVH file.
In most cases, it's set to None,
this function will search a result for limbs automatically.
palette (Iterable):
The preset palette for limbs. Specified palette,
three int represents (B, G, R). Should be tuple or list.
In most cases, it's set to None,
a palette will be generated with the result of search_limbs.
pop_parts (Iterable[str]):
The body part names you do not
want to visualize.
When it's none, nothing will be removed.
data_source (str):
Data source type.
mask (Union[list, np.ndarray):
A mask to mask out the incorrect points.
Returns:
Tuple[dict, dict]: (limbs_target, limbs_palette).
"""
if limbs is not None:
limbs_target, limbs_palette = {
'body': limbs.tolist() if isinstance(limbs, np.ndarray) else limbs
}, get_different_colors(len(limbs))
else:
limbs_target, limbs_palette = search_limbs_func(
data_source=data_source, mask=mask)
if palette:
limbs_palette = np.array(palette, dtype=np.uint8)[None]
# check and pop the pop_parts
assert set(pop_parts).issubset(
HUMAN_DATA_PALETTE
), f'wrong part_names in pop_parts, supported parts are\
{set(HUMAN_DATA_PALETTE.keys())}'
for part_name in pop_parts:
if part_name in limbs_target:
limbs_target.pop(part_name)
limbs_palette.pop(part_name)
return limbs_target, limbs_palette
def _prepare_output_path(output_path, overwrite):
"""Prepare output path."""
prepare_output_path(
output_path,
allowed_suffix=['.mp4', ''],
tag='output video',
path_type='auto',
overwrite=overwrite)
# output_path is a directory
if check_path_suffix(output_path, ['']):
temp_folder = output_path
os.makedirs(temp_folder, exist_ok=True)
else:
temp_folder = output_path + '_temp_images'
if check_path_existence(temp_folder, 'dir') in [
Existence.DirectoryExistNotEmpty, Existence.DirectoryExistEmpty
]:
shutil.rmtree(temp_folder)
os.makedirs(temp_folder, exist_ok=True)
return temp_folder
def _check_frame_path(frame_list):
"""Check frame path."""
for frame_path in frame_list:
if check_path_existence(frame_path, 'file') != Existence.FileExist or \
not check_path_suffix(frame_path, ['.png', '.jpg', '.jpeg']):
raise FileNotFoundError(
f'The frame should be .png or .jp(e)g: {frame_path}')
def _check_temp_path(temp_folder, frame_list, overwrite):
"""Check temp frame folder path."""
if not overwrite and frame_list is not None and len(frame_list) > 0:
if Path(temp_folder).absolute() == \
Path(frame_list[0]).parent.absolute():
raise FileExistsError(
f'{temp_folder} exists (set --overwrite to overwrite).')
class _CavasProducer:
"""Prepare background canvas, pure white if not set."""
def __init__(self,
frame_list,
resolution,
kp2d,
image_array=None,
default_scale=1.5):
"""Initialize a canvas writer."""
# check the origin background frames
if frame_list is not None:
_check_frame_path(frame_list)
self.frame_list = frame_list
else:
self.frame_list = []
self.resolution = resolution
self.kp2d = kp2d
# with numpy array frames
self.image_array = image_array
if self.image_array is not None:
self.auto_resolution = self.image_array.shape[1:3]
elif len(self.frame_list) > 1 and \
check_path_existence(
self.frame_list[0], 'file') == Existence.FileExist:
tmp_image_array = cv2.imread(self.frame_list[0])
self.auto_resolution = tmp_image_array.shape[:2]
else:
self.auto_resolution = [
int(np.max(kp2d) * default_scale),
int(np.max(kp2d) * default_scale)
]
if self.image_array is None:
self.len = len(self.frame_list)
else:
self.len = self.image_array.shape[0]
def get_data(self, frame_index):
"""Get frame data from frame_list of image_array."""
# frame file exists, resolution not set
if frame_index < self.len and self.resolution is None:
if self.image_array is not None:
canvas = self.image_array[frame_index]
else:
canvas = cv2.imread(self.frame_list[frame_index])
kp2d_frame = self.kp2d[frame_index]
# no frame file, resolution has been set
elif frame_index >= self.len and self.resolution is not None:
canvas = np.ones((self.resolution[0], self.resolution[1], 3),
dtype=np.uint8) * 255
kp2d_frame = self.kp2d[frame_index]
# frame file exists, resolution has been set
elif frame_index < self.len and self.resolution is not None:
if self.image_array is not None:
canvas = self.image_array[frame_index]
else:
canvas = cv2.imread(self.frame_list[frame_index])
w_scale = self.resolution[1] / canvas.shape[1]
h_scale = self.resolution[0] / canvas.shape[0]
canvas = cv2.resize(canvas,
(self.resolution[1], self.resolution[0]),
cv2.INTER_CUBIC)
kp2d_frame = np.array([[w_scale, h_scale]
]) * self.kp2d[frame_index]
# no frame file, no resolution
else:
canvas = np.ones(
(self.auto_resolution[0], self.auto_resolution[1], 3),
dtype=np.uint8) * 255
kp2d_frame = self.kp2d[frame_index]
return canvas, kp2d_frame
def update_frame_list(frame_list, origin_frames, img_format, start, end):
"""Update frame list if have origin_frames."""
input_temp_folder = None
# choose in frame_list or origin_frames
if frame_list is None and origin_frames is None:
print('No background provided, will use pure white background.')
elif frame_list is not None and origin_frames is not None:
warnings.warn('Redundant input, will only use frame_list.')
origin_frames = None
if origin_frames is not None:
check_input_path(
input_path=origin_frames,
allowed_suffix=['.mp4', '.gif', ''],
tag='origin frames',
path_type='auto')
if Path(origin_frames).is_file():
input_temp_folder = origin_frames + '_temp_images/'
video_to_images(
origin_frames, input_temp_folder, start=start, end=end)
frame_list = glob.glob(osp.join(input_temp_folder, '*.png'))
frame_list.sort()
else:
if img_format is None:
frame_list = []
for im_name in os.listdir(origin_frames):
if Path(im_name).suffix.lower() in [
'.png', '.jpg', '.jpeg'
]:
frame_list.append(osp.join(origin_frames, im_name))
else:
frame_list = []
for index in range(start, end):
frame_path = osp.join(origin_frames, img_format % index)
if osp.exists(frame_path):
frame_list.append(frame_path)
frame_list.sort()
return frame_list, input_temp_folder
def visualize_kp2d(
kp2d: np.ndarray,
output_path: Optional[str] = None,
frame_list: Optional[List[str]] = None,
origin_frames: Optional[str] = None,
image_array: Optional[np.ndarray] = None,
limbs: Optional[Union[np.ndarray, List[int]]] = None,
palette: Optional[Iterable[int]] = None,
data_source: str = 'coco',
mask: Optional[Union[list, np.ndarray]] = None,
img_format: str = '%06d.png',
start: int = 0,
end: Optional[int] = None,
overwrite: bool = False,
with_file_name: bool = True,
resolution: Optional[Union[Tuple[int, int], list]] = None,
fps: Union[float, int] = 30,
draw_bbox: bool = False,
with_number: bool = False,
pop_parts: Iterable[str] = None,
disable_tqdm: bool = False,
disable_limbs: bool = False,
return_array: Optional[bool] = False,
keypoints_factory: dict = KEYPOINTS_FACTORY
) -> Union[None, np.ndarray]:
"""Visualize 2d keypoints to a video or into a folder of frames.
Args:
kp2d (np.ndarray): should be array of shape (f * J * 2)
or (f * n * J * 2)]
output_path (str): output video path or image folder.
frame_list (Optional[List[str]], optional): list of origin background
            frame paths; each element should be an image path like
            `*.jpg` or `*.png`. Higher priority than `origin_frames`.
            Use this when your file names are hard to sort or you only want to
            render a small number of frames.
Defaults to None.
origin_frames (Optional[str], optional): origin background frame path,
could be `.mp4`, `.gif`(will be sliced into a folder) or an image
folder. Lower priority than `frame_list`.
Defaults to None.
limbs (Optional[Union[np.ndarray, List[int]]], optional):
if not specified, the limbs will be searched by search_limbs,
this option is for free skeletons like BVH file.
Defaults to None.
palette (Iterable, optional): specified palette, three int represents
(B, G, R). Should be tuple or list.
Defaults to None.
data_source (str, optional): data source type. Defaults to 'coco'.
mask (Optional[Union[list, np.ndarray]], optional):
mask to mask out the incorrect point.
Pass a `np.ndarray` of shape (J,) or `list` of length J.
Defaults to None.
        img_format (str, optional): input image format. Defaults to '%06d.png'.
start (int, optional): start frame index. Defaults to 0.
end (int, optional): end frame index. Exclusive.
Could be positive int or negative int or None.
None represents include all the frames.
overwrite (bool, optional): whether replace the origin frames.
Defaults to False.
with_file_name (bool, optional): whether write origin frame name on
the images. Defaults to True.
resolution (Optional[Union[Tuple[int, int], list]], optional):
(height, width) of the output video
will be the same size as the original images if not specified.
Defaults to None.
fps (Union[float, int], optional): fps. Defaults to 30.
        draw_bbox (bool, optional): whether to draw bounding boxes.
            Defaults to False.
        with_number (bool, optional): whether to draw index numbers.
            Defaults to False.
pop_parts (Iterable[str], optional): The body part names you do not
            want to visualize. Supported parts are ['left_eye', 'right_eye',
            'nose', 'mouth', 'face', 'left_hand', 'right_hand'].
            Defaults to None.
disable_tqdm (bool, optional):
Whether to disable the entire progressbar wrapper.
Defaults to False.
        disable_limbs (bool, optional): whether to disable drawing limbs.
            Defaults to False.
        return_array (bool, optional): Whether to return images as an opencv
            array. Defaults to False.
keypoints_factory (dict, optional): Dict of all the conventions.
Defaults to KEYPOINTS_FACTORY.
Raises:
FileNotFoundError: check output video path.
FileNotFoundError: check input frame paths.
Returns:
Union[None, np.ndarray].
"""
# check the input array shape, reshape to (num_frames, num_person, J, 2)
kp2d = kp2d[..., :2].copy()
if kp2d.ndim == 3:
kp2d = kp2d[:, np.newaxis]
assert kp2d.ndim == 4
num_frames, num_person = kp2d.shape[0], kp2d.shape[1]
# slice the input array temporally
end = (min(num_frames - 1, end) +
num_frames) % num_frames if end is not None else num_frames
kp2d = kp2d[start:end]
if image_array is not None:
origin_frames = None
frame_list = None
return_array = True
input_temp_folder = None
else:
frame_list, input_temp_folder = update_frame_list(
frame_list, origin_frames, img_format, start, end)
if frame_list is not None:
num_frames = min(len(frame_list), num_frames)
kp2d = kp2d[:num_frames]
# check output path
if output_path is not None:
output_temp_folder = _prepare_output_path(output_path, overwrite)
# check whether temp_folder will overwrite frame_list by accident
_check_temp_path(output_temp_folder, frame_list, overwrite)
else:
output_temp_folder = None
# check data_source & mask
if data_source not in keypoints_factory:
raise ValueError('Wrong data_source. Should choose in'
f'{list(keypoints_factory.keys())}')
if mask is not None:
if isinstance(mask, list):
mask = np.array(mask).reshape(-1)
assert mask.shape == (
len(keypoints_factory[data_source]),
), f'mask length should fit with keypoints number \
{len(keypoints_factory[data_source])}'
# search the limb connections and palettes from superset smplx
# check and pop the pop_parts
if pop_parts is None:
pop_parts = []
if disable_limbs:
limbs_target, limbs_palette = None, None
else:
limbs_target, limbs_palette = _prepare_limb_palette(
limbs, palette, pop_parts, data_source, mask)
canvas_producer = _CavasProducer(frame_list, resolution, kp2d, image_array)
out_image_array = []
# start plotting by frame
for frame_index in tqdm(range(kp2d.shape[0]), disable=disable_tqdm):
canvas, kp2d_frame = canvas_producer.get_data(frame_index)
# start plotting by person
for person_index in range(num_person):
if num_person >= 2 and not disable_limbs:
limbs_palette = get_different_colors(
num_person)[person_index].reshape(1, 3)
canvas = _plot_kp2d_frame(
kp2d_person=kp2d_frame[person_index],
canvas=canvas,
limbs=limbs_target,
palette=limbs_palette,
draw_bbox=draw_bbox,
with_number=with_number,
font_size=0.5,
disable_limbs=disable_limbs)
if with_file_name and frame_list is not None:
h, w, _ = canvas.shape
if frame_index <= len(frame_list) - 1:
cv2.putText(
canvas, str(Path(frame_list[frame_index]).name),
(w // 2, h // 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5 * h / 500,
np.array([255, 255, 255]).astype(np.int32).tolist(), 2)
if output_path is not None:
# write the frame with opencv
if frame_list is not None and check_path_suffix(output_path, ['']):
frame_path = os.path.join(output_temp_folder,
Path(frame_list[frame_index]).name)
img_format = None
else:
frame_path = \
os.path.join(output_temp_folder, f'{frame_index:06d}.png')
img_format = '%06d.png'
cv2.imwrite(frame_path, canvas)
if return_array:
out_image_array.append(canvas[None])
if input_temp_folder is not None:
shutil.rmtree(input_temp_folder)
# convert frames to video
if output_path is not None:
if check_path_suffix(output_path, ['.mp4']):
images_to_video(
input_folder=output_temp_folder,
output_path=output_path,
remove_raw_file=True,
img_format=img_format,
fps=fps)
if return_array:
out_image_array = np.concatenate(out_image_array)
return out_image_array
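# Illustrative sketch, not part of the original module: rendering a random
# two-person COCO-style keypoint sequence on a plain white background. The
# shapes follow the docstring above; the output path is hypothetical and the
# .mp4 export assumes ffmpeg is available.
if __name__ == '__main__':
    kp2d_demo = np.random.rand(30, 2, 17, 2) * 512  # (frames, persons, J, xy)
    visualize_kp2d(
        kp2d_demo,
        output_path='demo_kp2d.mp4',
        data_source='coco',
        resolution=(512, 512),
        overwrite=True,
        disable_tqdm=True)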
| 41.441275
| 79
| 0.585449
|
68711423708dd17744a7e8cd2e1a176b7f6f24e4
| 2,567
|
py
|
Python
|
netgrasp/utils/exclusive_lock.py
|
jeremyandrews/netgrasp
|
4ebc86c7023bc80415211925f972a1a78000bc54
|
[
"BSD-2-Clause-FreeBSD"
] | 34
|
2017-05-01T09:55:03.000Z
|
2021-02-21T19:40:22.000Z
|
netgrasp/utils/exclusive_lock.py
|
jeremyandrews/netgrasp
|
4ebc86c7023bc80415211925f972a1a78000bc54
|
[
"BSD-2-Clause-FreeBSD"
] | 23
|
2017-05-29T18:12:09.000Z
|
2017-08-13T10:20:12.000Z
|
netgrasp/utils/exclusive_lock.py
|
jeremyandrews/netgrasp
|
4ebc86c7023bc80415211925f972a1a78000bc54
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2017-07-13T22:05:38.000Z
|
2019-05-15T15:57:54.000Z
|
import fcntl
import os
import errno
import time
from netgrasp.utils import debug
from netgrasp.utils import simple_timer
class ExclusiveFileLock:
def __init__(self, ng, timeout, name):
self.ng = ng
self._lockfile = ng.database["lock"]
self._timeout = timeout
self._name = name
self._timer = None
self._fd = None
def __enter__(self):
try:
self._fd = os.open(self._lockfile, os.O_CREAT)
started = time.time()
while True:
self.ng.debugger.debug("grabbing lock: %s", (self._name,))
self._timer = simple_timer.Timer()
try:
fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
# We got the lock.
self.ng.debugger.debug("grabbed lock (took %.5f seconds): %s", (round(self._timer.elapsed(), 5), self._name))
self._timer = simple_timer.Timer()
return
except (OSError, IOError) as ex:
if ex.errno != errno.EAGAIN:
# Resource temporarily unavailable.
self.ng.debugger.warning("LOCK UNAVAILABLE: %s", (self._name,))
raise
elif self._timeout is not None and time.time() > (started + self._timeout):
# Exceeded timeout.
self.ng.debugger.warning("LOCK TIMEOUT: %s", (self._name,))
raise
# Briefly wait before trying the lock again.
time.sleep(0.05)
except Exception as e:
self.ng.debugger.dump_exception("ExclusiveFileLock.__enter__()", False)
def __exit__(self, *args):
try:
fcntl.flock(self._fd, fcntl.LOCK_UN)
held_lock = round(self._timer.elapsed(), 5)
if held_lock > 1:
# Holding the lock this long suggests a possible problem.
self.ng.debugger.warning("released lock (held %.5f seconds): %s", (held_lock, self._name))
else:
self.ng.debugger.debug("released lock (held %.5f seconds): %s", (held_lock, self._name))
os.close(self._fd)
self._timer = None
self._fd = None
try:
# Remove the lockfile if we can.
                os.unlink(self._lockfile)
            except OSError:
pass
except Exception as e:
self.ng.debugger.dump_exception("ExclusiveFileLock.__exit__()", False)
| 38.893939
| 129
| 0.53136
|
9aa7760a987eb6b9b656773e1f89ddbfa5caf11f
| 4,803
|
py
|
Python
|
examples/mnist-cnn.py
|
YesDrX/streamlit
|
1305566934152dfc5a086184d3854f1c4313a93a
|
[
"Apache-2.0"
] | 1
|
2020-09-20T11:18:09.000Z
|
2020-09-20T11:18:09.000Z
|
examples/mnist-cnn.py
|
YesDrX/streamlit
|
1305566934152dfc5a086184d3854f1c4313a93a
|
[
"Apache-2.0"
] | 1
|
2021-09-13T06:08:13.000Z
|
2021-09-13T06:08:13.000Z
|
examples/mnist-cnn.py
|
YesDrX/streamlit
|
1305566934152dfc5a086184d3854f1c4313a93a
|
[
"Apache-2.0"
] | 1
|
2020-03-29T11:54:31.000Z
|
2020-03-29T11:54:31.000Z
|
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of monitoring a simple neural net as it trains."""
import streamlit as st
from streamlit import config
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
from tensorflow import keras
import math
import numpy as np
import pandas as pd
import time
import tensorflow as tf
# Dynamically grow the memory used on the GPU.
# This option is also harmless on machines without a GPU.
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.log_device_placement = True
# https://kobkrit.com/using-allow-growth-memory-option-in-tensorflow-and-keras-dc8c8081bc96
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=tf_config))
class MyCallback(keras.callbacks.Callback):
def __init__(self, x_test):
self._x_test = x_test
def on_train_begin(self, logs=None):
st.header("Summary")
self._summary_chart = st.area_chart()
self._summary_stats = st.text("%8s : 0" % "epoch")
st.header("Training Log")
def on_epoch_begin(self, epoch, logs=None):
self._ts = time.time()
self._epoch = epoch
st.subheader("Epoch %s" % epoch)
self._epoch_chart = st.line_chart()
self._epoch_progress = st.info("No stats yet.")
self._epoch_summary = st.empty()
def on_batch_end(self, batch, logs=None):
if batch % 10 == 0:
rows = {"loss": [logs["loss"]], "accuracy": [logs["accuracy"]]}
self._epoch_chart.add_rows(rows)
if batch % 100 == 99:
rows = {"loss": [logs["loss"]], "accuracy": [logs["accuracy"]]}
self._summary_chart.add_rows(rows)
percent_complete = batch / self.params["steps"]
self._epoch_progress.progress(math.ceil(percent_complete * 100))
ts = time.time() - self._ts
self._epoch_summary.text(
"loss: %(loss)7.5f | accuracy: %(accuracy)7.5f | ts: %(ts)d"
% {"loss": logs["loss"], "accuracy": logs["accuracy"], "ts": ts}
)
def on_epoch_end(self, epoch, logs=None):
# st.write('**Summary**')
indices = np.random.choice(len(self._x_test), 36)
test_data = self._x_test[indices]
prediction = np.argmax(self.model.predict(test_data), axis=1)
st.image(1.0 - test_data, caption=prediction)
summary = "\n".join(
"%(k)8s : %(v)8.5f" % {"k": k, "v": v} for (k, v) in logs.items()
)
st.text(summary)
self._summary_stats.text(
"%(epoch)8s : %(epoch)s\n%(summary)s"
% {"epoch": epoch, "summary": summary}
)
st.title("MNIST CNN")
(x_train, y_train), (x_test, y_test) = mnist.load_data()
img_width = 28
img_height = 28
x_train = x_train.astype("float32")
x_train /= 255.0
x_test = x_test.astype("float32")
x_test /= 255.0
# reshape input data
x_train = x_train.reshape(x_train.shape[0], img_width, img_height, 1)
x_test = x_test.reshape(x_test.shape[0], img_width, img_height, 1)
# one hot encode outputs
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
num_classes = y_test.shape[1]
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# build model
model = Sequential()
layer_1_size = 10
epochs = 3
model.add(Conv2D(10, (5, 5), input_shape=(img_width, img_height, 1), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Conv2D(config.layer_2_size, (5, 5), input_shape=(img_width, img_height,1), activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(8, activation="relu"))
model.add(Dense(num_classes, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])
show_terminal_output = not config.get_option("server.liveSave")
model.fit(
x_train,
y_train,
validation_data=(x_test, y_test),
epochs=epochs,
verbose=show_terminal_output,
callbacks=[MyCallback(x_test)],
)
st.success("Finished training!")
# model.save("convnet.h5")
| 33.124138
| 106
| 0.681657
|
a8a3095306912b2e66edbe779bb236acc4dd2efb
| 12,211
|
py
|
Python
|
src/biome/text/helpers.py
|
ignacioct/biome-text
|
e4eab5fd4ea9115bd600f61e97429977053da2a5
|
[
"Apache-2.0"
] | null | null | null |
src/biome/text/helpers.py
|
ignacioct/biome-text
|
e4eab5fd4ea9115bd600f61e97429977053da2a5
|
[
"Apache-2.0"
] | null | null | null |
src/biome/text/helpers.py
|
ignacioct/biome-text
|
e4eab5fd4ea9115bd600f61e97429977053da2a5
|
[
"Apache-2.0"
] | null | null | null |
import copy
import functools
import inspect
import os
import os.path
import re
from inspect import Parameter
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
import spacy
import spacy.gold
import yaml
from allennlp.common import util
from allennlp.data import Token as AllenNLPToken
from allennlp.data.dataset_readers.dataset_utils import to_bioul
from spacy.tokens import Token as SpacyToken
from spacy.tokens.doc import Doc
from biome.text import environment
_INVALID_TAG_CHARACTERS = re.compile(r"[^-/\w\.]")
def yaml_to_dict(filepath: str) -> Dict[str, Any]:
"""Loads a yaml file into a data dictionary
Parameters
----------
filepath
Path to the yaml file
Returns
-------
dict
"""
with open(filepath) as yaml_content:
config = yaml.safe_load(yaml_content)
return config
def get_env_cuda_device() -> int:
"""Gets the cuda device from an environment variable.
This is necessary to activate a GPU if available
Returns
-------
cuda_device
The integer number of the CUDA device
"""
cuda_device = int(os.getenv(environment.CUDA_DEVICE, "-1"))
return cuda_device
def update_method_signature(
signature: inspect.Signature, to_method: Callable
) -> Callable:
"""Updates the signature of a method
Parameters
----------
signature
The signature with which to update the method
to_method
The method whose signature will be updated
Returns
-------
updated_method
"""
def wrapper(*args, **kwargs):
return to_method(*args, **kwargs)
wrapper.__signature__ = signature
wrapper.__doc__ = to_method.__doc__
return wrapper
def isgeneric(class_type: Type) -> bool:
"""Checks if a class type is a generic type (List[str] or Union[str, int]"""
return hasattr(class_type, "__origin__")
def is_running_on_notebook() -> bool:
"""Checks if code is running inside a jupyter notebook"""
try:
import IPython
return IPython.get_ipython().has_trait("kernel")
except (AttributeError, NameError, ModuleNotFoundError):
return False
def split_signature_params_by_predicate(
signature_function: Callable, predicate: Callable
) -> Tuple[List[Parameter], List[Parameter]]:
"""Splits parameters signature by defined boolean predicate function"""
signature = inspect.signature(signature_function)
parameters = list(
filter(
lambda p: p.name != "self"
and p.kind not in [Parameter.VAR_KEYWORD, Parameter.VAR_POSITIONAL],
signature.parameters.values(),
)
)
matches_group = list(filter(lambda p: predicate(p), parameters))
non_matches_group = list(filter(lambda p: not predicate(p), parameters))
return matches_group, non_matches_group
def sanitize_metric_name(name: str) -> str:
"""Sanitizes the name to comply with tensorboardX conventions when logging.
    Parameters
    ----------
name
Name of the metric
Returns
-------
sanitized_name
"""
if not name:
return name
new_name = _INVALID_TAG_CHARACTERS.sub("_", name)
new_name = new_name.lstrip("/")
return new_name
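# Hedged illustration (not part of the original module): characters outside
# [-/\w.] become "_" and leading slashes are stripped, e.g.
#   sanitize_metric_name("/train/my metric") -> "train/my_metric"
#   sanitize_metric_name("loss@step")        -> "loss_step"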
def save_dict_as_yaml(dictionary: dict, path: str) -> str:
"""Save a cfg dict to path as yaml
Parameters
----------
dictionary
Dictionary to be saved
path
Filesystem location where the yaml file will be saved
Returns
-------
path
Location of the yaml file
"""
dir_name = os.path.dirname(path)
    # Guard against paths relative to the current workdir (empty dirname),
    # e.g. `save_dict_as_yaml(cfg, "just_here.yml")`
if dir_name:
os.makedirs(dir_name, exist_ok=True)
with open(path, "w") as yml_file:
yaml.dump(dictionary, yml_file, default_flow_style=False, allow_unicode=True)
return path
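# Hedged illustration (not part of the original module; the path is arbitrary):
#   save_dict_as_yaml({"trainer": {"batch_size": 32}}, "/tmp/experiment.yml")
# creates missing parent directories and returns "/tmp/experiment.yml".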
def get_full_class_name(the_class: Type) -> str:
"""Given a type class return the full qualified class name """
# o.__module__ + "." + o.__class__.__qualname__ is an example in
# this context of H.L. Mencken's "neat, plausible, and wrong."
# Python makes no guarantees as to whether the __module__ special
# attribute is defined, so we take a more circumspect approach.
# Alas, the module name is explicitly excluded from __qualname__
# in Python 3.
module = the_class.__module__
if module is None or module == str.__class__.__module__:
return the_class.__name__ # Avoid reporting __builtin__
else:
return module + "." + the_class.__name__
def stringify(value: Any) -> Any:
"""Creates an equivalent data structure representing data values as string
Parameters
----------
value
Value to be stringified
Returns
-------
stringified_value
"""
if value is None or isinstance(value, str):
return value
if isinstance(value, dict):
        return {key: stringify(val) for key, val in value.items()}
if isinstance(value, Iterable):
return [stringify(v) for v in value]
return str(value)
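# Hedged illustration (not part of the original module):
#   stringify({"epochs": 3, "tags": [1, 2]}) -> {"epochs": "3", "tags": ["1", "2"]}
#   stringify(None)                          -> None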
def sanitize_for_params(x: Any) -> Any:
"""Sanitizes the input for a more flexible usage with AllenNLP's `.from_params()` machinery.
For now it is mainly used to transform numpy numbers to python types
Parameters
----------
x
The parameter passed on to `allennlp.common.FromParams.from_params()`
Returns
-------
sanitized_x
"""
# AllenNLP has a similar function (allennlp.common.util.sanitize) but it does not work for my purpose, since
# numpy types are checked only after the float type check, and:
# isinstance(numpy.float64(1), float) == True !!!
if isinstance(x, util.numpy.number):
return x.item()
elif isinstance(x, util.numpy.bool_):
# Numpy bool_ need to be converted to python bool.
return bool(x)
if isinstance(x, (str, float, int, bool)):
return x
elif isinstance(x, dict):
# Dicts need their values sanitized
return {key: sanitize_for_params(value) for key, value in x.items()}
# Lists and Tuples need their values sanitized
elif isinstance(x, list):
return [sanitize_for_params(x_i) for x_i in x]
elif isinstance(x, tuple):
return tuple(sanitize_for_params(x_i) for x_i in x)
# We include `to_json` function customize sanitization for user defined classes
elif hasattr(x, "to_json"):
return x.to_json()
return x
def sanitize_for_yaml(value: Any):
"""Sanitizes the value for a simple yaml output, that is classes only built-in types"""
if isinstance(value, list):
return [sanitize_for_yaml(v) for v in value]
if isinstance(value, tuple):
return tuple(sanitize_for_yaml(v) for v in value)
if isinstance(value, dict):
return {k: sanitize_for_yaml(v) for k, v in value.items()}
try:
yaml.dump(value)
    except Exception:
        # Fall back to a string representation.
        return str(value)
else:
return value
def span_labels_to_tag_labels(
labels: List[str], label_encoding: str = "BIO"
) -> List[str]:
"""Converts a list of span labels to tag labels following `spacy.gold.biluo_tags_from_offsets`
Parameters
----------
labels
Span labels to convert
label_encoding
The label format used for the tag labels
Returns
-------
tag_labels
"""
if label_encoding == "BIOUL":
converted_labels = [
f"{char}-{label}" for char in ["B", "I", "U", "L"] for label in labels
] + ["O"]
elif label_encoding == "BIO":
converted_labels = [
f"{char}-{label}" for char in ["B", "I"] for label in labels
] + ["O"]
else:
raise ValueError(
f"'{label_encoding}' is not a supported label encoding scheme."
)
return converted_labels
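# Hedged illustration (not part of the original module):
#   span_labels_to_tag_labels(["PER"], "BIO")   -> ["B-PER", "I-PER", "O"]
#   span_labels_to_tag_labels(["PER"], "BIOUL") -> ["B-PER", "I-PER", "U-PER", "L-PER", "O"]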
def bioul_tags_to_bio_tags(tags: List[str]) -> List[str]:
"""Converts BIOUL tags to BIO tags
Parameters
----------
tags
BIOUL tags to convert
Returns
-------
bio_tags
"""
return [tag.replace("L-", "I-", 1).replace("U-", "B-", 1) for tag in tags]
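# Hedged illustration (not part of the original module):
#   bioul_tags_to_bio_tags(["B-ORG", "L-ORG", "U-PER", "O"])
#   -> ["B-ORG", "I-ORG", "B-PER", "O"]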
def tags_from_offsets(
doc: Doc,
offsets: List[Dict],
label_encoding: Optional[str] = "BIOUL",
) -> List[str]:
"""Converts offsets to BIOUL or BIO tags using spacy's `gold.biluo_tags_from_offsets`.
Parameters
----------
doc
A spaCy Doc created with `text` and the backbone tokenizer
offsets
A list of dicts with start and end character index with respect to the doc, and the span label:
`{"start": int, "end": int, "label": str}`
label_encoding
The label encoding to be used: BIOUL or BIO
Returns
-------
tags (BIOUL or BIO)
"""
tags = spacy.gold.biluo_tags_from_offsets(
doc, [(offset["start"], offset["end"], offset["label"]) for offset in offsets]
)
if label_encoding == "BIO":
tags = bioul_tags_to_bio_tags(tags)
return tags
def offsets_from_tags(
doc: Doc,
tags: List[str],
label_encoding: Optional[str] = "BIOUL",
only_token_spans: bool = False,
) -> List[Dict]:
"""Converts BIOUL or BIO tags to offsets
Parameters
----------
doc
A spaCy Doc created with `text` and the backbone tokenizer
tags
A list of BIOUL or BIO tags
label_encoding
The label encoding of the tags: BIOUL or BIO
only_token_spans
If True, offsets contains only token index references. Default is False
Returns
-------
offsets
A list of dicts with start and end character/token index with respect to the doc and the span label:
`{"start": int, "end": int, "start_token": int, "end_token": int, "label": str}`
"""
# spacy.gold.offsets_from_biluo_tags surprisingly does not check this ...
if len(doc) != len(tags):
raise ValueError(
f"Number of tokens and tags must be the same, "
f"but 'len({list(doc)}) != len({tags})"
)
if label_encoding == "BIO":
tags = to_bioul(tags, encoding="BIO")
offsets = []
for start, end, label in spacy.gold.offsets_from_biluo_tags(doc, tags):
span = doc.char_span(start, end)
data = {
"start_token": span.start,
"end_token": span.end,
"label": label,
}
if not only_token_spans:
data.update({"start": start, "end": end})
offsets.append(data)
return offsets
def merge_dicts(source: Dict[str, Any], destination: Dict[str, Any]) -> Dict[str, Any]:
"""
    Merge two dictionaries recursively.
Examples
--------
>>> a = { 'first' : { 'all_rows' : { 'pass' : 'dog', 'number' : '1' } } }
>>> b = { 'first' : { 'all_rows' : { 'fail' : 'cat', 'number' : '5' } } }
>>> merge_dicts(b, a)
{'first': {'all_rows': {'pass': 'dog', 'number': '5', 'fail': 'cat'}}}
"""
if not isinstance(destination, dict):
return source
result = copy.deepcopy(destination)
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = result.setdefault(key, {})
value = merge_dicts(value, node)
result[key] = value
return result
def copy_sign_and_docs(org_func):
"""Decorator to copy the signature and the docstring from the org_func"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.__signature__ = inspect.signature(org_func)
wrapper.__doc__ = org_func.__doc__
return wrapper
return decorator
def spacy_to_allennlp_token(token: SpacyToken) -> AllenNLPToken:
return AllenNLPToken(
text=token.text,
idx=token.idx,
idx_end=token.idx + len(token),
lemma_=token.lemma_,
pos_=token.pos_,
tag_=token.tag_,
dep_=token.dep_,
ent_type_=token.ent_type_,
)
| 27.81549
| 112
| 0.635656
|
0b70ad9d9706c3000ec056e0b6d48cd4d9e697da
| 89
|
py
|
Python
|
leasing/apps.py
|
hkotkanen/mvj
|
a22d40869ef1b13924da428f3026d248acef81a7
|
[
"MIT"
] | null | null | null |
leasing/apps.py
|
hkotkanen/mvj
|
a22d40869ef1b13924da428f3026d248acef81a7
|
[
"MIT"
] | null | null | null |
leasing/apps.py
|
hkotkanen/mvj
|
a22d40869ef1b13924da428f3026d248acef81a7
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class LeasingConfig(AppConfig):
name = 'leasing'
| 14.833333
| 33
| 0.752809
|
4f673cdd0d672e36f63764c06ffac78f0529bcf6
| 50,694
|
py
|
Python
|
tensorflow/python/distribute/client/client.py
|
rishabhBudhouliya/tensorflow
|
071c0532fb6f0cb793001700e158677695d06717
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/distribute/client/client.py
|
rishabhBudhouliya/tensorflow
|
071c0532fb6f0cb793001700e158677695d06717
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/distribute/client/client.py
|
rishabhBudhouliya/tensorflow
|
071c0532fb6f0cb793001700e158677695d06717
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for `Client` and relevant cluster-worker related library.
This is currently under development and the API is subject to change.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import enum
import functools
import os
import re
import sys
import threading
import weakref
from absl import logging
from six.moves import queue
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute.client import metric_utils
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import executor
from tensorflow.python.eager import function as tf_function
from tensorflow.python.eager import remote
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import server_lib
from tensorflow.python.util import nest
# Maximum time for failed worker to come back is 1 hour
_WORKER_MAXIMUM_RECOVERY_SEC = 3600
# Maximum size for queued closures, "infinite" if set to 0.
# When the maximum queue size is reached, further schedule calls will become
# blocking until some previously queued closures are executed on workers.
# Note that using an "infinite" queue size can take a non-trivial portion of
# memory, and even lead to client OOM. Modify the size to a smaller value for
# client with constrained memory resource (only recommended for advanced users).
# Also used in unit tests to ensure the correctness when the queue is full.
_CLOSURE_QUEUE_MAX_SIZE = 256 * 1024
# RPC error message from PS
_RPC_ERROR_FROM_PS = "GRPC error information from remote target /job:ps"
# InvalidArgumentError (unknown device) will not have "GRPC error..." string.
_JOB_WORKER_STRING_IDENTIFIER = "/job:worker"
class _RemoteValueStatus(enum.Enum):
"""The status of a `RemoteValue` object.
A `RemoteValue` object can have three states:
1) not ready: no value, no non-retryable error and not aborted;
2) aborted: i.e. the execution of function was aborted because of task
failure, but can be retried;
  3) ready: i.e. has a value or a non-retryable error;
The initial state of a `RemoteValue` is "not ready". When its corresponding
closure has
been executed at least once, it will become aborted or ready. The state
transitions are:
1) not ready -> 2) aborted:
when the corresponding closure is aborted due to worker failure, and the
worker failure is not immediately handled.
1) not ready -> 3) ready:
when the corresponding closure has been executed successfully.
2) aborted -> 3) ready:
when the `RemoteValue` is rebuilt by rerunning the corresponding closure
and the closure has been executed successfully.
3) ready -> 2) aborted:
when the corresponding closure had been executed successfully but later
the corresponding remote worker failed. This is currently only implemented
for resource `RemoteValue` like iterators.
"""
NOT_READY = "NOT_READY"
ABORTED = "ABORTED"
READY = "READY"
class RemoteValue(object):
"""An asynchronously available value of a remotely executed function.
`RemoteValue` class is used as the return value of `Client.schedule()` where
the underlying concrete value comes at a later time once the function has been
remotely executed. `RemoteValue` can be used as an input to a subsequent
function scheduled with `Client.schedule()`.
Note: this class is not thread-safe.
"""
def __init__(self, closure, type_spec):
self._closure = closure
# The type spec for this `RemoteValue` which is used to trace functions that
# take this `RemoteValue` as input.
self._type_spec = func_graph.convert_structure_to_signature(type_spec)
self._value = None
self._error = None
self._status_available_event = threading.Event()
self._status = _RemoteValueStatus.NOT_READY
def _set_aborted(self):
self._status = _RemoteValueStatus.ABORTED
self._value = None
self._error = None
# Wake up any waiting thread and clear the event.
self._status_available_event.set()
def _rebuild_on(self, worker):
self._status_available_event.clear()
# TODO(yuefengz): we may need to rebuild its inputs as well.
self._closure.execute_on(worker)
def _set_value(self, value):
self._status = _RemoteValueStatus.READY
self._value = value
self._error = None
self._status_available_event.set()
def _set_error(self, exception):
self._status = _RemoteValueStatus.READY
self._value = None
self._error = exception
self._status_available_event.set()
def _get_value(self):
self._status_available_event.wait()
return self._value
def _get_error(self):
self._status_available_event.wait()
return self._error
def _set_type_spec(self, type_spec):
self._type_spec = func_graph.convert_structure_to_signature(type_spec)
def fetch(self):
"""Wait for the result of RemoteValue to be ready and return the result.
Returns:
The remote value, as a numpy data type (if scalar) or ndarray.
Raises:
FunctionRetryableError: If the function that produces this `RemoteValue`
is aborted or cancelled due to failure, and the user should handle and
reschedule.
"""
self._status_available_event.wait()
if self._status is _RemoteValueStatus.ABORTED:
raise FunctionRetryableError(
"The corresponding function is aborted. Please reschedule the "
"function.")
if self._error is not None:
raise self._error # pylint: disable=raising-bad-type
else:
if isinstance(self._value,
(ops.Tensor, resource_variable_ops.BaseResourceVariable)):
return self._value.numpy()
else:
return self._value
class InputError(Exception):
def __init__(self, original_exception):
message = ("Input has an error, the original exception is %r, "
"error message is %s." %
(original_exception, str(original_exception)))
super().__init__(message)
class FunctionRetryableError(Exception):
"""An error that represents the closure was aborted and should be retried."""
pass
def _maybe_get_error_and_rebuild_remote_values(worker, structure):
"""Attempts to return errors from `RemoteValue`s. Rebuilds them if needed."""
errors_in_structure = []
def _get_error(val):
if isinstance(val, RemoteValue):
if val._status is _RemoteValueStatus.ABORTED: # pylint: disable=protected-access
with worker.failure_handler.wait_on_failure(
on_recovery_fn=functools.partial(val._rebuild_on, worker), # pylint: disable=protected-access
worker_device_name=worker.device_name):
val._rebuild_on(worker) # pylint: disable=protected-access
error = val._get_error() # pylint: disable=protected-access
if error:
errors_in_structure.append(error)
nest.map_structure(_get_error, structure)
if errors_in_structure:
return errors_in_structure[0]
else:
return None
def _maybe_get_remote_value(val):
"""Gets the value of `val` if it is a `RemoteValue`."""
if isinstance(val, RemoteValue):
error = val._get_error() # pylint: disable=protected-access
if error:
raise AssertionError(
"RemoteValue doesn't have a value because it has errors.")
else:
return val._get_value() # pylint: disable=protected-access
else:
return val
def _maybe_as_type_spec(val):
if isinstance(val, RemoteValue):
if val._type_spec is None: # pylint: disable=protected-access
raise ValueError("Output of a scheduled function that is not "
"tf.function cannot be the input of another function.")
return val._type_spec # pylint: disable=protected-access
else:
return val
class PerWorkerValues(object):
"""Holds a list of per worker values."""
def __init__(self, values):
self._values = tuple(values)
def _select_worker_slice(worker_id, structured):
"""Selects the worker slice of each of the items in `structured`."""
def _get(x):
return x._values[worker_id] if isinstance(x, PerWorkerValues) else x # pylint: disable=protected-access
return nest.map_structure(_get, structured)
class Closure(object):
"""Hold a function to be scheduled and its arguments."""
def __init__(self, function, cancellation_mgr, args=None, kwargs=None):
if not callable(function):
raise ValueError("Function passed to `Client.schedule` must be a "
"callable object.")
self._args = args or ()
self._kwargs = kwargs or {}
if isinstance(function, def_function.Function):
replica_args = _select_worker_slice(0, self._args)
replica_kwargs = _select_worker_slice(0, self._kwargs)
# Note: no need to handle function registration failure since this kind of
# failure will not raise exceptions as designed in the runtime. The client
# has to rely on subsequent operations that raise to catch function
# registration failure.
# Record the function tracing overhead. Note that we pass in the tracing
# count of the def_function.Function as a state tracker, so that metrics
# will only record the time for actual function tracing (i.e., excluding
# function cache lookups).
with metric_utils.monitored_timer(
"function_tracing", state_tracker=function._get_tracing_count): # pylint: disable=protected-access
concrete_function = function.get_concrete_function(
*nest.map_structure(_maybe_as_type_spec, replica_args),
**nest.map_structure(_maybe_as_type_spec, replica_kwargs))
self._function = cancellation_mgr.get_cancelable_function(
concrete_function)
self._output_remote_values = nest.map_structure(
lambda x: RemoteValue(self, x), concrete_function.structured_outputs)
elif isinstance(function, tf_function.ConcreteFunction):
self._function = cancellation_mgr.get_cancelable_function(function)
self._output_remote_values = nest.map_structure(
lambda x: RemoteValue(self, x), function.structured_outputs)
else:
# Regular python functions.
self._function = function
# TODO(yuefengz): maybe we should trace python functions if their inputs
# are Python primitives, tensors and composite tensors.
self._output_remote_values = RemoteValue(self, None)
def _fetch_output_remote_values(self):
"""Temporary method used to sync the scheduler."""
# It will do nothing if there is no return value.
nest.map_structure(lambda x: x.fetch(), self._output_remote_values) # pylint: disable=protected-access
def _set_output_remote_values_aborted(self):
"""Set output remote_value aborted."""
# It will do nothing if there is no return value.
nest.map_structure(lambda x: x._set_aborted(), self._output_remote_values) # pylint: disable=protected-access
def _set_output_remote_values_cancelled(self):
nest.map_structure(
lambda x: x._set_error( # pylint: disable=protected-access,g-long-lambda
FunctionRetryableError("The corresponding function is "
"cancelled. Please reschedule the "
"function.")),
self._output_remote_values) # pylint: disable=protected-access
def execute_on(self, worker):
"""Executes the closure on the given worker.
Args:
worker: a `Worker` object.
"""
replica_args = _select_worker_slice(worker.worker_index, self._args)
replica_kwargs = _select_worker_slice(worker.worker_index, self._kwargs)
e = (
_maybe_get_error_and_rebuild_remote_values(worker, replica_args) or
_maybe_get_error_and_rebuild_remote_values(worker, replica_kwargs))
if e:
if not isinstance(e, InputError):
e = InputError(e)
for remote_value in nest.flatten(self._output_remote_values):
remote_value._set_error(e) # pylint: disable=protected-access
return
with ops.device(worker.device_name):
with context.executor_scope(worker.executor):
with metric_utils.monitored_timer("closure_execution"):
output_value = self._function(
*nest.map_structure(_maybe_get_remote_value, replica_args),
**nest.map_structure(_maybe_get_remote_value, replica_kwargs))
for remote_value, value in zip(
nest.flatten(self._output_remote_values), nest.flatten(output_value)):
remote_value._set_value(value) # pylint: disable=protected-access
class _CoordinatedClosureQueue(object):
"""Manage a queue of closures, inflight count and errors from execution.
This class is thread-safe.
"""
def __init__(self):
# `self._inflight_closure_count` only tracks the number of inflight closures
# that are "in generation". Once an error occurs, error generation is
# incremented and all subsequent arriving closures (from inflight) are
# considered "out of generation".
self._inflight_closure_count = 0
self._queue_lock = threading.Lock()
# Condition indicating that all pending closures (either queued or inflight)
# have been processed, failed, or cancelled.
self._stop_waiting_condition = threading.Condition(self._queue_lock)
# Condition indicating that an item becomes available in queue (not empty).
self._closures_queued_condition = threading.Condition(self._queue_lock)
# Condition indicating that a queue slot becomes available (not full).
# Note that even with "infinite" queue size, there is still a "practical"
# size limit for the queue depending on host memory capacity, and thus the
# queue will eventually become full with a lot of enqueued closures.
self._queue_free_slot_condition = threading.Condition(self._queue_lock)
# Condition indicating there is no inflight closures.
self._no_inflight_closure_condition = threading.Condition(self._queue_lock)
# Use to cancel in-flight closures.
self._cancellation_mgr = cancellation.CancellationManager()
if _CLOSURE_QUEUE_MAX_SIZE <= 0:
logging.warning(
"In ParameterServerClient, creating an infinite closure queue can "
"consume a significant amount of memory and even lead to OOM.")
self._queue = queue.Queue(maxsize=_CLOSURE_QUEUE_MAX_SIZE)
self._error = None
# The following is a lock to make sure when `wait` is called and before it
# returns no `put` can be executed during this period. It is because `wait`
    # won't know what to do with newly put closures. This lock adds a cutoff
# for `wait` so that closures put into the queue while waiting would not be
# taken responsible by this `wait`.
#
# We cannot reuse the `self._queue_lock` since when `wait` waits for a
# condition, the `self._queue_lock` will be released.
#
# We don't use a reader/writer's lock on purpose to reduce the complexity
# of the code.
self._put_wait_lock = threading.Lock()
def _cancel_all_closures(self):
"""Clears the queue and sets remaining closures cancelled error.
This method expects self._queue_lock to be held prior to entry.
"""
self._cancellation_mgr.start_cancel()
while self._inflight_closure_count > 0:
self._no_inflight_closure_condition.wait()
while True:
try:
closure = self._queue.get(block=False)
self._queue_free_slot_condition.notify()
closure._set_output_remote_values_cancelled() # pylint: disable=protected-access
except queue.Empty:
break
# The cancellation manager cannot be reused once cancelled. After all
# closures (queued or inflight) are cleaned up, recreate the cancellation
# manager with clean state.
    # Note on thread-safety: this is triggered when one of these client APIs
# are called: `schedule`, `wait`, and `done`. At the same time, no new
# closures can be constructed (which reads the _cancellation_mgr to get
# cancellable functions).
self._cancellation_mgr = cancellation.CancellationManager()
def _raise_if_error(self):
"""Raises the error if one exists.
If an error exists, cancel the closures in queue, raises it, and clear
the error.
This method expects self._queue_lock to be held prior to entry.
"""
if self._error:
logging.error("Start cancelling closures due to error %r: %s",
self._error, self._error)
self._cancel_all_closures()
try:
raise self._error # pylint: disable=raising-bad-type
finally:
self._error = None
def put(self, closure):
"""Put a closure into the queue for later execution.
If `mark_failed` was called before `put`, the error from the first
invocation of `mark_failed` will be raised.
Args:
closure: The `Closure` to put into the queue.
"""
with self._put_wait_lock, self._queue_lock:
self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())
self._queue.put(closure, block=False)
self._raise_if_error()
self._closures_queued_condition.notify()
def get(self, timeout=None):
"""Return a closure from the queue to be executed."""
with self._queue_lock:
while self._queue.empty():
if not self._closures_queued_condition.wait(timeout=timeout):
return None
closure = self._queue.get(block=False)
self._queue_free_slot_condition.notify()
self._inflight_closure_count += 1
return closure
def mark_finished(self):
"""Let the queue know that a closure has been successfully executed."""
with self._queue_lock:
if self._inflight_closure_count < 1:
raise AssertionError("There is no inflight closures to mark_finished.")
self._inflight_closure_count -= 1
if self._inflight_closure_count == 0:
self._no_inflight_closure_condition.notifyAll()
if self._queue.empty() and self._inflight_closure_count == 0:
self._stop_waiting_condition.notifyAll()
def put_back(self, closure):
"""Put the closure back into the queue as it was not properly executed."""
with self._queue_lock:
if self._inflight_closure_count < 1:
raise AssertionError("There is no inflight closures to put_back.")
if self._error:
closure._set_output_remote_values_cancelled() # pylint: disable=protected-access
else:
self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())
self._queue.put(closure, block=False)
self._closures_queued_condition.notify()
self._inflight_closure_count -= 1
if self._inflight_closure_count == 0:
self._no_inflight_closure_condition.notifyAll()
def wait(self, timeout=None):
"""Wait for all closures to be finished before returning.
If `mark_failed` was called before or during `wait`, the error from the
first invocation of `mark_failed` will be raised.
Args:
timeout: A float specifying a timeout for the wait in seconds.
Returns:
True unless the given timeout expired, in which case it returns False.
"""
with self._put_wait_lock, self._queue_lock:
while (not self._error and
(not self._queue.empty() or self._inflight_closure_count > 0)):
if not self._stop_waiting_condition.wait(timeout=timeout):
return False
self._raise_if_error()
return True
def mark_failed(self, e):
"""Sets error and unblocks any wait() call."""
with self._queue_lock:
# TODO(yuefengz): maybe record all failure and give users more
# information?
if self._inflight_closure_count < 1:
raise AssertionError("There is no inflight closures to mark_failed.")
if self._error is None:
self._error = e
self._inflight_closure_count -= 1
if self._inflight_closure_count == 0:
self._no_inflight_closure_condition.notifyAll()
self._stop_waiting_condition.notifyAll()
def done(self):
"""Returns true if the queue is empty and there is no inflight closure.
If `mark_failed` was called before `done`, the error from the first
invocation of `mark_failed` will be raised.
"""
with self._queue_lock:
self._raise_if_error()
return self._queue.empty() and self._inflight_closure_count == 0
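# Hedged note (not part of the original module): the intended lifecycle of the
# queue, as used by `Cluster` and `Worker` below, is roughly
#   queue.put(closure)      # client thread; may block when the queue is full
#   closure = queue.get()   # worker thread
#   queue.mark_finished()   # or queue.put_back(closure) / queue.mark_failed(e)
#   queue.wait()            # client thread; blocks until all work is done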
class WorkerPreemptionHandler(object):
"""Handles worker preemptions."""
def __init__(self, server_def, cluster):
self._server_def = server_def
self._cluster = cluster
self._cluster_update_lock = threading.Lock()
self._cluster_due_for_update = threading.Event()
self._worker_up_cond = threading.Condition(self._cluster_update_lock)
threading.Thread(target=self._preemption_handler,
name="WorkerPreemptionHandler",
daemon=True).start()
def _validate_preemption_failure(self, e):
"""Validates that the given exception represents worker preemption."""
if _is_worker_failure(e):
return
raise e
@contextlib.contextmanager
def wait_on_failure(self,
on_failure_fn=None,
on_recovery_fn=None,
worker_device_name="(unknown)"):
"""Catches worker preemption error and wait until failed workers are back.
Args:
on_failure_fn: an optional function to run if preemption happens.
on_recovery_fn: an optional function to run when a worker is recovered
from preemption.
      worker_device_name: the device name of the worker instance that is
        experiencing the failure.
Yields:
None.
"""
try:
yield
except errors.OpError as e:
# If the error is due to temporary connectivity issues between worker and
# ps, put back closure, ignore error and do not mark worker as failure.
if self._cluster._record_and_ignore_transient_ps_failure(e): # pylint: disable=protected-access
if on_failure_fn:
on_failure_fn()
return
self._validate_preemption_failure(e)
logging.error("Worker %s failed with error: %s", worker_device_name, e)
if on_failure_fn:
on_failure_fn()
with self._cluster_update_lock:
self._cluster_due_for_update.set()
self._worker_up_cond.wait(_WORKER_MAXIMUM_RECOVERY_SEC)
logging.info("Worker %s has been recovered.", worker_device_name)
if on_recovery_fn:
with self.wait_on_failure(
on_recovery_fn=on_recovery_fn,
worker_device_name=worker_device_name):
on_recovery_fn()
def _preemption_handler(self):
"""A loop that handles preemption.
This loop waits for signal of worker preemption and upon worker preemption,
it waits until all workers are back and updates the cluster about the
restarted workers.
"""
while True:
self._cluster_due_for_update.wait()
with self._cluster_update_lock:
try:
# TODO(haoyuzhang): support partial cluster recovery
logging.info("Cluster now being recovered.")
context.context().update_server_def(self._server_def)
# Cluster updated successfully, clear the update signal, and notify
# all workers that they are recovered from failure.
logging.info("Cluster successfully recovered.")
self._worker_up_cond.notify_all()
self._cluster_due_for_update.clear()
except Exception as e: # pylint: disable=broad-except
self._validate_preemption_failure(e)
# NOTE: Since the first RPC (GetStatus) of update_server_def is
# currently blocking by default, error should only happen if:
# (1) More workers failed while waiting for the previous workers to
# come back;
# (2) Worker failed when exchanging subsequent RPCs after the first
# RPC returns.
# Consider adding backoff retry logic if we see the error logged
# too frequently.
logging.error("Cluster update failed with error: %s. Retrying...", e)
class Worker(object):
"""A worker in a cluster.
Attributes:
worker_index: The index of the worker in the cluster.
device_name: The device string of the worker, e.g. "/job:worker/task:1".
executor: The worker's executor for remote function execution.
    failure_handler: The failure handler used to handle worker preemption
      failures.
"""
def __init__(self, worker_index, device_name, cluster):
self.worker_index = worker_index
self.device_name = device_name
self.executor = executor.new_executor(enable_async=False)
self.failure_handler = cluster.failure_handler
self._cluster = cluster
self._resource_remote_value_refs = []
# Worker threads need to start after `Worker`'s initialization.
threading.Thread(target=self._process_queue,
name="WorkerClosureProcessingLoop-%d" % self.worker_index,
daemon=True).start()
def _set_resources_aborted(self):
# TODO(yuefengz): maybe we can query whether a tensor is valid or not
# instead of marking a tensor aborted?
for weakref_resource in self._resource_remote_value_refs:
resource = weakref_resource()
if resource:
resource._set_aborted() # pylint: disable=protected-access
def _set_dead(self):
raise NotImplementedError("_set_dead is not implemented.")
def _process_closure(self, closure):
"""Runs a closure with preemption handling."""
try:
with self._cluster.failure_handler.wait_on_failure(
on_failure_fn=lambda: self._cluster._closure_queue.put_back(closure), # pylint: disable=protected-access
on_recovery_fn=self._set_resources_aborted,
worker_device_name=self.device_name):
closure.execute_on(self)
# TODO(yuefengz): we don't have to materialize results every step.
with metric_utils.monitored_timer("remote_value_fetch"):
closure._fetch_output_remote_values() # pylint: disable=protected-access
self._cluster._closure_queue.mark_finished() # pylint: disable=protected-access
except Exception as e: # pylint: disable=broad-except
# Avoid logging the derived cancellation error
if not isinstance(e, errors.CancelledError):
logging.error(
"/job:worker/task:%d encountered the following error when "
"processing closure: %r:%s", self.worker_index, e, e)
nest.map_structure(
lambda x: x._set_error(e), # pylint: disable=protected-access
closure._output_remote_values) # pylint: disable=protected-access
self._cluster._closure_queue.mark_failed(e) # pylint: disable=protected-access
def _process_queue(self):
while True:
closure = self._cluster._closure_queue.get() # pylint: disable=protected-access
self._process_closure(closure)
def _create_resource(self, function, args=None, kwargs=None):
"""Synchronously creates a per-worker resource represented by a `RemoteValue`.
Args:
function: the resource function to be run remotely. It should be a
`tf.function`, a concrete function or a Python function.
args: positional arguments to be passed to the function.
kwargs: keyword arguments to be passed to the function.
Returns:
one or several RemoteValue objects depending on the function return
values.
"""
# Some notes about the concurrency: currently all the activities related to
# the same worker such as creating resources, setting resources' aborted
# status, and executing closures happen on the same thread. This allows us
# to have simpler logic of concurrency.
closure = Closure(
function,
self._cluster._closure_queue._cancellation_mgr, # pylint: disable=protected-access
args=args,
kwargs=kwargs)
resource_remote_value = closure._output_remote_values # pylint: disable=protected-access
self._register_resource(resource_remote_value)
# The following is a short-term solution to lazily create resources in
# parallel.
# TODO(b/160343165): we should create resources eagerly, i.e. schedule the
# resource creation function as soon as users call this method.
resource_remote_value._set_aborted() # pylint: disable=protected-access
return resource_remote_value
def _register_resource(self, resource_remote_value):
if not isinstance(resource_remote_value, RemoteValue):
raise ValueError(
"Resource being registered is not of type `RemoteValue`.")
self._resource_remote_value_refs.append(weakref.ref(resource_remote_value))
class Cluster(object):
"""A cluster with workers.
We assume all function errors are fatal and based on this assumption our
error reporting logic is:
1) Both `schedule` and `join` can raise a non-retryable error which is the
first error seen by the client from any previously scheduled functions.
2) When an error is raised, there is no guarantee on how many previously
scheduled functions have been executed; functions that have not been executed
will be thrown away and marked as cancelled.
3) After an error is raised, the internal state of error will be cleared.
I.e. functions can continue to be scheduled and subsequent calls of `schedule`
or `join` will not raise the same error again.
Attributes:
    failure_handler: The failure handler used to handle worker preemption
      failures.
workers: a list of `Worker` objects in the cluster.
"""
def __init__(self, cluster_resolver, client_name="chief"):
"""Initializes the cluster instance and connect to the remote cluster."""
if client_name in ["worker", "ps"]:
raise ValueError("Client name should not be 'worker' or 'ps'.")
cluster_spec = cluster_resolver.cluster_spec()
self._num_workers = len(cluster_spec.as_dict().get("worker", ()))
self._num_ps = len(cluster_spec.as_dict().get("ps", ()))
device_filters = server_lib.ClusterDeviceFilters()
# For any worker, only the devices on PS and chief nodes are visible
for i in range(self._num_workers):
device_filters.set_device_filters(
"worker", i, ["/job:ps", "/job:%s" % client_name])
# Similarly for any ps, only the devices on workers and chief are visible
for i in range(self._num_ps):
device_filters.set_device_filters(
"ps", i, ["/job:worker", "/job:%s" % client_name])
context.context().mirroring_policy = context.MIRRORING_ALL
# Allow at most one outstanding RPC for each worker at a certain time. This
# is to simplify worker failure handling in the runtime
os.environ["TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE"] = "False"
remote.connect_to_cluster(cluster_spec,
job_name=client_name,
protocol=cluster_resolver.rpc_layer,
cluster_device_filters=device_filters)
# Ignore PS failures reported by workers due to transient connection errors.
# Transient connectivity issues between workers and PS are relayed by the
# workers to the client, leading the client to believe that there are PS
# failures. The difference between transient vs. permanent PS failure is the
# number of reports from the workers. When this env var is set to a positive
# integer K, the client ignores up to K reports of a failed PS task. I.e.,
# only when there are more than K trials of executing closures fail due to
# errors from the same PS instance do we consider the PS instance encounters
# a failure.
# TODO(b/164279603): Remove this workaround when the underlying connectivity
# issue in gRPC server is resolved.
self._transient_ps_failures_threshold = int(os.environ.get(
"TF_CLIENT_IGNORE_TRANSIENT_PS_FAILURES", 3))
self._potential_ps_failures_lock = threading.Lock()
self._potential_ps_failures_count = [0] * self._num_ps
self._closure_queue = _CoordinatedClosureQueue()
self.failure_handler = WorkerPreemptionHandler(context.get_server_def(),
self)
worker_device_strings = [
"/job:worker/replica:0/task:%d" % i for i in range(self._num_workers)
]
self.workers = [
Worker(i, w, self) for i, w in enumerate(worker_device_strings)
]
def _record_and_ignore_transient_ps_failure(self, e):
"""Records potential PS failures and return if failure should be ignored."""
if self._transient_ps_failures_threshold <= 0 or not _is_ps_failure(e):
return False
ps_tasks = _extract_failed_ps_instances(str(e))
with self._potential_ps_failures_lock:
for t in ps_tasks:
self._potential_ps_failures_count[t] += 1
# The number of UnavailableError encountered on this PS task exceeds the
        # maximum number of ignored errors.
if (self._potential_ps_failures_count[t] >=
self._transient_ps_failures_threshold):
return False
return True
def schedule(self, function, args, kwargs):
"""Schedules `function` to be dispatched to a worker for execution.
Args:
function: The function to be dispatched to a worker for execution
asynchronously.
args: Positional arguments for `fn`.
kwargs: Keyword arguments for `fn`.
Returns:
A structure of `RemoteValue` object.
"""
closure = Closure(
function,
self._closure_queue._cancellation_mgr, # pylint: disable=protected-access
args=args,
kwargs=kwargs)
self._closure_queue.put(closure)
return closure._output_remote_values # pylint: disable=protected-access
def join(self):
"""Blocks until all scheduled functions are executed."""
self._closure_queue.wait()
def done(self):
"""Returns true if all scheduled functions are executed."""
return self._closure_queue.done()
class ParameterServerFailureError(Exception):
"""An error representing at least one parameter server is interrupted."""
pass
class Client(object):
"""An object to schedule and orchestrate remote function execution.
A `Client` object represents a program used to create dataset, schedule
functions to be executed, and fetch the results of the functions.
Currently, `Client` is not supported to be used in a standalone manner.
It should be used in conjunction with `ParameterServerStrategyV2`. The
recommended way of using the combination is through a `ParameterServerClient`
object. Please see `ParameterServerClient` for more information.
This is currently under development, and the API as well as implementation
is subject to changes.
"""
def __init__(self, strategy):
"""Initialization of a `Client` instance.
This connects the client to remote workers and parameter servers, through
a `tf.config.experimental_connect_to_cluster` call.
Args:
strategy: a `tf.distribute.Strategy` object. Currently, only
`ParameterServerStrategyV2` is supported.
Raises:
ValueError: if the strategy being used is not supported.
"""
if not isinstance(strategy,
parameter_server_strategy_v2.ParameterServerStrategyV2):
raise ValueError("Only `ParameterServerStrategyV2` is supported in "
"`Client` currently.")
self._strategy = strategy
self.cluster = Cluster(strategy._cluster_resolver)
@property
def strategy(self):
return self._strategy
def schedule(self, fn, args=None, kwargs=None):
"""Schedules `fn` to be dispatched to a worker for execution asynchronously.
When calling `schedule` with a function `fn`, `fn` will be executed on a
remote worker at some later time. The process is asynchronous, meaning
`schedule` returns immediately, possibly without having the result ready
yet. `schedule` returns a structure of `RemoteValue` object, which wraps the
output of the function. Call `fetch()` on `RemoteValue` to wait for the
function execution to finish and retrieve its output from the remote worker.
`schedule` guarantees that `fn` will be executed on a worker at least once;
it could be more than once if its corresponding worker fails in the middle
    of its execution. Note that since a worker can fail at any point when
executing the function, it is possible that the function is partially
executed, but `Client` guarantees that in those events, the function will
eventually be fully executed, possibly on a different worker that is
available.
If any previously scheduled function raises an error, `schedule` will fail
by raising any one of those errors, and clear the errors collected so far.
There are two implications when this happens: 1) user should call `schedule`
with `fn` again to re-schedule, and 2) some of the previously scheduled
functions may have not been executed. User can call `fetch` on the returned
`RemoteValue` to inspect if they have executed, failed, or cancelled, and
reschedule the corresponding function if needed.
When `schedule` raises, it guarantees that there is no function that is
still being executed.
At this time, there is no support of worker assignment for function
execution, or priority of the workers.
`args` and `kwargs` are the arguments passed into `fn`, when `fn` is
executed on a worker. They can be `PerWorkerValues`, which is a collection
of values, each of which represents a component specific to a worker; in
this case, the argument will be substituted with the corresponding component
on the target worker. Arguments that are not `PerWorkerValues` will be
passed into `fn` as-is.
Args:
fn: A `tf.function`; the function to be dispatched to a worker for
execution asynchronously.
args: Positional arguments for `fn`.
kwargs: Keyword arguments for `fn`.
Returns:
A structure of `RemoteValue` object.
Raises:
Exception: one of the exceptions caught by the client by any previously
scheduled function since the last time an error was thrown or since
the beginning of the program.
"""
# Slot variables are usually created during function tracing time; thus
# `schedule` needs to be called within the `strategy.scope()`.
with self.strategy.scope(), _translate_parameter_server_failure():
return self.cluster.schedule(fn, args=args, kwargs=kwargs)
def join(self):
"""Blocks until all the scheduled functions have finished execution.
If any previously scheduled function raises an error, `join` will fail by
raising any one of those errors, and clear the errors collected so far. If
this happens, some of the previously scheduled functions may have not been
executed. Users can call `fetch` on the returned `RemoteValue` to inspect if
they have executed, failed, or cancelled. If some that have been cancelled
need to be rescheduled, users should call `schedule` with the function
again.
When `join` returns or raises, it guarantees that there is no function that
is still being executed.
Raises:
Exception: one of the exceptions caught by the client by any previously
scheduled function since the last time an error was thrown or since
the beginning of the program.
"""
with _translate_parameter_server_failure():
self.cluster.join()
def done(self):
"""Returns whether all the scheduled functions have finished execution.
If any previously scheduled function raises an error, `done` will fail by
raising any one of those errors.
When `done` returns True or raises, it guarantees that there is no function
that is still being executed.
"""
return self.cluster.done()
def create_per_worker_dataset(self, dataset_fn):
"""Create dataset on workers by calling `dataset_fn` on worker devices.
This creates the given dataset generated by dataset_fn on the workers
and returns an object that represents the collection of those individual
datasets. Calling `iter` on such collection of dataset returns a
`PerWorkerValues`, which is a collection of iterators, where the iterators
have been placed on respective workers.
Calling `next` on this `PerWorkerValues` of iterators is currently
unsupported; it is meant to be passed as an argument into `Client.schedule`.
When the scheduled function is picked up and being executed by a worker, the
function will receive the individual iterator that corresponds to the
worker, and now `next` can be called on iterator to get the next (batch or
example) of data.
Dataset shuffling and repeating are usually needed in `dataset_fn`; however,
    sharding is not recommended: some workers may not be available and those
examples may be skipped and not covered by other workers, if the dataset is
sharded.
Args:
dataset_fn: The dataset function that returns a dataset. This is to be
executed on the workers.
Returns:
An object that represents the collection of those individual
datasets. `iter` is expected to be called on this object that returns
a `PerWorkerValues` of the iterators (that are on the workers).
"""
input_workers = input_lib.InputWorkers([
(w.device_name, [w.device_name]) for w in self.cluster.workers
])
return _PerWorkerDistributedDataset(dataset_fn, input_workers, self)
def _create_per_worker_resources(self, fn, args=None, kwargs=None):
"""Synchronously create resources on the workers.
The resources are represented by `RemoteValue`s.
Args:
fn: The function to be dispatched to all workers for execution
asynchronously.
args: Positional arguments for `fn`.
kwargs: Keyword arguments for `fn`.
Returns:
A `PerWorkerValues` object, which wraps a tuple of `RemoteValue` objects.
"""
results = []
for w in self.cluster.workers:
results.append(w._create_resource(fn, args=args, kwargs=kwargs)) # pylint: disable=protected-access
return PerWorkerValues(tuple(results))
def fetch(self, val):
"""Blocking call to fetch results from `RemoteValue`s.
This returns the execution result of `RemoteValue`s; if not ready,
waiting for it while blocking the caller.
Args:
val: The value to fetch the results from. If this is structure of
`RemoteValue`, `fetch()` will be called on the individual `RemoteValue`
to get the result.
Returns:
If `val` is a `RemoteValue` or a structure of `RemoteValue`s, returns
the fetched `RemoteValue` value immediately if it's available, or blocks
the call until it's available, and returns the fetched `RemoteValue`
values with the same structure. If `val` is other types, return (`val`,).
"""
def _maybe_fetch(val):
if isinstance(val, RemoteValue):
return val.fetch()
else:
return val
# TODO(yuefengz): we should fetch values in a batch.
result = nest.map_structure(_maybe_fetch, val)
if not isinstance(result, tuple):
return (result,)
return result
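# Hedged usage sketch (not part of the original module; `cluster_resolver` is
# assumed to be supplied by the surrounding program). It only illustrates the
# schedule/join/fetch flow documented in the docstrings above:
#
#   strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
#       cluster_resolver)
#   client = Client(strategy)
#
#   @def_function.function
#   def step_fn():
#     return 1.0
#
#   remote_value = client.schedule(step_fn)
#   client.join()                      # block until the scheduled work is done
#   print(client.fetch(remote_value))  # -> (1.0,)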
# pylint: disable=missing-function-docstring
@contextlib.contextmanager
def _translate_parameter_server_failure():
try:
yield
except Exception as e: # pylint: disable=broad-except
if _is_ps_failure(e):
raise ParameterServerFailureError(e)
else:
raise
# pylint: disable=missing-function-docstring
@contextlib.contextmanager
def handle_parameter_server_failure():
try:
with _translate_parameter_server_failure():
yield
except ParameterServerFailureError as e: # pylint: disable=broad-except
restart_exit_code = os.environ.get("TF_CLIENT_NON_FATAL_RESTART_EXIT_CODE",
None)
if restart_exit_code is not None:
sys.exit(int(restart_exit_code))
else:
raise
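# Usage sketch (illustrative only): wrap worker-side execution so that parameter
# server failures either terminate the process with the configured restart exit
# code or surface as ParameterServerFailureError; `run_steps` is a hypothetical
# user function.
#
# with handle_parameter_server_failure():
#   run_steps()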
class _PerWorkerDistributedDataset(object):
"""Represents worker-distributed datasets created from dataset function."""
def __init__(self, dataset_fn, input_workers, client):
"""Makes an iterable from datasets created by the given function.
Args:
dataset_fn: A function that returns a `Dataset`.
input_workers: an `InputWorkers` object.
client: a `Client` object, used to create dataset resources.
"""
def disallow_variable_creation(next_creator, **kwargs):
raise ValueError("Creating variables in `dataset_fn` is not allowed.")
if isinstance(dataset_fn, def_function.Function):
with variable_scope.variable_creator_scope(disallow_variable_creation):
dataset_fn = dataset_fn.get_concrete_function()
elif not isinstance(dataset_fn, tf_function.ConcreteFunction):
with variable_scope.variable_creator_scope(disallow_variable_creation):
dataset_fn = def_function.function(dataset_fn).get_concrete_function()
self._dataset_fn = dataset_fn
self._input_workers = input_workers
self._client = client
self._element_spec = None
def __iter__(self):
# We would like users to create iterators outside `tf.function`s so that we
# can track them.
if (not context.executing_eagerly() or
ops.get_default_graph().building_function):
raise RuntimeError(
"__iter__() is not supported inside of tf.function or in graph mode.")
def _create_per_worker_iterator():
dataset = self._dataset_fn()
return iter(dataset)
# If _PerWorkerDistributedDataset.__iter__ is called multiple
# times, for the same object it should only create and register resource
# once. Using object id to distinguish different iterator resources.
per_worker_iterator = self._client._create_per_worker_resources(
_create_per_worker_iterator)
# Create an iterator, so the consumer function of this iterator can start
# tracing using this iterator without needing to wait for the completion of
# the iterator creation. Note: the iterator shouldn't use memory until it is
# consumed.
# TODO(b/154675763): get rid of this workaround once we can make input_fn a
# tf.function.
iterator = _create_per_worker_iterator()
for iterator_remote_value in per_worker_iterator._values:
iterator_remote_value._set_type_spec(iterator._type_spec)
return _PerWorkerDistributedIterator(per_worker_iterator._values)
@property
def element_spec(self):
"""The type specification of an element of this dataset."""
raise NotImplementedError("Passing `AsyncDistributedDataset` to a "
"tf.function is not supported.")
class _PerWorkerDistributedIterator(PerWorkerValues):
"""Distributed iterator for `Client`."""
def __next__(self):
return self.get_next()
def get_next(self, name=None):
"""Returns the next input from the iterator for all replicas."""
raise NotImplementedError("Iterating over an `AsyncDistributedIterator` "
"is not supported right now.")
def _extract_failed_ps_instances(err_msg):
"""Return a set of potentially failing ps instances from error message."""
tasks = re.findall("/job:ps/replica:0/task:[0-9]+", err_msg)
return set(int(t.split(":")[-1]) for t in tasks)
def _is_ps_failure(error):
"""Whether the error is considered a parameter server failure."""
if (_RPC_ERROR_FROM_PS in str(error) or
(isinstance(error, errors.InvalidArgumentError) and
"/job:ps" in str(error))):
return True
return False
def _is_worker_failure(error):
"""Whether the error is considered a worker failure."""
if _JOB_WORKER_STRING_IDENTIFIER not in str(error):
return False
if _RPC_ERROR_FROM_PS in str(error):
return False
# TODO(haoyuzhang): Consider using special status code if error from a
# remote is derived from RPC errors originated from other hosts.
if isinstance(error, (errors.UnavailableError, errors.AbortedError)):
return True
# The following error could happen when the remote task fails and restarts
# in a very short interval during which no RPCs were exchanged to detect the
# failure. In that case, gRPC allows channel (which is different from a
# connection) to be reused for a replaced server listening to same address.
if isinstance(error, errors.InvalidArgumentError):
if ("Unable to find a context_id" in str(error) or
"unknown device" in str(error) or
"Unable to find the relevant tensor remote_handle" in str(error)):
# TODO(b/159961667): Fix "Unable to find the relevant tensor
# remote_handle" part.
return True
# TODO(b/162541228): The following 3 types of errors are very rare and only
# observed in large-scale testing. The types of errors should be reduced.
# This error could show up when copying function inputs from remote tasks.
if isinstance(error, errors.InternalError):
if ("Failed copying input tensor" in str(error) or
"Unable to find a context_id" in str(error)):
return True
# This could happen when the function registration fails. In the observed
# cases this only happens to the dataset related functions.
if isinstance(error, errors.NotFoundError):
if ("is neither a type of a primitive operation nor a name of a function "
"registered" in str(error)):
return True
# This could happen when the iterator is no longer valid on the remote worker
# "Resource input tensor contains an invalid device"
if isinstance(error, errors.CancelledError):
return True
return False
| 40.948304
| 115
| 0.715548
|
ad63b9b0c94148e0a08f1bcc9e238b15e90b04fd
| 23,327
|
py
|
Python
|
meu_grafo_matriz_adjacencia_nao_dir.py
|
RafaelMunizz/Teoria-dos-Grafos
|
1502219c629a3081a815cbcc9d0d3353eb471872
|
[
"MIT"
] | null | null | null |
meu_grafo_matriz_adjacencia_nao_dir.py
|
RafaelMunizz/Teoria-dos-Grafos
|
1502219c629a3081a815cbcc9d0d3353eb471872
|
[
"MIT"
] | null | null | null |
meu_grafo_matriz_adjacencia_nao_dir.py
|
RafaelMunizz/Teoria-dos-Grafos
|
1502219c629a3081a815cbcc9d0d3353eb471872
|
[
"MIT"
] | null | null | null |
from bibgrafo.grafo_matriz_adj_nao_dir import GrafoMatrizAdjacenciaNaoDirecionado
from bibgrafo.grafo_exceptions import *
from copy import deepcopy
class MeuGrafo(GrafoMatrizAdjacenciaNaoDirecionado):
def vertices_nao_adjacentes(self):
'''
Provides a list of non-adjacent vertices in the graph. The list has the format: [X-Z, X-W, ...]
where X, Z and W are vertices of the graph that have no edge between them.
:return: A list with the pairs of non-adjacent vertices
'''
lista = []
for i in range(len(self.M)):
for j in range(len(self.M)):
if self.M[i][j] != '-':
if not self.M[i][j] and i != j:
lista.append(self.N[i] + '-' + self.N[j])
return lista
def verticesAdjacentes(self, V=''):
'''
Provides a list of all vertices adjacent to a given vertex.
:return: A list with the possible adjacent vertices.
'''
matriz = self.matrizModificada()
verticesAdjacentes = []
for i in matriz:
if i[1] == V:
verticesAdjacentes.append(i[2])
return verticesAdjacentes
def ha_laco(self):
'''
Checks whether the graph contains any self-loop.
:return: A boolean value indicating whether a self-loop exists.
'''
for i in range(len(self.M)):
if self.M[i][i]:
return True
return False
def grau(self, V=''):
'''
Provides the degree of the vertex passed as a parameter
:param V: The label of the vertex to be analyzed
:return: An integer indicating the degree of the vertex
:raises: VerticeInvalidoException if the vertex does not exist in the graph
'''
if V not in self.N: raise VerticeInvalidoException("Vértice não existe")
indiceVertice = self.N.index(V)
grau = 0
for i in range(len(self.N)):
if self.M[indiceVertice][i] and self.M[i][indiceVertice]:
if len(self.M[indiceVertice][i]) > 0 and self.M[indiceVertice][i] != '-': # row
grau += len(self.M[indiceVertice][i])
if len(self.M[i][indiceVertice]) > 0 and self.M[i][indiceVertice] != '-': # column
grau += len(self.M[i][indiceVertice])
return grau
def ha_paralelas(self):
'''
Checks whether the graph contains parallel edges
:return: A boolean value indicating whether parallel edges exist in the graph.
'''
for i in range(len(self.M)):
for j in range(len(self.M[i])):
if len(self.M[i][j]) > 1:
return True
return False
def arestas_sobre_vertice(self, V):
'''
Provides a list containing the labels of the edges incident on the vertex passed as a parameter
:param V: The vertex to be analyzed
:return: A list with the labels of the edges incident on the vertex
:raises: VerticeInvalidoException if the vertex does not exist in the graph
'''
if V not in self.N: raise VerticeInvalidoException("Vértice não existe")
indiceVertice = self.N.index(V)
matrizArestas = []
for i in range(len(self.N)):
if self.M[indiceVertice][i] and self.M[i][indiceVertice]:
if self.M[indiceVertice][i] != '-': # row
matrizArestas.append(list(self.M[indiceVertice][i].keys()))
if self.M[i][indiceVertice] != '-': # column
matrizArestas.append(list(self.M[i][indiceVertice].keys()))
arestasVertice = []
for arestas in matrizArestas:
for aresta in arestas:
arestasVertice.append(aresta)
return arestasVertice
def eh_completo(self):
'''
Checks whether the graph is complete.
:return: A boolean value indicating whether the graph is complete
'''
for i in self.N:
if len(self.arestas_sobre_vertice(i)) != (len(self.N) - 1):
return False
return True
def conexo(self):
'''
Checks whether the graph is connected
:return: Bool indicating whether the graph is connected
'''
listadfs = list()
dfs = self.dfs(self.N[0])
l = list()
for linha in self.M:
for coluna in linha:
if coluna != "-" and coluna != {}:
for aresta in coluna:
l.append(coluna[aresta].getV1())
l.append(coluna[aresta].getV2())
listadfs.append(l)
for dfs in listadfs:
for vertice in self.N:
if not (vertice in dfs):
return False
return True
def ponte(self, N):
'''
Checks whether a given vertex is a bridge within the graph.
:return: bool indicating whether it is a bridge
'''
l = list()
for linha in self._matriz:
for coluna in linha:
if coluna != "-" and coluna != {}:
l.append(coluna[1])
l.append(coluna[2])
for vertice in N:
if not (vertice in l):
return True
return False
def listaRotulos(self):
'''
Provides a list with the edges of the graph.
List of the form [A0,A1,...,An]
:return: list.
'''
la = list()
for lin in range(len(self.M)):
for col in range(len(self.M)):
if self.M[lin][col] != "-" and self.M[lin][col] != {}:
for aresta in self.M[lin][col]:
la.append(aresta)
return la
def colocarArestas(self, listaVertices):
'''
Provides a list with the edges interleaved into a list that previously contained only vertices.
:param V: A list of vertices. E.g.: ['A', 'J', 'K', 'G', 'H']
:return: A list with vertices and edges. E.g.: ['A', 'a3', 'J', 'a5', 'K', 'a4', 'G', 'a9', 'H']
'''
pares = [] # Build pairs from the given list of vertices
for i in range(1, len(listaVertices)):
pares.append(listaVertices[i - 1] + '-' + listaVertices[i])
rotulos = [] # List with the labels of the pairs
for j in pares:
rotulos.append(self.pegarRotulo(j[0], j[2]))
listaFinal = [] # List with vertices and edges (one per index)
for k in range(len(listaVertices)): # Place one vertex and one edge at a time
listaFinal.append(listaVertices[k])
if len(listaVertices) - 1 != k:
listaFinal.append(rotulos[k])
return listaFinal
############### DFS ###############
def dfs(self, V='', **kwargs):
'''
Provides a tree (a graph) containing only the edges visited by the DFS
:param V: The vertex from which the traversal starts
:return: A tree-shaped graph
:raises: VerticeInvalidoException if the vertex does not exist in the graph
'''
if V not in self.N:
raise VerticeInvalidoException
try:
self.acdfs = kwargs["dfs"]
except:
self.acdfs = True
if self.acdfs == True:
self._arvoredfs = MeuGrafo(self.N)
self.arestas_dfs = []
self.acdfs = False
for coluna in self.M[self.N.index(V)]:
if coluna != "-" and coluna != {}:
for aresta in coluna:
if ((coluna[aresta].getV1() not in self.arestas_dfs) or (
coluna[aresta].getV2() not in self.arestas_dfs)) \
and (coluna[aresta].getV1() != coluna[aresta].getV2()):
self._arvoredfs.adicionaAresta(aresta, coluna[aresta].getV1(), coluna[aresta].getV2())
self.arestas_dfs.append(coluna[aresta].getV1())
self.arestas_dfs.append(coluna[aresta].getV2())
self.dfs(coluna[aresta].getV2() if coluna[aresta].getV2() != V else coluna[aresta].getV1(),
**{"dfs": False})
return self._arvoredfs
############### EULERIAN PATH ###############
def caminhoEuleriano(self):
'''
Checks whether the graph has an Eulerian path.
If it does, provides a list of the form [V0,A0 .. Vn,An].
:return: False, or the list with the path.
'''
impar, lvi = self.ehEuleriano()
if (not impar) or (not self.conexo()):
return False
self._caminho = list()
self._matriz = self.matrizArestas()
self._arestas = self.listaRotulos()
# Choose the starting vertex of the path
if lvi != []:
Vi = lvi[0]
elif lvi == []:
Vi = 0
self._caminho.append(self.N[Vi])
Eh_caminho = self.buscarCaminho(Vi)
return self._caminho
def ehEuleriano(self):
'''
Checks whether each vertex has odd degree and how many
odd-degree vertices the graph has
:return: True, list of odd-degree vertices if true
False, None if false
'''
total = 0
i = 0
lvi = list()
while (total <= 2) and (i <= len(self.N) - 1):
grau = 0
grau += self.grau(self.N[i])
if grau % 2 == 1:
total += 1
lvi.append(i)
i += 1
if total == 0 or total == 2:
return True, lvi
else:
return False, None
def buscarCaminho(self, v):
'''
Builds the Eulerian path.
:return: bool indicating whether the path exists.
'''
grau = self.grauMatriz(self.N[v], self._matriz)
if grau == 0:
return True
elif grau == 1:
for c in range(len(self.M[v])):
if v < c:
linha, coluna = v, c
else:
linha, coluna = c, v
if self._arestas != [] and self._matriz[linha][coluna] != "-" and self._matriz[linha][coluna] != {}:
aresta = self._matriz[linha][coluna]
self._arestas.remove(aresta[0])
self._caminho.append(aresta[0])
self._caminho.append(aresta[1] if aresta[1] != self.N[v] else aresta[2])
self._matriz[linha][coluna] = "-"
self.buscarCaminho(self.N.index(aresta[1] if aresta[1] != self.N[v] else aresta[2]))
else:
__N = self.N.copy()
for c in range(len(self.M[v])):
if v < c:
linha, coluna = v, c
else:
linha, coluna = c, v
if self._arestas != [] and self._matriz[linha][coluna] != "-" and self._matriz[linha][coluna] != {}:
aresta = self._matriz[linha][coluna]
self._matriz[linha][coluna] = "-"
if self.grauMatriz(self.N[v], self._matriz) == 0: __N.remove(self.N[v])
if not self.ponte(__N):
self._arestas.remove(aresta[0])
self._caminho.append(aresta[0])
self._caminho.append(aresta[1] if aresta[1] != self.N[v] else aresta[2])
self.buscarCaminho(self.N.index(aresta[1] if aresta[1] != self.N[v] else aresta[2]))
elif self.grauMatriz(aresta[1] if aresta[1] != self.N[v] else aresta[2], self._matriz) != 0:
self._arestas.remove(aresta[0])
self._caminho.append(aresta[0])
self._caminho.append(aresta[1] if aresta[1] != self.N[v] else aresta[2])
self.buscarCaminho(self.N.index(aresta[1] if aresta[1] != self.N[v] else aresta[2]))
else:
self._matriz[linha][coluna] = aresta
self._arestas.append(aresta[0])
__N.append(self.N[v])
return False
def grauMatriz(self, V, M):
'''
Provides the degree of the vertex passed as a parameter
:param V: The label of the vertex to be analyzed
:param M: The matrix to be analyzed starting from V
:return: An integer indicating the degree of the vertex
:raises: VerticeInvalidoException if the vertex does not exist in the graph
'''
if V not in self.N:
raise VerticeInvalidoException
cont = 0
for l in range(len(M)):
for c in range(len(M)):
if M[l][c] != "-" and V in M[l][c]:
cont += 1
return cont
def matrizArestas(self):
'''
Provides a matrix with the edges of the graph.
:return: Matrix with the edges of the graph
'''
matriz = []
for l in range(len(self.M)):
matriz.append([])
for c in range(len(self.M)):
if self.M[l][c] != "-" and self.M[l][c] != {}:
for aresta in self.M[l][c]:
matriz[l].append([aresta, self.M[l][c][aresta].getV1(), self.M[l][c][aresta].getV2()])
else:
matriz[l].append("-")
return matriz
############### MORE UTILITY FUNCTIONS ###############
def imprimirMatriz(self, matriz):
'''
Prints the graph in a more readable form for the user.
:param matriz: A matrix made of 0s and 1s.
E.g.:
:input: [[1, 1, 0, 1], [0, 1, 0, 1], [1, 1, 0, 1], [0, 1, 0, 1]].
:output: |A|B|C|D|
A|1 1 0 1
B|0 1 0 1
C|1 1 0 1
D|0 1 0 1
'''
print(" ",end='|')
for k in range(len(self.N)):
print(self.N[k], end='|')
print()
for i in range(len(matriz)):
print(self.N[i], end='|')
for j in range(len(matriz[0])):
print(matriz[i][j], end=' ')
print()
def arcoNaPosicao(self, pos1, pos2):
'''Returns the arc of the matrix at the position
passed as a parameter'''
arco = []
for i in range(len(self.N)):
if i == pos1:
arco.append(self.N[i])
for j in range(len(self.N)):
if j == pos2:
arco.append(self.N[j])
if len(arco) != 2: return False
return arco
def matrizModificada(self):
'''
:param: graph.
:return: A matrix that represents the graph in another form, with label, v1, v2 and weight.
E.g.:
[['a1', 'A', 'B', 3], ['a2', 'A', 'C', 4], ['a3', 'A', 'D', 5], ['a4', 'B', 'G', 5], ...
'''
# Lists used to build the final matrix.
rotulos = []
pesos = []
v1 = []
v2 = []
matrizModificada = [] # Final matrix
for i in range(len(self.N)):
for j in range(len(self.N)):
if self.M[i][j]:
if '-' not in (list((self.M[i][j]))[0]):
if len(list((self.M[i][j]).keys())) == 1:
rotulo = list((self.M[i][j]).keys())[0] # Extract the label at position matriz[i][j]
rotulos.append(rotulo) # Label
pesos.append(self.M[i][j][rotulo].getPeso()) # Weight of the edge
v1.append(self.N[i]) # Vertex 1
v2.append(self.N[j]) # Vertex 2
else:
for k in (list((self.M[i][j]).keys())):
rotulo = k # Extract the label at position matriz[i][j]
rotulos.append(rotulo) # Label
pesos.append(self.M[i][j][rotulo].getPeso()) # Weight of the edge
v1.append(self.N[i]) # Vertex 1
v2.append(self.N[j]) # Vertex 2
# Gather the collected information into a single matrix
for k in range(len(rotulos)):
aux = []
# Numeric identifier of the edge order. It is used
# to place the arcs in the correct position, since they
# appear in arbitrary order. E.g.: a1, a2, a13, a15, a7 ...
aux.append(int(rotulos[k][1:]))
aux.append(rotulos[k])
aux.append(v1[k])
aux.append(v2[k])
aux.append(pesos[k])
matrizModificada.append(aux)
# Put the arcs in the correct order with the help
# of the first element, which is the identifier
matrizModificada.sort()
# Remove the identifier
for l in matrizModificada:
del l[0]
return matrizModificada
def pesoArco(self, v1, v2):
'''
:param: Vertices 1 and 2.
:return: Weight of the arc formed by V1-V2, if it exists.
'''
matriz = self.matrizModificada()
# E.g.: i = ['a1', 'A', 'B', 1]
for i in matriz:
if i[1] == v1 and i[2] == v2:
return i[3] # Weight
return False
def pegarRotulo(self, v1, v2):
'''
:param: Vertices 1 and 2.
:return: Label of the arc formed by V1-V2, if it exists.
'''
matriz = self.matrizModificada()
# E.g.: i = ['a1', 'A', 'B', 1]
for i in matriz:
if i[1] == v1 and i[2] == v2:
return i[0] # Label
return False
def matrizAdjacenciaComPesos(self):
'''
Transforms the graph into a matrix made of the arc weights and 0.
:param grafo: The graph to be transformed.
:return: A matrix. E.g.: [[3, 0, 0, 5], [0, 4, 0, 7], [2, 0, 0, 4], [0, 3, 3, 0]]
'''
grafo = self.matrizAdjacenciaBinaria()
novaMatriz = []
for i in range(len(grafo)):
aux = []
for j in range(len(grafo[0])):
if grafo[i][j] != 0:
arco = self.arcoNaPosicao(i, j)
aux.append(self.pesoArco(arco[0], arco[1]))
else:
aux.append(0)
novaMatriz.append(aux)
return novaMatriz
def matrizAdjacenciaBinaria(self):
'''
Transforms the graph into a matrix made only of 1s and 0s.
:param grafo: A copy of the graph to be transformed.
:return: A matrix. E.g.: [[1, 0, 0, 1], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]]
'''
matriz = deepcopy(self.M)
for i in range(len(self.M)):
for j in range(len(self.M[0])):
if self.M[i][j] != 'o' and self.M[i][j] != '-' and self.M[i][j]:
matriz[i][j] = 1
else:
matriz[i][j] = 0
return matriz
############### PRIM'S ALGORITHM ###############
def menorPeso(self, pesos, visitados, quantVertices):
'''Smallest weight among the vertices not yet visited'''
minimo = 9**9
for v in range(quantVertices):
if pesos[v] < minimo and visitados[v] == False:
minimo = pesos[v]
menorIndice = v
return menorIndice
def primAlgoritmo(self):
'''
Uses Prim's algorithm to build a matrix made of
the arc plus the arc weight. E.g.: [['A-B', 3], ['A-C', 4], ...
:param : The graph.
:return: A new matrix generated by Prim's algorithm.
'''
vertices = deepcopy(self.N)
grafo = self.matrizAdjacenciaComPesos()
quantVertices = len(grafo)
pesos = [9**9] * quantVertices # Smallest arc weights
noPais = [None] * quantVertices # Store the minimum spanning tree parents
pesos[0] = 0 # Distance to the root node is 0
visitados = [False] * quantVertices
noPais[0] = -1
for n in range(quantVertices):
# Pick the minimum-distance vertex that has not been processed yet.
vMenorDist = self.menorPeso(pesos, visitados, quantVertices)
visitados[vMenorDist] = True
for v in range(quantVertices):
if grafo[vMenorDist][v] > 0 and visitados[v] == False and pesos[v] > grafo[vMenorDist][v]:
pesos[v] = grafo[vMenorDist][v]
noPais[v] = vMenorDist
caminhoPrim = []
for i in range(1, quantVertices):
aux = []
aux.append(vertices[noPais[i]] + '-' + vertices[i])
aux.append(self.pesoArco(vertices[noPais[i]], vertices[i]))
caminhoPrim.append(aux)
return caminhoPrim
############### KRUSKAL'S ALGORITHM ###############
def posicaoDoVertice(self, V):
'''Returns the position of vertex (V) in self.N'''
for i in range(len(self.N)):
if self.N[i] == V:
return i
def utilKruskal(self):
grafo = self.matrizModificada()
novaLista = []
for j in range(len(grafo)):
aux = []
for k in range(1, 3):
aux.append(self.posicaoDoVertice(grafo[j][k]))
aux.append(grafo[j][3])
novaLista.append(aux)
return novaLista
def buscaKruskal(self, pai, i):
'''Utility function to find the set (root) of an element i'''
if pai[i] == i:
return i
return self.buscaKruskal(pai, pai[i])
def kruskalAlgoritmo(self):
'''
Uses Kruskal's algorithm to build a matrix made of
the arc plus the arc weight. E.g.: [['A-B', 3], ['A-C', 4], ...
:param : The graph.
:return: A new matrix generated by Kruskal's algorithm.
'''
grafo = self.utilKruskal()
V = len(self.N)
i = soma = 0
# Sort the lists by weight
graph = sorted(grafo, key=lambda item: item[2])
pai, rank, resultado = [], [], []
for node in range(V):
pai.append(node)
rank.append(0)
while soma < V - 1:
v1, v2, peso = graph[i]
i += 1
aux1 = self.buscaKruskal(pai, v1)
aux2 = self.buscaKruskal(pai, v2)
# If including this edge does not create a cycle, add it to the result
# and advance the result index to the next edge
if aux1 != aux2:
soma += 1
resultado.append([v1, v2, peso])
# Union of the two sets of aux1 and aux2
xRaiz = self.buscaKruskal(pai, aux1)
yRaiz = self.buscaKruskal(pai, aux2)
if rank[xRaiz] < rank[yRaiz]: pai[xRaiz] = yRaiz
elif rank[xRaiz] > rank[yRaiz]: pai[yRaiz] = xRaiz
else:
pai[yRaiz] = xRaiz
rank[xRaiz] += 1
resultadoFinal = []
for i in range(len(resultado)):
aux = []
aux.append(self.N[resultado[i][0]] + '-' + self.N[resultado[i][1]])
aux.append(resultado[i][2])
resultadoFinal.append(aux)
return resultadoFinal
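# Usage sketch (illustrative, not part of the original file). It assumes the
# bibgrafo base class constructor accepts a list of vertex labels and that
# adicionaAresta takes a label and two vertices, as used inside dfs() above;
# a weight argument would additionally be needed for primAlgoritmo/kruskalAlgoritmo.
#
# g = MeuGrafo(['A', 'B', 'C'])
# g.adicionaAresta('a1', 'A', 'B')
# g.adicionaAresta('a2', 'B', 'C')
# print(g.grau('B'))      # expected: 2
# print(g.ha_laco())      # expected: False
# print(g.eh_completo())  # expected: False ('A' and 'C' are not adjacent)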
| 34.456425
| 116
| 0.502808
|
a20ac1c8b87554cc8072aacd12300b90a8ba91b2
| 10,098
|
py
|
Python
|
model/single_frame_worker.py
|
idiap/DepthInSpace
|
fe759807f82df4c48c16b97f061718175ea0e6e9
|
[
"MIT"
] | 1
|
2022-02-05T09:50:24.000Z
|
2022-02-05T09:50:24.000Z
|
model/single_frame_worker.py
|
idiap/DepthInSpace
|
fe759807f82df4c48c16b97f061718175ea0e6e9
|
[
"MIT"
] | null | null | null |
model/single_frame_worker.py
|
idiap/DepthInSpace
|
fe759807f82df4c48c16b97f061718175ea0e6e9
|
[
"MIT"
] | null | null | null |
# DepthInSpace is a PyTorch-based program which estimates 3D depth maps
# from active structured-light sensor's multiple video frames.
#
# MIT License
#
# Copyright, 2021 ams International AG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import numpy as np
import logging
import itertools
import matplotlib.pyplot as plt
import co
from data import base_dataset
from data import dataset
from model import networks
from . import worker
class Worker(worker.Worker):
def __init__(self, args, **kwargs):
super().__init__(args, **kwargs)
self.disparity_loss = networks.DisparitySmoothLoss()
def get_train_set(self):
train_set = dataset.TrackSynDataset(self.settings_path, self.train_paths, train=True, data_aug=True, track_length=self.track_length, load_flow_data = True, load_primary_data = False, load_pseudo_gt = self.use_pseudo_gt, data_type = self.data_type)
return train_set
def get_test_sets(self):
test_sets = base_dataset.TestSets()
test_set = dataset.TrackSynDataset(self.settings_path, self.test_paths, train=False, data_aug=False, track_length=self.track_length, load_flow_data = True, load_primary_data = False, load_pseudo_gt = self.use_pseudo_gt, data_type = self.data_type)
test_sets.append('simple', test_set, test_frequency=1)
self.patterns = []
self.ph_losses = []
self.ge_losses = []
self.d2ds = []
self.lcn_in = self.lcn_in.to('cuda')
for sidx in range(len(test_set.imsizes)):
imsize = test_set.imsizes[sidx]
pat = test_set.patterns[sidx]
pat = pat.mean(axis=2)
pat = torch.from_numpy(pat[None][None].astype(np.float32)).to('cuda')
pat,_ = self.lcn_in(pat)
self.patterns.append(pat)
pat = torch.cat([pat for idx in range(3)], dim=1)
ph_loss = networks.RectifiedPatternSimilarityLoss(imsize[0],imsize[1], pattern=pat)
K = test_set.getK(sidx)
Ki = np.linalg.inv(K)
K = torch.from_numpy(K)
Ki = torch.from_numpy(Ki)
ge_loss = networks.Single_Frame_Flow_Consistency_Loss(K, Ki, imsize[0], imsize[1], clamp=0.1)
self.ph_losses.append( ph_loss )
self.ge_losses.append( ge_loss )
d2d = networks.DispToDepth(float(test_set.focal_lengths[sidx]), float(test_set.baseline))
self.d2ds.append( d2d )
return test_sets
def net_forward(self, net, flow = None):
im0 = self.data['im0']
tl = im0.shape[0]
bs = im0.shape[1]
im0 = im0.view(-1, *im0.shape[2:])
out = net(im0)
if not(isinstance(out, tuple) or isinstance(out, list)):
out = out.view(tl, bs, *out.shape[1:])
else:
out = [o.view(tl, bs, *o.shape[1:]) for o in out]
return out
def loss_forward(self, out, train, flow_out = None):
if not(isinstance(out, tuple) or isinstance(out, list)):
out = [out]
vals = []
# apply photometric loss
for s,o in zip(itertools.count(), out):
im = self.data[f'im0']
im = im.view(-1, *im.shape[2:])
o = o.view(-1, *o.shape[2:])
std = self.data[f'std0']
std = std.view(-1, *std.shape[2:])
val, pattern_proj = self.ph_losses[0](o, im[:,0:1,...], std)
vals.append(val / (2 ** s))
# apply disparity loss
for s, o in zip(itertools.count(), out):
if s == 0:
amb0 = self.data[f'ambient0']
amb0 = amb0.contiguous().view(-1, *amb0.shape[2:])
o = o.view(-1, *o.shape[2:])
val = self.disparity_loss(o, amb0)
vals.append(val * 0.4 / (2 ** s))
# apply geometric loss
R = self.data['R']
t = self.data['t']
amb = self.data['ambient0']
ge_num = self.track_length * (self.track_length-1) / 2
for sidx in range(1):
d2d = self.d2ds[0]
depth = d2d(out[sidx])
ge_loss = self.ge_losses[0]
for tidx0 in range(depth.shape[0]):
for tidx1 in range(tidx0+1, depth.shape[0]):
depth0 = depth[tidx0]
R0 = R[tidx0]
t0 = t[tidx0]
amb0 = amb[tidx0]
flow0 = flow_out[f'flow_{tidx0}{tidx1}']
depth1 = depth[tidx1]
R1 = R[tidx1]
t1 = t[tidx1]
amb1 = amb[tidx1]
flow1 = flow_out[f'flow_{tidx1}{tidx0}']
val, flow_mask0, flow_mask1, orig_mask = ge_loss(depth0, depth1, R0, t0, R1, t1, flow0, flow1, amb0, amb1)
vals.append(val * 0.2 / ge_num / (2 ** sidx))
# using pseudo-ground truth
if self.use_pseudo_gt:
for s, o in zip(itertools.count(), out):
val = torch.mean(torch.abs(o - self.data['pseudo_gt']))
vals.append(val * 0.1 / (2 ** s))
# warming up the network for a few epochs
if train and self.data_type == 'real':
if self.current_epoch < self.warmup_epochs:
for s, o in zip(itertools.count(), out):
valid_mask = (self.data['sgm_disp'] > 30).float()
val = torch.sum(torch.abs(o - self.data['sgm_disp'] + 1.5 * torch.randn(o.size()).cuda()) * valid_mask) / torch.sum(valid_mask)
vals.append(val * 0.1)
return vals
def numpy_in_out(self, output):
if not(isinstance(output, tuple) or isinstance(output, list)):
output = [output]
es = output[0].detach().to('cpu').numpy()
gt = self.data['disp0'].detach().to('cpu').numpy().astype(np.float32)
im = self.data['im0'][:,:,0:1,...].detach().to('cpu').numpy()
amb = self.data['ambient0'].detach().to('cpu').numpy()
pat = self.patterns[0].detach().to('cpu').numpy()
es = es * (gt > 0)
return es, gt, im, amb, pat
def write_img(self, out_path, es, gt, im, amb, pat):
logging.info(f'write img {out_path}')
diff = np.abs(es - gt)
vmin, vmax = np.nanmin(gt), np.nanmax(gt)
vmin = vmin - 0.2*(vmax-vmin)
vmax = vmax + 0.2*(vmax-vmin)
vmax = np.max([vmax, 16])
fig = plt.figure(figsize=(16,16))
es0 = co.cmap.color_depth_map(es[0], scale=vmax)
gt0 = co.cmap.color_depth_map(gt[0], scale=vmax)
diff0 = co.cmap.color_error_image(diff[0], BGR=True)
# plot pattern and input images
ax = plt.subplot(3,3,1); plt.imshow(pat, vmin=pat.min(), vmax=pat.max(), cmap='gray'); plt.xticks([]); plt.yticks([]); ax.set_title(f'Projector Pattern')
ax = plt.subplot(3,3,2); plt.imshow(im[0], vmin=im.min(), vmax=im.max(), cmap='gray'); plt.xticks([]); plt.yticks([]); ax.set_title(f'F0 IR Input')
ax = plt.subplot(3,3,3); plt.imshow(amb[0], vmin=amb.min(), vmax=amb.max(), cmap='gray'); plt.xticks([]); plt.yticks([]); ax.set_title(f'F0 Ambient Input')
# plot disparities, ground truth disparity is shown only for reference
ax = plt.subplot(3,3,4); plt.imshow(gt0[...,[2,1,0]]); plt.xticks([]); plt.yticks([]); ax.set_title(f'F0 Disparity GT {np.nanmin(gt[0]):.4f}/{np.nanmax(gt[0]):.4f}')
ax = plt.subplot(3,3,5); plt.imshow(es0[...,[2,1,0]]); plt.xticks([]); plt.yticks([]); ax.set_title(f'F0 Disparity Est. {es[0].min():.4f}/{es[0].max():.4f}')
ax = plt.subplot(3,3,6); plt.imshow(diff0[...,[2,1,0]]); plt.xticks([]); plt.yticks([]); ax.set_title(f'F0 Disparity Err. {diff[0].mean():.5f}')
es1 = co.cmap.color_depth_map(es[1], scale=vmax)
gt1 = co.cmap.color_depth_map(gt[1], scale=vmax)
diff1 = co.cmap.color_error_image(diff[1], BGR=True)
ax = plt.subplot(3,3,7); plt.imshow(gt1[...,[2,1,0]]); plt.xticks([]); plt.yticks([]); ax.set_title(f'F1 Disparity GT {np.nanmin(gt[1]):.4f}/{np.nanmax(gt[1]):.4f}')
ax = plt.subplot(3,3,8); plt.imshow(es1[...,[2,1,0]]); plt.xticks([]); plt.yticks([]); ax.set_title(f'F1 Disparity Est. {es[1].min():.4f}/{es[1].max():.4f}')
ax = plt.subplot(3,3,9); plt.imshow(diff1[...,[2,1,0]]); plt.xticks([]); plt.yticks([]); ax.set_title(f'F1 Disparity Err. {diff[1].mean():.5f}')
plt.tight_layout()
plt.savefig(str(out_path))
plt.close(fig)
def callback_train_post_backward(self, net, errs, output, epoch, batch_idx, masks):
if batch_idx % 256 == 0:
out_path = self.exp_output_dir / f'train_{epoch:03d}_{batch_idx:04d}.png'
es, gt, im, amb, pat = self.numpy_in_out(output)
self.write_img(out_path, es[:, 0, 0], gt[:, 0, 0], im[:, 0, 0], amb[:, 0, 0], pat[0, 0])
torch.cuda.empty_cache()
def callback_test_start(self, epoch, set_idx):
self.metric = co.metric.MultipleMetric(
co.metric.DistanceMetric(vec_length=1),
co.metric.OutlierFractionMetric(vec_length=1, thresholds=[0.1, 0.5, 1, 2, 5])
)
def callback_test_add(self, epoch, set_idx, batch_idx, n_batches, output, masks):
es, gt, im, amb, pat = self.numpy_in_out(output)
if batch_idx % 8 == 0:
out_path = self.exp_output_dir / f'test_{epoch:03d}_{batch_idx:04d}.png'
self.write_img(out_path, es[:, 0, 0], gt[:, 0, 0], im[:, 0, 0], amb[:, 0, 0], pat[0, 0])
es = self.crop_reshape(es)
gt = self.crop_reshape(gt)
self.metric.add(es, gt)
def crop_reshape(self, input):
output = input.reshape(-1, 1)
return output
def callback_test_stop(self, epoch, set_idx, loss):
logging.info(f'{self.metric}')
for k, v in self.metric.items():
self.metric_add_test(epoch, set_idx, k, v)
if __name__ == '__main__':
pass
| 40.071429
| 251
| 0.639929
|
843e7a7d4fa974de9ef2d8b7f52859047941a677
| 692
|
py
|
Python
|
getpinyin.py
|
wangjksjtu/multi-embedding-cws
|
f272a7586bc0ad6f65e900c7f29fc9a89a0c95b4
|
[
"MIT"
] | 16
|
2018-09-18T13:59:59.000Z
|
2022-03-21T08:05:31.000Z
|
getpinyin.py
|
wangjksjtu/multi-embedding-cws
|
f272a7586bc0ad6f65e900c7f29fc9a89a0c95b4
|
[
"MIT"
] | null | null | null |
getpinyin.py
|
wangjksjtu/multi-embedding-cws
|
f272a7586bc0ad6f65e900c7f29fc9a89a0c95b4
|
[
"MIT"
] | 6
|
2020-01-05T13:03:45.000Z
|
2022-03-21T08:05:33.000Z
|
from pypinyin import lazy_pinyin
import sys
fo1 = open("pre_chars_for_w2v.txt", "r", encoding="utf8")
fo2 = open("pre_pinyin_for_w2v.txt", "a", encoding="utf8")
total_lines = len(fo1.readlines())
print("Total lines: %d" % (total_lines))
fo1.close()
lines = 0
fo1 = open("pre_chars_for_w2v.txt", "r", encoding="utf8")
while 1:
line = fo1.readline()
if not line:
break
# In Python 3 the file is already decoded text, so no decode()/encode() is needed.
pinyinset = lazy_pinyin(line)
lines = lines + 1
progress = (float(lines) / total_lines) * 100.00
sys.stdout.write("Process progress: %.2f%% \r" % (progress))
sys.stdout.flush()
for pinyin in pinyinset:
fo2.write(pinyin)
fo1.close()
fo2.close()
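# For reference: lazy_pinyin returns a list of tone-less pinyin syllables,
# e.g. lazy_pinyin("中文") -> ['zhong', 'wen'], which is why the syllables are
# written out one by one above.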
| 22.322581
| 67
| 0.638728
|
a51cab49d8c2b6b39f96f0f6eb82f3d37c76739d
| 3,833
|
py
|
Python
|
sdk/python/pulumi_aws_native/sagemaker/get_device_fleet.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/sagemaker/get_device_fleet.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/sagemaker/get_device_fleet.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetDeviceFleetResult',
'AwaitableGetDeviceFleetResult',
'get_device_fleet',
'get_device_fleet_output',
]
@pulumi.output_type
class GetDeviceFleetResult:
def __init__(__self__, description=None, output_config=None, role_arn=None, tags=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if output_config and not isinstance(output_config, dict):
raise TypeError("Expected argument 'output_config' to be a dict")
pulumi.set(__self__, "output_config", output_config)
if role_arn and not isinstance(role_arn, str):
raise TypeError("Expected argument 'role_arn' to be a str")
pulumi.set(__self__, "role_arn", role_arn)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description for the edge device fleet
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="outputConfig")
def output_config(self) -> Optional['outputs.DeviceFleetEdgeOutputConfig']:
"""
S3 bucket and an encryption key id (if available) to store outputs for the fleet
"""
return pulumi.get(self, "output_config")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[str]:
"""
Role associated with the device fleet
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.DeviceFleetTag']]:
"""
Associate tags with the resource
"""
return pulumi.get(self, "tags")
class AwaitableGetDeviceFleetResult(GetDeviceFleetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDeviceFleetResult(
description=self.description,
output_config=self.output_config,
role_arn=self.role_arn,
tags=self.tags)
def get_device_fleet(device_fleet_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDeviceFleetResult:
"""
Resource schema for AWS::SageMaker::DeviceFleet
:param str device_fleet_name: The name of the edge device fleet
"""
__args__ = dict()
__args__['deviceFleetName'] = device_fleet_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:sagemaker:getDeviceFleet', __args__, opts=opts, typ=GetDeviceFleetResult).value
return AwaitableGetDeviceFleetResult(
description=__ret__.description,
output_config=__ret__.output_config,
role_arn=__ret__.role_arn,
tags=__ret__.tags)
@_utilities.lift_output_func(get_device_fleet)
def get_device_fleet_output(device_fleet_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDeviceFleetResult]:
"""
Resource schema for AWS::SageMaker::DeviceFleet
:param str device_fleet_name: The name of the edge device fleet
"""
...
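# Example usage (illustrative; the fleet name is hypothetical):
#
# import pulumi
# import pulumi_aws_native as aws_native
#
# fleet = aws_native.sagemaker.get_device_fleet(device_fleet_name="my-edge-fleet")
# pulumi.export("fleet_role_arn", fleet.role_arn)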
| 33.920354
| 127
| 0.670232
|
d580186a2ef50f82f77c4dab1e53e8879f7b966f
| 2,368
|
py
|
Python
|
addons14/document_page/models/document_page_history.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-06-10T14:59:13.000Z
|
2021-06-10T14:59:13.000Z
|
addons14/document_page/models/document_page_history.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | null | null | null |
addons14/document_page/models/document_page_history.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-04-09T09:44:44.000Z
|
2021-04-09T09:44:44.000Z
|
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import difflib
from odoo import _, api, fields, models
class DocumentPageHistory(models.Model):
"""This model is necessary to manage a document history."""
_name = "document.page.history"
_description = "Document Page History"
_order = "id DESC"
page_id = fields.Many2one("document.page", "Page", ondelete="cascade")
name = fields.Char(index=True)
summary = fields.Char(index=True)
content = fields.Text()
diff = fields.Text(compute="_compute_diff")
company_id = fields.Many2one(
"res.company",
"Company",
help="If set, page is accessible only from this company",
related="page_id.company_id",
store=True,
index=True,
readonly=True,
)
def _compute_diff(self):
"""Shows a diff between this version and the previous version"""
history = self.env["document.page.history"]
for rec in self:
prev = history.search(
[
("page_id", "=", rec.page_id.id),
("create_date", "<", rec.create_date),
],
limit=1,
order="create_date DESC",
)
rec.diff = self._get_diff(prev.id, rec.id)
@api.model
def _get_diff(self, v1, v2):
"""Return the difference between two version of document version."""
text1 = v1 and self.browse(v1).content or ""
text2 = v2 and self.browse(v2).content or ""
# Include line breaks to make it more readable
# TODO: consider using a beautify library directly on the content
text1 = text1.replace("</p><p>", "</p>\r\n<p>")
text2 = text2.replace("</p><p>", "</p>\r\n<p>")
line1 = text1.splitlines(True)
line2 = text2.splitlines(True)
if line1 == line2:
return _("There are no changes in revisions.")
else:
diff = difflib.HtmlDiff()
return diff.make_table(
line1,
line2,
"Revision-{}".format(v1),
"Revision-{}".format(v2),
context=True,
)
def name_get(self):
return [(rec.id, "%s #%i" % (rec.page_id.name, rec.id)) for rec in self]
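# Standalone sketch of the difflib call used in _get_diff (for reference only):
#
# import difflib
# old = "<p>foo</p>\r\n<p>bar</p>".splitlines(True)
# new = "<p>foo</p>\r\n<p>baz</p>".splitlines(True)
# table = difflib.HtmlDiff().make_table(old, new, "Revision-1", "Revision-2", context=True)
# print(table)  # an HTML <table> highlighting the changed line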
| 33.352113
| 80
| 0.558277
|
92317eb2b76c303f4cad2c2e96348225c61e66ba
| 2,795
|
py
|
Python
|
model/u-net/config.py
|
CoolPhilChen/cil-road-segmentation-2019
|
0becfe97d77012b3abbaa181a5c52e6edd1a39f1
|
[
"MIT"
] | null | null | null |
model/u-net/config.py
|
CoolPhilChen/cil-road-segmentation-2019
|
0becfe97d77012b3abbaa181a5c52e6edd1a39f1
|
[
"MIT"
] | null | null | null |
model/u-net/config.py
|
CoolPhilChen/cil-road-segmentation-2019
|
0becfe97d77012b3abbaa181a5c52e6edd1a39f1
|
[
"MIT"
] | 1
|
2020-06-08T02:09:18.000Z
|
2020-06-08T02:09:18.000Z
|
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import sys
import time
import numpy as np
from easydict import EasyDict as edict
import argparse
C = edict()
config = C
cfg = C
C.seed = 12345
"""please config ROOT_dir and user when u first using"""
C.repo_name = 'cil-road-segmentation-2019'
C.abs_dir = osp.realpath(".")
C.this_dir = C.abs_dir.split(osp.sep)[-1]
C.root_dir = C.abs_dir[:C.abs_dir.index(C.repo_name) + len(C.repo_name)]
C.log_dir = osp.abspath(osp.join(C.root_dir, 'log', C.this_dir))
C.log_dir_link = osp.join(C.abs_dir, 'log')
C.snapshot_dir = osp.abspath(osp.join(C.log_dir, "snapshot"))
exp_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
C.log_file = C.log_dir + '/log_' + exp_time + '.log'
C.link_log_file = C.log_file + '/log_last.log'
C.val_log_file = C.log_dir + '/val_' + exp_time + '.log'
C.link_val_log_file = C.log_dir + '/val_last.log'
"""Data Dir and Weight Dir"""
C.dataset_path = "/local/home/lixxue/cil/cil-road-segmentation-2019/cil-road-segmentation-2019/"
C.img_root_folder = C.dataset_path
C.gt_root_folder = C.dataset_path
C.train_source = osp.join(C.dataset_path, "train.txt")
C.eval_source = osp.join(C.dataset_path, "val.txt")
C.test_source = osp.join(C.dataset_path, "test.txt")
C.is_test = False
"""Path Config"""
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
add_path(osp.join(C.root_dir, 'furnace'))
"""Image Config"""
C.num_classes = 2
C.image_mean = np.array([0.485, 0.456, 0.406]) # 0.485, 0.456, 0.406
C.image_std = np.array([0.229, 0.224, 0.225])
C.target_size = 1024
C.image_height = 400
C.image_width = 400
C.test_image_height = 608
C.test_image_width = 608
C.gt_down_sampling = 1
C.num_train_imgs = 90
C.num_eval_imgs = 10
""" Settings for network, this would be different for each kind of model"""
C.fix_bias = True
C.fix_bn = False
C.sync_bn = True
C.bn_eps = 1e-5
C.bn_momentum = 0.1
C.pretrained_model = None
"""Train Config"""
C.lr = 1e-3
C.lr_power = 0.9
C.momentum = 0.9
C.weight_decay = 5e-4
C.batch_size = 16
C.nepochs = 80
C.niters_per_epoch = 1000
C.num_workers = 4
C.train_scale_array = [0.75, 1, 1.25, 1.5, 1.75, 2.0]
"""Eval Config"""
C.eval_iter = 30
C.eval_stride_rate = 2 / 3
C.eval_scale_array = [1, ]
C.eval_flip = False
C.eval_height = 400
C.eval_width = 400
"""Display Config"""
C.snapshot_iter = 50
C.record_info_iter = 20
C.display_iter = 50
def open_tensorboard():
pass
if __name__ == '__main__':
print(config.nepochs)
parser = argparse.ArgumentParser()
parser.add_argument(
'-tb', '--tensorboard', default=False, action='store_true')
args = parser.parse_args()
if args.tensorboard:
open_tensorboard()
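# Typical usage elsewhere in the project (illustrative; the import path assumes
# the training script lives next to this file):
#
# from config import config
# print(config.lr, config.batch_size, config.image_height)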
| 24.517544
| 96
| 0.706619
|
f1c180478a7d3c7fdfe031e7e0aa350b1700fc0e
| 326
|
py
|
Python
|
sgcc-client/config/project_conf.py
|
zhanwei33/baai-federated-learning
|
838507f2344139e66385e1ef475a148fdeaf5c62
|
[
"Apache-2.0"
] | 19
|
2020-11-30T09:42:08.000Z
|
2020-12-09T02:08:11.000Z
|
sgcc-client/config/project_conf.py
|
AIOpenData/baai-federated-learning-crane-baseline
|
838507f2344139e66385e1ef475a148fdeaf5c62
|
[
"Apache-2.0"
] | null | null | null |
sgcc-client/config/project_conf.py
|
AIOpenData/baai-federated-learning-crane-baseline
|
838507f2344139e66385e1ef475a148fdeaf5c62
|
[
"Apache-2.0"
] | 5
|
2020-12-01T01:57:19.000Z
|
2021-11-20T14:38:44.000Z
|
class ProjectConf:
# development environment
dev = {
"host": "0.0.0.0",
"port": 5016,
"config": {
"ENV": "dev",
"DEBUG": True
}
}
# product environment
pro = {
"config": {
"ENV": "pro",
"DEBUG": False
}
}
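# Illustrative sketch of how this configuration could be consumed by a
# Flask-style application factory (Flask itself is an assumption, not shown in
# this file):
#
# from flask import Flask
# from config.project_conf import ProjectConf
#
# app = Flask(__name__)
# app.config.update(ProjectConf.dev["config"])
# app.run(host=ProjectConf.dev["host"], port=ProjectConf.dev["port"])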
| 17.157895
| 29
| 0.371166
|
f5f8ba264744ff477dd4de332ee3750d24cb9bc4
| 702
|
py
|
Python
|
genia/utils/download.py
|
m-stoeckel/pyramid-nested-ner
|
ee169d3d84b4beeeecb3d5aeb5caa826166cce78
|
[
"MIT"
] | 6
|
2020-11-15T09:17:36.000Z
|
2021-12-10T18:30:58.000Z
|
genia/utils/download.py
|
m-stoeckel/pyramid-nested-ner
|
ee169d3d84b4beeeecb3d5aeb5caa826166cce78
|
[
"MIT"
] | 2
|
2021-03-31T01:06:43.000Z
|
2021-07-01T12:44:33.000Z
|
genia/utils/download.py
|
m-stoeckel/pyramid-nested-ner
|
ee169d3d84b4beeeecb3d5aeb5caa826166cce78
|
[
"MIT"
] | 3
|
2021-04-05T18:58:01.000Z
|
2021-10-11T03:57:17.000Z
|
from oauth2client.client import GoogleCredentials
from pydrive.drive import GoogleDrive
from pydrive.auth import GoogleAuth
from google.colab import auth
if __name__ == "__main__":
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
bio_bert = ("1GJpGjQj6aZPV-EfbiQELpBkvlGtoKiyA", 'biobert_large_v1.1_pubmed.tar.gz')
bio_nlp_vec = ("0BzMCqpcgEJgiUWs0ZnU0NlFTam8", 'bio_nlp_vec.tar.gz')
for file_id, file_name in [bio_nlp_vec, bio_bert]:
print(f'downloading {file_name}...')
downloaded = drive.CreateFile({'id': file_id})
downloaded.GetContentFile(file_name)
| 31.909091
| 88
| 0.74359
|
72271162c71f6b514b7ac064452d20ab4782cf17
| 110
|
py
|
Python
|
core/urls.py
|
ghalonso94/wswallet
|
8f1f13a0d646166adad45b3872c2db6558d48f38
|
[
"MIT"
] | null | null | null |
core/urls.py
|
ghalonso94/wswallet
|
8f1f13a0d646166adad45b3872c2db6558d48f38
|
[
"MIT"
] | null | null | null |
core/urls.py
|
ghalonso94/wswallet
|
8f1f13a0d646166adad45b3872c2db6558d48f38
|
[
"MIT"
] | null | null | null |
from django.urls import path
from core import views
urlpatterns = [
path('', views.index, name='index')
]
| 18.333333
| 39
| 0.7
|
d113c2f53d3b67e10d16ed2f334a14ba6decd748
| 3,341
|
py
|
Python
|
api/src/application/socrata/client.py
|
iliaskaras/housing-units
|
2b0d78fcb42629ce7530b2da556ebd550cc7bec8
|
[
"MIT"
] | null | null | null |
api/src/application/socrata/client.py
|
iliaskaras/housing-units
|
2b0d78fcb42629ce7530b2da556ebd550cc7bec8
|
[
"MIT"
] | null | null | null |
api/src/application/socrata/client.py
|
iliaskaras/housing-units
|
2b0d78fcb42629ce7530b2da556ebd550cc7bec8
|
[
"MIT"
] | null | null | null |
from typing import Dict
from pandas import DataFrame
from sodapy import Socrata
from application.infrastructure.configurations.models import Configuration
from application.infrastructure.error.errors import NoneArgumentError
import pandas as pd
class SocrataClient:
DTYPES: Dict[str, str] = {
'project_id': 'object',
'project_name': 'object',
'project_start_date': 'datetime64',
'project_completion_date': 'datetime64',
'building_id': 'Int64',
'house_number': 'object',
'street_name': 'object',
'borough': 'object',
'postcode': 'Int64',
'bbl': 'Int64',
'bin': 'Int64',
'community_board': 'object',
'council_district': 'Int64',
'census_tract': 'object',
'neighborhood_tabulation_area': 'object',
'latitude': 'Int64',
'longitude': 'Int64',
'latitude_internal': 'Int64',
'longitude_internal': 'Int64',
'building_completion_date': 'datetime64',
'reporting_construction_type': 'object',
'extended_affordability_status': 'object',
'prevailing_wage_status': 'object',
'extremely_low_income_units': 'Int64',
'very_low_income_units': 'Int64',
'low_income_units': 'Int64',
'moderate_income_units': 'Int64',
'middle_income_units': 'Int64',
'other_income_units': 'Int64',
'studio_units': 'Int64',
'_1_br_units': 'Int64',
'_2_br_units': 'Int64',
'_3_br_units': 'Int64',
'_4_br_units': 'Int64',
'_5_br_units': 'Int64',
'_6_br_units': 'Int64',
'unknown_br_units': 'Int64',
'counted_rental_units': 'Int64',
'counted_homeownership_units': 'Int64',
'all_counted_units': 'Int64',
'total_units': 'Int64',
}
CHUNK_SIZE = 500
def __init__(self, hbd_dataset_id: str = 'hg8x-zxpr'):
if not hbd_dataset_id:
raise NoneArgumentError("HBD Dataset ID is not provided.")
self._dataset_id: str = hbd_dataset_id
self._client = Socrata(
"data.cityofnewyork.us",
app_token=Configuration.get().socrata_app_token
)
self._dataset_size = 10000
def download_housing_units_dataset(self) -> DataFrame:
"""
Downloads the Housing Units dataset by providing the dataset id from the Socrata api.
:return: The dataframe containing the downloaded results.
"""
results = self._client.get(self._dataset_id, limit=self._dataset_size)
# Convert to pandas DataFrame and change the column types.
dataframe: DataFrame = pd.DataFrame.from_records(results)
for col, col_type in self.DTYPES.items():
if col_type == 'Int64':
dataframe[col] = pd.to_numeric(dataframe[col])
return dataframe
@classmethod
def housing_unit_dataset_generator(cls, housing_unit_dataset: DataFrame) -> DataFrame:
"""
Breaks the Housing Unit dataset into chunks and creates a generator out of them.
:return: The yielded chunked dataframe.
"""
for pos in range(0, len(housing_unit_dataset), cls.CHUNK_SIZE):
yield housing_unit_dataset.iloc[pos:pos + cls.CHUNK_SIZE]
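# Usage sketch (illustrative only; requires network access and a Socrata app
# token in Configuration):
#
# client = SocrataClient()
# dataframe = client.download_housing_units_dataset()
# for chunk in SocrataClient.housing_unit_dataset_generator(dataframe):
#     persist(chunk)  # hypothetical persistence step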
| 34.802083
| 93
| 0.628854
|
e5e7b6808b858f1a90c96b65d6898a8660b61a3b
| 1,671
|
py
|
Python
|
Search_based_Planning/Search_2D/Dijkstra.py
|
CodesHub/PathPlanning
|
8271d9a0e30d7d9d0f20d61a2f85b8fe199209fa
|
[
"MIT"
] | 3,693
|
2020-07-15T15:41:07.000Z
|
2022-03-31T17:26:46.000Z
|
Search_based_Planning/Search_2D/Dijkstra.py
|
Alanaab/PathPlanning
|
8c12192d6952fcd2c3f8ba3c98e3593b27049a40
|
[
"MIT"
] | 26
|
2020-08-27T04:56:59.000Z
|
2022-03-14T02:17:05.000Z
|
Search_based_Planning/Search_2D/Dijkstra.py
|
Alanaab/PathPlanning
|
8c12192d6952fcd2c3f8ba3c98e3593b27049a40
|
[
"MIT"
] | 799
|
2020-07-17T04:02:05.000Z
|
2022-03-31T12:56:29.000Z
|
"""
Dijkstra 2D
@author: huiming zhou
"""
import os
import sys
import math
import heapq
sys.path.append(os.path.dirname(os.path.abspath(__file__)) +
"/../../Search_based_Planning/")
from Search_2D import plotting, env
from Search_2D.Astar import AStar
class Dijkstra(AStar):
"""Dijkstra set the cost as the priority
"""
def searching(self):
"""
Dijkstra search (best-first by accumulated path cost).
:return: path, visited order
"""
self.PARENT[self.s_start] = self.s_start
self.g[self.s_start] = 0
self.g[self.s_goal] = math.inf
heapq.heappush(self.OPEN,
(0, self.s_start))
while self.OPEN:
_, s = heapq.heappop(self.OPEN)
self.CLOSED.append(s)
if s == self.s_goal:
break
for s_n in self.get_neighbor(s):
new_cost = self.g[s] + self.cost(s, s_n)
if s_n not in self.g:
self.g[s_n] = math.inf
if new_cost < self.g[s_n]: # conditions for updating Cost
self.g[s_n] = new_cost
self.PARENT[s_n] = s
# best-first: the accumulated path cost is used as the priority
heapq.heappush(self.OPEN, (new_cost, s_n))
return self.extract_path(self.PARENT), self.CLOSED
def main():
s_start = (5, 5)
s_goal = (45, 25)
dijkstra = Dijkstra(s_start, s_goal, 'None')
plot = plotting.Plotting(s_start, s_goal)
path, visited = dijkstra.searching()
plot.animation(path, visited, "Dijkstra's") # animation generate
if __name__ == '__main__':
main()
| 23.871429
| 74
| 0.55775
|
99f56d284569ebeaebc3764c0a1a23f61681bfbf
| 14,765
|
py
|
Python
|
fastybird_shelly_connector/clients/coap.py
|
FastyBird/shelly-connector
|
afbd75da091f96854fa09d3750d9f75406c5bf39
|
[
"Apache-2.0"
] | null | null | null |
fastybird_shelly_connector/clients/coap.py
|
FastyBird/shelly-connector
|
afbd75da091f96854fa09d3750d9f75406c5bf39
|
[
"Apache-2.0"
] | null | null | null |
fastybird_shelly_connector/clients/coap.py
|
FastyBird/shelly-connector
|
afbd75da091f96854fa09d3750d9f75406c5bf39
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# Copyright 2021. FastyBird s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Shelly connector clients module CoAP client
"""
# Python base dependencies
import logging
import select
import struct
import time
from socket import ( # pylint: disable=no-name-in-module
AF_INET,
INADDR_ANY,
IP_ADD_MEMBERSHIP,
IPPROTO_IP,
IPPROTO_UDP,
SHUT_RDWR,
SO_REUSEADDR,
SOCK_DGRAM,
SOL_SOCKET,
error,
inet_aton,
socket,
)
from threading import Thread
from typing import Optional, Union
# Library libs
from fastybird_shelly_connector.api.gen1parser import Gen1Parser
from fastybird_shelly_connector.api.gen1validator import Gen1Validator
from fastybird_shelly_connector.clients.client import IClient
from fastybird_shelly_connector.consumers.consumer import Consumer
from fastybird_shelly_connector.exceptions import (
FileNotFoundException,
LogicException,
ParsePayloadException,
)
from fastybird_shelly_connector.logger import Logger
from fastybird_shelly_connector.registry.model import DevicesRegistry
from fastybird_shelly_connector.registry.records import (
BlockRecord,
DeviceRecord,
SensorRecord,
)
from fastybird_shelly_connector.types import ClientMessageType, ClientType
class CoapClient(IClient, Thread): # pylint: disable=too-many-instance-attributes
"""
CoAP client
@package FastyBird:ShellyConnector!
@module clients/coap
@author Adam Kadlec <adam.kadlec@fastybird.com>
"""
__stopped: bool = True
__socket: Optional[socket] = None
__validator: Gen1Validator
__parser: Gen1Parser
__consumer: Consumer
__devices_registry: DevicesRegistry
__logger: Union[Logger, logging.Logger]
# __timer: Optional[Timer] = None
__BIND_IP: str = "0.0.0.0"
__COAP_IP: str = "224.0.1.187"
__COAP_PORT: int = 5683
__DISCOVERY_INTERVAL: int = 60
# -----------------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments
self,
validator: Gen1Validator,
parser: Gen1Parser,
consumer: Consumer,
devices_registry: DevicesRegistry,
logger: Union[Logger, logging.Logger] = logging.getLogger("dummy"),
) -> None:
Thread.__init__(self, name="CoAP server thread", daemon=True)
self.__consumer = consumer
self.__validator = validator
self.__parser = parser
self.__devices_registry = devices_registry
self.__logger = logger
# -----------------------------------------------------------------------------
@property
def type(self) -> ClientType:
"""Client type"""
return ClientType.COAP
# -----------------------------------------------------------------------------
def start(self) -> None:
"""Start communication"""
self.__create_client()
# self.__timer = Timer(interval=self.__DISCOVERY_INTERVAL)
self.__stopped = False
if not Thread.is_alive(self):
Thread.start(self)
# -----------------------------------------------------------------------------
def stop(self) -> None:
"""Stop communication"""
# self.__timer = None
self.__stopped = True
if self.__socket is not None:
try:
self.__socket.shutdown(SHUT_RDWR)
self.__socket = None
except error:
pass
# -----------------------------------------------------------------------------
def is_connected(self) -> bool:
"""Check if client is connected"""
return self.__socket is not None
# -----------------------------------------------------------------------------
def discover(self) -> None:
"""Send discover command"""
if self.__socket is not None:
self.__logger.debug(
"Sending CoAP discover UDP",
extra={
"client": {
"type": ClientType.COAP.value,
},
},
)
msg = bytes(b"\x50\x01\x00\x0A\xb3cit\x01d\xFF")
self.__socket.sendto(msg, (self.__COAP_IP, self.__COAP_PORT))
# -----------------------------------------------------------------------------
def handle(self) -> None:
"""Process CoAP requests"""
# -----------------------------------------------------------------------------
def write_sensor(
self,
device_record: DeviceRecord,
block_record: BlockRecord,
sensor_record: SensorRecord,
write_value: Union[str, int, float, bool, None],
) -> None:
"""Write value to device sensor"""
# -----------------------------------------------------------------------------
def run(self) -> None:
"""Process CoAP requests"""
if self.__socket is None:
return
while not self.__stopped:
# if self.__timer is not None and self.__timer.check():
# self.discover()
try:
self.__handle_request()
except Exception as ex: # pylint: disable=broad-except
self.__logger.error(
"Error receiving CoAP UDP",
extra={
"client": {
"type": ClientType.COAP.value,
},
"exception": {
"message": str(ex),
"code": type(ex).__name__,
},
},
)
# -----------------------------------------------------------------------------
def __create_client(self) -> None:
"""Create CoAP socket client"""
if self.__socket is None:
try:
self.__socket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)
self.__socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.__socket.bind((self.__BIND_IP, self.__COAP_PORT))
mreq = struct.pack("=4sl", inet_aton(self.__COAP_IP), INADDR_ANY) # pylint: disable=no-member
self.__socket.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq)
except Exception as ex: # pylint: disable=broad-except
self.__logger.error(
"CoAP client can't be created",
extra={
"client": {
"type": ClientType.COAP.value,
},
"exception": {
"message": str(ex),
"code": type(ex).__name__,
},
},
)
# -----------------------------------------------------------------------------
def __handle_request( # pylint: disable=too-many-statements,too-many-branches,too-many-locals
self,
) -> None:
r_list, _, __ = select.select( # pylint: disable=c-extension-no-member
[self.__socket],
[],
[],
0.1,
)
for ready in r_list:
if isinstance(ready, socket):
data_tmp, address = ready.recvfrom(1024)
data = bytearray(data_tmp)
if len(data) < 10:
return
ip_address = address[0]
pos = 0
# Receive messages with ip from proxy
if data[0] == 112 and data[1] == 114 and data[2] == 120 and data[3] == 121:
pos = 8
byte = data[pos]
tkl = byte & 0x0F
# ver = byte >> 6
# typex = (byte >> 4) & 0x3
# token_length = byte & 0xF
code = data[pos + 1]
# message_id = 256 * data[2] + data[3]
pos = pos + 4 + tkl
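# Codes handled below: 69 (0x45, i.e. CoAP 2.05 Content) carries the device
# description reply, while 30 appears to be the non-standard code Shelly CoIoT
# uses for its periodic status multicasts.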
if code in (30, 69):
byte = data[pos]
tot_delta = 0
device_type = ""
device_identifier = ""
while byte != 0xFF:
delta = byte >> 4
length = byte & 0x0F
if delta == 13:
pos = pos + 1
delta = data[pos] + 13
elif delta == 14:
pos = pos + 2
delta = data[pos - 1] * 256 + data[pos] + 269
tot_delta = tot_delta + delta
if length == 13:
pos = pos + 1
length = data[pos] + 13
elif length == 14:
pos = pos + 2
length = data[pos - 1] * 256 + data[pos] + 269
value = data[pos + 1 : pos + length]
pos = pos + length + 1
if tot_delta == 3332:
device_type, device_identifier, _ = str(value, "cp1252").split("#", 2)
byte = data[pos]
try:
payload = str(data[pos + 1 :], "cp1252")
except Exception as ex: # pylint: disable=broad-except
self.__logger.error(
"Can't convert received payload",
extra={
"client": {
"type": ClientType.COAP.value,
},
"exception": {
"message": str(ex),
"code": type(ex).__name__,
},
},
)
return
if payload: # Fix for DW2 payload error
payload = payload.replace(",,", ",").replace("][", "],[")
self.__logger.debug(
"CoAP Code: %d, Type: %s, Id: %s, Payload: %s",
code,
device_type,
device_identifier,
payload.replace(" ", ""),
extra={
"client": {
"type": ClientType.COAP.value,
},
"device": {
"identifier": device_identifier,
"ip_address": ip_address,
"type": device_type,
},
},
)
if code == 30:
self.__handle_message(
device_identifier=device_identifier.lower(),
device_type=device_type.lower(),
device_ip_address=ip_address,
message_payload=payload,
message_type=ClientMessageType.COAP_STATUS,
)
elif code == 69:
self.__handle_message(
device_identifier=device_identifier.lower(),
device_type=device_type.lower(),
device_ip_address=ip_address,
message_payload=payload,
message_type=ClientMessageType.COAP_DESCRIPTION,
)
# -----------------------------------------------------------------------------
def __handle_message( # pylint: disable=too-many-arguments
self,
device_identifier: str,
device_type: str,
device_ip_address: str,
message_payload: str,
message_type: ClientMessageType,
) -> None:
device_record = self.__devices_registry.get_by_identifier(
device_identifier=device_identifier,
)
if device_record is not None:
self.__devices_registry.set_last_communication_timestamp(
device=device_record,
last_communication_timestamp=time.time(),
)
try:
if (
self.__validator.validate_coap_message(
message_payload=message_payload,
message_type=message_type,
)
is False
):
return
except (LogicException, FileNotFoundException) as ex:
self.__logger.error(
"Received message validation against schema failed",
extra={
"device": {
"identifier": device_identifier,
"type": device_type,
},
"exception": {
"message": str(ex),
"code": type(ex).__name__,
},
},
)
return
try:
entity = self.__parser.parse_coap_message(
device_identifier=device_identifier,
device_type=device_type,
device_ip_address=device_ip_address,
message_payload=message_payload,
message_type=message_type,
)
except (FileNotFoundException, LogicException, ParsePayloadException) as ex:
self.__logger.error(
"Received message could not be successfully parsed to entity",
extra={
"device": {
"identifier": device_identifier,
"type": device_type,
},
"exception": {
"message": str(ex),
"code": type(ex).__name__,
},
},
)
return
self.__consumer.append(entity=entity)
| 32.379386
| 110
| 0.44829
|
b9898877b3e9bf89a7c311953673917ff7b5a7c1
| 99
|
py
|
Python
|
ibolc/wsgi.py
|
jschaf/ibolcdb
|
e890fcf6bdc6219b3464899a99ac0464f6d329fc
|
[
"BSD-3-Clause"
] | null | null | null |
ibolc/wsgi.py
|
jschaf/ibolcdb
|
e890fcf6bdc6219b3464899a99ac0464f6d329fc
|
[
"BSD-3-Clause"
] | null | null | null |
ibolc/wsgi.py
|
jschaf/ibolcdb
|
e890fcf6bdc6219b3464899a99ac0464f6d329fc
|
[
"BSD-3-Clause"
] | null | null | null |
from .app import create_app
from .settings import ProdConfig
application = create_app(ProdConfig)
| 19.8
| 36
| 0.828283
|
f795542d3d01d55b554ee078f22b86f50f3b7588
| 3,547
|
py
|
Python
|
lipo/psv/models.py
|
Mik3y-F/Lipo
|
d9d748b8565bc75c7022cb5b06781dba46181f63
|
[
"MIT"
] | null | null | null |
lipo/psv/models.py
|
Mik3y-F/Lipo
|
d9d748b8565bc75c7022cb5b06781dba46181f63
|
[
"MIT"
] | 1
|
2020-06-24T11:08:18.000Z
|
2020-06-24T11:08:18.000Z
|
lipo/psv/models.py
|
Mik3y-F/Lipo
|
d9d748b8565bc75c7022cb5b06781dba46181f63
|
[
"MIT"
] | null | null | null |
import uuid
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class Psv(models.Model):
id = models.UUIDField(_("PSV Id"), primary_key=True, default=uuid.uuid4, editable=False)
type = models.ManyToManyField("users.UserType", verbose_name=_("P.S.V types"),)
name = models.CharField(_("Name of PSV"), blank=True, max_length=255)
sacco = models.ForeignKey("saccos.Sacco", verbose_name=_("Saccos"), on_delete=models.CASCADE,
null=True,)
plate_registration_no = models.CharField(_("Plate Registration Number of PSV"), max_length=255)
route = models.ManyToManyField("psv_routes.Route", verbose_name=_("Psv Routes Plied by PSV."))
attribute = models.ManyToManyField("psv.Attribute", verbose_name=_("Psv Attributes"))
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _("PSV")
verbose_name_plural = _("PSVs")
def __str__(self):
return self.plate_registration_no
def get_absolute_url(self):
return reverse("psv:detail", kwargs={"pk": self.pk})
class PsvType(models.Model):
TAXI = 'taxi'
MOTOR_BIKE = 'motor_bike'
MATATU_MINIBUS = 'matatu_minibus'
MATATU_BUS = 'matatu_bus'
BICYCLE = 'bicycle'
PSV_TYPE = [
(TAXI, _('Psv is a Taxi')),
(MOTOR_BIKE, _('Psv is a motorbike')),
(BICYCLE, _('Psv is a bicycle')),
(MATATU_MINIBUS, _('Psv is a Matatu Minibus')),
(MATATU_BUS, _('Psv is a Matatu Bus')),
]
name = models.CharField(_("Name of PSV Type"), blank=True, max_length=255)
class Meta:
verbose_name = _("Psv Type")
verbose_name_plural = _("Psv Types")
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("psv_type:detail", kwargs={"pk": self.pk})
class PsvLike(models.Model):
id = models.UUIDField(_("PSV Like Id"), primary_key=True, default=uuid.uuid4, editable=False)
user = models.ForeignKey("users.User", verbose_name=_("user who liked P.S.V"),
on_delete=models.CASCADE)
psv = models.ForeignKey("psv.Psv", verbose_name=_("P.S.V. liked by user"),
on_delete=models.CASCADE)
class Meta:
verbose_name = _("Psv Like")
verbose_name_plural = _("Psv Likes")
def __str__(self):
return str(self.id)
def get_absolute_url(self):
return reverse("psv_like:detail", kwargs={"pk": self.pk})
class PsvFavourite(models.Model):
id = models.UUIDField(_("PSV Favourite Id"), primary_key=True, default=uuid.uuid4, editable=False)
user = models.ForeignKey("users.User", verbose_name=_("user who favourited P.S.V"),
on_delete=models.CASCADE)
psv = models.ForeignKey("psv.Psv", verbose_name=_("P.S.V. favourited by user"),
on_delete=models.CASCADE)
class Meta:
verbose_name = _("Psv Like")
verbose_name_plural = _("Psv Likes")
def __str__(self):
return self.id
def get_absolute_url(self):
return reverse("psv_favourite:detail", kwargs={"pk": self.pk})
class Attribute(models.Model):
name = models.CharField(_("Psv Attribute"), max_length=50)
class Meta:
verbose_name = _("Attribute")
verbose_name_plural = _("Attributes")
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("attribute:detail", kwargs={"pk": self.pk})
| 31.114035
| 102
| 0.648435
|
078a990c270cba46ac245aadfc157f6e81aa91e5
| 107
|
py
|
Python
|
tests/unit/test_modulegraph/testpkg-regr5/script.py
|
hawkhai/pyinstaller
|
016a24479b34de161792c72dde455a81ad4c78ae
|
[
"Apache-2.0"
] | 9,267
|
2015-01-01T04:08:45.000Z
|
2022-03-31T11:42:38.000Z
|
tests/unit/test_modulegraph/testpkg-regr5/script.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 5,150
|
2015-01-01T12:09:56.000Z
|
2022-03-31T18:06:12.000Z
|
tests/unit/test_modulegraph/testpkg-regr5/script.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 2,101
|
2015-01-03T10:25:27.000Z
|
2022-03-30T11:04:42.000Z
|
import __init__
from PyInstaller.lib.modulegraph.find_modules import find_needed_modules
import distutils
| 21.4
| 72
| 0.88785
|
245c125717630c513475cc49d1dbea1afa2e2183
| 323
|
py
|
Python
|
src/restapi/permissions.py
|
gavin-anders/callback-catcher
|
77d18a983fc5a9e53b33189d4202868210b5d7e3
|
[
"Apache-2.0"
] | 2
|
2019-06-27T21:08:23.000Z
|
2020-10-16T12:07:19.000Z
|
src/restapi/permissions.py
|
gavin-anders/callback-catcher
|
77d18a983fc5a9e53b33189d4202868210b5d7e3
|
[
"Apache-2.0"
] | null | null | null |
src/restapi/permissions.py
|
gavin-anders/callback-catcher
|
77d18a983fc5a9e53b33189d4202868210b5d7e3
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework.permissions import DjangoModelPermissions, BasePermission
from catcher.models import Client
from catcher.settings import USERNAME
class ClientUserPermissions(DjangoModelPermissions):
def __init__(self):
self.perms_map['GET'] = ['%(app_label)s.view_%(model_name)s']
| 35.888889
| 77
| 0.736842
|
0f76222be0db1618ee1b63218f3497e1e8890225
| 4,438
|
py
|
Python
|
skbot/ignition/sdformat/generic_sdf/__init__.py
|
FirefoxMetzger/ropy
|
c1bcebda223f3af0b6d35e3f4c26d8fd9d26577a
|
[
"Apache-2.0"
] | 6
|
2021-03-24T05:54:45.000Z
|
2021-07-20T21:03:21.000Z
|
skbot/ignition/sdformat/generic_sdf/__init__.py
|
FirefoxMetzger/scikit-bot
|
ee6f1d3451a3c61a6fa122cc42efc4dd67afc9c9
|
[
"Apache-2.0"
] | 31
|
2021-08-12T08:12:58.000Z
|
2022-03-21T23:16:36.000Z
|
skbot/ignition/sdformat/generic_sdf/__init__.py
|
FirefoxMetzger/scikit-bot
|
ee6f1d3451a3c61a6fa122cc42efc4dd67afc9c9
|
[
"Apache-2.0"
] | 1
|
2021-07-20T20:13:49.000Z
|
2021-07-20T20:13:49.000Z
|
""" Version-Agnostic SDF Bindings
.. currentmodule:: skbot.ignition.sdformat.generic_sdf
.. warning::
This module is experimental and not all SDFormat elements are currently
supported. If an element is not supported, it will raise a warning. To
suppress this warning use::
skbot.ignition.sdformat.generic_sdf.base.WARN_UNSUPPORTED = False
This module contains version-agnostic bindings for `SDFormat
<http://sdformat.org/spec>`_. The idea of these bindings is that they can parse
SDF in any one version, and allow you to consume the SDF as if it were any other
SDF version.
This, of course, comes with the limitation of compatibility between versions.
For example, you can load a SDF v1.6 file and access elements introduced in SDF
v1.8 if they have a default value or can be computed from the values available.
However, if an element is now required in SDF v1.8 but wasn't available in SDF
v1.6 then it will not be set. Further, if an element is no longer available in
the most recent SDF version, it will raise a deprecation warning. Such cases
will be documented as such.
In addition to unavoidable version incompatibilities listed above, the bindings
make the following opinionated decisions:
- Some variable names differ from SDFormat
- If an element may have multiple children of the same kind, the
corresponding attribute uses the plural instead of the singular, e.g.
``models`` instead of ``model``.
- If different SDF versions use different names for the same variable,
they are converted into the name used in the most recent SDFormat
version. Old names are still available via a @property and will raise
a deprecation warning.
- vectors are converted to numpy arrays
- includes are resolved, removed, and the included element is inserted
instead
- __model__ is appended to frame references where necessary, e.g., a
reference of the form ``model_A::model_B`` will become
``model_A::model_B::__model__``.
Supported Elements
------------------
.. autosummary::
:toctree: generic_sdf
sdf.Sdf
camera.Camera
frame.Frame
joint.Joint
link.Link
model.Model
sensor.Sensor
world.World
include.Include
origin.Origin
Unsupported Elements
--------------------
The following elements are currently recognized, but not implemented. When encountered,
they will raise a warning.
.. autosummary::
:toctree: generic_sdf
state.State
light_state.Light
link_state.Link
model_state.Model
collision.Collision
inertial.Inertial
material.Material
geometry.Geometry
visual.Visual
actor.Actor
audio_sink.AudioSink
audio_source.AudioSource
collision_engine.CollisionEngine
gripper.Gripper
particle_emitter.ParticleEmitter
physics.Physics
scene.Scene
surface.Surface
urdf.URDF
.. rubric:: Sensors
.. autosummary::
:toctree: generic_sdf
sensors.Lidar
sensors.Ray
sensors.AirPressure
sensors.Altimeter
sensors.Contact
sensors.ForceTorque
sensors.Gps
sensors.Imu
sensors.LogicalCamera
sensors.Magnetometer
sensors.Navsat
sensors.RfidTag
sensors.Rfid
sensors.Sonar
sensors.Transceiver
light.Light
.. rubric:: Shapes
.. autosummary::
:toctree: generic_sdf
shapes.Box
shapes.Capsule
shapes.Cylinder
shapes.Ellipsoid
shapes.Heightmap
shapes.Image
shapes.Mesh
shapes.Plane
.. rubric:: Misc
.. autosummary::
:toctree: generic_sdf
gui.Gui
atmosphere.Atmosphere
battery.Battery
noise.Noise
population.Population
projector.Projector
"""
# currently imported for coverage x)
from . import base, sensors, shapes
# import top-level elements as mentioned in the
# SDFormat spec.
from .actor import Actor
from .collision import Collision
from .geometry import Geometry
from .joint import Joint
from .light import Light
from .link import Link
from .material import Material
from .model import Model
from .physics import Physics
from .scene import Scene
from .sensor import Sensor
from .state import State
from .visual import Visual
from .world import World
__all__ = [
"World",
"Scene",
"State",
"Physics",
"Light",
"Actor",
"Model",
"Link",
"Sensor",
"Joint",
"Collision",
"Visual",
"Material",
"Geometry",
]
| 24.519337
| 87
| 0.710455
|
7ffd6daa15a6e8a46f799b43fc40164b29a16125
| 23,106
|
py
|
Python
|
scripts/gridengine/paramsearch/runScript.py
|
helloric/pydial3
|
34988f4592c4e28388b2818de8768d841696efbb
|
[
"Apache-2.0"
] | null | null | null |
scripts/gridengine/paramsearch/runScript.py
|
helloric/pydial3
|
34988f4592c4e28388b2818de8768d841696efbb
|
[
"Apache-2.0"
] | null | null | null |
scripts/gridengine/paramsearch/runScript.py
|
helloric/pydial3
|
34988f4592c4e28388b2818de8768d841696efbb
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import argparse
""" DETAILS:
# THIS FILE EXPLORES GP/REGR/FF/LSTM MODELS
-- Try varying AT LEAST the following network parameters:
a) network structures: n_hidden, L1, L2, activation
b) learning rate, decay, and regularisation
"""
################################################
### repository path
################################################
repository_path = os.path.abspath(os.path.join(os.getcwd(),'../../../'))
def config_text(domains, root, seed,
screen_level,
maxturns,
belieftype, useconfreq, policytype, startwithhello, inpolicyfile, outpolicyfile, learning,
maxiter, gamma, learning_rate, tau, replay_type, minibatch_size, capacity,
exploration_type, epsilon_start, epsilon_end, n_in, features, max_k, \
learning_algorithm, architecture, h1_size, h2_size,
kernel,
random, scale,
usenewgoalscenarios,
nbestsize,
patience,
penaliseallturns,
wrongvenuepenalty,
notmentionedvaluepenalty,
sampledialogueprobs,
save_step,
confscorer,
oldstylepatience,
forcenullpositive,
file_level,
maxinformslots,
informmask,
informcountaccepted,
requestmask, confusionmodel, byemask,
n_samples, alpha_divergence, alpha, sigma_eps, sigma_prior,
stddev_var_mu, stddev_var_logsigma, mean_log_sigma,
nbestgeneratormodel,
delta, beta, is_threshold, train_iters_per_episode, training_frequency,
no_head, keep_prob, dropout_start,
old_style_parameter_sampling):
text = '[GENERAL]' + '\n'
text += 'domains = ' + domains + '\n'
text += 'singledomain = True' + '\n'
text += 'root = ' + root + '\n'
text += 'seed = ' + seed + '\n'
text += '\n'
text += '[conditional]' + '\n'
text += 'conditionalsimuser = True\n'
text += 'conditionalbeliefs = True\n'
text += '\n'
text += '[agent]' + '\n'
text += 'maxturns = ' + maxturns + '\n'
text += '\n'
text += '[logging]' + '\n'
text += 'screen_level = ' + screen_level + '\n'
text += 'file_level = ' + file_level + '\n'
text += '\n'
text += '[simulate]' + '\n'
text += 'mindomainsperdialog = 1\n'
text += 'maxdomainsperdialog = 1\n'
text += 'forcenullpositive = ' + forcenullpositive + '\n'
text += '\n'
text += '[policy]' + '\n'
text += 'maxinformslots = ' + maxinformslots + '\n'
text += 'informmask = ' + informmask + '\n'
text += 'informcountaccepted = ' + informcountaccepted + '\n'
text += 'requestmask = ' + requestmask + '\n'
text += 'byemask = ' + byemask + '\n'
text += '\n'
text += '[policy_' + domains + ']' + '\n'
text += 'belieftype = ' + belieftype + '\n'
text += 'useconfreq = ' + useconfreq + '\n'
text += 'policytype = ' + policytype + '\n'
text += 'startwithhello = ' + startwithhello + '\n'
text += 'inpolicyfile = ' + inpolicyfile + '\n'
text += 'outpolicyfile = ' + outpolicyfile + '\n'
text += 'learning = ' + learning + '\n'
text += 'save_step = ' + save_step + '\n'
text += '\n'
text += '[dqnpolicy_' + domains + ']' + '\n'
text += 'maxiter = ' + maxiter + '\n'
text += 'gamma = ' + gamma + '\n'
text += 'learning_rate = ' + learning_rate + '\n'
text += 'tau = ' + tau + '\n'
text += 'replay_type = ' + replay_type + '\n'
text += 'minibatch_size = ' + minibatch_size + '\n'
text += 'capacity = ' + capacity + '\n'
text += 'exploration_type = ' + exploration_type + '\n'
text += 'epsilon_start = ' + epsilon_start + '\n'
text += 'epsilon_end = ' + epsilon_end + '\n'
text += 'n_in = ' + n_in + '\n'
text += 'features = ' + features + '\n'
text += 'max_k = ' + max_k + '\n'
text += 'learning_algorithm = ' + learning_algorithm + '\n'
text += 'architecture = ' + architecture + '\n'
text += 'h1_size = ' + h1_size + '\n'
text += 'h2_size = ' + h2_size + '\n'
text += 'training_frequency = ' + training_frequency + '\n'
# Bayesian parameters
text += 'n_samples = ' + n_samples + '\n'
text += 'stddev_var_mu = ' + stddev_var_mu + '\n'
text += 'stddev_var_logsigma = ' + stddev_var_logsigma + '\n'
text += 'mean_log_sigma = ' + mean_log_sigma + '\n'
text += 'sigma_prior = ' + sigma_prior + '\n'
text += 'alpha =' + alpha + '\n'
text += 'alpha_divergence =' + alpha_divergence + '\n'
text += 'sigma_eps = ' + sigma_eps + '\n'
text += 'no_head = ' + no_head + '\n'
text += 'keep_prob = ' + keep_prob + '\n'
text += 'dropout_start = ' + dropout_start + '\n'
text += '\n'
# ACER
text += 'delta = ' + delta + '\n'
text += 'beta = ' + beta + '\n'
text += 'is_threshold = ' + is_threshold + '\n'
text += 'train_iters_per_episode = ' + train_iters_per_episode + '\n'
text += '\n'
text += '[gppolicy_' + domains + ']' + '\n'
text += 'kernel = ' + kernel + '\n'
text += '\n'
text += '[gpsarsa_' + domains + ']' + '\n'
text += 'random = ' + random + '\n'
text += 'scale = ' + scale + '\n'
text += '\n'
text += '[usermodel]' + '\n'
text += 'usenewgoalscenarios = ' + usenewgoalscenarios + '\n'
text += 'sampledialogueprobs = ' + sampledialogueprobs + '\n'
text += 'oldstylepatience = ' + oldstylepatience + '\n'
text += 'oldstylesampling = ' + old_style_parameter_sampling + '\n'
text += '\n'
text += '[errormodel]' + '\n'
text += 'nbestsize = ' + nbestsize + '\n'
text += 'confusionmodel = ' + confusionmodel + '\n'
text += 'nbestgeneratormodel = ' + nbestgeneratormodel + '\n'
text += 'confscorer = ' + confscorer + '\n'
text += '\n'
text += '[goalgenerator]' + '\n'
text += 'patience = ' + patience + '\n'
text += '\n'
text += '[eval]' + '\n'
text += 'rewardvenuerecommended = 0' + '\n'
text += 'penaliseallturns = ' + penaliseallturns + '\n'
text += 'wrongvenuepenalty = ' + wrongvenuepenalty + '\n'
text += 'notmentionedvaluepenalty = ' + notmentionedvaluepenalty + '\n'
text += '\n'
text += '[eval_' + domains + ']' + '\n'
text += 'successmeasure = objective' + '\n'
text += 'successreward = 20' + '\n'
text += '\n'
return text
def run_on_grid(targetDir, step, iter_in_step, test_iter_in_step, parallel, execDir, configName, text, mode,
error):
################################################
### config file
config = repository_path + configName + '.cfg'
# if the directory does not exist, create one
config_dir = repository_path + 'configures/'
if not os.path.exists(config_dir):
os.makedirs(config_dir)
with open(config, 'w') as f:
f.write(text)
runStr = 'running ' + config
print('{0:*^60}'.format(runStr))
# command = 'python run_grid_pyGPtraining_rpg.py ' + targetDir + ' 3 10000 1 ' + execDir + ' 15 1 ' + config
if mode == ('train', 'grid'):
command = 'python run_grid_pyGPtraining_rpg.py ' + targetDir + ' ' + step + ' ' + \
iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
elif mode == ('test', 'grid'):
command = 'python run_grid_pyGPtraining_rpg_test.py ' + targetDir + ' TEST ' + step + ' ' + \
test_iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
elif mode == ('train', 'own'):
command = 'python run_own_pyGPtraining_rpg.py ' + targetDir + ' ' + step + ' ' + \
iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
elif mode == ('test', 'own'):
command = 'python run_own_pyGPtraining_rpg_test.py ' + targetDir + ' TEST ' + step + ' ' + \
test_iter_in_step + ' ' + parallel + ' ' + execDir + ' ' + error + ' 1 ' + config
print(command)
os.system(command)
def main(argv):
step = '10'
iter_in_step = '100'
test_iter_in_step = '100'
save_step = '100'
parallel = '1'
maxiter = str(int(step) * int(iter_in_step))
################################################
### Domain information
################################################
domains = 'CamRestaurants' # SF restaurants
if len(argv) > 4:
repository_path = argv[4]
root = repository_path
seed = argv[3]
screen_level = 'warning'
file_level = 'warning'
maxturns = '25'
################################################
### General policy information
################################################
belieftype = 'focus'
useconfreq = 'False'
policytype_vary = ['BBQN']#dropout', 'concrete', 'bootstrapped'] #'dqn', 'BBQN', 'BBQN'] # 'dropout', 'concrete'
startwithhello = 'False'
inpolicyfile = 'policyFile'
outpolicyfile = 'policyFile'
learning = 'True'
maxinformslots = '5' # Maximum number of slot values that are presented in the inform summary action
informmask = 'True' # Decides if the mask over inform type actions is used or not (having the mask active speeds up learning)
informcountaccepted = '4' # number of accepted slots needed to unmask the inform_byconstraints action
requestmask = 'True' # Decides if the mask over inform type actions is used or not
byemask = 'True'
################################################
### DNN architecture options
################################################
gamma = '0.99' # discount factor
learning_rate = '0.001' # learning rate
tau_vary = ['0.02'] # target policy network update frequency 0.02 is equal to update policy after 50 epochs
replay_type_vary = ['vanilla'] # ['vanilla'] experience replay
minibatch_size_vary = ['64'] # how many turns are in the batch
capacity_vary = ['1000'] # how many turns/dialogues are in ER
exploration_type_vary = ['e-greedy'] # 'e-greedy', 'Boltzman'
epsilon_s_e_vary = [('0.9', '0.0')] # , ('0.3', '0.0')]#, ('0.5', '0.1')]
training_frequency = '2' # how often train the model, episode_count % frequency == 0
features = '["discourseAct", "method", "requested", "full", "lastActionInformNone", "offerHappened", "inform_info"]'
max_k = '5'
learning_algorithm = 'dqn'
architecture = 'vanilla'
h1_size = ['130']#, '200', '300']
h2_size = ['50']#, '75', '100']
################################################
### Bayesian estimation parameters
################################################
n_samples = '1' # number of samples for action choice
alpha_divergence = 'False' # use alpha divergence?
alpha = '0.85'
sigma_eps = '0.01' # variance size for sampling epsilon
sigma_prior = '1.5' # prior for variance in KL term
stddev_var_mu = '0.01' # stdv for weights
stddev_var_logsigma = '0.01' # stdv of variance for variance
mean_log_sigma = '0.000001' # prior mean for variance
no_head = '3' # number of heads used for
keep_prob = '0.9' # dropout level
dropout_start = '0.2' # concrete dropout level
################################################
### ACER parameters
################################################
beta = '0.95'
delta = '1.0'
is_threshold = '5.0'
train_iters_per_episode = '1'
################################################
### User model and environment model info.
################################################
usenewgoalscenarios = 'True'
sampledialogueprobs = 'True'
old_style_parameter_sampling = 'True' # for BBQN True
confusionmodel = 'RandomConfusions'
confscorer = 'additive' # 'additive'
nbestgeneratormodel = 'SampledNBestGenerator'
nbestsize = '3'
patience = '3'
penaliseallturns = 'True'
wrongvenuepenalty = '0'
notmentionedvaluepenalty = '0'
oldstylepatience = 'True'
forcenullpositive = 'False'
runError_vary = ['0']
if domains == 'CamRestaurants':
n_in = '268'
elif domains == 'CamHotels':
n_in = '111'
elif domains == 'SFRestaurants':
n_in = '636'
elif domains == 'SFHotels':
n_in = '438'
elif domains == 'Laptops11':
n_in = '257'
elif domains == 'TV':
n_in = '188'
elif domains == 'Booking':
n_in = '188'
################################################
### GP policy training options
################################################
kernel = 'polysort'
random = 'False'
scale = '3'
ConfigCounter = 0
listFile = open(argv[0], 'w')
runMode = ('train', 'grid')
if argv[1] not in ('train', 'test') or argv[2] not in ('grid', 'own'):
print('\n!!!!! WRONG COMMAND !!!!!\n')
print('EXAMPLE: python runScript.py list [train|test] [grid|own]\n')
exit(1)
elif argv[1] == 'train':
if argv[2] == 'grid':
runMode = ('train', 'grid')
elif argv[2] == 'own':
runMode = ('train', 'own')
elif argv[1] == 'test':
if argv[2] == 'grid':
runMode = ('test', 'grid')
elif argv[2] == 'own':
runMode = ('test', 'own')
listOutput = '{0: <6}'.format('PARAM') + '\t'
listOutput += '{0: <10}'.format('type') + '\t'
listOutput += '{0: <10}'.format('actor_lr') + '\t'
listOutput += '{0: <10}'.format('critic_lr') + '\t'
listOutput += '{0: <10}'.format('replaytype') + '\t'
listOutput += '{0: <10}'.format('nMini') + '\t'
listOutput += '{0: <10}'.format('capacity') + '\t'
listOutput += '{0: <10}'.format('runError') + '\t'
listFile.write(listOutput + '\n')
for policytype in policytype_vary:
for tau in tau_vary:
for replay_type in replay_type_vary:
for minibatch_size in minibatch_size_vary:
for exploration_type in exploration_type_vary:
for capacity in capacity_vary:
for epsilon_s_e in epsilon_s_e_vary:
epsilon_start, epsilon_end = epsilon_s_e
for h1 in h1_size:
for h2 in h2_size:
for runError in runError_vary:
execDir = repository_path
if policytype == 'gp':
targetDir = 'CamRestaurants_gp_'
elif policytype == 'dqn' or policytype == 'dqn_vanilla':
targetDir = 'CamRestaurants_dqn_'
elif policytype == 'a2c':
targetDir = 'CamRestaurants_a2c_'
elif policytype == 'enac':
targetDir = 'CamRestaurants_enac_'
elif policytype == 'BBQN':
targetDir = 'CamRestaurants_BBQN_'
elif policytype == 'concrete':
targetDir = 'CamRestaurants_concrete_'
elif policytype == 'bootstrapped':
targetDir = 'CamRestaurants_bootstrapped_'
elif policytype == 'dropout':
targetDir = 'CamRestaurants_dropout_'
elif policytype == 'acer':
targetDir = 'CamRestaurants_acer_'
elif policytype == 'a2cis':
targetDir = 'CamRestaurants_a2cis_'
elif policytype == 'tracer':
targetDir = 'CamRestaurants_tracer_'
listOutput = '{0: <10}'.format(targetDir) + '\t'
listOutput += '{0: <10}'.format(policytype) + '\t'
listOutput += '{0: <10}'.format(learning_rate) + '\t'
listOutput += '{0: <10}'.format(replay_type) + '\t'
listOutput += '{0: <10}'.format(minibatch_size) + '\t'
listOutput += '{0: <10}'.format(capacity) + '\t'
listOutput += '{0: <10}'.format(runError) + '\t'
targetDir += 'learning_rate' + learning_rate + '_replay_type' + replay_type + \
'_minibatch_size' + minibatch_size + '_capacity' + capacity + '_runError' + runError
text = config_text(domains, root, seed,
screen_level,
maxturns,
belieftype, useconfreq, policytype, startwithhello,
inpolicyfile, outpolicyfile, learning,
maxiter, gamma, learning_rate, tau, replay_type,
minibatch_size, capacity,
exploration_type, epsilon_start, epsilon_end, n_in,
features, max_k, learning_algorithm, architecture, h1,
h2,
kernel,
random, scale,
usenewgoalscenarios,
nbestsize,
patience,
penaliseallturns,
wrongvenuepenalty,
notmentionedvaluepenalty,
sampledialogueprobs,
save_step,
confscorer,
oldstylepatience,
forcenullpositive,
file_level,
maxinformslots, informmask,informcountaccepted,requestmask, confusionmodel, byemask,
n_samples, alpha_divergence, alpha, sigma_eps, sigma_prior,
stddev_var_mu, stddev_var_logsigma, mean_log_sigma,
nbestgeneratormodel,
delta, beta, is_threshold, train_iters_per_episode, training_frequency,
no_head, keep_prob, dropout_start,
old_style_parameter_sampling)
# run_on_grid(targetDir, execDir, configName, text)
tmpName = 'gRun' + str(ConfigCounter)
run_on_grid(tmpName, step, iter_in_step, test_iter_in_step, parallel, execDir, tmpName, text,
runMode, runError)
listFile.write(tmpName + '\t' + listOutput + '\n')
ConfigCounter += 1
if __name__ == "__main__":
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description='DeepRL parameter search')
parser.add_argument('-s', '--seed', help='set the random seed', required=False, type=str, default="123")
parser.add_argument('-tn', '--train', help='script is set to train policies (default)', action='store_true')
parser.add_argument('-tt', '--test', help='script is set to test/evaluate policies', action='store_true')
parser.add_argument('--own', help='run on local machine (default)', action='store_true')
parser.add_argument('--grid', help='run on grid', action='store_true')
parser.add_argument('-f', '--file', help='the list file', required=False, type=str, default='list')
parser.add_argument('-p', '--pydial', help='the path to pydial', required=False, type=str, default='../../../')
if len(argv) > 0 and not argv[0][0] == '-':
if len(sys.argv) != 5:
parser.print_help()
# print '\n!!!!! WRONG COMMAND !!!!!\n'
# print 'EXAMPLE: python runScript.py list [train|test] [grid|own]\n'
exit(1)
# main(argv)
else:
# parser = argparse.ArgumentParser(description='DeepRL parameter search')
# parser.add_argument('-s', '--seed', help='set the random seed', required=False, type=str, default="123")
# parser.add_argument('-tn', '--train', help='script is set to train policies (default)', action='store_true')
# parser.add_argument('-tt', '--test', help='script is set to test/evaluate policies', action='store_true')
# parser.add_argument('--own', help='run on local machine (default)', action='store_true')
# parser.add_argument('--grid', help='run on grid', action='store_true')
# parser.add_argument('-f', '--file', help='the list file', required=False, type=str, default='list')
# parser.add_argument('-p', '--pydial', help='the path to pydial', required=False, type=str, default='../../../')
args = parser.parse_args()
own = not args.grid
grid = not args.own and args.grid
if own == grid:
pass # issue error with parameter help
train = not args.test
test = not args.train and args.test
if train == test:
pass # issue error with parameter help
pydialpath = os.path.abspath(os.path.join(os.getcwd(),args.pydial))
argv = [args.file, 'test' if test else 'train', 'grid' if grid else 'own', args.seed, pydialpath]
# print argv
main(argv)
# END OF FILE
| 44.606178
| 141
| 0.483165
|
28a42fdc854cb7972d64a944eb2bba262f7b2094
| 96
|
py
|
Python
|
preferences/__init__.py
|
uisautomation/lecture-capture-preferences-webapp
|
bf54bc9b37241a5a7a6c0df7b1e2c0f8dd16bed6
|
[
"MIT"
] | 1
|
2019-02-17T08:17:59.000Z
|
2019-02-17T08:17:59.000Z
|
preferences/__init__.py
|
rjw57/lecture-capture-preferences-webapp
|
f6b4572b06cf1bc2f45e635148cd7e2b3ea283f6
|
[
"MIT"
] | 5
|
2019-01-08T07:43:40.000Z
|
2019-02-04T16:32:03.000Z
|
preferences/__init__.py
|
rjw57/lecture-capture-preferences-webapp
|
f6b4572b06cf1bc2f45e635148cd7e2b3ea283f6
|
[
"MIT"
] | null | null | null |
"""
Lecture Capture Preferences Application
"""
default_app_config = 'preferences.apps.Config'
| 16
| 46
| 0.78125
|
74e2100a2a1ef105b7c8d5a319516db785ab86e9
| 6,728
|
py
|
Python
|
contrib/devtools/symbol-check.py
|
dogxteam/dogxwallet-master
|
346189354bdec9a80c20bdc429ddec15c3b17b73
|
[
"MIT"
] | 5
|
2019-03-18T02:14:20.000Z
|
2019-03-21T17:08:27.000Z
|
contrib/devtools/symbol-check.py
|
dogxteam/dogxwallet-master
|
346189354bdec9a80c20bdc429ddec15c3b17b73
|
[
"MIT"
] | null | null | null |
contrib/devtools/symbol-check.py
|
dogxteam/dogxwallet-master
|
346189354bdec9a80c20bdc429ddec15c3b17b73
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid&section=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid&section=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid&section=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11),
'LIBATOMIC': (1,0)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# dogxcoind and dogxcoin-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libanl.so.1', # DNS resolve
'libm.so.6', # math library
'librt.so.1', # real-time (clock)
'libatomic.so.1',
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker
'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker
'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker
# dogxcoin-qt only
'libX11-xcb.so.1', # part of X11
'libX11.so.6', # part of X11
'libxcb.so.1', # part of X11
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2' # programming interface to dynamic linker
}
ARCH_MIN_GLIBC_VER = {
'80386': (2,1),
'X86-64': (2,2,5),
'ARM': (2,4),
'AArch64':(2,17),
'RISC-V': (2,27)
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
def __call__(self, mangled):
self.proc.stdin.write(mangled + '\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', '-h', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.splitlines():
line = line.split()
if 'Machine:' in line:
arch = line[-1]
if len(line)>7 and re.match('[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition('@')
is_import = line[6] == 'UND'
if version.startswith('@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version, arch))
return syms
def check_version(max_versions, version, arch):
if '_' in version:
(lib, _, ver) = version.rpartition('_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split('.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib] or lib == 'GLIBC' and ver <= ARCH_MIN_GLIBC_VER[arch]
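# Illustrative behaviour of check_version (worked examples, not part of the original
# script), assuming the MAX_VERSIONS / ARCH_MIN_GLIBC_VER tables defined above:
#   check_version(MAX_VERSIONS, 'GLIBC_2.11', 'X86-64')     -> True   (2.11 <= 2.11)
#   check_version(MAX_VERSIONS, 'GLIBCXX_3.4.14', 'X86-64') -> False  (3.4.14 > 3.4.13)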
def read_libraries(filename):
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.splitlines():
tokens = line.split()
if len(tokens)>2 and tokens[1] == '(NEEDED)':
match = re.match(r'^Shared library: \[(.*)\]$', ' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version,arch in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version, arch):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version))
retval = 1
# Check exported symbols
if arch != 'RISC-V':
for sym,version,arch in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym)))
retval = 1
# Check dependency libraries
for library_name in read_libraries(filename):
if library_name not in ALLOWED_LIBRARIES:
print('%s: NEEDED library %s is not allowed' % (filename, library_name))
retval = 1
sys.exit(retval)
| 37.797753
| 173
| 0.646552
|
7a7002de094f35c3a08ef43bbe47c04b98dce464
| 10,896
|
py
|
Python
|
tests/components/switch_as_x/test_init.py
|
camditt/core
|
862d8b54146a9b21c307a841ca5dca7cb2f5359f
|
[
"Apache-2.0"
] | null | null | null |
tests/components/switch_as_x/test_init.py
|
camditt/core
|
862d8b54146a9b21c307a841ca5dca7cb2f5359f
|
[
"Apache-2.0"
] | null | null | null |
tests/components/switch_as_x/test_init.py
|
camditt/core
|
862d8b54146a9b21c307a841ca5dca7cb2f5359f
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for the Switch as X."""
from unittest.mock import patch
import pytest
from homeassistant.components.switch_as_x.const import CONF_TARGET_DOMAIN, DOMAIN
from homeassistant.const import CONF_ENTITY_ID, STATE_OFF, STATE_ON, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from tests.common import MockConfigEntry
@pytest.mark.parametrize(
"target_domain",
(
Platform.COVER,
Platform.FAN,
Platform.LIGHT,
Platform.SIREN,
),
)
async def test_config_entry_unregistered_uuid(
hass: HomeAssistant, target_domain: str
) -> None:
"""Test light switch setup from config entry with unknown entity registry id."""
fake_uuid = "a266a680b608c32770e6c45bfe6b8411"
config_entry = MockConfigEntry(
data={},
domain=DOMAIN,
options={
CONF_ENTITY_ID: fake_uuid,
CONF_TARGET_DOMAIN: target_domain,
},
title="ABC",
)
config_entry.add_to_hass(hass)
assert not await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
@pytest.mark.parametrize(
"target_domain",
(
Platform.FAN,
Platform.LIGHT,
Platform.SIREN,
),
)
async def test_entity_registry_events(hass: HomeAssistant, target_domain: str) -> None:
"""Test entity registry events are tracked."""
registry = er.async_get(hass)
registry_entry = registry.async_get_or_create("switch", "test", "unique")
switch_entity_id = registry_entry.entity_id
hass.states.async_set(switch_entity_id, "on")
config_entry = MockConfigEntry(
data={},
domain=DOMAIN,
options={
CONF_ENTITY_ID: registry_entry.id,
CONF_TARGET_DOMAIN: target_domain,
},
title="ABC",
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(f"{target_domain}.abc").state == STATE_ON
# Change entity_id
new_switch_entity_id = f"{switch_entity_id}_new"
registry.async_update_entity(switch_entity_id, new_entity_id=new_switch_entity_id)
hass.states.async_set(new_switch_entity_id, STATE_OFF)
await hass.async_block_till_done()
# Check tracking the new entity_id
await hass.async_block_till_done()
assert hass.states.get(f"{target_domain}.abc").state == STATE_OFF
# The old entity_id should no longer be tracked
hass.states.async_set(switch_entity_id, STATE_ON)
await hass.async_block_till_done()
assert hass.states.get(f"{target_domain}.abc").state == STATE_OFF
# Check changing name does not reload the config entry
with patch(
"homeassistant.components.switch_as_x.async_unload_entry",
) as mock_setup_entry:
registry.async_update_entity(new_switch_entity_id, name="New name")
await hass.async_block_till_done()
mock_setup_entry.assert_not_called()
# Check removing the entity removes the config entry
registry.async_remove(new_switch_entity_id)
await hass.async_block_till_done()
assert hass.states.get(f"{target_domain}.abc") is None
assert registry.async_get(f"{target_domain}.abc") is None
assert len(hass.config_entries.async_entries("switch_as_x")) == 0
@pytest.mark.parametrize(
"target_domain",
(
Platform.COVER,
Platform.FAN,
Platform.LIGHT,
Platform.SIREN,
),
)
async def test_device_registry_config_entry_1(
hass: HomeAssistant, target_domain: str
) -> None:
"""Test we add our config entry to the tracked switch's device."""
device_registry = dr.async_get(hass)
entity_registry = er.async_get(hass)
switch_config_entry = MockConfigEntry()
device_entry = device_registry.async_get_or_create(
config_entry_id=switch_config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
switch_entity_entry = entity_registry.async_get_or_create(
"switch",
"test",
"unique",
config_entry=switch_config_entry,
device_id=device_entry.id,
)
# Add another config entry to the same device
device_registry.async_update_device(
device_entry.id, add_config_entry_id=MockConfigEntry().entry_id
)
switch_as_x_config_entry = MockConfigEntry(
data={},
domain=DOMAIN,
options={
CONF_ENTITY_ID: switch_entity_entry.id,
CONF_TARGET_DOMAIN: target_domain,
},
title="ABC",
)
switch_as_x_config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(switch_as_x_config_entry.entry_id)
await hass.async_block_till_done()
entity_entry = entity_registry.async_get(f"{target_domain}.abc")
assert entity_entry.device_id == switch_entity_entry.device_id
device_entry = device_registry.async_get(device_entry.id)
assert switch_as_x_config_entry.entry_id in device_entry.config_entries
# Remove the wrapped switch's config entry from the device
device_registry.async_update_device(
device_entry.id, remove_config_entry_id=switch_config_entry.entry_id
)
await hass.async_block_till_done()
await hass.async_block_till_done()
# Check that the switch_as_x config entry is removed from the device
device_entry = device_registry.async_get(device_entry.id)
assert switch_as_x_config_entry.entry_id not in device_entry.config_entries
@pytest.mark.parametrize(
"target_domain",
(
Platform.COVER,
Platform.FAN,
Platform.LIGHT,
Platform.SIREN,
),
)
async def test_device_registry_config_entry_2(
hass: HomeAssistant, target_domain: str
) -> None:
"""Test we add our config entry to the tracked switch's device."""
device_registry = dr.async_get(hass)
entity_registry = er.async_get(hass)
switch_config_entry = MockConfigEntry()
device_entry = device_registry.async_get_or_create(
config_entry_id=switch_config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
switch_entity_entry = entity_registry.async_get_or_create(
"switch",
"test",
"unique",
config_entry=switch_config_entry,
device_id=device_entry.id,
)
switch_as_x_config_entry = MockConfigEntry(
data={},
domain=DOMAIN,
options={
CONF_ENTITY_ID: switch_entity_entry.id,
CONF_TARGET_DOMAIN: target_domain,
},
title="ABC",
)
switch_as_x_config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(switch_as_x_config_entry.entry_id)
await hass.async_block_till_done()
entity_entry = entity_registry.async_get(f"{target_domain}.abc")
assert entity_entry.device_id == switch_entity_entry.device_id
device_entry = device_registry.async_get(device_entry.id)
assert switch_as_x_config_entry.entry_id in device_entry.config_entries
# Remove the wrapped switch from the device
entity_registry.async_update_entity(switch_entity_entry.entity_id, device_id=None)
await hass.async_block_till_done()
# Check that the switch_as_x config entry is removed from the device
device_entry = device_registry.async_get(device_entry.id)
assert switch_as_x_config_entry.entry_id not in device_entry.config_entries
@pytest.mark.parametrize(
"target_domain",
(
Platform.COVER,
Platform.FAN,
Platform.LIGHT,
Platform.SIREN,
),
)
async def test_config_entry_entity_id(
hass: HomeAssistant, target_domain: Platform
) -> None:
"""Test light switch setup from config entry with entity id."""
config_entry = MockConfigEntry(
data={},
domain=DOMAIN,
options={
CONF_ENTITY_ID: "switch.abc",
CONF_TARGET_DOMAIN: target_domain,
},
title="ABC",
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert DOMAIN in hass.config.components
state = hass.states.get(f"{target_domain}.abc")
assert state
assert state.state == "unavailable"
# Name copied from config entry title
assert state.name == "ABC"
# Check the light is added to the entity registry
registry = er.async_get(hass)
entity_entry = registry.async_get(f"{target_domain}.abc")
assert entity_entry
assert entity_entry.unique_id == config_entry.entry_id
@pytest.mark.parametrize(
"target_domain",
(
Platform.COVER,
Platform.FAN,
Platform.LIGHT,
Platform.SIREN,
),
)
async def test_config_entry_uuid(hass: HomeAssistant, target_domain: Platform) -> None:
"""Test light switch setup from config entry with entity registry id."""
registry = er.async_get(hass)
registry_entry = registry.async_get_or_create("switch", "test", "unique")
config_entry = MockConfigEntry(
data={},
domain=DOMAIN,
options={
CONF_ENTITY_ID: registry_entry.id,
CONF_TARGET_DOMAIN: target_domain,
},
title="ABC",
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(f"{target_domain}.abc")
@pytest.mark.parametrize(
"target_domain",
(
Platform.COVER,
Platform.FAN,
Platform.LIGHT,
Platform.SIREN,
),
)
async def test_device(hass: HomeAssistant, target_domain: Platform) -> None:
"""Test the entity is added to the wrapped entity's device."""
device_registry = dr.async_get(hass)
entity_registry = er.async_get(hass)
test_config_entry = MockConfigEntry()
device_entry = device_registry.async_get_or_create(
config_entry_id=test_config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
switch_entity_entry = entity_registry.async_get_or_create(
"switch", "test", "unique", device_id=device_entry.id
)
switch_as_x_config_entry = MockConfigEntry(
data={},
domain=DOMAIN,
options={
CONF_ENTITY_ID: switch_entity_entry.id,
CONF_TARGET_DOMAIN: target_domain,
},
title="ABC",
)
switch_as_x_config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(switch_as_x_config_entry.entry_id)
await hass.async_block_till_done()
entity_entry = entity_registry.async_get(f"{target_domain}.abc")
assert entity_entry
assert entity_entry.device_id == switch_entity_entry.device_id
| 30.866856
| 87
| 0.702368
|
6a3bc378777ae81712935cf3a2e37b6f1487489a
| 677
|
py
|
Python
|
apps/notifications/migrations/0001_initial.py
|
Nelson-Morais/HA-OOAD
|
82bd00ba7ab5f11f43bc90b505ee8afd0b86c7b5
|
[
"Apache-2.0"
] | null | null | null |
apps/notifications/migrations/0001_initial.py
|
Nelson-Morais/HA-OOAD
|
82bd00ba7ab5f11f43bc90b505ee8afd0b86c7b5
|
[
"Apache-2.0"
] | null | null | null |
apps/notifications/migrations/0001_initial.py
|
Nelson-Morais/HA-OOAD
|
82bd00ba7ab5f11f43bc90b505ee8afd0b86c7b5
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.6 on 2021-02-15 16:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_owner_id', models.IntegerField()),
('created_at', models.DateField(auto_now_add=True)),
('title', models.CharField(max_length=64)),
('content', models.CharField(max_length=512)),
],
),
]
| 27.08
| 114
| 0.57164
|
a32d2cf20eb58fba6764696e149457d343628431
| 24,694
|
py
|
Python
|
spacy/_ml.py
|
mhham/spaCy
|
cce1bff3e1b27e841632e0b1ef358949b5fd0d24
|
[
"MIT"
] | null | null | null |
spacy/_ml.py
|
mhham/spaCy
|
cce1bff3e1b27e841632e0b1ef358949b5fd0d24
|
[
"MIT"
] | null | null | null |
spacy/_ml.py
|
mhham/spaCy
|
cce1bff3e1b27e841632e0b1ef358949b5fd0d24
|
[
"MIT"
] | null | null | null |
# coding: utf8
from __future__ import unicode_literals
import numpy
from thinc.v2v import Model, Maxout, Softmax, Affine, ReLu
from thinc.i2v import HashEmbed, StaticVectors
from thinc.t2t import ExtractWindow, ParametricAttention
from thinc.t2v import Pooling, sum_pool, mean_pool
from thinc.misc import Residual
from thinc.misc import LayerNorm as LN
from thinc.misc import FeatureExtracter
from thinc.api import add, layerize, chain, clone, concatenate, with_flatten
from thinc.api import with_getitem, flatten_add_lengths
from thinc.api import uniqued, wrap, noop
from thinc.api import with_square_sequences
from thinc.linear.linear import LinearModel
from thinc.neural.ops import NumpyOps, CupyOps
from thinc.neural.util import get_array_module
from thinc.neural.optimizers import Adam
from thinc import describe
from thinc.describe import Dimension, Synapses, Biases, Gradient
from thinc.neural._classes.affine import _set_dimensions_if_needed
import thinc.extra.load_nlp
from .attrs import ID, ORTH, LOWER, NORM, PREFIX, SUFFIX, SHAPE
from .errors import Errors
from . import util
try:
import torch.nn
from thinc.extra.wrappers import PyTorchWrapperRNN
except ImportError:
torch = None
VECTORS_KEY = "spacy_pretrained_vectors"
def cosine(vec1, vec2):
xp = get_array_module(vec1)
norm1 = xp.linalg.norm(vec1)
norm2 = xp.linalg.norm(vec2)
if norm1 == 0.0 or norm2 == 0.0:
return 0
else:
return vec1.dot(vec2) / (norm1 * norm2)
def create_default_optimizer(ops, **cfg):
learn_rate = util.env_opt("learn_rate", 0.001)
beta1 = util.env_opt("optimizer_B1", 0.9)
beta2 = util.env_opt("optimizer_B2", 0.999)
eps = util.env_opt("optimizer_eps", 1e-8)
L2 = util.env_opt("L2_penalty", 1e-6)
max_grad_norm = util.env_opt("grad_norm_clip", 1.0)
optimizer = Adam(ops, learn_rate, L2=L2, beta1=beta1, beta2=beta2, eps=eps)
optimizer.max_grad_norm = max_grad_norm
optimizer.device = ops.device
return optimizer
@layerize
def _flatten_add_lengths(seqs, pad=0, drop=0.0):
ops = Model.ops
lengths = ops.asarray([len(seq) for seq in seqs], dtype="i")
def finish_update(d_X, sgd=None):
return ops.unflatten(d_X, lengths, pad=pad)
X = ops.flatten(seqs, pad=pad)
return (X, lengths), finish_update
def _zero_init(model):
def _zero_init_impl(self, *args, **kwargs):
self.W.fill(0)
model.on_init_hooks.append(_zero_init_impl)
if model.W is not None:
model.W.fill(0.0)
return model
@layerize
def _preprocess_doc(docs, drop=0.0):
keys = [doc.to_array(LOWER) for doc in docs]
# The dtype here matches what thinc is expecting -- which differs per
# platform (by int definition). This should be fixed once the problem
# is fixed on Thinc's side.
lengths = numpy.array([arr.shape[0] for arr in keys], dtype=numpy.int_)
keys = numpy.concatenate(keys)
vals = numpy.zeros(keys.shape, dtype='f')
return (keys, vals, lengths), None
def with_cpu(ops, model):
"""Wrap a model that should run on CPU, transferring inputs and outputs
as necessary."""
model.to_cpu()
def with_cpu_forward(inputs, drop=0.):
cpu_outputs, backprop = model.begin_update(_to_cpu(inputs), drop=drop)
gpu_outputs = _to_device(ops, cpu_outputs)
def with_cpu_backprop(d_outputs, sgd=None):
cpu_d_outputs = _to_cpu(d_outputs)
return backprop(cpu_d_outputs, sgd=sgd)
return gpu_outputs, with_cpu_backprop
return wrap(with_cpu_forward, model)
def _to_cpu(X):
if isinstance(X, numpy.ndarray):
return X
elif isinstance(X, tuple):
return tuple([_to_cpu(x) for x in X])
elif isinstance(X, list):
return [_to_cpu(x) for x in X]
elif hasattr(X, 'get'):
return X.get()
else:
return X
def _to_device(ops, X):
if isinstance(X, tuple):
return tuple([_to_device(ops, x) for x in X])
elif isinstance(X, list):
return [_to_device(ops, x) for x in X]
else:
return ops.asarray(X)
@layerize
def _preprocess_doc_bigrams(docs, drop=0.0):
unigrams = [doc.to_array(LOWER) for doc in docs]
ops = Model.ops
bigrams = [ops.ngrams(2, doc_unis) for doc_unis in unigrams]
keys = [ops.xp.concatenate(feats) for feats in zip(unigrams, bigrams)]
keys, vals = zip(*[ops.xp.unique(k, return_counts=True) for k in keys])
# The dtype here matches what thinc is expecting -- which differs per
# platform (by int definition). This should be fixed once the problem
# is fixed on Thinc's side.
lengths = ops.asarray([arr.shape[0] for arr in keys], dtype=numpy.int_)
keys = ops.xp.concatenate(keys)
vals = ops.asarray(ops.xp.concatenate(vals), dtype="f")
return (keys, vals, lengths), None
@describe.on_data(
_set_dimensions_if_needed, lambda model, X, y: model.init_weights(model)
)
@describe.attributes(
nI=Dimension("Input size"),
nF=Dimension("Number of features"),
nO=Dimension("Output size"),
nP=Dimension("Maxout pieces"),
W=Synapses("Weights matrix", lambda obj: (obj.nF, obj.nO, obj.nP, obj.nI)),
b=Biases("Bias vector", lambda obj: (obj.nO, obj.nP)),
pad=Synapses(
"Pad",
lambda obj: (1, obj.nF, obj.nO, obj.nP),
lambda M, ops: ops.normal_init(M, 1.0),
),
d_W=Gradient("W"),
d_pad=Gradient("pad"),
d_b=Gradient("b"),
)
class PrecomputableAffine(Model):
def __init__(self, nO=None, nI=None, nF=None, nP=None, **kwargs):
Model.__init__(self, **kwargs)
self.nO = nO
self.nP = nP
self.nI = nI
self.nF = nF
def begin_update(self, X, drop=0.0):
Yf = self.ops.gemm(
X, self.W.reshape((self.nF * self.nO * self.nP, self.nI)), trans2=True
)
Yf = Yf.reshape((Yf.shape[0], self.nF, self.nO, self.nP))
Yf = self._add_padding(Yf)
def backward(dY_ids, sgd=None):
dY, ids = dY_ids
dY, ids = self._backprop_padding(dY, ids)
Xf = X[ids]
Xf = Xf.reshape((Xf.shape[0], self.nF * self.nI))
self.d_b += dY.sum(axis=0)
dY = dY.reshape((dY.shape[0], self.nO * self.nP))
Wopfi = self.W.transpose((1, 2, 0, 3))
Wopfi = self.ops.xp.ascontiguousarray(Wopfi)
Wopfi = Wopfi.reshape((self.nO * self.nP, self.nF * self.nI))
dXf = self.ops.gemm(dY.reshape((dY.shape[0], self.nO * self.nP)), Wopfi)
# Reuse the buffer
dWopfi = Wopfi
dWopfi.fill(0.0)
self.ops.gemm(dY, Xf, out=dWopfi, trans1=True)
dWopfi = dWopfi.reshape((self.nO, self.nP, self.nF, self.nI))
# (o, p, f, i) --> (f, o, p, i)
self.d_W += dWopfi.transpose((2, 0, 1, 3))
if sgd is not None:
sgd(self._mem.weights, self._mem.gradient, key=self.id)
return dXf.reshape((dXf.shape[0], self.nF, self.nI))
return Yf, backward
def _add_padding(self, Yf):
Yf_padded = self.ops.xp.vstack((self.pad, Yf))
return Yf_padded
def _backprop_padding(self, dY, ids):
# (1, nF, nO, nP) += (nN, nF, nO, nP) where IDs (nN, nF) < 0
mask = ids < 0.0
mask = mask.sum(axis=1)
d_pad = dY * mask.reshape((ids.shape[0], 1, 1))
self.d_pad += d_pad.sum(axis=0)
return dY, ids
@staticmethod
def init_weights(model):
"""This is like the 'layer sequential unit variance', but instead
of taking the actual inputs, we randomly generate whitened data.
Why's this all so complicated? We have a huge number of inputs,
and the maxout unit makes guessing the dynamics tricky. Instead
we set the maxout weights to values that empirically result in
whitened outputs given whitened inputs.
"""
if (model.W ** 2).sum() != 0.0:
return
ops = model.ops
xp = ops.xp
ops.normal_init(model.W, model.nF * model.nI, inplace=True)
ids = ops.allocate((5000, model.nF), dtype="f")
ids += xp.random.uniform(0, 1000, ids.shape)
ids = ops.asarray(ids, dtype="i")
tokvecs = ops.allocate((5000, model.nI), dtype="f")
tokvecs += xp.random.normal(loc=0.0, scale=1.0, size=tokvecs.size).reshape(
tokvecs.shape
)
def predict(ids, tokvecs):
# nS ids. nW tokvecs. Exclude the padding array.
hiddens = model(tokvecs[:-1]) # (nW, f, o, p)
vectors = model.ops.allocate((ids.shape[0], model.nO * model.nP), dtype="f")
# need nS vectors
hiddens = hiddens.reshape(
(hiddens.shape[0] * model.nF, model.nO * model.nP)
)
model.ops.scatter_add(vectors, ids.flatten(), hiddens)
vectors = vectors.reshape((vectors.shape[0], model.nO, model.nP))
vectors += model.b
vectors = model.ops.asarray(vectors)
if model.nP >= 2:
return model.ops.maxout(vectors)[0]
else:
return vectors * (vectors >= 0)
tol_var = 0.01
tol_mean = 0.01
t_max = 10
t_i = 0
for t_i in range(t_max):
acts1 = predict(ids, tokvecs)
var = model.ops.xp.var(acts1)
mean = model.ops.xp.mean(acts1)
if abs(var - 1.0) >= tol_var:
model.W /= model.ops.xp.sqrt(var)
elif abs(mean) >= tol_mean:
model.b -= mean
else:
break
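# Illustrative sketch (not part of the original module): the loop in
# init_weights above repeatedly rescales the weights until activations on fake
# whitened inputs have roughly unit variance and zero mean. The toy helper
# below mirrors that correction loop with plain numpy and a ReLU standing in
# for the maxout unit; all names here are hypothetical.
def _toy_lsuv_like_init(tol_var=0.01, tol_mean=0.01, t_max=10, seed=0):
    rng = numpy.random.RandomState(seed)
    X = rng.normal(0.0, 1.0, (5000, 64)).astype("f")   # fake whitened inputs
    toy_W = rng.normal(0.0, 1.0, (64, 64)).astype("f")
    toy_b = numpy.zeros((64,), dtype="f")
    for _ in range(t_max):
        acts = numpy.maximum(X.dot(toy_W) + toy_b, 0.0)
        var = acts.var()
        mean = acts.mean()
        if abs(var - 1.0) >= tol_var:
            toy_W /= numpy.sqrt(var)
        elif abs(mean) >= tol_mean:
            toy_b -= mean
        else:
            break
    return toy_W, toy_b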
def link_vectors_to_models(vocab):
vectors = vocab.vectors
    if vectors.name is None:
        vectors.name = VECTORS_KEY
        if vectors.data.size != 0:
            print(
                "Warning: Unnamed vectors -- this won't allow multiple vectors "
                "models to be loaded. (Shape: (%d, %d))" % vectors.data.shape
            )
ops = Model.ops
for word in vocab:
if word.orth in vectors.key2row:
word.rank = vectors.key2row[word.orth]
else:
word.rank = 0
data = ops.asarray(vectors.data)
# Set an entry here, so that vectors are accessed by StaticVectors
# (unideal, I know)
thinc.extra.load_nlp.VECTORS[(ops.device, vectors.name)] = data
def PyTorchBiLSTM(nO, nI, depth, dropout=0.2):
if depth == 0:
return layerize(noop())
model = torch.nn.LSTM(nI, nO // 2, depth, bidirectional=True, dropout=dropout)
return with_square_sequences(PyTorchWrapperRNN(model))
def Tok2Vec(width, embed_size, **kwargs):
pretrained_vectors = kwargs.get("pretrained_vectors", None)
cnn_maxout_pieces = kwargs.get("cnn_maxout_pieces", 3)
subword_features = kwargs.get("subword_features", True)
conv_depth = kwargs.get("conv_depth", 4)
bilstm_depth = kwargs.get("bilstm_depth", 0)
cols = [ID, NORM, PREFIX, SUFFIX, SHAPE, ORTH]
with Model.define_operators(
{">>": chain, "|": concatenate, "**": clone, "+": add, "*": reapply}
):
norm = HashEmbed(width, embed_size, column=cols.index(NORM), name="embed_norm")
if subword_features:
prefix = HashEmbed(
width, embed_size // 2, column=cols.index(PREFIX), name="embed_prefix"
)
suffix = HashEmbed(
width, embed_size // 2, column=cols.index(SUFFIX), name="embed_suffix"
)
shape = HashEmbed(
width, embed_size // 2, column=cols.index(SHAPE), name="embed_shape"
)
else:
prefix, suffix, shape = (None, None, None)
if pretrained_vectors is not None:
glove = StaticVectors(pretrained_vectors, width, column=cols.index(ID))
if subword_features:
embed = uniqued(
(glove | norm | prefix | suffix | shape)
>> LN(Maxout(width, width * 5, pieces=3)),
column=cols.index(ORTH),
)
else:
embed = uniqued(
(glove | norm) >> LN(Maxout(width, width * 2, pieces=3)),
column=cols.index(ORTH),
)
elif subword_features:
embed = uniqued(
(norm | prefix | suffix | shape)
>> LN(Maxout(width, width * 4, pieces=3)),
column=cols.index(ORTH),
)
else:
embed = norm
convolution = Residual(
ExtractWindow(nW=1)
>> LN(Maxout(width, width * 3, pieces=cnn_maxout_pieces))
)
tok2vec = FeatureExtracter(cols) >> with_flatten(
embed >> convolution ** conv_depth, pad=conv_depth
)
if bilstm_depth >= 1:
tok2vec = tok2vec >> PyTorchBiLSTM(width, width, bilstm_depth)
# Work around thinc API limitations :(. TODO: Revise in Thinc 7
tok2vec.nO = width
tok2vec.embed = embed
return tok2vec
def reapply(layer, n_times):
def reapply_fwd(X, drop=0.0):
backprops = []
for i in range(n_times):
Y, backprop = layer.begin_update(X, drop=drop)
X = Y
backprops.append(backprop)
def reapply_bwd(dY, sgd=None):
dX = None
for backprop in reversed(backprops):
dY = backprop(dY, sgd=sgd)
if dX is None:
dX = dY
else:
dX += dY
return dX
return Y, reapply_bwd
return wrap(reapply_fwd, layer)
def asarray(ops, dtype):
def forward(X, drop=0.0):
return ops.asarray(X, dtype=dtype), None
return layerize(forward)
def _divide_array(X, size):
parts = []
index = 0
while index < len(X):
parts.append(X[index : index + size])
index += size
return parts
def get_col(idx):
if idx < 0:
raise IndexError(Errors.E066.format(value=idx))
def forward(X, drop=0.0):
if isinstance(X, numpy.ndarray):
ops = NumpyOps()
else:
ops = CupyOps()
output = ops.xp.ascontiguousarray(X[:, idx], dtype=X.dtype)
def backward(y, sgd=None):
dX = ops.allocate(X.shape)
dX[:, idx] += y
return dX
return output, backward
return layerize(forward)
def doc2feats(cols=None):
if cols is None:
cols = [ID, NORM, PREFIX, SUFFIX, SHAPE, ORTH]
def forward(docs, drop=0.0):
feats = []
for doc in docs:
feats.append(doc.to_array(cols))
return feats, None
model = layerize(forward)
model.cols = cols
return model
def print_shape(prefix):
def forward(X, drop=0.0):
return X, lambda dX, **kwargs: dX
return layerize(forward)
@layerize
def get_token_vectors(tokens_attrs_vectors, drop=0.0):
tokens, attrs, vectors = tokens_attrs_vectors
def backward(d_output, sgd=None):
return (tokens, d_output)
return vectors, backward
@layerize
def logistic(X, drop=0.0):
xp = get_array_module(X)
if not isinstance(X, xp.ndarray):
X = xp.asarray(X)
# Clip to range (-10, 10)
X = xp.minimum(X, 10.0, X)
X = xp.maximum(X, -10.0, X)
Y = 1.0 / (1.0 + xp.exp(-X))
def logistic_bwd(dY, sgd=None):
dX = dY * (Y * (1 - Y))
return dX
return Y, logistic_bwd
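# Illustrative sketch (not part of the original module): the clip to (-10, 10)
# in logistic above keeps exp(-X) in a comfortable numeric range, so outputs
# saturate at roughly 4.5e-05 and 0.99995 instead of drifting to exact 0/1.
def _demo_clipped_logistic():
    X = numpy.array([-50.0, 0.0, 50.0], dtype="f")
    Xc = numpy.clip(X, -10.0, 10.0)
    return 1.0 / (1.0 + numpy.exp(-Xc))  # approx [4.54e-05, 0.5, 0.99995]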
def zero_init(model):
def _zero_init_impl(self, X, y):
self.W.fill(0)
model.on_data_hooks.append(_zero_init_impl)
return model
@layerize
def preprocess_doc(docs, drop=0.0):
keys = [doc.to_array([LOWER]) for doc in docs]
ops = Model.ops
lengths = ops.asarray([arr.shape[0] for arr in keys])
keys = ops.xp.concatenate(keys)
vals = ops.allocate(keys.shape[0]) + 1
return (keys, vals, lengths), None
def getitem(i):
def getitem_fwd(X, drop=0.0):
return X[i], None
return layerize(getitem_fwd)
def build_tagger_model(nr_class, **cfg):
embed_size = util.env_opt("embed_size", 2000)
if "token_vector_width" in cfg:
token_vector_width = cfg["token_vector_width"]
else:
token_vector_width = util.env_opt("token_vector_width", 96)
pretrained_vectors = cfg.get("pretrained_vectors")
subword_features = cfg.get("subword_features", True)
with Model.define_operators({">>": chain, "+": add}):
if "tok2vec" in cfg:
tok2vec = cfg["tok2vec"]
else:
tok2vec = Tok2Vec(
token_vector_width,
embed_size,
subword_features=subword_features,
pretrained_vectors=pretrained_vectors,
)
softmax = with_flatten(Softmax(nr_class, token_vector_width))
model = tok2vec >> softmax
model.nI = None
model.tok2vec = tok2vec
model.softmax = softmax
return model
@layerize
def SpacyVectors(docs, drop=0.0):
batch = []
for doc in docs:
indices = numpy.zeros((len(doc),), dtype="i")
for i, word in enumerate(doc):
if word.orth in doc.vocab.vectors.key2row:
indices[i] = doc.vocab.vectors.key2row[word.orth]
else:
indices[i] = 0
vectors = doc.vocab.vectors.data[indices]
batch.append(vectors)
return batch, None
def build_text_classifier(nr_class, width=64, **cfg):
depth = cfg.get("depth", 2)
nr_vector = cfg.get("nr_vector", 5000)
pretrained_dims = cfg.get("pretrained_dims", 0)
with Model.define_operators({">>": chain, "+": add, "|": concatenate, "**": clone}):
if cfg.get("low_data") and pretrained_dims:
model = (
SpacyVectors
>> flatten_add_lengths
>> with_getitem(0, Affine(width, pretrained_dims))
>> ParametricAttention(width)
>> Pooling(sum_pool)
>> Residual(ReLu(width, width)) ** 2
>> zero_init(Affine(nr_class, width, drop_factor=0.0))
>> logistic
)
return model
lower = HashEmbed(width, nr_vector, column=1)
prefix = HashEmbed(width // 2, nr_vector, column=2)
suffix = HashEmbed(width // 2, nr_vector, column=3)
shape = HashEmbed(width // 2, nr_vector, column=4)
trained_vectors = FeatureExtracter(
[ORTH, LOWER, PREFIX, SUFFIX, SHAPE, ID]
) >> with_flatten(
uniqued(
(lower | prefix | suffix | shape)
>> LN(Maxout(width, width + (width // 2) * 3)),
column=0,
)
)
if pretrained_dims:
static_vectors = SpacyVectors >> with_flatten(
Affine(width, pretrained_dims)
)
# TODO Make concatenate support lists
vectors = concatenate_lists(trained_vectors, static_vectors)
vectors_width = width * 2
else:
vectors = trained_vectors
vectors_width = width
static_vectors = None
tok2vec = vectors >> with_flatten(
LN(Maxout(width, vectors_width))
>> Residual((ExtractWindow(nW=1) >> LN(Maxout(width, width * 3)))) ** depth,
pad=depth,
)
cnn_model = (
tok2vec
>> flatten_add_lengths
>> ParametricAttention(width)
>> Pooling(sum_pool)
>> Residual(zero_init(Maxout(width, width)))
>> zero_init(Affine(nr_class, width, drop_factor=0.0))
)
linear_model = (
_preprocess_doc
>> with_cpu(Model.ops, LinearModel(nr_class))
)
if cfg.get('exclusive_classes'):
output_layer = Softmax(nr_class, nr_class * 2)
else:
output_layer = (
zero_init(Affine(nr_class, nr_class * 2, drop_factor=0.0))
>> logistic
)
model = (
(linear_model | cnn_model)
>> output_layer
)
model.tok2vec = chain(tok2vec, flatten)
model.nO = nr_class
model.lsuv = False
return model
def build_simple_cnn_text_classifier(tok2vec, nr_class, exclusive_classes=False, **cfg):
"""
Build a simple CNN text classifier, given a token-to-vector model as inputs.
If exclusive_classes=True, a softmax non-linearity is applied, so that the
outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
is applied instead, so that outputs are in the range [0, 1].
"""
with Model.define_operators({">>": chain}):
if exclusive_classes:
output_layer = Softmax(nr_class, tok2vec.nO)
else:
output_layer = zero_init(Affine(nr_class, tok2vec.nO, drop_factor=0.0)) >> logistic
model = tok2vec >> flatten_add_lengths >> Pooling(mean_pool) >> output_layer
model.tok2vec = chain(tok2vec, flatten)
model.nO = nr_class
return model
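# Illustrative sketch (not part of the original module): the docstring above
# contrasts the two output layers. With exclusive classes, softmax scores sum
# to 1; otherwise each class gets an independent probability in [0, 1]. The
# helper below is hypothetical and only demonstrates that difference.
def _demo_output_nonlinearities():
    scores = numpy.array([2.0, -1.0, 0.5], dtype="f")
    exps = numpy.exp(scores - scores.max())
    softmax_out = exps / exps.sum()                   # sums to 1.0
    logistic_out = 1.0 / (1.0 + numpy.exp(-scores))   # each value in (0, 1)
    return softmax_out, logistic_out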
@layerize
def flatten(seqs, drop=0.0):
ops = Model.ops
lengths = ops.asarray([len(seq) for seq in seqs], dtype="i")
def finish_update(d_X, sgd=None):
return ops.unflatten(d_X, lengths, pad=0)
X = ops.flatten(seqs, pad=0)
return X, finish_update
def concatenate_lists(*layers, **kwargs): # pragma: no cover
"""Compose two or more models `f`, `g`, etc, such that their outputs are
concatenated, i.e. `concatenate(f, g)(x)` computes `hstack(f(x), g(x))`
"""
if not layers:
return noop()
drop_factor = kwargs.get("drop_factor", 1.0)
ops = layers[0].ops
layers = [chain(layer, flatten) for layer in layers]
concat = concatenate(*layers)
def concatenate_lists_fwd(Xs, drop=0.0):
drop *= drop_factor
lengths = ops.asarray([len(X) for X in Xs], dtype="i")
flat_y, bp_flat_y = concat.begin_update(Xs, drop=drop)
ys = ops.unflatten(flat_y, lengths)
def concatenate_lists_bwd(d_ys, sgd=None):
return bp_flat_y(ops.flatten(d_ys), sgd=sgd)
return ys, concatenate_lists_bwd
model = wrap(concatenate_lists_fwd, concat)
return model
def masked_language_model(vocab, model, mask_prob=0.15):
"""Convert a model into a BERT-style masked language model"""
random_words = _RandomWords(vocab)
def mlm_forward(docs, drop=0.0):
mask, docs = _apply_mask(docs, random_words, mask_prob=mask_prob)
mask = model.ops.asarray(mask).reshape((mask.shape[0], 1))
output, backprop = model.begin_update(docs, drop=drop)
def mlm_backward(d_output, sgd=None):
d_output *= 1 - mask
return backprop(d_output, sgd=sgd)
return output, mlm_backward
return wrap(mlm_forward, model)
class _RandomWords(object):
def __init__(self, vocab):
self.words = [lex.text for lex in vocab if lex.prob != 0.0]
self.probs = [lex.prob for lex in vocab if lex.prob != 0.0]
self.words = self.words[:10000]
self.probs = self.probs[:10000]
self.probs = numpy.exp(numpy.array(self.probs, dtype="f"))
self.probs /= self.probs.sum()
self._cache = []
def next(self):
if not self._cache:
self._cache.extend(
numpy.random.choice(len(self.words), 10000, p=self.probs)
)
index = self._cache.pop()
return self.words[index]
def _apply_mask(docs, random_words, mask_prob=0.15):
# This needs to be here to avoid circular imports
from .tokens.doc import Doc
N = sum(len(doc) for doc in docs)
mask = numpy.random.uniform(0.0, 1.0, (N,))
mask = mask >= mask_prob
i = 0
masked_docs = []
for doc in docs:
words = []
for token in doc:
if not mask[i]:
word = _replace_word(token.text, random_words)
else:
word = token.text
words.append(word)
i += 1
spaces = [bool(w.whitespace_) for w in doc]
# NB: If you change this implementation to instead modify
# the docs in place, take care that the IDs reflect the original
# words. Currently we use the original docs to make the vectors
# for the target, so we don't lose the original tokens. But if
# you modified the docs in place here, you would.
masked_docs.append(Doc(doc.vocab, words=words, spaces=spaces))
return mask, masked_docs
def _replace_word(word, random_words, mask="[MASK]"):
roll = numpy.random.random()
if roll < 0.8:
return mask
elif roll < 0.9:
return random_words.next()
else:
return word
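# Illustrative sketch (not part of the original module): _replace_word above
# implements the BERT-style 80/10/10 split (mask token / random word / keep
# the original). The stub below is hypothetical and only estimates those
# proportions empirically.
def _demo_replace_word_split(n=10000, seed=0):
    class _StubWords(object):
        def next(self):
            return "<random>"
    numpy.random.seed(seed)
    outcomes = [_replace_word("original", _StubWords()) for _ in range(n)]
    frac_mask = outcomes.count("[MASK]") / float(n)      # ~0.8
    frac_random = outcomes.count("<random>") / float(n)  # ~0.1
    frac_kept = outcomes.count("original") / float(n)    # ~0.1
    return frac_mask, frac_random, frac_kept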
| 32.664021 | 95 | 0.597068 |
3aff9b937003ede708026bb44e8f51b4ba682f45 | 7,628 | py | Python | algorithms/embdi/EmbDI/schema_matching.py | Soton-Song/valentine | 9a47859f912540cdbe961ed3585201d3accd07be | ["Apache-2.0"] | null | null | null | algorithms/embdi/EmbDI/schema_matching.py | Soton-Song/valentine | 9a47859f912540cdbe961ed3585201d3accd07be | ["Apache-2.0"] | null | null | null | algorithms/embdi/EmbDI/schema_matching.py | Soton-Song/valentine | 9a47859f912540cdbe961ed3585201d3accd07be | ["Apache-2.0"] | null | null | null |
import argparse
import warnings
from operator import itemgetter
import gensim.models as models
import mlflow
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file', action='store', required=True, type=str, help='Input embeddings file.')
parser.add_argument('-d', '--dataset_file', action='store', required=True, type=str, help='Input dataset.')
parser.add_argument('-m', '--match_file', action='store', required=True, type=str)
return parser.parse_args()
def read_matches(match_file):
with open(match_file, 'r', encoding='utf-8') as fp:
md = {}
for idx, line in enumerate(fp):
t = line.strip().split(',')
if t[0] not in md:
md[t[0]] = {t[1]}
else:
md[t[0]].add(t[1])
# matches.append(t)
return md
def _clean_embeddings(emb_file, matches):
gt = set()
for k, v in matches.items():
gt.add(k)
for _ in v:
gt.add(_)
with open(emb_file, 'r') as fp:
s = fp.readline()
_, dimensions = s.strip().split(' ')
viable_idx = []
for idx, row in enumerate(fp):
r = row.split(' ', maxsplit=1)[0]
rr = r.split('__', maxsplit=1)[1]
if rr in gt:
viable_idx.append(row)
# viable_idx = [row for idx, row in enumerate(fp) if (row.split(' ', maxsplit=1)[0] in gt)]
# viable_idx = [_ for _ in viable_idx if len(_.split(' ')) > 1]
f = 'pipeline/dump/sm_dump.emb'
with open(f, 'w', encoding='utf-8') as fp:
fp.write('{} {}\n'.format(len(viable_idx), dimensions))
for _ in viable_idx:
fp.write(_)
return f
def _infer_prefix(df):
columns = df.columns
prefixes = tuple([_.split('_') for _ in columns])
if len(prefixes) > 2:
return None
else:
return list(prefixes)
def _match(candidates, maxrank=3):
to_be_matched = list(candidates.keys())
misses = {k: 0 for k in candidates}
mm = []
while len(to_be_matched) > 0:
tbm = to_be_matched.copy()
for item in tbm:
if item not in to_be_matched:
continue
else:
if misses[item] > maxrank:
to_be_matched.remove(item)
continue
else:
closest_list = candidates[item]
if len(closest_list) > 0:
for idx in range(len(closest_list)):
closest_to_item = closest_list[idx]
reciprocal_closest_list = candidates[closest_to_item]
reciprocal_closest = reciprocal_closest_list[0]
if closest_to_item in to_be_matched and reciprocal_closest == item:
to_be_matched.remove(item)
to_be_matched.remove(closest_to_item)
mm.append((item, closest_to_item))
for k in candidates:
if item in candidates[k]:
candidates[k].remove(item)
if closest_to_item in candidates[k]:
candidates[k].remove(closest_to_item)
break
else:
misses[item] += 1
else:
to_be_matched.remove(item)
return mm
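# Illustrative sketch (not part of the original module): _match above pairs
# items whose nearest-neighbour relation is reciprocal. With the hypothetical
# candidate ranking below, where each column ranks its counterpart first, two
# mutual matches come back.
def _demo_match():
    toy = {'t1_name': ['t2_name', 't2_city'],
           't2_name': ['t1_name', 't1_city'],
           't1_city': ['t2_city', 't2_name'],
           't2_city': ['t1_city', 't1_name']}
    return _match(toy)  # expected: [('t1_name', 't2_name'), ('t1_city', 't2_city')]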
def _extract_candidates(wv, dataset):
candidates = []
for _1 in range(len(dataset.columns)):
for _2 in range(0, len(dataset.columns)):
if _1 == _2:
continue
c1 = dataset.columns[_1]
c2 = dataset.columns[_2]
try:
rank = wv.distance('cid__'+c1, 'cid__'+c2)
tup = (c1, c2, rank)
candidates.append(tup)
except KeyError:
continue
cleaned = []
for k in candidates:
prefix = k[0].split('_')[0]
if not k[1].startswith(prefix):
cleaned.append(k)
sorted_cand = sorted(cleaned, key=itemgetter(2), reverse=False)
cands = []
flag = True
for value in sorted_cand:
if flag:
cands.append(value)
v1, v2, rank = value
print(str(v1) + " <--> " + str(v2)+ ": " + str(rank))
flag = False
else:
flag = True
cleaned_sorted = sorted(cleaned, key=itemgetter(0, 2), reverse=False)
candidates = {}
for value in cleaned_sorted:
v1, v2, rank = value
if v1 not in candidates:
candidates[v1] = [v2]
else:
candidates[v1].append(v2)
return (candidates,cands)
def _produce_match_results(candidates):
match_results = _match(candidates)
match_results = [sorted(_) for _ in match_results]
# refactored_match_results = [(int(_[0].split('_')[1]), int(_[1].split('_')[1])) for _ in match_results]
refactored_match_results = match_results
return refactored_match_results
def match_columns(dataset, embeddings_file):
emb_file = _clean_embeddings(embeddings_file)
if emb_file is None:
return []
wv = models.KeyedVectors.load_word2vec_format(emb_file, unicode_errors='ignore')
# print('Model built from file {}'.format(embeddings_file))
    candidates, _ = _extract_candidates(wv, dataset)
match_results = _produce_match_results(candidates)
return match_results
def schema_matching(dataset, embeddings_file, configuration):
print('# Executing SM tests.')
match_file = configuration['match_file']
ground_truth = read_matches(match_file)
emb_file = _clean_embeddings(embeddings_file, ground_truth)
wv = models.KeyedVectors.load_word2vec_format(emb_file, unicode_errors='ignore')
# print('Model built from file {}'.format(embeddings_file))
candidates, scand = _extract_candidates(wv, dataset)
match_results = _produce_match_results(candidates)
count_hits = 0
gt = 0
count_rk = 0.0
for i in range(len(ground_truth)):
left,right,rank = scand[i]
dicgt = ground_truth[left]
if right in dicgt:
count_rk += 1.0
recall_at_k = count_rk/len(ground_truth)
print('RECALL AT GT IS {:.4f}\t'.format(recall_at_k*100), end='')
for item in match_results:
# gt += 1
left = item[0]
right = item[1]
if left in ground_truth:
gt+=1
if right in ground_truth[left]:
count_hits += 1
if len(match_results) > 0:
precision = count_hits/len(match_results)
else:
precision = 0
if gt > 0:
recall = count_hits/gt
else:
warnings.warn('No hits found. Are you sure the SM ground truth file {} is correct?'.format(match_file))
recall = 0
try:
f1_score = 2 * (precision * recall) / (precision + recall)
except ZeroDivisionError:
f1_score = 0
# for tup in match_results:
# print(tup)
#
result_dict = {
'P': precision,
'R': recall,
'F': f1_score,
}
print('P\tR\tF')
for _ in result_dict.values():
print('{:.4f}\t'.format(_*100), end='')
print('')
if configuration['mlflow']:
with mlflow.active_run():
mlflow.log_metric('P', precision)
mlflow.log_metric('R', recall)
mlflow.log_metric('F', f1_score)
return result_dict
| 30.882591 | 117 | 0.552832 |
643ee993553d30f77db07c33695434bbc8621ca3 | 814 | py | Python | examples/fips_mip_demo.py | J08nY/sec-certs | d25a4a7c830c587a45eb8e37d99f8794dec1a5eb | ["MIT"] | 2 | 2021-03-24T11:56:15.000Z | 2021-04-12T12:22:16.000Z | examples/fips_mip_demo.py | J08nY/sec-certs | d25a4a7c830c587a45eb8e37d99f8794dec1a5eb | ["MIT"] | 73 | 2021-04-12T14:04:04.000Z | 2022-03-31T15:40:26.000Z | examples/fips_mip_demo.py | J08nY/sec-certs | d25a4a7c830c587a45eb8e37d99f8794dec1a5eb | ["MIT"] | 3 | 2021-03-26T16:15:49.000Z | 2021-05-10T07:26:23.000Z |
#!/usr/bin/env python3
import click
from sec_certs.dataset.fips_mip import MIPDataset
@click.command()
@click.argument("directory", type=click.Path(exists=True, file_okay=False))
@click.argument("output", type=click.Path(dir_okay=False, writable=True))
def main(directory, output):
"""
Parse FIPS 'Modules In Process' pages.
\b
To use, download pages from the URL:
https://csrc.nist.gov/Projects/cryptographic-module-validation-program/modules-in-process/Modules-In-Process-List
into a directory `d` and name them `fips_mip_<iso-timestamp>.html`.
\b
Then run:
in_process.py fips-mip d output.json
to obtain the parsed output in `output.json`.
"""
dataset = MIPDataset.from_dumps(directory)
dataset.to_json(output)
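# Illustrative sketch (not part of the original script): the same parsing can
# be driven programmatically with the dataset class used above; the directory
# and output paths below are hypothetical.
def _example_programmatic_run():
    dataset = MIPDataset.from_dumps("dumps/")  # directory of fips_mip_<iso-timestamp>.html files
    dataset.to_json("mip_dataset.json")
    return dataset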
if __name__ == "__main__":
main()
| 26.258065 | 117 | 0.710074 |
d86d6b207670723c7007c769471de73e4571efdb | 2,150 | py | Python | install/core/hooks/get_current_login.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | ["MIT"] | null | null | null | install/core/hooks/get_current_login.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | ["MIT"] | null | null | null | install/core/hooks/get_current_login.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | ["MIT"] | 1 | 2020-02-15T10:42:56.000Z | 2020-02-15T10:42:56.000Z |
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
Hook that gets executed when the current user is being retrieved.
Please note that this hook will only be called whenever Toolkit doesn't
have an authenticated user present. In releases prior to v0.16, this was the case
for all users and projects, however as of Core v0.16 and above, projects are set
up to require users to log in by default, meaning that there already is a well
established notion of who the current user is.
But even in such projects, there are environments (render farms for example),
where a user cannot easily log in, and a Shotgun script user typically is being
used for "headless" operation of Toolkit. In these cases, Toolkit doesn't know
which Shotgun user is associated with the operation and this hook will be called.
The return value from this hook will then be compared with the availalble logins
for all users in Shotgun and if a match is found, this is deemed to be the
current user.
"""
from tank import Hook
import os, sys
class GetCurrentLogin(Hook):
def execute(self, **kwargs):
"""
Return the login name for the user currently logged in. This is typically used
by Toolkit to resolve against the 'login' field in the Shotgun users table in order
to extract further metadata.
"""
if sys.platform == "win32":
# http://stackoverflow.com/questions/117014/how-to-retrieve-name-of-current-windows-user-ad-or-local-using-python
return os.environ.get("USERNAME", None)
else:
try:
import pwd
pwd_entry = pwd.getpwuid(os.geteuid())
return pwd_entry[0]
except:
return None
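# Illustrative sketch (not part of the original hook): the same lookup can be
# exercised outside Toolkit, e.g. when checking what login a render node would
# report. This standalone helper is hypothetical and simply mirrors the branch
# in execute() above.
def _current_login_sketch():
    if sys.platform == "win32":
        return os.environ.get("USERNAME", None)
    try:
        import pwd
        return pwd.getpwuid(os.geteuid())[0]
    except Exception:
        return None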
| 41.346154 | 125 | 0.711163 |
828e8224e2894ac1b46f7475b5b69df4c7744c89 | 1,358 | py | Python | h2o-py/tests/testdir_algos/glm/pyunit_covtype_get_future_model.py | huamichaelchen/h2o-3 | 2b52f2240652a1c73c1708762248c0773d0c073e | ["Apache-2.0"] | null | null | null | h2o-py/tests/testdir_algos/glm/pyunit_covtype_get_future_model.py | huamichaelchen/h2o-3 | 2b52f2240652a1c73c1708762248c0773d0c073e | ["Apache-2.0"] | null | null | null | h2o-py/tests/testdir_algos/glm/pyunit_covtype_get_future_model.py | huamichaelchen/h2o-3 | 2b52f2240652a1c73c1708762248c0773d0c073e | ["Apache-2.0"] | null | null | null |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
def test_get_future_model():
covtype=h2o.upload_file(pyunit_utils.locate("smalldata/covtype/covtype.altered.gz"))
myY=54
myX=list(set(range(54)) - set([20,28])) # Cols 21 and 29 are constant, so must be explicitly ignored
# Set response to be indicator of a particular class
res_class=random.sample(range(1,5), 1)[0]
covtype[myY] = covtype[myY] == res_class
covtype[myY] = covtype[myY].asfactor()
# L2: alpha=0, lambda=0
covtype_h2o1 = H2OGeneralizedLinearEstimator(family="binomial", alpha=0, Lambda=0)
covtype_h2o1.start(x=myX, y=myY, training_frame=covtype)
# Elastic: alpha=0.5, lambda=1e-4
covtype_h2o2 = H2OGeneralizedLinearEstimator(family="binomial", alpha=0.5, Lambda=1e-4)
covtype_h2o2.start(x=myX, y=myY, training_frame=covtype)
# L1: alpha=1, lambda=1e-4
covtype_h2o3 = H2OGeneralizedLinearEstimator(family="binomial", alpha=1, Lambda=1e-4)
covtype_h2o3.start(x=myX, y=myY, training_frame=covtype)
covtype_h2o1.join()
print(covtype_h2o1)
covtype_h2o2.join()
print(covtype_h2o2)
covtype_h2o3.join()
print(covtype_h2o3)
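# Illustrative sketch (not part of the original test): the alpha/Lambda pairs
# above select the penalty type. Assuming the usual glmnet-style penalty
# lambda * (alpha * ||b||_1 + (1 - alpha) / 2 * ||b||_2^2), alpha=0 keeps only
# the L2 term, alpha=1 only the L1 term, and alpha=0.5 mixes the two.
def _elastic_net_penalty(beta, alpha, lambda_):
    l1 = sum(abs(b) for b in beta)
    l2 = sum(b * b for b in beta)
    return lambda_ * (alpha * l1 + (1.0 - alpha) / 2.0 * l2)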
if __name__ == "__main__":
pyunit_utils.standalone_test(test_get_future_model)
else:
test_get_future_model()
| 30.863636 | 104 | 0.749632 |
cf269e95218c576ff5625d4a7743a9e0e5fac111 | 4,843 | py | Python | annotator_supreme/plugins/export_to_logo_detection.py | meerkat-cv/annotator-supreme | 36f32834078b3c8b0ae5e59985848248265ac802 | ["MIT"] | 5 | 2017-06-26T21:30:32.000Z | 2017-09-20T16:12:57.000Z | annotator_supreme/plugins/export_to_logo_detection.py | meerkat-cv/annotator-supreme | 36f32834078b3c8b0ae5e59985848248265ac802 | ["MIT"] | 17 | 2017-12-01T14:21:33.000Z | 2018-01-18T21:09:50.000Z | annotator_supreme/plugins/export_to_logo_detection.py | meerkat-cv/annotator-supreme | 36f32834078b3c8b0ae5e59985848248265ac802 | ["MIT"] | 1 | 2017-11-28T17:56:41.000Z | 2017-11-28T17:56:41.000Z |
import os
import cv2
import numpy as np
class AnnotatorPlugin:
_VERSION = '0.0.1'
def __init__(self, dataset, partition = None, additional_params = {}):
self.dataset_name = dataset["name"]
self.dataset_dir = '/tmp/logo_detection_' + dataset["name"]
self.tags = dataset["annotation_labels"]
self.images_list_training = []
self.images_list_testing = []
self.partition = partition
try:
os.system('rm -rf ' + self.dataset_dir)
except:
pass
os.system('mkdir ' + self.dataset_dir)
os.system('mkdir ' + self.dataset_dir+"/data/")
for t in self.tags:
os.system('mkdir ' + self.dataset_dir+'/'+t)
os.system('mkdir ' + self.dataset_dir + '/Distractors')
def process(self, image_matrix, image_object):
im_name = self.dataset_dir + '/' + image_object['phash']
width = float(image_matrix.shape[1])
height = float(image_matrix.shape[0])
curr_tag = ''
print('processing', image_object['phash'])
(image_matrix, image_object) = self.blur_img(image_matrix, image_object)
annotations = []
for bb in image_object['anno']:
if bb['ignore']:
continue
x = ((bb['left']+bb['right'])/2.0) / width
y = ((bb['top']+bb['bottom'])/2.0) / height
w = (bb['right']-bb['left']) / width
h = (bb['bottom']-bb['top']) / height
curr_tag = bb['labels'][0]
            curr_anno = str(self.tags.index(curr_tag))+' '+str(x)+' '+str(y)+' '+str(w)+' '+str(h)
annotations.append(curr_anno)
has_annotation = True
if curr_tag == '':
curr_tag = 'Distractors'
has_annotation = False
if has_annotation:
with open(self.dataset_dir+'/'+curr_tag+'/'+image_object['phash']+'.txt', 'w') as f:
for a in annotations:
f.write(a+'\n')
cv2.imwrite(self.dataset_dir+'/'+curr_tag+'/'+image_object['phash']+'.png', image_matrix)
if image_object["partition"] == 0:
self.images_list_training.append(self.dataset_dir+'/'+curr_tag+'/'+image_object['phash']+'.png')
elif image_object["partition"] == 1:
self.images_list_testing.append(self.dataset_dir+'/'+curr_tag+'/'+image_object['phash']+'.png')
return (image_matrix, image_object)
def end(self):
with open(self.dataset_dir+'/train.txt', 'w') as f:
for im in self.images_list_training:
f.write(im+'\n')
with open(self.dataset_dir+'/test.txt', 'w') as f:
for im in self.images_list_testing:
f.write(im+'\n')
# create .names file
names_content = "\n".join(self.tags)
with open(self.dataset_dir+"/"+self.dataset_name+".names", 'w') as f:
f.write(names_content)
# create config file
cfg = "classes = {n_classes}\n"+\
"train = {train_path}\n"+\
"valid = {valid_path}\n"+\
"thresh = 0.5\n"+\
"names = {names_path}\n"+\
"backup = /tmp/data/"
cfg = cfg.format(n_classes = len(self.tags),
train_path = self.dataset_dir+"/train.txt",
                         valid_path = self.dataset_dir+"/test.txt",
names_path = self.dataset_dir+"/"+self.dataset_name+".names")
with open(self.dataset_dir+"/"+self.dataset_name+".cfg", 'w') as f:
f.write(cfg)
return {"out_folder": self.dataset_dir}
def get_parameters(self):
return {'parameters': []}
    def get_version(self):
        return self._VERSION
def blur_img(self, im, anno):
im_blur = im
new_anno = []
if anno is not None:
for bb in anno['anno']:
if not bb['ignore'] and bb['labels'][0] != 'ignore':
new_anno.append(bb)
continue
(l,t,r,b) = (int(bb['left']), int(bb['top']), int(bb['right']), int(bb['bottom']))
(l,t,r,b) = self.fix_bb(l,t,r,b,im.shape)
if (r-l)*(b-t) <= 0:
continue
biggest_dim = r-l
if (b-t) > biggest_dim:
biggest_dim = b-t
kernel_size = int(biggest_dim/5.0) + (1-int(biggest_dim/5.0)%2)
im_blur[t:b,l:r] = cv2.GaussianBlur(im_blur[t:b,l:r], (kernel_size, kernel_size), biggest_dim/5.0)
anno['anno'] = new_anno
return (im_blur, anno)
def fix_bb(self, l, t, r, b, shape):
if l<0: l=0
if t<0: t=0
if b>=shape[0]: b=shape[0]-1
if r>=shape[1]: r=shape[1]-1
return (l,t,r,b)
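# Illustrative sketch (not part of the original plugin): process() above writes
# YOLO-style annotations, i.e. box centre and size normalised by the image
# dimensions. For a hypothetical 200x100 image with a box (left=20, top=10,
# right=60, bottom=50) the stored values are x=0.2, y=0.3, w=0.2, h=0.4.
def _demo_normalise_box(left=20.0, top=10.0, right=60.0, bottom=50.0, width=200.0, height=100.0):
    x = ((left + right) / 2.0) / width   # (20 + 60) / 2 / 200 = 0.2
    y = ((top + bottom) / 2.0) / height  # (10 + 50) / 2 / 100 = 0.3
    w = (right - left) / width           # 40 / 200 = 0.2
    h = (bottom - top) / height          # 40 / 100 = 0.4
    return x, y, w, h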
| 33.4 | 114 | 0.520752 |
8faa8e914914ac0deb48002f2cf1ba84f256ef60 | 144 | py | Python | chillpill_examples/__init__.py | kevinbache/chillpill_examples | d9c5fac9972f1afbf7bb4e6b6e5388b9f52c73c3 | ["MIT"] | null | null | null | chillpill_examples/__init__.py | kevinbache/chillpill_examples | d9c5fac9972f1afbf7bb4e6b6e5388b9f52c73c3 | ["MIT"] | null | null | null | chillpill_examples/__init__.py | kevinbache/chillpill_examples | d9c5fac9972f1afbf7bb4e6b6e5388b9f52c73c3 | ["MIT"] | null | null | null |
"""chillpill_examples - Brown, paper, tied up with string."""
__version__ = '0.0.1'
__author__ = 'My Name <kevin.bache@gmail.com>'
__all__ = []
| 28.8 | 61 | 0.6875 |
b349068faa2f8eb954a92432cdc035c310ee1bf0 | 1,478 | py | Python | bspump/declarative/expression/utility/castexpr.py | thatch/BitSwanPump | 98a5b8d09f9b59d5361611cee0bd45e7b4c69e3f | ["BSD-3-Clause"] | null | null | null | bspump/declarative/expression/utility/castexpr.py | thatch/BitSwanPump | 98a5b8d09f9b59d5361611cee0bd45e7b4c69e3f | ["BSD-3-Clause"] | null | null | null | bspump/declarative/expression/utility/castexpr.py | thatch/BitSwanPump | 98a5b8d09f9b59d5361611cee0bd45e7b4c69e3f | ["BSD-3-Clause"] | null | null | null |
from bspump.declarative.abc import Expression
from ..value.eventexpr import ARG
class CAST(Expression):
"""
Casts "value" to "type"
"""
def __init__(self, app, *, arg_what=None, arg_type=None, arg_default=None, value=None):
super().__init__(app)
if value is not None:
# Scalar variant
self.Value = ARG(app, value='')
# Detect type cast function
if value == "int":
self.Conversion = int
elif value == "float":
self.Conversion = float
elif value == "str":
self.Conversion = str
elif value == "dict":
self.Conversion = dict
elif value == "list":
self.Conversion = list
else:
raise RuntimeError("Unsupported type '{}' found in CAST expression.".format(arg_type))
else:
self.Value = arg_what
# Detect type cast function
if arg_type == "int":
self.Conversion = int
elif arg_type == "float":
self.Conversion = float
elif arg_type == "str":
self.Conversion = str
elif arg_type == "dict":
self.Conversion = dict
elif arg_type == "list":
self.Conversion = list
else:
raise RuntimeError("Unsupported type '{}' found in CAST expression.".format(arg_type))
self.Default = arg_default
def __call__(self, context, event, *args, **kwargs):
try:
return self.Conversion(self.evaluate(self.Value, context, event, *args, **kwargs))
except ValueError:
if self.Default is None:
return None
return self.evaluate(self.Default, context, event, *args, **kwargs)
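# Illustrative sketch (not part of the original expression): __call__ above
# converts the evaluated value and falls back to the declared default (or None)
# when the conversion raises ValueError. The plain-Python helper below is
# hypothetical and only restates that behaviour.
def _demo_cast_semantics():
    def cast(value, conversion, default=None):
        try:
            return conversion(value)
        except ValueError:
            return default
    assert cast("42", int) == 42              # successful cast
    assert cast("n/a", int, default=0) == 0   # falls back to the default
    assert cast("n/a", float) is None         # no default declared
    return True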
| 24.229508 | 90 | 0.661705 |
5b565a48cc0ba1e265fdd460311cb71c7ab83abe | 3,285 | py | Python | options/train_options.py | kagudkovArt/instagan | 5805bf26526e7bdbad02fca7c23c85f385fde02c | ["BSD-3-Clause"] | null | null | null | options/train_options.py | kagudkovArt/instagan | 5805bf26526e7bdbad02fca7c23c85f385fde02c | ["BSD-3-Clause"] | null | null | null | options/train_options.py | kagudkovArt/instagan | 5805bf26526e7bdbad02fca7c23c85f385fde02c | ["BSD-3-Clause"] | null | null | null |
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
# Use ncols = 6 for better visualization
parser.add_argument('--display_ncols', type=int, default=6, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine')
parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
parser.add_argument('--add_color_aug', action='store_true', help='if specified, create color augmentation the images for data augmentation')
self.isTrain = True
return parser
| 93.857143 | 175 | 0.714155 |