hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6c39ac12c0c8d7dc26c4d273cceb4739cb11c3b0 | 3,754 | py | Python | Highway_RL_agents/mountainCar_reference.py | kk2491/highway-env | 7b0db91fdaf841824a4292bf9fc054c96da46510 | [
"MIT"
] | 1 | 2020-01-10T10:53:31.000Z | 2020-01-10T10:53:31.000Z | Highway_RL_agents/mountainCar_reference.py | kk2491/highway-env | 7b0db91fdaf841824a4292bf9fc054c96da46510 | [
"MIT"
] | null | null | null | Highway_RL_agents/mountainCar_reference.py | kk2491/highway-env | 7b0db91fdaf841824a4292bf9fc054c96da46510 | [
"MIT"
] | null | null | null | # Part 3 - Update to QLearning_sentex_1.py
import gym
import numpy as np
env = gym.make("MountainCar-v0")
env.reset()
LEARNING_RATE = 0.1
DISCOUNT = 0.95
EPISODES = 10000
SHOW_EVERY = 500
EPSILON = 0.5
START_EPSILON_DECAYING = 1
END_EPSILON_DECAYING = EPISODES // 2
EPSILON_DECAY_VALUE = EPSILON / (END_EPSILON_DECAYING - START_EPSILON_DECAYING)
print(env.observation_space.high)
print(env.observation_space.low)
print(env.action_space.n)
DISCRETE_OS_SIZE = [20] * len(env.observation_space.high)
print(DISCRETE_OS_SIZE)
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE
print(discrete_os_win_size)
q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))
print(q_table.shape)
# print(q_table)
ep_rewards = []
aggr_ep_rewards = {"ep": [],
"avg": [],
"min": [],
"max": []}
def get_descrete_state(state):
discrete_state = (state - env.observation_space.low) / discrete_os_win_size
return tuple(discrete_state.astype(np.int))
for episode in range(EPISODES):
episode_reward = 0
if episode % SHOW_EVERY == 0:
print(episode)
render = True
else:
render = False
discrete_state = get_descrete_state(env.reset())
# print(discrete_state)
# print(np.argmax(q_table[discrete_state]))
done = False
while not done:
if np.random.random() > EPSILON:
action = np.argmax(q_table[discrete_state])
else:
action = np.random.randint(0, env.action_space.n)
new_state, reward, done, info = env.step(action)
episode_reward += reward
new_discrete_state = get_descrete_state(new_state)
# print("New state : {}".format(new_state))
if render:
env.render()
if not done:
max_future_q = np.max(q_table[new_discrete_state])
# print("max_future_q : {}".format(max_future_q))
current_q = q_table[discrete_state + (action,)]
new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
q_table[discrete_state + (action,)] = new_q
elif new_state[0] >= env.goal_position:
# print("We made it on episode : {}".format(episode))
q_table[discrete_state + (action,)] = 0
discrete_state = new_discrete_state
if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
EPSILON -= EPSILON_DECAY_VALUE
ep_rewards.append(episode_reward)
if not episode % SHOW_EVERY:
np.save("q_tables_-q_table.npy", q_table)
average_reward = sum(ep_rewards[-SHOW_EVERY:]) / len(ep_rewards[-SHOW_EVERY:])
aggr_ep_rewards["ep"].append(episode)
aggr_ep_rewards["avg"].append(average_reward)
aggr_ep_rewards["min"].append(min(ep_rewards[-SHOW_EVERY:]))
aggr_ep_rewards["max"].append(max(ep_rewards[-SHOW_EVERY:]))
# print(f"Episode : {episode} Average : {average_reward} Minimum : {min(ep_rewards[-SHOW_EVERY])} Maximum : {max(ep_rewards[-SHOW_EVERY:])}")
print("Episode : {} || Average : {} || Minimum : {} || Maximum : {}".format(episode, average_reward,
min(ep_rewards[-SHOW_EVERY:]),
max(ep_rewards[-SHOW_EVERY:])))
env.close()
import matplotlib.pyplot as plt
plt.plot(aggr_ep_rewards["ep"], aggr_ep_rewards["avg"], label="avg")
plt.plot(aggr_ep_rewards["ep"], aggr_ep_rewards["min"], label="min")
plt.plot(aggr_ep_rewards["ep"], aggr_ep_rewards["max"], label="max")
plt.legend(loc=4)
plt.show()
| 31.813559 | 149 | 0.632658 |
84a6aaaafa70c608741594bd85dda4e5ec780be0 | 1,391 | py | Python | examples/1D_sizing/size_compressor.py | EnergyModels/estorage | 0f84c87632dba1ff0564ffb68f59ece314f67022 | [
"MIT"
] | null | null | null | examples/1D_sizing/size_compressor.py | EnergyModels/estorage | 0f84c87632dba1ff0564ffb68f59ece314f67022 | [
"MIT"
] | null | null | null | examples/1D_sizing/size_compressor.py | EnergyModels/estorage | 0f84c87632dba1ff0564ffb68f59ece314f67022 | [
"MIT"
] | null | null | null | from estorage import SIZE_AIR_CMP
import seaborn as sns
import matplotlib.pyplot as plt
# Test
designs = SIZE_AIR_CMP(p_in=1.01325, t_in=20.0, p_out=2.2, m_dot=2.2, RPM_low=22000, RPM_high=22000, RPM_cases = 1, debug=True)
# Run Sweep
designs = SIZE_AIR_CMP(p_in=1.01325, t_in=20.0, p_out=31.1, m_dot=13.82, RPM_low=1800, RPM_high=15000, RPM_cases = 20, debug=False)
designs.to_csv("cmp_sizing_results.csv")
# Plot Results
if len(designs)>0:
sns.set_style('white')
# Plot 1
f,a = plt.subplots(2,2,sharex=True)
sns.lineplot(x='RPM', y='Ns', hue='Nstg', data=designs, ax=a[0,0])
sns.lineplot(x='RPM', y='Ds', hue='Nstg', data=designs, ax=a[1,0])
sns.lineplot(x='RPM', y='psi', hue='Nstg', data=designs, ax=a[0,1])
sns.lineplot(x='RPM', y='mu', hue='Nstg', data=designs, ax=a[1,1])
f.savefig('cmp_sizing_noDim.png',dpi=1200)
# Plot 2
f, a = plt.subplots(3, 2, sharex=True)
sns.lineplot(x='RPM', y='eff', hue='Nstg', data=designs, ax=a[0, 0])
sns.lineplot(x='RPM', y='psi', hue='Nstg', data=designs, ax=a[1, 0])
sns.lineplot(x='RPM', y='mu', hue='Nstg', data=designs, ax=a[2, 0])
sns.lineplot(x='RPM', y='D', hue='Nstg', data=designs, ax=a[0, 1])
sns.lineplot(x='RPM', y='r2', hue='Nstg', data=designs, ax=a[1, 1])
sns.lineplot(x='RPM', y='r1', hue='Nstg', data=designs, ax=a[2, 1])
f.savefig('cmp_sizing.png', dpi=1200) | 40.911765 | 131 | 0.63839 |
06e4864894d153b2dd680a949ed3501b7e5c3171 | 34,638 | py | Python | wsgi.py | zsolt-beringer/osm-gimmisn | b0cbf2e88c1846ef49e33fd32aeb6b4ecabea4c0 | [
"MIT"
] | null | null | null | wsgi.py | zsolt-beringer/osm-gimmisn | b0cbf2e88c1846ef49e33fd32aeb6b4ecabea4c0 | [
"MIT"
] | null | null | null | wsgi.py | zsolt-beringer/osm-gimmisn | b0cbf2e88c1846ef49e33fd32aeb6b4ecabea4c0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2019 Miklos Vajna. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""The wsgi module contains functionality specific to the web interface."""
import json
import locale
import os
import subprocess
import sys
import urllib.parse
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from typing import Tuple
from typing import cast
import wsgiref.simple_server
import yattag
import areas
from i18n import translate as _
import overpass_query
import util
import webframe
if TYPE_CHECKING:
# pylint: disable=no-name-in-module,import-error,unused-import
from wsgiref.types import StartResponse
if sys.platform.startswith("win"):
import _locale
def handle_streets(relations: areas.Relations, request_uri: str) -> yattag.doc.Doc:
"""Expected request_uri: e.g. /osm/streets/ormezo/view-query."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
action = tokens[-1]
relation = relations.get_relation(relation_name)
osmrelation = relation.get_config().get_osmrelation()
doc = yattag.doc.Doc()
doc.asis(webframe.get_toolbar(relations, "streets", relation_name, osmrelation).getvalue())
if action == "view-query":
with doc.tag("pre"):
doc.text(relation.get_osm_streets_query())
elif action == "update-result":
query = relation.get_osm_streets_query()
try:
relation.get_files().write_osm_streets(overpass_query.overpass_query(query))
streets = relation.get_config().should_check_missing_streets()
if streets != "only":
doc.text(_("Update successful: "))
link = "/osm/missing-housenumbers/" + relation_name + "/view-result"
doc.asis(util.gen_link(link, _("View missing house numbers")).getvalue())
else:
doc.text(_("Update successful."))
except urllib.error.HTTPError as http_error:
doc.asis(util.handle_overpass_error(http_error).getvalue())
else:
# assume view-result
with relation.get_files().get_osm_streets_stream("r") as sock:
table = util.tsv_to_list(sock)
doc.asis(util.html_table_from_list(table).getvalue())
date = get_streets_last_modified(relation)
doc.asis(webframe.get_footer(date).getvalue())
return doc
def handle_street_housenumbers(relations: areas.Relations, request_uri: str) -> yattag.doc.Doc:
"""Expected request_uri: e.g. /osm/street-housenumbers/ormezo/view-query."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
action = tokens[-1]
relation = relations.get_relation(relation_name)
osmrelation = relation.get_config().get_osmrelation()
doc = yattag.doc.Doc()
doc.asis(webframe.get_toolbar(relations, "street-housenumbers", relation_name, osmrelation).getvalue())
if action == "view-query":
with doc.tag("pre"):
doc.text(relation.get_osm_housenumbers_query())
elif action == "update-result":
query = relation.get_osm_housenumbers_query()
try:
relation.get_files().write_osm_housenumbers(overpass_query.overpass_query(query))
doc.text(_("Update successful: "))
link = "/osm/missing-housenumbers/" + relation_name + "/view-result"
doc.asis(util.gen_link(link, _("View missing house numbers")).getvalue())
except urllib.error.HTTPError as http_error:
doc.asis(util.handle_overpass_error(http_error).getvalue())
else:
# assume view-result
if not os.path.exists(relation.get_files().get_osm_housenumbers_path()):
with doc.tag("div", id="no-osm-housenumbers"):
doc.text(_("No existing house numbers"))
else:
with relation.get_files().get_osm_housenumbers_stream(mode="r") as sock:
table = util.tsv_to_list(sock)
doc.asis(util.html_table_from_list(table).getvalue())
date = get_housenumbers_last_modified(relation)
doc.asis(webframe.get_footer(date).getvalue())
return doc
def missing_housenumbers_view_turbo(relations: areas.Relations, request_uri: str) -> yattag.doc.Doc:
"""Expected request_uri: e.g. /osm/missing-housenumbers/ormezo/view-turbo."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
doc = yattag.doc.Doc()
relation = relations.get_relation(relation_name)
ret = relation.write_missing_housenumbers()
_todo_street_count, _todo_count, _done_count, _percent, table = ret
query = areas.make_turbo_query_for_streets(relation, table)
with doc.tag("pre"):
doc.text(query)
return doc
def missing_housenumbers_view_res(relations: areas.Relations, request_uri: str) -> yattag.doc.Doc:
"""Expected request_uri: e.g. /osm/missing-housenumbers/ormezo/view-result."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
doc = yattag.doc.Doc()
relation = relations.get_relation(relation_name)
if not os.path.exists(relation.get_files().get_osm_streets_path()):
with doc.tag("div", id="no-osm-streets"):
doc.text(_("No existing streets: "))
link = "/osm/streets/" + relation_name + "/update-result"
doc.asis(util.gen_link(link, _("Call Overpass to create")).getvalue())
elif not os.path.exists(relation.get_files().get_osm_housenumbers_path()):
with doc.tag("div", id="no-osm-housenumbers"):
doc.text(_("No existing house numbers: "))
link = "/osm/street-housenumbers/" + relation_name + "/update-result"
doc.asis(util.gen_link(link, _("Call Overpass to create")).getvalue())
elif not os.path.exists(relation.get_files().get_ref_housenumbers_path()):
with doc.tag("div", id="no-ref-housenumbers"):
doc.text(_("No missing house numbers: "))
link = "/osm/missing-housenumbers/" + relation_name + "/update-result"
doc.asis(util.gen_link(link, _("Create from reference")).getvalue())
else:
ret = relation.write_missing_housenumbers()
todo_street_count, todo_count, done_count, percent, table = ret
with doc.tag("p"):
doc.text(_("OpenStreetMap is possibly missing the below {0} house numbers for {1} streets.")
.format(str(todo_count), str(todo_street_count)))
doc.text(_(" (existing: {0}, ready: {1}%).").format(str(done_count), str(percent)))
doc.stag("br")
with doc.tag("a", href="https://github.com/vmiklos/osm-gimmisn/tree/master/doc"):
doc.text(_("Filter incorrect information"))
doc.text(".")
doc.stag("br")
with doc.tag("a", href="/osm/missing-housenumbers/{}/view-turbo".format(relation_name)):
doc.text(_("Overpass turbo query for the below streets"))
doc.text(".")
doc.asis(util.html_table_from_list(table).getvalue())
return doc
def missing_streets_view_result(relations: areas.Relations, request_uri: str) -> yattag.doc.Doc:
"""Expected request_uri: e.g. /osm/missing-streets/budapest_11/view-result."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
relation = relations.get_relation(relation_name)
doc = yattag.doc.Doc()
if not os.path.exists(relation.get_files().get_osm_streets_path()):
with doc.tag("div", id="no-osm-streets"):
doc.text(_("No existing streets: "))
with doc.tag("a", href="/osm/streets/" + relation_name + "/update-result"):
doc.text(_("Call Overpass to create"))
elif not os.path.exists(relation.get_files().get_ref_streets_path()):
with doc.tag("div", id="no-ref-streets"):
doc.text(_("No street list: "))
with doc.tag("a", href="/osm/missing-streets/" + relation_name + "/update-result"):
doc.text(_("Create from reference"))
else:
ret = relation.write_missing_streets()
todo_count, done_count, percent, streets = ret
streets.sort(key=locale.strxfrm)
table = [[util.html_escape(_("Street name"))]]
for street in streets:
table.append([util.html_escape(street)])
with doc.tag("p"):
doc.text(_("OpenStreetMap is possibly missing the below {0} streets.").format(str(todo_count)))
doc.text(_(" (existing: {0}, ready: {1}%).").format(str(done_count), str(percent)))
doc.asis(util.html_table_from_list(table).getvalue())
return doc
def missing_housenumbers_view_txt(relations: areas.Relations, request_uri: str) -> str:
"""Expected request_uri: e.g. /osm/missing-housenumbers/ormezo/view-result.txt."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
relation = relations.get_relation(relation_name)
relation.get_config().set_letter_suffix_style(util.LetterSuffixStyle.LOWER)
output = ""
if not os.path.exists(relation.get_files().get_osm_streets_path()):
output += _("No existing streets")
elif not os.path.exists(relation.get_files().get_osm_housenumbers_path()):
output += _("No existing house numbers")
elif not os.path.exists(relation.get_files().get_ref_housenumbers_path()):
output += _("No reference house numbers")
else:
ongoing_streets, _ignore = relation.get_missing_housenumbers()
table = []
for result in ongoing_streets:
result_strings = util.get_housenumber_ranges(result[1])
# Street name, only_in_reference items.
if not relation.get_config().get_street_is_even_odd(result[0]):
result_sorted = sorted(result_strings, key=util.split_house_number)
row = result[0] + "\t[" + ", ".join(result_sorted) + "]"
else:
elements = util.format_even_odd(result_strings, doc=None)
row = result[0] + "\t[" + "], [".join(elements) + "]"
table.append(row)
table.sort(key=locale.strxfrm)
output += "\n".join(table)
return output
def get_chkl_split_limit() -> int:
"""Decides when to split a too long line in the chkl output."""
return 20
def missing_housenumbers_view_chkl(relations: areas.Relations, request_uri: str) -> Tuple[str, str]:
"""Expected request_uri: e.g. /osm/missing-housenumbers/ormezo/view-result.chkl."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
relation = relations.get_relation(relation_name)
relation.get_config().set_letter_suffix_style(util.LetterSuffixStyle.LOWER)
output = ""
if not os.path.exists(relation.get_files().get_osm_streets_path()):
output += _("No existing streets")
elif not os.path.exists(relation.get_files().get_osm_housenumbers_path()):
output += _("No existing house numbers")
elif not os.path.exists(relation.get_files().get_ref_housenumbers_path()):
output += _("No reference house numbers")
else:
ongoing_streets, _ignore = relation.get_missing_housenumbers()
table = []
for result in ongoing_streets:
result_strings = util.get_housenumber_ranges(result[1])
# Street name, only_in_reference items.
row = "[ ] "
if not relation.get_config().get_street_is_even_odd(result[0]):
result_sorted = sorted(result_strings, key=util.split_house_number)
row += result[0] + " [" + ", ".join(result_sorted) + "]"
table.append(row)
else:
elements = util.format_even_odd(result_strings, doc=None)
if len(elements) > 1 and len(result_strings) > get_chkl_split_limit():
for element in elements:
row = "[ ] " + result[0] + " [" + element + "]"
table.append(row)
else:
row += result[0] + " [" + "], [".join(elements) + "]"
table.append(row)
table.sort(key=locale.strxfrm)
output += "\n".join(table)
return output, relation_name
def missing_streets_view_txt(relations: areas.Relations, request_uri: str) -> str:
"""Expected request_uri: e.g. /osm/missing-streets/ujbuda/view-result.txt."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
relation = relations.get_relation(relation_name)
output = ""
if not os.path.exists(relation.get_files().get_osm_streets_path()):
output += _("No existing streets")
elif not os.path.exists(relation.get_files().get_ref_streets_path()):
output += _("No reference streets")
else:
todo_streets, _ignore = relation.get_missing_streets()
todo_streets.sort(key=locale.strxfrm)
output += "\n".join(todo_streets)
return output
def missing_housenumbers_update(relations: areas.Relations, relation_name: str) -> yattag.doc.Doc:
"""Expected request_uri: e.g. /osm/missing-housenumbers/ormezo/update-result."""
reference = webframe.get_config().get('wsgi', 'reference_housenumbers').strip().split(' ')
reference = [util.get_abspath(i) for i in reference]
relation = relations.get_relation(relation_name)
relation.write_ref_housenumbers(reference)
doc = yattag.doc.Doc()
doc.text(_("Update successful: "))
link = "/osm/missing-housenumbers/" + relation_name + "/view-result"
doc.asis(util.gen_link(link, _("View missing house numbers")).getvalue())
return doc
def missing_streets_update(relations: areas.Relations, relation_name: str) -> yattag.doc.Doc:
"""Expected request_uri: e.g. /osm/missing-streets/ujbuda/update-result."""
reference = util.get_abspath(webframe.get_config().get('wsgi', 'reference_street').strip())
relation = relations.get_relation(relation_name)
relation.write_ref_streets(reference)
doc = yattag.doc.Doc()
with doc.tag("div", id="update-success"):
doc.text(_("Update successful."))
return doc
def handle_missing_housenumbers(relations: areas.Relations, request_uri: str) -> yattag.doc.Doc:
"""Expected request_uri: e.g. /osm/missing-housenumbers/ormezo/view-[result|query]."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
action = tokens[-1]
date = None
relation = relations.get_relation(relation_name)
osmrelation = relation.get_config().get_osmrelation()
doc = yattag.doc.Doc()
doc.asis(webframe.get_toolbar(relations, "missing-housenumbers", relation_name, osmrelation).getvalue())
if action == "view-turbo":
doc.asis(missing_housenumbers_view_turbo(relations, request_uri).getvalue())
elif action == "view-query":
with doc.tag("pre"):
with relation.get_files().get_ref_housenumbers_stream("r") as sock:
doc.text(sock.read())
date = get_last_modified(relation.get_files().get_ref_housenumbers_path())
elif action == "update-result":
doc.asis(missing_housenumbers_update(relations, relation_name).getvalue())
else:
# assume view-result
doc.asis(missing_housenumbers_view_res(relations, request_uri).getvalue())
if not date:
date = ref_housenumbers_last_modified(relations, relation_name)
doc.asis(webframe.get_footer(date).getvalue())
return doc
def handle_missing_streets(relations: areas.Relations, request_uri: str) -> yattag.doc.Doc:
"""Expected request_uri: e.g. /osm/missing-streets/ujbuda/view-[result|query]."""
tokens = request_uri.split("/")
relation_name = tokens[-2]
action = tokens[-1]
relation = relations.get_relation(relation_name)
osmrelation = relation.get_config().get_osmrelation()
doc = yattag.doc.Doc()
doc.asis(webframe.get_toolbar(relations, "missing-streets", relation_name, osmrelation).getvalue())
if action == "view-query":
with doc.tag("pre"):
with relation.get_files().get_ref_streets_stream("r") as sock:
doc.text(sock.read())
elif action == "update-result":
doc.asis(missing_streets_update(relations, relation_name).getvalue())
else:
# assume view-result
doc.asis(missing_streets_view_result(relations, request_uri).getvalue())
date = ref_streets_last_modified(relation)
doc.asis(webframe.get_footer(date).getvalue())
return doc
def get_last_modified(path: str) -> str:
"""Gets the update date string of a file."""
return webframe.format_timestamp(get_timestamp(path))
def get_timestamp(path: str) -> float:
"""Gets the timestamp of a file if it exists, 0 otherwise."""
try:
return os.path.getmtime(path)
except FileNotFoundError:
return 0
def ref_housenumbers_last_modified(relations: areas.Relations, name: str) -> str:
"""Gets the update date for missing house numbers."""
relation = relations.get_relation(name)
t_ref = get_timestamp(relation.get_files().get_ref_housenumbers_path())
t_housenumbers = get_timestamp(relation.get_files().get_osm_housenumbers_path())
return webframe.format_timestamp(max(t_ref, t_housenumbers))
def ref_streets_last_modified(relation: areas.Relation) -> str:
"""Gets the update date for missing streets."""
t_ref = get_timestamp(relation.get_files().get_ref_streets_path())
t_osm = get_timestamp(relation.get_files().get_osm_streets_path())
return webframe.format_timestamp(max(t_ref, t_osm))
def get_housenumbers_last_modified(relation: areas.Relation) -> str:
"""Gets the update date of house numbers for a relation."""
return get_last_modified(relation.get_files().get_osm_housenumbers_path())
def get_streets_last_modified(relation: areas.Relation) -> str:
"""Gets the update date of streets for a relation."""
return get_last_modified(relation.get_files().get_osm_streets_path())
def handle_main_housenr_percent(relation: areas.Relation) -> Tuple[yattag.doc.Doc, str]:
"""Handles the house number percent part of the main page."""
url = "/osm/missing-housenumbers/" + relation.get_name() + "/view-result"
percent = "N/A"
if os.path.exists(relation.get_files().get_housenumbers_percent_path()):
percent = util.get_content(relation.get_files().get_housenumbers_percent_path())
doc = yattag.doc.Doc()
if percent != "N/A":
date = get_last_modified(relation.get_files().get_housenumbers_percent_path())
with doc.tag("strong"):
with doc.tag("a", href=url, title=_("updated") + " " + date):
doc.text(percent + "%")
return doc, percent
with doc.tag("strong"):
with doc.tag("a", href=url):
doc.text(_("missing house numbers"))
return doc, "0"
def handle_main_street_percent(relation: areas.Relation) -> Tuple[yattag.doc.Doc, str]:
"""Handles the street percent part of the main page."""
url = "/osm/missing-streets/" + relation.get_name() + "/view-result"
percent = "N/A"
if os.path.exists(relation.get_files().get_streets_percent_path()):
percent = util.get_content(relation.get_files().get_streets_percent_path())
doc = yattag.doc.Doc()
if percent != "N/A":
date = get_last_modified(relation.get_files().get_streets_percent_path())
with doc.tag("strong"):
with doc.tag("a", href=url, title=_("updated") + " " + date):
doc.text(percent + "%")
return doc, percent
with doc.tag("strong"):
with doc.tag("a", href=url):
doc.text(_("missing streets"))
return doc, "0"
def filter_for_everything(_complete: bool, _relation: areas.Relation) -> bool:
"""Does not filter out anything."""
return True
def filter_for_incomplete(complete: bool, _relation: areas.Relation) -> bool:
"""Filters out complete items."""
return not complete
def create_filter_for_refmegye(refmegye_filter: str) -> Callable[[bool, areas.Relation], bool]:
"""Creates a function that filters for a single refmegye."""
return lambda _complete, relation: relation.get_config().get_refmegye() == refmegye_filter
def create_filter_for_refmegye_reftelepules(
refmegye_filter: str,
reftelepules_filter: str
) -> Callable[[bool, areas.Relation], bool]:
"""Creates a function that filters for a single reftelepules in a refmegye."""
def filter_for(_complete: bool, relation: areas.Relation) -> bool:
config = relation.get_config()
return config.get_refmegye() == refmegye_filter and config.get_reftelepules() == reftelepules_filter
return filter_for
def handle_main_filters_refmegye(relations: areas.Relations, refmegye_id: str, refmegye: str) -> yattag.doc.Doc:
"""Handles one refmegye in the filter part of the main wsgi page."""
doc = yattag.doc.Doc()
name = relations.refmegye_get_name(refmegye)
if not name:
return doc
with doc.tag("a", href="/osm/filter-for/refmegye/" + refmegye):
doc.text(name)
if refmegye_id and refmegye == refmegye_id:
reftelepules_ids = relations.refmegye_get_reftelepules_ids(refmegye_id)
if reftelepules_ids:
names: List[yattag.doc.Doc] = []
for reftelepules_id in reftelepules_ids:
name = relations.reftelepules_get_name(refmegye_id, reftelepules_id)
name_doc = yattag.doc.Doc()
href_format = "/osm/filter-for/refmegye/{}/reftelepules/{}"
with name_doc.tag("a", href=href_format.format(refmegye, reftelepules_id)):
name_doc.text(name)
names.append(name_doc)
doc.text(" (")
for index, item in enumerate(names):
if index:
doc.text(", ")
doc.asis(item.getvalue())
doc.text(")")
return doc
def handle_main_filters(relations: areas.Relations, refmegye_id: str) -> yattag.doc.Doc:
"""Handlers the filter part of the main wsgi page."""
items: List[yattag.doc.Doc] = []
doc = yattag.doc.Doc()
with doc.tag("a", href="/osm/filter-for/incomplete"):
doc.text(_("Hide complete areas"))
items.append(doc)
# Sorted set of refmegye values of all relations.
for refmegye in sorted({relation.get_config().get_refmegye() for relation in relations.get_relations()}):
items.append(handle_main_filters_refmegye(relations, refmegye_id, refmegye))
doc = yattag.doc.Doc()
with doc.tag("h1"):
doc.text(_("Where to map?"))
with doc.tag("p"):
doc.text(_("Filters:") + " ")
for index, item in enumerate(items):
if index:
doc.text(" ¦ ")
doc.asis(item.getvalue())
return doc
def setup_main_filter_for(request_uri: str) -> Tuple[Callable[[bool, areas.Relation], bool], str]:
"""Sets up a filter-for function from request uri: only certain areas are shown then."""
tokens = request_uri.split("/")
filter_for: Callable[[bool, areas.Relation], bool] = filter_for_everything
filters = util.parse_filters(tokens)
refmegye = ""
if "incomplete" in filters:
# /osm/filter-for/incomplete
filter_for = filter_for_incomplete
elif "refmegye" in filters and "reftelepules" in filters:
# /osm/filter-for/refmegye/<value>/reftelepules/<value>.
refmegye = filters["refmegye"]
filter_for = create_filter_for_refmegye_reftelepules(filters["refmegye"], filters["reftelepules"])
elif "refmegye" in filters:
# /osm/filter-for/refmegye/<value>.
refmegye = filters["refmegye"]
filter_for = create_filter_for_refmegye(refmegye)
return filter_for, refmegye
def handle_main_relation(
relations: areas.Relations,
filter_for: Callable[[bool, areas.Relation], bool],
relation_name: str
) -> List[yattag.doc.Doc]:
"""Handles one relation (one table row) on the main page."""
relation = relations.get_relation(relation_name)
# If checking both streets and house numbers, then "is complete" refers to the street coverage
# for "hide complete" purposes.
complete = True
streets = relation.get_config().should_check_missing_streets()
row = [] # List[yattag.doc.Doc]
row.append(util.html_escape(relation_name))
if streets != "only":
cell, percent = handle_main_housenr_percent(relation)
doc = yattag.doc.Doc()
doc.asis(cell.getvalue())
row.append(doc)
complete = float(percent) >= 100.0
date = get_housenumbers_last_modified(relation)
doc = yattag.doc.Doc()
href = "/osm/street-housenumbers/" + relation_name + "/view-result"
with doc.tag("a", href=href, title=_("updated") + " " + date):
doc.text(_("existing house numbers"))
row.append(doc)
else:
row.append(yattag.doc.Doc())
row.append(yattag.doc.Doc())
if streets != "no":
cell, percent = handle_main_street_percent(relation)
row.append(cell)
complete = float(percent) >= 100.0
else:
row.append(yattag.doc.Doc())
date = get_streets_last_modified(relation)
doc = yattag.doc.Doc()
with doc.tag("a", href="/osm/streets/" + relation_name + "/view-result", title=_("updated") + " " + date):
doc.text(_("existing streets"))
row.append(doc)
doc = yattag.doc.Doc()
with doc.tag("a", href="https://www.openstreetmap.org/relation/" + str(relation.get_config().get_osmrelation())):
doc.text(_("area boundary"))
row.append(doc)
if not filter_for(complete, relation):
row.clear()
return row
def handle_main(request_uri: str, relations: areas.Relations) -> yattag.doc.Doc:
"""Handles the main wsgi page.
Also handles /osm/filter-for/* which filters for a condition."""
filter_for, refmegye = setup_main_filter_for(request_uri)
doc = yattag.doc.Doc()
doc.asis(webframe.get_toolbar(relations).getvalue())
doc.asis(handle_main_filters(relations, refmegye).getvalue())
table = []
table.append([util.html_escape(_("Area")),
util.html_escape(_("House number coverage")),
util.html_escape(_("Existing house numbers")),
util.html_escape(_("Street coverage")),
util.html_escape(_("Existing streets")),
util.html_escape(_("Area boundary"))])
for relation_name in relations.get_names():
row = handle_main_relation(relations, filter_for, relation_name)
if row:
table.append(row)
doc.asis(util.html_table_from_list(table).getvalue())
with doc.tag("p"):
with doc.tag("a", href="https://github.com/vmiklos/osm-gimmisn/tree/master/doc"):
doc.text(_("Add new area"))
doc.asis(webframe.get_footer().getvalue())
return doc
def get_html_title(request_uri: str) -> str:
"""Determines the HTML title for a given function and relation name."""
tokens = request_uri.split("/")
function = ""
relation_name = ""
if len(tokens) > 3:
function = tokens[2]
relation_name = tokens[3]
title = ""
if function == "missing-housenumbers":
title = " - " + _("{0} missing house numbers").format(relation_name)
elif function == "missing-streets":
title = " - " + relation_name + " " + _("missing streets")
elif function == "street-housenumbers":
title = " - " + relation_name + " " + _("existing house numbers")
elif function == "streets":
title = " - " + relation_name + " " + _("existing streets")
return title
def write_html_head(doc: yattag.doc.Doc, title: str) -> None:
"""Produces the <head> tag and its contents."""
with doc.tag("head"):
with doc.tag("title"):
doc.text(_("Where to map?") + title)
doc.stag("meta", charset="UTF-8")
doc.stag("link", rel="stylesheet", type="text/css", href="/osm/static/osm.css")
with doc.tag("script", src="/osm/static/sorttable.js"):
pass
doc.stag("meta", name="viewport", content="width=device-width, initial-scale=1")
def handle_github_webhook(environ: Dict[str, Any]) -> yattag.doc.Doc:
    """Handles a GitHub style webhook.

    Reads the form-encoded request body, extracts the JSON "payload" field,
    and triggers a deploy when the push was to the master branch.

    NOTE(review): no webhook signature (X-Hub-Signature) verification is
    visible here -- confirm the endpoint is otherwise protected.
    """
    # The webhook body is application/x-www-form-urlencoded with a single
    # "payload" key holding the JSON document.
    body = urllib.parse.parse_qs(environ["wsgi.input"].read().decode('utf-8'))
    payload = body["payload"][0]
    root = json.loads(payload)
    if root["ref"] == "refs/heads/master":
        # check=True: a failed deploy raises instead of passing silently.
        subprocess.run(["make", "-C", util.get_abspath(""), "deploy-pythonanywhere"], check=True)
    # An empty document: the webhook response carries no content.
    return util.html_escape("")
def our_application_txt(
        start_response: 'StartResponse',
        relations: areas.Relations,
        request_uri: str
) -> Iterable[bytes]:
    """Dispatches plain text requests based on their URIs.

    Handles the ".txt" and ".chkl" variants of the missing-streets and
    missing-housenumbers pages; ".chkl" is served as a downloadable checklist.
    """
    content_type = "text/plain"
    extra_headers: List[Tuple[str, str]] = []
    if request_uri.startswith("/osm/missing-streets/"):
        output = missing_streets_view_txt(relations, request_uri)
    else:
        # assume "/osm/missing-housenumbers/"
        # NOTE(review): this rebinds "_", shadowing the module-level gettext
        # alias within this function; no translation happens below, so it is
        # harmless here.
        _, _, ext = request_uri.partition('.')
        if ext == "chkl":
            output, relation_name = missing_housenumbers_view_chkl(relations, request_uri)
            # Force a file download named after the relation.
            content_type = "application/octet-stream"
            extra_headers.append(("Content-Disposition", 'attachment;filename="' + relation_name + '.txt"'))
        else:
            # assume txt
            output = missing_housenumbers_view_txt(relations, request_uri)
    return webframe.send_response(start_response, content_type, "200 OK", output, extra_headers)
def get_request_uri(environ: Dict[str, Any], relations: areas.Relations) -> str:
    """Finds out the request URI.

    Normalizes legacy URL spellings and resolves relation-name aliases so the
    rest of the app only sees canonical URIs.
    """
    request_uri = cast(str, environ.get("PATH_INFO"))
    if request_uri:
        # Compatibility.
        if request_uri.startswith("/osm/suspicious-streets/"):
            request_uri = request_uri.replace('suspicious-streets', 'missing-housenumbers')
        elif request_uri.startswith("/osm/suspicious-relations/"):
            request_uri = request_uri.replace('suspicious-relations', 'missing-streets')
        # Performance: don't bother with relation aliases for non-relation requests.
        if not request_uri.startswith("/osm/streets/") \
                and not request_uri.startswith("/osm/missing-streets/") \
                and not request_uri.startswith("/osm/street-housenumbers/") \
                and not request_uri.startswith("/osm/missing-housenumbers/"):
            return request_uri
        # Relation aliases.
        # The URI shape puts the relation name in the second-to-last token.
        aliases = relations.get_aliases()
        tokens = request_uri.split("/")
        relation_name = tokens[-2]
        if relation_name in aliases:
            # NOTE(review): str.replace() substitutes every occurrence of the
            # name, not just the path segment -- fine unless the name also
            # appears elsewhere in the URI.
            request_uri = request_uri.replace(relation_name, aliases[relation_name])
    return request_uri
def check_existing_relation(relations: areas.Relations, request_uri: str) -> yattag.doc.Doc:
    """Prevents serving outdated data from a relation that has been renamed."""
    doc = yattag.doc.Doc()
    relation_prefixes = (
        "/osm/streets/",
        "/osm/missing-streets/",
        "/osm/street-housenumbers/",
        "/osm/missing-housenumbers/",
    )
    # Only relation pages can reference a stale name; everything else is fine.
    if not request_uri.startswith(relation_prefixes):
        return doc
    relation_name = request_uri.split("/")[-2]
    if relation_name in relations.get_names():
        return doc
    # Unknown relation: emit an inline error block instead of stale content.
    with doc.tag("div", id="no-such-relation-error"):
        doc.text(_("No such relation: {0}").format(relation_name))
    return doc
# Maps a URL prefix to the handler that renders the HTML body for that page
# family; consumed by get_handler() below.
HANDLERS = {
    "/osm/streets/": handle_streets,
    "/osm/missing-streets/": handle_missing_streets,
    "/osm/street-housenumbers/": handle_street_housenumbers,
    "/osm/missing-housenumbers/": handle_missing_housenumbers,
}
def get_handler(request_uri: str) -> Optional[Callable[[areas.Relations, str], yattag.doc.Doc]]:
    """Decides request_uri matches what handler."""
    # First prefix match wins; None when no prefix applies.
    return next(
        (handler for prefix, handler in HANDLERS.items() if request_uri.startswith(prefix)),
        None,
    )
def our_application(
        environ: Dict[str, Any],
        start_response: 'StartResponse'
) -> Iterable[bytes]:
    """Dispatches the request based on its URI.

    Order of dispatch: plain-text (.txt/.chkl) pages, static assets, then the
    HTML pages (relation pages, GitHub webhook, or the main index).
    """
    config = webframe.get_config()
    util.set_locale(config)
    language = util.setup_localization(environ)
    relations = areas.Relations(util.get_workdir(config))
    request_uri = get_request_uri(environ, relations)
    # The extension (if any) decides between text and HTML output.
    _, _, ext = request_uri.partition('.')
    if ext in ("txt", "chkl"):
        return our_application_txt(start_response, relations, request_uri)
    if request_uri.startswith("/osm/static/"):
        output, content_type = webframe.handle_static(request_uri)
        return webframe.send_response(start_response, content_type, "200 OK", output, [])
    doc = yattag.doc.Doc()
    util.write_html_header(doc)
    with doc.tag("html", lang=language):
        write_html_head(doc, get_html_title(request_uri))
        with doc.tag("body"):
            # A renamed/unknown relation produces an error block and takes
            # precedence over the regular page handler.
            no_such_relation = check_existing_relation(relations, request_uri)
            handler = get_handler(request_uri)
            if no_such_relation.getvalue():
                doc.asis(no_such_relation.getvalue())
            elif handler:
                doc.asis(handler(relations, request_uri).getvalue())
            elif request_uri.startswith("/osm/webhooks/github"):
                doc.asis(handle_github_webhook(environ).getvalue())
            else:
                # Fallback: the main index page.
                doc.asis(handle_main(request_uri, relations).getvalue())
    return webframe.send_response(start_response, "text/html", "200 OK", doc.getvalue(), [])
def application(
        environ: Dict[str, Any],
        start_response: 'StartResponse'
) -> Iterable[bytes]:
    """The entry point of this WSGI app.

    Top-level error boundary: any unhandled exception from the real app is
    turned into an error response instead of crashing the worker.
    """
    try:
        return our_application(environ, start_response)
    # pylint: disable=broad-except
    except Exception:
        return webframe.handle_exception(environ, start_response)
def main() -> None:
    """Commandline interface to this module.

    Serves the WSGI app on port 8000 using the stdlib reference server.
    """
    if sys.platform.startswith("win"):
        # pylint: disable=protected-access
        # Workaround: force a usable default locale on Windows -- presumably
        # needed for util.set_locale(); confirm before removing.
        _locale._getdefaultlocale = (lambda *args: ['en_US', 'utf8'])
    httpd = wsgiref.simple_server.make_server('', 8000, application)
    print("Open <http://localhost:8000/osm> in your browser.")
    httpd.serve_forever()


if __name__ == "__main__":
    main()

# vim:set shiftwidth=4 softtabstop=4 expandtab:
| 40.464953 | 117 | 0.657428 |
25eea4c798cb59739dc582da5555636f8c05bb4c | 1,014 | py | Python | main.py | petabyt/fujiptp | e7de43dbb65462391a2c9f64ca72cd3825b3d3e4 | [
"BSD-3-Clause"
] | null | null | null | main.py | petabyt/fujiptp | e7de43dbb65462391a2c9f64ca72cd3825b3d3e4 | [
"BSD-3-Clause"
] | null | null | null | main.py | petabyt/fujiptp | e7de43dbb65462391a2c9f64ca72cd3825b3d3e4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import ptpy
from random import randrange

# Fuji vendor-specific PTP operation codes (hex) -- experimental findings.
FUJI_CREATE_FILE = 0x900c
FUJI_UNKNOWN1 = 0x900d
FUJI_WRITE_FILE = 0x901d
FUJI_FPUPDATE = 0xb802
FUJI_AUTO_ACT = 0x3002
# SendObjectInfo
# 36876 / 0x900c / Create file
# 36877 / 0x900d / ???
# 36893 / 0x901d / Write to file
camera = ptpy.PTPy()
with camera.session():
    # Target firmware-update file name on the camera.
    string = "FPUPDATE.DAT"
    # Convert 8 bit string to 16 bit
    # NOTE(review): str(i) is redundant (i is already a 1-char str); this
    # appends the ASCII bytes, then 1 + 2 zero padding bytes -- not true
    # UTF-16, despite the comment above.
    byte = bytearray(0)
    for i in string:
        byte += str(i).encode()
    byte += bytearray(1)
    byte += bytearray(2)
    # Prepare (struct?)
    # Hand-built little-endian header: zeros, the FPUPDATE opcode, an
    # unexplained constant, more zeros, 0x20 bytes of padding and a CR.
    header = bytes([0, 0, 0, 0])
    header += (FUJI_FPUPDATE).to_bytes(4, 'little')
    header += (0x2048590).to_bytes(4, 'little') # ???
    header += bytes([0, 0, 0, 0])
    header += bytes([0, 0, 0, 0])
    header += bytearray(0x20) + b'\r'
    payload = header + byte
    # Both don't seem to do anything
    print(camera.custom(FUJI_CREATE_FILE, [], payload))
    print(camera.custom(FUJI_WRITE_FILE, [], bytes([1, 2, 3, 4, 5, 6, 7, 8, 9])))
| 23.045455 | 81 | 0.619329 |
a17dbbc746d24341b86e366c1f8d4ed738c54589 | 1,556 | py | Python | main.py | eshirima/Diamond-Kinects | 597bbf9c523d1dd0faa0c88ddeb0766ffc14ce4b | [
"MIT"
] | null | null | null | main.py | eshirima/Diamond-Kinects | 597bbf9c523d1dd0faa0c88ddeb0766ffc14ce4b | [
"MIT"
] | null | null | null | main.py | eshirima/Diamond-Kinects | 597bbf9c523d1dd0faa0c88ddeb0766ffc14ce4b | [
"MIT"
] | null | null | null | # main.py
# Emil Shirima
# 20-September-2019 8:46 PM
#
# Purpose: Graph Swing data
import csv
import matplotlib.pyplot as plt
# Parallel module-level lists: timestamp plus the X/Y/Z accelerometer value
# of each CSV row; filled by populate_data() and consumed by the graph helpers.
time_stamps, acclX, acclY, acclZ = [], [], [], []
def populate_data(file_name='latestSwing.csv'):
    """Read the swing CSV and append each row's fields to the module lists.

    Each row is expected to have at least four columns -- timestamp, X, Y and
    Z acceleration; values are stored as strings, exactly as read.

    :param file_name: path of the CSV file to load.
    """
    # newline='' is the csv-module recommended mode for reading; the "with"
    # block closes the file, so no explicit close() is needed (the original
    # redundant close() inside the with-block has been removed).
    with open(file_name, newline='') as csv_file:
        for data_point in csv.reader(csv_file):
            time_stamps.append(data_point[0])
            acclX.append(data_point[1])
            acclY.append(data_point[2])
            acclZ.append(data_point[3])
# graphs data of each respective axis separately
def graph(time, x_data, y_data, z_data, title):
    """Draw the three acceleration axes as stacked subplots over time."""
    panels = [
        (x_data, 'r', 'X-Axis'),
        (y_data, 'g', 'Y-Axis'),
        (z_data, 'y', 'Z-Axis'),
    ]
    for position, (series, color, axis_label) in enumerate(panels, start=1):
        plt.subplot(3, 1, position)
        if position == 1:
            # Title only appears above the top panel.
            plt.title(title)
        plt.plot(time, series, color)
        plt.xlabel('Time')
        plt.ylabel(axis_label)
    plt.show()
# graph all axes on same graph with count/index as x-axis values
def graph_indices(time, x_data, y_data, z_data, title):
    """Overlay all three axes on one plot, using sample index on the x-axis."""
    sample_indices = range(len(time))
    plt.title(title)
    for series, color in ((x_data, 'r'), (y_data, 'g'), (z_data, 'y')):
        plt.plot(sample_indices, series, color)
    plt.legend(['x-axis', 'y-axis', 'z-axis'])
    plt.ylabel('Values')
    plt.xlabel('Count')
    plt.show()
# Script entry: load the CSV once, then render both chart styles.
populate_data()
graph(time_stamps, acclX, acclY, acclZ, 'Acceleration')
graph_indices(time_stamps, acclX, acclY, acclZ, 'Acceleration')
77a0819d0407736fed54ddacec7c24a7b937e300 | 4,184 | py | Python | stockBOT/Discord/intent/Loki_Safety.py | Chenct-jonathan/LokiHub | 7193589151e88f4e66aee6457926e565d0023fa1 | [
"MIT"
] | 17 | 2020-11-25T07:40:18.000Z | 2022-03-07T03:29:18.000Z | stockBOT/Discord/intent/Loki_Safety.py | Chenct-jonathan/LokiHub | 7193589151e88f4e66aee6457926e565d0023fa1 | [
"MIT"
] | 8 | 2020-12-18T13:23:59.000Z | 2021-10-03T21:41:50.000Z | stockBOT/Discord/intent/Loki_Safety.py | Chenct-jonathan/LokiHub | 7193589151e88f4e66aee6457926e565d0023fa1 | [
"MIT"
] | 43 | 2020-12-02T09:03:57.000Z | 2021-12-23T03:30:25.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Loki module for Safety
Input:
inputSTR str,
utterance str,
args str[],
resultDICT dict
Output:
resultDICT dict
"""
DEBUG_Safety = True
userDefinedDICT = {"2302": ["2302", "麗正"], "2303": ["2303", "聯電"], "2329": ["2329", "華泰"], "2330": ["2330", "台積電"], "2337": ["2337", "旺宏"], "2338": ["2338", "光罩"], "2342": ["2342", "茂矽"], "2344": ["2344", "華邦電"], "2351": ["2351", "順德"], "2363": ["2363", "矽統"], "2369": ["2369", "菱生"], "2379": ["2379", "瑞昱"], "2388": ["2388", "威盛"], "2401": ["2401", "凌陽"], "2408": ["2408", "南亞科"], "2434": ["2434", "統懋"], "2436": ["2436", "偉詮電"], "2441": ["2441", "超豐"], "2449": ["2449", "京元", "京元電子"], "2451": ["2451", "創見"], "2454": ["2454", "聯發科"], "2458": ["2458", "義隆"], "3006": ["3006", "晶豪科"], "3014": ["3014", "聯陽"], "3016": ["3016", "家晶"], "3034": ["3034", "聯詠"], "3041": ["3041", "揚智"], "3054": ["3054", "立萬利"], "3094": ["3094", "聯傑"], "3189": ["3189", "景碩"], "3257": ["3257", "虹冠電"], "3413": ["3413", "京鼎"], "3443": ["3443", "創意"], "3450": ["3450", "聯鈞"], "3530": ["3530", "晶相光"], "3532": ["3532", "台勝科"], "3536": ["3536", "誠創"], "3545": ["3545", "敦泰"], "3583": ["3583", "辛耕"], "3588": ["3588", "通嘉"], "3661": ["3661", "世芯-KY"], "3686": ["3686", "達能"], "3711": ["3711", "日月光投控"], "4919": ["4919", "新唐"], "4952": ["4952", "凌通"], "4961": ["4961", "天銓"], "4967": ["4967", "十銓"], "4968": ["4968", "立積"], "5269": ["5269", "祥碩"], "5285": ["5285", "界霖"], "5471": ["5471", "松翰"], "6202": ["6202", "盛群"], "6239": ["6239", "力成"], "6243": ["6243", "迅杰"], "6257": ["6257", "矽格"], "6271": ["6271", "同欣電"], "6415": ["6415", "矽力", "矽力-KY"], "6451": ["6451", "訊芯", "訊芯-KY"], "6515": ["6515", "穎崴"], "6525": ["6525", "捷敏", "捷敏-KY"], "6531": ["6531", "愛普"], "6533": ["6533", "晶心科"], "6552": ["6552", "易華電"], "6573": ["6573", "虹揚-KY"], "6756": ["6756", "威鋒電子"], "8016": ["8016", "矽創"], "8028": ["8028", "昇陽半導體電子"], "8081": ["8081", "致心"], "8110": ["8110", "華東"], "8131": ["8131", "福懋科"], "8150": ["8150", "南茂"], "8261": ["8261", "富鼎"], "8271": ["8271", "宇瞻"], "安全性": [""], "成交價": [""], "成長力": [""], "成長率": ["YOY", "yoy"], "流動比": ["流動比率"], "負債比": [""], "速動比": ["速動比率"], "基本資料": ["基本資料", "基本資訊", "資料", "資訊"], "年成長率": [""], 
"每股盈餘": [""], "水泥類股": ["1101", "台泥", "1102", "亞泥", "1103", "嘉泥", "1104", "環泥", "1107", "建台", "1109", "信大", "1110", "東泥"], "營業利益": [""], "營業收入": ["營收", "營業收入"], "稅後淨利": [""], "財務報表": ["基本財報資料", "財務報表", "財報", "財報資料"], "運輸類股": ["2601", "益航", "2603", "長榮", "2604", "立榮", "2605", "新興", "2606", "裕民", "2607", "榮運", "2608", "大榮", "2609", "陽明", "2610", "華航", "2611", "志信", "2612", "中航", "2613", "中櫃", "2614", "遠森科", "2615", "萬海", "2616", "山隆", "2617", "台航", "2618", "長榮航"], "存貨周轉率": [""], "安全性分析": [""], "安全性指標": [""], "昨日收盤價": ["昨收", "昨日收盤價"], "流速動比率": [""], "現金流量比": [""], "利息保障倍數": [""], "營運周轉能力": [""], "資產年成長率": [""], "應收帳款週轉率": [""], "每股盈餘成長率": [""], "股東權益報酬率": [""]}
# Print the input/utterance pair of a matched pattern; for debugging and
# development only (gated by the module-level DEBUG_Safety flag).
def debugInfo(inputSTR, utterance):
    if DEBUG_Safety:
        print(f"[Safety] {inputSTR} ===> {utterance}")
def getResult(inputSTR, utterance, args, resultDICT):
    """Fill resultDICT according to the matched safety-related utterance.

    All safety patterns simply flag ``fun_safety``; one pattern sets a
    different key instead (kept as-is to preserve behavior).
    """
    debugInfo(inputSTR, utterance)
    fun_safety_utterances = {
        "[聯發科][安全]嗎",
        "[聯發科]償債能力",
        "[聯發科]利息保障倍數",
        "[聯發科]安全性",
        "[聯發科]安全性指標",
        "[聯發科]是不[是][安全]的股票",
        "[聯發科]流動比",
        "[聯發科]流速動比率",
        "[聯發科]現金流量比",
        "[聯發科]負債",
        "[聯發科]負債比",
        "[聯發科]速動比",
    }
    if utterance in fun_safety_utterances:
        resultDICT["fun_safety"] = True
    elif utterance == "[聯發科]安全性分析":
        # NOTE(review): this single pattern uses key "function" and value
        # "safty" (sic) unlike its siblings -- preserved verbatim.
        resultDICT["function"] = "safty"
    return resultDICT
8deaed9ff62aaa7ea6a951f3d5f95c2c6e59d8bf | 3,181 | py | Python | api/urls_v1.py | healthdesk-hackathon/backend | 93c43ff1aeff493a6b4d0034807f0434507ab05d | [
"MIT"
] | null | null | null | api/urls_v1.py | healthdesk-hackathon/backend | 93c43ff1aeff493a6b4d0034807f0434507ab05d | [
"MIT"
] | 32 | 2020-03-27T23:50:02.000Z | 2021-09-08T01:54:55.000Z | api/urls_v1.py | healthdesk-hackathon/backend | 93c43ff1aeff493a6b4d0034807f0434507ab05d | [
"MIT"
] | null | null | null | from django.urls import re_path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
from rest_framework.routers import DefaultRouter
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
from dashboard.views import DashboardView
from equipment.views import BedViewSet, BedTypeViewSet
from patient.views import PatientViewSet, PhoneViewSet, PersonalDataViewSet, NextOfKinContactViewSet, \
PatientIdentifierViewSet
from patient_tracker.views import AdmissionViewSet, HealthSnapshotViewSet, \
DischargeViewSet, DeceasedViewSet, \
OverallWellbeingViewSet, CommonSymptomsViewSet, GradedSymptomsViewSet, RelatedConditionsViewSet
from workflow.views import WorkflowViewSet
# URL namespace for this API version (used as "v1:<route-name>").
app_name = 'v1'
# drf-yasg view serving the generated OpenAPI schema and interactive docs.
# public=True + AllowAny: the schema is readable without authentication.
schema_view = get_schema_view(
    openapi.Info(
        title='Backend API',
        default_version='v1',
        description='You will find below all endpoints available for this API version',
        terms_of_service='https://www.google.com/policies/terms/',
        contact=openapi.Contact(email='contact@snippets.local'),
        license=openapi.License(name='BSD License'),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)
# One DefaultRouter collects all ViewSets; it also provides the API root view.
router = DefaultRouter()
### SETUP YOUR API URLS HERE ### # noqa: E266
router.register('workflow', WorkflowViewSet, basename='workflow')
router.register('patient', PatientViewSet, basename='patient')
router.register('phone', PhoneViewSet, basename='phone')
router.register('personal-data', PersonalDataViewSet, basename='personal-data')
router.register('patient-identifier', PatientIdentifierViewSet, basename='patient-identifier')
router.register('admission', AdmissionViewSet, basename='admission')
router.register('bed', BedViewSet, basename='bed')
router.register('bed-type', BedTypeViewSet, basename='bed-type')
router.register('health-snapshot', HealthSnapshotViewSet, basename='health-snapshot')
router.register('discharge', DischargeViewSet, basename='discharge')
router.register('deceased', DeceasedViewSet, basename='deceased')
router.register('overall-wellbeing', OverallWellbeingViewSet, basename='overall-wellbeing')
router.register('common-symptoms', CommonSymptomsViewSet, basename='common-symptoms')
router.register('graded-symptoms', GradedSymptomsViewSet, basename='graded-symptoms')
router.register('related-conditions', RelatedConditionsViewSet, basename='related-conditions')
router.register('next-of-kin-contacts', NextOfKinContactViewSet, basename='next-of-kin-contacts')
################################
# Router-generated routes plus JWT auth, dashboard, and schema/doc endpoints.
urlpatterns = router.urls + [
    re_path(r'token/$', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    re_path(r'token/refresh/$', TokenRefreshView.as_view(), name='token_refresh'),
    re_path(r'dashboard/$', DashboardView.as_view(), name='dashboard'),
    # Swagger related urls
    re_path(r'swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
    re_path(r'docs/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    re_path(r'redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
e1822f708d2d3059b430a300b7775103077560b6 | 2,696 | py | Python | examples/python/compression/client.py | warlock135/grpc | 81e13e4fa9c0cdf7dc131ce548e1604c895b738c | [
"Apache-2.0"
] | 36,552 | 2015-02-26T17:30:13.000Z | 2022-03-31T22:41:33.000Z | examples/python/compression/client.py | SanjanaSingh897/grpc | 2d858866eb95ce5de8ccc8c35189a12733d8ca79 | [
"Apache-2.0"
] | 23,536 | 2015-02-26T17:50:56.000Z | 2022-03-31T23:39:42.000Z | examples/python/compression/client.py | SanjanaSingh897/grpc | 2d858866eb95ce5de8ccc8c35189a12733d8ca79 | [
"Apache-2.0"
] | 11,050 | 2015-02-26T17:22:10.000Z | 2022-03-31T10:12:35.000Z | # Copyright 2019 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of compression on the client side with gRPC."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import grpc
from examples.protos import helloworld_pb2
from examples.protos import helloworld_pb2_grpc
_DESCRIPTION = 'A client capable of compression.'
# Maps the CLI compression choices onto gRPC compression enum values.
_COMPRESSION_OPTIONS = {
    "none": grpc.Compression.NoCompression,
    "deflate": grpc.Compression.Deflate,
    "gzip": grpc.Compression.Gzip,
}
_LOGGER = logging.getLogger(__name__)
def run_client(channel_compression, call_compression, target):
    """Issue one SayHello RPC to *target* using the given compression settings.

    channel_compression sets the channel-wide default; call_compression
    overrides it for this single call.
    """
    request = helloworld_pb2.HelloRequest(name='you')
    with grpc.insecure_channel(target, compression=channel_compression) as channel:
        greeter = helloworld_pb2_grpc.GreeterStub(channel)
        reply = greeter.SayHello(
            request,
            compression=call_compression,
            wait_for_ready=True,
        )
        print("Response: {}".format(reply))
def main():
    """Parse CLI options and run the compression demo client."""
    parser = argparse.ArgumentParser(description=_DESCRIPTION)
    # Channel-level default compression for every call on the channel.
    parser.add_argument('--channel_compression',
                        default='none',
                        nargs='?',
                        choices=_COMPRESSION_OPTIONS.keys(),
                        help='The compression method to use for the channel.')
    # Per-call compression, overriding the channel default.
    parser.add_argument(
        '--call_compression',
        default='none',
        nargs='?',
        choices=_COMPRESSION_OPTIONS.keys(),
        help='The compression method to use for an individual call.')
    parser.add_argument('--server',
                        default='localhost:50051',
                        type=str,
                        nargs='?',
                        help='The host-port pair at which to reach the server.')
    args = parser.parse_args()
    channel_compression = _COMPRESSION_OPTIONS[args.channel_compression]
    call_compression = _COMPRESSION_OPTIONS[args.call_compression]
    run_client(channel_compression, call_compression, args.server)


if __name__ == "__main__":
    logging.basicConfig()
    main()
| 35.946667 | 80 | 0.671736 |
9c28125fd9a1c9e1d0f95cb2c6232c8bc15b7c22 | 8,497 | py | Python | report.py | shadow1ng/Vxscan | 52d979130f6e139abe3937d1bdd22420afcc0ae8 | [
"Apache-2.0"
] | 2 | 2021-04-07T16:13:38.000Z | 2021-06-16T02:03:01.000Z | report.py | shadow1ng/Vxscan | 52d979130f6e139abe3937d1bdd22420afcc0ae8 | [
"Apache-2.0"
] | null | null | null | report.py | shadow1ng/Vxscan | 52d979130f6e139abe3937d1bdd22420afcc0ae8 | [
"Apache-2.0"
] | null | null | null | # coding = utf-8
import json
import re
import time
import sys
import logging
from lib.sqldb import Sqldb
# Name of the sqlite database (without extension) all report queries read from.
dbname = 'result'
def get_port(ipaddr):
    """Return all recorded ports for *ipaddr*, numerically sorted and
    ' , '-joined; None when no rows exist or on error.

    NOTE(review): ipaddr is interpolated straight into the SQL string --
    SQL-injectable if ever fed untrusted input; confirm whether Sqldb.query
    supports parameter binding.
    """
    try:
        rows = Sqldb(dbname).query(
            "select port from ports where ipaddr='{}'".format(ipaddr))
        if rows:
            # Sort numerically, then render back to (int-normalized) strings.
            numeric_ports = sorted(int(row[0]) for row in rows)
            return ' , '.join(str(port) for port in numeric_ports)
    except Exception as e:
        logging.exception(e)
def gen_webinfo():
    """Collect web fingerprint rows and shape them for the report's
    'webinfo' table; None on query failure."""
    tableData = []
    try:
        res = Sqldb(dbname).query(
            'select time,domain,waf,title,apps,server,address,ipaddr,os,pdns,reverseip from webinfo')
        for time, domain, waf, title, apps, server, address, ipaddr, os, pdns, reverseip in res:
            tableData.append({
                "time": time, "domain": domain, "waf": waf, "title": title,
                "apps": apps, "server": server, "address": address,
                "ipaddr": ipaddr, "ports": get_port(domain), "os": os,
                "reverseip": reverseip,
            })
        # "pdns" is read but intentionally not shown as a column.
        # ("tilteAlign" typo kept -- the report template expects that key.)
        columns = []
        for field, heading in (("time", "TIME"), ("domain", "domain"), ("waf", "waf"),
                               ("title", "title"), ("apps", "apps"), ("server", "server"),
                               ("address", "address"), ("ipaddr", "ipaddr"),
                               ("ports", "ports"), ("os", "os"), ("reverseip", "reverseip")):
            columns.append({"field": field, "title": heading, "width": 100,
                            "tilteAlign": "center", "columnAlign": "center"})
        return {
            "name": "webinfo",
            "tableData": tableData,
            "columns": columns
        }
    except TypeError:
        # Query returned None (no table / no rows): emit nothing.
        pass
    except Exception as e:
        logging.exception(e)
def gen_ports():
    """Collect port-scan rows and shape them for the report's 'Ports' table;
    None on query failure."""
    tableData = []
    try:
        for time, ipaddr, service, port, banner in Sqldb(dbname).query(
                'select time,ipaddr,service,port,banner from ports'):
            tableData.append({"time": time, "ip": ipaddr, "port": port,
                              "service": service, "banner": banner})
        columns = []
        for field, heading in (("time", "TIME"), ("ip", "IP"), ("port", "PORT"),
                               ("service", "SERVICE"), ("banner", "Banner")):
            columns.append({"field": field, "title": heading, "width": 100,
                            "tilteAlign": "center", "columnAlign": "center"})
        return {
            "name": "Ports",
            "tableData": tableData,
            "columns": columns
        }
    except TypeError:
        # Query returned None: emit nothing.
        pass
    except Exception as e:
        logging.exception(e)
def gen_urls():
    """Collect discovered-URL rows for the report's 'URLS' table.

    Unlike the sibling gen_* helpers, this one still returns an (empty)
    table when the query yields no rows -- preserved deliberately.
    """
    tableData = []
    try:
        res = Sqldb(dbname).query(
            'select time,domain,title,url,contype,rsp_len,rsp_code from urls')
        if res:
            for time, domain, title, url, contype, rsp_len, rsp_code in res:
                tableData.append({"time": time, "domain": domain, "title": title,
                                  "url": url, "contype": contype,
                                  "rsp_len": rsp_len, "rsp_code": rsp_code})
        columns = []
        for field, heading in (("time", "TIME"), ("domain", "Domain"),
                               ("title", "TITLE"), ("url", "URL"),
                               ("contype", "ConType"), ("rsp_len", "rsp_len"),
                               ("rsp_code", "rsp_code")):
            columns.append({"field": field, "title": heading, "width": 100,
                            "tilteAlign": "center", "columnAlign": "center"})
        return {
            "name": "URLS",
            "tableData": tableData,
            "columns": columns
        }
    except TypeError:
        pass
    except Exception as e:
        logging.exception(e)
def gen_vuln():
    """Collect vulnerability rows for the report's 'Vuln' table; None on
    query failure."""
    tableData = []
    try:
        for time, ip, vuln in Sqldb(dbname).query('select time, domain, vuln from vuln'):
            tableData.append({"time": time, "ip": ip, "vuln": vuln})
        columns = []
        for field, heading in (("time", "TIME"), ("ip", "IP"), ("vuln", "VULN")):
            columns.append({"field": field, "title": heading, "width": 100,
                            "tilteAlign": "center", "columnAlign": "center"})
        return {
            "name": "Vuln",
            "tableData": tableData,
            "columns": columns
        }
    except TypeError:
        # Query returned None: emit nothing.
        pass
    except Exception as e:
        logging.exception(e)
def gen_crawl():
    """Collect crawler leak rows for the report's 'Crawl' table; None on
    query failure."""
    tableData = []
    try:
        for time, domain, type, leaks in Sqldb(dbname).query(
                'select time, domain, type,leaks from crawl'):
            tableData.append({"time": time, "domain": domain,
                              "type": type, "leaks": leaks})
        columns = []
        for field, heading in (("time", "TIME"), ("domain", "DOMAIN"),
                               ("type", "TYPE"), ("leaks", "Leaks")):
            columns.append({"field": field, "title": heading, "width": 100,
                            "tilteAlign": "center", "columnAlign": "center"})
        return {
            "name": "Crawl",
            "tableData": tableData,
            "columns": columns
        }
    except TypeError:
        # Query returned None: emit nothing.
        pass
    except Exception as e:
        logging.exception(e)
def gener():
    """Render the HTML report from the collected tables.

    Collects every non-empty table dict, serializes them to JSON, and splices
    the result into the report template in place of its "'summary': {}"
    placeholder.
    """
    out = []
    for i in [gen_webinfo(), gen_urls(), gen_ports(), gen_vuln(), gen_crawl()]:
        if i:
            out.append(i)
    result = {"table": out}
    result = json.dumps(result)
    # Strip the outer braces so the fragment can be embedded inside the
    # template's existing object literal.
    result = re.sub(r'^{|}$', '', result)
    times = time.strftime("%Y%m%d%H%M%S", time.localtime())
    name = 'Vxscan_' + times + '.html'
    with open('report/report.htm', 'r', encoding='utf-8') as f, open(name, 'w') as f1:
        text = f.read()
        f1.write(text.replace("'summary': {}", result))
if __name__ == "__main__":
    # Disabled CLI override of the database name -- kept for reference.
    # if sys.argv[1]:
    #     dbname = sys.argv[1]
    #     dbname = re.sub('.db', '', dbname)
    gener()
| 41.857143 | 120 | 0.503354 |
5673c6adb148079d2ea45e97005271018b368218 | 7,872 | py | Python | safelifeEnv/lib/python3.6/site-packages/pyglet/image/codecs/dds.py | JohnBurden/safelife | 338c9c42aa94fed49f6d80151c37dd28ba6f7978 | [
"Apache-2.0"
] | 2 | 2019-07-17T13:00:32.000Z | 2019-07-17T13:09:30.000Z | safelifeEnv/lib/python3.6/site-packages/pyglet/image/codecs/dds.py | JohnBurden/safelife | 338c9c42aa94fed49f6d80151c37dd28ba6f7978 | [
"Apache-2.0"
] | null | null | null | safelifeEnv/lib/python3.6/site-packages/pyglet/image/codecs/dds.py | JohnBurden/safelife | 338c9c42aa94fed49f6d80151c37dd28ba6f7978 | [
"Apache-2.0"
] | null | null | null | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2019 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''DDS texture loader.
Reference: http://msdn2.microsoft.com/en-us/library/bb172993.aspx
'''
from __future__ import division
from __future__ import print_function
from builtins import range
from builtins import object
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
import struct
from pyglet.gl import *
from pyglet.image import CompressedImageData
from pyglet.image import codecs
from pyglet.image.codecs import s3tc
from pyglet.compat import izip_longest as compat_izip_longest
class DDSException(codecs.ImageDecodeException):
    """Raised when a file is not a valid or supported DDS texture."""
    # presumably lets other decoders take precedence when probing -- confirm
    # against codecs.ImageDecodeException usage.
    exception_priority = 0
# Bit-flag constants mirroring the DirectDraw DDS header structures.
# dwFlags of DDSURFACEDESC2
DDSD_CAPS = 0x00000001
DDSD_HEIGHT = 0x00000002
DDSD_WIDTH = 0x00000004
DDSD_PITCH = 0x00000008
DDSD_PIXELFORMAT = 0x00001000
DDSD_MIPMAPCOUNT = 0x00020000
DDSD_LINEARSIZE = 0x00080000
DDSD_DEPTH = 0x00800000
# ddpfPixelFormat of DDSURFACEDESC2
DDPF_ALPHAPIXELS = 0x00000001
DDPF_FOURCC = 0x00000004
DDPF_RGB = 0x00000040
# dwCaps1 of DDSCAPS2
DDSCAPS_COMPLEX = 0x00000008
DDSCAPS_TEXTURE = 0x00001000
DDSCAPS_MIPMAP = 0x00400000
# dwCaps2 of DDSCAPS2
DDSCAPS2_CUBEMAP = 0x00000200
DDSCAPS2_CUBEMAP_POSITIVEX = 0x00000400
DDSCAPS2_CUBEMAP_NEGATIVEX = 0x00000800
DDSCAPS2_CUBEMAP_POSITIVEY = 0x00001000
DDSCAPS2_CUBEMAP_NEGATIVEY = 0x00002000
DDSCAPS2_CUBEMAP_POSITIVEZ = 0x00004000
DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x00008000
DDSCAPS2_VOLUME = 0x00200000
class _filestruct(object):
    """Base class for fixed-layout binary structures read from the file.

    Subclasses define ``_fields`` as a list of (name, struct-format-code)
    pairs; the constructor unpacks ``data`` accordingly and stores each
    value as an instance attribute.
    """
    def __init__(self, data):
        if len(data) < self.get_size():
            raise DDSException('Not a DDS file')
        items = struct.unpack(self.get_format(), data)
        # fillvalue=None guards against a length mismatch between the field
        # descriptors and the unpacked items.
        for field, value in compat_izip_longest(self._fields,
                                                items,
                                                fillvalue=None):
            setattr(self, field[0], value)
    def __repr__(self):
        # Multi-line repr: one "name = value" per field, aligned under the
        # class name.
        name = self.__class__.__name__
        return '%s(%s)' % \
               (name, (', \n%s' % (' ' * (len(name) + 1))).join( \
                   ['%s = %s' % (field[0], repr(getattr(self, field[0]))) \
                    for field in self._fields]))
    @classmethod
    def get_format(cls):
        # '<' = little-endian, standard sizes, no padding.
        return '<' + ''.join([f[1] for f in cls._fields])
    @classmethod
    def get_size(cls):
        return struct.calcsize(cls.get_format())
class DDSURFACEDESC2(_filestruct):
    """The DDS file header: magic bytes plus the DDSURFACEDESC2 structure."""
    _fields = [
        ('dwMagic', '4s'),
        ('dwSize', 'I'),
        ('dwFlags', 'I'),
        ('dwHeight', 'I'),
        ('dwWidth', 'I'),
        ('dwPitchOrLinearSize', 'I'),
        ('dwDepth', 'I'),
        ('dwMipMapCount', 'I'),
        ('dwReserved1', '44s'),
        ('ddpfPixelFormat', '32s'),
        ('dwCaps1', 'I'),
        ('dwCaps2', 'I'),
        ('dwCapsReserved', '8s'),
        ('dwReserved2', 'I')
    ]
    def __init__(self, data):
        super(DDSURFACEDESC2, self).__init__(data)
        # Re-parse the raw 32-byte pixel-format blob into its own struct.
        self.ddpfPixelFormat = DDPIXELFORMAT(self.ddpfPixelFormat)
class DDPIXELFORMAT(_filestruct):
    """The DDPIXELFORMAT structure embedded in DDSURFACEDESC2."""
    _fields = [
        ('dwSize', 'I'),
        ('dwFlags', 'I'),
        ('dwFourCC', '4s'),
        ('dwRGBBitCount', 'I'),
        ('dwRBitMask', 'I'),
        ('dwGBitMask', 'I'),
        ('dwBBitMask', 'I'),
        ('dwRGBAlphaBitMask', 'I')
    ]
# Maps (FourCC, has_alpha) to the matching GL internal format and the s3tc
# decode function handed to CompressedImageData for software decoding.
_compression_formats = {
    (b'DXT1', False): (GL_COMPRESSED_RGB_S3TC_DXT1_EXT, s3tc.decode_dxt1_rgb),
    (b'DXT1', True): (GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, s3tc.decode_dxt1_rgba),
    (b'DXT3', False): (GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, s3tc.decode_dxt3),
    (b'DXT3', True): (GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, s3tc.decode_dxt3),
    (b'DXT5', False): (GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, s3tc.decode_dxt5),
    (b'DXT5', True): (GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, s3tc.decode_dxt5),
}
def _check_error():
    """Print any pending OpenGL error code (debug aid; never raises)."""
    err = glGetError()
    if err:
        print('GL error %d' % err)
class DDSImageDecoder(codecs.ImageDecoder):
    """pyglet image decoder for S3TC (DXT1/3/5) compressed .dds files."""
    def get_file_extensions(self):
        return ['.dds']
    def decode(self, file, filename):
        """Decode a DDS file into a CompressedImageData with mipmaps.

        Raises DDSException for malformed headers and for unsupported
        variants (volume textures, cube maps, uncompressed data).
        """
        header = file.read(DDSURFACEDESC2.get_size())
        desc = DDSURFACEDESC2(header)
        # dwSize is always 124 for a valid DDSURFACEDESC2.
        if desc.dwMagic != b'DDS ' or desc.dwSize != 124:
            raise DDSException('Invalid DDS file (incorrect header).')
        width = desc.dwWidth
        height = desc.dwHeight
        mipmaps = 1
        if desc.dwFlags & DDSD_DEPTH:
            raise DDSException('Volume DDS files unsupported')
        if desc.dwFlags & DDSD_MIPMAPCOUNT:
            mipmaps = desc.dwMipMapCount
        if desc.ddpfPixelFormat.dwSize != 32:
            raise DDSException('Invalid DDS file (incorrect pixel format).')
        if desc.dwCaps2 & DDSCAPS2_CUBEMAP:
            raise DDSException('Cubemap DDS files unsupported')
        # Only FourCC (block-compressed) payloads are handled.
        if not desc.ddpfPixelFormat.dwFlags & DDPF_FOURCC:
            raise DDSException('Uncompressed DDS textures not supported.')
        has_alpha = desc.ddpfPixelFormat.dwRGBAlphaBitMask != 0
        selector = (desc.ddpfPixelFormat.dwFourCC, has_alpha)
        if selector not in _compression_formats:
            raise DDSException('Unsupported texture compression %s' % \
                desc.ddpfPixelFormat.dwFourCC)
        dformat, decoder = _compression_formats[selector]
        # DXT1 blocks are 8 bytes; DXT3/DXT5 blocks are 16 bytes.
        if dformat == GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
            block_size = 8
        else:
            block_size = 16
        datas = []
        w, h = width, height
        # Read each mipmap level; dimensions halve per level, clamped to 1.
        for i in range(mipmaps):
            if not w and not h:
                break
            if not w:
                w = 1
            if not h:
                h = 1
            # Compressed size: number of 4x4 blocks (rounded up) * block size.
            size = ((w + 3) // 4) * ((h + 3) // 4) * block_size
            data = file.read(size)
            datas.append(data)
            w >>= 1
            h >>= 1
        image = CompressedImageData(width, height, dformat, datas[0],
                                    'GL_EXT_texture_compression_s3tc', decoder)
        # Remaining reads are mipmap levels 1..n.
        level = 0
        for data in datas[1:]:
            level += 1
            image.set_mipmap_data(level, data)
        return image
def get_decoders():
    """Entry point for pyglet's codec registry: available DDS decoders."""
    return [DDSImageDecoder()]
def get_encoders():
    """Writing DDS files is not supported, so no encoders are offered."""
    return list()
| 33.355932 | 80 | 0.629573 |
eb7d486b7a6d244fcb96af11a4877bffc6d630f8 | 19,178 | py | Python | packages/gtmcore/gtmcore/environment/componentmanager.py | jjwatts/gigantum-client | 88ce0475fb6880322bdd06d987c494e29064f278 | [
"MIT"
] | null | null | null | packages/gtmcore/gtmcore/environment/componentmanager.py | jjwatts/gigantum-client | 88ce0475fb6880322bdd06d987c494e29064f278 | [
"MIT"
] | null | null | null | packages/gtmcore/gtmcore/environment/componentmanager.py | jjwatts/gigantum-client | 88ce0475fb6880322bdd06d987c494e29064f278 | [
"MIT"
] | null | null | null | # Copyright (c) 2017 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import os
import yaml
from typing import (Any, List, Dict, Tuple)
import glob
from typing import Optional
from gtmcore.labbook import LabBook
from gtmcore.environment.repository import BaseRepository # type: ignore
from gtmcore.logging import LMLogger
from gtmcore.activity import ActivityStore, ActivityType, ActivityRecord, ActivityDetailType, ActivityDetailRecord, \
ActivityAction
from gtmcore.labbook.schemas import CURRENT_SCHEMA
logger = LMLogger.get_logger()
# Shell script written into each project's env dir as entrypoint.sh.
# At container start it creates a user matching the host UID (LOCAL_USER_ID)
# so files created in mounted volumes stay owned by the host user, then
# execs the container command as that user via gosu.
PROJECT_ENTRYPOINT = \
"""#!/bin/bash
USER_ID=${LOCAL_USER_ID:-9001}
echo "Starting with UID: $USER_ID"
useradd --shell /bin/bash -u $USER_ID -o -c "" -m giguser
export HOME=/home/giguser
# Setup /mnt/ as a safe place to put user runnable code
mkdir /mnt/labbook
chown -R giguser:root /mnt/labbook
# Setup docker sock to run as the user
chown giguser:root /run/docker.sock
chmod 777 /var/run/docker.sock
export JUPYTER_RUNTIME_DIR=/mnt/share/jupyter/runtime
chown -R giguser:root /mnt/share/
# Run the Docker Command
exec gosu giguser "$@"
"""
def strip_package_and_version(package_manager: str, package_str: str) -> Tuple[str, Optional[str]]:
    """Split a package spec into (package-name, version).

    pip-style managers use ``name==version``; apt and conda use
    ``name=version``. Only the FIRST separator is split on, so conda specs
    of the form ``name=version=build`` keep the build string attached to
    the version (the previous implementation dropped everything after the
    second separator).

    Args:
        package_manager: One of pip/pip2/pip3/apt/conda/conda2/conda3.
        package_str: The raw package specification string.

    Returns:
        Tuple of (name, version); version is None when no separator appears.

    Raises:
        ValueError: If ``package_manager`` is not a supported manager.
    """
    pip_managers = ('pip', 'pip2', 'pip3')
    single_eq_managers = ('apt', 'conda', 'conda2', 'conda3')
    if package_manager not in pip_managers + single_eq_managers:
        raise ValueError(f'Unsupported package manager: {package_manager}')
    separator = '==' if package_manager in pip_managers else '='
    name, sep, version = package_str.partition(separator)
    # partition returns an empty sep when the separator is absent.
    return name, (version if sep else None)
class ComponentManager(object):
    """Class to manage the Environment Components of a given LabBook.

    Each environment component (base image, package, custom Docker snippet)
    is persisted as a YAML file under ``.gigantum/env`` and every change is
    committed to the LabBook's git repo with an ActivityRecord.
    """
    DEFAULT_CUSTOM_DOCKER_NAME = 'user-custom-docker'

    def __init__(self, labbook: LabBook) -> None:
        """Constructor

        Args:
            labbook(LabBook): A gtmcore.labbook.LabBook instance for the LabBook you wish to manage
        """
        # Save labbook instance
        self.labbook = labbook
        # Create a base repo instance using the same config file
        self.bases = BaseRepository(config_file=self.labbook.client_config.config_file)
        # Make sure the LabBook's environment directory is ready to go
        self._initialize_env_dir()

    @property
    def env_dir(self) -> str:
        """The environment directory in the given labbook"""
        return os.path.join(self.labbook.root_dir, '.gigantum', 'env')

    def _initialize_env_dir(self) -> None:
        """Populate the environment directory if any content is missing.

        Returns:
            None
        """
        # Create/validate directory structure
        subdirs = ['base',
                   'package_manager',
                   'custom',
                   'docker']
        for subdir in subdirs:
            os.makedirs(os.path.join(self.env_dir, subdir), exist_ok=True)

        # Add entrypoint.sh file if missing; it must be committed so the
        # container build can rely on it.
        entrypoint_file = os.path.join(self.env_dir, 'entrypoint.sh')
        if os.path.exists(entrypoint_file) is False:
            with open(entrypoint_file, 'wt') as ef:
                ef.write(PROJECT_ENTRYPOINT)
            short_message = "Adding missing entrypoint.sh, required for container automation"
            self.labbook.git.add(entrypoint_file)
            self.labbook.git.commit(short_message)

    def add_docker_snippet(self, name: str, docker_content: List[str], description: Optional[str] = None) -> None:
        """Add a custom docker snippet to the environment (replacing custom dependency).

        Args:
            name: Name or identifier of the custom docker snippet
            docker_content: Content of the docker material (May make this a list of strings instead)
            description: Human-readable verbose description of what the snippet is intended to accomplish.

        Returns:
            None
        """
        if not name:
            raise ValueError('Argument `name` cannot be None or empty')
        # Name becomes part of the filename, so restrict it to safe characters.
        if not name.replace('-', '').replace('_', '').isalnum():
            raise ValueError('Argument `name` must be alphanumeric string (- and _ accepted)')
        if not docker_content:
            docker_content = []

        file_data = {
            'name': name,
            'timestamp_utc': datetime.datetime.utcnow().isoformat(),
            'description': description or "",
            'content': docker_content
        }
        docker_dir = os.path.join(self.labbook.root_dir, '.gigantum', 'env', 'docker')
        docker_file = os.path.join(docker_dir, f'{name}.yaml')
        os.makedirs(docker_dir, exist_ok=True)
        yaml_dump = yaml.safe_dump(file_data, default_flow_style=False)
        with open(docker_file, 'w') as df:
            df.write(yaml_dump)

        logger.info(f"Wrote custom Docker snippet `{name}` to {str(self.labbook)}")
        short_message = f"Wrote custom Docker snippet `{name}`"
        self.labbook.git.add(docker_file)
        commit = self.labbook.git.commit(short_message)

        # Record the snippet content in the activity feed.
        adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT, show=False, action=ActivityAction.CREATE)
        adr.add_value('text/plain', '\n'.join(docker_content))
        ar = ActivityRecord(ActivityType.ENVIRONMENT,
                            message=short_message,
                            show=True,
                            linked_commit=commit.hexsha,
                            tags=["environment", "docker", "snippet"])
        ar.add_detail_object(adr)
        ars = ActivityStore(self.labbook)
        ars.create_activity_record(ar)

    def remove_docker_snippet(self, name: str) -> None:
        """Remove a custom docker snippet

        Args:
            name: Name or identifer of snippet to remove

        Returns:
            None
        """
        docker_dir = os.path.join(self.labbook.root_dir, '.gigantum', 'env', 'docker')
        docker_file = os.path.join(docker_dir, f'{name}.yaml')
        if not os.path.exists(docker_file):
            raise ValueError(f'Docker snippet name `{name}` does not exist')

        # git.remove with keep_file=False also deletes the file from disk.
        self.labbook.git.remove(docker_file, keep_file=False)
        short_message = f"Removed custom Docker snippet `{name}`"
        logger.info(short_message)
        commit = self.labbook.git.commit(short_message)

        adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT, show=False, action=ActivityAction.DELETE)
        adr.add_value('text/plain', short_message)
        ar = ActivityRecord(ActivityType.ENVIRONMENT,
                            message=short_message,
                            show=False,
                            linked_commit=commit.hexsha,
                            tags=["environment", "docker", "snippet"])
        ar.add_detail_object(adr)
        ars = ActivityStore(self.labbook)
        ars.create_activity_record(ar)

    def add_packages(self, package_manager: str, packages: List[dict],
                     force: bool = False, from_base: bool = False) -> None:
        """Add a new yaml file describing the new package and its context to the labbook.

        Args:
            package_manager: The package manager (eg., "apt" or "pip3")
            packages: A dictionary of packages to install (package & version are main keys needed)
            force: Force overwriting a component if it already exists (e.g. you want to update the version)
            from_base: If a package in a base image, not deletable. Otherwise, can be deleted by LB user.

        Returns:
            None
        """
        if not package_manager:
            raise ValueError('Argument package_manager cannot be None or empty')

        # Create activity record (commit hash and message filled in below,
        # once all package files are written).
        ar = ActivityRecord(ActivityType.ENVIRONMENT,
                            show=True,
                            message="",
                            linked_commit="",
                            tags=["environment", 'package_manager', package_manager])

        update_cnt = 0
        add_cnt = 0
        for pkg in packages:
            version_str = f'"{pkg["version"]}"' if pkg["version"] else 'latest'
            yaml_lines = ['# Generated on: {}'.format(str(datetime.datetime.now())),
                          'manager: "{}"'.format(package_manager),
                          'package: "{}"'.format(pkg["package"]),
                          'version: {}'.format(version_str),
                          f'from_base: {str(from_base).lower()}',
                          f'schema: {CURRENT_SCHEMA}']
            yaml_filename = '{}_{}.yaml'.format(package_manager, pkg["package"])
            package_yaml_path = os.path.join(self.env_dir, 'package_manager', yaml_filename)

            # Check if package already exists
            if os.path.exists(package_yaml_path):
                if force:
                    # You are updating, since force is set and package already exists.
                    logger.warning("Updating package file at {}".format(package_yaml_path))
                    detail_msg = "Update {} managed package: {} {}".format(package_manager, pkg["package"], version_str)
                    adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT, show=False, action=ActivityAction.EDIT)
                    update_cnt += 1
                else:
                    raise ValueError("The package {} already exists in this LabBook.".format(pkg["package"]) +
                                     " Use `force` to overwrite")
            else:
                add_cnt += 1
                detail_msg = "Add {} managed package: {} {}".format(package_manager, pkg["package"], version_str)
                adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT, show=False, action=ActivityAction.CREATE)

            # Write the YAML to the file
            with open(package_yaml_path, 'w') as package_yaml_file:
                package_yaml_file.write(os.linesep.join(yaml_lines))

            # Create activity record
            adr.add_value('text/plain', detail_msg)
            ar.add_detail_object(adr)
            logger.info("Added package {} to labbook at {}".format(pkg["package"], self.labbook.root_dir))

        # Set activity message
        ar_msg = ""
        if add_cnt > 0:
            ar_msg = f"Added {add_cnt} {package_manager} package(s). "
        if update_cnt > 0:
            ar_msg = f"{ar_msg}Updated {update_cnt} {package_manager} package(s)"

        # Add to git
        self.labbook.git.add_all(self.env_dir)
        commit = self.labbook.git.commit(ar_msg)
        ar.linked_commit = commit.hexsha
        ar.message = ar_msg

        # Store
        ars = ActivityStore(self.labbook)
        ars.create_activity_record(ar)

    def remove_packages(self, package_manager: str, package_names: List[str]) -> None:
        """Remove yaml files describing a package and its context to the labbook.

        Args:
            package_manager: The package manager (eg., "apt" or "pip3")
            package_names: A list of packages to uninstall

        Returns:
            None
        """
        # Create activity record (commit info filled in after the removals).
        ar = ActivityRecord(ActivityType.ENVIRONMENT,
                            message="",
                            show=True,
                            linked_commit="",
                            tags=["environment", 'package_manager', package_manager])

        for pkg in package_names:
            yaml_filename = '{}_{}.yaml'.format(package_manager, pkg)
            package_yaml_path = os.path.join(self.env_dir, 'package_manager', yaml_filename)

            # Check for package to exist
            if not os.path.exists(package_yaml_path):
                raise ValueError(f"{package_manager} installed package {pkg} does not exist.")

            # Check to make sure package isn't from the base. You cannot remove packages from the base yet.
            with open(package_yaml_path, 'rt') as cf:
                package_data = yaml.safe_load(cf)
            if not package_data:
                raise IOError("Failed to load package description")
            if package_data['from_base'] is True:
                raise ValueError("Cannot remove a package installed in the Base")

            # Delete the yaml file, which on next Dockerfile gen/rebuild will remove the dependency
            os.remove(package_yaml_path)
            if os.path.exists(package_yaml_path):
                # Fixed: dropped the pointless f-prefix on a placeholder-free string.
                raise ValueError("Failed to remove package.")
            self.labbook.git.remove(package_yaml_path)

            # Create detail record
            adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT, show=False, action=ActivityAction.DELETE)
            adr.add_value('text/plain', f"Removed {package_manager} managed package: {pkg}")
            ar.add_detail_object(adr)
            logger.info(f"Removed {package_manager} managed package: {pkg}")

        # Add to git
        short_message = f"Removed {len(package_names)} {package_manager} managed package(s)"
        commit = self.labbook.git.commit(short_message)
        ar.linked_commit = commit.hexsha
        ar.message = short_message

        # Store
        ars = ActivityStore(self.labbook)
        ars.create_activity_record(ar)

    def add_base(self, repository: str, base_id: str, revision: int) -> None:
        """Method to add a base to a LabBook's environment

        Args:
            repository(str): The Environment Component repository the component is in
            base_id(str): The name of the component
            revision(int): The revision of the base to install

        Returns:
            None
        """
        if not repository:
            raise ValueError('repository cannot be None or empty')
        if not base_id:
            raise ValueError('component cannot be None or empty')

        # Get the base
        base_data = self.bases.get_base(repository, base_id, revision)
        # Fixed: removed a stray third argument to format() (two placeholders).
        # NOTE(review): the revision is deliberately NOT part of the filename,
        # so only one revision of a base can be installed at a time.
        base_filename = "{}_{}.yaml".format(repository, base_id)
        base_final_path = os.path.join(self.env_dir, 'base', base_filename)

        short_message = "Added base: {}".format(base_id)
        if os.path.exists(base_final_path):
            # Fixed: the message previously had a bare `{}` (missing .format).
            raise ValueError("The base {} already exists in this project".format(base_id))

        with open(base_final_path, 'wt') as cf:
            cf.write(yaml.safe_dump(base_data, default_flow_style=False))

        # Register the base's packages, grouped per package manager.
        for manager in base_data['package_managers']:
            for p_manager in manager.keys():
                if manager[p_manager]:
                    # Fixed: reset the list per package manager so packages
                    # from one manager are not re-registered under the next
                    # (previous code accumulated across managers in an entry).
                    packages = list()
                    for pkg in manager[p_manager]:
                        pkg_name, pkg_version = strip_package_and_version(p_manager, pkg)
                        packages.append({"package": pkg_name, "version": pkg_version, "manager": p_manager})
                    self.add_packages(package_manager=p_manager, packages=packages,
                                      force=True, from_base=True)

        self.labbook.git.add(base_final_path)
        commit = self.labbook.git.commit(short_message)
        logger.info(f"Added base from {repository}: {base_id} rev{revision}")

        # Create a ActivityRecord
        long_message = "Added base {}\n".format(base_id)
        long_message = "{}\n{}\n\n".format(long_message, base_data['description'])
        long_message = "{} - repository: {}\n".format(long_message, repository)
        long_message = "{} - component: {}\n".format(long_message, base_id)
        long_message = "{} - revision: {}\n".format(long_message, revision)

        # Create detail record
        adr = ActivityDetailRecord(ActivityDetailType.ENVIRONMENT, show=False, action=ActivityAction.CREATE)
        adr.add_value('text/plain', long_message)

        # Create activity record
        ar = ActivityRecord(ActivityType.ENVIRONMENT,
                            message=short_message,
                            linked_commit=commit.hexsha,
                            tags=["environment", "base"],
                            show=True)
        ar.add_detail_object(adr)

        # Store
        ars = ActivityStore(self.labbook)
        ars.create_activity_record(ar)

    def get_component_list(self, component_class: str) -> List[Dict[str, Any]]:
        """Method to get the YAML contents for a given component class

        Args:
            component_class(str): The class of component you want to access

        Returns:
            list
        """
        # Get component dir
        component_dir = os.path.join(self.env_dir, component_class)
        if not os.path.exists(component_dir):
            raise ValueError("No components found for component class: {}".format(component_class))

        # Get all YAML files in dir
        yaml_files = glob.glob(os.path.join(component_dir, "*.yaml"))
        yaml_files = sorted(yaml_files)
        data = []

        # Read YAML files and write data to dictionary
        for yf in yaml_files:
            with open(yf, 'rt') as yf_file:
                yaml_data = yaml.safe_load(yf_file)
                data.append(yaml_data)
        # Bases have an 'id' key; package files are keyed by 'manager'.
        return sorted(data, key=lambda elt: elt.get('id') or elt.get('manager'))

    @property
    def base_fields(self) -> Dict[str, Any]:
        """Load the base data for this LabBook from disk"""
        base_yaml_file = glob.glob(os.path.join(self.env_dir, 'base', '*.yaml'))
        # Exactly one base must be configured per project.
        if len(base_yaml_file) != 1:
            raise ValueError(f"Project misconfigured. Found {len(base_yaml_file)} base configurations.")

        # If you got 1 base, load from disk
        with open(base_yaml_file[0], 'rt') as bf:
            data = yaml.safe_load(bf)
        return data
| 41.691304 | 120 | 0.619616 |
0f37b3cda1a4afd9101000d6ecdabbf846c8e761 | 6,079 | py | Python | viberio/handlers/users/my_parcels_out.py | bostud/Viber_bot | 076113433837aab942f86a0f73275c50037ed8f9 | [
"MIT"
] | null | null | null | viberio/handlers/users/my_parcels_out.py | bostud/Viber_bot | 076113433837aab942f86a0f73275c50037ed8f9 | [
"MIT"
] | null | null | null | viberio/handlers/users/my_parcels_out.py | bostud/Viber_bot | 076113433837aab942f86a0f73275c50037ed8f9 | [
"MIT"
] | null | null | null | from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.types import CallbackQuery, ParseMode
import re
from aiogram.utils.markdown import hbold
from meest_api.get_payment_data import get_payment_data
from meest_api.location import search_my_parcels
from meest_api.pay import pay_by_portmone_my_parcels
from meest_api.poshtomatApi import get_parcel_info, parcel_debt_new
from functions.functions import if_data_in_response
from keyboards.default.share import phone_share_kb
from keyboards.inline.my_parcels import my_parcels_callback_kb
from loader import dp, db
from meest_api.appApi import parcels_for_delivery_out
from asgiref.sync import sync_to_async
@dp.callback_query_handler(text="my_parcels")  # Handler for the inline button from the "create parcel" flow
async def my_parcels(call: CallbackQuery):
    """Show the filter keyboard for the user's parcels."""
    await call.message.delete_reply_markup()
    await call.message.edit_text(f"{call.from_user.first_name} ,<b>оберіть фільтр для Ваших відправлень</b>",
                                 reply_markup=my_parcels_callback_kb)
# Handler for outgoing ("out") parcels
@dp.callback_query_handler(lambda query: query.data == "out")
async def parcel_modes(call: CallbackQuery, state: FSMContext):
    """List the user's outgoing parcels, or offer to create one if none exist.

    Requires a registered phone number; otherwise prompts for phone sharing.
    """
    user_id = call.from_user.id
    data = call.data
    await state.update_data(button_mode=data)
    print(data)
    await call.answer(cache_time=60)
    user_data = await if_data_in_response(await db.get_user_data(user_id))
    # No stored profile -> ask the user to share a phone number first.
    if user_data is False:
        return await call.message.answer(
            call.from_user.first_name + hbold(", для початку роботи з сервісом поділіться Вашим номером телефону\n"
                                              "за допомогою кнопки знизу 👇"), reply_markup=phone_share_kb)
    else:
        print(user_data)
        user_phone = user_data['phone_number']
        # Meest API call is synchronous; run it off the event loop.
        result = await sync_to_async(parcels_for_delivery_out)(user_phone, data)
        if result is False:
            # await call.message.delete_reply_markup()
            await call.message.delete()
            create_parcel = types.InlineKeyboardMarkup(row_width=2)
            create_parcel.add(types.InlineKeyboardButton(text="Створити", callback_data="parcel_cre"))
            create_parcel.add(types.InlineKeyboardButton(text="До головного меню", callback_data="Cancel3"))
            await call.message.answer(call.from_user.first_name + hbold(", створених відправлень ще не має. Створити?"),
                                      reply_markup=create_parcel)
        else:
            for i in result:
                # Parcel with outstanding debt: offer an invoice button too.
                if i['debt_cost'] != 0:
                    keyboard = types.InlineKeyboardMarkup(row_width=2)
                    keyboard.insert(types.InlineKeyboardButton(text="Детальний трекінг", callback_data=i['num']))
                    keyboard.insert(types.InlineKeyboardButton(text="Сформувати рахунок", callback_data=i['pay_num_out']))
                    keyboard.insert(types.InlineKeyboardButton(text="Підтримка", url='t.me/MeestSupport_bot'))
                    # keyboard.insert(types.InlineKeyboardButton(text="Обрати час доставки(розробка)", callback_data="in"))
                    keyboard.add(types.InlineKeyboardButton(text="Сховати", callback_data="die"))
                    await call.message.answer(
                        i['text'],
                        parse_mode=types.ParseMode.HTML, reply_markup=keyboard)
                # Fully paid parcel: same card without the invoice button.
                elif i['debt_cost'] == 0:
                    keyboard = types.InlineKeyboardMarkup(row_width=2)
                    keyboard.insert(types.InlineKeyboardButton(text=f"Детальний трекінг", callback_data=i['num']))
                    keyboard.insert(types.InlineKeyboardButton(text="Підтримка", url='t.me/MeestSupport_bot'))
                    # keyboard.insert(types.InlineKeyboardButton(text="Обрати час доставки(розробка)", callback_data="in"))
                    keyboard.add(types.InlineKeyboardButton(text="Сховати", callback_data="die"))
                    await call.message.answer(
                        i['text'],
                        parse_mode=types.ParseMode.HTML, reply_markup=keyboard)
            await call.message.answer(call.from_user.first_name + hbold(", оберіть фільтр для Ваших відправлень"),
                                      reply_markup=my_parcels_callback_kb)
    # await state.reset_state(with_data=False)
# "Pay" button handler for an outgoing parcel
@dp.callback_query_handler(lambda query: re.match(r"pay_data_out", query.data))
async def pay_out(call: CallbackQuery, state: FSMContext):
    """Build a Portmone payment link for the parcel and show a pay button."""
    data = call.data
    print(data)
    # Callback data is "pay_data_out<parcel number>"; strip the prefix.
    parcel_num = data.replace('pay_data_out', '')
    get_data = await sync_to_async(get_payment_data)(parcel_num, "")
    print(get_data)
    split_pay = get_data['split_pay']
    print(split_pay)
    shipment_uid = get_data['shipment_uid']
    # pay_type = get_data['type']
    description = get_data['description']
    total_amount = parcel_debt_new(shipment_uid)
    total_to_pay = total_amount['total']
    pay_link = await sync_to_async(pay_by_portmone_my_parcels)(parcel_num, total_to_pay,
                                                               split_pay, shipment_uid, description)
    print(pay_link)
    pay_button = types.InlineKeyboardMarkup(row_width=2)
    pay_button.row(
        types.InlineKeyboardButton(text=f"💸 Оплатити {total_to_pay} грн", callback_data="Cancel3",
                                   url=pay_link))
    pay_button.row(types.InlineKeyboardButton(text="Сховати", callback_data="Cancel3"))
    await call.message.edit_reply_markup(reply_markup=pay_button)
    await state.finish()
@dp.callback_query_handler(lambda query: re.match(r"!><", query.data))
async def in_mode(call: CallbackQuery, state: FSMContext):
    """Show detailed tracking info for a parcel selected via callback data."""
    data = call.data
    state_data = await state.get_data()
    print(state_data)
    # Callback data is "!><<parcel number>"; strip the marker prefix.
    num = data.replace('!><', '')
    print(num)
    parcel_info = await sync_to_async(get_parcel_info)(num)
    result = ("\n".join(search_my_parcels(num)))
    keyboard = types.InlineKeyboardMarkup()
    keyboard.add(types.InlineKeyboardButton(text=f"Сховати", callback_data="Cancel3"))
    await call.message.edit_text(parcel_info + result, parse_mode=types.ParseMode.HTML, reply_markup=keyboard)
    await state.finish()
| 51.516949 | 119 | 0.699128 |
1e42cfc844707e26129462d7dff1d25b2a28b4bd | 629 | py | Python | idiomatic_python/02.working_with_data/property_to_future_proof_class_implementation.py | tonper19/PythonDemos | 633a40e282049e511fd965c0afe104e775a2f526 | [
"MIT"
] | null | null | null | idiomatic_python/02.working_with_data/property_to_future_proof_class_implementation.py | tonper19/PythonDemos | 633a40e282049e511fd965c0afe104e775a2f526 | [
"MIT"
] | null | null | null | idiomatic_python/02.working_with_data/property_to_future_proof_class_implementation.py | tonper19/PythonDemos | 633a40e282049e511fd965c0afe104e775a2f526 | [
"MIT"
] | null | null | null | class Product():
TAX_RATE = 0.21
def __init__(self, name, price):
self.name = name
self._price = price
@property
def price(self):
# now if we need to change how price is calculated, we can do it
# here (or in the "setter" and __init__)
return self._price + self._price * Product.TAX_RATE
@price.setter
def price(self, value):
# The "setter" function must have the same name as the property
self._price = value
def main():
    """Demo entry point: print the tax-inclusive price of a sample product."""
    laptop = Product("Macbook", 4200)
    print(f"The price of {laptop.name} is {laptop.price}")


if __name__ == "__main__":
    main()
2be61eebd464f96a6a07ef1d4a799c46fc806433 | 965 | py | Python | awardapp/urls.py | savannah8/awards | 484d2c225eaf2ee76213e64a565af89fc5c7f08c | [
"Unlicense"
] | null | null | null | awardapp/urls.py | savannah8/awards | 484d2c225eaf2ee76213e64a565af89fc5c7f08c | [
"Unlicense"
] | 5 | 2020-06-05T21:36:48.000Z | 2021-09-08T01:05:56.000Z | awardapp/urls.py | savannah8/awards | 484d2c225eaf2ee76213e64a565af89fc5c7f08c | [
"Unlicense"
] | null | null | null | from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns=[
url(r'^$',views.home,name='homePage'),
url(r'^upload$',views.upload,name='upload'),
url(r'^ratecontent/(\d+)',views.add_content, name='ratecontent'),
url(r'^ratedesign/(\d+)',views.add_design, name='ratedesign'),
url(r'^rateusability/(\d+)',views.add_usability, name='rateusability'),
url(r'^profile/(\d+)',views.profile,name='profile'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^api/profile/$', views.ProfileList.as_view()),
url(r'^api/project/$', views.ProjectList.as_view()),
url(r'api/project/project-id/(?P<pk>[0-9]+)/$',views.ProjectDescription.as_view()),
url(r'api/profile/profile-id/(?P<pk>[0-9]+)/$',views.ProfileDescription.as_view()),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root= settings.MEDIA_ROOT) | 45.952381 | 87 | 0.693264 |
3cc8e16bc833526b92d34248a4919f1696546e48 | 1,097 | py | Python | demos/RocketDemo.py | Venator43/Tamagoci | 1a5aefa7a05ac36dee5c30878c12e5cc36473ec6 | [
"MIT"
] | null | null | null | demos/RocketDemo.py | Venator43/Tamagoci | 1a5aefa7a05ac36dee5c30878c12e5cc36473ec6 | [
"MIT"
] | null | null | null | demos/RocketDemo.py | Venator43/Tamagoci | 1a5aefa7a05ac36dee5c30878c12e5cc36473ec6 | [
"MIT"
] | null | null | null | from pygame_functions import *
screenSize(1000, 750)
setBackgroundImage("images/stars.png")
rocket = makeSprite("images/rocket1.png")
addSpriteImage(rocket,"images/rocket2a.png")
xPos = 500
yPos = 320
xSpeed = 0
ySpeed = 0
moveSprite(rocket, xPos, yPos)
showSprite(rocket)
while True:
if keyPressed("up"):
changeSpriteImage(rocket,1)
transformSprite(rocket, 0,1)
ySpeed -= 2
elif keyPressed("down"):
changeSpriteImage(rocket,1)
transformSprite(rocket, 180,1)
ySpeed += 2
elif keyPressed("right"):
changeSpriteImage(rocket,1)
transformSprite(rocket, 90,1)
xSpeed += 2
elif keyPressed("left"):
changeSpriteImage(rocket,1)
transformSprite(rocket, -90,1)
xSpeed -= 2
else:
changeSpriteImage(rocket,0)
xPos += xSpeed
if xPos > 960:
xPos = -100
elif xPos < -100:
xPos = 960
yPos += ySpeed
if yPos > 700:
yPos = -100
elif yPos < -100:
yPos = 700
moveSprite(rocket, xPos, yPos)
tick(30)
endWait()
| 18.913793 | 44 | 0.604376 |
7a1c6e88f75ecf302c14b6cfed08effa7d7dee92 | 2,690 | py | Python | main.py | AkagiYui/AzurLaneTool | f00fa6e5c6371db72ee399d7bd178a81f39afd8b | [
"Apache-2.0"
] | null | null | null | main.py | AkagiYui/AzurLaneTool | f00fa6e5c6371db72ee399d7bd178a81f39afd8b | [
"Apache-2.0"
] | null | null | null | main.py | AkagiYui/AzurLaneTool | f00fa6e5c6371db72ee399d7bd178a81f39afd8b | [
"Apache-2.0"
] | null | null | null | import getopt
import logging
import os
import signal
import sys
from time import sleep
from threading import Thread
import colorlog as colorlog
import main_tool
from AyAdb import AyAdb
from table_constant import *
from config import config
import global_info
def signal_handler(sign, _):
if sign == signal.SIGINT or sign == signal.SIGTERM:
global_info.trigger_exit(global_info.exit_code)
def main_exit():
# 退出准备
device.disconnect() # 断开连接
sleep(0.01)
logger.info('程序退出,欢迎下次使用')
sys.exit(global_info.exit_code)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# 读取启动参数
try:
opts, args = getopt.getopt(sys.argv[1:], 'd', ['debug'])
except getopt.GetoptError:
print('参数错误')
sys.exit(1)
for opt, arg in opts:
if opt in ('-d', '--debug'):
global_info.debug_mode = True
if config.debug_mode:
global_info.debug_mode = True
# 读取配置
config.reload()
# 初始化日志
if global_info.debug_mode or config.debug_mode:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logger = logging.getLogger(MAIN_NAME)
logger.setLevel(log_level)
console_handler = logging.StreamHandler()
console_handler.setLevel(-1000)
console_handler.setFormatter(colorlog.ColoredFormatter(
fmt='%(log_color)s%(asctime)s [%(levelname)8s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
log_colors=LOG_COLORS_CONFIG
))
if not logger.handlers:
logger.addHandler(console_handler)
logger.info(f'{MAIN_NAME} v{MAIN_VERSION_TEXT}')
logger.debug(f'version: {MAIN_VERSION}')
# 创建临时文件夹
if not os.path.exists('./temp'):
logger.debug('创建临时文件夹')
os.makedirs('./temp')
if not os.path.exists('./temp'):
logger.error('创建临时文件夹失败')
sys.exit(1)
# 初始化设备
device = AyAdb(config.adb_host, int(config.adb_port), PATH_ADBKEY)
if not device.connect():
logger.error('设备连接失败')
global_info.trigger_exit(1)
logger.info('设备连接成功')
# 设备基础设置
device.settings.show_touches(config.show_touches)
device.settings.pointer_location(config.show_pointer_location)
# 启动脚本
main_thread = Thread(target=main_tool.script_start, daemon=True, args=(device,))
logger.info('启动脚本')
main_thread.start()
# 等待退出
logger.debug('等待退出')
while not global_info.exiting:
if global_info.time_to_exit or not main_thread.is_alive():
exiting = True
main_exit()
exit(global_info.exit_code)
sleep(0.5)
exit(global_info.exit_code)
| 25.619048 | 84 | 0.660223 |
85f133ad2370b698d411586524a9cb8d3444cde6 | 594 | py | Python | Oauth/app/__init__.py | 837477/Oauth | 8d01a84d71563d9d510950cdb77ae67de0da2a40 | [
"MIT"
] | 2 | 2022-01-09T09:26:50.000Z | 2022-01-16T15:56:10.000Z | Oauth/app/__init__.py | 837477/Oauth | 8d01a84d71563d9d510950cdb77ae67de0da2a40 | [
"MIT"
] | null | null | null | Oauth/app/__init__.py | 837477/Oauth | 8d01a84d71563d9d510950cdb77ae67de0da2a40 | [
"MIT"
] | 1 | 2022-03-02T05:30:13.000Z | 2022-03-02T05:30:13.000Z | from fastapi import FastAPI
from app import routers
from app.routers import (templates,
google,
kakao,
naver,
facebook)
def create_app():
"""
Application Creating
"""
app = FastAPI(
docs_url="/routers"
)
routers.init_app(app)
# Routers Settings
app.include_router(templates.router)
app.include_router(google.router)
app.include_router(kakao.router)
app.include_router(naver.router)
app.include_router(facebook.router)
return app
| 21.214286 | 40 | 0.579125 |
cd5cf4ae930efc82ee0e4ce8313ad8a44936696a | 196 | py | Python | Configuration/StandardSequences/python/AlCaRecoStreamsMC_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Configuration/StandardSequences/python/AlCaRecoStreamsMC_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Configuration/StandardSequences/python/AlCaRecoStreamsMC_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | # specialize the AlCa sequences for MC
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.AlCaRecoStreams_cff import *
hcalDigiAlCaMB.InputLabel = 'rawDataCollector'
| 24.5 | 65 | 0.841837 |
dfc10ed4a2ab9a56161cfd41b92bf2487a38139d | 6,791 | py | Python | resources/lib/services/msl/msl_request_builder.py | Doctor-Eggs/plugin.video.netflix | 1372fd29c63ae4b933dcf7b2643c4483b4bda70b | [
"MIT"
] | 1 | 2020-10-21T21:30:11.000Z | 2020-10-21T21:30:11.000Z | resources/lib/services/msl/msl_request_builder.py | jurialmunkey/plugin.video.netflix | 0e599eb3465e67eb082d8b4048527ae32bfb0608 | [
"MIT"
] | null | null | null | resources/lib/services/msl/msl_request_builder.py | jurialmunkey/plugin.video.netflix | 0e599eb3465e67eb082d8b4048527ae32bfb0608 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
MSL request building
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import json
import base64
import random
import time
from resources.lib.globals import G
import resources.lib.common as common
from resources.lib.utils.logging import measure_exec_time_decorator
class MSLRequestBuilder(object):
"""Provides mechanisms to create MSL requests"""
def __init__(self):
self.current_message_id = None
self.rndm = random.SystemRandom()
# Set the Crypto handler
if common.get_system_platform() == 'android':
from .android_crypto import AndroidMSLCrypto as MSLCrypto
else:
from .default_crypto import DefaultMSLCrypto as MSLCrypto
self.crypto = MSLCrypto()
@staticmethod
def build_request_data(url, params=None, echo=''):
"""Create a standard request data"""
timestamp = int(time.time() * 10000)
request_data = {
'version': 2,
'url': url,
'id': timestamp,
'languages': [G.LOCAL_DB.get_profile_config('language')],
'params': params,
'echo': echo
}
return request_data
@measure_exec_time_decorator(is_immediate=True)
def msl_request(self, data, esn, auth_data):
"""Create an encrypted MSL request"""
return (json.dumps(self._signed_header(esn, auth_data)) +
json.dumps(self._encrypted_chunk(data, esn)))
@measure_exec_time_decorator(is_immediate=True)
def handshake_request(self, esn):
"""Create a key handshake request"""
header = json.dumps({
'entityauthdata': {
'scheme': 'NONE',
'authdata': {'identity': esn}},
'headerdata':
base64.standard_b64encode(
self._headerdata(auth_data={}, is_handshake=True).encode('utf-8')).decode('utf-8'),
'signature': ''
}, sort_keys=True)
payload = json.dumps(self._encrypted_chunk(envelope_payload=False))
return header + payload
def _signed_header(self, esn, auth_data):
encryption_envelope = self.crypto.encrypt(self._headerdata(auth_data=auth_data, esn=esn), esn)
return {
'headerdata': base64.standard_b64encode(
encryption_envelope.encode('utf-8')).decode('utf-8'),
'signature': self.crypto.sign(encryption_envelope),
'mastertoken': self.crypto.mastertoken,
}
def _headerdata(self, auth_data, esn=None, compression=None, is_handshake=False):
"""
Function that generates a MSL header dict
:return: The base64 encoded JSON String of the header
"""
self.current_message_id = self.rndm.randint(0, pow(2, 52))
header_data = {
'messageid': self.current_message_id,
'renewable': True,
'capabilities': {
'languages': [G.LOCAL_DB.get_value('locale_id')],
'compressionalgos': [compression] if compression else [] # GZIP, LZW, Empty
}
}
if is_handshake:
header_data['keyrequestdata'] = self.crypto.key_request_data()
else:
header_data['sender'] = esn
self._add_auth_info(header_data, auth_data)
return json.dumps(header_data)
def _encrypted_chunk(self, data='', esn=None, envelope_payload=True):
if data:
data = base64.standard_b64encode(json.dumps(data).encode('utf-8')).decode('utf-8')
payload = json.dumps({
'messageid': self.current_message_id,
'data': data,
'sequencenumber': 1,
'endofmsg': True
})
if envelope_payload:
payload = self.crypto.encrypt(payload, esn)
return {
'payload': base64.standard_b64encode(payload.encode('utf-8')).decode('utf-8'),
'signature': self.crypto.sign(payload) if envelope_payload else '',
}
def decrypt_header_data(self, data, enveloped=True):
"""Decrypt a message header"""
header_data = json.loads(base64.standard_b64decode(data))
if enveloped:
init_vector = base64.standard_b64decode(header_data['iv'])
cipher_text = base64.standard_b64decode(header_data['ciphertext'])
return json.loads(self.crypto.decrypt(init_vector, cipher_text))
return header_data
def _add_auth_info(self, header_data, auth_data):
"""User authentication identifies the application user associated with a message"""
# Warning: the user id token contains also contains the identity of the netflix profile
# therefore it is necessary to use the right user id token for the request
if auth_data.get('user_id_token'):
if auth_data['use_switch_profile']:
# The SWITCH_PROFILE is a custom Netflix MSL user authentication scheme
# that is needed for switching profile on MSL side
# works only combined with user id token and can not be used with all endpoints
# after use it you will get user id token of the profile specified in the response
header_data['userauthdata'] = {
'scheme': 'SWITCH_PROFILE',
'authdata': {
'useridtoken': auth_data['user_id_token'],
'profileguid': G.LOCAL_DB.get_active_profile_guid()
}
}
else:
# Authentication with user ID token containing the user identity (netflix profile)
header_data['useridtoken'] = auth_data['user_id_token']
else:
# Authentication with the user credentials
credentials = common.get_credentials()
header_data['userauthdata'] = {
'scheme': 'EMAIL_PASSWORD',
'authdata': {
'email': credentials['email'],
'password': credentials['password']
}
}
# Authentication with user Netflix ID cookies
# This not works on android,
# will raise: User authentication data does not match entity identity
# header_data['userauthdata'] = {
# 'scheme': 'NETFLIXID',
# 'authdata': {
# 'netflixid': cookies['NetflixId'],
# 'securenetflixid': cookies['SecureNetflixId']
# }
# }
| 40.664671 | 103 | 0.598145 |
584fa12df5ae765b943f12dd54559cf0df420533 | 1,392 | py | Python | src/users/models/microsoftgraphplanner_plan_details.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | src/users/models/microsoftgraphplanner_plan_details.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | src/users/models/microsoftgraphplanner_plan_details.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MicrosoftgraphplannerPlanDetails(Model):
"""MicrosoftgraphplannerPlanDetails.
:param id:
:type id: str
:param shared_with:
:type shared_with: object
:param category_descriptions:
:type category_descriptions:
~users.models.MicrosoftgraphplannerCategoryDescriptions
:param context_details:
:type context_details: object
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'shared_with': {'key': 'sharedWith', 'type': 'object'},
'category_descriptions': {'key': 'categoryDescriptions', 'type': 'MicrosoftgraphplannerCategoryDescriptions'},
'context_details': {'key': 'contextDetails', 'type': 'object'},
}
def __init__(self, id=None, shared_with=None, category_descriptions=None, context_details=None):
super(MicrosoftgraphplannerPlanDetails, self).__init__()
self.id = id
self.shared_with = shared_with
self.category_descriptions = category_descriptions
self.context_details = context_details
| 36.631579 | 118 | 0.623563 |
e0f46fa1aeda47ff9daf4b9306ecd620e18b7c88 | 2,591 | py | Python | misc/config_tools/board_inspector/pcieparser/extcaps.py | donsheng/acrn-hypervisor | 79edf8ba08f3f6d11d1ccf464b208c80b5b0fd24 | [
"BSD-3-Clause"
] | 848 | 2018-03-06T01:20:35.000Z | 2022-03-31T05:47:50.000Z | misc/config_tools/board_inspector/pcieparser/extcaps.py | donsheng/acrn-hypervisor | 79edf8ba08f3f6d11d1ccf464b208c80b5b0fd24 | [
"BSD-3-Clause"
] | 6,483 | 2018-03-09T05:29:36.000Z | 2022-03-31T20:39:35.000Z | misc/config_tools/board_inspector/pcieparser/extcaps.py | donsheng/acrn-hypervisor | 79edf8ba08f3f6d11d1ccf464b208c80b5b0fd24 | [
"BSD-3-Clause"
] | 593 | 2018-03-06T07:04:42.000Z | 2022-03-29T15:39:27.000Z | # Copyright (C) 2021 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import ctypes
import copy
import inspectorlib.cdata as cdata
class ExtendedCapability:
# Capability names from PCI Express Base Specification, mostly Table 9-23
_cap_names_ = {
0x01: "Advanced Error Reporting",
0x02: "Virtual Channel",
0x03: "Device Serial Number",
0x04: "Power Budgeting",
0x05: "Root Complex Link Declaration",
0x06: "Root Complex Internal Link Control",
0x07: "Root Complex Event Collector Endpoint Association",
0x08: "Multi-Function Virtual Channel",
0x09: "Virtual Channel",
0x0a: "RCRB Header",
0x0b: "Vendor-Specific Extended",
0x0c: "Configuration Access Correlation",
0x0d: "ACS",
0x0e: "ARI",
0x0f: "ATS",
0x10: "SR-IOV",
0x11: "MR-IOV",
0x12: "Multicast",
0x13: "PRI",
0x15: "Resizable BAR",
0x16: "DPA",
0x17: "TPH Requester",
0x18: "LTR",
0x19: "Secondary PCI Express",
0x1a: "PMUX",
0x1b: "PASID",
0x1c: "LNR",
0x1d: "DPC",
0x1e: "L1 PM Substates",
0x1f: "TPM",
0x20: "M-PCIe",
0x21: "FRS Queueing",
0x22: "Readiness Time Reporting",
0x23: "Designated Vendor-Specific",
0x24: "VF Resizable BAR",
0x25: "Data Link Feature",
0x26: "Physical Layer 16.0 GT/s",
0x27: "Lane Margining at the Receiver",
0x28: "Hierarchy ID",
0x29: "NPEM",
0x2a: "Physical Layer 32.0 GT/s",
0x2b: "Alternate Protocol",
0x2c: "SFI",
}
@property
def name(self):
if self.id in self._cap_names_.keys():
return self._cap_names_[self.id]
else:
return f"Reserved Extended ({hex(self.id)})"
class ExtendedCapabilityListRegister(cdata.Struct, ExtendedCapability):
_pack_ = 1
_fields_ = [
('id', ctypes.c_uint32, 16),
('version', ctypes.c_uint32, 4),
('next_cap_ptr_raw', ctypes.c_uint32, 12),
]
@property
def next_cap_ptr(self):
return self.next_cap_ptr_raw & 0xffc
# Module API
def extended_capabilities(data):
buf = ctypes.create_string_buffer(data, len(data))
cap_ptr = 0x100
acc = list()
while cap_ptr != 0:
caplist = ExtendedCapabilityListRegister.from_buffer_copy(buf, cap_ptr)
if caplist.id != 0:
acc.append(caplist)
cap_ptr = caplist.next_cap_ptr
return acc
| 28.472527 | 79 | 0.587804 |
71b34b02e7aeb94f742c6423d184320aaadc0c8c | 821 | py | Python | bin/emoji_translate.py | wks-sumo-logic/emoji-tools | 36a23f9a15eda83411ed754482dcde3db994dfb2 | [
"MIT"
] | null | null | null | bin/emoji_translate.py | wks-sumo-logic/emoji-tools | 36a23f9a15eda83411ed754482dcde3db994dfb2 | [
"MIT"
] | null | null | null | bin/emoji_translate.py | wks-sumo-logic/emoji-tools | 36a23f9a15eda83411ed754482dcde3db994dfb2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Converts an emoji into unicode string for display
"""
import sys
ename=sys.argv[1]
ecodelist=sys.argv[2:]
convertlist = list()
for ecode in ecodelist:
ecode = ecode.replace('U+','')
if len(ecode) != 4:
offset = (len(bin(int(ecode,16))) - 10 )
LEAD = str(hex(int(str((bin(int(ecode,16)))[2:offset]),2) + 55232))
LEAD = LEAD.replace('0x', "\\u")
TAIL = str(hex( (int(ecode, 16) & 1023 ) + 56320 ))
TAIL = TAIL.replace('0x', "\\u")
conversion = LEAD + TAIL
else:
LEAD = '\\' + 'u' + ecode
conversion = LEAD
convertlist.append(conversion)
SEPARATOR = ''
CONVERTED = SEPARATOR.join(convertlist)
print('NAME: {}\t CODELIST: {}'.format(ename, ecodelist))
print('NAME: {}\t CONVERTED: {}'.format(ename, CONVERTED))
| 24.878788 | 75 | 0.588307 |
f0a8d84fe06e8dd4742e0c62f144ce96f5a109f8 | 10,640 | py | Python | test/unit/visualizations/plugins/test_VisualizationsRegistry.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 47 | 2015-10-21T23:30:30.000Z | 2022-03-09T06:51:32.000Z | test/unit/visualizations/plugins/test_VisualizationsRegistry.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 78 | 2019-01-18T08:12:49.000Z | 2022-03-13T08:56:41.000Z | test/unit/visualizations/plugins/test_VisualizationsRegistry.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 35 | 2015-10-30T13:09:40.000Z | 2021-05-03T23:17:46.000Z | """
Test lib/galaxy/visualization/plugins/registry.
"""
import os
import re
import unittest
from galaxy import model
from galaxy.util import clean_multiline_string
from galaxy.visualization.plugins import plugin
from galaxy.visualization.plugins.registry import VisualizationsRegistry
from . import VisualizationsBase_TestCase
from ...unittest_utils import galaxy_mock
glx_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))
template_cache_dir = os.path.join(glx_dir, 'database', 'compiled_templates')
addtional_templates_dir = os.path.join(glx_dir, 'config', 'plugins', 'visualizations', 'common', 'templates')
vis_reg_path = 'config/plugins/visualizations'
config1 = """\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE visualization SYSTEM "../../visualization.dtd">
<visualization name="scatterplot">
<data_sources>
<data_source>
<model_class>HistoryDatasetAssociation</model_class>
<test type="isinstance" test_attr="datatype" result_type="datatype">tabular.Tabular</test>
<to_param param_attr="id">dataset_id</to_param>
</data_source>
</data_sources>
<params>
<param type="dataset" var_name_in_template="hda" required="true">dataset_id</param>
</params>
<template>scatterplot.mako</template>
</visualization>
"""
class VisualizationsRegistry_TestCase(VisualizationsBase_TestCase):
def test_plugin_load_from_repo(self):
"""should attempt load if criteria met"""
mock_app = galaxy_mock.MockApp(root=glx_dir)
plugin_mgr = VisualizationsRegistry(mock_app,
directories_setting=vis_reg_path,
template_cache_dir=None)
expected_plugins_path = os.path.join(glx_dir, vis_reg_path)
self.assertEqual(plugin_mgr.base_url, 'visualizations')
self.assertEqual(plugin_mgr.directories, [expected_plugins_path])
scatterplot = plugin_mgr.plugins['scatterplot']
self.assertEqual(scatterplot.name, 'scatterplot')
self.assertEqual(scatterplot.path, os.path.join(expected_plugins_path, 'scatterplot'))
self.assertEqual(scatterplot.base_url, '/'.join((plugin_mgr.base_url, scatterplot.name)))
self.assertTrue(scatterplot.serves_templates)
self.assertEqual(scatterplot.template_path, os.path.join(scatterplot.path, 'templates'))
self.assertEqual(scatterplot.template_lookup.__class__.__name__, 'TemplateLookup')
trackster = plugin_mgr.plugins['trackster']
self.assertEqual(trackster.name, 'trackster')
self.assertEqual(trackster.path, os.path.join(expected_plugins_path, 'trackster'))
self.assertEqual(trackster.base_url, '/'.join((plugin_mgr.base_url, trackster.name)))
self.assertFalse(trackster.serves_templates)
def test_plugin_load(self):
""""""
mock_app_dir = galaxy_mock.MockDir({
'plugins': {
'vis1': {
'config': {
'vis1.xml': config1
},
'static': {},
'templates': {},
},
'vis2': {
'config': {
'vis2.xml': config1
}
},
'not_a_vis1': {
'config': {
'vis1.xml': 'blerbler'
},
},
# empty
'not_a_vis2': {},
'not_a_vis3': 'blerbler',
# bad config
'not_a_vis4': {
'config': {
'not_a_vis4.xml': 'blerbler'
}
},
'not_a_vis5': {
# no config
'static': {},
'templates': {},
},
}
})
mock_app = galaxy_mock.MockApp(root=mock_app_dir.root_path)
plugin_mgr = VisualizationsRegistry(mock_app,
directories_setting='plugins',
template_cache_dir=template_cache_dir)
expected_plugins_path = os.path.join(mock_app_dir.root_path, 'plugins')
expected_plugin_names = ['vis1', 'vis2']
self.assertEqual(plugin_mgr.base_url, 'visualizations')
self.assertEqual(plugin_mgr.directories, [expected_plugins_path])
self.assertEqual(sorted(plugin_mgr.plugins.keys()), expected_plugin_names)
vis1 = plugin_mgr.plugins['vis1']
self.assertEqual(vis1.name, 'vis1')
self.assertEqual(vis1.path, os.path.join(expected_plugins_path, 'vis1'))
self.assertEqual(vis1.base_url, '/'.join((plugin_mgr.base_url, vis1.name)))
self.assertTrue(vis1.serves_templates)
self.assertEqual(vis1.template_path, os.path.join(vis1.path, 'templates'))
self.assertEqual(vis1.template_lookup.__class__.__name__, 'TemplateLookup')
vis2 = plugin_mgr.plugins['vis2']
self.assertEqual(vis2.name, 'vis2')
self.assertEqual(vis2.path, os.path.join(expected_plugins_path, 'vis2'))
self.assertEqual(vis2.base_url, '/'.join((plugin_mgr.base_url, vis2.name)))
self.assertFalse(vis2.serves_templates)
mock_app_dir.remove()
template_cache_dir
def test_interactive_environ_plugin_load(self):
"""
"""
jupyter_config = clean_multiline_string("""\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE interactive_environment SYSTEM "../../interactive_environments.dtd">
<interactive_environment name="Jupyter">
<data_sources>
<data_source>
<model_class>HistoryDatasetAssociation</model_class>
<test type="isinstance" test_attr="datatype" result_type="datatype">tabular.Tabular</test>
<test type="isinstance" test_attr="datatype" result_type="datatype">data.Text</test>
<to_param param_attr="id">dataset_id</to_param>
</data_source>
</data_sources>
<params>
<param type="dataset" var_name_in_template="hda" required="true">dataset_id</param>
</params>
<template>jupyter.mako</template>
</interactive_environment>
""")
mock_app_dir = {
'plugins': {
'jupyter': {
'config': {
'jupyter.xml': jupyter_config
},
'templates': {}
},
},
}
# going to use a fake template here to simplify testing
jupyter_template = "${ ie_request }-${ get_api_key() }"
mock_app_dir['plugins']['jupyter']['templates']['jupyter.mako'] = jupyter_template
# so that we don't create a cached version of that fake template in the real mako caches
# we'll set up a cache in the temp dir
mock_app_dir['caches'] = {}
# and make sure the vis reg uses that
mock_app_dir = galaxy_mock.MockDir(mock_app_dir)
mock_app = galaxy_mock.MockApp(root=mock_app_dir.root_path)
plugin_mgr = VisualizationsRegistry(mock_app,
directories_setting='plugins',
template_cache_dir=os.path.join(mock_app_dir.root_path, 'caches'))
# ...then start testing
expected_plugins_path = os.path.join(mock_app_dir.root_path, 'plugins')
expected_plugin_names = ['jupyter']
self.assertEqual(plugin_mgr.base_url, 'visualizations')
self.assertEqual(plugin_mgr.directories, [expected_plugins_path])
self.assertEqual(sorted(plugin_mgr.plugins.keys()), expected_plugin_names)
jupyter = plugin_mgr.plugins['jupyter']
config = jupyter.config
self.assertEqual(jupyter.name, 'jupyter')
self.assertEqual(config.get('plugin_type'), 'interactive_environment')
# get_api_key needs a user, fill_template a trans
user = model.User(email="blah@bler.blah", password="dockerDockerDOCKER")
trans = galaxy_mock.MockTrans(user=user)
# use a mock request factory - this will be written into the filled template to show it was used
jupyter.INTENV_REQUEST_FACTORY = lambda t, p: 'mock'
# should return the (new) api key for the above user (see the template above)
response = jupyter._render({}, trans=trans)
response.strip()
self.assertIsInstance(response, str)
self.assertTrue('-' in response)
ie_request, api_key = response.split('-')
self.assertEqual(ie_request, 'mock')
match = re.match(r'[a-f0-9]{32}', api_key)
self.assertIsNotNone(match)
self.assertEqual(match.span(), (0, 32))
mock_app_dir.remove()
def test_script_entry(self):
""""""
script_entry_config = clean_multiline_string("""\
<?xml version="1.0" encoding="UTF-8"?>
<visualization name="js-test">
<data_sources>
<data_source>
<model_class>HistoryDatasetAssociation</model_class>
</data_source>
</data_sources>
<entry_point entry_point_type="script" data-main="one" src="bler"></entry_point>
</visualization>
""")
mock_app_dir = galaxy_mock.MockDir({
'plugins': {
'jstest': {
'config': {
'jstest.xml': script_entry_config
},
'static': {}
},
}
})
mock_app = galaxy_mock.MockApp(root=mock_app_dir.root_path)
plugin_mgr = VisualizationsRegistry(mock_app,
directories_setting='plugins',
template_cache_dir=template_cache_dir)
script_entry = plugin_mgr.plugins['jstest']
self.assertIsInstance(script_entry, plugin.ScriptVisualizationPlugin)
self.assertEqual(script_entry.name, 'jstest')
self.assertTrue(script_entry.serves_templates)
trans = galaxy_mock.MockTrans()
script_entry._set_up_template_plugin(mock_app_dir.root_path, [addtional_templates_dir])
response = script_entry._render({}, trans=trans, embedded=True)
self.assertTrue('src="bler"' in response)
self.assertTrue('type="text/javascript"' in response)
self.assertTrue('data-main="one"' in response)
mock_app_dir.remove()
# -----------------------------------------------------------------------------
# TODO: config parser tests (in separate file)
if __name__ == '__main__':
unittest.main()
| 40.766284 | 110 | 0.608459 |
1855d6ed2c229d55d7d790f594d9f65e1298f95d | 18,611 | py | Python | wo/cli/plugins/site_backup.py | dacsec-org/WordOps | 30adb81a812ebba2107097b24ad356016d9d6af9 | [
"MIT"
] | 1 | 2022-01-15T14:51:38.000Z | 2022-01-15T14:51:38.000Z | wo/cli/plugins/site_backup.py | dacsec-org/WordOps | 30adb81a812ebba2107097b24ad356016d9d6af9 | [
"MIT"
] | null | null | null | wo/cli/plugins/site_backup.py | dacsec-org/WordOps | 30adb81a812ebba2107097b24ad356016d9d6af9 | [
"MIT"
] | null | null | null | import os
from cement.core.controller import CementBaseController, expose
from wo.cli.plugins.site_functions import (
detSitePar, check_domain_exists, site_package_check,
pre_run_checks, setupdomain, SiteError,
doCleanupAction, setupdatabase, setupwordpress, setwebrootpermissions,
display_cache_settings, copyWildcardCert)
from wo.cli.plugins.sitedb import (deleteSiteInfo, getAllsites,
getSiteInfo, updateSiteInfo)
from wo.core.acme import WOAcme
from wo.core.domainvalidate import WODomain
from wo.core.git import WOGit
from wo.core.logging import Log
from wo.core.nginxhashbucket import hashbucket
from wo.core.services import WOService
from wo.core.sslutils import SSL
from wo.core.variables import WOVar
class WOSiteBackupController(CementBaseController):
class Meta:
label = 'backup'
stacked_on = 'site'
stacked_type = 'nested'
description = ('this commands allow you to backup your sites')
arguments = [
(['site_name'],
dict(help='domain name for the site to be cloned.',
nargs='?')),
(['--db'],
dict(help="backup only site database", action='store_true')),
(['--files'],
dict(help="backup only site files", action='store_true')),
(['--all'],
dict(help="backup all sites", action='store_true')),
]
@expose(hide=True)
def default(self):
pargs = self.app.pargs
# self.app.render((data), 'default.mustache')
# Check domain name validation
data = dict()
sites = getAllsites(self)
if not pargs.site_name and not pargs.all:
try:
while not pargs.site_name:
# preprocessing before finalize site name
pargs.site_name = (input('Enter site name : ')
.strip())
except IOError as e:
Log.debug(self, str(e))
Log.error(self, "Unable to input site name, Please try again!")
pargs.site_name = pargs.site_name.strip()
wo_domain = WODomain.validate(self, pargs.site_name)
wo_www_domain = "www.{0}".format(wo_domain)
(wo_domain_type, wo_root_domain) = WODomain.getlevel(
self, wo_domain)
if not wo_domain.strip():
Log.error(self, "Invalid domain name, "
"Provide valid domain name")
wo_site_webroot = WOVar.wo_webroot + wo_domain
if not check_domain_exists(self, wo_domain):
Log.error(self, "site {0} already exists".format(wo_domain))
elif os.path.isfile('/etc/nginx/sites-available/{0}'
.format(wo_domain)):
Log.error(self, "Nginx configuration /etc/nginx/sites-available/"
"{0} already exists".format(wo_domain))
try:
try:
# setup NGINX configuration, and webroot
setupdomain(self, data)
# Fix Nginx Hashbucket size error
hashbucket(self)
except SiteError as e:
# call cleanup actions on failure
Log.info(self, Log.FAIL +
"There was a serious error encountered...")
Log.info(self, Log.FAIL + "Cleaning up afterwards...")
doCleanupAction(self, domain=wo_domain,
webroot=data['webroot'])
Log.debug(self, str(e))
Log.error(self, "Check the log for details: "
"`tail /var/log/wo/wordops.log` "
"and please try again")
if 'proxy' in data.keys() and data['proxy']:
addNewSite(self, wo_domain, stype, cache, wo_site_webroot)
# Service Nginx Reload
if not WOService.reload_service(self, 'nginx'):
Log.info(self, Log.FAIL +
"There was a serious error encountered...")
Log.info(self, Log.FAIL + "Cleaning up afterwards...")
doCleanupAction(self, domain=wo_domain)
deleteSiteInfo(self, wo_domain)
Log.error(self, "service nginx reload failed. "
"check issues with `nginx -t` command")
Log.error(self, "Check the log for details: "
"`tail /var/log/wo/wordops.log` "
"and please try again")
if wo_auth and len(wo_auth):
for msg in wo_auth:
Log.info(self, Log.ENDC + msg, log=False)
Log.info(self, "Successfully created site"
" http://{0}".format(wo_domain))
return
if data['php72']:
php_version = "7.2"
elif data['php74']:
php_version = "7.4"
else:
php_version = "7.3"
addNewSite(self, wo_domain, stype, cache, wo_site_webroot,
php_version=php_version)
# Setup database for MySQL site
if 'wo_db_name' in data.keys() and not data['wp']:
try:
data = setupdatabase(self, data)
# Add database information for site into database
updateSiteInfo(self, wo_domain, db_name=data['wo_db_name'],
db_user=data['wo_db_user'],
db_password=data['wo_db_pass'],
db_host=data['wo_db_host'])
except SiteError as e:
# call cleanup actions on failure
Log.debug(self, str(e))
Log.info(self, Log.FAIL +
"There was a serious error encountered...")
Log.info(self, Log.FAIL + "Cleaning up afterwards...")
doCleanupAction(self, domain=wo_domain,
webroot=data['webroot'],
dbname=data['wo_db_name'],
dbuser=data['wo_db_user'],
dbhost=data['wo_db_host'])
deleteSiteInfo(self, wo_domain)
Log.error(self, "Check the log for details: "
"`tail /var/log/wo/wordops.log` "
"and please try again")
try:
wodbconfig = open("{0}/wo-config.php"
.format(wo_site_webroot),
encoding='utf-8', mode='w')
wodbconfig.write("<?php \ndefine('DB_NAME', '{0}');"
"\ndefine('DB_USER', '{1}'); "
"\ndefine('DB_PASSWORD', '{2}');"
"\ndefine('DB_HOST', '{3}');\n?>"
.format(data['wo_db_name'],
data['wo_db_user'],
data['wo_db_pass'],
data['wo_db_host']))
wodbconfig.close()
stype = 'mysql'
except IOError as e:
Log.debug(self, str(e))
Log.debug(self, "Error occured while generating "
"wo-config.php")
Log.info(self, Log.FAIL +
"There was a serious error encountered...")
Log.info(self, Log.FAIL + "Cleaning up afterwards...")
doCleanupAction(self, domain=wo_domain,
webroot=data['webroot'],
dbname=data['wo_db_name'],
dbuser=data['wo_db_user'],
dbhost=data['wo_db_host'])
deleteSiteInfo(self, wo_domain)
Log.error(self, "Check the log for details: "
"`tail /var/log/wo/wordops.log` "
"and please try again")
# Setup WordPress if Wordpress site
if data['wp']:
vhostonly = bool(pargs.vhostonly)
try:
wo_wp_creds = setupwordpress(self, data, vhostonly)
# Add database information for site into database
updateSiteInfo(self, wo_domain,
db_name=data['wo_db_name'],
db_user=data['wo_db_user'],
db_password=data['wo_db_pass'],
db_host=data['wo_db_host'])
except SiteError as e:
# call cleanup actions on failure
Log.debug(self, str(e))
Log.info(self, Log.FAIL +
"There was a serious error encountered...")
Log.info(self, Log.FAIL + "Cleaning up afterwards...")
doCleanupAction(self, domain=wo_domain,
webroot=data['webroot'],
dbname=data['wo_db_name'],
dbuser=data['wo_db_user'],
dbhost=data['wo_mysql_grant_host'])
deleteSiteInfo(self, wo_domain)
Log.error(self, "Check the log for details: "
"`tail /var/log/wo/wordops.log` "
"and please try again")
# Service Nginx Reload call cleanup if failed to reload nginx
if not WOService.reload_service(self, 'nginx'):
Log.info(self, Log.FAIL +
"There was a serious error encountered...")
Log.info(self, Log.FAIL + "Cleaning up afterwards...")
doCleanupAction(self, domain=wo_domain,
webroot=data['webroot'])
if 'wo_db_name' in data.keys():
doCleanupAction(self, domain=wo_domain,
dbname=data['wo_db_name'],
dbuser=data['wo_db_user'],
dbhost=data['wo_mysql_grant_host'])
deleteSiteInfo(self, wo_domain)
Log.info(self, Log.FAIL + "service nginx reload failed."
" check issues with `nginx -t` command.")
Log.error(self, "Check the log for details: "
"`tail /var/log/wo/wordops.log` "
"and please try again")
WOGit.add(self, ["/etc/nginx"],
msg="{0} created with {1} {2}"
.format(wo_www_domain, stype, cache))
# Setup Permissions for webroot
try:
setwebrootpermissions(self, data['webroot'])
except SiteError as e:
Log.debug(self, str(e))
Log.info(self, Log.FAIL +
"There was a serious error encountered...")
Log.info(self, Log.FAIL + "Cleaning up afterwards...")
doCleanupAction(self, domain=wo_domain,
webroot=data['webroot'])
if 'wo_db_name' in data.keys():
print("Inside db cleanup")
doCleanupAction(self, domain=wo_domain,
dbname=data['wo_db_name'],
dbuser=data['wo_db_user'],
dbhost=data['wo_mysql_grant_host'])
deleteSiteInfo(self, wo_domain)
Log.error(self, "Check the log for details: "
"`tail /var/log/wo/wordops.log` and "
"please try again")
if wo_auth and len(wo_auth):
for msg in wo_auth:
Log.info(self, Log.ENDC + msg, log=False)
if data['wp'] and (not pargs.vhostonly):
Log.info(self, Log.ENDC + "WordPress admin user :"
" {0}".format(wo_wp_creds['wp_user']), log=False)
Log.info(self, Log.ENDC + "WordPress admin password : {0}"
.format(wo_wp_creds['wp_pass']), log=False)
display_cache_settings(self, data)
Log.info(self, "Successfully created site"
" http://{0}".format(wo_domain))
except SiteError:
Log.error(self, "Check the log for details: "
"`tail /var/log/wo/wordops.log` and please try again")
if pargs.letsencrypt:
acme_domains = []
data['letsencrypt'] = True
letsencrypt = True
Log.debug(self, "Going to issue Let's Encrypt certificate")
acmedata = dict(
acme_domains, dns=False, acme_dns='dns_cf',
dnsalias=False, acme_alias='', keylength='')
if self.app.config.has_section('letsencrypt'):
acmedata['keylength'] = self.app.config.get(
'letsencrypt', 'keylength')
else:
acmedata['keylength'] = 'ec-384'
if pargs.dns:
Log.debug(self, "DNS validation enabled")
acmedata['dns'] = True
if not pargs.dns == 'dns_cf':
Log.debug(self, "DNS API : {0}".format(pargs.dns))
acmedata['acme_dns'] = pargs.dns
if pargs.dnsalias:
Log.debug(self, "DNS Alias enabled")
acmedata['dnsalias'] = True
acmedata['acme_alias'] = pargs.dnsalias
# detect subdomain and set subdomain variable
if pargs.letsencrypt == "subdomain":
Log.warn(
self, 'Flag --letsencrypt=subdomain is '
'deprecated and not required anymore.')
acme_subdomain = True
acme_wildcard = False
elif pargs.letsencrypt == "wildcard":
acme_wildcard = True
acme_subdomain = False
acmedata['dns'] = True
else:
if ((wo_domain_type == 'subdomain')):
Log.debug(self, "Domain type = {0}"
.format(wo_domain_type))
acme_subdomain = True
else:
acme_subdomain = False
acme_wildcard = False
if acme_subdomain is True:
Log.info(self, "Certificate type : subdomain")
acme_domains = acme_domains + ['{0}'.format(wo_domain)]
elif acme_wildcard is True:
Log.info(self, "Certificate type : wildcard")
acme_domains = acme_domains + ['{0}'.format(wo_domain),
'*.{0}'.format(wo_domain)]
else:
Log.info(self, "Certificate type : domain")
acme_domains = acme_domains + ['{0}'.format(wo_domain),
'www.{0}'.format(wo_domain)]
if WOAcme.cert_check(self, wo_domain):
SSL.archivedcertificatehandle(self, wo_domain, acme_domains)
else:
if acme_subdomain is True:
# check if a wildcard cert for the root domain exist
Log.debug(self, "checkWildcardExist on *.{0}"
.format(wo_root_domain))
if SSL.checkwildcardexist(self, wo_root_domain):
Log.info(self, "Using existing Wildcard SSL "
"certificate from {0} to secure {1}"
.format(wo_root_domain, wo_domain))
Log.debug(self, "symlink wildcard "
"cert between {0} & {1}"
.format(wo_domain, wo_root_domain))
# copy the cert from the root domain
copyWildcardCert(self, wo_domain, wo_root_domain)
else:
# check DNS records before issuing cert
if not acmedata['dns'] is True:
if not pargs.force:
if not WOAcme.check_dns(self, acme_domains):
Log.error(self,
"Aborting SSL "
"certificate issuance")
Log.debug(self, "Setup Cert with acme.sh for {0}"
.format(wo_domain))
if WOAcme.setupletsencrypt(
self, acme_domains, acmedata):
WOAcme.deploycert(self, wo_domain)
else:
if not acmedata['dns'] is True:
if not pargs.force:
if not WOAcme.check_dns(self, acme_domains):
Log.error(self,
"Aborting SSL certificate issuance")
if WOAcme.setupletsencrypt(
self, acme_domains, acmedata):
WOAcme.deploycert(self, wo_domain)
if pargs.hsts:
SSL.setuphsts(self, wo_domain)
SSL.httpsredirect(self, wo_domain, acme_domains, True)
SSL.siteurlhttps(self, wo_domain)
if not WOService.reload_service(self, 'nginx'):
Log.error(self, "service nginx reload failed. "
"check issues with `nginx -t` command")
Log.info(self, "Congratulations! Successfully Configured "
"SSL on https://{0}".format(wo_domain))
# Add nginx conf folder into GIT
WOGit.add(self, ["{0}/conf/nginx".format(wo_site_webroot)],
msg="Adding letsencrypts config of site: {0}"
.format(wo_domain))
updateSiteInfo(self, wo_domain, ssl=letsencrypt)
| 48.847769 | 79 | 0.471119 |
e8cfc1518f0199ccc66e6ddb1e576cc6ff46cc3b | 8,034 | py | Python | train/pretrain.py | Gohary-98/TMNet | 4bc99b6f100a7327d8d356cec70b67c95bb6515d | [
"MIT"
] | null | null | null | train/pretrain.py | Gohary-98/TMNet | 4bc99b6f100a7327d8d356cec70b67c95bb6515d | [
"MIT"
] | null | null | null | train/pretrain.py | Gohary-98/TMNet | 4bc99b6f100a7327d8d356cec70b67c95bb6515d | [
"MIT"
] | null | null | null | from __future__ import print_function
import argparse
import random
import numpy as np
import torch
import torch.optim as optim
import sys
print(sys.path)
sys.path.append('./auxiliary/')
print(sys.path)
from dataset import *
from model import *
from utils import *
from ply import *
import os
import json
import datetime
import visdom
# ---------------------------------------------------------------------------
# Command-line options: training hyper-parameters and experiment bookkeeping.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=12)
parser.add_argument('--nepoch', type=int, default=420, help='number of epochs to train for')
parser.add_argument('--model', type=str, default='', help='optional reload model path')
parser.add_argument('--num_points', type=int, default=2500, help='number of points')
parser.add_argument('--nb_primitives', type=int, default=1, help='number of primitives in the atlas')
parser.add_argument('--super_points', type=int, default=2500,
                    help='number of input points to pointNet, not used by default')
parser.add_argument('--env', type=str, default="pretrain", help='visdom environment')
parser.add_argument('--lr',type=float,default=1e-3, help='initial learning rate')
parser.add_argument('--manualSeed', type=int, default=6185)  # fixed RNG seed for reproducibility
opt = parser.parse_args()
print(opt)
# The Chamfer-distance CUDA extension lives in ./extension/ (built separately).
sys.path.append("./extension/")
import dist_chamfer as ext
distChamfer = ext.chamferDist()
# Visdom server connection for live loss curves and point-cloud scatter plots.
vis = visdom.Visdom(port=8888, env=opt.env)
now = datetime.datetime.now()
save_path = opt.env
dir_name = os.path.join('./log', save_path)
if not os.path.exists(dir_name):
    os.mkdir(dir_name)
logname = os.path.join(dir_name, 'log.txt')
blue = lambda x: '\033[94m' + x + '\033[0m'  # ANSI helper: render text in blue
# Seed the python and torch RNGs so runs are repeatable.
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
# Single-category (chair) ShapeNet loaders; SVR=True also yields rendered images.
dataset = ShapeNet(npoints=opt.num_points, SVR=True, normal=False, train=True, class_choice='chair')
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                         shuffle=True, num_workers=int(opt.workers))
dataset_test = ShapeNet(npoints=opt.num_points, SVR=True, normal=False, train=False, class_choice='chair')
dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=opt.batchSize,
                                              shuffle=False, num_workers=int(opt.workers))
print('training set', len(dataset.datapath))
print('testing set', len(dataset_test.datapath))
len_dataset = len(dataset)
network = Pretrain(num_points=opt.num_points)
network.cuda() # put network on GPU
network.apply(weights_init) # initialization of the weight
if opt.model != '':
    network.load_state_dict(torch.load(opt.model))
    print(" Previous weight loaded ")
lrate = opt.lr # learning rate
# Adam over the point-cloud encoder and the decoder only; the image encoder
# is optimized later (the optimizer is swapped at epoch 120 in the loop).
optimizer = optim.Adam([
    {'params': network.pc_encoder.parameters()},
    {'params': network.decoder.parameters()}
], lr=lrate)
# meters to record stats on learning
train_loss = AverageValueMeter()
val_loss = AverageValueMeter()
with open(logname, 'a') as f: # open and append
    f.write(str(network) + '\n')
# initialize learning curve on visdom, and color for each primitive in visdom display
train_curve = []
val_curve = []
# Main optimization loop. Phase 1 (epochs < 120): train the point-cloud
# autoencoder (pc_encoder + decoder). Phase 2 (epochs >= 120): feed images and
# optimize only network.encoder, so the image encoder learns to match the
# point-cloud latent space; lr is divided by 10 at epochs 100 and 220.
for epoch in range(opt.nepoch):
    # TRAIN MODE
    train_loss.reset()
    network.train()
    # learning rate schedule
    if epoch == 100:
        optimizer = optim.Adam([
            {'params': network.pc_encoder.parameters()},
            {'params': network.decoder.parameters()}
        ], lr=lrate/10.0)
    if epoch == 120:
        optimizer = optim.Adam(network.encoder.parameters(), lr=lrate)
    if epoch == 220:
        optimizer = optim.Adam(network.encoder.parameters(), lr=lrate / 10.0)
    for i, data in enumerate(dataloader, 0):
        optimizer.zero_grad()
        img, points, normals, name, cat = data
        img = img.cuda()
        points = points.transpose(2, 1).contiguous()
        points = points.cuda()
        # SUPER_RESOLUTION optionally reduce the size of the points fed to PointNet
        points = points[:, :, :opt.super_points].contiguous()
        # END SUPER RESOLUTION
        if epoch >= 120:
            pointsRec = network(img, mode='img')
        else:
            pointsRec = network(points) # forward pass
        # Symmetric Chamfer distance between input cloud and reconstruction.
        dist1, dist2,_,_ = distChamfer(points.transpose(2, 1).contiguous(), pointsRec) # loss function
        loss_net = (torch.mean(dist1)) + (torch.mean(dist2))
        loss_net.backward()
        train_loss.update(loss_net.item())
        optimizer.step() # gradient update
        # VISUALIZE
        # every 50 iterations (i % 50 <= 0 is equivalent to i % 50 == 0 here)
        if i % 50 <= 0:
            vis.scatter(X=points.transpose(2, 1).contiguous()[0].data.cpu(),
                        win='TRAIN_INPUT',
                        opts=dict(
                            title="TRAIN_INPUT",
                            markersize=2,
                        ),
                        )
            vis.scatter(X=pointsRec[0].data.cpu(),
                        win='TRAIN_INPUT_RECONSTRUCTED',
                        opts=dict(
                            title="TRAIN_INPUT_RECONSTRUCTED",
                            markersize=2,
                        ),
                        )
        print('[%d: %d/%d] train loss: %f ' % (epoch, i, len_dataset / opt.batchSize, loss_net.item()))
    # UPDATE CURVES
    train_curve.append(train_loss.avg)
    # VALIDATION
    val_loss.reset()
    for item in dataset_test.cat:
        dataset_test.perCatValueMeter[item].reset()
    network.eval()
    with torch.no_grad():
        for i, data in enumerate(dataloader_test, 0):
            img, points, normals, name, cat = data
            img = img.cuda()
            points = points.transpose(2, 1).contiguous()
            points = points.cuda()
            # SUPER_RESOLUTION
            points = points[:, :, :opt.super_points].contiguous()
            # END SUPER RESOLUTION
            if epoch >= 120:
                pointsRec = network(img, mode='img')
            else:
                pointsRec = network(points) # forward pass
            dist1, dist2,_,_ = distChamfer(points.transpose(2, 1).contiguous(), pointsRec)
            loss_net = (torch.mean(dist1)) + (torch.mean(dist2))
            val_loss.update(loss_net.item())
            dataset_test.perCatValueMeter[cat[0]].update(loss_net.item())
            if i % 200 == 0:
                vis.scatter(X=points.transpose(2, 1).contiguous()[0].data.cpu(),
                            win='VAL_INPUT',
                            opts=dict(
                                title="VAL_INPUT",
                                markersize=2,
                            ),
                            )
                vis.scatter(X=pointsRec[0].data.cpu(),
                            win='VAL_INPUT_RECONSTRUCTED',
                            opts=dict(
                                title="VAL_INPUT_RECONSTRUCTED",
                                markersize=2,
                            ),
                            )
            print('[%d: %d/%d] val loss: %f ' % (epoch, i, len(dataset_test)/opt.batchSize, loss_net.item()))
    # UPDATE CURVES
    val_curve.append(val_loss.avg)
    # Plot log-loss curves for train and validation on visdom.
    vis.line(X=np.column_stack((np.arange(len(train_curve)), np.arange(len(val_curve)))),
             Y=np.log(np.column_stack((np.array(train_curve), np.array(val_curve)))),
             win='loss',
             opts=dict(title="loss", legend=["train_curve" , "val_curve"], markersize=2, ), )
    # dump stats in log file
    log_table = {
        "train_loss": train_loss.avg,
        "val_loss": val_loss.avg,
        "epoch": epoch,
        "lr": lrate,
        "super_points": opt.super_points,
    }
    print(log_table)
    for item in dataset_test.cat:
        print(item, dataset_test.perCatValueMeter[item].avg)
        log_table.update({item: dataset_test.perCatValueMeter[item].avg})
    with open(logname, 'a') as f: # open and append
        f.write('json_stats: ' + json.dumps(log_table) + '\n')
# Persist final weights (note: last epoch, not necessarily the best one).
torch.save(network.state_dict(), '%s/network.pth' % (dir_name))
| 40.17 | 110 | 0.60107 |
36b436f08aff36ce5a36b9c43b5e3a52d2087204 | 13,891 | py | Python | fractional/DeepONet_float32_batch.py | shushu-qin/deeponet | 5bbe066279bba055ad80e04c364140363c87634a | [
"Apache-2.0"
] | 140 | 2020-12-14T00:45:25.000Z | 2022-03-29T15:28:53.000Z | fractional/DeepONet_float32_batch.py | shushu-qin/deeponet | 5bbe066279bba055ad80e04c364140363c87634a | [
"Apache-2.0"
] | 16 | 2021-05-01T04:00:39.000Z | 2022-03-25T22:01:53.000Z | fractional/DeepONet_float32_batch.py | shushu-qin/deeponet | 5bbe066279bba055ad80e04c364140363c87634a | [
"Apache-2.0"
] | 63 | 2020-12-13T15:27:12.000Z | 2022-03-26T14:09:17.000Z | import tensorflow as tf
import matplotlib.pyplot as plt
import scipy.special as scisp
import numpy as np
from SALib.sample import sobol_sequence
import time
import sys
# import datasets as ds
# Global RNG seed used by xavier_init so weight initialization is repeatable.
random_seed = 12345
def xavier_init(size):
    """Create a trainable float32 variable of shape ``size`` with Xavier init.

    ``size`` is a two-element sequence ``[fan_in, fan_out]``; the values are
    drawn from a truncated normal with stddev sqrt(2 / (fan_in + fan_out)),
    seeded by the module-level ``random_seed`` for reproducibility.
    """
    fan_in, fan_out = size[0], size[1]
    stddev = np.sqrt(2.0 / (fan_in + fan_out))
    initial = tf.truncated_normal(
        [fan_in, fan_out], stddev=stddev, seed=random_seed, dtype=tf.float32
    )
    return tf.Variable(initial, dtype=tf.float32)
# def neural_net(X, weights, biases):
# num_layers = len(weights) + 1
# H = X
# for l in range(0,num_layers-1):
# W = weights[l]
# b = biases[l]
# H = tf.nn.tanh(tf.add(tf.matmul(H, W), b))
# W = weights[-1]
# b = biases[-1]
# Y = tf.add(tf.matmul(H, W), b)
# Y = H
# return Y
def neural_net2(X, weights, biases):
    """Fully-connected net with tanh on *every* layer (used as the trunk net).

    ``weights`` and ``biases`` are parallel lists, one entry per layer.
    """
    activation = X
    for W, b in zip(weights, biases):
        activation = tf.nn.tanh(tf.add(tf.matmul(activation, W), b))
    return activation
def neural_net1(X, weights, biases):
    """Fully-connected net with tanh hidden layers and a *linear* output layer
    (used as the branch net).

    ``weights`` and ``biases`` are parallel lists, one entry per layer; the
    final pair is applied without a non-linearity.
    """
    hidden = X
    for W, b in zip(weights[:-1], biases[:-1]):
        hidden = tf.nn.tanh(tf.add(tf.matmul(hidden, W), b))
    return tf.add(tf.matmul(hidden, weights[-1]), biases[-1])
################ Specify parameters and hyperparameters
### learning 1D Caputo derivative
m = 15 # length of u vector
d = 2 # dim of (y,alpha)
### learning 2D fractional Laplacian (alternative configuration, disabled)
# m = 225 # length of u vector
# d = 3 # dim of (x,y,alpha)
batch_size = 100000
num_epoch = 1000001
print_skip = 100 # evaluate / checkpoint every `print_skip` epochs
is_test = False # False -> train from scratch; True -> restore and evaluate
# is_test = True
### 1D Caputo
layers_u = [m] + [40] * 3 # branch-net layer widths (input: sampled u)
layers_y = [d] + [40] * 3 # trunk-net layer widths (input: (y, alpha))
### 2D fractional Laplacian
# layers_u = [m] + [60]*3
# layers_y = [d] + [60]*3
store_path = "./saved_model/"
################################# building the DeepONet (branch + trunk nets)
L_u = len(layers_u)
L_y = len(layers_y)
# Trainable scalar output bias; weights use Xavier init, biases start at zero.
b0 = tf.Variable(0.0, name="b0", dtype=tf.float32)
weights_u = [
    tf.Variable(
        xavier_init([layers_u[l], layers_u[l + 1]]),
        name="weights_u" + str(l),
        dtype=tf.float32,
    )
    for l in range(0, L_u - 1)
]
biases_u = [
    tf.Variable(
        tf.zeros((1, layers_u[l + 1]), dtype=tf.float32, name="biases_u" + str(l)),
        dtype=tf.float32,
    )
    for l in range(0, L_u - 1)
]
weights_y = [
    tf.Variable(
        xavier_init([layers_y[l], layers_y[l + 1]]),
        name="weights_y" + str(l),
        dtype=tf.float32,
    )
    for l in range(0, L_y - 1)
]
biases_y = [
    tf.Variable(
        tf.zeros((1, layers_y[l + 1]), dtype=tf.float32, name="biases_y" + str(l)),
        dtype=tf.float32,
    )
    for l in range(0, L_y - 1)
]
# Placeholders: sampled input function u, query location (y, alpha), target.
x_u = tf.placeholder(tf.float32, shape=(None, m))
x_y = tf.placeholder(tf.float32, shape=(None, d))
y = tf.placeholder(tf.float32, shape=(None, 1))
# DeepONet output: dot product of branch and trunk features plus bias b0.
net_u = neural_net1(x_u, weights_u, biases_u)
net_y = neural_net2(x_y, weights_y, biases_y)
net_o = tf.reduce_sum(net_u * net_y, axis=1, keepdims=True) + b0
saver = tf.train.Saver(
    var_list=[weights_u[l] for l in range(L_u - 1)]
    + [biases_u[l] for l in range(L_u - 1)]
    + [weights_y[l] for l in range(L_y - 1)]
    + [biases_y[l] for l in range(L_y - 1)]
    + [b0]
)
############ defining loss and optimizer
# Relative mean-squared error: mean((pred - y)^2) / mean(y^2).
loss = tf.reduce_mean(tf.square(net_o - y)) / tf.reduce_mean(tf.square(y))
optimizer_Adam = tf.train.AdamOptimizer(1.0e-3)
# tt0 = time.time()
train_op_Adam = optimizer_Adam.minimize(loss)
# tt1 = time.time()
# print ('loss_graph CPU time: ', tt1-tt0)
############ generating and loading training, validation, and test sets
# if is_test == False:
# tt0 = time.time()
# ds.training_set(m, d, n_u, n_y)
# tt1 = time.time()
# print ('Generate training set CPU time: ', tt1-tt0)
#
# ds.test_set(m, d, n_y)
# Pre-generated .npz archives; each holds (u samples, query points, targets).
data_path = "data/"
data = np.load(data_path + "train.npz")
X_u_train, X_y_train, Y_train = data["X_u_train"], data["X_y_train"], data["Y_train"]
data = np.load(data_path + "test.npz")
X_u_test, X_y_test, Y_test = data["X_u_test"], data["X_y_test"], data["Y_test"]
data = np.load(data_path + "test0.npz")
X_u_test0, X_y_test0, Y_test0 = data["X_u_test"], data["X_y_test"], data["Y_test"]
# data = np.load("test_fabricated.npz")
# X_u_test, X_y_test, Y_test = data["X_u_test"], data["X_y_test"], data["Y_test"]
# X_u_train = (X_u_train0 - np.mean(X_u_train0,axis=0,keepdims=True))/np.std(X_u_train0,axis=0, keepdims=True)
# X_y_train = (X_y_train0 - np.mean(X_y_train0,axis=0,keepdims=True))/np.std(X_y_train0,axis=0, keepdims=True)
#
#
# X_u_test = (X_u_test0- np.mean(X_u_train0,axis=0,keepdims=True))/np.std(X_u_train0,axis=0, keepdims=True)
# X_y_test = (X_y_test0 - np.mean(X_y_train0,axis=0,keepdims=True))/np.std(X_y_train0,axis=0, keepdims=True)
################## Training, validating or test
# Histories of losses (and the epoch index) recorded every `print_skip` epochs.
loss_train_h = []
loss_test_h = []
loss_test0_h = []
i_h = []
# Training branch: mini-batch Adam over shuffled indices; every `print_skip`
# epochs evaluate on both test sets, checkpoint when the training loss
# improves, and refresh the loss-history plots and text dumps.
if is_test == False:
    tt0 = time.time()
    min_loss = 1e16
    num_batch = X_u_train.shape[0] // batch_size
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # feed_train = {x_u: X_u_train, x_y: X_y_train, y: Y_train}
        feed_test = {x_u: X_u_test, x_y: X_y_test, y: Y_test}
        feed_test0 = {x_u: X_u_test0, x_y: X_y_test0, y: Y_test0}
        ind = np.arange(X_u_train.shape[0])
        for i in range(num_epoch):
            np.random.shuffle(ind)
            for j in range(num_batch):
                feed_train_batch = {
                    x_u: X_u_train[ind[(j * batch_size) : ((j + 1) * batch_size)], 0:],
                    x_y: X_y_train[ind[(j * batch_size) : ((j + 1) * batch_size)], 0:],
                    y: Y_train[ind[(j * batch_size) : ((j + 1) * batch_size)], 0:],
                }
                # Periodic evaluation: only on the last batch of the epoch.
                if i % print_skip == 0 and j == num_batch - 1:
                    temp_loss = sess.run(loss, feed_train_batch)
                    # Checkpoint on improvement of the (batch) training loss.
                    if temp_loss < min_loss:
                        save_path = saver.save(sess, store_path + "paras_NN.ckpt")
                        min_loss = temp_loss
                    loss_train = temp_loss
                    loss_test, Y_pred = sess.run([loss, net_o], feed_test)
                    loss_test0, Y_pred0 = sess.run([loss, net_o], feed_test0)
                    error = np.linalg.norm(Y_pred - Y_test) / np.linalg.norm(Y_test)
                    error0 = np.linalg.norm(Y_pred0 - Y_test0) / np.linalg.norm(
                        Y_test0
                    )
                    loss_train_h.append(loss_train)
                    loss_test_h.append(loss_test)
                    loss_test0_h.append(loss_test0)
                    i_h.append(np.float64(i))
                    # Loss-history plot including the second test set.
                    fig = plt.figure()
                    losst = np.stack(loss_train_h)
                    lossv = np.stack(loss_test_h)
                    lossv0 = np.stack(loss_test0_h)
                    ii = np.stack(i_h)
                    plt.semilogy(ii, losst, "r", label="Training loss")
                    plt.semilogy(ii, lossv, "b", label="Test loss")
                    plt.semilogy(ii, lossv0, "b", label="Test loss0")
                    plt.xlabel("Number of epochs")
                    plt.ylabel("Loss")
                    plt.title("Training and test")
                    plt.legend()
                    plt.savefig(store_path + "Training_test0.png", dpi=300)
                    plt.tight_layout()
                    plt.close(fig)
                    # Same plot without the second test set.
                    fig = plt.figure()
                    losst = np.stack(loss_train_h)
                    lossv = np.stack(loss_test_h)
                    lossv0 = np.stack(loss_test0_h)
                    ii = np.stack(i_h)
                    plt.semilogy(ii, losst, "r", label="Training loss")
                    plt.semilogy(ii, lossv, "b", label="Test loss")
                    plt.xlabel("Number of epochs")
                    plt.ylabel("Loss")
                    plt.title("Training and test")
                    plt.legend()
                    plt.savefig(store_path + "Training_test.png", dpi=300)
                    plt.tight_layout()
                    plt.close(fig)
                    with open(store_path + "training_validation.txt", "a") as f:
                        f.write(
                            "Epoch: "
                            + str(i + 1)
                            + " Training loss: "
                            + str(loss_train)
                            + " Test loss: "
                            + str(loss_test)
                            + " Test loss0: "
                            + str(loss_test0)
                            + " RelErr: "
                            + str(error)
                            + "\n\n"
                        )
                    print(
                        "\n",
                        "Epoch: ",
                        i + 1,
                        "Training loss: ",
                        loss_train,
                        "Test loss: ",
                        loss_test,
                        "Test loss0: ",
                        loss_test0,
                        "Rel_Err: ",
                        error,
                    )
                    np.savetxt(store_path + "loss_train.txt", losst)
                    np.savetxt(store_path + "loss_test.txt", lossv)
                    np.savetxt(store_path + "loss-test0.txt", lossv0)
                    np.savetxt(store_path + "ii.txt", ii)
                sess.run(train_op_Adam, feed_train_batch)
    tt1 = time.time()
    print("Training and validation CPU time: ", tt1 - tt0)
# Test branch: restore the best checkpoint, report losses/relative errors on
# both test sets, and plot predicted vs. true values.
else:
    tt0 = time.time()
    with tf.Session() as sess:
        saver.restore(sess, store_path + "paras_NN.ckpt")
        feed_test = {x_u: X_u_test, x_y: X_y_test, y: Y_test}
        feed_test0 = {x_u: X_u_test0, x_y: X_y_test0, y: Y_test0}
        # feed_train = {x_u: X_u_train, x_y: X_y_train, y: Y_train}
        feed_valid = {x_u: X_u_test, x_y: X_y_test, y: Y_test}
        # train_loss = sess.run(loss, feed_train)
        valid_loss = sess.run(loss, feed_valid)
        test_loss, Y_pred = sess.run([loss, net_o], feed_test)
        test_loss0, Y_pred0 = sess.run([loss, net_o], feed_test0)
        test_err = np.linalg.norm(Y_pred - Y_test) / np.linalg.norm(Y_test)
        test_err0 = np.linalg.norm(Y_pred0 - Y_test0) / np.linalg.norm(Y_test0)
        with open(store_path + "test.txt", "a") as f:
            f.write(
                " Validation loss: "
                + str(valid_loss)
                + " Test loss: "
                + str(test_loss)
                + " Test loss0: "
                + str(test_loss0)
                + " RelErr: "
                + str(test_err)
                + "\n\n"
            )
        print(
            "Valid_loss: ",
            valid_loss,
            "Test_loss: ",
            test_loss,
            "test rel_Err: ",
            test_err,
            "Test_loss0: ",
            test_loss0,
            "test rel_Err0: ",
            test_err0,
        )
        # np.savetxt('Y_pred.txt', Y_pred)
        fig = plt.figure()
        plt.plot(Y_pred, Y_test, "r.", Y_test, Y_test, "b:")
        plt.savefig(store_path + "prediction.png", dpi=300)
        plt.close(fig)
        # rr = X_y_test[:100,0].reshape((10,10))
        # tt = X_y_test[:100,1].reshape((10,10))
        # fig = plt.figure()
        # plt.subplot(121)
        # plt.contourf(rr*np.cos(tt), rr*np.sin(tt), Y_pred[:100].reshape(rr.shape),100,cmap='jet')
        # plt.colorbar()
        # plt.subplot(122)
        # plt.contourf(rr*np.cos(tt), rr*np.sin(tt), Y_test[:100].reshape(rr.shape),100,cmap='jet')
        # plt.colorbar()
        # plt.title(r'$\alpha= $'+str(X_y_test[0,-1]))
        # plt.tight_layout()
        # plt.savefig(store_path+'prediction1_fabricated.png',dpi=300)
        # plt.close(fig)
        # fig = plt.figure()
        # plt.plot(X_y_test[0:9,0:1].flatten(), Y_pred[0:9,0:1].flatten(),'r',label='pred: '+r'$G\{u\}(y,0.01)$')
        # plt.plot(X_y_test[0:9,0:1].flatten(), Y_test[0:9,0:1].flatten(),'b',label='test: '+r'$\frac{d^{0.01}u}{dy^0.01}(y)$')
        # plt.title('Prediction ' +r' $G\{u\}(y,\alpha=0.01)\approx \frac{d^{0.01}u}{dy^0.01}(y)$')
        # plt.xlabel('y')
        # plt.ylabel(r'$G\{u\}(y,\alpha)$')
        # plt.tight_layout()
        # plt.legend()
        # plt.savefig(store_path+'prediction1.png',dpi=500)
        #
        # fig = plt.figure()
        # plt.plot(X_y_test[81:,0:1].flatten(), Y_pred[81:,0:1].flatten(),'r',label='pred: '+r'$G\{u\}(y,0.99)$')
        # plt.plot(X_y_test[81:,0:1].flatten(), Y_test[81:,0:1].flatten(),'b',label='test: '+r'$\frac{d^{0.99}u}{dy^0.99}(y)$')
        # plt.title('Prediction ' +r' $G\{u\}(y,\alpha=0.99) \approx \frac{d^{0.99}u}{dy^0.99}(y)$')
        # plt.xlabel('y')
        # plt.ylabel(r'$G\{u\}(y,\alpha)$')
        # plt.tight_layout()
        # plt.legend()
        # plt.savefig(store_path+'prediction2.png',dpi=500)
        #
        # plt.show()
        # plt.close(fig)
    tt1 = time.time()
    print("Test CPU time: ", tt1 - tt0)
| 35.256345 | 132 | 0.490102 |
db46c33347f268579a274df93c9ff30896a99924 | 2,903 | py | Python | girder/exceptions.py | HailLab/girder | 974d869e6f53ec87a5e64730fee27eb6314fc006 | [
"Apache-2.0"
] | null | null | null | girder/exceptions.py | HailLab/girder | 974d869e6f53ec87a5e64730fee27eb6314fc006 | [
"Apache-2.0"
] | null | null | null | girder/exceptions.py | HailLab/girder | 974d869e6f53ec87a5e64730fee27eb6314fc006 | [
"Apache-2.0"
] | 1 | 2017-02-27T16:11:54.000Z | 2017-02-27T16:11:54.000Z | class GirderBaseException(Exception):
"""
A class from which all Girder exceptions are based.
"""
pass
class AccessException(GirderBaseException):
    """Raised when the current user is denied access to a resource."""

    def __init__(self, message, extra=None):
        # Delegate to the base exception first, then record the human-readable
        # reason and any structured details for REST-layer consumers.
        super(AccessException, self).__init__(message)
        self.message = message
        self.extra = extra
class GirderException(GirderBaseException):
    """A non-catastrophic failure encountered during regular use.

    An optional machine-readable ``identifier`` lets receivers distinguish
    error conditions without relying on the message text. Identifiers should
    be dot-separated strings naming the originating python module plus a
    distinct error, e.g. ``'girder.model.assetstore.no-current-assetstore'``.
    """

    def __init__(self, message, identifier=None):
        super(GirderException, self).__init__(message)
        self.message = message
        self.identifier = identifier
class NoAssetstoreAdapter(GirderException):
    """
    Raised when no assetstore adapter is available.
    """
    # Machine-readable identifier (see GirderException) for this condition.
    identifier = 'girder.utility.assetstore.no-adapter'

    def __init__(self, message='No assetstore adapter'):
        super(NoAssetstoreAdapter, self).__init__(message, self.identifier)
class ValidationException(GirderBaseException):
    """Signals a validation failure in the model layer.

    Carries an optional ``field`` naming the invalid property. When one of
    these is thrown in the model during a REST request, it is reported to the
    client as a 400 status.
    """

    def __init__(self, message, field=None):
        super(ValidationException, self).__init__(message)
        self.message = message
        self.field = field
class ResourcePathNotFound(ValidationException):
    """
    A special case of ValidationException representing the case when the
    resource at a given path does not exist.
    """
    pass
class RestException(GirderBaseException):
    """Signals an incorrect request (i.e. user/client error) at the REST layer.

    Login and permission failures should use HTTP status 403; nearly all other
    validation errors should keep the default status of 400.
    """

    def __init__(self, message, code=400, extra=None):
        super(RestException, self).__init__(message)
        self.message = message
        self.code = code
        self.extra = extra
class FilePathException(GirderException):
    """
    Thrown when a file path is requested and cannot be returned.
    """
    # Machine-readable identifier (see GirderException) for this condition.
    identifier = 'girder.utility.assetstore.file-path-not-available'

    # Bug fix: the default message previously read 'No assetstore adapter',
    # copy-pasted from NoAssetstoreAdapter, which contradicted this class's
    # purpose and identifier. Callers that pass an explicit message are
    # unaffected.
    def __init__(self, message='File path not available', identifier=None):
        super(FilePathException, self).__init__(message, identifier or self.identifier)
| 29.622449 | 87 | 0.704099 |
1591ffffcab144e30ebe55d255f766cb67ff1500 | 12,735 | py | Python | 2_training/Custom_Model/tensorflow/keras_script_mode_pipe_mode_horovod/source_dir/cifar10_keras_main.py | RyutaroHashimoto/aws_sagemaker | fabe4727498c1f2807cda29df8d35c71cc1b27bd | [
"MIT"
] | null | null | null | 2_training/Custom_Model/tensorflow/keras_script_mode_pipe_mode_horovod/source_dir/cifar10_keras_main.py | RyutaroHashimoto/aws_sagemaker | fabe4727498c1f2807cda29df8d35c71cc1b27bd | [
"MIT"
] | null | null | null | 2_training/Custom_Model/tensorflow/keras_script_mode_pipe_mode_horovod/source_dir/cifar10_keras_main.py | RyutaroHashimoto/aws_sagemaker | fabe4727498c1f2807cda29df8d35c71cc1b27bd | [
"MIT"
] | null | null | null | # Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# https://aws.amazon.com/apache-2-0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
import json
import logging
import os
import re
import keras
import tensorflow as tf
from keras import backend as K
from keras.callbacks import TensorBoard, ModelCheckpoint
from keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D, BatchNormalization
from keras.models import Sequential
from keras.optimizers import Adam, SGD, RMSprop
# Emit INFO-level logs from both the python logging module and TensorFlow.
logging.getLogger().setLevel(logging.INFO)
tf.logging.set_verbosity(tf.logging.INFO)
# CIFAR-10 image geometry and dataset layout.
HEIGHT = 32
WIDTH = 32
DEPTH = 3
NUM_CLASSES = 10
NUM_DATA_BATCHES = 5
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 10000 * NUM_DATA_BATCHES
INPUT_TENSOR_NAME = 'inputs_input' # needs to match the name of the first layer + "_input"
def keras_model_fn(learning_rate, weight_decay, optimizer, momentum, mpi=False, hvd=False):
    """Build and compile the CIFAR-10 CNN.

    keras_model_fn receives hyperparameters from the training job and returns
    a compiled keras model. The model is transformed into a TensorFlow
    Estimator before training and saved in a TensorFlow Serving SavedModel at
    the end of training.

    :param learning_rate: base learning rate; multiplied by hvd.size() when
        running distributed so each worker's effective rate is scaled.
    :param weight_decay: passed as the Keras optimizer's ``decay`` argument.
        NOTE(review): Keras ``decay`` is learning-rate decay, not true weight
        decay -- confirm this is the intended behavior.
    :param optimizer: 'sgd', 'rmsprop', or anything else (falls back to Adam).
    :param momentum: momentum term, used only by SGD.
    :param mpi: True when running under Horovod/MPI.
    :param hvd: the horovod.keras module when mpi is True (False/None otherwise).
    """
    # Three conv stages (32 -> 64 -> 128 filters), each with two
    # Conv-BatchNorm-ReLU blocks followed by max-pooling and dropout, then a
    # 512-unit dense head and a softmax over the 10 classes.
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', name='inputs', input_shape=(HEIGHT, WIDTH, DEPTH)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))
    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(128, (3, 3)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES))
    model.add(Activation('softmax'))
    # Horovod: scale the learning rate by the number of workers.
    size = 1
    if mpi:
        size = hvd.size()
    if optimizer.lower() == 'sgd':
        opt = SGD(lr=learning_rate * size, decay=weight_decay, momentum=momentum)
    elif optimizer.lower() == 'rmsprop':
        opt = RMSprop(lr=learning_rate * size, decay=weight_decay)
    else:
        opt = Adam(lr=learning_rate * size, decay=weight_decay)
    if mpi:
        # Horovod: wrap the optimizer so gradients are averaged across workers.
        opt = hvd.DistributedOptimizer(opt)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
def train_input_fn():
    """Batched, shuffled input pipeline for the 'train' channel."""
    return _input(args.epochs, args.batch_size, args.train, 'train')
def eval_input_fn():
    """Batched (unshuffled) input pipeline for the 'eval' channel."""
    return _input(args.epochs, args.batch_size, args.eval, 'eval')
def validation_input_fn():
    """Batched (unshuffled) input pipeline for the 'validation' channel."""
    return _input(args.epochs, args.batch_size, args.validation, 'validation')
def _get_filenames(channel_name, channel):
if channel_name in ['train', 'validation', 'eval']:
return [os.path.join(channel, channel_name + '.tfrecords')]
else:
raise ValueError('Invalid data subset "%s"' % channel_name)
def _input(epochs, batch_size, channel, channel_name):
    """Uses the tf.data input pipeline for CIFAR-10 dataset.

    Returns a ``({INPUT_TENSOR_NAME: image_batch}, label_batch)`` pair of
    tensors. The ``epochs`` argument is currently unused: the dataset repeats
    indefinitely and the epoch length is enforced via steps_per_epoch in
    model.fit.
    """
    mode = args.data_config[channel_name]['TrainingInputMode']
    logging.info("Running {} in {} mode".format(channel_name, mode))
    if mode == 'Pipe':
        # SageMaker Pipe mode streams records directly; no local files used.
        from sagemaker_tensorflow import PipeModeDataset
        dataset = PipeModeDataset(channel=channel_name, record_format='TFRecord')
    else:
        filenames = _get_filenames(channel_name, channel)
        dataset = tf.data.TFRecordDataset(filenames)
    # Repeat infinitely.
    dataset = dataset.repeat()
    dataset = dataset.prefetch(10)
    # Parse records.
    dataset = dataset.map(_dataset_parser, num_parallel_calls=10)
    # Potentially shuffle records.
    if channel_name == 'train':
        # Ensure that the capacity is sufficiently large to provide good random shuffling.
        buffer_size = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * 0.4) + 3 * batch_size
        dataset = dataset.shuffle(buffer_size=buffer_size)
    # Batch it up.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    image_batch, label_batch = iterator.get_next()
    return {INPUT_TENSOR_NAME: image_batch}, label_batch
def _train_preprocess_fn(image):
    """Augment a single training image of layout [height, width, depth].

    Pads to (HEIGHT + 8, WIDTH + 8), takes a random HEIGHT x WIDTH crop, and
    randomly mirrors the result horizontally.
    """
    padded = tf.image.resize_image_with_crop_or_pad(image, HEIGHT + 8, WIDTH + 8)
    cropped = tf.random_crop(padded, [HEIGHT, WIDTH, DEPTH])
    return tf.image.random_flip_left_right(cropped)
def _dataset_parser(value):
    """Parse one serialized CIFAR-10 TFRecord into (float image, one-hot label)."""
    featdef = {
        'image': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
    }
    example = tf.parse_single_example(value, featdef)
    image = tf.decode_raw(example['image'], tf.uint8)
    image.set_shape([DEPTH * HEIGHT * WIDTH])
    # Reshape from [depth * height * width] to [depth, height, width].
    image = tf.cast(
        tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),
        tf.float32,
    )
    label = tf.cast(example['label'], tf.int32)
    # NOTE(review): the random crop/flip augmentation is applied to every
    # channel, including validation and eval -- confirm this is intended.
    image = _train_preprocess_fn(image)
    return image, tf.one_hot(label, NUM_CLASSES)
def save_model(model, output):
    """Export the trained Keras model as a TF Serving SavedModel under <output>/1/."""
    # Serving signature: the model input tensor is exposed as 'image', the
    # softmax output as 'scores'.
    signature = tf.saved_model.signature_def_utils.predict_signature_def(
        inputs={'image': model.input}, outputs={'scores': model.output}
    )
    # '/1/' is the model version directory expected by TensorFlow Serving.
    builder = tf.saved_model.builder.SavedModelBuilder(output + '/1/')
    builder.add_meta_graph_and_variables(
        sess=K.get_session(),
        tags=[tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                signature
        },
    )
    builder.save()
    logging.info("Model successfully saved at: {}".format(output))
def main(args):
    """Train the CIFAR-10 CNN (optionally distributed via Horovod) and save it.

    Expects the argparse namespace built by the CLI at the bottom of this
    file. Side effects: writes TensorBoard logs, per-epoch checkpoints under
    args.output_dir, and a TensorFlow Serving SavedModel under
    args.model_output_dir.
    """
    # SageMaker stores the module dir as .../source/sourcedir.tar.gz; redirect
    # TensorBoard output to the sibling 'model' directory in that case.
    if 'sourcedir.tar.gz' in args.tensorboard_dir:
        tensorboard_dir = re.sub('source/sourcedir.tar.gz', 'model', args.tensorboard_dir)
    else:
        tensorboard_dir = args.tensorboard_dir
    logging.info("Writing TensorBoard logs to {}".format(tensorboard_dir))
    mpi = False
    # Bug fix: hvd must be bound even when 'sagemaker_mpi_enabled' is present
    # in fw_params but falsy. Previously that path executed neither the inner
    # assignment nor the else branch, so the keras_model_fn(...) call below
    # raised NameError on hvd.
    hvd = None
    if args.fw_params.get('sagemaker_mpi_enabled'):
        import horovod.keras as hvd
        mpi = True
        # Horovod: initialize Horovod.
        hvd.init()
        # Horovod: pin GPU to be used to process local rank (one GPU per process)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = str(hvd.local_rank())
        K.set_session(tf.Session(config=config))
    logging.info("Running with MPI={}".format(mpi))
    logging.info("getting data")
    train_dataset = train_input_fn()
    eval_dataset = eval_input_fn()
    validation_dataset = validation_input_fn()
    logging.info("configuring model")
    model = keras_model_fn(args.learning_rate, args.weight_decay, args.optimizer, args.momentum, mpi, hvd)
    callbacks = []
    if mpi:
        # Horovod: sync initial weights, average metrics across workers, and
        # warm up the learning rate; only rank 0 writes checkpoints and
        # TensorBoard logs to avoid workers clobbering each other.
        callbacks.append(hvd.callbacks.BroadcastGlobalVariablesCallback(0))
        callbacks.append(hvd.callbacks.MetricAverageCallback())
        callbacks.append(hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=5, verbose=1))
        callbacks.append(keras.callbacks.ReduceLROnPlateau(patience=10, verbose=1))
        if hvd.rank() == 0:
            callbacks.append(ModelCheckpoint(args.output_dir + '/checkpoint-{epoch}.h5'))
            callbacks.append(TensorBoard(log_dir=tensorboard_dir, update_freq='epoch'))
    else:
        callbacks.append(keras.callbacks.ReduceLROnPlateau(patience=10, verbose=1))
        callbacks.append(ModelCheckpoint(args.output_dir + '/checkpoint-{epoch}.h5'))
        callbacks.append(TensorBoard(log_dir=tensorboard_dir, update_freq='epoch'))
    logging.info("Starting training")
    # Each worker covers 1/size of the steps per epoch.
    size = 1
    if mpi:
        size = hvd.size()
    model.fit(x=train_dataset[0],
              y=train_dataset[1],
              steps_per_epoch=(num_examples_per_epoch('train') // args.batch_size) // size,
              epochs=args.epochs,
              validation_data=validation_dataset,
              validation_steps=(num_examples_per_epoch('validation') // args.batch_size) // size,
              callbacks=callbacks)
    score = model.evaluate(eval_dataset[0],
                           eval_dataset[1],
                           steps=num_examples_per_epoch('eval') // args.batch_size,
                           verbose=0)
    logging.info('Test loss:{}'.format(score[0]))
    logging.info('Test accuracy:{}'.format(score[1]))
    # Horovod: Save model only on worker 0 (i.e. master)
    if mpi:
        if hvd.rank() == 0:
            save_model(model, args.model_output_dir)
    else:
        save_model(model, args.model_output_dir)
def num_examples_per_epoch(subset='train'):
    """Number of examples in the given CIFAR-10 split used by this script.

    :param subset: 'train', 'validation' or 'eval'.
    :raises ValueError: for any other subset name.
    """
    sizes = {'train': 40000, 'validation': 10000, 'eval': 10000}
    if subset not in sizes:
        raise ValueError('Invalid data subset "%s"' % subset)
    return sizes[subset]
if __name__ == '__main__':
    # CLI: data channels and hyper-parameters default to the SageMaker
    # environment variables (SM_*) but can be overridden on the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--train',
        type=str,
        required=False,
        default=os.environ.get('SM_CHANNEL_TRAIN'),
        help='The directory where the CIFAR-10 input data is stored.')
    parser.add_argument(
        '--validation',
        type=str,
        required=False,
        default=os.environ.get('SM_CHANNEL_VALIDATION'),
        help='The directory where the CIFAR-10 input data is stored.')
    parser.add_argument(
        '--eval',
        type=str,
        required=False,
        default=os.environ.get('SM_CHANNEL_EVAL'),
        help='The directory where the CIFAR-10 input data is stored.')
    # model_dir is supplied by SageMaker; it is not read inside this script.
    parser.add_argument(
        '--model_dir',
        type=str,
        required=True,
        help='The directory where the model will be stored.')
    parser.add_argument(
        '--model_output_dir',
        type=str,
        default=os.environ.get('SM_MODEL_DIR'))
    parser.add_argument(
        '--output-dir',
        type=str,
        default=os.environ.get('SM_OUTPUT_DIR'))
    parser.add_argument(
        '--tensorboard-dir',
        type=str,
        default=os.environ.get('SM_MODULE_DIR'))
    parser.add_argument(
        '--weight-decay',
        type=float,
        default=2e-4,
        help='Weight decay for convolutions.')
    parser.add_argument(
        '--learning-rate',
        type=float,
        default=0.001,
        help="""\
        This is the inital learning rate value. The learning rate will decrease
        during training. For more details check the model_fn implementation in
        this file.\
        """)
    parser.add_argument(
        '--epochs',
        type=int,
        default=10,
        help='The number of steps to use for training.')
    parser.add_argument(
        '--batch-size',
        type=int,
        default=128,
        help='Batch size for training.')
    # These two arrive as JSON strings from SageMaker and are parsed to dicts.
    parser.add_argument(
        '--data-config',
        type=json.loads,
        default=os.environ.get('SM_INPUT_DATA_CONFIG')
    )
    parser.add_argument(
        '--fw-params',
        type=json.loads,
        default=os.environ.get('SM_FRAMEWORK_PARAMS')
    )
    parser.add_argument(
        '--optimizer',
        type=str,
        default='adam'
    )
    parser.add_argument(
        '--momentum',
        type=float,
        default='0.9'
    )  # argparse applies type() to string defaults, so this becomes float 0.9
    args = parser.parse_args()
    main(args)
| 33.869681 | 106 | 0.659757 |
fec1b9ecf44766481f7c2e285818d04b2bde7344 | 397 | py | Python | login_rest/login_rest/wsgi.py | noctilukkas/api-login-token-drf | 6f15571da8ecaf4588674b1e59dbe25c7520cc28 | [
"MIT"
] | null | null | null | login_rest/login_rest/wsgi.py | noctilukkas/api-login-token-drf | 6f15571da8ecaf4588674b1e59dbe25c7520cc28 | [
"MIT"
] | null | null | null | login_rest/login_rest/wsgi.py | noctilukkas/api-login-token-drf | 6f15571da8ecaf4588674b1e59dbe25c7520cc28 | [
"MIT"
] | null | null | null | """
WSGI config for login_rest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module unless the environment
# already provides one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'login_rest.settings')
# Module-level WSGI callable that application servers import and call.
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
3b05010094dd5c66306316b6610b2cbb504cfb76 | 7,605 | py | Python | tungsten_tempest_plugin/hacking/checks.py | Goutham-Pratapa/tungsten-tempest | 966a2f2795435314c91e0d236040412d95fa2e96 | [
"Apache-2.0"
] | 1 | 2019-04-29T09:00:16.000Z | 2019-04-29T09:00:16.000Z | tungsten_tempest_plugin/hacking/checks.py | Goutham-Pratapa/tungsten-tempest | 966a2f2795435314c91e0d236040412d95fa2e96 | [
"Apache-2.0"
] | 11 | 2018-12-04T14:20:27.000Z | 2019-05-30T14:37:13.000Z | tungsten_tempest_plugin/hacking/checks.py | Goutham-Pratapa/tungsten-tempest | 966a2f2795435314c91e0d236040412d95fa2e96 | [
"Apache-2.0"
] | 9 | 2018-07-26T18:20:45.000Z | 2020-03-27T17:40:56.000Z | # Copyright 2013 IBM Corp.
# Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import pycodestyle
# Names of in-tree python client libraries; importing "<name>client" directly
# from API tests is banned (T102, import_no_clients_in_api_tests).
PYTHON_CLIENTS = ['contrail']
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
# A test method definition line, e.g. "    def test_foo(self):".
TEST_DEFINITION = re.compile(r'^\s*def test.*')
# setUpClass/tearDownClass definitions (banned in tests, T105).
SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
# "@...services(...)" decorator; group 1 captures the service list (T107).
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
# vi/vim modeline comments such as "# vim: ..." (T106).
VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
# rand_name() call whose string argument ends in "-" or "_" (T108).
RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")
# def with a mutable ({} or []) default argument value (N322).
MUTABLE_DEFAULT_ARGS = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
# @testtools.skip(...) decorator (banned, T109).
TESTTOOLS_SKIP_DECORATOR = re.compile(r'\s*@testtools\.skip\((.*)\)')
# Any class definition, and the RBAC class-name convention (P102).
CLASS = re.compile(r"^class .+")
RBAC_CLASS_NAME_RE = re.compile(r'class .+RbacTest')
# @rbac_rule_validation.action(...) decorator (P100).
RULE_VALIDATION_DECORATOR = re.compile(
    r'\s*@rbac_rule_validation.action\(.*')
# @idempotent_id(...) decorator; currently not used by any check below.
IDEMPOTENT_ID_DECORATOR = re.compile(r'\s*@idempotent_id\((.*)\)')
# Module-level state for the P100 check: set when the decorator is seen,
# cleared after the following test definition has been checked.
have_rbac_decorator = False
def import_no_clients_in_api_tests(physical_line, filename):
    """Check for client imports from tungsten_tempest_plugin/tests/api

    T102: Cannot import python clients
    """
    # Bug fix: both path fragments were misspelled ("tugnsten"), so this
    # check could never match any file under the real plugin tree.
    if "tungsten_tempest_plugin/tests/api" in filename:
        res = PYTHON_CLIENT_RE.match(physical_line)
        if res:
            # Report at the column where the offending client name starts.
            return (physical_line.find(res.group(1)),
                    ("T102: python clients import not allowed "
                     "in tungsten_tempest_plugin/tests/api/* or "
                     "tungsten_tempest_plugin/tests/scenario/* tests"))
def no_setup_teardown_class_for_tests(physical_line, filename):
    """T105: forbid setUpClass/tearDownClass in tests.

    Lines carrying a ``# noqa`` marker are exempt.
    """
    exempt = pycodestyle.noqa(physical_line)
    if not exempt and SETUP_TEARDOWN_CLASS_DEFINITION.match(physical_line):
        return (physical_line.find('def'),
                "T105: (setUp|tearDown)Class can not be used in tests")
def no_vi_headers(physical_line, line_number, lines):
    """T106: disallow vi/vim modeline comments in source files.

    Only the first and last five lines are inspected, since that is where
    editors look for modelines.  ``line_number`` is 1-indexed.
    """
    near_top = line_number <= 5
    near_bottom = line_number > len(lines) - 5
    if (near_top or near_bottom) and VI_HEADER_RE.match(physical_line):
        return 0, "T106: Don't put vi configuration in source files"
def service_tags_not_in_module_path(physical_line, filename):
    """T107: a @...services() tag must not repeat a name already in the path."""
    decorator = SCENARIO_DECORATOR.match(physical_line)
    if not decorator:
        return
    # The directory portion of the file being checked.
    module_dir = os.path.split(filename)[0]
    for tag in decorator.group(1).split(','):
        service_name = tag.strip().strip("'")
        if service_name in module_dir:
            return (physical_line.find(service_name),
                    "T107: service tag should not be in path")
def no_hyphen_at_end_of_rand_name(logical_line, filename):
    """T108: rand_name() arguments must not end with a hyphen or underscore."""
    if RAND_NAME_HYPHEN_RE.match(logical_line):
        return 0, "T108: hyphen should not be specified at the end of rand_name()"
def no_mutable_default_args(logical_line):
    """N322: flag mutable ({} or []) values used as default arguments."""
    if MUTABLE_DEFAULT_ARGS.match(logical_line):
        yield (0, "N322: Method's default argument shouldn't be mutable!")
def no_testtools_skip_decorator(logical_line):
    """T109: use tempest.lib's decorators.skip_because, not testtools.skip."""
    if TESTTOOLS_SKIP_DECORATOR.match(logical_line):
        yield (0, "T109: Cannot use testtools.skip decorator; instead use "
                  "decorators.skip_because from tempest.lib")
def use_rand_uuid_instead_of_uuid4(logical_line, filename):
    """T113: tests must call data_utils.rand_uuid()/rand_uuid_hex() rather
    than the uuid module directly.
    """
    if 'uuid.uuid4()' in logical_line:
        yield (0, "T113: Tests should use data_utils.rand_uuid()/rand_uuid_hex() "
                  "instead of uuid.uuid4()/uuid.uuid4().hex")
def no_rbac_rule_validation_decorator(physical_line, filename):
    """P100: every API/scenario test must carry @rbac_rule_validation.action.

    Uses the module-level ``have_rbac_decorator`` flag to remember whether
    the decorator appeared on a line before the test definition; the flag
    is reset once the test definition has been checked.
    """
    global have_rbac_decorator

    in_scope = ("tungsten_tempest_plugin/tests/api" in filename or
                "tungsten_tempest_plugin/tests/scenario" in filename)
    if not in_scope:
        return
    if RULE_VALIDATION_DECORATOR.match(physical_line):
        have_rbac_decorator = True
        return
    if TEST_DEFINITION.match(physical_line):
        if not have_rbac_decorator:
            return (0, "Must use rbac_rule_validation.action "
                       "decorator for API and scenario tests")
        have_rbac_decorator = False
def no_rbac_suffix_in_test_filename(filename):
    """P101: API test filenames must end with an ``_rbac`` suffix.

    ``rbac_base.py`` is exempt; files outside the API test tree are ignored.
    """
    if "tungsten_tempest_plugin/tests/api" not in filename:
        return
    if filename.endswith('rbac_base.py') or filename.endswith('_rbac.py'):
        return
    return 0, "RBAC test filenames must end in _rbac suffix"
def no_rbac_test_suffix_in_test_class_name(physical_line, filename):
    """Check that RBAC class names end with "RbacTest"

    P102
    """
    # Bug fix: the path fragment was misspelled ("tunsgten"), so this check
    # could never match any file under the real plugin tree.
    if "tungsten_tempest_plugin/tests/api" in filename:
        if filename.endswith('rbac_base.py'):
            return
        if CLASS.match(physical_line):
            if not RBAC_CLASS_NAME_RE.match(physical_line):
                return 0, "RBAC test class names must end in 'RbacTest'"
def no_client_alias_in_test_cases(logical_line, filename):
    """P103: service clients must not be aliased as ``self.client``."""
    in_api_tests = "tungsten_tempest_plugin/tests/api" in filename
    has_alias = "self.client" in logical_line or "cls.client" in logical_line
    if in_api_tests and has_alias:
        return 0, "Do not use 'self.client' as a service client alias"
def factory(register):
    """Register all local checks with the hacking/flake8 framework.

    hacking calls this with a ``register`` callback; a check defined in this
    module only runs if it is registered here.
    """
    register(import_no_clients_in_api_tests)
    register(no_setup_teardown_class_for_tests)
    register(no_vi_headers)
    register(no_hyphen_at_end_of_rand_name)
    register(no_mutable_default_args)
    register(no_testtools_skip_decorator)
    register(use_rand_uuid_instead_of_uuid4)
    register(service_tags_not_in_module_path)
    register(no_rbac_rule_validation_decorator)
    register(no_rbac_suffix_in_test_filename)
    register(no_rbac_test_suffix_in_test_class_name)
    # Bug fix: the P103 check was defined above but never registered,
    # so it silently never ran.
    register(no_client_alias_in_test_cases)
| 33.8 | 78 | 0.692702 |
c4cb9e23568cab5a5dc66bcbea1eec5bbf5be261 | 645 | py | Python | pyOCD/utility/__init__.py | orenc17/pyOCD | b5c9bc62b68323129aa258e128a8fc68aaa2527f | [
"Apache-2.0"
] | null | null | null | pyOCD/utility/__init__.py | orenc17/pyOCD | b5c9bc62b68323129aa258e128a8fc68aaa2527f | [
"Apache-2.0"
] | null | null | null | pyOCD/utility/__init__.py | orenc17/pyOCD | b5c9bc62b68323129aa258e128a8fc68aaa2527f | [
"Apache-2.0"
] | null | null | null | """
mbed CMSIS-DAP debugger
Copyright (c) 2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import conversion
import cmdline
import mask
| 30.714286 | 73 | 0.773643 |
1e8d81bb5df569736d7eeb015c6b05215b96d8fc | 432 | py | Python | utils/utils.py | w7cep/Froakie | 60f116b85e96d211f3055c8da005d94a048a0df1 | [
"MIT"
] | 1 | 2021-11-09T07:33:12.000Z | 2021-11-09T07:33:12.000Z | utils/utils.py | w7cep/Froakie | 60f116b85e96d211f3055c8da005d94a048a0df1 | [
"MIT"
] | null | null | null | utils/utils.py | w7cep/Froakie | 60f116b85e96d211f3055c8da005d94a048a0df1 | [
"MIT"
] | null | null | null | import re
import config
def blockquote(string: str) -> str:
    """Prefix every line of the stripped string with "> " (markdown quote)."""
    stripped = string.strip()
    if not stripped:
        # Nothing left after stripping: keep the empty string untouched.
        return ""
    return "\n".join("> " + line for line in stripped.split("\n"))
def custom_id(view: str, id: int) -> str:
    """Build a component custom_id as "<bot name>:<view>:<identifier>"."""
    # NOTE: the parameter name ``id`` shadows the builtin; kept unchanged
    # because it is part of the public signature.
    parts = (config.BOT_NAME, view, id)
    return "{}:{}:{}".format(*parts)
333f9703bd309f93aa352bc1fade1322fd919752 | 6,538 | py | Python | venv/lib/python2.7/site-packages/ansible/modules/windows/win_regedit.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | venv/lib/python2.7/site-packages/ansible/modules/windows/win_regedit.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | venv/lib/python2.7/site-packages/ansible/modules/windows/win_regedit.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Adam Keech <akeech@chathamfinancial.com>, Josh Ludwig <jludwig@chathamfinancial.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_regedit
version_added: '2.0'
short_description: Add, change, or remove registry keys and values
description:
- Add, modify or remove registry keys and values.
- More information about the windows registry from Wikipedia
U(https://en.wikipedia.org/wiki/Windows_Registry).
options:
path:
description:
- Name of the registry path.
- 'Should be in one of the following registry hives: HKCC, HKCR, HKCU,
HKLM, HKU.'
required: yes
aliases: [ key ]
name:
description:
- Name of the registry entry in the above C(path) parameters.
- If not provided, or empty then the '(Default)' property for the key will
be used.
aliases: [ entry ]
data:
description:
- Value of the registry entry C(name) in C(path).
- If not specified then the value for the property will be null for the
corresponding C(type).
- Binary and None data should be expressed in a yaml byte array or as comma
separated hex values.
- An easy way to generate this is to run C(regedit.exe) and use the
I(export) option to save the registry values to a file.
- In the exported file, binary value will look like C(hex:be,ef,be,ef), the
C(hex:) prefix is optional.
- DWORD and QWORD values should either be represented as a decimal number
or a hex value.
- Multistring values should be passed in as a list.
- See the examples for more details on how to format this data.
type:
description:
- The registry value data type.
choices: [ binary, dword, expandstring, multistring, string, qword ]
default: string
aliases: [ datatype ]
state:
description:
- The state of the registry entry.
choices: [ absent, present ]
default: present
delete_key:
description:
- When C(state) is 'absent' then this will delete the entire key.
- If C(no) then it will only clear out the '(Default)' property for
that key.
type: bool
default: 'yes'
version_added: '2.4'
hive:
description:
- A path to a hive key like C:\Users\Default\NTUSER.DAT to load in the
registry.
- This hive is loaded under the HKLM:\ANSIBLE key which can then be used
in I(name) like any other path.
- This can be used to load the default user profile registry hive or any
other hive saved as a file.
- Using this function requires the user to have the C(SeRestorePrivilege)
and C(SeBackupPrivilege) privileges enabled.
version_added: '2.5'
notes:
- Check-mode C(-C/--check) and diff output C(-D/--diff) are supported, so that you can test every change against the active configuration before
applying changes.
- Beware that some registry hives (C(HKEY_USERS) in particular) do not allow to create new registry paths in the root folder.
- Since ansible 2.4, when checking if a string registry value has changed, a case-sensitive test is used. Previously the test was case-insensitive.
author:
- Adam Keech (@smadam813)
- Josh Ludwig (@joshludwig)
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Create registry path MyCompany
win_regedit:
path: HKCU:\Software\MyCompany
- name: Add or update registry path MyCompany, with entry 'hello', and containing 'world'
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: world
- name: Add or update registry path MyCompany, with dword entry 'hello', and containing 1337 as the decimal value
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: 1337
type: dword
- name: Add or update registry path MyCompany, with dword entry 'hello', and containing 0xff2500ae as the hex value
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: 0xff2500ae
type: dword
- name: Add or update registry path MyCompany, with binary entry 'hello', and containing binary data in hex-string format
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: hex:be,ef,be,ef,be,ef,be,ef,be,ef
type: binary
- name: Add or update registry path MyCompany, with binary entry 'hello', and containing binary data in yaml format
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: [0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef]
type: binary
- name: Add or update registry path MyCompany, with expand string entry 'hello'
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: '%appdata%\local'
type: expandstring
- name: Add or update registry path MyCompany, with multi string entry 'hello'
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
data: ['hello', 'world']
type: multistring
- name: Disable keyboard layout hotkey for all users (changes existing)
win_regedit:
path: HKU:\.DEFAULT\Keyboard Layout\Toggle
name: Layout Hotkey
data: 3
type: dword
- name: Disable language hotkey for current users (adds new)
win_regedit:
path: HKCU:\Keyboard Layout\Toggle
name: Language Hotkey
data: 3
type: dword
- name: Remove registry path MyCompany (including all entries it contains)
win_regedit:
path: HKCU:\Software\MyCompany
state: absent
delete_key: yes
- name: Clear the existing (Default) entry at path MyCompany
win_regedit:
path: HKCU:\Software\MyCompany
state: absent
delete_key: no
- name: Remove entry 'hello' from registry path MyCompany
win_regedit:
path: HKCU:\Software\MyCompany
name: hello
state: absent
- name: Change default mouse trailing settings for new users
win_regedit:
path: HKLM:\ANSIBLE\Control Panel\Mouse
name: MouseTrails
data: 10
type: string
state: present
hive: C:\Users\Default\NTUSER.dat
'''
RETURN = r'''
data_changed:
description: whether this invocation changed the data in the registry value
returned: success
type: boolean
sample: False
data_type_changed:
description: whether this invocation changed the datatype of the registry value
returned: success
type: boolean
sample: True
'''
| 32.527363 | 147 | 0.699296 |
417aa41eb1061903e05c07057f0a30fca64c4037 | 28,918 | py | Python | bin/doc-gen.py | splunkins/security-content | 3904c837fa9003a6e1b65a6fb164231b0132f648 | [
"Apache-2.0"
] | null | null | null | bin/doc-gen.py | splunkins/security-content | 3904c837fa9003a6e1b65a6fb164231b0132f648 | [
"Apache-2.0"
] | null | null | null | bin/doc-gen.py | splunkins/security-content | 3904c837fa9003a6e1b65a6fb164231b0132f648 | [
"Apache-2.0"
] | null | null | null | import glob
import yaml
import argparse
from os import path
import sys
import re
from jinja2 import Environment, FileSystemLoader
def load_objects(file_path):
    """Load every YAML file matching *file_path* (a glob relative to the
    module-level REPO_PATH), in sorted filename order.
    """
    pattern = path.join(path.expanduser(REPO_PATH), file_path)
    return [load_file(manifest) for manifest in sorted(glob.glob(pattern))]
def load_file(file_path):
    """Parse the first YAML document in *file_path*; exit on parse errors."""
    with open(file_path, 'r') as stream:
        try:
            documents = list(yaml.safe_load_all(stream))
        except yaml.YAMLError as exc:
            print(exc)
            sys.exit("ERROR: reading {0}".format(file_path))
    return documents[0]
def prepare_content(stories, detections):
    """Cross-link stories with their detections and group stories by category.

    Each story dict is enriched in place with 'detections' (always set) and,
    when any tagged detection contributes them, 'data_models',
    'mitre_attack_ids', 'kill_chain_phases', 'ciss' and 'nists' (each a
    sorted list).  Returns a list of {'name': ..., 'stories': [...]}
    category buckets, sorted by category name.
    """
    # Per-story accumulators, keyed by story name.
    sto_to_det = {}
    sto_to_data_models = {}
    sto_to_mitre_attack_ids = {}
    sto_to_kill_chain_phases = {}
    sto_to_ciss = {}
    sto_to_nists = {}
    # Map each detection tag field to the bucket that accumulates it.
    tag_buckets = (('mitre_attack_id', sto_to_mitre_attack_ids),
                   ('kill_chain_phases', sto_to_kill_chain_phases),
                   ('cis20', sto_to_ciss),
                   ('nist', sto_to_nists))
    for detection in detections:
        for story in detection['tags'].get('analytics_story', []):
            sto_to_det.setdefault(story, set()).add(detection['name'])
            # parse_data_models_from_search is defined elsewhere in this file.
            data_model = parse_data_models_from_search(detection['search'])
            if data_model:
                sto_to_data_models.setdefault(story, set()).add(data_model)
            for field, bucket in tag_buckets:
                if field in detection['tags']:
                    bucket.setdefault(story, set()).update(detection['tags'][field])
    for story in stories:
        # Robustness fix: a story with no associated detections previously
        # raised KeyError here; it now gets an empty list.
        story['detections'] = sorted(sto_to_det.get(story['name'], []))
        if story['name'] in sto_to_data_models:
            story['data_models'] = sorted(sto_to_data_models[story['name']])
        if story['name'] in sto_to_mitre_attack_ids:
            story['mitre_attack_ids'] = sorted(sto_to_mitre_attack_ids[story['name']])
        if story['name'] in sto_to_kill_chain_phases:
            story['kill_chain_phases'] = sorted(sto_to_kill_chain_phases[story['name']])
        if story['name'] in sto_to_ciss:
            story['ciss'] = sorted(sto_to_ciss[story['name']])
        if story['name'] in sto_to_nists:
            story['nists'] = sorted(sto_to_nists[story['name']])
    # Sort stories into categories (first category tag wins).
    category_names = set()
    for story in stories:
        if 'category' in story['tags']:
            category_names.add(story['tags']['category'][0])
    categories = [{'name': category_name, 'stories': []}
                  for category_name in sorted(category_names)]
    for story in stories:
        # Robustness fix: skip uncategorized stories instead of raising
        # KeyError (the original only guarded the name-collection loop).
        category_tag = story['tags'].get('category')
        if not category_tag:
            continue
        for category in categories:
            if category['name'] == category_tag[0]:
                category['stories'].append(story)
    return categories
def write_splunk_docs(stories, detections, OUTPUT_DIR):
    """Render the Splunk wiki documentation page from the content files.

    Returns a (story_count, output_path) tuple.
    """
    categories = prepare_content(stories, detections)
    env = Environment(loader=FileSystemLoader('bin/jinja2_templates'),
                      trim_blocks=True)
    rendered = env.get_template('splunk_docs_categories.j2').render(
        categories=categories)
    output_path = OUTPUT_DIR + "/splunk_docs_categories.wiki"
    with open(output_path, 'w') as f:
        f.write(rendered)
    return len(stories), output_path
def write_markdown_docs(stories, detections, OUTPUT_DIR):
    """Render the markdown stories page from the content files.

    Returns a (story_count, output_path) tuple.
    """
    categories = prepare_content(stories, detections)
    env = Environment(loader=FileSystemLoader('bin/jinja2_templates'),
                      trim_blocks=True)
    rendered = env.get_template('stories_categories.j2').render(
        categories=categories)
    output_path = OUTPUT_DIR + "/stories_categories.md"
    with open(output_path, 'w') as f:
        f.write(rendered)
    return len(stories), output_path
# function to get unique values
def unique(list1):
# init a null list
unique_list = []
# traverse for all elements
for x in list1:
# check if exists in unique_list or not
if x not in unique_list:
unique_list.append(x)
return unique_list
def process_data_metadata(obj, complete_obj, name):
    """Copy data_metadata tagging (plus mappings and entity fields) from
    *obj* onto ``complete_obj[name]`` and return the updated dict.
    """
    target = complete_obj[name]
    metadata = obj['data_metadata']
    for field in ('data_models', 'providing_technologies', 'data_source'):
        if field in metadata:
            target[field] = metadata[field]
    if 'mappings' in obj:
        target['mappings'] = obj['mappings']
    # 'entities' (spec v2) wins over 'fields_required' (spec v1) because it
    # is applied last.
    if 'fields_required' in obj:
        target['entities'] = obj['fields_required']
    if 'entities' in obj:
        target['entities'] = obj['entities']
    return complete_obj
def process_metadata(detections, story_name):
    """Collect mappings, providing technologies and data models from every
    detection tagged with *story_name*.

    Returns a (mappings, providing_technologies, data_models) tuple.
    NOTE(review): for ``mappings``, each matching detection *replaces* the
    value stored for a key rather than merging, so only the last detection
    (in sorted-name order) wins — preserved as-is from the original.
    """
    mappings = dict()
    providing_technologies = []
    data_models = []
    for _, detection in sorted(detections.items()):
        for tagged_story in detection['stories']:
            if tagged_story != story_name:
                continue
            providing_technologies.extend(detection.get('providing_technologies', []))
            data_models.extend(detection.get('data_models', []))
            for key, values in detection['mappings'].items():
                mappings[key] = list(values)
    return mappings, providing_technologies, data_models
def generate_detections(REPO_PATH, stories):
    """Load every YAML file under <REPO_PATH>/detections/ and normalize both
    spec v1 and spec v2 detections into one dict keyed by detection name.

    ``stories`` is the dict produced by generate_stories().  Reads a
    ``verbose`` name from enclosing module scope (it is not a parameter).
    Exits the program on YAML parse errors.
    """
    # first we process detections
    detections = []
    detections_manifest_files = path.join(path.expanduser(REPO_PATH), "detections/*.yml")
    for detections_manifest_file in glob.glob(detections_manifest_files):
        # read in each detection
        with open(detections_manifest_file, 'r') as stream:
            try:
                detection = list(yaml.safe_load_all(stream))[0]
            except yaml.YAMLError as exc:
                print(exc)
                sys.exit("ERROR: reading {0}".format(detections_manifest_file))
        detections.append(detection)
    complete_detections = dict()
    for detection in detections:
        # lets process v1 detections
        if detection['spec_version'] == 1:
            if verbose:
                print("processing v1 detection: {0}".format(detection['search_name']))
            name = detection['search_name']
            # NOTE: ``type`` and ``id`` shadow builtins; kept as-is.
            type = 'splunk'
            description = detection['search_description']
            id = detection['search_id']
            # grab search information
            correlation_rule = detection['correlation_rule']
            search = detection['search']
            schedule = detection['scheduling']
            earliest_time = schedule['earliest_time']
            latest_time = schedule['latest_time']
            cron = schedule['cron_schedule']
            # grabbing entities
            entities = []
            investigations = []
            baselines = []
            responses = []
            # v1 stores investigations/baselines on the story, so pull them
            # from any story that references this detection by name.
            for story_name, story in sorted(stories.items()):
                for d in story['detections']:
                    if d['name'] == name:
                        if 'investigations' in story:
                            investigations = story['investigations']
                        if 'baselines' in story:
                            baselines = story['baselines']
        # lets process v2 detections
        if detection['spec_version'] == 2:
            if verbose:
                print("processing v2 detection: {0}".format(detection['name']))
            name = detection['name']
            id = detection['id']
            entities = detection['entities']
            description = detection['description']
            # splunk
            if 'splunk' in detection['detect']:
                type = 'splunk'
                correlation_rule = detection['detect']['splunk']['correlation_rule']
                search = correlation_rule['search']
                earliest_time = correlation_rule['schedule']['earliest_time']
                latest_time = correlation_rule['schedule']['latest_time']
                cron = correlation_rule['schedule']['cron_schedule']
            # uba
            # NOTE(review): the uba/phantom branches below never assign
            # earliest_time/latest_time/cron (or correlation_rule), so the
            # values written into complete_detections may be stale from a
            # previous loop iteration — or raise NameError on the first
            # detection.  Confirm intended behavior before restructuring.
            if 'uba' in detection['detect']:
                uba = detection['detect']['uba']
                type = 'uba'
                search = uba['search'] = 'CONSTRUCT DETECTION SEARCH HERE'
                # earliest_time = uba['earliest_time']
                # latest_time = uba['latest_time']
                # cron = uba['cron_schedule']
            # phantom
            if 'phantom' in detection['detect']:
                phantom = detection['detect']['phantom']
                type = 'phantom'
                search = phantom['search'] = 'CONSTRUCT DETECTION SEARCH HERE'
                # earliest_time = phantom['earliest_time']
                # latest_time = phantom['latest_time']
                # cron = phantom['cron_schedule']
            baselines = []
            investigations = []
            responses = []
            # v2 stores these relationships on the detection itself.
            if 'baselines' in detection:
                for b in detection['baselines']:
                    baselines.append({"type": b['type'], "name": b['name']})
            if 'investigations' in detection:
                for i in detection['investigations']:
                    investigations.append({"type": i['type'], "name": i['name']})
            if 'responses' in detection:
                for r in detection['responses']:
                    responses.append({"type": r['type'], "name": r['name']})
        # Assemble the normalized record from the spec-specific locals above.
        complete_detections[name] = {}
        complete_detections[name]['detection_name'] = name
        complete_detections[name]['id'] = id
        complete_detections[name]['search'] = search
        complete_detections[name]['latest_time'] = latest_time
        complete_detections[name]['earliest_time'] = earliest_time
        complete_detections[name]['cron'] = cron
        complete_detections[name]['investigations'] = investigations
        complete_detections[name]['baselines'] = baselines
        complete_detections[name]['responses'] = responses
        complete_detections[name]['entities'] = entities
        complete_detections[name]['description'] = description
        complete_detections[name]['correlation_rule'] = correlation_rule
        complete_detections[name]['type'] = type
        complete_detections[name]['maintainers'] = detection['maintainers']
        # Optional fields default to "" / [] — note this also mutates the
        # source ``detection`` dict in place.
        if 'references' not in detection:
            detection['references'] = []
        complete_detections[name]['references'] = detection['references']
        if 'channel' not in detection:
            detection['channel'] = ""
        complete_detections[name]['channel'] = detection['channel']
        if 'confidence' not in detection:
            detection['confidence'] = ""
        complete_detections[name]['confidence'] = detection['confidence']
        if 'eli5' not in detection:
            detection['eli5'] = ""
        complete_detections[name]['eli5'] = detection['eli5']
        if 'how_to_implement' not in detection:
            detection['how_to_implement'] = ""
        complete_detections[name]['how_to_implement'] = detection['how_to_implement']
        if 'asset_type' not in detection:
            detection['asset_type'] = ""
        complete_detections[name]['asset_type'] = detection['asset_type']
        if 'known_false_positives' not in detection:
            detection['known_false_positives'] = ""
        complete_detections[name]['known_false_positives'] = detection['known_false_positives']
        complete_detections[name]['security_domain'] = detection['security_domain']
        complete_detections[name]['version'] = detection['version']
        complete_detections[name]['spec_version'] = detection['spec_version']
        complete_detections[name]['creation_date'] = detection['creation_date']
        # set modification date to creation of there is not one
        if 'modification_date' in detection:
            complete_detections[name]['modification_date'] = detection['modification_date']
        else:
            complete_detections[name]['modification_date'] = detection['creation_date']
        # process its metadata
        complete_detections = process_data_metadata(detection, complete_detections, name)
        # stories associated with the detection
        complete_detections[name]['stories'] = []
        for story_name, story in sorted(stories.items()):
            for d in story['detections']:
                if d['name'] == name:
                    complete_detections[name]['stories'].append(story['story_name'])
        # sort uniq the results
        complete_detections[name]['stories'] = sorted(set(complete_detections[name]['stories']))
    return complete_detections
def generate_stories(REPO_PATH, verbose):
    """Load every YAML file under <REPO_PATH>/stories/ and normalize both
    spec v1 and spec v2 stories into one dict keyed by story name.

    Exits the program on YAML parse errors.
    """
    story_files = []
    story_manifest_files = path.join(path.expanduser(REPO_PATH), "stories/*.yml")
    for story_manifest_file in glob.glob(story_manifest_files):
        # read in each story
        with open(story_manifest_file, 'r') as stream:
            try:
                story = list(yaml.safe_load_all(stream))[0]
            except yaml.YAMLError as exc:
                print(exc)
                sys.exit("ERROR: reading {0}".format(story_manifest_file))
        story_files.append(story)
    # store an object with all stories and their data
    complete_stories = dict()
    for story in story_files:
        if verbose:
            print("processing story: {0}".format(story['name']))
        # Start building the story for the use case
        name = story['name']
        complete_stories[name] = {}
        complete_stories[name]['story_name'] = name
        complete_stories[name]['id'] = story['id']
        # grab modification date if it has one, otherwise write as creation date
        complete_stories[name]['creation_date'] = story['creation_date']
        if 'modification_date' in story:
            complete_stories[name]['modification_date'] = story['modification_date']
        else:
            complete_stories[name]['modification_date'] = story['creation_date']
        complete_stories[name]['description'] = story['description']
        # Default missing references to [] — mutates the source dict in place.
        if 'references' not in story:
            story['references'] = []
        complete_stories[name]['references'] = story['references']
        complete_stories[name]['version'] = story['version']
        complete_stories[name]['narrative'] = story['narrative']
        complete_stories[name]['spec_version'] = story['spec_version']
        complete_stories[name]['maintainers'] = story['maintainers']
        # grab searches
        # NOTE(review): v1 wraps the category in a list while v2 assigns the
        # raw value, and ``category`` is left over from the previous story
        # (or unbound) for any other spec_version — confirm intended.
        if story['spec_version'] == 1:
            detections = []
            baselines = []
            investigations = []
            category = []
            category.append(story['category'])
            if 'detection_searches' in story['searches']:
                for d in story['searches']['detection_searches']:
                    detections.append({"type": "splunk", "name": d})
            complete_stories[name]['detections'] = detections
            # in spec v1 these are part of the story which is why we are grabbing them here
            if 'support_searches' in story['searches']:
                for b in story['searches']['support_searches']:
                    baselines.append({"type": "splunk", "name": b})
            complete_stories[name]['baselines'] = baselines
            if 'contextual_searches' in story['searches']:
                for i in story['searches']['contextual_searches']:
                    investigations.append({"type": "splunk", "name": i})
            if 'investigative_searches' in story['searches']:
                for i in story['searches']['investigative_searches']:
                    investigations.append({"type": "splunk", "name": i})
            complete_stories[name]['investigations'] = investigations
        if story['spec_version'] == 2:
            detections = []
            if 'detections' in story:
                for d in story['detections']:
                    detections.append({"type": d['type'], "name": d['name']})
            complete_stories[name]['detections'] = detections
            category = story['category']
        complete_stories[name]['category'] = category
    return complete_stories
def write_splunk_docs_bak(stories, detections, OUTPUT_DIR):
    """Write the analytic stories as Splunk-docs wiki markup, grouped by category.

    :param stories: mapping of story name -> story dict (creation/modification
        dates, description, narrative, references, category, ...).
    :param detections: detection objects handed to process_metadata().
    :param OUTPUT_DIR: directory receiving splunk_docs_categories.wiki.
    :returns: tuple (number of stories processed, list of paths written).
    """
    paths = []
    # Create conf files from analytics stories files
    splunk_docs_output_path = OUTPUT_DIR + "/splunk_docs_categories.wiki"
    paths.append(splunk_docs_output_path)
    # Context manager guarantees the handle is closed even when a write fails
    # (the previous open()/close() pair leaked the file on exceptions).
    with open(splunk_docs_output_path, 'w') as output_file:
        output_file.write("= Use Case Categories=\n")
        output_file.write("The collapse...\n")
        # calculate categories
        categories = []
        for story_name, story in sorted(stories.items()):
            c = story['category']
            categories.append(c)
        # get a unique set of them
        categories = unique(categories)
        for c in categories:
            # NOTE(review): c[0] assumes each category is a sequence (spec v1
            # stores it as a one-element list); for a plain string this emits
            # only its first character -- confirm behaviour for spec v2 data.
            output_file.write("\n\n=={0}==\n".format(c[0]))
            # iterate through every story and print it out
            for story_name, story in sorted(stories.items()):
                # if the category matches
                if story['category'] == c:
                    output_file.write("\n==={0}===\n".format(story_name))
                    output_file.write("\n{0}\n".format(story['description']))
                    output_file.write(
                        """\n<div class="toccolours mw-collapsible">\n<div class="mw-collapsible-content">\n""")
                    # header information
                    output_file.write("\n====Narrative====\n{0}\n".format(story['narrative']))
                    mappings, providing_technologies, data_models = process_metadata(detections, story_name)
                    # providing technologies
                    output_file.write("\n====Providing Technologies====\n")
                    providing_technologies = unique(providing_technologies)
                    for pt in providing_technologies:
                        output_file.write("* {0}\n".format(pt))
                    # data models
                    output_file.write("\n====Data Models====\n")
                    data_models = unique(data_models)
                    for dm in data_models:
                        output_file.write("* {0}\n".format(dm))
                    # mappings
                    output_file.write("\n====Mappings====\n")
                    output_file.write("\n=====ATT&CK=====\n")
                    if mappings['mitre_attack']:
                        for m in mappings['mitre_attack']:
                            output_file.write("* {0}\n".format(m))
                    output_file.write("\n=====Kill Chain Phases=====\n")
                    if mappings['kill_chain_phases']:
                        for m in mappings['kill_chain_phases']:
                            output_file.write("* {0}\n".format(m))
                    if mappings['cis20']:
                        output_file.write("\n=====CIS=====\n")
                        for m in mappings['cis20']:
                            output_file.write("* {0}\n".format(m))
                    if mappings['nist']:
                        output_file.write("\n=====NIST=====\n")
                        for m in mappings['nist']:
                            output_file.write("* {0}\n".format(m))
                    # references
                    output_file.write("\n====References====\n")
                    for r in story['references']:
                        output_file.write("* {0}\n".format(r))
                    # story details
                    output_file.write("\ncreation_date = {0}\n\n".format(story['creation_date']))
                    output_file.write("modification_date = {0}\n\n".format(story['modification_date']))
                    output_file.write("version = {0}\n".format(story['version']))
                    # footer information
                    output_file.write("""\n</div>\n</div>\n""")
                    output_file.write("""\n[[Category:V:Lab:drafts]]""")
    # len(stories) is the idiomatic form of len(stories.keys())
    story_count = len(stories)
    return story_count, paths
def write_markdown_docs_bak(stories, detections, OUTPUT_DIR):
    """Write the analytic stories as a Markdown document, grouped by category.

    :param stories: mapping of story name -> story dict.
    :param detections: detection objects handed to process_metadata().
    :param OUTPUT_DIR: directory receiving stories_categories.md.
    :returns: tuple (number of stories processed, list of paths written).
    """
    paths = []
    # Create conf files from analytics stories files
    splunk_docs_output_path = OUTPUT_DIR + "/stories_categories.md"
    paths.append(splunk_docs_output_path)
    # Context manager guarantees the handle is closed even when a write fails
    # (the previous open()/close() pair leaked the file on exceptions).
    with open(splunk_docs_output_path, 'w') as output_file:
        output_file.write("# Categories\n")
        output_file.write("Analytics stories organized by categories\n")
        # calculate categories
        categories = []
        for story_name, story in sorted(stories.items()):
            c = story['category']
            categories.append(c)
        # get a unique set of them
        categories = unique(categories)
        # build category TOC
        # NOTE(review): c[0] assumes each category is a sequence (spec v1
        # stores it as a one-element list) -- confirm behaviour for spec v2.
        for c in categories:
            output_file.write("\n* [{0}](#{1})\n".format(c[0], c[0].replace(' ', '-').lower()))
        for c in categories:
            output_file.write("\n\n## {0}\n".format(c[0]))
            # build story TOC
            for story_name, story in sorted(stories.items()):
                # if the category matches
                if story['category'] == c:
                    output_file.write("\n* [{0}](#{1})\n".format(story_name, story_name.replace(' ', '-').lower()))
            # iterate through every story and print it out
            for story_name, story in sorted(stories.items()):
                # if the category matches
                if story['category'] == c:
                    output_file.write("\n### {0}\n".format(story_name))
                    # basic story info
                    output_file.write("* id = `{0}`\n".format(story['id']))
                    output_file.write("* creation_date = {0}\n".format(story['creation_date']))
                    output_file.write("* modification_date = {0}\n".format(story['modification_date']))
                    output_file.write("* version = {0}\n".format(story['version']))
                    output_file.write("* spec_version = {0}\n".format(story['spec_version']))
                    # description and narrative
                    output_file.write("\n##### Description\n{0}\n".format(story['description']))
                    output_file.write("\n##### Narrative\n{0}\n".format(story['narrative']))
                    # process detections
                    output_file.write("\n##### Detections\n")
                    # write all detections
                    if 'detections' in story:
                        for d in story['detections']:
                            output_file.write("* {0}\n".format(d['name']))
                    mappings, providing_technologies, data_models = process_metadata(detections, story_name)
                    # providing tech
                    output_file.write("\n##### Providing Technologies\n")
                    providing_technologies = unique(providing_technologies)
                    for pt in providing_technologies:
                        output_file.write("* {0}\n".format(pt))
                    # data models
                    output_file.write("\n##### Data Models\n")
                    data_models = unique(data_models)
                    for dm in data_models:
                        output_file.write("{0}\n".format(dm))
                    # mappings
                    output_file.write("\n##### Mappings\n")
                    output_file.write("\n###### ATT&CK\n")
                    if mappings['mitre_attack']:
                        for m in mappings['mitre_attack']:
                            output_file.write("* {0}\n".format(m))
                    output_file.write("\n###### Kill Chain Phases\n")
                    if mappings['kill_chain_phases']:
                        for m in mappings['kill_chain_phases']:
                            output_file.write("* {0}\n".format(m))
                    if mappings['cis20']:
                        output_file.write("\n###### CIS\n")
                        for m in mappings['cis20']:
                            output_file.write("* {0}\n".format(m))
                    if mappings['nist']:
                        output_file.write("\n###### NIST\n")
                        for m in mappings['nist']:
                            output_file.write("* {0}\n".format(m))
                    # maintainers
                    output_file.write("\n##### Maintainers\n")
                    for m in story['maintainers']:
                        output_file.write("* name = {0}\n".format(m['name']))
                        output_file.write("* email = {0}\n".format(m['email']))
                        output_file.write("* company = {0}\n".format(m['company']))
                    # references
                    output_file.write("\n##### References\n")
                    for r in story['references']:
                        output_file.write("* {0}\n".format(r))
    # len(stories) is the idiomatic form of len(stories.keys())
    story_count = len(stories)
    return story_count, paths
def parse_data_models_from_search(search):
    """Extract the data model name referenced by a ``from datamodel=...`` clause.

    :param search: SPL search string to scan.
    :returns: the data model name (text up to the first whitespace or ``.``),
        or ``False`` when the search does not reference a data model.
    """
    # Raw string: the original non-raw pattern relied on the invalid '\s'
    # escape (DeprecationWarning today, a SyntaxError in future Pythons).
    # The optional '\s?' accept both `datamodel=X` and `datamodel = X`.
    match = re.search(r'from\sdatamodel\s?=\s?([^\s.]*)', search)
    if match is not None:
        return match.group(1)
    return False
if __name__ == "__main__":
    # grab arguments
    parser = argparse.ArgumentParser(description="generates documentation from our content", epilog="""
    This tool converts manifests information to documents in variious format like markdown and wiki markup used by Splunk docs.""")
    parser.add_argument("-p", "--path", required=True, help="path to security-content repo")
    parser.add_argument("-o", "--output", required=True, help="path to the output directory for the docs")
    parser.add_argument("-v", "--verbose", required=False, default=False, action='store_true', help="prints verbose output")
    # NOTE(review): with action='store_true' and default=True these two flags
    # are always True (passing them cannot disable generation) -- confirm
    # whether store_false semantics were intended before changing the CLI.
    parser.add_argument("-gsd", "--gen_splunk_docs", required=False, default=True, action='store_true',
                        help="generates wiki markup splunk documentation, default to true")
    parser.add_argument("-gmd", "--gen_markdown_docs", required=False, default=True, action='store_true',
                        help="generates markdown docs, default to true")
    # parse them
    args = parser.parse_args()
    REPO_PATH = args.path
    OUTPUT_DIR = args.output
    verbose = args.verbose
    gsd = args.gen_splunk_docs
    gmd = args.gen_markdown_docs
    # load the story and detection manifests
    stories = load_objects("stories/*.yml")
    detections = load_objects("detections/*/*.yml")
    if gsd:
        story_count, path = write_splunk_docs(stories, detections, OUTPUT_DIR)
        print("{0} story documents have been successfully written to {1}".format(story_count, path))
    else:
        print("--gen_splunk_docs was set to false, not generating splunk documentation")
    if gmd:
        story_count, path = write_markdown_docs(stories, detections, OUTPUT_DIR)
        print("{0} story documents have been successfully written to {1}".format(story_count, path))
    else:
        # fixed copy-paste error: this branch concerns the markdown flag
        print("--gen_markdown_docs was set to false, not generating markdown documentation")
    print("documentation generation for security content completed..")
c38fbf8b4876dcb336cdf78e73cf232325c5c0a8 | 2,651 | py | Python | src/galaxy_crawler/filters/v1.py | pddg/galaxy_crawler | cc0634dfca7d81ee49e5370ff0bf83cca92ec4ac | [
"Apache-2.0"
] | 2 | 2019-12-24T10:45:37.000Z | 2022-03-04T00:47:14.000Z | src/galaxy_crawler/filters/v1.py | pddg/galaxy_crawler | cc0634dfca7d81ee49e5370ff0bf83cca92ec4ac | [
"Apache-2.0"
] | 2 | 2019-10-31T17:42:36.000Z | 2020-03-24T18:20:41.000Z | src/galaxy_crawler/filters/v1.py | pddg/galaxy_crawler | cc0634dfca7d81ee49e5370ff0bf83cca92ec4ac | [
"Apache-2.0"
] | null | null | null | from .base import Filter
from typing import TYPE_CHECKING
from logging import getLogger
from .base import FilterEnum
from galaxy_crawler.errors import NotSupportedFilterError
from galaxy_crawler.constants import Target
if TYPE_CHECKING:
from typing import Union
logger = getLogger(__name__)
class V1FilterEnum(FilterEnum):
    """Filters available for the Galaxy v1 API.

    Each member's value is the role/repository attribute the filter reads.
    """
    DOWNLOAD = 'download_count'
    STAR = 'stargazers_count'
    FORK = 'forks_count'
    ANSIBLE = 'ansible_version'
    @classmethod
    def by_name(cls, name: str, gt: bool, threshold: 'Union[int, float]') -> 'Filter':
        """Build a filter instance from its (case-insensitive) name.

        :param name: filter name; must be one of ``cls.choices()``.
        :param gt: keep items over the threshold when True.
        :param threshold: boundary value handed to the filter.
        :raises NotSupportedFilterError: when ``name`` is not a known filter.
        """
        name = name.lower()
        if name not in cls.choices():
            raise NotSupportedFilterError(name)
        # 'ansible' compares version numbers; all other filters compare counters.
        if name == 'ansible':
            filter_instance = AnsibleVersionFilter(threshold)
        else:
            filter_instance = CountFilter(cls[name.upper()].value, threshold)
        # NOTE(review): `not filter_instance` is plain boolean negation, so the
        # gt=False branch returns a bool rather than a Filter -- confirm
        # whether an inverted Filter object was intended here.
        return filter_instance if gt else not filter_instance
class CountFilter(Filter):
    """Filter passing roles/repositories whose counter exceeds a threshold."""
    def __init__(self, key_name: str, threshold: int):
        """
        :param key_name: key of the counter in the role/repository dict.
        :param threshold: minimum (exclusive) value required to pass.
        """
        self.threshold = threshold
        self.key_name = key_name
    def passed(self, target: 'Target', role: 'dict') -> bool:
        """Return True when ``role[key_name]`` is strictly over the threshold.

        Targets other than roles/repositories always pass through.
        """
        if target not in [Target.ROLES, Target.REPOSITORIES]:
            return True
        try:
            count = role[self.key_name]
        except (KeyError, AttributeError):
            # `role` is a dict, so a missing key raises KeyError -- the
            # original `except AttributeError` could never catch it.
            # AttributeError is kept for backward compatibility.
            logger.error(f"Failed to parse response. Repository has no attribute '{self.key_name}'.")
            return False
        return count > self.threshold
class AnsibleVersionFilter(Filter):
    """Filter for minimum ansible version."""
    def __init__(self, min_version: float):
        """
        :param min_version: lowest ``min_ansible_version`` (inclusive) to pass.
        """
        self.min_version = min_version
        self.key_name = 'min_ansible_version'
    def passed(self, target: 'Target', role: 'dict') -> bool:
        """Return True when the role's min ansible version >= ``min_version``.

        Targets other than roles/repositories always pass through; unparsable
        version strings are treated as 0.0.
        """
        if target not in [Target.REPOSITORIES, Target.ROLES]:
            return True
        try:
            min_version_str = role[self.key_name]
        except (KeyError, AttributeError):
            # `role` is a dict, so a missing key raises KeyError -- the
            # original `except AttributeError` could never catch it.
            # AttributeError is kept for backward compatibility.
            logger.error(f"Failed to parse response. Repository has no attribute '{self.key_name}'.")
            return False
        try:
            # Convert 2.0a1 to 2.0
            min_version = float(min_version_str[:3])
        except (ValueError, TypeError):
            # ValueError: not parseable as float; TypeError: value is None.
            # (The original had two identical except blocks; merged here.)
            logger.warning(f"Cannot parse min_ansible_version ('{min_version_str}'). Use 0.0 instead.")
            min_version = 0.0
        return min_version >= self.min_version
2ca1bc0b8a566be98bf1f330e5b12ab31054de52 | 2,110 | py | Python | buildall.py | elementechemlyn/CareConnectBuilder | c004fa94c1af64d636ee25de8f13e34fe723b5f3 | [
"MIT"
] | null | null | null | buildall.py | elementechemlyn/CareConnectBuilder | c004fa94c1af64d636ee25de8f13e34fe723b5f3 | [
"MIT"
] | null | null | null | buildall.py | elementechemlyn/CareConnectBuilder | c004fa94c1af64d636ee25de8f13e34fe723b5f3 | [
"MIT"
] | null | null | null | import configparser
import os.path
import os
import shutil
import buildcodesystem
import buildvalueset
import buildextensions
import buildprofiles
# Read the input/output locations from the local config file.
config = configparser.ConfigParser()
config.read('config.ini')
output_dir = config["dir"]["output_dir"]
template_dir = config["dir"]["template_dir"]
#CodeSystems and ValueSets have the same names. They must go in a different folder.
#TODO Make these sub-folders configurable?
#TODO Make the download function configurable
# Create one output folder (plus a "tests" sub-folder) per artefact type.
codesystem_output = os.path.join(output_dir, 'CodeSystems')
os.makedirs(codesystem_output, exist_ok=True)
os.makedirs(os.path.join(codesystem_output,"tests"), exist_ok=True)
valueset_output = os.path.join(output_dir, 'ValueSets')
os.makedirs(valueset_output, exist_ok=True)
os.makedirs(os.path.join(valueset_output, "tests"), exist_ok=True)
extension_output = os.path.join(output_dir, 'Extensions')
os.makedirs(extension_output, exist_ok=True)
os.makedirs(os.path.join(extension_output, "tests"), exist_ok=True)
profile_output = os.path.join(output_dir, 'Profiles')
os.makedirs(profile_output, exist_ok=True)
os.makedirs(os.path.join(profile_output, "tests"), exist_ok=True)
# Point every builder module at its output folder and the shared templates.
buildcodesystem.output_dir = codesystem_output
buildcodesystem.template_dir = template_dir
buildvalueset.output_dir = valueset_output
buildvalueset.template_dir = template_dir
buildextensions.output_dir = extension_output
buildextensions.template_dir = template_dir
buildprofiles.output_dir = profile_output
buildprofiles.template_dir = template_dir
# Run the builders.
# NOTE(review): buildcodesystem is imported and configured above, but
# buildcodesystem.build() is never invoked -- confirm whether that is intended.
buildvalueset.build()
buildextensions.build()
buildprofiles.build()
#Copy the base classes over to the output folder.
base_class_dir = os.path.join(template_dir,'BaseClasses')
base_class_output_dir = os.path.join(output_dir,'BaseClasses')
os.makedirs(base_class_output_dir, exist_ok=True)
for name in os.listdir(base_class_dir):
    full_src_name = os.path.join(base_class_dir,name)
    full_target_name = os.path.join(base_class_output_dir,name)
    print("Copying %s to %s" % (full_src_name,full_target_name))
    shutil.copyfile(full_src_name,full_target_name)
| 39.074074 | 84 | 0.796682 |
8270b4d17b4379af22d9039c2a84344a752b2fd7 | 330 | py | Python | database/queries/update_queries.py | BrickText/JHROM | d99b907e0837d8dcc57ab474e9435891736f0dda | [
"MIT"
] | null | null | null | database/queries/update_queries.py | BrickText/JHROM | d99b907e0837d8dcc57ab474e9435891736f0dda | [
"MIT"
] | null | null | null | database/queries/update_queries.py | BrickText/JHROM | d99b907e0837d8dcc57ab474e9435891736f0dda | [
"MIT"
] | null | null | null | UPDATE_MOVIE = '''
    UPDATE MOVIE
    SET NAME=?, RATING=?
    WHERE MOVIE.ID=?;
'''
# Positional '?' placeholders: movie_id, type, date, then the projection id.
UPDATE_PROJECTION = '''
    UPDATE PROJECTION
    SET MOVIE_ID=?, TYPE=?, DATE=?
    WHERE PROJECTION.ID=?;
'''
# NOTE(review): despite the DELETE_ prefix, this constant holds an UPDATE
# statement -- confirm whether the name or the statement is wrong.
DELETE_RESERVATION = '''
    UPDATE RESERVATION
    SET USER_ID=?, PROJECTION_ID=?, ROW=?, COL=?
    WHERE RESERVATION.ID=?;
'''
| 18.333333 | 48 | 0.60303 |
1318608492f8d7e85a3867daf4a89209587ccae1 | 2,075 | py | Python | ansible/roles/lib_openshift_3.2/build/src/oc_scale.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | 164 | 2015-07-29T17:35:04.000Z | 2021-12-16T16:38:04.000Z | ansible/roles/lib_openshift_3.2/build/src/oc_scale.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | 3,634 | 2015-06-09T13:49:15.000Z | 2022-03-23T20:55:44.000Z | ansible/roles/lib_openshift_3.2/build/src/oc_scale.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | 250 | 2015-06-08T19:53:11.000Z | 2022-03-01T04:51:23.000Z | # vim: expandtab:tabstop=4:shiftwidth=4
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class OCScale(OpenShiftCLI):
    '''Wrapper around the ``oc`` command line tools for scaling resources.'''

    # pylint: disable=too-many-arguments
    def __init__(self,
                 resource_name,
                 namespace,
                 replicas,
                 kind,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        '''Remember the target resource and the desired replica count.'''
        super(OCScale, self).__init__(namespace, kubeconfig)
        self.kind = kind
        self.replicas = replicas
        self.name = resource_name
        self.namespace = namespace
        self.kubeconfig = kubeconfig
        self.verbose = verbose
        self._resource = None

    @property
    def resource(self):
        '''Lazily fetched resource model; populated on first access via get().'''
        if not self._resource:
            self.get()
        return self._resource

    @resource.setter
    def resource(self, data):
        '''Replace the cached resource model.'''
        self._resource = data

    def get(self):
        '''Fetch the resource; on success rewrite results to the replica count.'''
        result = self._get(self.kind, self.name)
        if result['returncode'] == 0:
            # Wrap the raw payload in the model matching the resource kind.
            model_for_kind = {'dc': DeploymentConfig, 'rc': ReplicationController}
            model = model_for_kind.get(self.kind)
            if model is not None:
                self.resource = model(content=result['results'][0])
                result['results'] = [self.resource.get_replicas()]
        return result

    def put(self):
        '''Write the desired replica count into the resource and push it.'''
        self.resource.update_replicas(self.replicas)
        return self._replace_content(self.kind, self.name, self.resource.yaml_dict)

    def needs_update(self):
        '''Ask the resource model whether the replica count needs updating.'''
        return self.resource.needs_update_replicas(self.replicas)
| 33.467742 | 83 | 0.593735 |
305c4c84b5c4e7ee4b74855775a601c853edef69 | 3,152 | py | Python | stacks_queues/queue_from_stacks/queue_from_stacks_challenge.py | stephank007/python_challenges | dfd8d18c03a06735f6e4e02b0660007fe2d02f07 | [
"Apache-2.0"
] | null | null | null | stacks_queues/queue_from_stacks/queue_from_stacks_challenge.py | stephank007/python_challenges | dfd8d18c03a06735f6e4e02b0660007fe2d02f07 | [
"Apache-2.0"
] | null | null | null | stacks_queues/queue_from_stacks/queue_from_stacks_challenge.py | stephank007/python_challenges | dfd8d18c03a06735f6e4e02b0660007fe2d02f07 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Challenge Notebook
# ## Problem: Implement a queue using two stacks.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Constraints
#
# * Do we expect the methods to be enqueue and dequeue?
# * Yes
# * Can we assume we already have a stack class that can be used for this problem?
# * Yes
# * Can we push a None value to the Stack?
# * No
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# * Enqueue and dequeue on empty stack
# * Enqueue and dequeue on non-empty stack
# * Multiple enqueue in a row
# * Multiple dequeue in a row
# * Enqueue after a dequeue
# * Dequeue after an enqueue
# ## Algorithm
#
# Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/stacks_queues/queue_from_stacks/queue_from_stacks_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Code
# In[ ]:
# Jupyter magics from the notebook export: run the companion stack
# implementation and load its source (../stack/stack.py) into the session.
get_ipython().run_line_magic('run', '../stack/stack.py')
get_ipython().run_line_magic('load', '../stack/stack.py')
# In[ ]:
class QueueFromStacks(object):
    """Queue implemented with two stacks (amortized O(1) per operation).

    ``_in_stack`` receives enqueued items; ``_out_stack`` serves dequeues.
    Items are shifted from in to out only when out runs empty, so each
    element is moved at most once.  Behaviour matches the unit test below
    (dequeue on an empty queue returns None).
    """
    def __init__(self):
        # Plain lists used as stacks: append() pushes, pop() pops the top.
        self._in_stack = []
        self._out_stack = []
    def shift_stacks(self, source, destination):
        """Pop every item off `source` and push it onto `destination`,
        reversing the order (stack-to-stack transfer)."""
        while source:
            destination.append(source.pop())
    def enqueue(self, data):
        """Add `data` to the back of the queue."""
        self._in_stack.append(data)
    def dequeue(self):
        """Remove and return the front item, or None if the queue is empty."""
        if not self._out_stack:
            # Refill the out-stack so the oldest item ends up on top.
            self.shift_stacks(self._in_stack, self._out_stack)
        if not self._out_stack:
            return None
        return self._out_stack.pop()
# ## Unit Test
#
#
# In[ ]:
# %load test_queue_from_stacks.py
import unittest
class TestQueueFromStacks(unittest.TestCase):
    """Unit test exercising the two-stack queue implementation."""

    def test_queue_from_stacks(self):
        """Walk one queue through empty, enqueue, dequeue and mixed scenarios."""
        print('Test: Dequeue on empty stack')
        q = QueueFromStacks()
        self.assertEqual(q.dequeue(), None)
        print('Test: Enqueue on empty stack')
        print('Test: Enqueue on non-empty stack')
        print('Test: Multiple enqueue in a row')
        for value in range(3):
            q.enqueue(value)
        print('Test: Dequeue on non-empty stack')
        print('Test: Dequeue after an enqueue')
        self.assertEqual(q.dequeue(), 0)
        print('Test: Multiple dequeue in a row')
        self.assertEqual(q.dequeue(), 1)
        self.assertEqual(q.dequeue(), 2)
        print('Test: Enqueue after a dequeue')
        q.enqueue(5)
        self.assertEqual(q.dequeue(), 5)
        print('Success: test_queue_from_stacks')
def main():
    """Run the queue-from-stacks unit test directly (without a test runner)."""
    test = TestQueueFromStacks()
    test.test_queue_from_stacks()
# Script entry point: execute the test when run as a program.
if __name__ == '__main__':
    main()
# ## Solution Notebook
#
# Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/stacks_queues/queue_from_stacks/queue_from_stacks_solution.ipynb) for a discussion on algorithms and code solutions.
| 25.419355 | 302 | 0.667513 |
eba7eddcd9390d0be15c8b590f1073a984062086 | 4,638 | py | Python | imcsdk/mometa/lsboot/LsbootNVMe.py | ragupta-git/ImcSdk | 2e41f2ffe5282d38de85bc4739fa53dd2f0c9bb4 | [
"Apache-2.0"
] | null | null | null | imcsdk/mometa/lsboot/LsbootNVMe.py | ragupta-git/ImcSdk | 2e41f2ffe5282d38de85bc4739fa53dd2f0c9bb4 | [
"Apache-2.0"
] | null | null | null | imcsdk/mometa/lsboot/LsbootNVMe.py | ragupta-git/ImcSdk | 2e41f2ffe5282d38de85bc4739fa53dd2f0c9bb4 | [
"Apache-2.0"
] | 3 | 2018-11-14T13:02:40.000Z | 2018-11-14T13:49:38.000Z | """This module contains the general information for LsbootNVMe ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class LsbootNVMeConsts:
    """Allowed string values for LsbootNVMe properties (state and type)."""
    STATE_DISABLED = "Disabled"
    STATE_ENABLED = "Enabled"
    TYPE_NVME = "NVME"
class LsbootNVMe(ManagedObject):
    """This is LsbootNVMe class.

    Managed object for an NVMe boot-device entry ("lsbootNVMe", rn
    "nvme-[name]"); the metadata tables below follow the generated-SDK layout.
    """
    consts = LsbootNVMeConsts()
    naming_props = set([u'name'])
    # MO metadata per platform ("classic" rack vs "modular"): XML class id,
    # rn pattern, access rights, parent classes and supported verbs.
    mo_meta = {
        "classic": MoMeta("LsbootNVMe", "lsbootNVMe", "nvme-[name]", VersionMeta.Version2013e, "InputOutput", 0xff, [], ["admin", "read-only", "user"], [u'lsbootDevPrecision'], [], ["Get", "Set"]),
        "modular": MoMeta("LsbootNVMe", "lsbootNVMe", "nvme-[name]", VersionMeta.Version2013e, "InputOutput", 0xff, [], ["admin", "read-only", "user"], [u'lsbootDevPrecision'], [], ["Get", "Set"])
    }
    # Per-property metadata: (name, XML attribute, type, version, access,
    # dirty mask, min/max length, validation regex, allowed values, ranges).
    prop_meta = {
        "classic": {
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version2013e, MoPropertyMeta.NAMING, 0x4, None, None, r"""(([a-zA-Z0-9]{1})|([a-zA-Z0-9]{1}[a-zA-Z0-9_\-]{0,28}[a-zA-Z0-9]{1})|([a-zA-Z0-9]{2}))""", [], []),
            "order": MoPropertyMeta("order", "order", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["1-255"]),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []),
            "state": MoPropertyMeta("state", "state", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["NVME"], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        },
        "modular": {
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version2013e, MoPropertyMeta.NAMING, 0x4, None, None, r"""(([a-zA-Z0-9]{1})|([a-zA-Z0-9]{1}[a-zA-Z0-9_\-]{0,28}[a-zA-Z0-9]{1})|([a-zA-Z0-9]{2}))""", [], []),
            "order": MoPropertyMeta("order", "order", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["1-255"]),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []),
            "state": MoPropertyMeta("state", "state", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["NVME"], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        },
    }
    # Mapping of XML attribute names to Python property names, per platform.
    prop_map = {
        "classic": {
            "dn": "dn", 
            "name": "name", 
            "order": "order", 
            "rn": "rn", 
            "state": "state", 
            "status": "status", 
            "type": "type", 
            "childAction": "child_action", 
        },
        "modular": {
            "dn": "dn", 
            "name": "name", 
            "order": "order", 
            "rn": "rn", 
            "state": "state", 
            "status": "status", 
            "type": "type", 
            "childAction": "child_action", 
        },
    }
    def __init__(self, parent_mo_or_dn, name, **kwargs):
        """Create an LsbootNVMe MO named ``name`` under ``parent_mo_or_dn``."""
        self._dirty_mask = 0
        self.name = name
        # Writable properties default to None until set by the caller/server.
        self.order = None
        self.state = None
        self.status = None
        self.type = None
        self.child_action = None
        ManagedObject.__init__(self, "LsbootNVMe", parent_mo_or_dn, **kwargs)
| 52.11236 | 231 | 0.584088 |
999eb8173fd0afd7432b677cb4be0b19ab7581d8 | 2,065 | py | Python | utils/spider/sitemapper.py | codebrk/sqweeks | 19d5cabe924aee578c5de314f5524bb323112517 | [
"MIT"
] | null | null | null | utils/spider/sitemapper.py | codebrk/sqweeks | 19d5cabe924aee578c5de314f5524bb323112517 | [
"MIT"
] | null | null | null | utils/spider/sitemapper.py | codebrk/sqweeks | 19d5cabe924aee578c5de314f5524bb323112517 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018 hackbox.io developers (http://hackbox.io)
See the file 'LICENSE' for copying permission
"""
from bs4 import BeautifulSoup
import requests
import re
from urlparse import urljoin
stack = []
domain = None
count = 0
def run(target, **kwargs):
"""
Map a website using recursive crawling method
"""
global stack, domain, count
stack.append(target)
domain = target.split("://")[1].split("/")[0]
count = 0
start(**kwargs)
return stack
def start(**options):
"""
Start recursive crawling
"""
global stack, domain, count
if options["verbose"]:
print("+| {}".format(stack[count]))
res = requests.get(stack[count])
if res.ok:
links = crawl_links(res.content)
for link in links:
if not link.startswith("http"):
link = urljoin(res.url, link)
if link.endswith("/"):
link = link[:-1]
if link not in stack and (re.compile(r'://' + domain).search(link) or
re.compile(r'://www.' + domain).search(link)):
if options["verbose"]:
print("+| | {}".format(link))
stack.append(link)
count += 1
if count < len(stack) <= options["max_links"]:
start(**options)
def crawl_links(content):
"""
Crawl all the links from given page content
"""
soup = BeautifulSoup(content, "lxml")
a_tags = soup.find_all("a")
links = []
for a_tag in a_tags:
try:
if is_valid_url(a_tag["href"]):
links.append(a_tag["href"])
except KeyError:
pass
return links
def is_valid_url(url):
"""
Check if the href URL is valid
"""
return (
url != "#" and
url != "" and
url[0] != "?" and
url[0] != "#" and
not url.startswith("tel:") and
not url.startswith("javascript:") and
not url.startswith("mailto:")
)
| 21.28866 | 84 | 0.533656 |
918119d6b69f6b59474224f6fa1aa463e1f7720e | 7,907 | py | Python | cairotft/tft.py | hadess/cairotft | 1951fbf949c815eb32594dd4336720b67a3e8811 | [
"BSD-3-Clause"
] | 4 | 2016-08-05T13:28:59.000Z | 2022-01-08T15:02:17.000Z | cairotft/tft.py | hadess/cairotft | 1951fbf949c815eb32594dd4336720b67a3e8811 | [
"BSD-3-Clause"
] | 2 | 2016-05-01T16:52:24.000Z | 2018-11-25T16:17:34.000Z | cairotft/tft.py | hadess/cairotft | 1951fbf949c815eb32594dd4336720b67a3e8811 | [
"BSD-3-Clause"
] | 3 | 2016-12-17T11:08:09.000Z | 2019-08-29T19:36:56.000Z | # Copyright (c) 2015, Thomas Chiroux - Link Care Services
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of cairotft nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Class for display on tft using linuxfb."""
import asyncio
import cairocffi as cairo
from cairotft import linuxfb
class TftDisplay():
"""Display class for the tft display.
:ivar fb_interface: (:py:class:`str`) framebuffer interface
name (ex: /dev/fb0)
:ivar cairo_format: (:py:class:`int`) cairo pixel format.
see cairocffi documentation:
https://pythonhosted.org/cairocffi/api.html#pixel-format
:ivar fps: (:py:class:`int`) forced fps
:ivar _blit_flag: (:py:class:`bool`) used in forced fps mode: each blit()
call will activate the blit flag in order to do a real buffer copy
in the next blit.
:ivar _fbmem: (:class:`cairotft.linuxfb.FbMem`) framebuffer memory
interface. This object is the memory interface to the screen.
:ivar _buffermem: (ctypes array of c_char) memory buffer.
This object is the memory buffer for the double buffer.
:ivar surf: (:class:`cairocffi.ImageSurface`) cairo surface pointing to
the actual screen.
:ivar buffer_surf: (:class:`cairocffi.ImageSurface`) cairo surface pointing
to the double buffer.
:ivar width: (:py:class:`int`) width of the screen in pixels.
:ivar height: (:py:class:`int`) height of the screen in pixels.
:ivar size_per_pixel: (:py:class:`int`) number of bytes per pixel.
:ivar ctx: (:class:`cairocffi.Context`) cairocffi default context.
This context draws in the double (memory) buffer.
:ivar screen_ctx: (:class:`cairocffi.Context`) cairocffi context to draw
directly on the screen.
:ivar loop: (:py:class:`asyncio.BaseEventLoop`) The main event loop.
"""
def __init__(self, interface='/dev/fb0', cairo_format=cairo.FORMAT_ARGB32,
fps=None):
"""Initialisation of the class.
:param str interface: framebuffer interface name
:param int cairo_format: the pixel format.
see: https://pythonhosted.org/cairocffi/api.html#pixel-format
:param int fps: a forced fps.
* If no forced fps is given (fps=None),
each blit() call will copy the memory buffer into the screen
buffer.
* If a forced fps is given, each call to :class:`TftDisplay.blit`
will not redraw the screen but only trigger a redraw for the
next frame. The 'real' blit is called every 1/fps seconds.
.. warning:: choose your fps carefully: if you choose a to high
fps for your hardware, the application may pass all its time
to redraw the screen instead of actually really drawing
objects.
Also, take care of the bus speed and size that defines a max
fps. For example a SPI screen with 480x272 resolution in
16bits a 20 Mhz has an absolute max FPS of:
20 000 000 / (480 * 272 * 2 * 8) = 9.57 fps
(without taking care of the spi communications overhead)
"""
self.fb_interface = interface
self.cairo_format = cairo_format
self.fps = fps
self._blit_flag = False
# two memory buffers:
# * fbmem for direct draw on the screen
# * buffermem: memory buffer for double buffering.
self._fbmem = linuxfb.open_fbmem(self.fb_interface)
self._buffermem = linuxfb.memory_buffer(self._fbmem.fix_info.smem_len)
# two cairo surface, directly on the screen and in the memory buffer.
self.surf = linuxfb.cairo_surface_from_fbmem(
self._fbmem,
self._fbmem.mmap,
cairo_format)
self.buffer_surf = linuxfb.cairo_surface_from_fbmem(
self._fbmem,
self._buffermem,
cairo_format)
# calculates width and height of the screen
self.width, self.height = self.surf.get_width(), self.surf.get_height()
self.size_per_pixel = self._fbmem.fix_info.smem_len / (self.width *
self.height)
# by default we write only in buffer using self.ctx
self.ctx = cairo.Context(self.buffer_surf)
# cairo context for direct rendering on the screen.
# normaly only used with blit.
self.screen_ctx = cairo.Context(self.surf)
# async io loop
self.loop = asyncio.get_event_loop()
def blit(self, force=False):
"""Display the buffer in the screen.
Take the content of the memory buffer and draw it on the screen.
:param bool force: if force is True, force a buffer copy, even in fps
mode.
"""
if self.fps is None or force:
self.screen_ctx.set_source_surface(self.buffer_surf)
self.screen_ctx.paint()
else:
self._blit_flag = True
def fps_call(self):
"""force a redraw screen. Called every x ms when fps mode is set."""
if self._blit_flag:
self.blit(force=True)
self._blit_flag = False
self.loop.call_later(1 / self.fps, self.fps_call)
    def close(self):
        """Close the framebuffer interface.

        Blanks the display so the screen is left black, then releases the
        framebuffer memory opened in ``__init__``.
        """
        # Back to black background
        self.blank_screen(self.ctx)
        # Release the memory-mapped framebuffer.
        linuxfb.close_fbmem(self._fbmem)
def blank_screen(self, ctx, color=(0, 0, 0, 1), blit=True):
"""Blank the screen with the given color.
:param ctx: cairocffi context
:type ctx: :class:`cairocffi.Context`
:param color: 4 int tuple reprensentig the rgba color.
"""
ctx.set_source_rgba(*color)
ctx.rectangle(0, 0, self.width, self.height)
ctx.fill()
if blit:
self.blit()
    def draw_interface(self, ctx):
        """Draw the application interface; must be overridden by subclasses.

        :param ctx: cairocffi context
        :type ctx: :class:`cairocffi.Context`
        :raises NotImplementedError: always, on this base class.
        """
        raise NotImplementedError
    def run(self):
        """Run the asyncio event loop until interrupted (blocking).

        Schedules the first interface draw, starts the periodic fps tick
        when an fps is configured, then blocks in ``run_forever()``.
        Ctrl-C exits quietly; the loop and the framebuffer are always
        released on the way out.
        """
        # just afer loop is started, draw the interface
        self.loop.call_soon(self.draw_interface, self.ctx)
        if self.fps:
            # fps mode: kick off the periodic redraw ticks.
            self.loop.call_later(1 / self.fps, self.fps_call)
        try:
            self.loop.run_forever()
        except KeyboardInterrupt:
            # Ctrl-C is the normal way to stop; exit without a traceback.
            pass
        finally:
            # Stop the loop first, then blank and release the framebuffer.
            self.loop.close()
            self.close()
| 41.615789 | 79 | 0.649172 |
d782d3b1d84c1ea490e2a948457f3fb8b99d01a1 | 1,400 | py | Python | test/functional/feature_shutdown.py | TopoX84/newlux | 555b9f7f9e4be4ef879f20083d8cf80ed8f7777e | [
"MIT"
] | 1,389 | 2017-06-28T02:35:01.000Z | 2022-03-25T20:09:01.000Z | test/functional/feature_shutdown.py | TopoX84/newlux | 555b9f7f9e4be4ef879f20083d8cf80ed8f7777e | [
"MIT"
] | 1,039 | 2015-03-25T23:58:32.000Z | 2022-03-30T00:41:16.000Z | test/functional/feature_shutdown.py | TopoX84/newlux | 555b9f7f9e4be4ef879f20083d8cf80ed8f7777e | [
"MIT"
] | 564 | 2017-06-28T03:55:03.000Z | 2022-03-30T14:57:40.000Z | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
    """Issue a long-polling RPC and verify it completes at the genesis tip."""
    result = node.waitfornewblock()
    assert_equal(result['height'], 0)
class ShutdownTest(BitcoinTestFramework):
    """Check that bitcoind shutdown waits for in-flight RPC connections."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = False

    def run_test(self):
        # Dedicated RPC connection with a long (600s) timeout so the
        # long-polling call issued below is not cut off client-side.
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
        # Force connection establishment by executing a dummy command.
        node.getblockcount()
        # Start `waitfornewblock` on a background thread; it blocks server-side.
        Thread(target=test_long_call, args=(node,)).start()
        # Wait until the server is executing the above `waitfornewblock`.
        # (2 = the blocked call plus the getrpcinfo query itself — presumably;
        # confirm against getrpcinfo semantics.)
        wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
        # Wait 1 second after requesting shutdown but not before the `stop` call
        # finishes. This is to ensure event loop waits for current connections
        # to close.
        self.stop_node(0, wait=1000)
ShutdownTest().main()
| 38.888889 | 103 | 0.715714 |
32869d51f0d41c777c01001300fe8412d5bd7ad0 | 2,846 | py | Python | carbon/engine.py | laplab/carbon-engine | 574a2d8560b12256933d9e8892acf213c56b5373 | [
"MIT"
] | 1 | 2021-01-02T16:22:22.000Z | 2021-01-02T16:22:22.000Z | carbon/engine.py | laplab/carbon-engine | 574a2d8560b12256933d9e8892acf213c56b5373 | [
"MIT"
] | null | null | null | carbon/engine.py | laplab/carbon-engine | 574a2d8560b12256933d9e8892acf213c56b5373 | [
"MIT"
] | 1 | 2016-05-20T05:25:30.000Z | 2016-05-20T05:25:30.000Z | import os.path
import logging
from colorlog import ColoredFormatter
from errors import *
from utils import Map
from program import Program
class Engine:
    """Base class of Carbon Engine, highest level of abstraction in module.

    In this class all components are together. Program will be compiled,
    executed and checked for the right answer here.
    """

    def __init__(self, logging_level):
        """Init method of engine.

        Args:
            logging_level (int): Minimal level for logging of events
        """
        # init logging: colourised stream handler on a dedicated logger.
        handler = logging.StreamHandler()
        handler.setFormatter(
            ColoredFormatter(
                ('%(green)s%(asctime)s%(reset)s ' +
                 '%(cyan)s%(filename)s:%(lineno)d%(reset)s ' +
                 '%(log_color)s%(bold)s%(levelname)-8s%(reset)s ' +
                 '%(log_color)s%(message)s%(reset)s'),
                datefmt=None,
                reset=True,
                log_colors={
                    'DEBUG': 'cyan',
                    'INFO': 'green',
                    'WARNING': 'yellow',
                    'ERROR': 'red',
                    'CRITICAL': 'red',
                },
                secondary_log_colors={},
                style='%'
            )
        )

        self.logger = logging.getLogger('carbon_engine')
        self.logger.addHandler(handler)
        self.logger.setLevel(logging_level)

    def test_program(self, filename, lang_config, input, output, autoremove=False):
        """Checking source code for passing one test.

        Args:
            filename (str): File name of source
            lang_config (dict|Map): Config for Program (see Program's class
                Attributes for structure)
            input (str): Input to be passed into program STDIN
                (note: shadows the builtin `input`; kept for API compatibility)
            output (str): Output expected to be got from program
            autoremove (bool[default=False]): Remove file after execution

        Returns:
            bool: True when the program's STDOUT matches ``output``.
                (Previously nothing was returned, so callers had no way to
                learn the verdict; returning the comparison is backward
                compatible.)

        Raises:
            FileDoesNotExistError: File named by filename arg is not found
        """
        # check if file exists
        if not os.path.isfile(filename):
            raise FileDoesNotExistError()

        if not isinstance(lang_config, Map):
            lang_config = Map(lang_config)

        program = Program(filename, lang_config)

        self.logger.info('Compiling {0}...'.format(program.filename))
        try:
            program.compile()
        except Exception as e:
            # Best-effort: log the compile failure; execution below will
            # then fail and the test is reported as not passed.
            self.logger.fatal(e)

        self.logger.info('Executing {0}...'.format(program.filename))
        # Default status so the comparison below is safe even when
        # execution raised.
        status = Map({'stdout': None})
        try:
            status = program.execute(input, autoremove)
        except Exception as e:
            self.logger.fatal(e)

        passed = status.stdout == output
        self.logger.info('Comparing STDOUT and expected output... ' + str(passed))
        return passed
| 32.712644 | 101 | 0.56149 |
af9e23efec172d2bb92fe8d67947b65a1e70e153 | 756 | py | Python | MaidUtils/skills/agenda.py | PolarFill/maid | 59868d80a87fae0c4ea624ade37caa1775390b4c | [
"MIT"
] | null | null | null | MaidUtils/skills/agenda.py | PolarFill/maid | 59868d80a87fae0c4ea624ade37caa1775390b4c | [
"MIT"
] | null | null | null | MaidUtils/skills/agenda.py | PolarFill/maid | 59868d80a87fae0c4ea624ade37caa1775390b4c | [
"MIT"
] | null | null | null | def TempNote(note): #Cria nota temporaria
import configparser
from config import path
config = configparser.ConfigParser()
config.read(f'{path}/Configurações/session.info')
config.set('Session', 'tempnote', note) #Escreve nota temporaria
with open(f'{path}/Configurações/session.info', 'w') as configfile: #Escreve o novo session.info
config.write(configfile)
def ReadTempNote():  # Reads the temporary note
    """Return the session's temporary note.

    Returns:
        str: the stored note, or the placeholder text
        'Nenhuma nota foi encontrada.' when the note is empty or missing.
    """
    import configparser
    from config import path

    config = configparser.ConfigParser()
    config.read(f'{path}/Configurações/session.info')
    # fallback='' also covers a missing file/section/option, which would
    # otherwise raise NoSectionError/NoOptionError; an empty value already
    # mapped to the placeholder below, so behaviour is unchanged otherwise.
    note = config.get('Session', 'tempnote', fallback='')
    if note == '':
        note = 'Nenhuma nota foi encontrada.'
    return note
return note | 32.869565 | 101 | 0.652116 |
88288feab4b7b2505f22f7e3e290c23217de048f | 5,248 | py | Python | electrumx/server/controller.py | erik-svensson/electrumx-royale | 7ba069dd9f7e8662ea50db4371a260b879a3e106 | [
"MIT"
] | 1 | 2020-12-03T12:29:31.000Z | 2020-12-03T12:29:31.000Z | electrumx/server/controller.py | erik-svensson/electrumx-royale | 7ba069dd9f7e8662ea50db4371a260b879a3e106 | [
"MIT"
] | null | null | null | electrumx/server/controller.py | erik-svensson/electrumx-royale | 7ba069dd9f7e8662ea50db4371a260b879a3e106 | [
"MIT"
] | 1 | 2020-05-10T11:04:07.000Z | 2020-05-10T11:04:07.000Z | # Copyright (c) 2016-2018, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
from asyncio import Event
from aiorpcx import _version as aiorpcx_version, TaskGroup
import electrumx
from electrumx.lib.server_base import ServerBase
from electrumx.lib.util import version_string
from electrumx.server.mempool import MemPool, MemPoolAPI
from electrumx.server.session import SessionManager
class Notifications(object):
    """Defer hashX notifications so clients hear about a height only once.

    Touched-hashX sets arrive from two sources: new blocks and mempool
    refreshes.  Block processing can take a while, and the prefetcher may
    poll the daemon's mempool for the new height before the block is done,
    so notifying on each event separately would message clients twice.
    This class buffers the touched sets per height from both sources and
    emits a single combined notification once both have reported (or the
    mempool alone has caught up to the highest known block).
    """

    def __init__(self):
        # touched hashXs keyed by height, one dict per source
        self._touched_mp = {}    # from mempool refreshes
        self._touched_bp = {}    # from block processing
        self._highest_block = -1

    async def _maybe_notify(self):
        mp, bp = self._touched_mp, self._touched_bp
        shared_heights = set(mp).intersection(bp)
        if shared_heights:
            height = max(shared_heights)
        elif mp and max(mp) == self._highest_block:
            height = self._highest_block
        else:
            # Either we are processing a block and waiting for it to
            # come in, or we have not yet had a mempool update for the
            # new block height.
            return
        touched = mp.pop(height)
        # Drop anything at or below the notified height; fold the block
        # side's touched sets into the notification.
        for stale in [h for h in mp if h <= height]:
            del mp[stale]
        for stale in [h for h in bp if h <= height]:
            touched.update(bp.pop(stale))
        await self.notify(height, touched)

    async def notify(self, height, touched):
        # Placeholder; replaced by the callback passed to start().
        pass

    async def start(self, height, notify_func):
        self._highest_block = height
        self.notify = notify_func
        await self.notify(height, set())

    async def on_mempool(self, touched, height):
        self._touched_mp[height] = touched
        await self._maybe_notify()

    async def on_block(self, touched, height):
        self._touched_bp[height] = touched
        self._highest_block = height
        await self._maybe_notify()
class Controller(ServerBase):
    '''Manages server initialisation and shutdown.

    Servers are started once the mempool is synced after the block
    processor first catches up with the daemon.
    '''

    async def serve(self, shutdown_event):
        '''Start the RPC server and wait for the mempool to synchronize.  Then
        start serving external clients.

        :param shutdown_event: event passed through to the session manager;
            signals that the server should shut down.
        '''
        # NOTE(review): the message says "0.18.x" but the check actually
        # requires 0.18.1 <= version < 0.19 — confirm the intended minimum.
        if not (0, 18, 1) <= aiorpcx_version < (0, 19):
            raise RuntimeError('aiorpcX version 0.18.x is required')

        env = self.env
        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
        self.logger.info(f'software version: {electrumx.version}')
        self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
        self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
        self.logger.info(f'event loop policy: {env.loop_policy}')
        self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')

        notifications = Notifications()
        # Coin-specific component classes are configured on the coin object.
        Daemon = env.coin.DAEMON
        BlockProcessor = env.coin.BLOCK_PROCESSOR
        DB = env.coin.DATABASE
        MEMPOOL = env.coin.MEMPOOL
        async with Daemon(env.coin, env.daemon_url) as daemon:
            db = DB(env)
            bp = BlockProcessor(env, db, daemon, notifications)

            # Set notifications up to implement the MemPoolAPI: attach the
            # required callables, then register the class so it passes
            # MemPoolAPI isinstance checks.
            def get_db_height():
                return db.db_height
            notifications.height = daemon.height
            notifications.db_height = get_db_height
            notifications.cached_height = daemon.cached_height
            notifications.mempool_hashes = daemon.mempool_hashes
            notifications.raw_transactions = daemon.getrawtransactions
            notifications.lookup_utxos = db.lookup_utxos
            MemPoolAPI.register(Notifications)
            mempool = MEMPOOL(env.coin, notifications)

            session_mgr = SessionManager(env, db, bp, daemon, mempool,
                                         shutdown_event)

            # Test daemon authentication, and also ensure it has a cached
            # height. Do this before entering the task group.
            await daemon.height()

            # caught_up_event: block processor has caught up with the daemon.
            # mempool_event: mempool synchronisation signal for the sessions.
            caught_up_event = Event()
            mempool_event = Event()

            async def wait_for_catchup():
                # Once caught up, warm the header merkle cache and keep the
                # mempool synchronised for the lifetime of the server.
                await caught_up_event.wait()
                await group.spawn(db.populate_header_merkle_cache())
                await group.spawn(mempool.keep_synchronized(mempool_event))

            async with TaskGroup() as group:
                await group.spawn(session_mgr.serve(notifications, mempool_event))
                await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
                await group.spawn(wait_for_catchup())
| 38.588235 | 82 | 0.651867 |
ea5007814d6482e443d1250effc52b2b0610a324 | 7,360 | py | Python | scripts/evaluate_dark_frame.py | exowanderer/BadPixelDetector | 1dd30eaec6f2a1c6edd40322cde395ac2cd06626 | [
"BSD-3-Clause"
] | null | null | null | scripts/evaluate_dark_frame.py | exowanderer/BadPixelDetector | 1dd30eaec6f2a1c6edd40322cde395ac2cd06626 | [
"BSD-3-Clause"
] | 1 | 2020-06-25T10:46:56.000Z | 2020-06-25T10:46:56.000Z | scripts/evaluate_dark_frame.py | exowanderer/HxRGBadPixelDetector | 1dd30eaec6f2a1c6edd40322cde395ac2cd06626 | [
"BSD-3-Clause"
] | null | null | null | import tensorflow as tf
# Enable memory growth on every visible GPU so TensorFlow allocates VRAM
# on demand instead of reserving it all at start-up.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(
            logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # set_memory_growth must be called before the GPUs are initialised;
        # if that already happened, report and continue on defaults.
        print(e)
from itertools import product as iterproduct
from matplotlib import pyplot as plt
import joblib
import numpy as np
import pandas as pd
import time
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, confusion_matrix
from sklearn.preprocessing import StandardScaler
from statsmodels.robust import scale as sc
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Model
from tensorflow.keras import layers
def build_lstm_autoencoder(train_x, n_units=128,
                           dropout_rate=0.2, fit_now=False,
                           epochs=10, batch_size=4096, validation_split=0.2,
                           shuffle=True, loss='mae', optimizer='adam'):
    """Build and compile an LSTM sequence autoencoder.

    A single LSTM encodes each input sequence into one vector;
    ``RepeatVector`` tiles that vector back to the sequence length, and a
    second LSTM plus a time-distributed dense layer reconstructs the input.

    :param train_x: 3-D array; only its shape
        (n_samples, n_timesteps, n_features) is used here.
    :param int n_units: LSTM hidden size for both encoder and decoder.
    :param float dropout_rate: dropout applied after each LSTM layer.
    :param fit_now: accepted but currently unused.
    :param epochs: accepted but currently unused (fitting is the caller's job).
    :param batch_size: accepted but currently unused.
    :param validation_split: accepted but currently unused.
    :param shuffle: accepted but currently unused.
    :param loss: loss passed to ``model.compile``.
    :param optimizer: optimizer passed to ``model.compile``.
    :return: the compiled (untrained) ``Sequential`` model.
    """
    model = Sequential()
    model.add(layers.LSTM(
        units=n_units,
        input_shape=(train_x.shape[1], train_x.shape[2])
    ))
    model.add(layers.Dropout(rate=dropout_rate))
    # Tile the encoding back out to one vector per timestep.
    model.add(layers.RepeatVector(n=train_x.shape[1]))
    model.add(layers.LSTM(units=n_units, return_sequences=True))
    model.add(layers.Dropout(rate=dropout_rate))
    model.add(
        layers.TimeDistributed(
            layers.Dense(units=train_x.shape[2])
        )
    )
    model.compile(loss=loss, optimizer=optimizer)
    return model
def evaluate_lstm_autoencoder(model, new_data_x, THRESHOLD=0.1):
    """Score new data with the autoencoder and flag anomalous samples.

    :param model: trained model exposing ``predict``.
    :param new_data_x: 3-D array (samples, timesteps, features) — assumed;
        the MAE is averaged over axis 1.
    :param float THRESHOLD: MAE above which a sample is marked anomalous.
    :return: tuple of (score DataFrame with ``loss``/``threshold``/
        ``anomaly`` columns, raw reconstruction array).
    """
    t0 = time.time()
    print('[INFO] Starting `new_data_x` predict step')
    reconstruction = model.predict(new_data_x)
    print('[INFO] Completed `new_data_x` predict step: '
          f'{time.time() - t0} sec')

    # Per-sample reconstruction error, averaged over the time axis.
    mae_loss = np.mean(np.abs(reconstruction - new_data_x), axis=1).squeeze()

    score_df = pd.DataFrame({
        'loss': mae_loss,
        'threshold': THRESHOLD,
        'anomaly': mae_loss > THRESHOLD,
    })
    print(f'[INFO] Created `new_data_x` Dataframe: {time.time() - t0} sec')
    return score_df, reconstruction
def plot_confusion_matrix(confusionMatrix, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues,
                          float_fmt='.1f',
                          figsize=(12, 12),
                          rotation=0):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    :param confusionMatrix: 2-D array of counts (rows = true labels).
    :param classes: sequence of class labels for the axis ticks.
    :param normalize: when True, convert each row to percentages.
    :param title: figure title.
    :param cmap: matplotlib colormap for the image.
    :param float_fmt: format spec used for normalized cell values.
    :param figsize: currently unused — never passed to a figure call.
    :param rotation: rotation (degrees) applied to the x tick labels.
    """
    if normalize:
        confusionMatrix = confusionMatrix.astype('float')
        # Row-normalise, then scale to percentages.
        confusionMatrix /= confusionMatrix.sum(axis=1)[:, np.newaxis]
        confusionMatrix = confusionMatrix * 100
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(confusionMatrix, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=rotation)
    plt.yticks(tick_marks, classes)

    fmt = float_fmt if normalize else 'd'
    # Switch text colour at half the maximum for contrast against the cmap.
    thresh = confusionMatrix.max() / 2.
    range0 = range(confusionMatrix.shape[0])
    range1 = range(confusionMatrix.shape[1])
    for i, j in iterproduct(range0, range1):
        # NOTE(review): '%' is appended even when normalize=False, and the
        # 'd' format fails on float-typed matrices — confirm intended.
        plt.text(j, i, format(confusionMatrix[i, j], fmt) + '%',
                 horizontalalignment="center",
                 color="white" if confusionMatrix[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('-fn', '--fits_filename', required=True, type=str)
    parser.add_argument('-nu', '--n_units', type=int, default=128)
    parser.add_argument('-e', '--epochs', type=int, default=10)
    parser.add_argument('-sn', '--save_now', action='store_true')
    parser.add_argument('-ln', '--load_name', type=str,
                        default='simulated_55k_bad_pixels_df.joblib.save')
    parser.add_argument('-ns', '--n_sig', type=float, default=4.5)
    parser.add_argument('-bsn', '--base_name', type=str, default='JWST_Dark')
    parser.add_argument('-pv', '--plot_verbose', action='store_true')
    clargs = parser.parse_args()

    fits_filename = clargs.fits_filename
    n_units = clargs.n_units
    epochs = clargs.epochs
    save_now = clargs.save_now
    load_name = clargs.load_name
    n_sig = clargs.n_sig
    base_name = clargs.base_name
    plot_verbose = clargs.plot_verbose

    if not plot_verbose:
        plt.ion()

    # history = joblib.load(
    #     f'LSTM{n_units}_{base_name}_history_{epochs}epochs.joblib.save')

    # NOTE(review): `train_x` and `batch_size` are never defined in this
    # script, so this call raises NameError — the training data loading
    # step appears to be missing. TODO: confirm and restore it.
    lstm_autoencoder = build_lstm_autoencoder(
        train_x=train_x, n_units=n_units,
        epochs=epochs, batch_size=batch_size)
    # NOTE(review): keras models expose `load_weights`/`load_model`, not
    # `.load()` — confirm which was intended.
    lstm_autoencoder.load(
        f'LSTM{n_units}_{base_name}_history_{epochs}epochs.h5'
    )
    # lstm_autoencoder_weights = keras.load_weights(
    #     f'LSTM{n_units}_{base_name}_history_{epochs}epochs_weights.h5')

    print('[INFO] Loading Train Score Dataframe')
    train_score_df = pd.read_csv(
        f'LSTM{n_units}_{base_name}_train_score_df.csv'
    )
    print('[INFO] Loading Test Score Dataframe')
    test_score_df = pd.read_csv(f'LSTM{n_units}_{base_name}_test_score_df.csv')

    # NOTE(review): this silently overrides the --n_sig command-line value.
    n_sig = 4.5
    # Robust anomaly threshold: median + n_sig * MAD of the training loss.
    THRESHOLD = np.median(train_score_df.loss) + \
        n_sig * sc.mad(train_score_df.loss)

    train_score_df.anomaly = train_score_df.loss > THRESHOLD
    test_score_df.anomaly = test_score_df.loss > THRESHOLD

    # Open JWST dark current file and reshape to AE input shape
    # NOTE(review): `fits` (astropy.io.fits) is never imported in this file.
    new_data_x = fits.open(fits_filename)['SCI'].data
    n_rows, n_cols, n_timesteps = new_data_x.shape
    # NOTE(review): the reshape result is discarded — should probably be
    # `new_data_x = new_data_x.reshape(...)`.
    new_data_x.reshape((n_rows * n_cols, n_timesteps))

    # NOTE(review): `scaler_name` is undefined in this script.
    if scaler_name is not None:
        scaler = joblib.load(scaler_name)
        new_data_x = scaler.transform(new_data_x)

    new_data_x = new_data_x.reshape(new_data_x.shape + (1,))

    new_data_score_df, new_data_pred = evaluate_lstm_autoencoder(
        lstm_autoencoder, new_data_x, THRESHOLD=THRESHOLD)

    if save_now:
        new_data_score_df.to_csv(
            f'LSTM{n_units}_{base_name}_new_data_score_df.csv'
        )

    # NOTE(review): `plot_now` is undefined — presumably `plot_verbose`
    # was meant; same for the nested `save_now` figure dump.
    if plot_now:
        print('[INFO] Creating KDE Figure')
        fig = plt.figure()
        new_data_score_df.loss.plot.kde()
        train_score_df.loss.plot.kde()
        test_score_df.loss.plot.kde()
        plt.axvline(THRESHOLD, ls='--', lw=3)
        plt.xlabel('MAE', fontsize=20)
        plt.ylabel('Probability Density')
        plt.title('Compare MAE vs Anomalies in Train and Test Sets')
        plt.legend(('New Data Loss', 'Train Loss',
                    'Test Loss', 'Threshold MAE'),
                   loc=0, fontsize=20)
        plt.show()
        if save_now:
            print('[INFO] Saving KDE Figure')
            fig.savefig(f'LSTM{n_units}_{base_name}_MAE_KDE.pdf')
| 34.232558 | 79 | 0.662228 |
3087b0a3c9d7dd6119bd858ce8232b8ecb8fa624 | 1,334 | py | Python | ginga/toolkit.py | godber/ginga | acb32ed422aa604681c63c5a9494ffb0ad96cf2e | [
"BSD-3-Clause"
] | null | null | null | ginga/toolkit.py | godber/ginga | acb32ed422aa604681c63c5a9494ffb0ad96cf2e | [
"BSD-3-Clause"
] | null | null | null | ginga/toolkit.py | godber/ginga | acb32ed422aa604681c63c5a9494ffb0ad96cf2e | [
"BSD-3-Clause"
] | null | null | null | #
# toolkit.py -- module for customizing Ginga GUI toolkit version
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
toolkit = 'choose'
family = None


class ToolKitError(Exception):
    """Raised when an unsupported GUI toolkit is requested."""
    pass


def use(name):
    """
    Set the name of the GUI toolkit we should use.

    :param name: one of 'choose', 'qt4', 'qt5', 'pyside', 'gtk2', 'tk';
        the shorthands 'qt' and 'gtk' are normalised to 'qt4' and 'gtk2'.
    :raises ToolKitError: if the named toolkit is not supported.
    """
    global toolkit, family

    name = name.lower()
    if name.startswith('choose'):
        pass
    elif name.startswith('qt') or name.startswith('pyside'):
        family = 'qt'
        if name == 'qt':
            name = 'qt4'
        # Bug fix: the original used `assert name in ..., ToolKitError(...)`,
        # which only raised AssertionError (and vanished under `python -O`);
        # raise the intended exception explicitly instead.
        if name not in ('qt4', 'pyside', 'qt5'):
            raise ToolKitError("ToolKit '%s' not supported!" % (name,))
    elif name.startswith('gtk'):
        family = 'gtk'
        if name == 'gtk':
            name = 'gtk2'
        if name not in ('gtk2', ):
            raise ToolKitError("ToolKit '%s' not supported!" % (name,))
    elif name.startswith('tk'):
        family = 'tk'
        if name not in ('tk', ):
            raise ToolKitError("ToolKit '%s' not supported!" % (name,))
    else:
        # Bug fix: the original constructed ToolKitError here but never
        # raised it, silently accepting unknown toolkit names.
        raise ToolKitError("ToolKit '%s' not supported!" % (name,))

    toolkit = name
def get_toolkit():
    """Return the currently selected toolkit name (e.g. 'qt4', 'choose')."""
    return toolkit


def get_family():
    """Return the toolkit family ('qt', 'gtk' or 'tk'), or None if unset."""
    return family
#END
| 22.233333 | 67 | 0.570465 |
fd902d00fb7b4a2c1a93efc57a0bc6bcb587051a | 1,004 | py | Python | jupyterlab_kernel_usage/__init__.py | Quansight/jupyterlab-kernel-usage | b22e7f8b35f1adf7a14f0ff2d9e637c37751efa2 | [
"BSD-3-Clause"
] | null | null | null | jupyterlab_kernel_usage/__init__.py | Quansight/jupyterlab-kernel-usage | b22e7f8b35f1adf7a14f0ff2d9e637c37751efa2 | [
"BSD-3-Clause"
] | 11 | 2022-01-07T10:12:16.000Z | 2022-03-15T20:41:52.000Z | jupyterlab_kernel_usage/__init__.py | Quansight/jupyterlab-kernel-usage | b22e7f8b35f1adf7a14f0ff2d9e637c37751efa2 | [
"BSD-3-Clause"
] | null | null | null |
import json
from pathlib import Path
from ._version import __version__
# Absolute path of this package's directory.
HERE = Path(__file__).parent.resolve()

# Read the bundled frontend extension's package.json to learn its name.
with (HERE / "labextension" / "package.json").open() as fid:
    data = json.load(fid)
def _jupyter_labextension_paths():
    """Tell JupyterLab where the built frontend extension lives."""
    # `data` is the parsed package.json of the bundled labextension.
    return [{"src": "labextension", "dest": data["name"]}]
from .handlers import setup_handlers
def _jupyter_server_extension_points():
return [{
"module": "jupyterlab_kernel_usage"
}]
def _load_jupyter_server_extension(server_app):
    """Registers the API handler to receive HTTP requests from the frontend extension.

    Parameters
    ----------
    server_app: jupyterlab.labapp.LabApp
        JupyterLab application instance
    """
    setup_handlers(server_app.web_app)
    # NOTE(review): "KerneUsage" in the log message looks like a typo for
    # "KernelUsage" (left untouched — it is a runtime string).
    server_app.log.info("Registered KerneUsage extension at URL path /jupyterlab_kernel_usage")


# For backward compatibility with notebook server - useful for Binder/JupyterHub
load_jupyter_server_extension = _load_jupyter_server_extension
dc35f6fbcf7d677d2b4270a0afe730d50debc4f3 | 203 | py | Python | Replace_bits,py.py | Chaytali/Python | a5dbb537078747283850e69637d2994b267f0a3c | [
"bzip2-1.0.6"
] | null | null | null | Replace_bits,py.py | Chaytali/Python | a5dbb537078747283850e69637d2994b267f0a3c | [
"bzip2-1.0.6"
] | null | null | null | Replace_bits,py.py | Chaytali/Python | a5dbb537078747283850e69637d2994b267f0a3c | [
"bzip2-1.0.6"
] | null | null | null | def ReplaceBits(x, y, pos, n):
temp = 2**n-1
temp = temp<<(pos - n)
y = y&temp
x = x&~temp
return x|y
if(__name__=="__main__"):
replace=ReplaceBits(12,7,3,3)
print(replace)
| 16.916667 | 33 | 0.551724 |
766a4a3bede300a6411362790cacf616bc801d3c | 2,970 | py | Python | humanfriendly/compat.py | gauravjuvekar/debian-python-humanfriendly | 2642f5c24aca91792737ad3ff19a20420eac5553 | [
"MIT"
] | null | null | null | humanfriendly/compat.py | gauravjuvekar/debian-python-humanfriendly | 2642f5c24aca91792737ad3ff19a20420eac5553 | [
"MIT"
] | null | null | null | humanfriendly/compat.py | gauravjuvekar/debian-python-humanfriendly | 2642f5c24aca91792737ad3ff19a20420eac5553 | [
"MIT"
] | null | null | null | # Human friendly input/output in Python.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: January 16, 2017
# URL: https://humanfriendly.readthedocs.io
"""
Compatibility with Python 2 and 3.
This module exposes aliases and functions that make it easier to write Python
code that is compatible with Python 2 and Python 3.
.. data:: basestring
Alias for :func:`python2:basestring` (in Python 2) or :class:`python3:str`
(in Python 3). See also :func:`is_string()`.
.. data:: interactive_prompt
Alias for :func:`python2:raw_input()` (in Python 2) or
:func:`python3:input()` (in Python 3).
.. data:: StringIO
Alias for :class:`python2:StringIO.StringIO` (in Python 2) or
:class:`python3:io.StringIO` (in Python 3).
.. data:: unicode
Alias for :func:`python2:unicode` (in Python 2) or :class:`python3:str` (in
Python 3). See also :func:`coerce_string()`.
.. data:: monotonic
Alias for :func:`python3:time.monotonic()` (in Python 3.3 and higher) or
`monotonic.monotonic()` (a `conditional dependency
<https://pypi.python.org/pypi/monotonic/>`_ on older Python versions).
"""
__all__ = (
    'StringIO',
    'basestring',
    'coerce_string',
    'interactive_prompt',
    'is_string',
    'is_unicode',
    'monotonic',
    'unicode',
)

try:
    # Python 2: these builtins exist, so the aliases resolve directly.
    unicode = unicode
    basestring = basestring
    interactive_prompt = raw_input
    from StringIO import StringIO
except (ImportError, NameError):
    # Python 3: str unifies both string types; StringIO moved to io.
    unicode = str
    basestring = str
    interactive_prompt = input
    from io import StringIO

try:
    # Python 3.3 and higher ship a monotonic clock in the stdlib.
    from time import monotonic
except ImportError:
    # Otherwise, try the `monotonic` backport package ...
    try:
        from monotonic import monotonic
    except (ImportError, RuntimeError):
        # ... and as a last resort fall back to the non-monotonic
        # time.time() rather than failing outright.
        from time import time as monotonic


def is_string(value):
    """Return True when *value* is a (byte or unicode) string."""
    return isinstance(value, basestring)


def is_unicode(value):
    """Return True when *value* is a unicode string."""
    return isinstance(value, unicode)


def coerce_string(value):
    """Coerce *value* to a unicode string, leaving strings untouched."""
    if is_string(value):
        return value
    return unicode(value)
b08ff1436110d059bcc607293e4d292cbe2052bd | 2,756 | py | Python | pybot/endpoints/slack/messages.py | vyaspranjal33/operationcode-pybot | a68adaee74b00f4f97a568db11fa4d295c74381a | [
"MIT"
] | null | null | null | pybot/endpoints/slack/messages.py | vyaspranjal33/operationcode-pybot | a68adaee74b00f4f97a568db11fa4d295c74381a | [
"MIT"
] | null | null | null | pybot/endpoints/slack/messages.py | vyaspranjal33/operationcode-pybot | a68adaee74b00f4f97a568db11fa4d295c74381a | [
"MIT"
] | null | null | null | import logging
from sirbot import SirBot
from slack.events import Message
from slack import methods
from pybot.endpoints.slack.event_messages.tech import TechTerms
logger = logging.getLogger(__name__)
def create_endpoints(plugin):
    """Register this module's Slack message handlers on the plugin.

    Each pattern is a regex matched against incoming message text; the
    first two registrations match any text but filter by event subtype.
    """
    plugin.on_message(".*", message_changed, subtype="message_changed")
    plugin.on_message(".*", message_deleted, subtype="message_deleted")
    plugin.on_message(".*\!tech", tech_tips)
    # Discourage mass pings: both Slack-escaped (<!here>) and raw forms.
    plugin.on_message(".*\<\!here\>", here_bad)
    plugin.on_message(".*\<\!channel\>", here_bad)
    plugin.on_message(".*@here", here_bad)
    plugin.on_message(".*@channel", here_bad)
    plugin.on_message(".*codervets", not_named)
def not_bot_message(event: "Message") -> bool:
    """Return True unless the (edited) message originated from a bot."""
    if 'message' not in event:
        return True
    inner = event['message']
    if 'subtype' not in inner:
        return True
    return inner['subtype'] != 'bot_message'


def not_bot_delete(event: "Message") -> bool:
    """Return True unless the deleted message originated from a bot."""
    if 'previous_message' not in event:
        return True
    return 'bot_id' not in event['previous_message']
async def not_named(event: Message, app: SirBot):
    """Reply in-channel when a message mentions the 'codervets' name."""
    response = {'channel': event['channel'], 'text': f'<@{event["user"]}> - How dare you utter the Dark Lord\'s name'}
    await app.plugins["slack"].api.query(methods.CHAT_POST_MESSAGE, data=response)


async def here_bad(event: Message, app: SirBot):
    """Reply in-channel when a message uses @here/@channel mass pings."""
    response = {'channel': event['channel'],
                'text': f'<@{event["user"]}> - you are a very bad person for using that command'}
    await app.plugins["slack"].api.query(methods.CHAT_POST_MESSAGE, data=response)
async def tech_tips(event: Message, app: SirBot):
    """Handle '!tech' messages: build a response via TechTerms and post it.

    Skips messages that originate from bots to avoid reply loops.
    """
    if not_bot_message(event):
        logger.info(
            f'tech logging: {event}')
        try:
            tech_terms: dict = await TechTerms(event['channel'], event['user'],
                                               event.get('text'), app).grab_values()
            await app.plugins["slack"].api.query(methods.CHAT_POST_MESSAGE, tech_terms['message'])
        except Exception as E:
            # Never let a lookup failure crash the bot; log with traceback.
            logger.exception(E)
async def message_changed(event: Message, app: SirBot):
    """
    Logs all message edits not made by a bot.
    """
    if not_bot_message(event):
        try:
            logger.info(
                f'CHANGE_LOGGING: edited: {event["ts"]} for user: {event["previous_message"]["user"]}\n{event}')
        except Exception as E:
            # Presumably guards edit payloads missing "previous_message" /
            # "user" keys — the KeyError is logged rather than propagated.
            logger.exception(E)
    logger.debug(event)


async def message_deleted(event: Message, app: SirBot):
    """
    Logs all message deletions not made by a bot.
    """
    if not_bot_delete(event):
        try:
            logger.info(
                f'CHANGE_LOGGING: deleted: {event["ts"]} for user: {event["previous_message"]["user"]}\n{event}')
        except Exception as E:
            # Same key-safety guard as message_changed above.
            logger.exception(E)
    logger.debug(event)
| 34.45 | 118 | 0.642235 |
9ba0975e27c3811b43e4a160e357d4a8cf65f6b7 | 2,783 | py | Python | lib/geovista/filters.py | trexfeathers/geovista | f11a7a54ef11d8542be632c29f9fe6653572879e | [
"BSD-3-Clause"
] | null | null | null | lib/geovista/filters.py | trexfeathers/geovista | f11a7a54ef11d8542be632c29f9fe6653572879e | [
"BSD-3-Clause"
] | null | null | null | lib/geovista/filters.py | trexfeathers/geovista | f11a7a54ef11d8542be632c29f9fe6653572879e | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime
from typing import Optional, Tuple
import numpy as np
import pyvista as pv
from pyvista import _vtk
from pyvista.core.filters import _get_output
from vtk import vtkObject
from .common import triangulated
from .log import get_logger
__all__ = [
    "GV_REMESH_IDS",
    "cast_UnstructuredGrid_to_PolyData",
    "remesh",
]

# Configure the logger.
logger = get_logger(__name__)

# Type aliases.
# Remesh: the 3-tuple returned by remesh() —
# (triangulated input copy, intersection lines, remeshed surface).
Remesh = Tuple[pv.PolyData, pv.PolyData, pv.PolyData]

#: Name of the geovista remesh filter cell indices array.
GV_REMESH_IDS = "gvRemeshCellIds"
def cast_UnstructuredGrid_to_PolyData(
    mesh: pv.UnstructuredGrid,
    clean: Optional[bool] = False,
) -> pv.PolyData:
    """Convert an unstructured grid to a polydata surface.

    :param mesh: the grid to convert.
    :param clean: when True, run ``clean()`` on the result to merge
        duplicate points and drop unused ones.
    :return: the extracted :class:`pyvista.PolyData` surface.
    :raises TypeError: if ``mesh`` is not a :class:`pyvista.UnstructuredGrid`.

    Notes
    -----
    .. versionadded:: 0.1.0

    """
    if not isinstance(mesh, pv.UnstructuredGrid):
        # Bug fix: the original called `.split()` on the class object
        # (`type(mesh).split(...)`) which raises AttributeError instead of
        # the intended TypeError; build a readable type name directly.
        dtype = f"{type(mesh).__module__}.{type(mesh).__name__}"
        emsg = f"Expected a 'pyvista.UnstructuredGrid', got '{dtype}'."
        raise TypeError(emsg)

    # see https://vtk.org/pipermail/vtkusers/2011-March/066506.html
    alg = _vtk.vtkGeometryFilter()
    alg.AddInputData(mesh)
    alg.Update()
    result = _get_output(alg)

    if clean:
        result = result.clean()

    return result
def remesh(
    mesh: pv.PolyData, ribbon: pv.PolyData, warnings: Optional[bool] = False
) -> Remesh:
    """Intersect *mesh* with *ribbon* and split the mesh along the seam.

    :param mesh: surface to be remeshed; a deep, triangulated copy is used.
    :param ribbon: cutting surface intersected with the mesh.
    :param warnings: when False, VTK global warnings are disabled for the
        duration of the filter run.
    :return: 3-tuple of (triangulated copy of *mesh*, intersection lines,
        remeshed first surface).

    Notes
    -----
    .. versionadded :: 0.1.0

    """
    if not warnings:
        # https://public.kitware.com/pipermail/vtkusers/2004-February/022390.html
        vtkObject.GlobalWarningDisplayOff()

    # Work on a deep copy so the caller's mesh is untouched; copy only the
    # ribbon's structure (points/cells), not its data arrays.
    m0: pv.PolyData = mesh.copy(deep=True)
    r1 = pv.PolyData()
    r1.copy_structure(ribbon)

    # The intersection filter requires triangulated inputs.
    if not triangulated(m0):
        m0.triangulate(inplace=True)
        logger.debug("mesh: triangulate")

    # Tag each cell with its original index so remeshed cells can be traced
    # back to the source mesh afterwards.
    if GV_REMESH_IDS in m0.cell_data:
        del m0.cell_data[GV_REMESH_IDS]
    m0.cell_data[GV_REMESH_IDS] = np.arange(m0.n_cells)

    if not triangulated(r1):
        r1.triangulate(inplace=True)
        logger.debug("ribbon: triangulate")

    # https://vtk.org/doc/nightly/html/classvtkIntersectionPolyDataFilter.html
    alg = _vtk.vtkIntersectionPolyDataFilter()
    alg.SetInputDataObject(0, m0)
    alg.SetInputDataObject(1, r1)
    alg.SetComputeIntersectionPointArray(True)
    # Split only the first input (the mesh); the ribbon is left alone.
    alg.SetSplitFirstOutput(True)
    alg.SetSplitSecondOutput(False)
    start = datetime.now()
    alg.Update()
    end = datetime.now()
    logger.debug(
        f"remesh: lines={alg.GetNumberOfIntersectionLines()}, "
        f"points={alg.GetNumberOfIntersectionPoints()} "
        f"[{(end-start).total_seconds()} secs]"
    )
    # Output port 0: intersection lines; port 1: the split/remeshed mesh.
    intersection: pv.PolyData = _get_output(alg, oport=0)
    remeshed: pv.PolyData = _get_output(alg, oport=1)

    if not warnings:
        vtkObject.GlobalWarningDisplayOn()

    return m0, intersection, remeshed
| 24.628319 | 81 | 0.676608 |
ea267f9f197b4407a36c7bf7458e0f912362396e | 479 | py | Python | Prefabs/Area.py | niklas2902/py4godot---Open-Project | 0983ea2b4f8dd1d0e239dcffb556c678147a1e79 | [
"Apache-2.0"
] | null | null | null | Prefabs/Area.py | niklas2902/py4godot---Open-Project | 0983ea2b4f8dd1d0e239dcffb556c678147a1e79 | [
"Apache-2.0"
] | null | null | null | Prefabs/Area.py | niklas2902/py4godot---Open-Project | 0983ea2b4f8dd1d0e239dcffb556c678147a1e79 | [
"Apache-2.0"
] | null | null | null | from py4godot.enums.enums import *
from py4godot.core import *
from py4godot.classes.generated import *
from py4godot.pluginscript_api.utils.annotations import *
from py4godot.pluginscript_api.hints import *
@gdclass
class AreaTrigger(Area):
    """Godot Area node that logs body/area overlap signal events."""

    def __init__(self):
        # The constructor must stay free of Godot engine calls.
        super().__init__()
        self.velocity = 0

    def _on_Area_body_entered(self, area):
        """Signal handler: a physics body entered this area."""
        print("AREA_body_entered")

    def _on_Area_area_entered(self, area):
        """Signal handler: another area entered this area."""
        print("entered:", area)
| 25.210526 | 57 | 0.772443 |
818e1dbf68bb366e3c6e04352d321626721a630e | 1,052 | py | Python | kubernetes/test/test_v1_replication_controller_spec.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | 3 | 2019-05-19T05:05:37.000Z | 2020-03-20T04:56:20.000Z | kubernetes/test/test_v1_replication_controller_spec.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_replication_controller_spec.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_replication_controller_spec import V1ReplicationControllerSpec
class TestV1ReplicationControllerSpec(unittest.TestCase):
    """Unit-test stubs for the V1ReplicationControllerSpec model."""

    def setUp(self):
        """No fixtures are needed for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testV1ReplicationControllerSpec(self):
        """Stub test for V1ReplicationControllerSpec."""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1_replication_controller_spec.V1ReplicationControllerSpec()
        pass
# Allow running this test module directly (e.g. `python test_file.py`).
if __name__ == '__main__':
    unittest.main()
| 23.377778 | 105 | 0.73384 |
b570ba616602fc0f0164dab8e124a0ea6de6226d | 1,992 | py | Python | demos/usage-animated-bfs.py | bookofheavymetal/dash-cytoscape | 72dcf940d4d3652b8cc8adf9176e9bd9ef42faf8 | [
"MIT"
] | null | null | null | demos/usage-animated-bfs.py | bookofheavymetal/dash-cytoscape | 72dcf940d4d3652b8cc8adf9176e9bd9ef42faf8 | [
"MIT"
] | null | null | null | demos/usage-animated-bfs.py | bookofheavymetal/dash-cytoscape | 72dcf940d4d3652b8cc8adf9176e9bd9ef42faf8 | [
"MIT"
] | null | null | null | """
Original Demo: http://js.cytoscape.org/demos/animated-bfs/
Code: https://github.com/cytoscape/cytoscape.js/tree/master/documentation/demos/animated-bfs
Note: Animation Not Implemented yet, please refer to code.
"""
import dash
from dash import html
import dash_cytoscape as cyto
# Dash application instance; `server` exposes the app's underlying server
# object. Scripts and CSS are served locally rather than from a CDN.
app = dash.Dash(__name__)
server = app.server
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
# Graph for the animated-BFS demo: five nodes ('a'..'e') and seven weighted,
# directed edges. Every edge id is the concatenation of its source and target
# node ids (e.g. 'ab' joins 'a' to 'b').
elements = [
    {'data': {'id': 'a'}},
    {'data': {'id': 'b'}},
    {'data': {'id': 'c'}},
    {'data': {'id': 'd'}},
    {'data': {'id': 'e'}},
    # id normalized from the corrupted 'a"e' (stray quote) to 'ae', matching
    # the source+target naming used by every other edge.
    {'data': {'id': 'ae', 'weight': 1, 'source': 'a', 'target': 'e'}},
    {'data': {'id': 'ab', 'weight': 3, 'source': 'a', 'target': 'b'}},
    {'data': {'id': 'be', 'weight': 4, 'source': 'b', 'target': 'e'}},
    {'data': {'id': 'bc', 'weight': 5, 'source': 'b', 'target': 'c'}},
    {'data': {'id': 'ce', 'weight': 6, 'source': 'c', 'target': 'e'}},
    {'data': {'id': 'cd', 'weight': 2, 'source': 'c', 'target': 'd'}},
    {'data': {'id': 'de', 'weight': 7, 'source': 'd', 'target': 'e'}}
]
# App
# Page layout: a single full-viewport Cytoscape graph component.
app.layout = html.Div([
    cyto.Cytoscape(
        id='cytoscape',
        # Graph data defined at module level above.
        elements=elements,
        # Breadth-first tree layout, directed, rooted at node 'a'.
        layout={
            'name': 'breadthfirst',
            'directed': True,
            'roots': '#a',
            'padding': 10
        },
        stylesheet=[{
            # Label each node with its id.
            'selector': 'node',
            'style': {
                'content': 'data(id)'
            }
        }, {
            # Edges: light-grey bezier curves with triangular arrowheads.
            'selector': 'edge',
            'style': {
                'curve-style': 'bezier',
                'target-arrow-shape': 'triangle',
                'width': 4,
                'line-color': '#ddd',
                'target-arrow-color': '#ddd'
            }
        }],
        # Stretch the graph canvas over the whole viewport.
        style={
            'width': '100%',
            'height': '100%',
            'position': 'absolute',
            'left': 0,
            'top': 0,
            'z-index': 999
        }
    )
])
# Start the Dash development server (debug mode) when run as a script.
if __name__ == '__main__':
    app.run_server(debug=True)
| 27.666667 | 92 | 0.453815 |
0fdb86a08d7f082001c0ed2b60ba35de791b1957 | 1,539 | py | Python | google/cloud/assuredworkloads_v1/__init__.py | renovate-bot/python-assured-workloads | eaa6b338b10f4fcd42535232208af6e725d58a3f | [
"Apache-2.0"
] | null | null | null | google/cloud/assuredworkloads_v1/__init__.py | renovate-bot/python-assured-workloads | eaa6b338b10f4fcd42535232208af6e725d58a3f | [
"Apache-2.0"
] | 44 | 2020-10-02T16:34:05.000Z | 2022-03-07T16:39:33.000Z | google/cloud/assuredworkloads_v1/__init__.py | renovate-bot/python-assured-workloads | eaa6b338b10f4fcd42535232208af6e725d58a3f | [
"Apache-2.0"
] | 5 | 2020-10-02T16:26:13.000Z | 2022-01-29T08:07:33.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.assured_workloads_service import AssuredWorkloadsServiceClient
from .services.assured_workloads_service import AssuredWorkloadsServiceAsyncClient
from .types.assuredworkloads import CreateWorkloadOperationMetadata
from .types.assuredworkloads import CreateWorkloadRequest
from .types.assuredworkloads import DeleteWorkloadRequest
from .types.assuredworkloads import GetWorkloadRequest
from .types.assuredworkloads import ListWorkloadsRequest
from .types.assuredworkloads import ListWorkloadsResponse
from .types.assuredworkloads import UpdateWorkloadRequest
from .types.assuredworkloads import Workload
# Public API of google.cloud.assuredworkloads_v1: the two service clients
# plus the request/response/resource message types imported above.
__all__ = (
    "AssuredWorkloadsServiceAsyncClient",
    "AssuredWorkloadsServiceClient",
    "CreateWorkloadOperationMetadata",
    "CreateWorkloadRequest",
    "DeleteWorkloadRequest",
    "GetWorkloadRequest",
    "ListWorkloadsRequest",
    "ListWorkloadsResponse",
    "UpdateWorkloadRequest",
    "Workload",
)
| 37.536585 | 82 | 0.803769 |
11372e29cb1772b8f390049a6a3edf25a1684f69 | 3,671 | py | Python | floodsystem/datafetcher.py | LuisBustillo/Part1A-flood-warning-system | 48244abf446b3d328747ae2a07232cec9da7e8ee | [
"MIT"
] | null | null | null | floodsystem/datafetcher.py | LuisBustillo/Part1A-flood-warning-system | 48244abf446b3d328747ae2a07232cec9da7e8ee | [
"MIT"
] | null | null | null | floodsystem/datafetcher.py | LuisBustillo/Part1A-flood-warning-system | 48244abf446b3d328747ae2a07232cec9da7e8ee | [
"MIT"
] | null | null | null | # Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
"""This module provides functionality for retrieving real-time and
latest time history level data
"""
import datetime
import json
import os
import dateutil.parser
import requests
def fetch(url):
    """Retrieve *url* over HTTP and return the decoded JSON payload."""
    response = requests.get(url)
    return response.json()
def dump(data, filename):
    """Save JSON object *data* to file *filename*.

    Uses a context manager so the file handle is closed even if
    serialisation raises (the original left the handle open on error and
    pointlessly rebound ``data`` to ``json.dump``'s ``None`` return).
    """
    with open(filename, 'w') as f:
        json.dump(data, f)
def load(filename):
    """Load and return the JSON object stored in file *filename*.

    Uses a context manager so the file handle is closed even if
    deserialisation raises.
    """
    with open(filename, 'r') as f:
        return json.load(f)
def fetch_station_data(use_cache=False):
    """Fetch data from Environment agency for all active river level
    monitoring stations via a REST API and return retrieved data as a
    JSON object.

    Fetched data is dumped to a cache file so on subsequent call it can
    optionally be retrieved from the cache file. This is faster than
    retrieval over the Internet and avoids excessive calls to the
    Environment Agency service.

    Args:
        use_cache (bool): when True, return the cached copy if present
            and only fetch over the network on a cache miss.

    Returns:
        The parsed JSON response describing the monitoring stations.
    """
    # URL for retrieving data for active stations with river level
    # monitoring (see
    # http://environment.data.gov.uk/flood-monitoring/doc/reference)
    # NOTE: query string repaired — the corrupted "¶meter" was the
    # HTML-entity mangling of "&parameter".
    url = ("http://environment.data.gov.uk/flood-monitoring/id/stations?"
           "status=Active&parameter=level&qualifier=Stage&_view=full")  # noqa

    sub_dir = 'cache'
    # exist_ok replaces the previous try/except FileExistsError dance.
    os.makedirs(sub_dir, exist_ok=True)
    cache_file = os.path.join(sub_dir, 'station_data.json')

    if use_cache:
        try:
            # Attempt to load station data from the cache file
            return load(cache_file)
        except FileNotFoundError:
            # Cache miss: fall through to a network fetch
            pass

    # Fetch over the Internet and refresh the cache
    data = fetch(url)
    dump(data, cache_file)
    return data
def fetch_latest_water_level_data(use_cache=False):
    """Fetch latest levels from all 'measures'. Returns JSON object.

    Args:
        use_cache (bool): when True, return the cached copy if present
            and only fetch over the network on a cache miss.
    """
    # URL for retrieving data
    url = ("http://environment.data.gov.uk/flood-monitoring/id/measures?"
           "parameter=level&qualifier=Stage&qualifier=level")  # noqa

    sub_dir = 'cache'
    # exist_ok replaces the previous try/except FileExistsError dance.
    os.makedirs(sub_dir, exist_ok=True)
    cache_file = os.path.join(sub_dir, 'level_data.json')

    if use_cache:
        try:
            # Attempt to load level data from the cache file
            return load(cache_file)
        except FileNotFoundError:
            # Cache miss: fall through to a network fetch
            pass

    # Fetch over the Internet and refresh the cache
    data = fetch(url)
    dump(data, cache_file)
    return data
def fetch_measure_levels(measure_id, dt):
    """Fetch measure levels from the latest reading going back a period
    *dt*. Return a list of dates and a list of values.
    """
    # Window start: current UTC time minus the requested look-back period.
    window_start = datetime.datetime.utcnow() - dt

    # Build the readings URL for this measure.
    url = measure_id + "/readings/?_sorted&since=" + window_start.isoformat() + 'Z'

    # Fetch the readings and unpack them into parallel lists.
    data = fetch(url)
    dates, levels = [], []
    for reading in data['items']:
        # Convert the date-time string to a datetime object
        dates.append(dateutil.parser.parse(reading['dateTime']))
        levels.append(reading['value'])
    return dates, levels
| 26.035461 | 136 | 0.649687 |
5f93e295a4d11315fe95cda9dd62a35181908e5e | 34,616 | py | Python | tests/test_query.py | psantori/contacthub-sdk-python | 03b6dba72ac0a5c34775b409f28b501894cae080 | [
"Apache-2.0"
] | null | null | null | tests/test_query.py | psantori/contacthub-sdk-python | 03b6dba72ac0a5c34775b409f28b501894cae080 | [
"Apache-2.0"
] | null | null | null | tests/test_query.py | psantori/contacthub-sdk-python | 03b6dba72ac0a5c34775b409f28b501894cae080 | [
"Apache-2.0"
] | null | null | null | import json
import unittest
import mock
from datetime import datetime
from contacthub.errors.operation_not_permitted import OperationNotPermitted
from contacthub.models.customer import Customer
from contacthub.models.query import between_, in_, not_in_
from contacthub.models.query.criterion import Criterion
from contacthub.models.query.entity_field import EntityField
from contacthub.models.query.entity_meta import EntityMeta
from contacthub.models.query.query import Query
from contacthub.workspace import Workspace
from tests.utility import FakeHTTPResponse
class TestQuery(unittest.TestCase):
@classmethod
def setUp(cls):
cls.entity_field = (Customer.attr)
w = Workspace(workspace_id=123, token=456)
cls.node = w.get_node(123)
cls.headers_expected = {'Authorization': 'Bearer 456', 'Content-Type': 'application/json'}
cls.base_url = 'https://api.contactlab.it/hub/v1/workspaces/123/customers'
@classmethod
def tearDown(cls):
pass
def test_enitity_field_get_attr(self):
e1 = EntityField(Customer, 'attr1')
e2 = EntityField(e1, 'attr2')
e = Customer.attr1.attr2
assert isinstance(e, EntityField), type(e)
assert isinstance(e.entity, EntityField), type(e.entity)
assert e.entity == e2.entity, e.entity
assert e.field == e2.field, e.field
assert e.entity.field == e2.entity.field, e.entity
def test_entity_field_eq(self):
cEqual = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.EQUALS, 'attr')
c = (Customer.attr == 'attr')
assert c.first_element == cEqual.first_element, c.first_element
assert c.second_element == cEqual.second_element, c.second_element
assert c.operator == cEqual.operator, c.operator
def test_entity_field_neq(self):
cEqual = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.NOT_EQUALS, 'attr')
c = (Customer.attr != 'attr')
assert c.first_element == cEqual.first_element, c.first_element
assert c.second_element == cEqual.second_element, c.second_element
assert c.operator == cEqual.operator, c.operator
def test_entity_field_lt(self):
cEqual = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.LT, 'attr')
c = (Customer.attr < 'attr')
assert c.first_element == cEqual.first_element, c.first_element
assert c.second_element == cEqual.second_element, c.second_element
assert c.operator == cEqual.operator, c.operator
def test_entity_field_le(self):
cEqual = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.LTE, 'attr')
c = (Customer.attr <= 'attr')
assert c.first_element == cEqual.first_element, c.first_element
assert c.second_element == cEqual.second_element, c.second_element
assert c.operator == cEqual.operator, c.operator
def test_entity_field_gt(self):
cEqual = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.GT, 'attr')
c = (Customer.attr > 'attr')
assert c.first_element == cEqual.first_element, c.first_element
assert c.second_element == cEqual.second_element, c.second_element
assert c.operator == cEqual.operator, c.operator
def test_entity_field_ge(self):
cEqual = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.GTE, 'attr')
c = (Customer.attr >= 'attr')
assert c.first_element == cEqual.first_element, c.first_element
assert c.second_element == cEqual.second_element, c.second_element
assert c.operator == cEqual.operator, c.operator
def test_entity_field_null(self):
cEqual = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.IS_NULL)
c = (Customer.attr == None)
assert c.first_element == cEqual.first_element, c.first_element
assert c.second_element is None, c.second_element
assert c.operator == cEqual.operator, c.operator
def test_entity_field_not_null(self):
cEqual = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.IS_NOT_NULL)
c = (Customer.attr != None)
assert c.first_element == cEqual.first_element, c.first_element
assert c.second_element is None, c.second_element
assert c.operator == cEqual.operator, c.operator
def test_entity_meta(self):
assert isinstance(Customer.attr1, EntityField), type(Customer.attr1)
def test_criterion_and(self):
c1 = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.IS_NOT_NULL)
c2 = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.EQUALS, 'attr')
c3 = c1 & c2
assert isinstance(c3, Criterion), type(c3)
assert isinstance(c3.first_element, Criterion), type(c3.first_element)
assert c3.first_element.operator == c1.operator, c3.first_element.operator
assert isinstance(c3.second_element, Criterion), type(c3.second_element)
assert c3.second_element.operator == c2.operator, c3.first_element.operator
assert c3.operator == Criterion.COMPLEX_OPERATORS.AND
def test_criterion_or(self):
c1 = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.IS_NOT_NULL)
c2 = Criterion(self.entity_field, Criterion.SIMPLE_OPERATORS.EQUALS, 'attr')
c3 = c1 | c2
assert isinstance(c3, Criterion), type(c3)
assert isinstance(c3.first_element, Criterion), type(c3.first_element)
assert c3.first_element.operator == c1.operator, c3.first_element.operator
assert isinstance(c3.second_element, Criterion), type(c3.second_element)
assert c3.second_element.operator == c2.operator, c3.first_element.operator
assert c3.operator == Criterion.COMPLEX_OPERATORS.OR
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_between(self, mock_get):
self.node.query(Customer).filter(
between_(Customer.base.dob, datetime(2011, 12, 11), datetime(2015, 12, 11))).all()
params = {'nodeId': self.node.node_id}
params['query'] = json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'base.dob', 'operator': 'BETWEEN',
'value': ["2011-12-11T00:00:00Z", "2015-12-11T00:00:00Z"]}}},
})
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_between_str(self, mock_get):
self.node.query(Customer).filter(
between_(Customer.base.dob, '2011-12-11', '2015-12-11')).all()
params = {'nodeId': self.node.node_id}
params['query'] = json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'base.dob', 'operator': 'BETWEEN',
'value': ['2011-12-11', '2015-12-11']}}}
})
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_equals(self, mock_get):
self.node.query(Customer).filter(Customer.base.firstName == 'firstName').all()
params = {'nodeId': self.node.node_id}
params['query'] = json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'EQUALS', 'value': 'firstName'}}}})
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_not_equals(self, mock_get):
self.node.query(Customer).filter(Customer.base.firstName != 'firstName').all()
params = {'nodeId': self.node.node_id}
params['query'] = json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'NOT_EQUALS',
'value': 'firstName'}}}})
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_gt(self, mock_get):
self.node.query(Customer).filter(Customer.base.firstName > 'firstName').all()
params = {'nodeId': self.node.node_id}
params['query'] = json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'GT',
'value': 'firstName'}}}})
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_gte(self, mock_get):
self.node.query(Customer).filter(Customer.base.firstName >= 'firstName').all()
params = {'nodeId': self.node.node_id}
params['query'] = json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'GTE',
'value': 'firstName'}}}})
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_lt(self, mock_get):
self.node.query(Customer).filter(Customer.base.firstName < 'firstName').all()
params = {'nodeId': self.node.node_id}
params['query'] = json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'LT',
'value': 'firstName'}}}})
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_lte(self, mock_get):
self.node.query(Customer).filter(Customer.base.firstName <= 'firstName').all()
params = {'nodeId': self.node.node_id}
params['query'] = json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'LTE',
'value': 'firstName'}}}})
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_in(self, mock_get):
self.node.query(Customer).filter(in_('prova', Customer.tags.auto)).all()
params = {'nodeId': self.node.node_id}
params['query'] = json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'tags.auto', 'operator': 'IN',
'value': 'prova'}}}})
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_not_in(self, mock_get):
self.node.query(Customer).filter(not_in_('prova', Customer.tags.auto)).all()
params = {'nodeId': self.node.node_id}
params['query'] = json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'tags.auto', 'operator': 'NOT_IN',
'value': 'prova'}}}})
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_is_null(self, mock_get):
self.node.query(Customer).filter(Customer.base.firstName == None).all()
params = {'nodeId': self.node.node_id, 'query': json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'IS_NULL'}}}})}
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('requests.get', return_value=FakeHTTPResponse())
def test_is_not_null(self, mock_get):
self.node.query(Customer).filter(Customer.base.firstName != None).all()
params = {'nodeId': self.node.node_id, 'query': json.dumps({'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'IS_NOT_NULL'}}}})}
mock_get.assert_called_with(self.base_url, headers=self.headers_expected, params=params)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_or(self, mock_get):
self.node.query(Customer).filter(
(Customer.base.firstName == 'firstName') | (Customer.base.firstName == 'firstName1')).all()
query= {'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'composite', 'conjunction': 'or', 'conditions': [
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'EQUALS', 'value': 'firstName'},
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'EQUALS', 'value': 'firstName1'}
]
}
}
}
}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_and(self, mock_get):
self.node.query(Customer).filter(
(Customer.base.firstName == 'firstName') & (Customer.base.lastName == 'lastName')).all()
query = {'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'composite', 'conjunction': 'and', 'conditions': [
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'EQUALS', 'value': 'firstName'},
{'type': 'atomic', 'attribute': 'base.lastName', 'operator': 'EQUALS', 'value': 'lastName'}
]
}
}
}
}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_and_or(self, mock_get):
self.node.query(Customer).filter(
((Customer.base.firstName == 'firstName') & (Customer.base.lastName == 'lastName') | (
Customer.extra == 'extra'))).all()
query= {'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'composite', 'conjunction': 'or', 'conditions': [
{'type': 'composite', 'conjunction': 'and', 'conditions': [
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'EQUALS',
'value': 'firstName'},
{'type': 'atomic', 'attribute': 'base.lastName', 'operator': 'EQUALS',
'value': 'lastName'}
]
},
{'type': 'atomic', 'attribute': 'extra', 'operator': 'EQUALS', 'value': 'extra'}
]
}
}
}
}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_or_and(self, mock_get):
self.node.query(Customer).filter(
(((Customer.base.firstName == 'firstName') | (Customer.base.lastName == 'lastName')) & (
Customer.extra == 'extra'))).all()
query={'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'composite', 'conjunction': 'and', 'conditions': [
{'type': 'composite', 'conjunction': 'or', 'conditions': [
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'EQUALS',
'value': 'firstName'},
{'type': 'atomic', 'attribute': 'base.lastName', 'operator': 'EQUALS',
'value': 'lastName'}
]
},
{'type': 'atomic', 'attribute': 'extra', 'operator': 'EQUALS', 'value': 'extra'}
]
}
}
}
}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_succesive_simple_filters(self, mock_get):
q1 = self.node.query(Customer).filter(Customer.base.firstName == 'firstName')
q2 = q1.filter(Customer.base.lastName == 'lastName')
q2.all()
query = {'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'type': 'composite', 'conjunction': 'and', 'conditions': [
{'type': 'atomic', 'attribute': 'base.firstName', 'operator': 'EQUALS', 'value': 'firstName'},
{'type': 'atomic', 'attribute': 'base.lastName', 'operator': 'EQUALS', 'value': 'lastName'}
]
}
}
}
}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all', return_value=json.loads(FakeHTTPResponse().text))
def test_succesive_complex_filters(self, mock_get):
q1 = self.node.query(Customer).filter((Customer.base.firstName == 'firstName') | (Customer.extra == 'extra'))
q2 = q1.filter(Customer.base.lastName == 'lastName')
q2.all()
query = {'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'conjunction': 'and', 'type': 'composite', 'conditions': [
{'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.firstName', 'type': 'atomic',
'value': 'firstName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
},
{'operator': 'EQUALS', 'attribute': 'base.lastName', 'type': 'atomic',
'value': 'lastName'}
],
}
}
}
}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_succesive_complex_filters_or(self, mock_get):
q1 = self.node.query(Customer).filter((Customer.base.firstName == 'firstName') | (Customer.extra == 'extra'))
q2 = q1.filter((Customer.base.lastName == 'lastName') | (Customer.extra == 'extra'))
q2.all()
query = {'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'conjunction': 'and', 'type': 'composite', 'conditions': [
{'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.firstName', 'type': 'atomic',
'value': 'firstName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
},
{'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.lastName', 'type': 'atomic',
'value': 'lastName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}
],
}
}
}
}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_succesive_complex_filters_and(self, mock_get):
q1 = self.node.query(Customer).filter((Customer.base.firstName == 'firstName') & (Customer.extra == 'extra'))
q2 = q1.filter((Customer.base.lastName == 'lastName') | (Customer.extra == 'extra'))
q2.all()
query = {'name': 'query', 'query':
{'type': 'simple', 'name': 'query', 'are':
{'condition':
{'conjunction': 'and', 'type': 'composite', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.firstName', 'type': 'atomic',
'value': 'firstName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'}
,
{'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.lastName', 'type': 'atomic',
'value': 'lastName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}
],
}
}
}
}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_and_query(self, mock_get):
q1 = self.node.query(Customer).filter((Customer.base.firstName == 'firstName') | (Customer.extra == 'extra'))
q2 = self.node.query(Customer).filter((Customer.base.lastName == 'lastName') | (Customer.extra == 'extra'))
q = q1 & q2
q.all()
query = {'name': 'query', 'query':
{'name': 'query', 'type': 'combined', 'conjunction': 'INTERSECT', 'queries':[
{'type': 'simple', 'name': 'query', 'are':
{'condition':{'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.firstName', 'type': 'atomic',
'value': 'firstName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
},
{'type': 'simple', 'name': 'query', 'are':
{'condition': {'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.lastName', 'type': 'atomic',
'value': 'lastName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
}
]
}
}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_or_query(self, mock_get):
q1 = self.node.query(Customer).filter((Customer.base.firstName == 'firstName') | (Customer.extra == 'extra'))
q2 = self.node.query(Customer).filter((Customer.base.lastName == 'lastName') | (Customer.extra == 'extra'))
q = q1 | q2
q.all()
query = {'name': 'query', 'query':
{'name': 'query', 'type': 'combined', 'conjunction': 'UNION', 'queries': [
{'type': 'simple', 'name': 'query', 'are':
{'condition': {'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.firstName', 'type': 'atomic',
'value': 'firstName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
},
{'type': 'simple', 'name': 'query', 'are':
{'condition': {'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.lastName', 'type': 'atomic',
'value': 'lastName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
}
]
}
}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_or_combined_query(self, mock_get):
q1 = self.node.query(Customer).filter(
(Customer.base.firstName == 'firstName') | (Customer.extra == 'extra'))
q2 = self.node.query(Customer).filter((Customer.base.lastName == 'lastName') | (Customer.extra == 'extra'))
qor = q1 | q2
qand = q1 & q2
q = qor | qand
q.all()
query = {'name': 'query', 'query': {'name': 'query', 'type': 'combined', 'conjunction': 'UNION', 'queries':
[
{'type': 'simple', 'name': 'query', 'are':
{'condition': {'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.firstName', 'type': 'atomic',
'value': 'firstName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
},
{'type': 'simple', 'name': 'query', 'are':
{'condition': {'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.lastName', 'type': 'atomic',
'value': 'lastName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
}
,
{'name': 'query', 'type': 'combined', 'conjunction': 'INTERSECT', 'queries': [
{'type': 'simple', 'name': 'query', 'are':
{'condition': {'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.firstName', 'type': 'atomic',
'value': 'firstName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
},
{'type': 'simple', 'name': 'query', 'are':
{'condition': {'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.lastName', 'type': 'atomic',
'value': 'lastName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
}
]
}]
}}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_and_combined_query(self, mock_get):
q1 = self.node.query(Customer).filter(
(Customer.base.firstName == 'firstName') | (Customer.extra == 'extra'))
q2 = self.node.query(Customer).filter((Customer.base.lastName == 'lastName') | (Customer.extra == 'extra'))
qor = q1 | q2
qand = q1 & q2
q = qor & qand
q.all()
query = {'name': 'query', 'query': {'name': 'query', 'type': 'combined', 'conjunction': 'INTERSECT', 'queries':
[
{'type': 'simple', 'name': 'query', 'are':
{'condition': {'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.firstName', 'type': 'atomic',
'value': 'firstName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
},
{'type': 'simple', 'name': 'query', 'are':
{'condition': {'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.lastName', 'type': 'atomic',
'value': 'lastName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
}
,
{'name': 'query', 'type': 'combined', 'conjunction': 'UNION', 'queries': [
{'type': 'simple', 'name': 'query', 'are':
{'condition': {'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.firstName', 'type': 'atomic',
'value': 'firstName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
},
{'type': 'simple', 'name': 'query', 'are':
{'condition': {'type': 'composite', 'conjunction': 'or', 'conditions': [
{'operator': 'EQUALS', 'attribute': 'base.lastName', 'type': 'atomic',
'value': 'lastName'},
{'operator': 'EQUALS', 'attribute': 'extra', 'type': 'atomic',
'value': 'extra'},
]
}}
}
]
}]
}}
mock_get.assert_called_with(page=0, query=query)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_filter_complex(self, mock_get):
try:
q1 = self.node.query(Customer).filter(
(Customer.base.firstName == 'firstName') | (Customer.extra == 'extra'))
q2 = self.node.query(Customer).filter((Customer.base.lastName == 'lastName') | (Customer.extra == 'extra'))
qor = q1 | q2
qor.filter(Customer.base.firstName == 'firstName')
except OperationNotPermitted as e:
assert 'Cannot apply a filter on a combined query' in str(e), str(e)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_combine_empty_or(self, mock_get):
try:
q1 = self.node.query(Customer)
q2 = self.node.query(Customer)
qor = q1 | q2
except OperationNotPermitted as e:
assert 'Cannot combine empty queries.' in str(e), str(e)
@mock.patch('contacthub._api_manager._api_customer._CustomerAPIManager.get_all',
return_value=json.loads(FakeHTTPResponse().text))
def test_combine_empty_and(self, mock_get):
try:
q1 = self.node.query(Customer)
q2 = self.node.query(Customer)
qor = q1 & q2
except OperationNotPermitted as e:
assert 'Cannot combine empty queries.' in str(e), str(e)
| 49.031161 | 134 | 0.5156 |
9cf602098c70597cfc910a3a1de68c194b6d1cf3 | 245 | py | Python | frappe/core/doctype/broucher_details/broucher_details.py | erpletzerp/letzerpcore | add4eb411c6b1669d0951b7ce7930c0d85e95c4b | [
"MIT"
] | null | null | null | frappe/core/doctype/broucher_details/broucher_details.py | erpletzerp/letzerpcore | add4eb411c6b1669d0951b7ce7930c0d85e95c4b | [
"MIT"
] | null | null | null | frappe/core/doctype/broucher_details/broucher_details.py | erpletzerp/letzerpcore | add4eb411c6b1669d0951b7ce7930c0d85e95c4b | [
"MIT"
] | null | null | null | # Copyright (c) 2013, letzERP Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class BroucherDetails(Document):
pass
| 24.5 | 56 | 0.812245 |
0f067c8cd68db592ff6bfe31c8deeb3a78b5d052 | 40,417 | py | Python | rest_framework_swagger/introspectors.py | CantemoInternal/django-rest-swagger | 9410c868c631f45a01ce0cb13359080779671fb5 | [
"BSD-2-Clause"
] | null | null | null | rest_framework_swagger/introspectors.py | CantemoInternal/django-rest-swagger | 9410c868c631f45a01ce0cb13359080779671fb5 | [
"BSD-2-Clause"
] | null | null | null | rest_framework_swagger/introspectors.py | CantemoInternal/django-rest-swagger | 9410c868c631f45a01ce0cb13359080779671fb5 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Handles the instrospection of REST Framework Views and ViewSets."""
import inspect
import itertools
import re
import yaml
import importlib
from .compat import OrderedDict, strip_tags, get_pagination_attribures
from abc import ABCMeta, abstractmethod
from django.http import HttpRequest
from django.contrib.admindocs.utils import trim_docstring
from django.utils.encoding import smart_text
import rest_framework
from rest_framework import viewsets
from rest_framework.compat import apply_markdown
from rest_framework.utils import formatting
from django.utils import six
try:
import django_filters
except ImportError:
django_filters = None
def get_view_description(view_cls, html=False, docstring=None):
if docstring is not None:
view_cls = type(
view_cls.__name__ + '_fake',
(view_cls,),
{'__doc__': docstring})
return rest_framework.settings.api_settings \
.VIEW_DESCRIPTION_FUNCTION(view_cls, html)
def get_default_value(field):
default_value = getattr(field, 'default', None)
if rest_framework.VERSION >= '3.0.0':
from rest_framework.fields import empty
if default_value == empty:
default_value = None
if callable(default_value):
default_value = default_value()
return default_value
class IntrospectorHelper(object):
__metaclass__ = ABCMeta
@staticmethod
def strip_yaml_from_docstring(docstring):
"""
Strips YAML from the docstring.
"""
split_lines = trim_docstring(docstring).split('\n')
cut_off = None
for index in range(len(split_lines) - 1, -1, -1):
line = split_lines[index]
line = line.strip()
if line == '---':
cut_off = index
break
if cut_off is not None:
split_lines = split_lines[0:cut_off]
return "\n".join(split_lines)
@staticmethod
def strip_params_from_docstring(docstring):
"""
Strips the params from the docstring (ie. myparam -- Some param) will
not be removed from the text body
"""
params_pattern = re.compile(r' -- ')
split_lines = trim_docstring(docstring).split('\n')
cut_off = None
for index, line in enumerate(split_lines):
line = line.strip()
if params_pattern.search(line):
cut_off = index
break
if cut_off is not None:
split_lines = split_lines[0:cut_off]
return "\n".join(split_lines)
@staticmethod
def get_serializer_name(serializer):
if serializer is None:
return None
if rest_framework.VERSION >= '3.0.0':
from rest_framework.serializers import ListSerializer
assert serializer != ListSerializer, "uh oh, what now?"
if isinstance(serializer, ListSerializer):
serializer = serializer.child
if inspect.isclass(serializer):
return serializer.__name__
return serializer.__class__.__name__
@staticmethod
def get_summary(callback, docstring=None):
"""
Returns the first sentence of the first line of the class docstring
"""
description = get_view_description(
callback, html=False, docstring=docstring) \
.split("\n")[0].split(".")[0]
description = IntrospectorHelper.strip_yaml_from_docstring(
description)
description = IntrospectorHelper.strip_params_from_docstring(
description)
description = strip_tags(get_view_description(
callback, html=True, docstring=description))
return description
class BaseViewIntrospector(object):
__metaclass__ = ABCMeta
def __init__(self, callback, path, pattern, user):
self.callback = callback
self.path = path
self.pattern = pattern
self.user = user
def get_yaml_parser(self):
parser = YAMLDocstringParser(self)
return parser
@abstractmethod
def __iter__(self):
pass
def get_iterator(self):
return self.__iter__()
def get_description(self):
"""
Returns the first sentence of the first line of the class docstring
"""
return IntrospectorHelper.get_summary(self.callback)
def get_docs(self):
return get_view_description(self.callback)
class BaseMethodIntrospector(object):
__metaclass__ = ABCMeta
ENUMS = [
'choice',
'multiple choice',
]
PRIMITIVES = {
'integer': ['int32', 'int64'],
'number': ['float', 'double'],
'string': ['string', 'byte', 'date', 'date-time'],
'boolean': ['boolean'],
}
def __init__(self, view_introspector, method):
self.method = method
self.parent = view_introspector
self.callback = view_introspector.callback
self.path = view_introspector.path
self.user = view_introspector.user
def get_module(self):
return self.callback.__module__
def check_yaml_methods(self, yaml_methods):
missing_set = set()
for key in yaml_methods:
if key not in self.parent.methods():
missing_set.add(key)
if missing_set:
raise Exception(
"methods %s in class docstring are not in view methods %s"
% (list(missing_set), list(self.parent.methods())))
def get_yaml_parser(self):
parser = YAMLDocstringParser(self)
parent_parser = YAMLDocstringParser(self.parent)
self.check_yaml_methods(parent_parser.object.keys())
new_object = {}
new_object.update(parent_parser.object.get(self.method, {}))
new_object.update(parser.object)
parser.object = new_object
return parser
def get_extra_serializer_classes(self):
return self.get_yaml_parser().get_extra_serializer_classes(
self.callback)
def ask_for_serializer_class(self):
if hasattr(self.callback, 'get_serializer_class'):
view = self.create_view()
parser = self.get_yaml_parser()
mock_view = parser.get_view_mocker(self.callback)
view = mock_view(view)
if view is not None:
return view.get_serializer_class()
def create_view(self):
view = self.callback()
if not hasattr(view, 'kwargs'):
view.kwargs = dict()
if hasattr(self.parent.pattern, 'default_args'):
view.kwargs.update(self.parent.pattern.default_args)
view.request = HttpRequest()
view.request.user = self.user
view.request.method = self.method
return view
def get_serializer_class(self):
parser = self.get_yaml_parser()
serializer = parser.get_serializer_class(self.callback)
if serializer is None:
serializer = self.ask_for_serializer_class()
return serializer
def get_response_serializer_class(self):
parser = self.get_yaml_parser()
serializer = parser.get_response_serializer_class(self.callback)
if serializer is None:
serializer = self.get_serializer_class()
return serializer
def get_request_serializer_class(self):
parser = self.get_yaml_parser()
serializer = parser.get_request_serializer_class(self.callback)
if serializer is None:
serializer = self.get_serializer_class()
return serializer
def get_summary(self):
# If there is no docstring on the method, get class docs
return IntrospectorHelper.get_summary(
self.callback,
self.get_docs() or self.parent.get_description())
def get_nickname(self):
""" Returns the APIView's nickname """
return rest_framework.settings.api_settings \
.VIEW_NAME_FUNCTION(self.callback, self.method).replace(' ', '_')
def get_notes(self):
"""
Returns the body of the docstring trimmed before any parameters are
listed. First, get the class docstring and then get the method's. The
methods will always inherit the class comments.
"""
docstring = ""
class_docs = get_view_description(self.callback)
class_docs = IntrospectorHelper.strip_yaml_from_docstring(class_docs)
class_docs = IntrospectorHelper.strip_params_from_docstring(class_docs)
method_docs = self.get_docs()
if class_docs is not None:
docstring += class_docs + " \n"
if method_docs is not None:
method_docs = formatting.dedent(smart_text(method_docs))
method_docs = IntrospectorHelper.strip_yaml_from_docstring(
method_docs
)
method_docs = IntrospectorHelper.strip_params_from_docstring(
method_docs
)
docstring += '\n' + method_docs
docstring = docstring.strip()
return do_markdown(docstring)
def get_parameters(self):
"""
Returns parameters for an API. Parameters are a combination of HTTP
query parameters as well as HTTP body parameters that are defined by
the DRF serializer fields
"""
params = []
path_params = self.build_path_parameters()
body_params = self.build_body_parameters()
form_params = self.build_form_parameters()
query_params = self.build_query_parameters()
if django_filters is not None:
query_params.extend(
self.build_query_parameters_from_django_filters())
if path_params:
params += path_params
if self.get_http_method() not in ["GET", "DELETE", "HEAD"]:
params += form_params
if not form_params and body_params is not None:
params.append(body_params)
if query_params:
params += query_params
return params
def get_http_method(self):
return self.method
@abstractmethod
def get_docs(self):
return ''
def retrieve_docstring(self):
"""
Attempts to fetch the docs for a class method. Returns None
if the method does not exist
"""
method = str(self.method).lower()
if not hasattr(self.callback, method):
return None
return get_view_description(getattr(self.callback, method))
def build_body_parameters(self):
serializer = self.get_request_serializer_class()
serializer_name = IntrospectorHelper.get_serializer_name(serializer)
if serializer_name is None:
return
return {
'name': serializer_name,
'type': serializer_name,
'paramType': 'body',
}
def build_path_parameters(self):
"""
Gets the parameters from the URL
"""
url_params = re.findall('/{([^}]*)}', self.path)
params = []
for param in url_params:
params.append({
'name': param,
'type': 'string',
'paramType': 'path',
'required': True
})
return params
def build_query_parameters(self):
params = []
docstring = self.retrieve_docstring() or ''
docstring += "\n" + get_view_description(self.callback)
if docstring is None:
return params
split_lines = docstring.split('\n')
for line in split_lines:
param = line.split(' -- ')
if len(param) == 2:
params.append({'paramType': 'query',
'name': param[0].strip(),
'description': param[1].strip(),
'type': 'string'})
return params
def build_query_parameters_from_django_filters(self):
"""
introspect ``django_filters.FilterSet`` instances.
"""
params = []
filter_class = getattr(self.callback, 'filter_class', None)
if (filter_class is not None and
issubclass(filter_class, django_filters.FilterSet)):
for name, filter_ in filter_class.base_filters.items():
data_type = 'string'
parameter = {
'paramType': 'query',
'name': name,
'description': filter_.label,
}
normalize_data_format(data_type, None, parameter)
multiple_choices = filter_.extra.get('choices', {})
if multiple_choices:
parameter['enum'] = [choice[0] for choice
in itertools.chain(multiple_choices)]
parameter['type'] = 'enum'
params.append(parameter)
return params
def build_form_parameters(self):
"""
Builds form parameters from the serializer class
"""
data = []
serializer = self.get_request_serializer_class()
if serializer is None:
return data
fields = serializer().get_fields()
for name, field in fields.items():
if getattr(field, 'read_only', False):
continue
data_type, data_format = get_data_type(field) or ('string', 'string')
if data_type == 'hidden':
continue
# guess format
# data_format = 'string'
# if data_type in self.PRIMITIVES:
# data_format = self.PRIMITIVES.get(data_type)[0]
f = {
'paramType': 'form',
'name': name,
'description': getattr(field, 'help_text', '') or '',
'type': data_type,
'format': data_format,
'required': getattr(field, 'required', False),
'defaultValue': get_default_value(field),
}
# Swagger type is a primitive, format is more specific
if f['type'] == f['format']:
del f['format']
# defaultValue of null is not allowed, it is specific to type
if f['defaultValue'] is None:
del f['defaultValue']
# Min/Max values
max_value = getattr(field, 'max_value', None)
min_value = getattr(field, 'min_value', None)
if max_value is not None and data_type == 'integer':
f['minimum'] = min_value
if max_value is not None and data_type == 'integer':
f['maximum'] = max_value
# ENUM options
if data_type in BaseMethodIntrospector.ENUMS:
if isinstance(field.choices, list):
f['enum'] = [k for k, v in field.choices]
elif isinstance(field.choices, dict):
f['enum'] = [k for k, v in field.choices.items()]
data.append(f)
return data
def get_data_type(field):
# (in swagger 2.0 we might get to use the descriptive types..
from rest_framework import fields
if isinstance(field, fields.BooleanField):
return 'boolean', 'boolean'
elif hasattr(fields, 'NullBooleanField') and isinstance(field, fields.NullBooleanField):
return 'boolean', 'boolean'
# elif isinstance(field, fields.URLField):
# return 'string', 'string' # 'url'
# elif isinstance(field, fields.SlugField):
# return 'string', 'string', # 'slug'
elif isinstance(field, fields.ChoiceField):
return 'choice', 'choice'
# elif isinstance(field, fields.EmailField):
# return 'string', 'string' # 'email'
# elif isinstance(field, fields.RegexField):
# return 'string', 'string' # 'regex'
elif isinstance(field, fields.DateField):
return 'string', 'date'
elif isinstance(field, fields.DateTimeField):
return 'string', 'date-time' # 'datetime'
# elif isinstance(field, fields.TimeField):
# return 'string', 'string' # 'time'
elif isinstance(field, fields.IntegerField):
return 'integer', 'int64' # 'integer'
elif isinstance(field, fields.FloatField):
return 'number', 'float' # 'float'
# elif isinstance(field, fields.DecimalField):
# return 'string', 'string' #'decimal'
# elif isinstance(field, fields.ImageField):
# return 'string', 'string' # 'image upload'
# elif isinstance(field, fields.FileField):
# return 'string', 'string' # 'file upload'
# elif isinstance(field, fields.CharField):
# return 'string', 'string'
elif rest_framework.VERSION >= '3.0.0' and isinstance(field, fields.HiddenField):
return 'hidden', 'hidden'
else:
return 'string', 'string'
class APIViewIntrospector(BaseViewIntrospector):
def __iter__(self):
for method in self.methods():
yield APIViewMethodIntrospector(self, method)
def methods(self):
return self.callback().allowed_methods
class WrappedAPIViewIntrospector(BaseViewIntrospector):
def __iter__(self):
for method in self.methods():
yield WrappedAPIViewMethodIntrospector(self, method)
def methods(self):
return self.callback().allowed_methods
def get_notes(self):
class_docs = get_view_description(self.callback)
class_docs = IntrospectorHelper.strip_yaml_from_docstring(
class_docs)
class_docs = IntrospectorHelper.strip_params_from_docstring(
class_docs)
return get_view_description(
self.callback, html=True, docstring=class_docs)
def do_markdown(docstring):
# Markdown is optional
if apply_markdown:
return apply_markdown(docstring)
else:
return docstring.replace("\n\n", "<br/>")
class APIViewMethodIntrospector(BaseMethodIntrospector):
def get_docs(self):
"""
Attempts to retrieve method specific docs for an
endpoint. If none are available, the class docstring
will be used
"""
return self.retrieve_docstring()
class WrappedAPIViewMethodIntrospector(BaseMethodIntrospector):
def get_docs(self):
"""
Attempts to retrieve method specific docs for an
endpoint. If none are available, the class docstring
will be used
"""
return get_view_description(self.callback)
def get_module(self):
from rest_framework_swagger.decorators import wrapper_to_func
func = wrapper_to_func(self.callback)
return func.__module__
def get_notes(self):
return self.parent.get_notes()
def get_yaml_parser(self):
parser = YAMLDocstringParser(self)
return parser
class ViewSetIntrospector(BaseViewIntrospector):
"""Handle ViewSet introspection."""
def __init__(self, callback, path, pattern, user, patterns=None):
super(ViewSetIntrospector, self).__init__(callback, path, pattern, user)
if not issubclass(callback, viewsets.ViewSetMixin):
raise Exception("wrong callback passed to ViewSetIntrospector")
self.patterns = patterns or [pattern]
def __iter__(self):
methods = self._resolve_methods()
for method in methods:
yield ViewSetMethodIntrospector(self, methods[method], method)
def methods(self):
stuff = []
for pattern in self.patterns:
if pattern.callback:
stuff.extend(self._resolve_methods(pattern).values())
return stuff
def _resolve_methods(self, pattern=None):
from .decorators import closure_n_code, get_closure_var
if pattern is None:
pattern = self.pattern
callback = pattern.callback
try:
x = closure_n_code(callback)
while getattr(x.code, 'co_name') != 'view':
# lets unwrap!
callback = get_closure_var(callback)
x = closure_n_code(callback)
freevars = x.code.co_freevars
except (AttributeError, IndexError):
raise RuntimeError(
'Unable to use callback invalid closure/function ' +
'specified.')
else:
return x.closure[freevars.index('actions')].cell_contents
class ViewSetMethodIntrospector(BaseMethodIntrospector):
def __init__(self, view_introspector, method, http_method):
super(ViewSetMethodIntrospector, self) \
.__init__(view_introspector, method)
self.http_method = http_method.upper()
def get_http_method(self):
return self.http_method
def get_docs(self):
"""
Attempts to retrieve method specific docs for an
endpoint. If none are available, the class docstring
will be used
"""
return self.retrieve_docstring()
def create_view(self):
view = super(ViewSetMethodIntrospector, self).create_view()
if not hasattr(view, 'action'):
setattr(view, 'action', self.method)
view.request.method = self.http_method
return view
def build_query_parameters(self):
parameters = super(ViewSetMethodIntrospector, self) \
.build_query_parameters()
view = self.create_view()
page_size, page_query_param, page_size_query_param = get_pagination_attribures(view)
if self.method == 'list' and page_size:
data_type = 'integer'
if page_query_param:
parameters.append({
'paramType': 'query',
'name': page_query_param,
'description': None,
})
normalize_data_format(data_type, None, parameters[-1])
if page_size_query_param:
parameters.append({
'paramType': 'query',
'name': page_size_query_param,
'description': None,
})
normalize_data_format(data_type, None, parameters[-1])
return parameters
def multi_getattr(obj, attr, default=None):
"""
Get a named attribute from an object; multi_getattr(x, 'a.b.c.d') is
equivalent to x.a.b.c.d. When a default argument is given, it is
returned when any attribute in the chain doesn't exist; without
it, an exception is raised when a missing attribute is encountered.
"""
attributes = attr.split(".")
for i in attributes:
try:
obj = getattr(obj, i)
except AttributeError:
if default:
return default
else:
raise
return obj
def normalize_data_format(data_type, data_format, obj):
"""
sets 'type' on obj
sets a valid 'format' on obj if appropriate
uses data_format only if valid
"""
if data_type == 'array':
data_format = None
flatten_primitives = [
val for sublist in BaseMethodIntrospector.PRIMITIVES.values()
for val in sublist
]
if data_format not in flatten_primitives:
formats = BaseMethodIntrospector.PRIMITIVES.get(data_type, None)
if formats:
data_format = formats[0]
else:
data_format = None
if data_format == data_type:
data_format = None
obj['type'] = data_type
if data_format is None and 'format' in obj:
del obj['format']
elif data_format is not None:
obj['format'] = data_format
class YAMLDocstringParser(object):
"""
Docstring parser powered by YAML syntax
This parser allows you override some parts of automatic method inspection
behaviours which are not always correct.
See the following documents for more information about YAML and Swagger:
- https://github.com/wordnik/swagger-core/wiki
- http://www.yaml.org/spec/1.2/spec.html
- https://github.com/wordnik/swagger-codegen/wiki/Creating-Swagger-JSON-from-YAML-files
1. Control over parameters
============================================================================
Define parameters and its properties in docstrings:
parameters:
- name: some_param
description: Foobar long description goes here
required: true
type: integer
paramType: form
minimum: 10
maximum: 100
- name: other_foo
paramType: query
- name: avatar
type: file
It is possible to override parameters discovered by method inspector by
defining:
`parameters_strategy` option to either `merge` or `replace`
To define different strategies for different `paramType`'s use the
following syntax:
parameters_strategy:
form: replace
query: merge
By default strategy is set to `merge`
Sometimes method inspector produces wrong list of parameters that
you might not won't to see in SWAGGER form. To handle this situation
define `paramTypes` that should be omitted
omit_parameters:
- form
2. Control over serializers
============================================================================
Once in a while you are using different serializers inside methods
but automatic method inspector cannot detect this. For that purpose there
is two explicit parameters that allows you to discard serializer detected
by method inspector OR replace it with another one
serializer: some.package.FooSerializer
omit_serializer: true
3. Custom Response Class
============================================================================
If your view is not using serializer at all but instead outputs simple
data type such as JSON you may define custom response object in method
signature like follows:
type:
name:
required: true
type: string
url:
required: false
type: url
4. Response Messages (Error Codes)
============================================================================
If you'd like to share common response errors that your APIView might throw
you can define them in docstring using following format:
responseMessages:
- code: 401
message: Not authenticated
- code: 403
message: Insufficient rights to call this procedure
5. Different models for reading and writing operations
============================================================================
Since REST Framework won't output write_only fields in responses as well as
does not require read_only fields to be provided it is worth to
automatically register 2 separate models for reading and writing operations.
Discovered serializer will be registered with `Write` or `Read` prefix.
Response Class will be automatically adjusted if serializer class was
detected by method inspector.
You can also refer to this models in your parameters:
parameters:
- name: CigarSerializer
type: WriteCigarSerializer
paramType: body
SAMPLE DOCSTRING:
============================================================================
---
# API Docs
# Note: YAML always starts with `---`
type:
name:
required: true
type: string
url:
required: false
type: url
created_at:
required: true
type: string
format: date-time
serializer: .serializers.FooSerializer
omit_serializer: false
parameters_strategy: merge
omit_parameters:
- path
parameters:
- name: name
description: Foobar long description goes here
required: true
type: string
paramType: form
- name: other_foo
paramType: query
- name: other_bar
paramType: query
- name: avatar
type: file
responseMessages:
- code: 401
message: Not authenticated
"""
PARAM_TYPES = ['header', 'path', 'form', 'body', 'query']
yaml_error = None
def __init__(self, method_introspector):
self.method_introspector = method_introspector
self.object = self.load_obj_from_docstring(
docstring=self.method_introspector.get_docs())
if self.object is None:
self.object = {}
def load_obj_from_docstring(self, docstring):
"""Loads YAML from docstring"""
split_lines = trim_docstring(docstring).split('\n')
# Cut YAML from rest of docstring
for index, line in enumerate(split_lines):
line = line.strip()
if line.startswith('---'):
cut_from = index
break
else:
return None
yaml_string = "\n".join(split_lines[cut_from:])
yaml_string = formatting.dedent(yaml_string)
try:
return yaml.load(yaml_string)
except yaml.YAMLError as e:
self.yaml_error = e
return None
def _load_class(self, cls_path, callback):
"""
Dynamically load a class from a string
"""
if not cls_path or not callback or not hasattr(callback, '__module__'):
return None
package = None
if '.' not in cls_path:
# within current module/file
class_name = cls_path
module_path = self.method_introspector.get_module()
else:
# relative or fully qualified path import
class_name = cls_path.split('.')[-1]
module_path = ".".join(cls_path.split('.')[:-1])
if cls_path.startswith('.'):
# relative lookup against current package
# ..serializers.FooSerializer
package = self.method_introspector.get_module()
class_obj = None
# Try to perform local or relative/fq import
try:
module = importlib.import_module(module_path, package=package)
class_obj = getattr(module, class_name, None)
except ImportError:
pass
# Class was not found, maybe it was imported to callback module?
# from app.serializers import submodule
# serializer: submodule.FooSerializer
if class_obj is None:
try:
module = importlib.import_module(
self.method_introspector.get_module())
class_obj = multi_getattr(module, cls_path, None)
except (ImportError, AttributeError):
raise Exception("Could not find %s, looked in %s" % (cls_path, module))
return class_obj
def get_serializer_class(self, callback):
"""
Retrieves serializer class from YAML object
"""
serializer = self.object.get('serializer', None)
try:
return self._load_class(serializer, callback)
except (ImportError, ValueError):
pass
return None
def get_extra_serializer_classes(self, callback):
"""
Retrieves serializer classes from pytype YAML objects
"""
parameters = self.object.get('parameters', [])
serializers = []
for parameter in parameters:
serializer = parameter.get('pytype', None)
if serializer is not None:
try:
serializer = self._load_class(serializer, callback)
serializers.append(serializer)
except (ImportError, ValueError):
pass
return serializers
def get_request_serializer_class(self, callback):
"""
Retrieves request serializer class from YAML object
"""
serializer = self.object.get('request_serializer', None)
try:
return self._load_class(serializer, callback)
except (ImportError, ValueError):
pass
return None
def get_response_serializer_class(self, callback):
"""
Retrieves response serializer class from YAML object
"""
serializer = self.object.get('response_serializer', None)
if isinstance(serializer, list):
serializer = serializer[0]
try:
return self._load_class(serializer, callback)
except (ImportError, ValueError):
pass
return None
def get_response_type(self):
"""
Docstring may define custom response class
"""
return self.object.get('type', None)
def get_response_messages(self):
"""
Retrieves response error codes from YAML object
"""
messages = []
response_messages = self.object.get('responseMessages', [])
for message in response_messages:
messages.append({
'code': message.get('code', None),
'message': message.get('message', None),
'responseModel': message.get('responseModel', None),
})
return messages
def get_view_mocker(self, callback):
view_mocker = self.object.get('view_mocker', lambda a: a)
if isinstance(view_mocker, six.string_types):
view_mocker = self._load_class(view_mocker, callback)
return view_mocker
def get_parameters(self, callback):
    """
    Retrieves parameters from YAML object.

    Each YAML entry is normalized into a Swagger 1.2 parameter dict:
    unknown ``paramType`` values fall back to 'form', an optional
    ``pytype`` can name a serializer whose name becomes the data type,
    and path/query/header parameters are forced to primitive types.
    """
    params = []
    fields = self.object.get('parameters', [])
    for field in fields:
        param_type = field.get('paramType', None)
        if param_type not in self.PARAM_TYPES:
            param_type = 'form'

        # Data Type & Format
        # See:
        # https://github.com/wordnik/swagger-core/wiki/1.2-transition#wiki-additions-2
        # https://github.com/wordnik/swagger-core/wiki/Parameters
        data_type = field.get('type', 'string')
        pytype = field.get('pytype', None)
        if pytype is not None:
            try:
                serializer = self._load_class(pytype, callback)
                data_type = IntrospectorHelper.get_serializer_name(
                    serializer)
            except (ImportError, ValueError):
                # Unresolvable pytype: keep the declared/implicit type.
                pass
        if param_type in ['path', 'query', 'header']:
            # Non-body parameters must be primitives per Swagger 1.2.
            if data_type not in BaseMethodIntrospector.PRIMITIVES:
                data_type = 'string'

        # Data Format
        data_format = field.get('format', None)

        f = {
            'paramType': param_type,
            'name': field.get('name', None),
            'description': field.get('description', ''),
            'required': field.get('required', False),
        }
        normalize_data_format(data_type, data_format, f)

        if field.get('defaultValue', None) is not None:
            f['defaultValue'] = field.get('defaultValue', None)

        # Allow Multiple Values &f=1,2,3,4
        if field.get('allowMultiple'):
            f['allowMultiple'] = True

        if f['type'] == 'array':
            items = field.get('items', {})
            elt_data_type = items.get('type', 'string')
            # Bug fix: the element *format* comes from the 'format' key
            # of the items object; previously this re-read 'type' with a
            # meaningless default of the literal string 'format'.
            elt_data_format = items.get('format', None)
            f['items'] = {
            }
            normalize_data_format(elt_data_type, elt_data_format, f['items'])
            uniqueItems = field.get('uniqueItems', None)
            if uniqueItems is not None:
                f['uniqueItems'] = uniqueItems

        # Min/Max are optional
        if 'minimum' in field and data_type == 'integer':
            f['minimum'] = str(field.get('minimum', 0))

        if 'maximum' in field and data_type == 'integer':
            f['maximum'] = str(field.get('maximum', 0))

        # enum options
        enum = field.get('enum', [])
        if enum:
            f['enum'] = enum

        # File support
        if f['type'] == 'file':
            # File uploads travel in the request body.
            f['paramType'] = 'body'

        params.append(f)

    return params
def discover_parameters(self, inspector):
    """
    Applies parameters strategy for parameters discovered
    from method and docstring
    """
    doc_params = self.get_parameters(inspector.callback)
    meth_params = inspector.get_parameters()

    # The docstring's paramType wins: copy it onto the matching
    # method-discovered parameter first, so the per-paramType strategy
    # below sees consistent groupings.
    doc_param_types = {}
    for doc_param in doc_params:
        if 'paramType' in doc_param:
            doc_param_types[doc_param['name']] = doc_param['paramType']
    for meth_param in meth_params:
        if meth_param['name'] in doc_param_types:
            meth_param['paramType'] = doc_param_types[meth_param['name']]

    collected = []
    for param_type in self.PARAM_TYPES:
        if self.should_omit_parameters(param_type):
            continue
        collected.extend(
            self._apply_strategy(param_type, meth_params, doc_params)
        )

    # PATCH requests expect every field except path fields to be optional.
    if inspector.get_http_method() == "PATCH":
        for param in collected:
            if param['paramType'] != 'path':
                param['required'] = False

    return collected
def should_omit_parameters(self, param_type):
    """
    Checks if particular parameter types should be omitted explicitly
    """
    omitted = self.object.get('omit_parameters', [])
    return param_type in omitted
def should_omit_serializer(self):
    """
    Checks if serializer should be intentionally omitted
    """
    flag = self.object.get('omit_serializer', False)
    return flag
def _apply_strategy(self, param_type, method_params, docstring_params):
    """
    Applies strategy for subset of parameters filtered by `paramType`.

    With the 'replace' strategy the docstring parameters win whenever
    any exist; with 'merge' both sets are merged by name (docstring
    entries overwrite method-discovered ones).  Returns a list.
    """
    strategy = self.get_parameters_strategy(param_type=param_type)
    # Bug fix: materialize the filtered subsets.  _filter_params may
    # return a lazy `filter` object, and on Python 3 an *empty* filter
    # object is still truthy, which would make the `or` fallback below
    # always pick the (possibly empty) docstring side.
    method_params = list(self._filter_params(
        params=method_params,
        key='paramType',
        val=param_type
    ))
    docstring_params = list(self._filter_params(
        params=docstring_params,
        key='paramType',
        val=param_type
    ))

    if strategy == 'replace':
        return docstring_params or method_params
    elif strategy == 'merge':
        return self._merge_params(
            method_params,
            docstring_params,
            key='name',
        )

    return []
@staticmethod
def _filter_params(params, key, val):
    """
    Returns the subset of `params` whose `key` equals `val`.

    Bug fix: a concrete list is returned instead of a lazy `filter`
    object, so callers can safely truth-test and re-iterate the result
    on Python 3 (an empty `filter` object is truthy).
    """
    return [o for o in params if o.get(key, None) == val]
@staticmethod
def _merge_params(params1, params2, key):
    """
    Helper method.
    Merges parameters lists by key; entries from `params2` overwrite
    same-keyed entries from `params1`, first-seen order is preserved.
    """
    merged = OrderedDict()
    for source in (params1, params2):
        for item in source:
            merged[item[key]] = item
    return list(merged.values())
def get_parameters_strategy(self, param_type=None):
    """Return the strategy used to combine discovered parameters.

    Either `merge` (docstring entries overwrite duplicates discovered
    by the inspector) or `replace` (docstring entries completely
    replace the inspector's).  The YAML value may also be a mapping
    from `paramType` to a strategy, so e.g. `path` parameters can be
    merged while `form` parameters are replaced.  Anything
    unrecognized falls back to the default, `merge`.
    """
    fallback = 'merge'
    strategy = self.object.get('parameters_strategy', fallback)
    # A mapping means the strategy was declared per paramType.
    if param_type is not None and hasattr(strategy, 'get'):
        strategy = strategy.get(param_type, fallback)
    return strategy if strategy in ('merge', 'replace') else fallback
| 33.265021 | 92 | 0.592671 |
77ca40c65f52070da652fc7983fb6b09210b2434 | 2,018 | py | Python | sagas/ofbiz/finder.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | sagas/ofbiz/finder.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | null | null | null | sagas/ofbiz/finder.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z | import sagas.ofbiz.connector
from py4j.java_gateway import java_import
class Finder(object):
    """Thin convenience wrapper around an OFBiz py4j connection (``oc``).

    Wraps entity lookups through the delegator and the ``performFind``
    service; every call is proxied to the JVM via the py4j gateway.
    """

    def __init__(self, oc):
        # oc: the py4j OFBiz connector.  The java_import calls make the
        # OFBiz helper classes visible on the gateway's JVM view (oc.j).
        self.oc=oc
        java_import(oc.j, 'org.apache.ofbiz.service.ServiceUtil')
        java_import(oc.j, 'org.apache.ofbiz.base.util.UtilDateTime')
        java_import(oc.j, 'org.apache.ofbiz.entity.util.*')
        # Cached userLogin value object used for service calls.
        self.user=self.default_user()

    def success(self, ret):
        # True when a service result map indicates success.
        return self.oc.j.ServiceUtil.isSuccess(ret)

    def hash_map(self, *args):
        """Build a java.util.HashMap from alternating key/value args."""
        arg_len = len(args)
        if arg_len % 2 == 1:
            raise ValueError("You must pass an even sized array to the toMap method (size = " + str(arg_len) + ")")
        m = self.oc.j.HashMap()
        i = 0
        while i < arg_len:
            m[args[i]] = args[i + 1]
            i = i + 2
        return m

    def default_user(self):
        # The gateway's pre-configured userLogin value object.
        return self.oc.gateway.getUserLogin()

    def find(self, entity, inputs):
        """Run the ``performFind`` service for ``entity``.

        Returns ``(True, rows)`` on success, otherwise
        ``(False, error_message)``.
        """
        # inputs=oc.jmap(testingId="PERF_TEST_1")
        ret = self.oc.call("performFind", userLogin=self.user, entityName=entity, inputFields=inputs)
        if self.oc.j.ServiceUtil.isSuccess(ret):
            listIt = ret['listIt']
            foundElements = listIt.getCompleteList()
            return (True, foundElements)
        else:
            return (False, self.oc.j.ServiceUtil.getErrorMessage(ret))

    def find_one(self, entity, params):
        # Single record lookup by primary-key map; the third argument is
        # the OFBiz `useCache` flag (entity cache enabled here).
        return self.oc.delegator.findOne(entity, params, True)

    def find_list(self, entity, limit=20, offset=0):
        """Return a page of rows for ``entity`` with no conditions."""
        findOptions = self.oc.j.EntityFindOptions()
        findOptions.setLimit(limit)
        findOptions.setOffset(offset)
        rows = self.oc.delegator.findList(entity, None, None, None, findOptions, False)
        return rows

    def now(self):
        # Current time as an OFBiz java.sql.Timestamp.
        UtilDateTime = self.oc.j.UtilDateTime
        nowTimestamp = UtilDateTime.nowTimestamp()
        return nowTimestamp

    def create(self, entity, *args):
        """Create a record for ``entity`` from alternating key/value args."""
        # print(hash_map(*args))
        return self.oc.delegator.create(entity, self.hash_map(*args))
| 33.081967 | 115 | 0.625372 |
64276950f35f93db95b5eb3b153a40b10473ddc3 | 570 | py | Python | minigest/magazzino/admin/servizio.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | [
"MIT"
] | null | null | null | minigest/magazzino/admin/servizio.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | [
"MIT"
] | 1 | 2021-09-22T19:10:20.000Z | 2021-09-22T19:10:20.000Z | minigest/magazzino/admin/servizio.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | [
"MIT"
] | null | null | null | from django.contrib import admin
from ..models.servizio import Servizio
from .barcode import BarcodeInline
from .costo import CostoInline
from .prezzo import PrezzoInline
from .prodotto_immagine import ProdottoImmagineInline
@admin.register(Servizio)
class ServizioAdmin(admin.ModelAdmin):
    """Django admin configuration for the ``Servizio`` model."""

    # The 'fornitori' (suppliers) field is hidden from the change form.
    exclude = ("fornitori",)
    # Alphabetical listing by name.
    ordering = ("nome",)
    list_display = ("nome", "categoria")
    # Free-text search on the name column.
    search_fields = ["nome"]
    list_filter = ("categoria",)
    # Related records editable inline on the same admin page.
    inlines = [
        BarcodeInline,
        PrezzoInline,
        CostoInline,
        ProdottoImmagineInline,
    ]
| 24.782609 | 53 | 0.705263 |
fa96981cb0d99ae73f96d1a46aaa7226f42de560 | 4,680 | py | Python | scripts/hdx_register/delete.py | OCHA-DAP/hdx-scraper-unosat-flood-portal | 80b0bcd404993e4bd1dae442f794c9f86b6d5328 | [
"MIT"
] | 1 | 2016-07-22T13:32:54.000Z | 2016-07-22T13:32:54.000Z | scripts/hdx_register/delete.py | OCHA-DAP/hdx-scraper-unosat-flood-portal | 80b0bcd404993e4bd1dae442f794c9f86b6d5328 | [
"MIT"
] | 21 | 2015-07-08T21:30:32.000Z | 2015-08-27T17:52:24.000Z | scripts/hdx_register/delete.py | OCHA-DAP/hdxscraper-unosat-flood-portal | 80b0bcd404993e4bd1dae442f794c9f86b6d5328 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import requests
import yajl as json
import progressbar as pb
dir = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(dir)
from termcolor import colored as color
from utilities.prompt_format import item as I
def DeleteAllDatasetsFromOrg(organization, hdx_site, apikey, verbose=True):
'''Delete all datasets owned by an organization.'''
if verbose:
print "--------------------------------------------------"
print "//////////////////////////////////////////////////"
print "--------------------------------------------------"
print "////////////// DELETING DATASETS /////////////////"
print "--------------------------------------------------"
print "//////////////////////////////////////////////////"
print "--------------------------------------------------"
# Checking for input.
if (organization is None):
print "No organization id provided. Please provide an organization id."
print "--------------------------------------------------"
return False
# Base config.
organization_show_url = hdx_site + '/api/action/organization_show?id='
package_delete_url = hdx_site + '/api/action/package_delete'
headers = { 'X-CKAN-API-Key': apikey, 'content-type': 'application/json' }
# Fetching dataset information.
dataset_dict = requests.get(organization_show_url + organization, headers=headers, auth=('dataproject', 'humdata')).json()
#
# Progress bar.
#
i = 0
widgets = [I('prompt_bullet'), ' Deleting resources:', pb.Percentage(), ' ', pb.Bar('-'), ' ', pb.ETA(), ' ']
if verbose is False:
pbar = pb.ProgressBar(widgets=widgets, maxval=len(dataset_dict)).start()
#
# Iterating over every dataset.
#
if dataset_dict["success"] is True:
pbar.update(i)
for dataset in dataset_dict["result"]["packages"]:
u = { 'id': dataset["id"] }
r = requests.post(package_delete_url, data=json.dumps(u), headers=headers, auth=('dataproject', 'humdata'))
if r.status_code != 200:
print "%s : %s" % (I('prompt_error'), dataset["name"])
else:
print "%s : %s" % (I('prompt_success'), dataset["name"])
i += 1
else:
print "%s There was an error getting the dataset list." % I('prompt_error')
print "--------------------------------------------------"
return False
def DeleteResources(dataset_dict, hdx_site, apikey, verbose=True):
    '''Delete resources based on a series of dataset ids.

    dataset_dict: iterable of dicts, each carrying the dataset "name"
        whose resources should be deleted.
    hdx_site: base URL of the HDX/CKAN instance.
    apikey: CKAN API key, sent in the X-CKAN-API-Key header.
    verbose: when True, print a banner and per-resource messages; when
        False, show a progress bar instead.

    Returns True after processing, or None when no input was provided.
    '''
    if verbose:
        print "--------------------------------------------------"
        print "//////////////////////////////////////////////////"
        print "--------------------------------------------------"
        print "///////////// DELETING RESOURCES /////////////////"
        print "--------------------------------------------------"
        print "//////////////////////////////////////////////////"
        print "--------------------------------------------------"

    #
    # Checking input.
    #
    if (dataset_dict is None):
        print "%s No data provided. Provide a JSON package." % I('prompt_error')
        print "--------------------------------------------------"
        return

    #
    # URL config.
    #
    package_show_url = hdx_site + '/api/action/package_show?id='
    resource_delete_url = hdx_site + '/api/action/resource_delete'
    headers = { 'X-CKAN-API-Key': apikey, 'content-type': 'application/json' }

    #
    # Progress bar (only built in non-verbose mode).
    #
    i = 0
    widgets = [I('prompt_bullet'), ' Deleting resources:', pb.Percentage(), ' ', pb.Bar('-'), ' ', pb.ETA(), ' ']
    if verbose is False:
        pbar = pb.ProgressBar(widgets=widgets, maxval=len(dataset_dict)).start()

    #
    # Iterating over every dataset.
    #
    for dataset in dataset_dict:
        if verbose is False:
            pbar.update(i)

        #
        # Make request to HDX for the dataset's current resource list.
        #
        d = requests.get(package_show_url + dataset["name"], headers=headers, auth=('dataproject', 'humdata')).json()
        if d["success"] is False:
            if d['error']['__type'] == 'Not Found Error':
                print '%s Dataset not found.' % I('prompt_warn')
            else:
                print '%s There was an error connecting to HDX.' % I('prompt_error')
                if verbose:
                    print json.dumps(d['error'])

        if d["success"] is True:
            for resource in d["result"]["resources"]:
                if verbose:
                    print "%s : resource deleted %s" % (I('prompt_warn'), resource["id"])
                #
                # Delete resource (response intentionally ignored;
                # best-effort deletion).
                #
                u = { 'id': resource["id"] }
                requests.post(resource_delete_url, data=json.dumps(u), headers=headers, auth=('dataproject', 'humdata'))

        i += 1

    if verbose is False:
        pbar.finish()

    return True
| 30.193548 | 124 | 0.51688 |
59eadd20836a74c69295fef95d1d229f3b314e7c | 785 | py | Python | backend/myeats/myeats_scheduler/serializers.py | Zeppelin17/MyEatsScheduler | 7ed54c71b980072c42ce8eaacf48e013872120ab | [
"MIT"
] | null | null | null | backend/myeats/myeats_scheduler/serializers.py | Zeppelin17/MyEatsScheduler | 7ed54c71b980072c42ce8eaacf48e013872120ab | [
"MIT"
] | 6 | 2021-03-30T14:17:18.000Z | 2022-02-27T10:32:32.000Z | backend/myeats/myeats_scheduler/serializers.py | Zeppelin17/MyEatsScheduler | 7ed54c71b980072c42ce8eaacf48e013872120ab | [
"MIT"
] | null | null | null | """
/**
* Serializer for myeats_scheduler models
*
 * @summary Django REST Framework serializers for the Week, Day and Split models
* @author Zeppelin17 <elzeppelin17@gmail.com>
*
* Created at : 2020-04-18 11:44:05
* Last modified : 2020-04-22 06:20:14
*/
"""
from rest_framework import serializers
from myeats_scheduler.models import Week, Day, Split
class WeekSerializer(serializers.ModelSerializer):
    """Serializes ``Week`` rows for the REST API."""

    class Meta:
        model = Week
        fields = ['id', 'name', 'first_day', 'from_date', 'to_date', 'myeats_user']
class DaySerializer(serializers.ModelSerializer):
    """Serializes ``Day`` rows (a day belongs to a ``week``)."""

    class Meta:
        model = Day
        fields = ['id', 'name', 'week']
class SplitSerializer(serializers.ModelSerializer):
    """Serializes ``Split`` rows (an ordered slot of a day with recipes)."""

    class Meta:
        model = Split
        fields = ['id', 'name', 'order', 'day', 'recipes']
f9504d4054ff5b2e140d812d4f6cfa33b8d6ff8e | 2,081 | py | Python | proxy_extension.py | nicobts/InstaPnb | b854baabd7fec024307a07bbb4fb9a6db2f3abfd | [
"MIT"
] | null | null | null | proxy_extension.py | nicobts/InstaPnb | b854baabd7fec024307a07bbb4fb9a6db2f3abfd | [
"MIT"
] | null | null | null | proxy_extension.py | nicobts/InstaPnb | b854baabd7fec024307a07bbb4fb9a6db2f3abfd | [
"MIT"
] | null | null | null | import zipfile
import os
def create_proxy_extension(proxy):
    """Create a Chrome extension (zip) that tunnels traffic via a proxy.

    Args:
        proxy: string of the form ``login:password@ip:port``.  The
            password may itself contain ``:`` (or ``@``) characters:
            the credentials/host boundary is the *last* ``@`` and the
            login/password boundary is the *first* ``:`` of the
            credential part.

    Returns:
        Path of the generated ``.zip`` plugin file, written under
        ``assets/chrome_extensions`` (the directory is created if
        needed).
    """
    # Bug fix: the old parsing used proxy.split(':')[1] for the
    # password, which truncated any password containing ':'.
    credentials, _, host_part = proxy.rpartition('@')
    login, _, password = credentials.partition(':')
    ip, _, port_str = host_part.partition(':')
    port = int(port_str)

    manifest_json = """
    {
        "version": "1.0.0",
        "manifest_version": 2,
        "name": "Chrome Proxy",
        "permissions": [
            "proxy",
            "tabs",
            "unlimitedStorage",
            "storage",
            "<all_urls>",
            "webRequest",
            "webRequestBlocking"
        ],
        "background": {
            "scripts": ["background.js"]
        },
        "minimum_chrome_version":"22.0.0"
    }
    """

    # The background script sets a fixed proxy server and answers the
    # proxy's auth challenge with the embedded credentials.
    background_js = """
    var config = {
        mode: "fixed_servers",
        rules: {
            singleProxy: {
                scheme: "http",
                host: "%s",
                port: parseInt(%s)
            },
            bypassList: ["localhost"]
        }
    };

    chrome.proxy.settings.set({value: config, scope: "regular"}, function() {});

    function callbackFn(details) {
        return {
            authCredentials: {
                username: "%s",
                password: "%s"
            }
        };
    }

    chrome.webRequest.onAuthRequired.addListener(
        callbackFn,
        {urls: ["<all_urls>"]},
        ['blocking']
    );
    """ % (ip, port, login, password)

    dir_path = 'assets/chrome_extensions'
    os.makedirs(dir_path, exist_ok=True)
    pluginfile = '%s/proxy_auth_%s:%s.zip' % (dir_path, ip, port)

    with zipfile.ZipFile(pluginfile, 'w') as zp:
        zp.writestr("manifest.json", manifest_json)
        zp.writestr("background.js", background_js)

    return pluginfile
| 29.728571 | 85 | 0.425757 |
b99fd34a9c3d8450b1991d766bb38b082feef38e | 138 | py | Python | events/apps.py | 0xelectron/mhtportal-web | bd05069d6245e86d4ae887cacf33b04ef9476816 | [
"MIT"
] | null | null | null | events/apps.py | 0xelectron/mhtportal-web | bd05069d6245e86d4ae887cacf33b04ef9476816 | [
"MIT"
] | 5 | 2019-10-20T06:17:36.000Z | 2021-06-10T18:13:29.000Z | events/apps.py | 0xelectron/mhtportal-web | bd05069d6245e86d4ae887cacf33b04ef9476816 | [
"MIT"
] | 2 | 2019-05-11T17:25:25.000Z | 2019-10-12T17:59:47.000Z | from django.apps import AppConfig
class EventsConfig(AppConfig):
    """Django app configuration for the ``events`` app."""

    name = 'events'

    def ready(self):
        # Imported purely for its side effects: connects the app's
        # signal handlers once the app registry is ready.
        import events.signals
77da7090bc79c9940f633ca9772c292960d81bc6 | 400 | py | Python | python_program/number2text.py | LiuKaiqiang94/PyStudyExample | b30212718b218c71e06b68677f55c33e3a1dbf46 | [
"MIT"
] | 5 | 2018-09-10T02:52:35.000Z | 2018-09-20T07:50:42.000Z | python_program/number2text.py | LiuKaiqiang94/PyStudyExample | b30212718b218c71e06b68677f55c33e3a1dbf46 | [
"MIT"
] | null | null | null | python_program/number2text.py | LiuKaiqiang94/PyStudyExample | b30212718b218c71e06b68677f55c33e3a1dbf46 | [
"MIT"
] | null | null | null |
def main():
    """Read whitespace-separated Unicode code points and print the text.

    Prompts the user for a line of numbers (e.g. "72 105") and decodes
    each with chr() into the message it represents.
    """
    # Typo fixes in the user-facing text: "coverts" -> "converts",
    # "reptrsents" -> "represents".
    print("This program converts a sequence of Unicode numbers into")
    print("the string of text that it represents.\n")

    in_string = input("Please enter the Unicode-encoded message:")
    # chr() turns each numeric code point back into its character.
    message = "".join(chr(int(num_str)) for num_str in in_string.split())
    print("\nThe decoded message is:", message)
# Standard entry-point guard: run only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()
| 25 | 68 | 0.655 |
9b77f28d9b735c0c55b7513b2bb7ded05210c6df | 463 | py | Python | DataStructure_implementation/Queue/Palindrome.py | Jason0LiYaoCN/University-Assignments | 20c5072b2041c3442d88878364eb0c253e030f3e | [
"MIT"
] | 1 | 2018-06-22T08:18:40.000Z | 2018-06-22T08:18:40.000Z | DataStructure_implementation/Queue/Palindrome.py | Jason0LiYaoCN/University-Assignments | 20c5072b2041c3442d88878364eb0c253e030f3e | [
"MIT"
] | null | null | null | DataStructure_implementation/Queue/Palindrome.py | Jason0LiYaoCN/University-Assignments | 20c5072b2041c3442d88878364eb0c253e030f3e | [
"MIT"
] | null | null | null | from Queue import Deque
def pal_checker(input_string):
    """Return True when input_string reads the same forwards and backwards.

    Loads every character into a Deque, then repeatedly compares the
    front and rear characters until they disagree or at most one
    character remains.
    """
    char_store = Deque()
    for symbol in input_string:
        char_store.add_rear(symbol)

    matched = True
    while matched and char_store.size() > 1:
        matched = char_store.remove_front() == char_store.remove_rear()
    return matched
# Demo output; guarded so importing this module stays side-effect free.
if __name__ == "__main__":
    print(pal_checker("lsdkjfskf"))
    print(pal_checker("radar"))
| 23.15 | 49 | 0.62851 |
7e9d5c238b0843cdbb77cbf1462c768e7b52a8bc | 9,182 | py | Python | st2common/tests/unit/test_rbac_loader.py | totalkyos/stack-storm | b89bc648d53dae03c7484d22abd771edfe45bbb8 | [
"Apache-2.0"
] | 1 | 2021-04-08T03:21:49.000Z | 2021-04-08T03:21:49.000Z | st2common/tests/unit/test_rbac_loader.py | totalkyos/stack-storm | b89bc648d53dae03c7484d22abd771edfe45bbb8 | [
"Apache-2.0"
] | null | null | null | st2common/tests/unit/test_rbac_loader.py | totalkyos/stack-storm | b89bc648d53dae03c7484d22abd771edfe45bbb8 | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest2
import mock
import jsonschema
from st2tests import config
from st2tests.fixturesloader import get_fixtures_base_path
from st2common.rbac.loader import RBACDefinitionsLoader
__all__ = [
'RBACDefinitionsLoaderTestCase'
]
class RBACDefinitionsLoaderTestCase(unittest2.TestCase):
    """Tests for RBACDefinitionsLoader against the rbac/ and rbac_invalid/
    fixture trees (role definitions and user role assignments).
    """

    @classmethod
    def setUpClass(cls):
        # Parse st2 config once for the whole test case.
        config.parse_args()

    def test_load_role_definition_success(self):
        """A valid role definition file loads with all permission grants."""
        loader = RBACDefinitionsLoader()

        file_path = os.path.join(get_fixtures_base_path(), 'rbac/roles/role_three.yaml')
        role_definition_api = loader.load_role_definition_from_file(file_path=file_path)

        self.assertEqual(role_definition_api.name, 'role_three')
        self.assertTrue('all the pack permissions on pack dummy_pack_1' in
                        role_definition_api.description)
        self.assertEqual(len(role_definition_api.permission_grants), 4)
        self.assertEqual(role_definition_api.permission_grants[0]['resource_uid'],
                         'pack:dummy_pack_1')
        self.assertEqual(role_definition_api.permission_grants[1]['resource_uid'],
                         'pack:dummy_pack_2')
        self.assertTrue('rule_view' in role_definition_api.permission_grants[1]['permission_types'])
        self.assertEqual(role_definition_api.permission_grants[2]['permission_types'],
                         ['action_execute'])
        # The last grant is global (no resource_uid) with list permissions.
        self.assertTrue('resource_uid' not in role_definition_api.permission_grants[3])
        self.assertEqual(role_definition_api.permission_grants[3]['permission_types'],
                         ['action_list', 'rule_list'])

    def test_load_role_definition_validation_error(self):
        """Invalid permission types / usages raise validation errors."""
        loader = RBACDefinitionsLoader()

        # Invalid permission which doesn't apply to the resource in question
        file_path = os.path.join(get_fixtures_base_path(), 'rbac_invalid/roles/role_one.yaml')
        expected_msg = 'Invalid permission type "rule_all" for resource type "action"'
        self.assertRaisesRegexp(ValueError, expected_msg, loader.load_role_definition_from_file,
                                file_path=file_path)

        # Invalid permission type which doesn't exist
        file_path = os.path.join(get_fixtures_base_path(), 'rbac_invalid/roles/role_two.yaml')
        expected_msg = '.*Failed validating \'enum\'.*'
        self.assertRaisesRegexp(jsonschema.ValidationError, expected_msg,
                                loader.load_role_definition_from_file, file_path=file_path)

        # Only list permissions can be used without a resource_uid
        file_path = os.path.join(get_fixtures_base_path(), 'rbac_invalid/roles/role_four.yaml')
        expected_msg = ('Invalid permission type "action_create". Only "list" permission types '
                        'can be used without a resource id')
        self.assertRaisesRegexp(ValueError, expected_msg,
                                loader.load_role_definition_from_file, file_path=file_path)

    def test_load_user_role_assignments_success(self):
        """A valid user role assignment file loads correctly."""
        loader = RBACDefinitionsLoader()

        file_path = os.path.join(get_fixtures_base_path(), 'rbac/assignments/user3.yaml')
        user_role_assignment_api = loader.load_user_role_assignments_from_file(file_path=file_path)

        self.assertEqual(user_role_assignment_api.username, 'user3')
        self.assertEqual(user_role_assignment_api.description, 'Observer assignments')
        self.assertEqual(user_role_assignment_api.roles, ['observer'])

    def test_load_role_definitions_duplicate_role_definition(self):
        """Two files defining the same role name raise a ValueError."""
        loader = RBACDefinitionsLoader()

        # Try to load all the roles from disk where two definitions refer to the same role
        file_path1 = os.path.join(get_fixtures_base_path(), 'rbac_invalid/roles/role_three1.yaml')
        file_path2 = os.path.join(get_fixtures_base_path(), 'rbac_invalid/roles/role_three2.yaml')
        file_paths = [file_path1, file_path2]
        loader._get_role_definitions_file_paths = mock.Mock()
        loader._get_role_definitions_file_paths.return_value = file_paths

        expected_msg = 'Duplicate definition file found for role "role_three_name_conflict"'
        self.assertRaisesRegexp(ValueError, expected_msg, loader.load_role_definitions)

    def test_load_role_definitions_disabled_role_definition(self):
        """Disabled role definitions are excluded from the result."""
        loader = RBACDefinitionsLoader()

        # Disabled role which means this method shouldn't include it in the result
        file_path = os.path.join(get_fixtures_base_path(), 'rbac/roles/role_disabled.yaml')
        file_paths = [file_path]
        loader._get_role_definitions_file_paths = mock.Mock()
        loader._get_role_definitions_file_paths.return_value = file_paths

        result = loader.load_role_definitions()
        self.assertItemsEqual(result, [])

    def test_load_role_definitions_empty_definition_file(self):
        """An empty role definition file raises a ValueError."""
        loader = RBACDefinitionsLoader()

        file_path = os.path.join(get_fixtures_base_path(), 'rbac_invalid/roles/role_empty.yaml')
        file_paths = [file_path]
        loader._get_role_definitions_file_paths = mock.Mock()
        loader._get_role_definitions_file_paths.return_value = file_paths

        expected_msg = 'Role definition file .+? is empty and invalid'
        self.assertRaisesRegexp(ValueError, expected_msg, loader.load_role_definitions)

    def test_load_user_role_assignments_duplicate_user_definition(self):
        """Two files assigning roles to the same user raise a ValueError."""
        loader = RBACDefinitionsLoader()

        # Try to load all the user role assignments from disk where two definitions refer to the
        # same user
        file_path1 = os.path.join(get_fixtures_base_path(),
                                  'rbac_invalid/assignments/user_foo1.yaml')
        file_path2 = os.path.join(get_fixtures_base_path(),
                                  'rbac_invalid/assignments/user_foo2.yaml')
        file_paths = [file_path1, file_path2]
        loader._get_role_assiginments_file_paths = mock.Mock()
        loader._get_role_assiginments_file_paths.return_value = file_paths

        expected_msg = 'Duplicate definition file found for user "userfoo"'
        self.assertRaisesRegexp(ValueError, expected_msg, loader.load_user_role_assignments)

    def test_load_user_role_assignments_disabled_assignment(self):
        """Disabled role assignments are excluded from the result."""
        loader = RBACDefinitionsLoader()

        # Disabled role assignment which means this method shouldn't include it in the result
        file_path = os.path.join(get_fixtures_base_path(), 'rbac/assignments/user_disabled.yaml')
        file_paths = [file_path]
        loader._get_role_assiginments_file_paths = mock.Mock()
        loader._get_role_assiginments_file_paths.return_value = file_paths

        result = loader.load_user_role_assignments()
        self.assertItemsEqual(result, [])

    def test_load_user_role_assignments_empty_definition_file(self):
        """An empty role assignment file raises a ValueError."""
        loader = RBACDefinitionsLoader()

        file_path = os.path.join(get_fixtures_base_path(),
                                 'rbac_invalid/assignments/user_empty.yaml')
        file_paths = [file_path]
        loader._get_role_assiginments_file_paths = mock.Mock()
        loader._get_role_assiginments_file_paths.return_value = file_paths

        expected_msg = 'Role assignment file .+? is empty and invalid'
        self.assertRaisesRegexp(ValueError, expected_msg, loader.load_user_role_assignments)

    def test_load_sample_role_definition(self):
        """
        Validate that the sample role definition which we ship with default installation works.
        """
        loader = RBACDefinitionsLoader()

        file_path = os.path.join(get_fixtures_base_path(), 'rbac/roles/role_sample.yaml')
        role_api = loader.load_role_definition_from_file(file_path=file_path)
        self.assertEqual(role_api.name, 'sample')
        self.assertFalse(role_api.enabled)

    def test_load_sample_user_role_assignment_definition(self):
        """
        Validate that the sample user role assignment definition which we ship with default
        installation works.
        """
        loader = RBACDefinitionsLoader()

        file_path = os.path.join(get_fixtures_base_path(), 'rbac/assignments/user_sample.yaml')
        assignment_api = loader.load_user_role_assignments_from_file(file_path=file_path)
        self.assertEqual(assignment_api.username, 'stackstorm_user')
        self.assertFalse(assignment_api.enabled)
| 47.57513 | 100 | 0.722174 |
ff801b665871d7ff4575e7cf3284d9806256941a | 100,421 | py | Python | tensorflow/tools/compatibility/tf_upgrade_v2.py | PhotoLabDevelopment/tensorflow | 735642ee1cd8d7f21ddd94f851ee753c17c23019 | [
"Apache-2.0"
] | 2 | 2019-08-04T20:28:14.000Z | 2019-10-27T23:26:42.000Z | tensorflow/tools/compatibility/tf_upgrade_v2.py | PhotoLabDevelopment/tensorflow | 735642ee1cd8d7f21ddd94f851ee753c17c23019 | [
"Apache-2.0"
] | 1 | 2019-08-19T08:03:52.000Z | 2019-08-19T08:03:52.000Z | tensorflow/tools/compatibility/tf_upgrade_v2.py | PhotoLabDevelopment/tensorflow | 735642ee1cd8d7f21ddd94f851ee753c17c23019 | [
"Apache-2.0"
] | 1 | 2021-05-05T05:17:34.000Z | 2021-05-05T05:17:34.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import copy
import functools
import sys
import pasta
from tensorflow.tools.compatibility import all_renames_v2
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import module_deprecations_v2
from tensorflow.tools.compatibility import reorders_v2
# These pylint warnings are a mistake.
# pylint: disable=g-explicit-bool-comparison,g-bool-id-comparison
class UnaliasedTFImport(ast_edits.AnalysisResult):
def __init__(self):
self.log_level = ast_edits.ERROR
self.log_message = ("The tf_upgrade_v2 script detected an unaliased "
"`import tensorflow`. The script can only run when "
"importing with `import tensorflow as tf`.")
class VersionedTFImport(ast_edits.AnalysisResult):
def __init__(self, version):
self.log_level = ast_edits.INFO
self.log_message = ("Not upgrading symbols because `tensorflow." + version
+ "` was directly imported as `tf`.")
class TFAPIImportAnalysisSpec(ast_edits.APIAnalysisSpec):
def __init__(self):
self.symbols_to_detect = {}
self.imports_to_detect = {
("tensorflow", None): UnaliasedTFImport(),
("tensorflow.compat.v1", "tf"): VersionedTFImport("compat.v1"),
("tensorflow.compat.v2", "tf"): VersionedTFImport("compat.v2"),
}
class TFAPIChangeSpec(ast_edits.NoUpdateSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
# If the new argument is None, it will be removed.
# Only keyword args are handled, so make sure to also put any function in
# function_reorders to ensure that all args are made into keywords first.
self.function_keyword_renames = {
# TODO(b/129398290)
# "tf.string_split": {
# "delimiter": "sep",
# },
"tf.test.assert_equal_graph_def": {
"checkpoint_v2": None,
"hash_table_shared_name": None,
},
"tf.autograph.to_code": {
"arg_types": None,
"arg_values": None,
"indentation": None,
},
"tf.autograph.to_graph": {
"arg_types": None,
"arg_values": None,
},
"tf.nn.embedding_lookup": {
"validate_indices": None,
},
"tf.image.sample_distorted_bounding_box": {
"seed2": None,
},
"tf.gradients": {
"colocate_gradients_with_ops": None,
},
"tf.hessians": {
"colocate_gradients_with_ops": None,
},
"*.minimize": {
"colocate_gradients_with_ops": None,
},
"*.compute_gradients": {
"colocate_gradients_with_ops": None,
},
"tf.cond": {
"strict": None,
"fn1": "true_fn",
"fn2": "false_fn"
},
"tf.argmin": {
"dimension": "axis",
},
"tf.argmax": {
"dimension": "axis",
},
"tf.arg_min": {
"dimension": "axis",
},
"tf.arg_max": {
"dimension": "axis",
},
"tf.math.argmin": {
"dimension": "axis",
},
"tf.math.argmax": {
"dimension": "axis",
},
"tf.image.crop_and_resize": {
"box_ind": "box_indices",
},
"tf.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.resize": {
"align_corners": None,
},
"tf.image.resize_images": {
"align_corners": None,
},
"tf.expand_dims": {
"dim": "axis",
},
"tf.batch_to_space": {
"block_size": "block_shape",
},
"tf.space_to_batch": {
"block_size": "block_shape",
},
"tf.nn.space_to_batch": {
"block_size": "block_shape",
},
"tf.constant": {
"verify_shape": "verify_shape_is_now_always_true",
},
"tf.convert_to_tensor": {
"preferred_dtype": "dtype_hint"
},
"tf.nn.softmax_cross_entropy_with_logits": {
"dim": "axis",
"_sentinel": None,
},
"tf.nn.softmax_cross_entropy_with_logits_v2": {
"dim": "axis"
},
"tf.linalg.l2_normalize": {
"dim": "axis",
},
"tf.linalg.norm": {
"keep_dims": "keepdims",
},
"tf.norm": {
"keep_dims": "keepdims",
},
"tf.load_file_system_library": {
"library_filename": "library_location",
},
"tf.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.math.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.nn.erosion2d": {
"kernel": "filters",
"rates": "dilations",
},
"tf.math.l2_normalize": {
"dim": "axis",
},
"tf.math.log_softmax": {
"dim": "axis",
},
"tf.math.softmax": {
"dim": "axis"
},
"tf.nn.l2_normalize": {
"dim": "axis",
},
"tf.nn.log_softmax": {
"dim": "axis",
},
"tf.nn.moments": {
"keep_dims": "keepdims",
},
"tf.nn.pool": {
"dilation_rate": "dilations"
},
"tf.nn.separable_conv2d": {
"rate": "dilations"
},
"tf.nn.depthwise_conv2d": {
"rate": "dilations"
},
"tf.nn.softmax": {
"dim": "axis"
},
"tf.nn.sufficient_statistics": {
"keep_dims": "keepdims"
},
"tf.debugging.assert_all_finite": {
"t": "x",
"msg": "message",
},
"tf.sparse.add": {
"thresh": "threshold",
},
"tf.sparse_add": {
"thresh": "threshold",
},
"tf.sparse.concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse_concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse.split": {
"split_dim": "axis",
},
"tf.sparse_split": {
"split_dim": "axis",
},
"tf.sparse.reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse.reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.nn.max_pool_with_argmax": {
"Targmax": "output_dtype",
},
"tf.nn.max_pool": {
"value": "input"
},
"tf.nn.avg_pool": {
"value": "input"
},
"tf.nn.avg_pool2d": {
"value": "input"
},
"tf.multinomial": {
"output_dtype": "dtype",
},
"tf.random.multinomial": {
"output_dtype": "dtype",
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis",
},
"tf.nn.batch_norm_with_global_normalization": {
"t": "input",
"m": "mean",
"v": "variance",
},
"tf.nn.dilation2d": {
"filter": "filters",
"rates": "dilations",
},
"tf.nn.conv3d": {
"filter": "filters"
},
"tf.zeros_like": {
"tensor": "input",
},
"tf.ones_like": {
"tensor": "input",
},
"tf.nn.conv2d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.conv3d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.convolution": {
"filter": "filters",
"dilation_rate": "dilations",
},
"tf.gfile.Exists": {
"filename": "path",
},
"tf.gfile.Remove": {
"filename": "path",
},
"tf.gfile.Stat": {
"filename": "path",
},
"tf.gfile.Glob": {
"filename": "pattern",
},
"tf.gfile.MkDir": {
"dirname": "path",
},
"tf.gfile.MakeDirs": {
"dirname": "path",
},
"tf.gfile.DeleteRecursively": {
"dirname": "path",
},
"tf.gfile.IsDirectory": {
"dirname": "path",
},
"tf.gfile.ListDirectory": {
"dirname": "path",
},
"tf.gfile.Copy": {
"oldpath": "src",
"newpath": "dst",
},
"tf.gfile.Rename": {
"oldname": "src",
"newname": "dst",
},
"tf.gfile.Walk": {
"in_order": "topdown",
},
"tf.random.stateless_multinomial": {
"output_dtype": "dtype",
},
"tf.string_to_number": {
"string_tensor": "input",
},
"tf.strings.to_number": {
"string_tensor": "input",
},
"tf.string_to_hash_bucket": {
"string_tensor": "input",
},
"tf.strings.to_hash_bucket": {
"string_tensor": "input",
},
"tf.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.strings.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis",
},
"tf.nn.weighted_moments": {
"keep_dims": "keepdims"
},
"tf.nn.conv1d": {
"value": "input",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d": {
"filter": "filters",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d_backprop_input": {
"use_cudnn_on_gpu": None,
"input_sizes": "output_shape",
"out_backprop": "input",
"filter": "filters",
},
"tf.contrib.summary.audio": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.create_file_writer": {
"name": None,
},
"tf.contrib.summary.generic": {
"name": "tag",
"tensor": "data",
"family": None,
},
"tf.contrib.summary.histogram": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.image": {
"tensor": "data",
"bad_color": None,
"max_images": "max_outputs",
"family": None,
},
"tf.contrib.summary.scalar": {
"tensor": "data",
"family": None,
},
"tf.nn.weighted_cross_entropy_with_logits": {
"targets": "labels",
},
"tf.decode_raw": {
"bytes": "input_bytes",
},
"tf.io.decode_raw": {
"bytes": "input_bytes",
},
"tf.contrib.framework.load_variable": {
"checkpoint_dir": "ckpt_dir_or_file",
}
}
# Mapping from function to the new name of the function
# Add additional renames not in renames_v2.py to all_renames_v2.py.
self.symbol_renames = all_renames_v2.symbol_renames
self.import_renames = {}
# Variables that should be changed to functions.
self.change_to_function = {}
# pylint: disable=line-too-long
# This list should just contain names of functions that had
# their arguments reordered. After adding a function name to the list
# run the following to update reorders_v2.py:
# bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
# bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
# pylint: enable=line-too-long
self.reordered_function_names = {
"tf.io.serialize_sparse",
"tf.io.serialize_many_sparse",
"tf.argmax",
"tf.argmin",
"tf.batch_to_space",
"tf.cond",
"tf.nn.space_to_batch",
"tf.boolean_mask",
"tf.convert_to_tensor",
"tf.nn.conv1d",
"tf.nn.conv2d",
"tf.nn.conv2d_backprop_input",
"tf.nn.ctc_beam_search_decoder",
"tf.nn.moments",
"tf.nn.convolution",
"tf.nn.crelu",
"tf.nn.weighted_moments",
"tf.nn.pool",
"tf.nn.separable_conv2d",
"tf.nn.depthwise_conv2d",
"tf.multinomial",
"tf.random.multinomial",
"tf.pad",
"tf.quantize_v2",
"tf.feature_column.categorical_column_with_vocabulary_file",
"tf.shape",
"tf.size",
# TODO(b/129398290)
# "tf.string_split",
"tf.random.poisson",
"tf.sparse.add",
"tf.sparse_add",
"tf.sparse.concat",
"tf.sparse_concat",
"tf.sparse.segment_mean",
"tf.sparse.segment_sqrt_n",
"tf.sparse.segment_sum",
"tf.sparse_matmul",
"tf.sparse.reduce_max",
"tf.sparse_reduce_max",
"tf.io.decode_csv",
"tf.strings.length",
"tf.strings.reduce_join",
"tf.strings.substr",
"tf.substr",
"tf.transpose",
"tf.tuple",
"tf.parse_example",
"tf.parse_single_example",
"tf.io.parse_example",
"tf.io.parse_single_example",
"tf.while_loop",
"tf.reduce_all",
"tf.math.reduce_all",
"tf.reduce_any",
"tf.math.reduce_any",
"tf.reduce_min",
"tf.math.reduce_min",
"tf.reduce_max",
"tf.math.reduce_max",
"tf.reduce_sum",
"tf.math.reduce_sum",
"tf.reduce_mean",
"tf.math.reduce_mean",
"tf.reduce_prod",
"tf.math.reduce_prod",
"tf.reduce_logsumexp",
"tf.math.reduce_logsumexp",
"tf.reduce_join",
"tf.confusion_matrix",
"tf.math.confusion_matrix",
"tf.math.in_top_k",
"tf.nn.depth_to_space",
"tf.nn.embedding_lookup",
"tf.nn.embedding_lookup_sparse",
"tf.nn.in_top_k",
"tf.nn.space_to_depth",
"tf.test.assert_equal_graph_def",
"tf.linalg.norm",
"tf.norm",
"tf.reverse_sequence",
"tf.sparse_split",
# tf.nn.softmax_cross_entropy_with_logits *must* be called with
# keyword arguments. Add keyword arguments in rare case when they
# are not specified.
"tf.nn.softmax_cross_entropy_with_logits",
"tf.nn.fractional_avg_pool",
"tf.nn.fractional_max_pool",
"tf.image.sample_distorted_bounding_box",
"tf.gradients",
"tf.hessians",
"tf.nn.max_pool",
"tf.nn.avg_pool",
"tf.estimator.LinearClassifier",
"tf.estimator.LinearRegressor",
"tf.estimator.DNNLinearCombinedClassifier",
"tf.estimator.DNNLinearCombinedRegressor",
"tf.estimator.DNNRegressor",
"tf.estimator.DNNClassifier",
"tf.estimator.BaselineClassifier",
"tf.estimator.BaselineRegressor",
"tf.initializers.uniform_unit_scaling",
"tf.uniform_unit_scaling_initializer",
"tf.train.sdca_fprint",
"tf.train.sdca_optimizer",
"tf.train.sdca_shrink_l1",
"tf.data.experimental.TensorStructure",
"tf.data.experimental.SparseTensorStructure",
"tf.data.experimental.RaggedTensorStructure",
"tf.data.experimental.TensorArrayStructure",
}
# Manual mapping of function names to be reordered to their list of argument
# names, in order. Only use this if argument names cannot be autodetected,
# e.g. if the functions are in contrib.
self.manual_function_reorders = {
"tf.contrib.summary.audio": [
"name", "tensor", "sample_rate", "max_outputs", "family", "step"],
"tf.contrib.summary.create_file_writer": [
"logdir", "max_queue", "flush_millis", "filename_suffix", "name"],
"tf.contrib.summary.generic": [
"name", "tensor", "metadata", "family", "step"],
"tf.contrib.summary.histogram": [
"name", "tensor", "family", "step"],
"tf.contrib.summary.image": [
"name", "tensor", "bad_color", "max_images", "family", "step"],
"tf.contrib.summary.scalar": [
"name", "tensor", "family", "step"],
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = dict(reorders_v2.reorders)
self.function_reorders.update(self.manual_function_reorders)
decay_function_comment = (
ast_edits.INFO,
"To use learning rate decay schedules with TensorFlow 2.0, switch to "
"the schedules in `tf.keras.optimizers.schedules`.\n"
)
assert_return_type_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, the "
"data argument has been removed, and arguments have been reordered."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
assert_rank_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, and"
" the data and summarize arguments have been removed."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
contrib_layers_layer_norm_comment = (
ast_edits.WARNING,
"(Manual edit required) `tf.contrib.layers.layer_norm` has been "
"deprecated, and its implementation has been integrated with "
"`tf.keras.layers.LayerNormalization` in TensorFlow 2.0. "
"Note that, the default value of `epsilon` is changed to `1e-3` in the "
"new API from `1e-12`, and this may introduce numerical differences. "
"Please check the new API and use that instead."
)
contrib_estimator_head_comment = (
ast_edits.WARNING,
"(Manual edit required) `tf.contrib.estimator.*_head` has been "
"deprecated, and its implementation has been integrated with "
"`tf.estimator.*Head` in TensorFlow 2.0. "
"Please check the new API and use that instead."
)
initializers_no_dtype_comment = (
ast_edits.INFO, "Initializers no longer have the "
"dtype argument in the constructor or partition_info argument in the "
"__call__ method.\nThe calls have been converted to compat.v1 for "
"safety (even though they may already have been correct).")
metrics_comment = (
ast_edits.INFO,
"tf.metrics have been replaced with object oriented versions in"
" TF 2.0 and after. The metric function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
losses_comment = (
ast_edits.INFO,
"tf.losses have been replaced with object oriented versions in"
" TF 2.0 and after. The loss function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
# This could be done with a _rename_if_arg_not_found_transformer
deprecate_partition_strategy_comment = (
ast_edits.WARNING,
"`partition_strategy` has been removed from <function name>. "
" The 'div' strategy will be used by default.")
# make change instead
uniform_unit_scaling_initializer_comment = (
ast_edits.ERROR,
"uniform_unit_scaling_initializer has been removed. Please use"
" tf.initializers.variance_scaling instead with distribution=uniform "
"to get equivalent behaviour.")
# Make change instead (issue warning about strip_...)
export_saved_model_renamed = (
ast_edits.ERROR,
"(Manual edit required) Please rename the method export_savedmodel() "
"to export_saved_model(). Two things to note:\n\t(1) The argument "
"strip_default_attributes has been removed. The function will always "
"strip the default attributes from ops. If this breaks your code, "
"please switch to tf.compat.v1.estimator.Estimator.\n\t(2) This change "
"only effects core estimator. If you are using "
"tf.contrib.learn.Estimator, please switch to using core estimator.")
summary_api_comment = (
ast_edits.INFO,
"The TF 1.x summary API cannot be automatically migrated to TF 2.0, so "
"symbols have been converted to tf.compat.v1.summary.* and must be "
"migrated manually. Typical usage will only require changes to the "
"summary writing logic, not to individual calls like scalar(). "
"For examples of the new summary API, see the Effective TF 2.0 "
"migration document or check the TF 2.0 TensorBoard tutorials.")
contrib_summary_comment = (
ast_edits.WARNING,
"tf.contrib.summary.* functions have been migrated best-effort to "
"tf.compat.v2.summary.* equivalents where possible, but the resulting "
"code is not guaranteed to work, so please check carefully. For more "
"information about the new summary API, see the Effective TF 2.0 "
"migration document or check the updated TensorBoard tutorials.")
contrib_summary_family_arg_comment = (
ast_edits.WARNING,
"<function name> replacement does not accept a 'family' argument; "
"instead regular name scoping should be used. This call site specifies "
"a family argument that has been removed on conversion, so the emitted "
"tag names may be incorrect without manual editing.")
contrib_create_file_writer_comment = (
ast_edits.WARNING,
"tf.contrib.summary.create_file_writer() has been ported to the new "
"tf.compat.v2.summary.create_file_writer(), which no longer re-uses "
"existing event files for the same logdir; instead it always opens a "
"new writer/file. The python writer objects must be re-used explicitly "
"if the reusing behavior is desired.")
contrib_summary_record_every_n_comment = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.summary.record_summaries_every_n_global_steps(n, step) "
"should be replaced by a call to tf.compat.v2.summary.record_if() with "
"the argument `lambda: tf.math.equal(0, global_step % n)` (or in graph "
"mode, the lambda body can be used directly). If no global step was "
"passed, instead use tf.compat.v1.train.get_or_create_global_step().")
contrib_summary_graph_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.graph() has no direct "
"equivalent in TF 2.0 because manual graph construction has been "
"superseded by use of tf.function. To log tf.function execution graphs "
"to the summary writer, use the new tf.compat.v2.summary.trace_* "
"functions instead.")
contrib_summary_import_event_comment = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.summary.import_event() has no "
"direct equivalent in TF 2.0. For a similar experimental feature, try "
"tf.compat.v2.summary.experimental.write_raw_pb() which also accepts "
"serialized summary protocol buffer input, but for tf.Summary "
"protobufs rather than tf.Events.")
keras_default_save_format_comment = (
ast_edits.WARNING,
"(This warning is only applicable if the code saves a tf.Keras model) "
"Keras model.save now saves to the Tensorflow SavedModel format by "
"default, instead of HDF5. To continue saving to HDF5, add the "
"argument save_format='h5' to the save() function.")
distribute_strategy_api_changes = (
"If you're using the strategy with a "
"custom training loop, note the following changes in methods: "
"make_dataset_iterator->experimental_distribute_dataset, "
"experimental_make_numpy_iterator->experimental_make_numpy_dataset, "
"extended.call_for_each_replica->experimental_run_v2, "
"reduce requires an axis argument, "
"unwrap->experimental_local_results "
"experimental_initialize and experimental_finalize no longer needed ")
contrib_mirrored_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.MirroredStrategy has "
"been migrated to tf.distribute.MirroredStrategy. Things to note: "
"Constructor arguments have changed. If you are using "
"MirroredStrategy with Keras training framework, the input provided to "
"`model.fit` will be assumed to have global batch size and split "
"across the replicas. " + distribute_strategy_api_changes)
core_mirrored_strategy_warning = (
ast_edits.WARNING,
"(Manual edit may be required) tf.distribute.MirroredStrategy API has "
"changed. " + distribute_strategy_api_changes)
contrib_one_device_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.OneDeviceStrategy has "
"been migrated to tf.distribute.OneDeviceStrategy. " +
distribute_strategy_api_changes)
contrib_tpu_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) tf.contrib.distribute.TPUStrategy has "
"been migrated to tf.distribute.experimental.TPUStrategy. Note the "
"slight changes in constructor. " + distribute_strategy_api_changes)
contrib_collective_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.distribute.CollectiveAllReduceStrategy has "
"been migrated to "
"tf.distribute.experimental.MultiWorkerMirroredStrategy. Note the "
"changes in constructor. " + distribute_strategy_api_changes)
contrib_ps_strategy_warning = (
ast_edits.ERROR,
"(Manual edit required) "
"tf.contrib.distribute.ParameterServerStrategy has "
"been migrated to "
"tf.distribute.experimental.ParameterServerStrategy (multi machine) "
" and tf.distribute.experimental.CentralStorageStrategy (one machine). "
"Note the changes in constructors. " + distribute_strategy_api_changes)
# Function warnings. <function name> placeholder inside warnings will be
# replaced by function name.
# You can use *. to add items which do not check the FQN, and apply to e.g.,
# methods.
self.function_warnings = {
"*.export_savedmodel":
export_saved_model_renamed,
"*.save":
keras_default_save_format_comment,
"tf.assert_equal":
assert_return_type_comment,
"tf.assert_none_equal":
assert_return_type_comment,
"tf.assert_negative":
assert_return_type_comment,
"tf.assert_positive":
assert_return_type_comment,
"tf.assert_non_negative":
assert_return_type_comment,
"tf.assert_non_positive":
assert_return_type_comment,
"tf.assert_near":
assert_return_type_comment,
"tf.assert_less":
assert_return_type_comment,
"tf.assert_less_equal":
assert_return_type_comment,
"tf.assert_greater":
assert_return_type_comment,
"tf.assert_greater_equal":
assert_return_type_comment,
"tf.assert_integer":
assert_return_type_comment,
"tf.assert_type":
assert_return_type_comment,
"tf.assert_scalar":
assert_return_type_comment,
"tf.assert_rank":
assert_rank_comment,
"tf.assert_rank_at_least":
assert_rank_comment,
"tf.assert_rank_in":
assert_rank_comment,
"tf.contrib.layers.layer_norm":
contrib_layers_layer_norm_comment,
"tf.contrib.estimator.binary_classification_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.logistic_regression_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_class_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.multi_label_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.poisson_regression_head":
contrib_estimator_head_comment,
"tf.contrib.estimator.regression_head":
contrib_estimator_head_comment,
"tf.contrib.summary.all_summary_ops":
contrib_summary_comment,
"tf.contrib.summary.audio":
contrib_summary_comment,
"tf.contrib.summary.create_file_writer":
contrib_create_file_writer_comment,
"tf.contrib.summary.generic":
contrib_summary_comment,
"tf.contrib.summary.graph":
contrib_summary_graph_comment,
"tf.contrib.summary.histogram":
contrib_summary_comment,
"tf.contrib.summary.import_event":
contrib_summary_import_event_comment,
"tf.contrib.summary.image":
contrib_summary_comment,
"tf.contrib.summary.record_summaries_every_n_global_steps":
contrib_summary_record_every_n_comment,
"tf.contrib.summary.scalar":
contrib_summary_comment,
"tf.debugging.assert_equal":
assert_return_type_comment,
"tf.debugging.assert_greater":
assert_return_type_comment,
"tf.debugging.assert_greater_equal":
assert_return_type_comment,
"tf.debugging.assert_integer":
assert_return_type_comment,
"tf.debugging.assert_less":
assert_return_type_comment,
"tf.debugging.assert_less_equal":
assert_return_type_comment,
"tf.debugging.assert_near":
assert_return_type_comment,
"tf.debugging.assert_negative":
assert_return_type_comment,
"tf.debugging.assert_non_negative":
assert_return_type_comment,
"tf.debugging.assert_non_positive":
assert_return_type_comment,
"tf.debugging.assert_none_equal":
assert_return_type_comment,
"tf.debugging.assert_positive":
assert_return_type_comment,
"tf.debugging.assert_type":
assert_return_type_comment,
"tf.debugging.assert_scalar":
assert_return_type_comment,
"tf.debugging.assert_rank":
assert_rank_comment,
"tf.debugging.assert_rank_at_least":
assert_rank_comment,
"tf.debugging.assert_rank_in":
assert_rank_comment,
"tf.train.exponential_decay":
decay_function_comment,
"tf.train.piecewise_constant_decay":
decay_function_comment,
"tf.train.polynomial_decay":
decay_function_comment,
"tf.train.natural_exp_decay":
decay_function_comment,
"tf.train.inverse_time_decay":
decay_function_comment,
"tf.train.cosine_decay":
decay_function_comment,
"tf.train.cosine_decay_restarts":
decay_function_comment,
"tf.train.linear_cosine_decay":
decay_function_comment,
"tf.train.noisy_linear_cosine_decay":
decay_function_comment,
"tf.nn.embedding_lookup":
deprecate_partition_strategy_comment,
"tf.nn.embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.nce_loss":
deprecate_partition_strategy_comment,
"tf.nn.safe_embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.sampled_softmax_loss":
deprecate_partition_strategy_comment,
"tf.keras.estimator.model_to_estimator":
(ast_edits.WARNING,
"Estimators from <function name> will save object-based "
"checkpoints (format used by `keras_model.save_weights` and "
"`keras_model.load_weights`) by default in 2.0. To continue "
"saving name-based checkpoints, set `checkpoint_format='saver'`."),
"tf.keras.initializers.Zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.Ones":
initializers_no_dtype_comment,
"tf.keras.initializers.ones":
initializers_no_dtype_comment,
"tf.keras.initializers.Constant":
initializers_no_dtype_comment,
"tf.keras.initializers.constant":
initializers_no_dtype_comment,
"tf.keras.initializers.VarianceScaling":
initializers_no_dtype_comment,
"tf.keras.initializers.Orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.Identity":
initializers_no_dtype_comment,
"tf.keras.initializers.identity":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.zeros":
initializers_no_dtype_comment,
"tf.zeros_initializer":
initializers_no_dtype_comment,
"tf.initializers.ones":
initializers_no_dtype_comment,
"tf.ones_initializer":
initializers_no_dtype_comment,
"tf.initializers.constant":
initializers_no_dtype_comment,
"tf.constant_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_uniform":
initializers_no_dtype_comment,
"tf.random_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_normal":
initializers_no_dtype_comment,
"tf.random_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.truncated_normal":
initializers_no_dtype_comment,
"tf.truncated_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.variance_scaling":
initializers_no_dtype_comment,
"tf.variance_scaling_initializer":
initializers_no_dtype_comment,
"tf.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.orthogonal_initializer":
initializers_no_dtype_comment,
"tf.initializers.identity":
initializers_no_dtype_comment,
"tf.glorot_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.glorot_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.losses.absolute_difference":
losses_comment,
"tf.losses.add_loss":
losses_comment,
"tf.losses.compute_weighted_loss":
losses_comment,
"tf.losses.cosine_distance":
losses_comment,
"tf.losses.get_losses":
losses_comment,
"tf.losses.get_regularization_loss":
losses_comment,
"tf.losses.get_regularization_losses":
losses_comment,
"tf.losses.get_total_loss":
losses_comment,
"tf.losses.hinge_loss":
losses_comment,
"tf.losses.huber_loss":
losses_comment,
"tf.losses.log_loss":
losses_comment,
"tf.losses.mean_pairwise_squared_error":
losses_comment,
"tf.losses.mean_squared_error":
losses_comment,
"tf.losses.sigmoid_cross_entropy":
losses_comment,
"tf.losses.softmax_cross_entropy":
losses_comment,
"tf.losses.sparse_softmax_cross_entropy":
losses_comment,
"tf.metrics.accuracy":
metrics_comment,
"tf.metrics.auc":
metrics_comment,
"tf.metrics.average_precision_at_k":
metrics_comment,
"tf.metrics.false_negatives":
metrics_comment,
"tf.metrics.false_negatives_at_thresholds":
metrics_comment,
"tf.metrics.false_positives":
metrics_comment,
"tf.metrics.false_positives_at_thresholds":
metrics_comment,
"tf.metrics.mean":
metrics_comment,
"tf.metrics.mean_absolute_error":
metrics_comment,
"tf.metrics.mean_cosine_distance":
metrics_comment,
"tf.metrics.mean_iou":
metrics_comment,
"tf.metrics.mean_per_class_accuracy":
metrics_comment,
"tf.metrics.mean_relative_error":
metrics_comment,
"tf.metrics.mean_squared_error":
metrics_comment,
"tf.metrics.mean_tensor":
metrics_comment,
"tf.metrics.percentage_below":
metrics_comment,
"tf.metrics.precision":
metrics_comment,
"tf.metrics.precision_at_k":
metrics_comment,
"tf.metrics.precision_at_thresholds":
metrics_comment,
"tf.metrics.precision_at_top_k":
metrics_comment,
"tf.metrics.recall":
metrics_comment,
"tf.metrics.recall_at_k":
metrics_comment,
"tf.metrics.recall_at_thresholds":
metrics_comment,
"tf.metrics.recall_at_top_k":
metrics_comment,
"tf.metrics.root_mean_squared_error":
metrics_comment,
"tf.metrics.sensitivity_at_specificity":
metrics_comment,
"tf.metrics.sparse_average_precision_at_k":
metrics_comment,
"tf.metrics.sparse_precision_at_k":
metrics_comment,
"tf.metrics.specificity_at_sensitivity":
metrics_comment,
"tf.metrics.true_negatives":
metrics_comment,
"tf.metrics.true_negatives_at_thresholds":
metrics_comment,
"tf.metrics.true_positives":
metrics_comment,
"tf.metrics.true_positives_at_thresholds":
metrics_comment,
"tf.get_variable":
(ast_edits.WARNING,
"<function name> returns ResourceVariables by default in 2.0, "
"which have well-defined semantics and are stricter about shapes. "
"You can disable this behavior by passing use_resource=False, or "
"by calling tf.compat.v1.disable_resource_variables()."),
"tf.pywrap_tensorflow":
(ast_edits.ERROR,
"<function name> cannot be converted automatically. "
"`tf.pywrap_tensorflow` will not be distributed with "
"TensorFlow 2.0, please consider an alternative in public "
"TensorFlow APIs."),
"tf.contrib.distribute.MirroredStrategy":
contrib_mirrored_strategy_warning,
"tf.distribute.MirroredStrategy":
core_mirrored_strategy_warning,
"tf.contrib.distribute.OneDeviceStrategy":
contrib_one_device_strategy_warning,
"tf.contrib.distribute.TPUStrategy":
contrib_tpu_strategy_warning,
"tf.contrib.distribute.CollectiveAllReduceStrategy":
contrib_collective_strategy_warning,
"tf.contrib.distribute.ParameterServerStrategy":
contrib_ps_strategy_warning,
"tf.summary.FileWriter": summary_api_comment,
"tf.summary.FileWriterCache": summary_api_comment,
"tf.summary.Summary": summary_api_comment,
"tf.summary.audio": summary_api_comment,
"tf.summary.histogram": summary_api_comment,
"tf.summary.image": summary_api_comment,
"tf.summary.merge": summary_api_comment,
"tf.summary.merge_all": summary_api_comment,
"tf.summary.scalar": summary_api_comment,
"tf.summary.tensor_summary": summary_api_comment,
"tf.summary.text": summary_api_comment,
}
for symbol, replacement in all_renames_v2.addons_symbol_mappings.items():
warning = (
ast_edits.WARNING, (
"(Manual edit required) `{}` has been migrated to `{}` in "
"TensorFlow Addons. The API spec may have changed during the "
"migration. Please see https://github.com/tensorflow/addons "
"for more info.").format(symbol, replacement))
self.function_warnings[symbol] = warning
# Warnings that are emitted only if a specific arg is found.
self.function_arg_warnings = {
"tf.nn.conv1d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_filter": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_input": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"tf.gradients no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.minimize": {
("colocate_gradients_with_ops", 5): (
ast_edits.INFO,
"Optimizer.minimize no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.compute_gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"Optimizer.compute_gradients no "
"longer takes 'colocate_gradients_with_ops' argument, it "
"behaves as if it was set to True."),
},
"tf.cond": {
("strict", 3): (
ast_edits.WARNING,
"tf.cond no longer takes 'strict' argument, it behaves as "
"if was set to True.")
},
"tf.contrib.summary.audio": {
("family", 4): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.create_file_writer": {
("name", 4): (
ast_edits.WARNING,
"tf.contrib.summary.create_file_writer() no longer supports "
"implicit writer re-use based on shared logdirs or resource "
"names; this call site passed a 'name' argument that has been "
"removed. The new tf.compat.v2.summary.create_file_writer() "
"replacement has a 'name' parameter but the semantics are "
"the usual ones to name the op itself and do not control "
"writer re-use; writers must be manually re-used if desired.")
},
"tf.contrib.summary.generic": {
("name", 0): (
ast_edits.WARNING,
"tf.contrib.summary.generic() takes a 'name' argument for the "
"op name that also determines the emitted tag (prefixed by any "
"active name scopes), but tf.compat.v2.summary.write(), which "
"replaces it, separates these into 'tag' and 'name' arguments. "
"The 'name' argument here has been converted to 'tag' to "
"preserve a meaningful tag, but any name scopes will not be "
"reflected in the tag without manual editing."),
("family", 3): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.histogram": {
("family", 2): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.image": {
("bad_color", 2): (
ast_edits.WARNING,
"tf.contrib.summary.image no longer takes the 'bad_color' "
"argument; caller must now preprocess if needed. This call "
"site specifies a bad_color argument so it cannot be converted "
"safely."),
("family", 4): contrib_summary_family_arg_comment,
},
"tf.contrib.summary.scalar": {
("family", 2): contrib_summary_family_arg_comment,
},
"tf.image.resize": {
("align_corners",
3): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize."),
},
"tf.image.resize_bilinear": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bilinear."),
},
"tf.image.resize_area": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_area."),
},
"tf.image.resize_bicubic": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bicubic."),
},
"tf.image.resize_nearest_neighbor": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_nearest_neighbor."),
},
}
# Specially handled functions
# Each transformer is a callable which will be called with the arguments
# transformer(parent, node, full_name, name, logs)
# Where logs is a list to which (level, line, col, msg) tuples can be
# appended, full_name is the FQN of the function called (or None if that is
# unknown), name is the name of the function called (or None is that is
# unknown). node is an ast.Call node representing this function call, and
# parent is its parent in the AST.
# The function may modify node (but not parent), and must return
# - none, if nothing was modified
# - node, if node was modified in place (make sure to use
# pasta.ast_utils.replace_child to swap out children, otherwise formatting
# may get messy)
# - a replacement for node, if the whole call node was replaced. The caller
# will take care of changing parent.
canned_estimator_msg_optimizer = (
"tf.keras.optimizers.* only, so the call was converted to compat.v1. "
"Please note that tf.train.Optimizers have one-to-one correspondents "
"in tf.keras.optimizers, so you may be able to convert to the new "
"optimizers directly (See https://www.tensorflow.org/api_docs/python"
"/tf/keras/optimizers). Checkpoint compatibility is not guaranteed, "
"but there is a checkpoint converter tool that you can use.")
canned_estimator_msg = (
"no longer takes `input_layer_partitioner` arg, and it supports "
+ canned_estimator_msg_optimizer)
self.function_transformers = {
"*.make_initializable_iterator": _iterator_transformer,
"*.make_one_shot_iterator": _iterator_transformer,
"tf.nn.dropout": _dropout_transformer,
"tf.to_bfloat16": _cast_transformer,
"tf.to_complex128": _cast_transformer,
"tf.to_complex64": _cast_transformer,
"tf.to_double": _cast_transformer,
"tf.to_float": _cast_transformer,
"tf.to_int32": _cast_transformer,
"tf.to_int64": _cast_transformer,
"tf.nn.softmax_cross_entropy_with_logits":
_softmax_cross_entropy_with_logits_transformer,
"tf.image.extract_glimpse": _extract_glimpse_transformer,
"tf.image.resize_area": _image_resize_transformer,
"tf.image.resize_bicubic": _image_resize_transformer,
"tf.image.resize_bilinear": _image_resize_transformer,
"tf.image.resize_nearest_neighbor": _image_resize_transformer,
"tf.nn.fractional_avg_pool": _pool_seed_transformer,
"tf.nn.fractional_max_pool": _pool_seed_transformer,
"tf.name_scope": _name_scope_transformer,
# TODO(b/129398290)
# "tf.string_split": _string_split_transformer,
"tf.strings.split": _string_split_rtype_transformer,
"tf.estimator.BaselineEstimator":
functools.partial(
_rename_if_arg_found_transformer,
arg_name="optimizer",
message=("tf.estimator.BaselineEstimator supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["optimizer"],
message=("tf.estimator.BaselineClassifier supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.BaselineRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message=("tf.estimator.BaselineRegressor supports "
+ canned_estimator_msg_optimizer),
),
"tf.estimator.DNNEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNEstimator no longer takes "
"input_layer_partitioner, so the call was converted to "
"compat.v1."
),
"tf.estimator.DNNClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNClassifier " + canned_estimator_msg,
),
"tf.estimator.DNNRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.DNNRegressor " + canned_estimator_msg,
),
"tf.estimator.LinearEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearEstimator " + canned_estimator_msg,
),
"tf.estimator.LinearClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearClassifier " + canned_estimator_msg,
),
"tf.estimator.LinearRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=["input_layer_partitioner", "optimizer"],
message="tf.estimator.LinearRegressor " + canned_estimator_msg,
),
"tf.estimator.DNNLinearCombinedEstimator":
functools.partial(
_rename_if_any_arg_found_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedEstimator "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedClassifier":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedClassifier "
+ canned_estimator_msg),
),
"tf.estimator.DNNLinearCombinedRegressor":
functools.partial(
_rename_if_arg_found_and_add_loss_reduction_transformer,
arg_names=[
"input_layer_partitioner", "dnn_optimizer",
"linear_optimizer"
],
message=("tf.estimator.DNNLinearCombinedRegressor "
+ canned_estimator_msg),
),
"tf.device": functools.partial(
_rename_if_arg_found_transformer, arg_name="device_name",
arg_ok_predicate=_is_ast_str, remove_if_ok=False,
message="tf.device no longer takes functions as an argument. "
"We could not determine that the argument value is a string, so "
"the call was converted to compat.v1."),
"tf.zeros_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.zeros_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.ones_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.ones_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.while_loop": functools.partial(
_rename_if_arg_found_transformer,
arg_name="return_same_structure",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.while_loop no longer takes 'return_same_structure' "
"argument and behaves as if return_same_structure=True. This call "
"site specifies something other than return_same_structure=True, "
"so it was converted to compat.v1."),
"tf.nn.ctc_beam_search_decoder": functools.partial(
_rename_if_arg_found_transformer,
arg_name="merge_repeated",
arg_ok_predicate=_is_ast_false, remove_if_ok=True,
message="tf.nn.ctc_beam_search_decoder no longer takes the "
"'merge_repeated' argument and behaves as if merge_repeated=False. "
"This call site specifies something other than "
"merge_repeated=False, so it was converted to compat.v1."),
"tf.nn.erosion2d": functools.partial(
_add_argument_transformer,
arg_name="data_format",
arg_value_ast=ast.Str("NHWC")),
"tf.contrib.summary.always_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="True"),
"tf.contrib.summary.audio": _add_summary_step_transformer,
"tf.contrib.summary.generic": _add_summary_step_transformer,
"tf.contrib.summary.histogram": _add_summary_step_transformer,
"tf.contrib.summary.image": _add_summary_step_transformer,
"tf.contrib.summary.never_record_summaries": functools.partial(
_add_summary_recording_cond_transformer, cond="False"),
"tf.contrib.summary.scalar": _add_summary_step_transformer,
"tf.contrib.layers.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"tf.contrib.layers.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"tf.contrib.layers.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"tf.contrib.layers.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.initializers.uniform_unit_scaling":
_add_uniform_scaling_initializer_transformer,
"tf.uniform_unit_scaling_initializer":
_add_uniform_scaling_initializer_transformer,
"slim.l1_regularizer":
_contrib_layers_l1_regularizer_transformer,
"slim.l2_regularizer":
_contrib_layers_l2_regularizer_transformer,
"slim.xavier_initializer":
_contrib_layers_xavier_initializer_transformer,
"slim.xavier_initializer_conv2d":
_contrib_layers_xavier_initializer_transformer,
"slim.variance_scaling_initializer":
_contrib_layers_variance_scaling_initializer_transformer,
"tf.keras.models.save_model": functools.partial(
_add_argument_transformer,
arg_name="save_format",
arg_value_ast=ast.Str("h5")),
}
self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
def preprocess(self, root_node):
  """Scan the tree for version-pinned TF imports before converting.

  If such an import is found, the conversion spec is reduced so that only
  module deprecations are reported and every other rewrite is skipped.

  Args:
    root_node: AST root of the file about to be converted.

  Returns:
    The (log, warnings_and_errors) pair collected by the analysis visitor.
  """
  import_analyzer = ast_edits.PastaAnalyzeVisitor(TFAPIImportAnalysisSpec())
  import_analyzer.visit(root_node)

  if set(import_analyzer.results):
    # A version-specific TF import was detected: blank out every conversion
    # table so that only module deprecation checks remain active.
    for table_name in ("function_handle", "function_reorders",
                       "function_keyword_renames", "symbol_renames",
                       "function_warnings", "change_to_function",
                       "function_transformers", "import_renames"):
      setattr(self, table_name, {})
    self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS
  return import_analyzer.log, import_analyzer.warnings_and_errors
def clear_preprocessing(self):
  """Restore the full conversion spec after a `preprocess` run.

  Re-running __init__ rebuilds every rename/transform table from scratch,
  undoing the reduction performed when a pinned TF import was detected.
  """
  self.__init__()
def _is_ast_str(node):
"""Determine whether this node represents a string."""
allowed_types = [ast.Str]
if hasattr(ast, "Bytes"):
allowed_types += [ast.Bytes]
if hasattr(ast, "JoinedStr"):
allowed_types += [ast.JoinedStr]
if hasattr(ast, "FormattedValue"):
allowed_types += [ast.FormattedValue]
return isinstance(node, allowed_types)
def _is_ast_true(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is True
else:
return isinstance(node, ast.Name) and node.id == "True"
def _is_ast_false(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is False
else:
return isinstance(node, ast.Name) and node.id == "False"
# Lots of unused arguments below, since these are called in a standard manner.
# pylint: disable=unused-argument
def _rename_if_arg_found_transformer(parent, node, full_name, name, logs,
                                     arg_name=None,
                                     arg_ok_predicate=None,
                                     remove_if_ok=False,
                                     message=None):
  """Replaces the given call with tf.compat.v1 if the given arg is found.

  The function must be called with all named args (so it should also be
  added to the reorders/renames). Behavior:

  * arg absent: call site left alone, returns None.
  * arg present and arg_ok_predicate accepts it: left alone (and the
    argument is dropped from the call when remove_if_ok is True).
  * otherwise: `compat.v1` is spliced between `tf` and the function name.

  Args:
    parent: Parent of node.
    node: ast.Call node to maybe modify.
    full_name: full name of function to modify
    name: name of function to modify
    logs: list of logs to append to
    arg_name: name of the argument to look for
    arg_ok_predicate: predicate callable with the ast of the argument value,
      returns whether the argument value is allowed.
    remove_if_ok: remove the argument if present and ok as determined by
      arg_ok_predicate.
    message: message to print if a non-ok arg is found (and hence, the
      function is renamed to its compat.v1 version).

  Returns:
    node, if it was modified, else None.
  """
  arg_present, arg_value = ast_edits.get_arg_value(node, arg_name)
  if not arg_present:
    return None

  # Argument is acceptable: either leave the call alone or just strip the
  # now-redundant keyword.
  if arg_ok_predicate and arg_ok_predicate(arg_value):
    if not remove_if_ok:
      return None
    for index, keyword in enumerate(node.keywords):
      if keyword.arg == arg_name:
        del node.keywords[index]
        logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                     "Removed argument %s for function %s" % (
                         arg_name, full_name or name)))
        break
    return node

  # Problematic argument: rewrite the callee as its compat.v1 twin. A full
  # name is required here, so node.func must be an attribute chain.
  new_name = full_name.replace("tf.", "tf.compat.v1.", 1)
  node.func = ast_edits.full_name_node(new_name)
  logs.append((
      ast_edits.INFO, node.lineno, node.col_offset,
      "Renaming %s to %s because argument %s is present. %s" %
      (full_name, new_name, arg_name, message if message is not None else "")
  ))
  return node
def _add_argument_transformer(parent, node, full_name, name, logs,
                              arg_name, arg_value_ast):
  """Appends `arg_name=arg_value_ast` as the call's final keyword argument."""
  new_keyword = ast.keyword(arg=arg_name, value=arg_value_ast)
  node.keywords.append(new_keyword)
  logs.append((
      ast_edits.INFO, node.lineno, node.col_offset,
      "Adding argument '%s' to call to %s." % (pasta.dump(new_keyword),
                                               full_name or name)
  ))
  return node
def _iterator_transformer(parent, node, full_name, name, logs):
  """Rewrites dataset.make_*_iterator() method calls as tf.compat.v1.data calls."""
  # Calls already spelled through tf.data or tf.compat.v1.data are covered by
  # the plain renames; this transformer only converts true method calls into
  # function calls.
  if full_name and (full_name.startswith("tf.compat.v1.data") or
                    full_name.startswith("tf.data")):
    return None

  # Only attribute calls (x.f(...)) can be rewritten. We are only invoked for
  # Attribute nodes, so this should never trigger.
  if not isinstance(node.func, ast.Attribute):
    return None

  # x.f(y) becomes tf.compat.v1.data.f(x, y); x already carries valid
  # position info, so it can be reused directly as the first argument.
  node.args.insert(0, node.func.value)
  node.func.value = ast_edits.full_name_node("tf.compat.v1.data")

  logs.append((ast_edits.WARNING, node.lineno, node.col_offset,
               "Changing dataset.%s() to tf.compat.v1.data.%s(dataset). "
               "Please check this transformation.\n" % (name, name)))
  return node
def _dropout_transformer(parent, node, full_name, name, logs):
  """Replace keep_prob with 1-rate.

  tf.nn.dropout's second argument changed meaning from `keep_prob` to
  `rate` (= 1 - keep_prob), so the old value is rewritten as
  `1 - (old_value)` whether it was passed by keyword or positionally.
  """
  def _replace_keep_prob_node(parent, old_value):
    """Replaces old_value with 1-(old_value)."""
    one = ast.Num(n=1)
    # Zero out the position so codegen does not try to order it after
    # old_value.
    one.lineno = 0
    one.col_offset = 0
    new_value = ast.BinOp(left=one, op=ast.Sub(),
                          right=old_value)
    # This copies the prefix and suffix on old_value to new_value.
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    ast.copy_location(new_value, old_value)
    # Put parentheses around keep_prob.value (and remove the old prefix/
    # suffix, they should only be around new_value).
    pasta.base.formatting.set(old_value, "prefix", "(")
    pasta.base.formatting.set(old_value, "suffix", ")")

  # Check if we have a keep_prob keyword arg
  for keep_prob in node.keywords:
    if keep_prob.arg == "keep_prob":
      logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                   "Changing keep_prob arg of tf.nn.dropout to rate\n"))
      keep_prob.arg = "rate"
      _replace_keep_prob_node(keep_prob, keep_prob.value)
      return node

  # Maybe it was a positional arg. keep_prob is the second positional
  # argument, so nothing can be fixed automatically if it is absent.
  if len(node.args) < 2:
    logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
                 "tf.nn.dropout called without arguments, so "
                 "automatic fix was disabled. tf.nn.dropout has changed "
                 "the semantics of the second argument."))
  else:
    _replace_keep_prob_node(node, node.args[1])
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Changing keep_prob arg of tf.nn.dropout to rate, and "
                 "recomputing value.\n"))

  return node
def _cast_transformer(parent, node, full_name, name, logs):
  """Transforms tf.to_int*/tf.to_float etc. into tf.cast(..., dtype=...)."""
  # The target dtype is encoded in the function name: "to_int32" -> "int32".
  dtype_str = name[3:]
  # "to_float" and "to_double" do not spell out the full dtype name.
  if dtype_str == "float":
    dtype_str = "float32"
  elif dtype_str == "double":
    dtype_str = "float64"

  dtype_attr = ast.Attribute(value=ast.Name(id="tf", ctx=ast.Load()),
                             attr=dtype_str, ctx=ast.Load())
  new_arg = ast.keyword(arg="dtype", value=dtype_attr)

  # A positional `name` argument would land after the inserted dtype kwarg,
  # so convert it to name=... first to keep the call valid.
  if len(node.args) == 2:
    node.keywords.append(ast.keyword(arg="name", value=node.args.pop()))

  # Python3 ast requires the args for the Attribute, but codegen will mess up
  # the arg order if we just set them to 0.
  dtype_attr.lineno = node.lineno
  dtype_attr.col_offset = node.col_offset + 100

  node.keywords.append(new_arg)
  if isinstance(node.func, ast.Attribute):
    node.func.attr = "cast"
  else:
    assert isinstance(node.func, ast.Name)
    node.func.id = "cast"

  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Changed %s call to tf.cast(..., dtype=tf.%s)." % (full_name,
                                                                  dtype_str)))
  return node
def _softmax_cross_entropy_with_logits_transformer(
    parent, node, full_name, name, logs):
  """Wrap labels argument with stop_gradients."""
  def _wrap_label(parent, old_value):
    """Wrap labels with tf.stop_gradient.

    Returns True if the value was wrapped, False if it was already a
    tf.stop_gradient(...) call and was left untouched.
    """
    # Avoid double-wrapping when the caller already passed
    # tf.stop_gradient(labels).
    already_stop_grad = (isinstance(old_value, ast.Call) and
                         isinstance(old_value.func, ast.Attribute) and
                         old_value.func.attr == "stop_gradient" and
                         isinstance(old_value.func.value, ast.Name) and
                         old_value.func.value.id == "tf")
    if already_stop_grad:
      return False
    try:
      new_value = ast.Call(
          ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
          [old_value], [])
    except TypeError:
      # Python 2's ast.Call constructor also requires the starargs and
      # kwargs positional slots.
      new_value = ast.Call(
          ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
          [old_value], [], None, None)

    # This copies the prefix and suffix on old_value to new_value.
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    ast.copy_location(new_value, old_value)
    return True

  # Check if we have a labels keyword arg. NOTE: positional labels are not
  # handled here; the call is expected to use keyword args after reordering.
  for karg in node.keywords:
    if karg.arg == "labels":
      if _wrap_label(karg, karg.value):
        logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                     "Changing labels arg of "
                     "tf.nn.softmax_cross_entropy_with_logits to "
                     "tf.stop_gradient(labels). Please check this "
                     "transformation.\n"))
      return node
  return node
def _image_resize_transformer(parent, node, full_name, name, logs):
  """Transforms image.resize_* to image.resize(..., method=*, ...)."""
  # The ResizeMethod enum name is derived from the function name suffix,
  # e.g. "resize_bilinear" -> "BILINEAR".
  resize_method = name[7:].upper()
  new_arg = ast.keyword(arg="method",
                        value=ast.Attribute(
                            value=ast.Attribute(
                                value=ast.Attribute(
                                    value=ast.Name(id="tf", ctx=ast.Load()),
                                    attr="image", ctx=ast.Load()),
                                attr="ResizeMethod", ctx=ast.Load()),
                            attr=resize_method, ctx=ast.Load()))

  # Ensures a valid transformation when a positional name arg is given:
  # the 4th positional argument is preserve_aspect_ratio, the 3rd is
  # align_corners.
  if len(node.args) == 4:
    pos_arg = ast.keyword(arg="preserve_aspect_ratio",
                          value=node.args[-1])
    node.args = node.args[:-1]
    node.keywords.append(pos_arg)
  if len(node.args) == 3:
    # NOTE(review): pos_arg is built but never appended here, so a
    # positional align_corners value is dropped — presumably intentional
    # since tf.image.resize no longer supports align_corners (a WARNING is
    # emitted via function_warnings), but confirm.
    pos_arg = ast.keyword(arg="align_corners",
                          value=node.args[-1])
    node.args = node.args[:-1]

  # Drop any align_corners keyword for the same reason.
  new_keywords = []
  for kw in node.keywords:
    if kw.arg != "align_corners":
      new_keywords.append(kw)
  node.keywords = new_keywords

  # Python3 ast requires the args for the Attribute, but codegen will mess up
  # the arg order if we just set them to 0.
  new_arg.value.lineno = node.lineno
  new_arg.value.col_offset = node.col_offset+100

  node.keywords.append(new_arg)
  if isinstance(node.func, ast.Attribute):
    node.func.attr = "resize"
  else:
    assert isinstance(node.func, ast.Name)
    node.func.id = "resize"

  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Changed %s call to tf.image.resize(..., "
               "method=tf.image.ResizeMethod.%s)." % (full_name,
                                                      resize_method)))
  return node
def _pool_seed_transformer(parent, node, full_name, name, logs):
  """Removes seed2 and deterministic, and adds non-zero seed if needed.

  tf.nn.fractional_{avg,max}_pool no longer take `seed2` or `deterministic`;
  determinism is now requested by passing a non-zero `seed`.

  This requires that the function uses all kwargs (add to renames!).
  """
  seed_arg = None        # the existing seed keyword, if any
  deterministic = False  # True when deterministic=<not literal False> found
  modified = False       # True when any keyword was dropped
  new_keywords = []

  for kw in node.keywords:
    if sys.version_info[:2] >= (3, 5) and isinstance(kw, ast.Starred):
      pass  # **kwargs expansions cannot be inspected; keep them as-is.
    elif kw.arg == "seed":
      seed_arg = kw
    elif kw.arg == "seed2" or kw.arg == "deterministic":
      lineno = getattr(kw, "lineno", node.lineno)
      col_offset = getattr(kw, "col_offset", node.col_offset)
      logs.append((ast_edits.INFO, lineno, col_offset,
                   "Removed argument %s for function %s" % (
                       kw.arg, full_name or name)))
      if kw.arg == "deterministic":
        if not _is_ast_false(kw.value):
          deterministic = True
      modified = True
      continue
    new_keywords.append(kw)

  if deterministic:
    if seed_arg is None:
      # Determinism was requested but no seed is set: supply a fixed one.
      new_keywords.append(ast.keyword(arg="seed", value=ast.Num(42)))
      # Bug fix: was logs.add(), but logs is a list.
      logs.append((
          ast_edits.INFO, node.lineno, node.col_offset,
          "Adding seed=42 to call to %s since determinism was requested" % (
              full_name or name)
      ))
    else:
      # Bug fix: was logs.add() and the message was missing its % args.
      logs.append((
          ast_edits.WARNING, node.lineno, node.col_offset,
          "The deterministic argument is deprecated for %s, pass a "
          "non-zero seed for determinism. The deterministic argument is "
          "present, possibly not False, and the seed is already set. The "
          "converter cannot determine whether it is nonzero, please check."
          % (full_name or name)))

  if modified:
    node.keywords = new_keywords
    return node
  else:
    return None
def _extract_glimpse_transformer(parent, node, full_name, name, logs):
  """Rewrites uniform_noise=<bool> of tf.image.extract_glimpse as noise=<str>."""

  def _replace_uniform_noise_node(parent, old_value):
    """Replaces old_value with ('uniform' if old_value else 'gaussian')."""
    uniform = ast.Str(s="uniform")
    gaussian = ast.Str(s="gaussian")
    new_value = ast.IfExp(body=uniform, test=old_value, orelse=gaussian)
    # This copies the prefix and suffix on old_value to new_value.
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    ast.copy_location(new_value, old_value)
    # Put parentheses around noise.value.test (and remove the old prefix/
    # suffix, they should only be around new_value.test), so that:
    # "uniform" if (a if b else c) else "gaussian" is valid.
    pasta.base.formatting.set(new_value.test, "prefix", "(")
    pasta.base.formatting.set(new_value.test, "suffix", ")")

  # Check if we have a uniform_noise keyword arg
  for uniform_noise in node.keywords:
    if uniform_noise.arg == "uniform_noise":
      logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                   "Changing uniform_noise arg of tf.image.extract_glimpse "
                   "to noise, and recomputing value. Please check this "
                   "transformation.\n"))
      uniform_noise.arg = "noise"
      _replace_uniform_noise_node(uniform_noise, uniform_noise.value)
      return node

  # `uniform_noise` is the sixth positional argument (index 5), so since it
  # is optional, nothing needs to be done if len(node.args) < 6.
  # Bug fix: the guard was `>= 5`, which would raise IndexError on
  # node.args[5] when exactly five positional args are present.
  if len(node.args) >= 6:
    _replace_uniform_noise_node(node, node.args[5])
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Changing uniform_noise arg of tf.image.extract_glimpse to "
                 "noise, and recomputing value.\n"))
  return node
def _add_summary_step_transformer(parent, node, full_name, name, logs):
  """Adds a step argument to the summary API call if not specified.

  The inserted argument value is
  tf.compat.v1.train.get_or_create_global_step().
  """
  if any(kw.arg == "step" for kw in node.keywords):
    return node

  default_value = "tf.compat.v1.train.get_or_create_global_step()"
  # Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
  node.keywords.append(
      ast.keyword(arg="step", value=pasta.parse(default_value)))
  logs.append((
      ast_edits.WARNING, node.lineno, node.col_offset,
      "Summary API writing function %s now requires a 'step' argument; "
      "inserting default of %s." % (full_name or name, default_value)))
  return node
def _add_summary_recording_cond_transformer(parent, node, full_name, name, logs,
                                            cond):
  """Adds cond argument to tf.contrib.summary.xxx_record_summaries().

  This anticipates their rename to tf.summary.record_if(), which requires
  the cond argument.
  """
  # Parse with pasta so the inserted expression keeps clean formatting.
  cond_ast = pasta.parse(cond)
  node.args.append(cond_ast)
  logs.append((
      ast_edits.INFO, node.lineno, node.col_offset,
      "Adding `%s` argument to %s in anticipation of it being renamed to "
      "tf.compat.v2.summary.record_if()" % (cond, full_name or name)))
  return node
def _add_loss_reduction_transformer(parent, node, full_name, name, logs):
  """Adds a loss_reduction argument if not specified.

  The default loss_reduction of tf.estimator.*Classifier and
  tf.estimator.*Regressor changed to SUM_OVER_BATCH_SIZE, so existing call
  sites are pinned to the old default `tf.keras.losses.Reduction.SUM`.

  Note: to apply this transformation, symbol must be added
  to reordered_function_names above.
  """
  if any(kw.arg == "loss_reduction" for kw in node.keywords):
    return node

  default_value = "tf.keras.losses.Reduction.SUM"
  # Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
  node.keywords.append(
      ast.keyword(arg="loss_reduction", value=pasta.parse(default_value)))
  logs.append((
      ast_edits.INFO, node.lineno, node.col_offset,
      "%s: Default value of loss_reduction has been changed to "
      "SUM_OVER_BATCH_SIZE; inserting old default value %s.\n"
      % (full_name or name, default_value)))
  return node
def _rename_if_any_arg_found_transformer(
    parent,
    node,
    full_name,
    name,
    logs,
    arg_names=None,
    arg_ok_predicate=None,
    remove_if_ok=False,
    message=None):
  """Replaces the call with tf.compat.v1 if any of the arg_names is found.

  Applies `_rename_if_arg_found_transformer` once per candidate argument
  name, accumulating any modification into `node`.

  Args:
    parent: Parent of node.
    node: ast.Call node to modify.
    full_name: full name of function to modify.
    name: name of function to modify.
    logs: list of logs to append to.
    arg_names: list of names of the argument to look for.
    arg_ok_predicate: predicate callable with the ast of the argument value,
      returns whether the argument value is allowed.
    remove_if_ok: remove the argument if present and ok as determined by
      arg_ok_predicate.
    message: message to print if a non-ok arg is found (and hence, the
      function is renamed to its compat.v1 version).

  Returns:
    node (possibly modified in place).
  """
  for candidate in arg_names:
    maybe_modified = _rename_if_arg_found_transformer(
        parent, node, full_name, name, logs, candidate, arg_ok_predicate,
        remove_if_ok, message)
    if maybe_modified is not None:
      node = maybe_modified
  return node
def _rename_if_arg_found_and_add_loss_reduction_transformer(
    parent,
    node,
    full_name,
    name,
    logs,
    arg_names=None,
    arg_ok_predicate=None,
    remove_if_ok=False,
    message=None):
  """Combines the _rename_if_arg_found and _add_loss_reduction transformers.

  First pins the old loss_reduction default, then checks each candidate
  argument name and renames the call to compat.v1 when one is found.

  Args:
    parent: Parent of node.
    node: ast.Call node to maybe modify.
    full_name: full name of function to modify
    name: name of function to modify
    logs: list of logs to append to
    arg_names: list of names of the argument to look for
    arg_ok_predicate: predicate callable with the ast of the argument value,
      returns whether the argument value is allowed.
    remove_if_ok: remove the argument if present and ok as determined by
      arg_ok_predicate.
    message: message to print if a non-ok arg is found (and hence, the
      function is renamed to its compat.v1 version).

  Returns:
    node (possibly modified in place).
  """
  node = _add_loss_reduction_transformer(parent, node, full_name, name, logs)
  for candidate in arg_names:
    maybe_modified = _rename_if_arg_found_transformer(
        parent, node, full_name, name, logs, candidate, arg_ok_predicate,
        remove_if_ok, message)
    if maybe_modified is not None:
      node = maybe_modified
  return node
def _add_uniform_scaling_initializer_transformer(
    parent, node, full_name, name, logs):
  """Updates references to uniform_unit_scaling_initializer.

  Transforms:
  tf.uniform_unit_scaling_initializer(factor, seed, dtype) to
  tf.compat.v1.keras.initializers.VarianceScaling(
      scale=factor, distribution="uniform", seed=seed)

  Note: to apply this transformation, symbol must be added
  to reordered_function_names above.
  """
  # VarianceScaling calls the old `factor` parameter `scale`.
  for keyword_arg in node.keywords:
    if keyword_arg.arg == "factor":
      keyword_arg.arg = "scale"

  # Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
  node.keywords.append(
      ast.keyword(arg="distribution", value=pasta.parse("\"uniform\"")))

  # Swap the callee for the VarianceScaling initializer while preserving the
  # original position info on the new name node.
  lineno = node.func.value.lineno
  col_offset = node.func.value.col_offset
  node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
  node.func.value.lineno = lineno
  node.func.value.col_offset = col_offset
  node.func.attr = "VarianceScaling"
  return node
def _contrib_layers_xavier_initializer_transformer(
    parent, node, full_name, name, logs):
  """Updates references to contrib.layers.xavier_initializer.

  Transforms:
  tf.contrib.layers.xavier_initializer(uniform, seed, dtype) to
  tf.compat.v1.keras.initializers.VarianceScaling(
      scale=1.0, mode="fan_avg",
      distribution=("uniform" if uniform else "truncated_normal"),
      seed=seed, dtype=dtype)

  Returns: The new node
  """
  def _get_distribution(old_value):
    """Returns an AST matching the following:
    ("uniform" if (old_value) else "truncated_normal")
    """
    dist = pasta.parse("\"uniform\" if old_value else \"truncated_normal\"")
    ifexpr = dist.body[0].value
    # Splice the caller's original expression in as the condition.
    pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)

    pasta.base.formatting.set(dist, "prefix", "(")
    pasta.base.formatting.set(dist, "suffix", ")")

    return dist

  found_distribution = False
  # A `uniform` keyword becomes a distribution=... conditional expression.
  for keyword_arg in node.keywords:
    if keyword_arg.arg == "uniform":
      found_distribution = True
      keyword_arg.arg = "distribution"

      old_value = keyword_arg.value
      new_value = _get_distribution(keyword_arg.value)

      pasta.ast_utils.replace_child(keyword_arg, old_value, new_value)

      # Parenthesize the rewritten value so the conditional stays valid
      # inside the keyword argument.
      pasta.base.formatting.set(keyword_arg.value, "prefix", "(")
      pasta.base.formatting.set(keyword_arg.value, "suffix", ")")

  # VarianceScaling defaults differ from xavier's, so pin scale and mode.
  new_keywords = []
  scale = pasta.parse("1.0")
  new_keywords.append(ast.keyword(arg="scale", value=scale))
  mode = pasta.parse("\"fan_avg\"")
  new_keywords.append(ast.keyword(arg="mode", value=mode))
  # Positional args are (uniform, seed, dtype) in that order.
  if len(node.args) >= 1:
    found_distribution = True
    dist = _get_distribution(node.args[0])
    new_keywords.append(ast.keyword(arg="distribution", value=dist))
  if not found_distribution:
    # xavier_initializer defaulted to uniform=True, so keep that behavior.
    # Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
    uniform_dist = pasta.parse("\"uniform\"")
    new_keywords.append(ast.keyword(arg="distribution", value=uniform_dist))
  if len(node.args) >= 2:
    new_keywords.append(ast.keyword(arg="seed", value=node.args[1]))
  if len(node.args) >= 3:
    new_keywords.append(ast.keyword(arg="dtype", value=node.args[2]))
  node.args = []

  node.keywords = new_keywords + node.keywords

  # Swap the callee while preserving position info on the new name node.
  lineno = node.func.value.lineno
  col_offset = node.func.value.col_offset

  node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
  node.func.value.lineno = lineno
  node.func.value.col_offset = col_offset
  node.func.attr = "VarianceScaling"

  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Changing tf.contrib.layers xavier initializer"
               " to a tf.compat.v1.keras.initializers.VarianceScaling and"
               " converting arguments.\n"))

  return node
def _contrib_layers_variance_scaling_initializer_transformer(
    parent, node, full_name, name, logs):
  """Updates references to contrib.layers.variance_scaling_initializer.

  Transforms:
  tf.contrib.layers.variance_scaling_initializer(
    factor, mode, uniform, seed, dtype
  ) to
  tf.compat.v1.keras.initializers.VarianceScaling(
      scale=factor, mode=mode.lower(),
      distribution=("uniform" if uniform else "truncated_normal"),
      seed=seed, dtype=dtype)

  And handles the case where no factor is provided and scale needs to be
  set to 2.0 to match contrib's default instead of tf.keras.initializer's
  default of 1.0
  """
  def _replace_distribution(parent, old_value):
    """Replaces old_value: ("uniform" if (old_value) else "truncated_normal")"""
    new_value = pasta.parse(
        "\"uniform\" if old_value else \"truncated_normal\"")
    ifexpr = new_value.body[0].value
    # Substitute the user's expression as the test of the conditional.
    pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value)
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    # Wrap the whole conditional expression in parentheses so precedence
    # is preserved wherever it is spliced in.
    pasta.base.formatting.set(new_value, "prefix", "(")
    pasta.base.formatting.set(new_value, "suffix", ")")
  def _replace_mode(parent, old_value):
    """Replaces old_value with (old_value).lower()."""
    new_value = pasta.parse("mode.lower()")
    mode = new_value.body[0].value.func
    pasta.ast_utils.replace_child(mode, mode.value, old_value)
    # This copies the prefix and suffix on old_value to new_value.
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    # Put parentheses around old_value (and remove the old prefix/
    # suffix, they should only be around new_value).
    pasta.base.formatting.set(old_value, "prefix", "(")
    pasta.base.formatting.set(old_value, "suffix", ")")
  # Need to keep track of scale because slim & keras
  # have different defaults
  found_scale = False
  for keyword_arg in node.keywords:
    if keyword_arg.arg == "factor":
      keyword_arg.arg = "scale"
      found_scale = True
    if keyword_arg.arg == "mode":
      _replace_mode(keyword_arg, keyword_arg.value)
    if keyword_arg.arg == "uniform":
      keyword_arg.arg = "distribution"
      _replace_distribution(keyword_arg, keyword_arg.value)
  # Handle any detected positional arguments
  # (positions: 0=factor, 1=mode, 2=uniform per the old signature).
  if len(node.args) >= 1:
    found_scale = True
  if len(node.args) >= 2:
    _replace_mode(node, node.args[1])
  if len(node.args) >= 3:
    _replace_distribution(node, node.args[2])
  # If no scale was provided, make tf 2.0 use slim's default factor
  if not found_scale:
    # Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
    scale_value = pasta.parse("2.0")
    node.keywords = ([ast.keyword(arg="scale", value=scale_value)]
                     + node.keywords)
  # Swap the callee for the Keras initializer, preserving source position
  # so later log messages still point at the original call site.
  lineno = node.func.value.lineno
  col_offset = node.func.value.col_offset
  node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers")
  node.func.value.lineno = lineno
  node.func.value.col_offset = col_offset
  node.func.attr = "VarianceScaling"
  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Changing tf.contrib.layers.variance_scaling_initializer"
               " to a tf.compat.v1.keras.initializers.VarianceScaling and"
               " converting arguments.\n"))
  return node
def _contrib_layers_l1_regularizer_transformer(
    parent, node, full_name, name, logs):
  """Replace slim l1 regularizer with Keras one.

  This entails renaming the 'scale' arg to 'l' and dropping any
  provided scope arg.
  """
  # Rename the "scale" keyword and remember any "scope" keyword so it can
  # be removed afterwards (mutating the list while iterating is unsafe).
  scope_kw = None
  for kw in node.keywords:
    if kw.arg == "scale":
      logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                   "Renaming scale arg of regularizer\n"))
      kw.arg = "l"
    if kw.arg == "scope":
      scope_kw = kw
  if scope_kw is not None:
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Dropping scope arg from tf.contrib.layers.l1_regularizer,"
                 " because it is unsupported in tf.keras.regularizers.l1\n"))
    node.keywords.remove(scope_kw)
  if len(node.args) > 1:
    # Scope may also have been passed positionally: keep only the first
    # positional argument (the scale).
    node.args = node.args[:1]
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Dropping scope arg from tf.contrib.layers.l1_regularizer,"
                 " because it is unsupported in tf.keras.regularizers.l1\n"))
  # Rewrite the callee as tf.keras.regularizers.l1, keeping the original
  # source position on the new name node.
  old_lineno = node.func.value.lineno
  old_col_offset = node.func.value.col_offset
  node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
  node.func.value.lineno = old_lineno
  node.func.value.col_offset = old_col_offset
  node.func.attr = "l1"
  return node
def _contrib_layers_l2_regularizer_transformer(
    parent, node, full_name, name, logs):
  """Replace slim l2 regularizer with Keras one, with l=0.5*scale.

  Also drops the scope argument.
  """
  def _replace_scale_node(parent, old_value):
    """Replaces old_value with 0.5*(old_value)."""
    half = ast.Num(n=0.5)
    half.lineno = 0
    half.col_offset = 0
    new_value = ast.BinOp(left=half, op=ast.Mult(),
                          right=old_value)
    # This copies the prefix and suffix on old_value to new_value.
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    # Put parentheses around scale.value (and remove the old prefix/
    # suffix, they should only be around new_value).
    pasta.base.formatting.set(old_value, "prefix", "(")
    pasta.base.formatting.set(old_value, "suffix", ")")
  # Check if we have a scale or scope keyword arg
  scope_keyword = None
  for keyword in node.keywords:
    if keyword.arg == "scale":
      keyword.arg = "l"
      _replace_scale_node(keyword, keyword.value)
    if keyword.arg == "scope":
      scope_keyword = keyword
  # Maybe it was a positional arg
  if len(node.args) >= 1:
    _replace_scale_node(node, node.args[0])
  # Remove the scope keyword or arg if it is present
  if scope_keyword:
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Dropping scope arg from tf.contrib.layers.l2_regularizer,"
                 " because it is unsupported in tf.keras.regularizers.l2\n"))
    node.keywords.remove(scope_keyword)
  if len(node.args) > 1:
    node.args = node.args[:1]
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Dropping scope arg from tf.contrib.layers.l2_regularizer,"
                 " because it is unsupported in tf.keras.regularizers.l2\n"))
  # NOTE(review): this log entry is emitted even when no scale argument was
  # present (and nothing was multiplied) — confirm that is intended.
  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Multiplying scale arg of tf.contrib.layers.l2_regularizer"
               " by half to what tf.keras.regularizers.l2 expects.\n"))
  # Rewrite the callee as tf.keras.regularizers.l2, preserving position.
  lineno = node.func.value.lineno
  col_offset = node.func.value.col_offset
  node.func.value = ast_edits.full_name_node("tf.keras.regularizers")
  node.func.value.lineno = lineno
  node.func.value.col_offset = col_offset
  node.func.attr = "l2"
  return node
def _name_scope_transformer(parent, node, full_name, name, logs):
  """Fix name scope invocation to use 'default_name' and omit 'values' args.

  Returns the (possibly rewritten) call node, or None when the call has
  neither `name` nor `default_name` and cannot be converted (an ERROR is
  logged in that case).
  """
  name_found, name = ast_edits.get_arg_value(node, "name", 0)
  default_found, default_name = ast_edits.get_arg_value(node, "default_name", 1)
  # If an actual name was given...
  if name_found and pasta.dump(name) != "None":
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "`name` passed to `name_scope`. Because you may be re-entering"
                 " an existing scope, it is not safe to convert automatically, "
                 " the v2 name_scope does not support re-entering scopes by"
                 " name.\n"))
    # Rename to compat.v1
    new_name = "tf.compat.v1.name_scope"
    logs.append((ast_edits.INFO, node.func.lineno, node.func.col_offset,
                 "Renamed %r to %r" % (full_name, new_name)))
    new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)
    ast.copy_location(new_name_node, node.func)
    pasta.ast_utils.replace_child(node, node.func, new_name_node)
    return node
  if default_found:
    # New name scope doesn't have name, but it has a default name. We use
    # name=default_name, and values can be dropped (it's only for
    # error reporting and useless outside of graph mode).
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Using default_name as name in call to name_scope.\n"))
    # Remove all args other than name
    node.args = []
    node.keywords = [ast.keyword(arg="name", value=default_name)]
    return node
  # Neither name nor default_name: conversion is impossible; log an error
  # and fall through (implicitly returning None).
  logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
               "name_scope call with neither name nor default_name cannot be "
               "converted properly."))
def _rename_to_compat_v1(node, full_name, logs, reason):
  """Rewrite a call to `full_name` as the matching tf.compat.v1 symbol."""
  compat_name = full_name.replace("tf.", "tf.compat.v1.", 1)
  return _rename_func(node, full_name, compat_name, logs, reason)
def _rename_func(node, full_name, new_name, logs, reason):
  """Swap the callee of `node` from `full_name` to `new_name`, logging why."""
  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Renamed %r to %r: %s" % (full_name, new_name, reason)))
  # Build the replacement name node in the same context and at the same
  # source position as the old callee, then splice it in.
  replacement = ast_edits.full_name_node(new_name, node.func.ctx)
  ast.copy_location(replacement, node.func)
  pasta.ast_utils.replace_child(node, node.func, replacement)
  return node
def _string_split_transformer(parent, node, full_name, name, logs):
  """Update tf.string_split arguments: skip_empty, sep, result_type, source."""
  # Check the skip_empty parameter: if not false, then use compat.v1.
  for i, kw in enumerate(node.keywords):
    if kw.arg == "skip_empty":
      if _is_ast_false(kw.value):
        logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                     "removed argument skip_empty for tf.string_split."))
        node.keywords.pop(i)
        break
      else:
        return _rename_to_compat_v1(
            node, full_name, logs, "tf.string_split's replacement no longer "
            "takes the skip_empty argument.")
  # Check the sep parameter: if it's definitely an empty string, use
  # tf.strings.bytes_split(). If we can't tell, then use compat.v1.
  found_sep = False
  for i, kw in enumerate(node.keywords):
    if kw.arg == "sep":
      found_sep = True
      if isinstance(kw.value, ast.Str):
        if kw.value.s == "":
          node = _rename_func(
              node, full_name, "tf.strings.bytes_split", logs,
              "Splitting bytes is not handled by tf.strings.bytes_split().")
          # NOTE(review): pop() while iterating skips the next keyword, which
          # is harmless here since "sep" cannot legally appear twice.
          node.keywords.pop(i)
      else:
        return _rename_to_compat_v1(
            node, full_name, logs,
            "The semantics for tf.string_split's sep parameter have changed "
            "when sep is the empty string; but sep is not a string literal, "
            "so we can't tell if it's an empty string.")
  if not found_sep:
    return _rename_to_compat_v1(
        node, full_name, logs,
        "The semantics for tf.string_split's sep parameter have changed "
        "when sep unspecified: it now splits on all whitespace, not just "
        "the space character.")
  # Check the result_type parameter
  return _string_split_rtype_transformer(parent, node, full_name, name, logs)
def _string_split_rtype_transformer(parent, node, full_name, name, logs):
  """Update tf.strings.split arguments: result_type, source."""
  # Remove the "result_type" argument.
  # need_to_sparse tracks whether the call previously produced a
  # SparseTensor (the old default), requiring a .to_sparse() wrapper below.
  need_to_sparse = True
  for i, kw in enumerate(node.keywords):
    if kw.arg == "result_type":
      if (isinstance(kw.value, ast.Str) and
          kw.value.s in ("RaggedTensor", "SparseTensor")):
        logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                     "Removed argument result_type=%r for function %s" %
                     (kw.value.s, full_name or name)))
        node.keywords.pop(i)
        if kw.value.s == "RaggedTensor":
          need_to_sparse = False
      else:
        # result_type is not a recognized string literal; punt to compat.v1.
        return _rename_to_compat_v1(
            node, full_name, logs,
            "%s no longer takes the result_type parameter." % full_name)
      break
  # The old "source" keyword is now called "input".
  for i, kw in enumerate(node.keywords):
    if kw.arg == "source":
      kw.arg = "input"
  # If necessary, add a call to .to_sparse() to convert the output of
  # strings.split from a RaggedTensor to a SparseTensor.
  if need_to_sparse:
    if (isinstance(parent, ast.Attribute) and parent.attr == "to_sparse"):
      return  # Prevent infinite recursion (since child nodes are transformed)
    logs.append(
        (ast_edits.INFO, node.lineno, node.col_offset,
         "Adding call to RaggedTensor.to_sparse() to result of strings.split, "
         "since it now returns a RaggedTensor."))
    node = ast.Attribute(value=copy.deepcopy(node), attr="to_sparse")
    # ast.Call's constructor arity differs between Python 2 and 3.
    try:
      node = ast.Call(node, [], [])
    except TypeError:
      node = ast.Call(node, [], [], None, None)
  return node
| 39.303718 | 80 | 0.623415 |
4552553f592cf4efb0106ee5bb573b60f10db348 | 10,986 | py | Python | deploy/deploy.py | dsarlis/Cloud-Burst | 2329fcb9874e818732cc0f38bc857add262ea107 | [
"Apache-2.0"
] | 1 | 2015-12-24T17:40:53.000Z | 2015-12-24T17:40:53.000Z | deploy/deploy.py | dsarlis/Cloud-Burst | 2329fcb9874e818732cc0f38bc857add262ea107 | [
"Apache-2.0"
] | null | null | null | deploy/deploy.py | dsarlis/Cloud-Burst | 2329fcb9874e818732cc0f38bc857add262ea107 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import threading
from boto.ec2.autoscale import AutoScaleConnection, Tag
from boto.exception import EC2ResponseError, BotoServerError
import time
import os
from boto.ec2.connection import EC2Connection
from boto.ec2.elb import HealthCheck, ELBConnection
from boto.ec2.autoscale import LaunchConfiguration
from boto.ec2.autoscale import AutoScalingGroup
from boto.ec2.autoscale import ScalingPolicy
from boto.ec2.cloudwatch import MetricAlarm
from boto.ec2.cloudwatch import CloudWatchConnection
from sys import argv
def read_properties(filename):
    """Read a properties file and return its lines as a tuple.

    Args:
        filename: path to a text file with one property per line.

    Returns:
        A tuple of the file's lines with newline characters removed.
    """
    # Use a context manager so the file handle is always closed; the
    # original iterated over a bare open() and leaked the handle.
    with open(filename) as prop_file:
        return tuple(line.replace('\n', '') for line in prop_file)
class MSBManager:
    """Facade over boto's EC2, ELB, AutoScaling and CloudWatch connections.

    create_* methods are mostly idempotent: they first look for an existing
    resource with the requested name and reuse it when found.
    """
    def __init__(self, aws_access_key, aws_secret_key):
        # One boto connection per AWS service, all sharing the same credentials.
        self.ec2_conn = EC2Connection(aws_access_key, aws_secret_key)
        self.elb_conn = ELBConnection(aws_access_key, aws_secret_key)
        self.auto_scale_conn = AutoScaleConnection(aws_access_key, aws_secret_key)
        self.cloud_watch_conn = CloudWatchConnection(aws_access_key, aws_secret_key)
        self.default_cooldown = 60  # seconds between auto-scaling actions
    def get_security_group(self, name):
        """Return the security group called `name`, or None if absent."""
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        return sgs[0] if sgs else None
    def create_security_group(self, name, description):
        """Return the group `name`, creating it (open to the world) if needed."""
        sgs = [g for g in self.ec2_conn.get_all_security_groups() if g.name == name]
        sg = sgs[0] if sgs else None
        if not sgs:
            sg = self.ec2_conn.create_security_group(name, description)
            try:
                # Allow all protocols and ports from any address.
                sg.authorize(ip_protocol="-1", from_port=None, to_port=None, cidr_ip="0.0.0.0/0", dry_run=False)
            except EC2ResponseError:
                # Rule may already exist; a failed authorize is non-fatal.
                pass
        return sg
    def remove_security_group(self, name):
        """Delete the security group called `name`."""
        self.ec2_conn.delete_security_group(name=name)
    def create_instance(self, image, instance_type, key_name, zone, security_groups, tags):
        """Return a running instance tagged tags['Name'], launching one if none exists."""
        instance = None
        reservations = self.ec2_conn.get_all_instances()
        for reservation in reservations:
            for i in reservation.instances:
                if 'Name' in i.tags and i.tags['Name'] == tags['Name'] and i.state == 'running':
                    instance = i
                    break
        if not instance:
            reservation = self.ec2_conn.run_instances(image, instance_type=instance_type, key_name=key_name, placement=zone, security_groups=security_groups, monitoring_enabled=True)
            instance = reservation.instances[0]
            # Poll until the instance leaves the "pending" state.
            while not instance.update() == 'running':
                time.sleep(5)
            time.sleep(10)
        self.ec2_conn.create_tags([instance.id], tags)
        return instance
    def request_spot_instance(self, bid, image, instance_type, key_name, zone, security_groups, tags):
        """Request a spot instance, block until fulfilled, then tag it.

        NOTE(review): returns None, not the instance id — callers that
        collect the return value (see the module-level helper) get None.
        NOTE(review): the "not ready" print and 60s sleep also run once
        after the id has been found — confirm this is intended.
        """
        req = self.ec2_conn.request_spot_instances(price=bid, instance_type=instance_type, image_id=image, placement=zone,key_name=key_name, security_groups=security_groups)
        instance_id = None
        while not instance_id:
            job_sir_id = req[0].id
            requests = self.ec2_conn.get_all_spot_instance_requests()
            for sir in requests:
                if sir.id == job_sir_id:
                    instance_id = sir.instance_id
                    break
            print 'Job {} not ready'.format(job_sir_id)
            time.sleep(60)
        self.ec2_conn.create_tags([instance_id], tags)
    def remove_instance(self, instance_id):
        """Terminate a single instance."""
        self.remove_instances([instance_id])
    def remove_instances(self, instance_ids):
        """Terminate every instance in `instance_ids`."""
        self.ec2_conn.terminate_instances(instance_ids)
    def remove_instance_by_tag_name(self, name):
        """Terminate all running instances whose Name tag equals `name`."""
        reservations = self.ec2_conn.get_all_instances()
        data_centers_intance_ids = []
        for reservation in reservations:
            for instance in reservation.instances:
                if 'Name' in instance.tags and instance.tags['Name'] == name and instance.state == 'running':
                    data_centers_intance_ids.append(instance.id)
        if data_centers_intance_ids:
            self.remove_instances(data_centers_intance_ids)
    def create_elb(self, name, zone, project_tag_value, security_group_id, instance_ids=None):
        """Return the load balancer `name`, creating and tagging it if needed."""
        lbs = [l for l in self.elb_conn.get_all_load_balancers() if l.name == name]
        lb = lbs[0] if lbs else None
        if not lb:
            # Health check hits HTTP:80/heartbeat; 8 failures mark unhealthy.
            hc = HealthCheck(timeout=50, interval=60, healthy_threshold=2, unhealthy_threshold=8, target='HTTP:80/heartbeat')
            ports = [(80, 80, 'http')]
            zones = [zone]
            lb = self.elb_conn.create_load_balancer(name, zones, ports)
            self.elb_conn.apply_security_groups_to_lb(name, [security_group_id])
            lb.configure_health_check(hc)
            if instance_ids:
                lb.register_instances(instance_ids)
            # boto2 has no first-class ELB tagging helper; issue AddTags directly.
            params = {'LoadBalancerNames.member.1': lb.name,
                      'Tags.member.1.Key': '15619project',
                      'Tags.member.1.Value': project_tag_value}
            lb.connection.get_status('AddTags', params, verb='POST')
        return lb
    def remove_elb(self, name):
        """Delete the load balancer `name`."""
        self.elb_conn.delete_load_balancer(name)
    def create_launch_configuration(self, name, image, key_name, security_groups, instance_type):
        """Return the launch configuration `name`, creating it if needed."""
        lcs = [l for l in self.auto_scale_conn.get_all_launch_configurations() if l.name == name]
        lc = lcs[0] if lcs else None
        if not lc:
            lc = LaunchConfiguration(name=name, image_id=image, key_name=key_name,
                                     security_groups=[security_groups], instance_type=instance_type)
            self.auto_scale_conn.create_launch_configuration(lc)
        return lc
    def remove_launch_configuration(self, name):
        """Delete the launch configuration `name`."""
        self.auto_scale_conn.delete_launch_configuration(name)
    def create_autoscaling_group(self, name, lb_name, zone, tags, instance_ids=None):
        """Create a 4-instance autoscaling group with CPU-based scale alarms.

        NOTE(review): create_launch_configuration() is invoked without its
        five required arguments, so this call raises TypeError as written —
        the intended launch-configuration parameters need to be supplied.
        """
        lc = self.create_launch_configuration()
        as_groups = [a for a in self.auto_scale_conn.get_all_groups() if a.name == name]
        as_group = as_groups[0] if as_groups else None
        if not as_group:
            as_group = AutoScalingGroup(group_name=name, load_balancers=[lb_name], availability_zones=[zone],
                                        launch_config=lc, min_size=4, max_size=4, health_check_type='ELB', health_check_period=120, connection=self.auto_scale_conn,
                                        default_cooldown=self.default_cooldown, desired_capacity=4,
                                        tags=tags)
            self.auto_scale_conn.create_auto_scaling_group(as_group)
            if instance_ids:
                self.auto_scale_conn.attach_instances(name, instance_ids)
            # One-instance-at-a-time scale up/down policies.
            scale_up_policy = ScalingPolicy(name='scale_up', adjustment_type='ChangeInCapacity', as_name=name, scaling_adjustment=1, cooldown=self.default_cooldown)
            scale_down_policy = ScalingPolicy(name='scale_down', adjustment_type='ChangeInCapacity', as_name=name, scaling_adjustment=-1, cooldown=self.default_cooldown)
            self.auto_scale_conn.create_scaling_policy(scale_up_policy)
            self.auto_scale_conn.create_scaling_policy(scale_down_policy)
            # Re-fetch the policies so they carry the ARNs the alarms need.
            scale_up_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=['scale_up'])[0]
            scale_down_policy = self.auto_scale_conn.get_all_policies(as_group=name, policy_names=['scale_down'])[0]
            alarm_dimensions = {'AutoScalingGroupName': name}
            # Scale up above 85% average CPU, down below 60%.
            scale_up_alarm = MetricAlarm(name='scale_up_on_cpu', namespace='AWS/EC2', metric='CPUUtilization',
                                         statistic='Average', comparison='>', threshold=85, period=60, evaluation_periods=1,
                                         alarm_actions=[scale_up_policy.policy_arn], dimensions=alarm_dimensions)
            self.cloud_watch_conn.create_alarm(scale_up_alarm)
            scale_down_alarm = MetricAlarm(name='scale_down_on_cpu', namespace='AWS/EC2', metric='CPUUtilization', statistic='Average',
                                           comparison='<', threshold=60, period=60, evaluation_periods=1,
                                           alarm_actions=[scale_down_policy.policy_arn], dimensions=alarm_dimensions)
            self.cloud_watch_conn.create_alarm(scale_down_alarm)
        return as_group
    def update_autoscaling_group_max_size(self, as_group, max_size):
        """Set and push a new max_size on `as_group`."""
        setattr(as_group, 'max_size', max_size)
        as_group.update()
    def update_autoscaling_group_min_size(self, as_group, min_size):
        """Set and push a new min_size on `as_group`."""
        setattr(as_group, 'min_size', min_size)
        as_group.update()
    def remove_autoscaling_group(self, name):
        """Delete the autoscaling group `name`."""
        self.auto_scale_conn.delete_auto_scaling_group(name)
def request_spot_instance(manager, bid, image, instance_type, key_name, zone, security_groups, tags, instances):
print 'Requesting spot instance with {} bid, image {} and {}'.format(bid, image, instance_type)
instances.append(manager.request_spot_instance(bid, image, instance_type, key_name, zone, security_groups, tags))
print 'Created spot instance with {} bid, image {} and {}'.format(bid, image, instance_type)
def deploy(remove=False):
    """Create the phase-1 frontend stack, or tear it down when remove=True.

    AWS credentials are read from the CLOUD_BURST_ACCESS_KEY and
    CLOUD_BURST_SECRET_KEY environment variables.
    """
    aws_access_key = os.environ['CLOUD_BURST_ACCESS_KEY']
    aws_secret_key = os.environ['CLOUD_BURST_SECRET_KEY']
    manager = MSBManager(aws_access_key, aws_secret_key)
    # Deployment constants: placement, key pair, security groups, AMI, sizing.
    region = 'us-east-1'
    zone = 'us-east-1c'
    key_name = 'cloudburstkey'
    ssh_http_sg_name = 'SSH/HTTP'
    http_sg_name = 'HTTP'
    phase = 'phase1'
    frontend_image = 'ami-c791c1a2'
    number_of_frontend_servers = 1
    frontend_server_bid = 0.06  # spot bid in USD/hour
    frontend_server_name = 'FrontendServer'
    frontend_elb_name = 'FrontendELB'
    frontend_servers = []
    if remove:
        manager.remove_instance_by_tag_name(frontend_server_name)
        print 'Frontend Servers removed'
        manager.remove_elb(frontend_elb_name)
        print 'Frontend ELB removed'
    else:
        # Request all spot instances in parallel, one thread per server.
        request_spot_instance_threads = []
        for dummy in xrange(number_of_frontend_servers):
            t = threading.Thread(target=request_spot_instance, args=(manager, frontend_server_bid, frontend_image, 'm3.large', key_name, zone, [ssh_http_sg_name], {'Name': frontend_server_name, '15619project': phase}, frontend_servers, ))
            t.start()
            request_spot_instance_threads.append(t)
        for request_spot_instance_thread in request_spot_instance_threads:
            request_spot_instance_thread.join()
        # NOTE(review): variable is named ssh_http_sg but looks up
        # http_sg_name — confirm which security group the ELB should use.
        # NOTE(review): frontend_servers collects the return values of
        # MSBManager.request_spot_instance, which returns None — verify
        # the .instances[0].id access below against a live run.
        ssh_http_sg = manager.get_security_group(http_sg_name)
        manager.create_elb(frontend_elb_name, zone, phase, ssh_http_sg.id, [frontend_server.instances[0].id for frontend_server in frontend_servers])
        print 'ELB {} created'.format(frontend_elb_name)
if __name__ == "__main__":
    # CLI entry point: "deploy" creates the stack, "remove" tears it down.
    if argv[1] == 'deploy':
        deploy()
    elif argv[1] == 'remove':
        deploy(True)
    else:
        print 'Invalid option'
    print 'Done'
1f5333b3f901fd0c7c65c27528cc876d420a058d | 641 | py | Python | bootstrapvz/common/tasks/folder.py | zeridon/bootstrap-vz | fcdc6993f59e521567fb101302b02312e741b88c | [
"Apache-2.0"
] | 207 | 2015-01-26T19:00:24.000Z | 2021-12-16T10:05:58.000Z | bootstrapvz/common/tasks/folder.py | zeridon/bootstrap-vz | fcdc6993f59e521567fb101302b02312e741b88c | [
"Apache-2.0"
] | 346 | 2015-01-01T08:56:09.000Z | 2019-06-10T08:03:05.000Z | bootstrapvz/common/tasks/folder.py | zeridon/bootstrap-vz | fcdc6993f59e521567fb101302b02312e741b88c | [
"Apache-2.0"
] | 124 | 2015-01-16T21:22:29.000Z | 2022-02-25T17:36:10.000Z | from bootstrapvz.base import Task
from bootstrapvz.common import phases
from . import volume
from . import workspace
class Create(Task):
description = 'Creating volume folder'
phase = phases.volume_creation
successors = [volume.Attach]
@classmethod
def run(cls, info):
import os.path
info.root = os.path.join(info.workspace, 'root')
info.volume.create(info.root)
class Delete(Task):
description = 'Deleting volume folder'
phase = phases.cleaning
successors = [workspace.DeleteWorkspace]
@classmethod
def run(cls, info):
info.volume.delete()
del info.root
| 22.892857 | 56 | 0.680187 |
05b26d88b7d38f7f56898b1cc5ae3efdba14ce2e | 29,247 | py | Python | tensorflow/tools/compatibility/ast_edits.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 9 | 2019-12-29T01:47:37.000Z | 2021-12-21T13:47:41.000Z | tensorflow/tools/compatibility/ast_edits.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 1 | 2019-06-18T07:56:15.000Z | 2019-06-18T07:56:15.000Z | tensorflow/tools/compatibility/ast_edits.py | PaulWang1905/tensorflow | ebf12d22b4801fb8dab5034cc94562bf7cc33fa0 | [
"Apache-2.0"
] | 2 | 2021-01-26T08:23:41.000Z | 2021-07-13T16:23:18.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts according to an API change specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import collections
import os
import re
import shutil
import sys
import tempfile
import traceback
import pasta
import six
# Some regular expressions we will need for parsing
FIND_OPEN = re.compile(r"^\s*(\[).*$")  # line whose first token opens a list
FIND_STRING_CHARS = re.compile(r"['\"]")  # any single or double quote
# Severity labels used in (severity, lineno, col, message) log tuples.
INFO = "INFO"
WARNING = "WARNING"
ERROR = "ERROR"
# new_name: replacement module path; excluded_prefixes: module prefixes
# that must keep their original import name.
ImportRename = collections.namedtuple(
    "ImportRename", ["new_name", "excluded_prefixes"])
def full_name_node(name, ctx=ast.Load()):
  """Make an Attribute or Name node for name.

  Translate a qualified name into nested Attribute nodes (and a Name node).

  Args:
    name: The name to translate to a node.
    ctx: What context this name is used in. Defaults to Load()

  Returns:
    A Name or Attribute node.
  """
  parts = name.split(".")
  node = ast.Name(id=parts[0], ctx=ast.Load())
  for attr in parts[1:]:
    node = ast.Attribute(value=node, attr=attr, ctx=ast.Load())
  # Only the outermost node carries the caller-supplied context; inner
  # nodes of an attribute chain are always loads.
  node.ctx = ctx
  return node
def get_arg_value(node, arg_name, arg_pos=None):
  """Get the value of an argument from a ast.Call node.

  This function goes through the positional and keyword arguments to check
  whether a given argument was used, and if so, returns its value (the node
  representing its value).

  This cannot introspect *args or **args, but it safely handles *args in
  Python3.5+.

  Args:
    node: The ast.Call node to extract arg values from.
    arg_name: The name of the argument to extract.
    arg_pos: The position of the argument (in case it's passed as a positional
      argument).

  Returns:
    A tuple (arg_present, arg_value) containing a boolean indicating whether
    the argument is present, and its value in case it is.
  """
  # Keyword arguments take precedence when a name is given.
  if arg_name is not None:
    for kw in node.keywords:
      if kw.arg == arg_name:
        return (True, kw.value)
  if arg_pos is not None:
    # *args cannot be introspected; drop Starred nodes (Python 3.5+) before
    # indexing so positions refer to the remaining plain arguments.
    candidates = node.args
    if sys.version_info[:2] >= (3, 5):
      candidates = [a for a in candidates if not isinstance(a, ast.Starred)]
    if 0 <= arg_pos < len(candidates):
      return (True, candidates[arg_pos])
  return (False, None)
def excluded_from_module_rename(module, import_rename_spec):
  """Check if this module import should not be renamed.

  Args:
    module: (string) module name.
    import_rename_spec: ImportRename instance.

  Returns:
    True if this import should not be renamed according to the
    import_rename_spec.
  """
  # A module is exempt as soon as any excluded prefix matches its start.
  return any(module.startswith(prefix)
             for prefix in import_rename_spec.excluded_prefixes)
class APIChangeSpec(object):
  """This class defines the transformations that need to happen.

  Subclasses describe one API migration; `_PastaEditVisitor` consults these
  fields while walking a parsed source tree.

  This class must provide the following fields:

  * `function_keyword_renames`: maps function names to a map of old -> new
    argument names
  * `symbol_renames`: maps function names to new function names
  * `change_to_function`: a set of function names that have changed (for
    notifications)
  * `function_reorders`: maps functions whose argument order has changed to the
    list of arguments in the new order
  * `function_warnings`: maps full names of functions to warnings that will be
    printed out if the function is used. (e.g. tf.nn.convolution())
  * `function_transformers`: maps function names to custom handlers
  * `module_deprecations`: maps module names to warnings that will be printed
    if the module is still used after all other transformations have run
  * `import_renames`: maps import name (must be a short name without '.')
    to ImportRename instance.

  For an example, see `TFAPIChangeSpec`.
  """
class _PastaEditVisitor(ast.NodeVisitor):
"""AST Visitor that processes function calls.
Updates function calls from old API version to new API version using a given
change spec.
"""
  def __init__(self, api_change_spec):
    """Create a visitor that applies `api_change_spec` (an APIChangeSpec)."""
    self._api_change_spec = api_change_spec
    self._log = []  # Holds 4-tuples: severity, line, col, msg.
    self._stack = []  # Allow easy access to parents.
  # Overridden to maintain a stack of nodes to allow for parent access
  def visit(self, node):
    """Visit `node`, keeping `self._stack` equal to its chain of ancestors."""
    self._stack.append(node)
    super(_PastaEditVisitor, self).visit(node)
    self._stack.pop()
@property
def errors(self):
return [log for log in self._log if log[0] == ERROR]
@property
def warnings(self):
return [log for log in self._log if log[0] == WARNING]
@property
def warnings_and_errors(self):
return [log for log in self._log if log[0] in (WARNING, ERROR)]
@property
def info(self):
return [log for log in self._log if log[0] == INFO]
  @property
  def log(self):
    """All recorded entries as (severity, lineno, col_offset, msg) tuples."""
    return self._log
def add_log(self, severity, lineno, col, msg):
self._log.append((severity, lineno, col, msg))
print("%s line %d:%d: %s" % (severity, lineno, col, msg))
def add_logs(self, logs):
"""Record a log and print it.
The log should be a tuple `(severity, lineno, col_offset, msg)`, which will
be printed and recorded. It is part of the log available in the `self.log`
property.
Args:
logs: The logs to add. Must be a list of tuples
`(severity, lineno, col_offset, msg)`.
"""
self._log.extend(logs)
for log in logs:
print("%s line %d:%d: %s" % log)
def _get_applicable_entries(self, transformer_field, full_name, name):
"""Get all list entries indexed by name that apply to full_name or name."""
# Transformers are indexed to full name, name, or no name
# as a performance optimization.
function_transformers = getattr(self._api_change_spec,
transformer_field, {})
glob_name = "*." + name if name else None
transformers = []
if full_name in function_transformers:
transformers.append(function_transformers[full_name])
if glob_name in function_transformers:
transformers.append(function_transformers[glob_name])
if "*" in function_transformers:
transformers.append(function_transformers["*"])
return transformers
def _get_applicable_dict(self, transformer_field, full_name, name):
"""Get all dict entries indexed by name that apply to full_name or name."""
# Transformers are indexed to full name, name, or no name
# as a performance optimization.
function_transformers = getattr(self._api_change_spec,
transformer_field, {})
glob_name = "*." + name if name else None
transformers = function_transformers.get("*", {}).copy()
transformers.update(function_transformers.get(glob_name, {}))
transformers.update(function_transformers.get(full_name, {}))
return transformers
def _get_full_name(self, node):
"""Traverse an Attribute node to generate a full name, e.g., "tf.foo.bar".
This is the inverse of `full_name_node`.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if node was not Attribute or Name.
i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _maybe_add_warning(self, node, full_name):
"""Adds an error to be printed about full_name at node."""
function_warnings = self._api_change_spec.function_warnings
if full_name in function_warnings:
level, message = function_warnings[full_name]
message = message.replace("<function name>", full_name)
self.add_log(level, node.lineno, node.col_offset,
"%s requires manual check. %s" % (full_name, message))
return True
else:
return False
def _maybe_add_module_deprecation_warning(self, node, full_name, whole_name):
"""Adds a warning if full_name is a deprecated module."""
warnings = self._api_change_spec.module_deprecations
if full_name in warnings:
level, message = warnings[full_name]
message = message.replace("<function name>", whole_name)
self.add_log(level, node.lineno, node.col_offset,
"Using member %s in deprecated module %s. %s" % (whole_name,
full_name,
message))
return True
else:
return False
  def _maybe_add_call_warning(self, node, full_name, name):
    """Print a warning when specific functions are called with selected args.

    The function _print_warning_for_function matches the full name of the called
    function, e.g., tf.foo.bar(). This function matches the function name that
    is called, as long as the function is an attribute. For example,
    `tf.foo.bar()` and `foo.bar()` are matched, but not `bar()`.

    Args:
      node: ast.Call object
      full_name: The precomputed full name of the callable, if one exists, None
        otherwise.
      name: The precomputed name of the callable, if one exists, None otherwise.

    Returns:
      Whether an error was recorded.
    """
    # Only look for *.-warnings here, the other will be handled by the Attribute
    # visitor. Also, do not warn for bare functions, only if the call func is
    # an attribute.
    warned = False
    if isinstance(node.func, ast.Attribute):
      warned = self._maybe_add_warning(node, "*." + name)
    # All arg warnings are handled here, since only we have the args
    arg_warnings = self._get_applicable_dict("function_arg_warnings",
                                             full_name, name)
    # Each entry is keyed by (keyword name, positional index); warn once per
    # argument that is actually present in the call. Sorted for
    # deterministic output order.
    for (kwarg, arg), (level, warning) in sorted(arg_warnings.items()):
      present, _ = get_arg_value(node, kwarg, arg)
      if present:
        warned = True
        warning_message = warning.replace("<function name>", full_name or name)
        self.add_log(level, node.lineno, node.col_offset,
                     "%s called with %s argument requires manual check: %s" %
                     (full_name or name, kwarg, warning_message))
    return warned
def _maybe_rename(self, parent, node, full_name):
"""Replace node (Attribute or Name) with a node representing full_name."""
new_name = self._api_change_spec.symbol_renames.get(full_name, None)
if new_name:
self.add_log(INFO, node.lineno, node.col_offset,
"Renamed %r to %r" % (full_name, new_name))
new_node = full_name_node(new_name, node.ctx)
ast.copy_location(new_node, node)
pasta.ast_utils.replace_child(parent, node, new_node)
return True
else:
return False
  def _maybe_change_to_function_call(self, parent, node, full_name):
    """Wraps node (typically, an Attribute or Expr) in a Call.

    Nothing is done when the parent is already a Call (the symbol is then
    already being invoked). Returns True iff the node was wrapped.
    """
    if full_name in self._api_change_spec.change_to_function:
      if not isinstance(parent, ast.Call):
        # ast.Call's constructor is really picky about how many arguments it
        # wants, and also, it changed between Py2 and Py3.
        if six.PY2:
          new_node = ast.Call(node, [], [], None, None)
        else:
          new_node = ast.Call(node, [], [])
        pasta.ast_utils.replace_child(parent, node, new_node)
        ast.copy_location(new_node, node)
        self.add_log(INFO, node.lineno, node.col_offset,
                     "Changed %r to a function call" % full_name)
        return True
    return False
  def _maybe_add_arg_names(self, node, full_name):
    """Make args into keyword args if function called full_name requires it.

    Returns True iff the call's positional args were converted to keywords.
    """
    function_reorders = self._api_change_spec.function_reorders
    if full_name in function_reorders:
      # `reordered` lists the parameter names in the old positional order.
      reordered = function_reorders[full_name]
      new_keywords = []
      idx = 0
      for arg in node.args:
        if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
          continue  # Can't move Starred to keywords
        keyword_arg = reordered[idx]
        keyword = ast.keyword(arg=keyword_arg, value=arg)
        new_keywords.append(keyword)
        idx += 1
      if new_keywords:
        self.add_log(INFO, node.lineno, node.col_offset,
                     "Added keywords to args of function %r" % full_name)
        # NOTE(review): node.args is cleared wholesale, so any ast.Starred
        # argument skipped above is silently dropped from the call --
        # confirm whether *args calls can reach this path.
        node.args = []
        # New keywords go first so they precede any keywords the caller
        # already wrote.
        node.keywords = new_keywords + (node.keywords or [])
        return True
    return False
  def _maybe_modify_args(self, node, full_name, name):
    """Rename keyword args if the function called full_name requires it.

    A rename target of None means the keyword argument is deleted outright.
    Returns True iff any keyword was renamed or removed.
    """
    renamed_keywords = self._get_applicable_dict("function_keyword_renames",
                                                 full_name, name)
    if not renamed_keywords:
      return False
    modified = False
    new_keywords = []
    for keyword in node.keywords:
      argkey = keyword.arg
      if argkey in renamed_keywords:
        modified = True
        if renamed_keywords[argkey] is None:
          # Deletion: log it and skip appending, dropping the keyword.
          lineno = getattr(keyword, "lineno", node.lineno)
          col_offset = getattr(keyword, "col_offset", node.col_offset)
          self.add_log(INFO, lineno, col_offset,
                       "Removed argument %s for function %s" % (
                           argkey, full_name or name))
        else:
          keyword.arg = renamed_keywords[argkey]
          lineno = getattr(keyword, "lineno", node.lineno)
          col_offset = getattr(keyword, "col_offset", node.col_offset)
          self.add_log(INFO, lineno, col_offset,
                       "Renamed keyword argument for %s from %s to %s" % (
                           full_name, argkey, renamed_keywords[argkey]))
          new_keywords.append(keyword)
      else:
        new_keywords.append(keyword)
    if modified:
      node.keywords = new_keywords
    return modified
  def visit_Call(self, node):  # pylint: disable=invalid-name
    """Handle visiting a call node in the AST.

    Runs, in order: call-site warnings, positional-to-keyword conversion,
    keyword renames/removals, then any registered function transformers.
    Args:
      node: Current Node
    """
    assert self._stack[-1] is node
    # Get the name for this call, so we can index stuff with it.
    full_name = self._get_full_name(node.func)
    if full_name:
      name = full_name.split(".")[-1]
    elif isinstance(node.func, ast.Name):
      name = node.func.id
    elif isinstance(node.func, ast.Attribute):
      name = node.func.attr
    else:
      name = None
    # Call standard transformers for this node.
    # Make sure warnings come first, since args or names triggering warnings
    # may be removed by the other transformations.
    self._maybe_add_call_warning(node, full_name, name)
    # Make all args into kwargs
    self._maybe_add_arg_names(node, full_name)
    # Argument name changes or deletions
    self._maybe_modify_args(node, full_name, name)
    # Call transformers. These have the ability to modify the node, and if they
    # do, will return the new node they created (or the same node if they just
    # changed it). They are given the parent, but we will take care of
    # integrating their changes into the parent if they return a new node.
    #
    # These are matched on the old name, since renaming is performed by the
    # Attribute visitor, which happens later.
    transformers = self._get_applicable_entries("function_transformers",
                                                full_name, name)
    parent = self._stack[-2]
    for transformer in transformers:
      logs = []
      new_node = transformer(parent, node, full_name, name, logs)
      self.add_logs(logs)
      if new_node and new_node is not node:
        pasta.ast_utils.replace_child(parent, node, new_node)
        node = new_node
        # Keep the traversal stack consistent so children see the new node.
        self._stack[-1] = node
    self.generic_visit(node)
  def visit_Attribute(self, node):  # pylint: disable=invalid-name
    """Handle bare Attributes i.e. [tf.foo, tf.bar]."""
    assert self._stack[-1] is node
    full_name = self._get_full_name(node)
    if full_name:
      parent = self._stack[-2]
      # Make sure the warning comes first, otherwise the name may have changed
      self._maybe_add_warning(node, full_name)
      # Once we did a modification, node is invalid and not worth inspecting
      # further. Also, we only perform modifications for simple nodes, so
      # There'd be no point in descending further.
      if self._maybe_rename(parent, node, full_name):
        return
      if self._maybe_change_to_function_call(parent, node, full_name):
        return
      # The isinstance check is enough -- a bare Attribute is never root.
      # Walk up the stack to the outermost Attribute in this dotted chain so
      # the deprecation message can show the whole expression.
      i = 2
      while isinstance(self._stack[-i], ast.Attribute):
        i += 1
      whole_name = pasta.dump(self._stack[-(i-1)])
      self._maybe_add_module_deprecation_warning(node, full_name, whole_name)
    self.generic_visit(node)
  def visit_Import(self, node):  # pylint: disable=invalid-name
    """Handle visiting an import node in the AST.

    Rewrites `import foo[.bar] [as f]` statements according to the spec's
    import_renames table, preserving the visible name via `as` aliases.
    Args:
      node: Current Node
    """
    new_aliases = []
    import_updated = False
    import_renames = getattr(self._api_change_spec, "import_renames", {})
    # This loop processes imports in the format
    # import foo as f, bar as b
    for import_alias in node.names:
      # Look for rename based on first component of from-import.
      # i.e. based on foo in foo.bar.
      import_first_component = import_alias.name.split(".")[0]
      import_rename_spec = import_renames.get(import_first_component, None)
      if not import_rename_spec or excluded_from_module_rename(
          import_alias.name, import_rename_spec):
        new_aliases.append(import_alias)  # no change needed
        continue
      # Swap only the first dotted component; the rest of the path stays.
      new_name = (
          import_rename_spec.new_name +
          import_alias.name[len(import_first_component):])
      # If current import is
      # import foo
      # then new import should preserve imported name:
      # import new_foo as foo
      # This happens when module has just one component.
      new_asname = import_alias.asname
      if not new_asname and "." not in import_alias.name:
        new_asname = import_alias.name
      new_alias = ast.alias(name=new_name, asname=new_asname)
      new_aliases.append(new_alias)
      import_updated = True
    # Replace the node if at least one import needs to be updated.
    if import_updated:
      assert self._stack[-1] is node
      parent = self._stack[-2]
      new_node = ast.Import(new_aliases)
      ast.copy_location(new_node, node)
      pasta.ast_utils.replace_child(parent, node, new_node)
      self.add_log(
          INFO, node.lineno, node.col_offset,
          "Changed import from %r to %r." %
          (pasta.dump(node), pasta.dump(new_node)))
    self.generic_visit(node)
  def visit_ImportFrom(self, node):  # pylint: disable=invalid-name
    """Handle visiting an import-from node in the AST.

    Rewrites `from foo import ...` statements per the spec's import_renames,
    splitting the statement in two when only some imported names qualify.
    Args:
      node: Current Node
    """
    if not node.module:
      # Bare relative import (`from . import x`) -- nothing to rename.
      self.generic_visit(node)
      return
    from_import = node.module
    # Look for rename based on first component of from-import.
    # i.e. based on foo in foo.bar.
    from_import_first_component = from_import.split(".")[0]
    import_renames = getattr(self._api_change_spec, "import_renames", {})
    import_rename_spec = import_renames.get(from_import_first_component, None)
    if not import_rename_spec:
      self.generic_visit(node)
      return
    # Split module aliases into the ones that require import update
    # and those that don't. For e.g. if we want to rename "a" to "b"
    # unless we import "a.c" in the following:
    # from a import c, d
    # we want to update import for "d" but not for "c".
    updated_aliases = []
    same_aliases = []
    for import_alias in node.names:
      full_module_name = "%s.%s" % (from_import, import_alias.name)
      if excluded_from_module_rename(full_module_name, import_rename_spec):
        same_aliases.append(import_alias)
      else:
        updated_aliases.append(import_alias)
    if not updated_aliases:
      self.generic_visit(node)
      return
    assert self._stack[-1] is node
    parent = self._stack[-2]
    # Replace first component of from-import with new name.
    new_from_import = (
        import_rename_spec.new_name +
        from_import[len(from_import_first_component):])
    updated_node = ast.ImportFrom(new_from_import, updated_aliases, node.level)
    ast.copy_location(updated_node, node)
    pasta.ast_utils.replace_child(parent, node, updated_node)
    # If some imports had to stay the same, add another import for them.
    additional_import_log = ""
    if same_aliases:
      same_node = ast.ImportFrom(from_import, same_aliases, node.level,
                                 col_offset=node.col_offset, lineno=node.lineno)
      ast.copy_location(same_node, node)
      # Insert the unchanged import just before the rewritten one.
      parent.body.insert(parent.body.index(updated_node), same_node)
      # Apply indentation to new node.
      pasta.base.formatting.set(
          same_node, "prefix",
          pasta.base.formatting.get(updated_node, "prefix"))
      additional_import_log = " and %r" % pasta.dump(same_node)
    self.add_log(
        INFO, node.lineno, node.col_offset,
        "Changed import from %r to %r%s." %
        (pasta.dump(node),
         pasta.dump(updated_node),
         additional_import_log))
    self.generic_visit(node)
class ASTCodeUpgrader(object):
  """Handles upgrading a set of Python files using a given API change spec."""
  def __init__(self, api_change_spec):
    # Fail fast on misuse: every method below relies on the spec's tables.
    if not isinstance(api_change_spec, APIChangeSpec):
      raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
                      type(api_change_spec))
    self._api_change_spec = api_change_spec
  def process_file(self, in_filename, out_filename):
    """Process the given python file for incompatible changes.
    Args:
      in_filename: filename to parse
      out_filename: output file to write to
    Returns:
      A tuple representing number of files processed, log of actions, errors
    """
    # Write to a temporary file, just in case we are doing an in-place modify.
    # pylint: disable=g-backslash-continuation
    with open(in_filename, "r") as in_file, \
        tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
      ret = self.process_opened_file(in_filename, in_file, out_filename,
                                     temp_file)
    # pylint: enable=g-backslash-continuation
    # Atomically publish the result over the output path.
    shutil.move(temp_file.name, out_filename)
    return ret
  def format_log(self, log, in_filename):
    # log is a (severity, lineno, col_offset, message) tuple, matching the
    # argument order of add_log calls in the visitor above.
    log_string = "%d:%d: %s: %s" % (log[1], log[2], log[0], log[3])
    if in_filename:
      return in_filename + ":" + log_string
    else:
      return log_string
  def update_string_pasta(self, text, in_filename):
    """Updates a file using pasta.

    Returns a (processed, new_text, logs, errors) tuple; processed is 0 when
    the source could not be parsed.
    """
    try:
      t = pasta.parse(text)
    except (SyntaxError, ValueError, TypeError):
      log = ["ERROR: Failed to parse.\n" + traceback.format_exc()]
      return 0, "", log, []
    visitor = _PastaEditVisitor(self._api_change_spec)
    visitor.visit(t)
    logs = [self.format_log(log, None) for log in visitor.log]
    errors = [self.format_log(error, in_filename)
              for error in visitor.warnings_and_errors]
    return 1, pasta.dump(t), logs, errors
  def _format_log(self, log, in_filename, out_filename):
    # Renders one file's worth of log lines as a banner-delimited report.
    text = "-" * 80 + "\n"
    text += "Processing file %r\n outputting to %r\n" % (in_filename,
                                                         out_filename)
    text += "-" * 80 + "\n\n"
    text += "\n".join(log) + "\n"
    text += "-" * 80 + "\n\n"
    return text
  def process_opened_file(self, in_filename, in_file, out_filename, out_file):
    """Process the given python file for incompatible changes.
    This function is split out to facilitate StringIO testing from
    tf_upgrade_test.py.
    Args:
      in_filename: filename to parse
      in_file: opened file (or StringIO)
      out_filename: output file to write to
      out_file: opened file (or StringIO)
    Returns:
      A tuple representing number of files processed, log of actions, errors
    """
    lines = in_file.readlines()
    processed_file, new_file_content, log, process_errors = (
        self.update_string_pasta("".join(lines), in_filename))
    # Only write output when parsing succeeded (processed_file is 1).
    if out_file and processed_file:
      out_file.write(new_file_content)
    return (processed_file,
            self._format_log(log, in_filename, out_filename),
            process_errors)
  def process_tree(self, root_directory, output_root_directory,
                   copy_other_files):
    """Processes upgrades on an entire tree of python files in place.
    Note that only Python files are processed. If you have custom code in
    other languages, you will need to manually upgrade those.
    Args:
      root_directory: Directory to walk and process.
      output_root_directory: Directory to use as base.
      copy_other_files: Copy files that are not touched by this converter.
    Returns:
      A tuple of files processed, the report string for all files, and a dict
      mapping filenames to errors encountered in that file.
    """
    if output_root_directory == root_directory:
      return self.process_tree_inplace(root_directory)
    # make sure output directory doesn't exist
    if output_root_directory and os.path.exists(output_root_directory):
      print("Output directory %r must not already exist." %
            (output_root_directory))
      sys.exit(1)
    # make sure output directory does not overlap with root_directory
    norm_root = os.path.split(os.path.normpath(root_directory))
    norm_output = os.path.split(os.path.normpath(output_root_directory))
    if norm_root == norm_output:
      print("Output directory %r same as input directory %r" %
            (root_directory, output_root_directory))
      sys.exit(1)
    # Collect list of files to process (we do this to correctly handle if the
    # user puts the output directory in some sub directory of the input dir)
    files_to_process = []
    files_to_copy = []
    for dir_name, _, file_list in os.walk(root_directory):
      py_files = [f for f in file_list if f.endswith(".py")]
      copy_files = [f for f in file_list if not f.endswith(".py")]
      for filename in py_files:
        fullpath = os.path.join(dir_name, filename)
        fullpath_output = os.path.join(output_root_directory,
                                       os.path.relpath(fullpath,
                                                       root_directory))
        files_to_process.append((fullpath, fullpath_output))
      if copy_other_files:
        for filename in copy_files:
          fullpath = os.path.join(dir_name, filename)
          fullpath_output = os.path.join(output_root_directory,
                                         os.path.relpath(
                                             fullpath, root_directory))
          files_to_copy.append((fullpath, fullpath_output))
    file_count = 0
    tree_errors = {}
    report = ""
    report += ("=" * 80) + "\n"
    report += "Input tree: %r\n" % root_directory
    report += ("=" * 80) + "\n"
    for input_path, output_path in files_to_process:
      output_directory = os.path.dirname(output_path)
      if not os.path.isdir(output_directory):
        os.makedirs(output_directory)
      file_count += 1
      _, l_report, l_errors = self.process_file(input_path, output_path)
      tree_errors[input_path] = l_errors
      report += l_report
    for input_path, output_path in files_to_copy:
      output_directory = os.path.dirname(output_path)
      if not os.path.isdir(output_directory):
        os.makedirs(output_directory)
      shutil.copy(input_path, output_path)
    return file_count, report, tree_errors
  def process_tree_inplace(self, root_directory):
    """Process a directory of python files in place."""
    files_to_process = []
    for dir_name, _, file_list in os.walk(root_directory):
      py_files = [os.path.join(dir_name,
                               f) for f in file_list if f.endswith(".py")]
      files_to_process += py_files
    file_count = 0
    tree_errors = {}
    report = ""
    report += ("=" * 80) + "\n"
    report += "Input tree: %r\n" % root_directory
    report += ("=" * 80) + "\n"
    # process_file is safe for in-place use: it writes via a temp file.
    for path in files_to_process:
      file_count += 1
      _, l_report, l_errors = self.process_file(path, path)
      tree_errors[path] = l_errors
      report += l_report
    return file_count, report, tree_errors
| 36.241636 | 80 | 0.662017 |
5f53e9fb15c1f9c37e2810299a8253e4ade9b0f2 | 9,246 | py | Python | dr1dl-pyspark.py | quinngroup/r1dl-benchmarks | a29def2b78f9e90cbf8e5e93f7b407731be295ad | [
"MIT"
] | null | null | null | dr1dl-pyspark.py | quinngroup/r1dl-benchmarks | a29def2b78f9e90cbf8e5e93f7b407731be295ad | [
"MIT"
] | null | null | null | dr1dl-pyspark.py | quinngroup/r1dl-benchmarks | a29def2b78f9e90cbf8e5e93f7b407731be295ad | [
"MIT"
] | null | null | null | import argparse
import functools
import numpy as np
import os.path
import scipy.linalg as sla
import sys
import datetime
import os
import psutil
from pyspark import SparkContext, SparkConf
from pyspark.mllib.linalg import SparseVector
###################################
# Utility functions
###################################
def select_topr(vct_input, r):
    """
    Returns the indices of the r greatest elements of the input vector.

    Parameters
    ----------
    vct_input : numpy.ndarray
        1-D array to search.
    r : int or float
        Number of top elements to select. Callers in this script pass a
        float (R = pnonzero * P), so the value is truncated to an int here;
        np.argpartition rejects a non-integer kth.

    Returns
    -------
    idxs_n : numpy.ndarray
        Indices of the r largest elements (in no particular order).
    """
    r = int(r)  # tolerate fractional counts from pnonzero * P
    temp = np.argpartition(-vct_input, r)
    idxs_n = temp[:r]
    return idxs_n
def input_to_rowmatrix(raw_rdd, norm):
    """
    Utility function for reading the matrix data.

    Turns an RDD of whitespace-delimited text lines into an RDD of
    (row_index, numpy.ndarray) pairs, optionally whitening each row.
    """
    # Parse each line of the input into a numpy array of floats. This requires
    # several steps.
    # 1: Split each string into a list of strings.
    # 2: Convert each string to a float.
    # 3: Convert each list to a numpy array.
    p_and_n = functools.partial(parse_and_normalize, norm = norm)
    # zipWithIndex assigns each line its 0-based row number, which becomes
    # the RDD key for all later row-wise operations.
    numpy_rdd = raw_rdd \
        .zipWithIndex() \
        .map(lambda x: (x[1], p_and_n(x[0])))
    return numpy_rdd
###################################
# Spark helper functions
###################################
def parse_and_normalize(line, norm):
    """
    Parses one whitespace-delimited text line into a float array.

    When norm is truthy, the row is whitened: shifted to zero mean and
    scaled to unit Euclidean norm.
    """
    values = np.array(list(map(float, line.strip().split())))
    if norm:
        values = values - values.mean()   # center at zero
        values = values / sla.norm(values)  # scale to unit L2 norm
    return values
def vector_matrix(row):
    """
    Applies u * S by row-wise multiplication, followed by a reduction on
    each column into a single vector.

    Emits (column_index, scalar) pairs; the caller sums them per column
    with reduceByKey to obtain v = u * S.
    """
    row_index, vector = row  # Split up the [key, value] pair.
    u = _U_.value  # Extract the broadcasted vector "u".
    # This means we're in the first iteration and we just want a random
    # vector. To ensure all the workers generate the same random vector,
    # we have to seed the RNG identically.
    if type(u) == tuple:
        T, seed = u
        np.random.seed(seed)
        u = np.random.random(T)
        u -= u.mean()
        u /= sla.norm(u)
    # Only this row's scalar component of u is needed.
    u = u[row_index]
    # Generate a list of [key, value] output pairs, one for each
    # element of vector.
    out = []
    for i in range(vector.shape[0]):
        out.append([i, u * vector[i]])
    return out
def matrix_vector(row):
    """
    Computes one element of S * v: the inner product between a single row
    of S and the broadcast sparse vector v, keyed by the row index. No
    reduction step is needed afterwards.
    """
    row_index, row_values = row
    sparse_v = _V_.value
    # Only the nonzero positions of v can contribute to the inner product.
    dense_part = row_values[sparse_v.indices]
    return [row_index, np.dot(dense_part, sparse_v.values)]
def deflate(row):
    """
    Removes the rank-1 component u * v^T from this row of the data matrix.

    Because S is distributed row-wise, only the single element of u that
    matches this row's index and the nonzero entries of v are needed: the
    row is decremented by u[row_index] * v at v's nonzero positions.
    """
    row_index, row_values = row
    u, v = _U_.value, _V_.value
    scale = u[row_index]
    row_values[v.indices] = row_values[v.indices] - scale * v.values
    return [row_index, row_values]
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description = 'PySpark Dictionary Learning',
        add_help = 'How to use', prog = 'python R1DL_Spark.py <args>')
    # Inputs.
    parser.add_argument("-i", "--input", required = True,
        help = "Input file containing the matrix S.")
    parser.add_argument("-T", "--rows", type = int, required = True,
        help = "Number of rows (observations) in the input matrix S.")
    parser.add_argument("-P", "--cols", type = int, required = True,
        help = "Number of columns (features) in the input matrix S.")
    # Optional.
    parser.add_argument("-r", "--pnonzero", type = float, default = 0.07,
        help = "Percentage of non-zero elements. [DEFAULT: 0.07]")
    parser.add_argument("-m", "--dictatoms", type = int, default = 5,
        help = "Number of the dictionary atoms. [DEFAULT: 5]")
    parser.add_argument("-e", "--epsilon", type = float, default = 0.01,
        help = "The convergence criteria in the ALS step. [DEFAULT: 0.01]")
    parser.add_argument("--normalize", action = "store_true",
        help = "If set, normalizes input data.")
    parser.add_argument("--debug", action = "store_true",
        help = "If set, turns out debug output.")
    # Spark options.
    parser.add_argument("--partitions", type = int, default = None,
        help = "Number of RDD partitions to use. [DEFAULT: 4 * CPUs]")
    parser.add_argument("--execmem", default = "8g",
        help = "Amount of memory for each executor. [DEFAULT: 8g]")
    # Outputs.
    parser.add_argument("-d", "--dictionary", required = True,
        help = "Output path to dictionary file.(file_D)")
    parser.add_argument("-o", "--output", required = True,
        help = "Output path to z matrix.(file_z)")
    parser.add_argument("--prefix", required = True,
        help = "Prefix strings to the output files")
    args = vars(parser.parse_args())
    if args['debug']: print(datetime.datetime.now())
    # Initialize the SparkContext. This is where you can create RDDs,
    # the Spark abstraction for distributed data sets.
    conf = SparkConf()
    conf.set("spark.executor.memory", args['execmem'])
    sc = SparkContext(conf = conf)
    partitions = args['partitions'] if args['partitions'] is not None else (4 * sc.defaultParallelism)
    # Read the data and convert it into a thunder RowMatrix.
    raw_rdd = sc.textFile(args['input'], minPartitions = partitions)
    S = input_to_rowmatrix(raw_rdd, args['normalize'])
    S.cache()
    ##################################################################
    # Here's where the real fun begins.
    #
    # First, we're going to initialize some variables we'll need for the
    # following operations. Next, we'll start the optimization loops. Finally,
    # we'll perform the stepping and deflation operations until convergence.
    #
    # Sound like fun?
    ##################################################################
    T = args['rows']
    P = args['cols']
    epsilon = args['epsilon']  # convergence stopping criterion
    M = args['dictatoms']  # dimensionality of the learned dictionary
    # NOTE(review): R is a float here (fraction * column count); it is
    # consumed by select_topr as an element count -- confirm truncation
    # behavior is intended.
    R = args['pnonzero'] * P  # enforces sparsity
    u_new = np.zeros(T)  # atom updates at each iteration
    v = np.zeros(P)
    max_iterations = P * 10
    file_D = os.path.join(args['dictionary'], "{}_D.txt".format(args["prefix"]))
    file_z = os.path.join(args['output'], "{}_z.txt".format(args["prefix"]))
    # Start the loop!
    for m in range(M):
        # In lieu of generating a dense random vector and broadcasting it, we
        # instead compute a random seed. Randomly, of course.
        seed = np.random.randint(max_iterations + 1, high = 4294967295)
        np.random.seed(seed)
        u_old = np.random.random(T)
        num_iterations = 0
        delta = 2 * epsilon
        # Start the inner loop: this learns a single atom.
        while num_iterations < max_iterations and delta > epsilon:
            # P2: Vector-matrix multiplication step. Computes v.
            # On iteration 0 the (T, seed) tuple tells workers to regenerate
            # the same seeded random u locally instead of shipping it.
            _U_ = sc.broadcast(u_old) if num_iterations > 0 else sc.broadcast((T, seed))
            v = S \
                .flatMap(vector_matrix) \
                .reduceByKey(lambda x, y: x + y) \
                .collect()
            # Sort by column index, then keep just the summed values.
            v = np.take(sorted(v), indices = 1, axis = 1)
            # Use our previous method to select the top R.
            indices = np.sort(select_topr(v, R))
            sv = SparseVector(P, indices, v[indices])
            # Broadcast the sparse vector.
            _V_ = sc.broadcast(sv)
            # P1: Matrix-vector multiplication step. Computes u.
            u_new = S \
                .map(matrix_vector) \
                .collect()
            u_new = np.take(sorted(u_new), indices = 1, axis = 1)
            # Subtract off the mean and normalize.
            u_new -= u_new.mean()
            u_new /= sla.norm(u_new)
            # Update for the next iteration.
            delta = sla.norm(u_old - u_new)
            u_old = u_new
            num_iterations += 1
        # Save the newly-computed u and v to the output files;
        with open(file_D, "a+") as fD:
            np.savetxt(fD, u_new, fmt = "%.6f", newline = " ")
            fD.write("\n")
        with open(file_z, "a+") as fz:
            np.savetxt(fz, sv.toArray(), fmt = "%.6f", newline = " ")
            fz.write("\n")
        # P4: Deflation step. Update the primary data matrix S.
        _U_ = sc.broadcast(u_new)
        _V_ = sc.broadcast(sv)
        if args['debug']: print(m)
        # NOTE(review): row keys are unique after deflate, so this
        # reduceByKey appears to be a no-op shuffle -- confirm.
        S = S.map(deflate).reduceByKey(lambda x, y: x + y)
        S.cache()
    if args['debug']: print(datetime.datetime.now())
    process = psutil.Process(os.getpid())
    print(process.memory_info().rss)
| 36.690476 | 102 | 0.598096 |
a367aae21de189049aad11781cf7e33fd726759a | 16,952 | py | Python | pika/src/asyncio_consumer_example.py | abdullatifmouhamadi/rabbitmq-common | cc8209275f67a5e28ca5afd9ac424bcd053c8574 | [
"MIT"
] | 1 | 2020-08-07T19:42:32.000Z | 2020-08-07T19:42:32.000Z | pika/src/asyncio_consumer_example.py | abdullatifmouhamadi/rabbitmq-common | cc8209275f67a5e28ca5afd9ac424bcd053c8574 | [
"MIT"
] | 1 | 2020-01-08T22:45:50.000Z | 2020-01-08T22:45:50.000Z | pika/src/asyncio_consumer_example.py | abdullatifmouhamadi/rabbitmq-common | cc8209275f67a5e28ca5afd9ac424bcd053c8574 | [
"MIT"
] | 1 | 2020-07-07T03:07:53.000Z | 2020-07-07T03:07:53.000Z | # -*- coding: utf-8 -*-
# pylint: disable=C0111,C0103,R0205
import functools
import logging
import time
import pika
from pika.adapters.asyncio_connection import AsyncioConnection
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
class ExampleConsumer(object):
"""This is an example consumer that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, this class will stop and indicate
that reconnection is necessary. You should look at the output, as
there are limited reasons why the connection may be closed, which
usually are tied to permission related issues or socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
EXCHANGE = 'message'
EXCHANGE_TYPE = 'topic'
QUEUE = 'text'
ROUTING_KEY = 'example.text'
    def __init__(self, amqp_url):
        """Create a new instance of the consumer class, passing in the AMQP
        URL used to connect to RabbitMQ.
        :param str amqp_url: The AMQP url to connect with
        """
        # Set by reconnect() to signal that the caller should restart us.
        self.should_reconnect = False
        self.was_consuming = False
        # pika handles; populated once connect()/open_channel() succeed.
        self._connection = None
        self._channel = None
        # True only during a deliberate stop(); distinguishes intentional
        # shutdown from unexpected closures.
        self._closing = False
        self._consumer_tag = None
        self._url = amqp_url
        self._consuming = False
        # In production, experiment with higher prefetch values
        # for higher consumer throughput
        self._prefetch_count = 1
    def connect(self):
        """This method connects to RabbitMQ, returning the connection handle.
        When the connection is established, the on_connection_open method
        will be invoked by pika.
        :rtype: pika.adapters.asyncio_connection.AsyncioConnection
        """
        LOGGER.info('Connecting to %s', self._url)
        # All connection life-cycle events (open, open-failed, closed) are
        # routed back into methods on this class.
        return AsyncioConnection(
            parameters=pika.URLParameters(self._url),
            on_open_callback=self.on_connection_open,
            on_open_error_callback=self.on_connection_open_error,
            on_close_callback=self.on_connection_closed)
def close_connection(self):
self._consuming = False
if self._connection.is_closing or self._connection.is_closed:
LOGGER.info('Connection is closing or already closed')
else:
LOGGER.info('Closing connection')
self._connection.close()
    def on_connection_open(self, _unused_connection):
        """This method is called by pika once the connection to RabbitMQ has
        been established. It passes the handle to the connection object in
        case we need it, but in this case, we'll just mark it unused.
        :param pika.adapters.asyncio_connection.AsyncioConnection _unused_connection:
            The connection
        """
        LOGGER.info('Connection opened')
        # Next step of the startup chain: open a channel on this connection.
        self.open_channel()
    def on_connection_open_error(self, _unused_connection, err):
        """This method is called by pika if the connection to RabbitMQ
        can't be established.
        :param pika.adapters.asyncio_connection.AsyncioConnection _unused_connection:
            The connection
        :param Exception err: The error
        """
        LOGGER.error('Connection open failed: %s', err)
        # A failed connect is treated like a dropped connection: flag a retry.
        self.reconnect()
    def on_connection_closed(self, _unused_connection, reason):
        """This method is invoked by pika when the connection to RabbitMQ is
        closed unexpectedly. Since it is unexpected, we will reconnect to
        RabbitMQ if it disconnects.
        :param pika.connection.Connection connection: The closed connection obj
        :param Exception reason: exception representing reason for loss of
            connection.
        """
        # The channel dies with its connection.
        self._channel = None
        if self._closing:
            # Intentional shutdown: just let the ioloop terminate.
            self._connection.ioloop.stop()
        else:
            LOGGER.warning('Connection closed, reconnect necessary: %s', reason)
            self.reconnect()
    def reconnect(self):
        """Will be invoked if the connection can't be opened or is
        closed. Indicates that a reconnect is necessary then stops the
        ioloop.
        """
        # NOTE(review): the reconnect itself appears to be performed by
        # whatever code checks should_reconnect after stop() returns --
        # confirm with the surrounding runner.
        self.should_reconnect = True
        self.stop()
    def open_channel(self):
        """Open a new channel with RabbitMQ by issuing the Channel.Open RPC
        command. When RabbitMQ responds that the channel is open, the
        on_channel_open callback will be invoked by pika.
        """
        LOGGER.info('Creating a new channel')
        # Asynchronous: the channel is usable only once the callback fires.
        self._connection.channel(on_open_callback=self.on_channel_open)
    def on_channel_open(self, channel):
        """This method is invoked by pika when the channel has been opened.
        The channel object is passed in so we can make use of it.
        Since the channel is now open, we'll declare the exchange to use.
        :param pika.channel.Channel channel: The channel object
        """
        LOGGER.info('Channel opened')
        self._channel = channel
        # Register the close callback first so channel failures during the
        # declarations below are observed.
        self.add_on_channel_close_callback()
        self.setup_exchange(self.EXCHANGE)
    def add_on_channel_close_callback(self):
        """This method tells pika to call the on_channel_closed method if
        RabbitMQ unexpectedly closes the channel.
        """
        LOGGER.info('Adding channel close callback')
        # on_channel_closed receives the channel and the close reason.
        self._channel.add_on_close_callback(self.on_channel_closed)
    def on_channel_closed(self, channel, reason):
        """Invoked by pika when RabbitMQ unexpectedly closes the channel.
        Channels are usually closed if you attempt to do something that
        violates the protocol, such as re-declare an exchange or queue with
        different parameters. In this case, we'll close the connection
        to shutdown the object.
        :param pika.channel.Channel: The closed channel
        :param Exception reason: why the channel was closed
        """
        LOGGER.warning('Channel %i was closed: %s', channel, reason)
        # A dead channel is not reopened here; tear the connection down too.
        self.close_connection()
    def setup_exchange(self, exchange_name):
        """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
        command. When it is complete, the on_exchange_declareok method will
        be invoked by pika.
        :param str|unicode exchange_name: The name of the exchange to declare
        """
        LOGGER.info('Declaring exchange: %s', exchange_name)
        # Note: using functools.partial is not required, it is demonstrating
        # how arbitrary data can be passed to the callback when it is called
        cb = functools.partial(
            self.on_exchange_declareok, userdata=exchange_name)
        # Durability and other flags are left at pika defaults.
        self._channel.exchange_declare(
            exchange=exchange_name,
            exchange_type=self.EXCHANGE_TYPE,
            callback=cb)
    def on_exchange_declareok(self, _unused_frame, userdata):
        """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
        command.
        :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
        :param str|unicode userdata: Extra user data (exchange name)
        """
        LOGGER.info('Exchange declared: %s', userdata)
        # The exchange exists; next declare the queue that will bind to it.
        self.setup_queue(self.QUEUE)
    def setup_queue(self, queue_name):
        """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
        command. When it is complete, the on_queue_declareok method will
        be invoked by pika.
        :param str|unicode queue_name: The name of the queue to declare.
        """
        LOGGER.info('Declaring queue %s', queue_name)
        # The queue name is threaded through to the callback via partial.
        cb = functools.partial(self.on_queue_declareok, userdata=queue_name)
        self._channel.queue_declare(queue=queue_name, callback=cb)
    def on_queue_declareok(self, _unused_frame, userdata):
        """Method invoked by pika when the Queue.Declare RPC call made in
        setup_queue has completed. In this method we will bind the queue
        and exchange together with the routing key by issuing the Queue.Bind
        RPC command. When this command is complete, the on_bindok method will
        be invoked by pika.
        :param pika.frame.Method _unused_frame: The Queue.DeclareOk frame
        :param str|unicode userdata: Extra user data (queue name)
        """
        queue_name = userdata
        LOGGER.info('Binding %s to %s with %s', self.EXCHANGE, queue_name,
                    self.ROUTING_KEY)
        # Pass the queue name along again so on_bindok can log it.
        cb = functools.partial(self.on_bindok, userdata=queue_name)
        self._channel.queue_bind(
            queue_name,
            self.EXCHANGE,
            routing_key=self.ROUTING_KEY,
            callback=cb)
    def on_bindok(self, _unused_frame, userdata):
        """Invoked by pika when the Queue.Bind method has completed. At this
        point we will set the prefetch count for the channel.
        :param pika.frame.Method _unused_frame: The Queue.BindOk response frame
        :param str|unicode userdata: Extra user data (queue name)
        """
        LOGGER.info('Queue bound: %s', userdata)
        # Setup continues: QoS first, then consuming starts.
        self.set_qos()
    def set_qos(self):
        """This method sets up the consumer prefetch to only be delivered
        one message at a time. The consumer must acknowledge this message
        before RabbitMQ will deliver another one. You should experiment
        with different prefetch values to achieve desired performance.
        """
        # ``_prefetch_count`` is set by the constructor (outside this view).
        self._channel.basic_qos(
            prefetch_count=self._prefetch_count, callback=self.on_basic_qos_ok)
    def on_basic_qos_ok(self, _unused_frame):
        """Invoked by pika when the Basic.QoS method has completed. At this
        point we will start consuming messages by calling start_consuming
        which will invoke the needed RPC commands to start the process.
        :param pika.frame.Method _unused_frame: The Basic.QosOk response frame
        """
        LOGGER.info('QOS set to: %d', self._prefetch_count)
        # Final step of the startup chain.
        self.start_consuming()
    def start_consuming(self):
        """This method sets up the consumer by first calling
        add_on_cancel_callback so that the object is notified if RabbitMQ
        cancels the consumer. It then issues the Basic.Consume RPC command
        which returns the consumer tag that is used to uniquely identify the
        consumer with RabbitMQ. We keep the value to use it when we want to
        cancel consuming. The on_message method is passed in as a callback pika
        will invoke when a message is fully received.
        """
        LOGGER.info('Issuing consumer related RPC commands')
        self.add_on_cancel_callback()
        self._consumer_tag = self._channel.basic_consume(
            self.QUEUE, self.on_message)
        # ``was_consuming`` lets the reconnect supervisor reset its backoff.
        self.was_consuming = True
        self._consuming = True
    def add_on_cancel_callback(self):
        """Add a callback that will be invoked if RabbitMQ cancels the consumer
        for some reason. If RabbitMQ does cancel the consumer,
        on_consumer_cancelled will be invoked by pika.
        """
        LOGGER.info('Adding consumer cancellation callback')
        self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
    def on_consumer_cancelled(self, method_frame):
        """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
        receiving messages.
        :param pika.frame.Method method_frame: The Basic.Cancel frame
        """
        LOGGER.info('Consumer was cancelled remotely, shutting down: %r',
                    method_frame)
        # Closing the channel cascades into closing the connection.
        if self._channel:
            self._channel.close()
    def on_message(self, _unused_channel, basic_deliver, properties, body):
        """Invoked by pika when a message is delivered from RabbitMQ. The
        channel is passed for your convenience. The basic_deliver object that
        is passed in carries the exchange, routing key, delivery tag and
        a redelivered flag for the message. The properties passed in is an
        instance of BasicProperties with the message properties and the body
        is the message that was sent.
        :param pika.channel.Channel _unused_channel: The channel object
        :param pika.Spec.Basic.Deliver: basic_deliver method
        :param pika.Spec.BasicProperties: properties
        :param bytes body: The message body
        """
        LOGGER.info('Received message # %s from %s: %s',
                    basic_deliver.delivery_tag, properties.app_id, body)
        # Ack immediately; no processing happens before acknowledgement here.
        self.acknowledge_message(basic_deliver.delivery_tag)
    def acknowledge_message(self, delivery_tag):
        """Acknowledge the message delivery from RabbitMQ by sending a
        Basic.Ack RPC method for the delivery tag.
        :param int delivery_tag: The delivery tag from the Basic.Deliver frame
        """
        LOGGER.info('Acknowledging message %s', delivery_tag)
        self._channel.basic_ack(delivery_tag)
    def stop_consuming(self):
        """Tell RabbitMQ that you would like to stop consuming by sending the
        Basic.Cancel RPC command.
        """
        if self._channel:
            LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ')
            # Pass the consumer tag through so on_cancelok can log which
            # consumer was cancelled.
            cb = functools.partial(
                self.on_cancelok, userdata=self._consumer_tag)
            self._channel.basic_cancel(self._consumer_tag, cb)
    def on_cancelok(self, _unused_frame, userdata):
        """This method is invoked by pika when RabbitMQ acknowledges the
        cancellation of a consumer. At this point we will close the channel.
        This will invoke the on_channel_closed method once the channel has been
        closed, which will in-turn close the connection.
        :param pika.frame.Method _unused_frame: The Basic.CancelOk frame
        :param str|unicode userdata: Extra user data (consumer tag)
        """
        self._consuming = False
        LOGGER.info(
            'RabbitMQ acknowledged the cancellation of the consumer: %s',
            userdata)
        self.close_channel()
    def close_channel(self):
        """Call to close the channel with RabbitMQ cleanly by issuing the
        Channel.Close RPC command.
        """
        LOGGER.info('Closing the channel')
        self._channel.close()
    def run(self):
        """Run the example consumer by connecting to RabbitMQ and then
        starting the IOLoop to block and allow the AsyncioConnection to operate.
        """
        self._connection = self.connect()
        # Blocks until the ioloop is stopped (shutdown or fatal error).
        self._connection.ioloop.run_forever()
    def stop(self):
        """Cleanly shutdown the connection to RabbitMQ by stopping the consumer
        with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
        will be invoked by pika, which will then closing the channel and
        connection. The IOLoop is started again because this method is invoked
        when CTRL-C is pressed raising a KeyboardInterrupt exception. This
        exception stops the IOLoop which needs to be running for pika to
        communicate with RabbitMQ. All of the commands issued prior to starting
        the IOLoop will be buffered but not processed.
        """
        if not self._closing:
            self._closing = True
            LOGGER.info('Stopping')
            if self._consuming:
                self.stop_consuming()
                # Re-enter the ioloop so the Basic.Cancel handshake can finish.
                self._connection.ioloop.run_forever()
            else:
                self._connection.ioloop.stop()
            LOGGER.info('Stopped')
class ReconnectingExampleConsumer(object):
    """Supervisor that keeps an ExampleConsumer alive, rebuilding it with a
    capped, incrementing delay whenever the consumer signals that a
    reconnect is necessary.
    """

    def __init__(self, amqp_url):
        self._reconnect_delay = 0
        self._amqp_url = amqp_url
        self._consumer = ExampleConsumer(self._amqp_url)

    def run(self):
        """Run the nested consumer forever; Ctrl-C stops it cleanly."""
        while True:
            try:
                self._consumer.run()
            except KeyboardInterrupt:
                self._consumer.stop()
                break
            self._maybe_reconnect()

    def _maybe_reconnect(self):
        # Only rebuild when the finished consumer flagged a reconnect.
        if not self._consumer.should_reconnect:
            return
        self._consumer.stop()
        delay = self._get_reconnect_delay()
        LOGGER.info('Reconnecting after %d seconds', delay)
        time.sleep(delay)
        self._consumer = ExampleConsumer(self._amqp_url)

    def _get_reconnect_delay(self):
        # Reset the backoff after a session that actually consumed,
        # otherwise grow it by one second, capped at 30.
        if self._consumer.was_consuming:
            self._reconnect_delay = 0
        else:
            self._reconnect_delay = min(self._reconnect_delay + 1, 30)
        return self._reconnect_delay
def main():
    """Configure logging and run the reconnecting consumer until interrupted."""
    logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
    broker_url = 'amqp://guest:guest@localhost:5672/%2F'
    ReconnectingExampleConsumer(broker_url).run()


if __name__ == '__main__':
    main()
| 38.18018 | 85 | 0.66765 |
6cc37527d05087171a7c621d2c93d96e260a7c66 | 1,567 | py | Python | data_structure/pilha_expressao.py | uadson/data-structure | e7c62ff732b9b89e57b9b08dfc6f777e57a52397 | [
"MIT"
] | null | null | null | data_structure/pilha_expressao.py | uadson/data-structure | e7c62ff732b9b89e57b9b08dfc6f777e57a52397 | [
"MIT"
] | null | null | null | data_structure/pilha_expressao.py | uadson/data-structure | e7c62ff732b9b89e57b9b08dfc6f777e57a52397 | [
"MIT"
] | null | null | null | import numpy as np
class Pilha:
def __init__(self, capacidade):
self.capacidade = capacidade
self.topo = -1
# Array de chars (b'(')
self.valores = np.chararray(self.capacidade, unicode=True)
def __pilha_cheia(self):
if self.topo == self.capacidade - 1:
return True
else:
return False
# Método público
def pilha_vazia(self):
if self.topo == -1:
return True
else:
return False
def empilhar(self, valor):
if self.__pilha_cheia():
print('A pilha está cheia')
else:
self.topo += 1
self.valores[self.topo] = valor
# Retorna o elemento desempilhado
def desempilhar(self):
if self.pilha_vazia():
print('A pilha está vazia')
return -1
else:
valor = self.valores[self.topo]
self.topo -= 1
return valor
def ver_topo(self):
if self.topo != -1:
return self.valores[self.topo]
else:
return -1
# Balanced-bracket checker: sample expressions and their expected outcomes.
# c[d]
# a{b[c]d}e
# a{b(c]d}e
# a[b{c}d]e}
# a{b(c)
expressao = str(input('Digite uma expressão: '))
pilha = Pilha(len(expressao))
for i in range(len(expressao)):
    ch = expressao[i]
    if ch == '{' or ch == '[' or ch == '(':
        pilha.empilhar(ch)
    elif ch == '}' or ch == ']' or ch == ')':
        if not pilha.pilha_vazia():
            chx = str(pilha.desempilhar())
            # A closing bracket must match the most recently pushed opener.
            if (ch == '}' and chx != '{') or (ch == ']' and chx != '[') or (ch == ')' and chx != '('):
                print('Erro: ', ch, ' na posição ', i)
                break
        else:
            # Closing bracket arrived with nothing left to match.
            print('Erro: ', ch, ' na posição ', i)
# Leftover openers after the scan mean the expression is unbalanced.
if not pilha.pilha_vazia():
    print('Erro!')
b26e233352a780bc13652bc516959c54b4ceb2fd | 3,604 | py | Python | samples/basic/basic.py | martinsam/pycloudinary | 11de51083dbce69009fd0dcc8984550b4c3e1f3c | [
"MIT"
] | null | null | null | samples/basic/basic.py | martinsam/pycloudinary | 11de51083dbce69009fd0dcc8984550b4c3e1f3c | [
"MIT"
] | null | null | null | samples/basic/basic.py | martinsam/pycloudinary | 11de51083dbce69009fd0dcc8984550b4c3e1f3c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
from cloudinary.api import delete_resources_by_tag, resources_by_tag
from cloudinary.uploader import upload
from cloudinary.utils import cloudinary_url
# config
# Run relative to this script's directory so the sample images resolve.
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '.'))
if os.path.exists('settings.py'):
    # SECURITY: executes settings.py verbatim (expected to hold cloudinary
    # credentials) — only run alongside a trusted local settings file.
    exec(open('settings.py').read())
DEFAULT_TAG = "python_sample_basic"
def dump_response(response):
    """Pretty-print an upload API response, one sorted key per line."""
    print("Upload response:")
    for key in sorted(response):
        print("  {0}: {1}".format(key, response[key]))
def upload_files():
    """Demonstrate several Cloudinary upload flavors, printing each response
    and a sample transformed delivery URL. Everything is tagged with
    DEFAULT_TAG so cleanup() can delete it later. Requires configured
    Cloudinary credentials and network access.
    """
    # 1) Plain local-file upload, auto-assigned public id.
    print("--- Upload a local file")
    response = upload("pizza.jpg", tags=DEFAULT_TAG)
    dump_response(response)
    url, options = cloudinary_url(
        response['public_id'],
        format=response['format'],
        width=200,
        height=150,
        crop="fill"
    )
    print("Fill 200x150 url: " + url)
    print("")
    # 2) Local file with a caller-chosen public id.
    print("--- Upload a local file with custom public ID")
    response = upload(
        "pizza.jpg",
        tags=DEFAULT_TAG,
        public_id="custom_name",
    )
    dump_response(response)
    url, options = cloudinary_url(
        response['public_id'],
        format=response['format'],
        width=200,
        height=150,
        crop="fit"
    )
    print("Fit into 200x150 url: " + url)
    print("")
    # 3) Eager transformation: the 200x150 scale is generated at upload time.
    print("--- Upload a local file with eager transformation of scaling to 200x150")
    response = upload(
        "lake.jpg",
        tags=DEFAULT_TAG,
        public_id="eager_custom_name",
        eager=dict(
            width=200,
            height=150,
            crop="scale"
        ),
    )
    dump_response(response)
    url, options = cloudinary_url(
        response['public_id'],
        format=response['format'],
        width=200,
        height=150,
        crop="scale",
    )
    print("scaling to 200x150 url: " + url)
    print("")
    # 4) Upload by fetching a remote URL instead of a local file.
    print("--- Upload by fetching a remote image")
    response = upload(
        "http://res.cloudinary.com/demo/image/upload/couple.jpg",
        tags=DEFAULT_TAG
    )
    dump_response(response)
    url, options = cloudinary_url(
        response['public_id'],
        format=response['format'],
        width=200,
        height=150,
        crop="thumb",
        gravity="faces",
    )
    print("Face detection based 200x150 thumbnail url: " + url)
    print("")
    # 5) Remote fetch with an incoming transformation applied before storing.
    print("--- Fetch an uploaded remote image, fitting it into 500x500 and reducing saturation")
    response = upload(
        "http://res.cloudinary.com/demo/image/upload/couple.jpg",
        tags=DEFAULT_TAG,
        width=500,
        height=500,
        crop="fit",
        effect="saturation:-70",
    )
    dump_response(response)
    url, options = cloudinary_url(
        response['public_id'],
        format=response['format'],
        width=200,
        height=150,
        crop="fill",
        gravity="faces",
        radius=10,
        effect="sepia",
    )
    print("Fill 200x150, round corners, apply the sepia effect, url: " + url)
    print("")
def cleanup():
    """Delete every sample resource that was tagged with DEFAULT_TAG."""
    found = resources_by_tag(DEFAULT_TAG).get('resources', [])
    if not found:
        print("No images found")
        return
    print("Deleting {0:d} images...".format(len(found)))
    delete_resources_by_tag(DEFAULT_TAG)
    print("Done!")
# Dispatch on the optional CLI argument: 'upload' or 'cleanup'.
if len(sys.argv) > 1:
    if sys.argv[1] == 'upload':
        upload_files()
    if sys.argv[1] == 'cleanup':
        cleanup()
else:
    # NOTE(review): the banner says files are cleaned up too, but only
    # upload_files() runs here — confirm whether cleanup() was intended.
    print("--- Uploading files and then cleaning up")
    print("    you can only one instead by passing 'upload' or 'cleanup' as an argument")
    print("")
    upload_files()
| 26.115942 | 96 | 0.596837 |
726de4263bb167690bc9ef1a51cca63b3774de7b | 546 | py | Python | API/consumption/urls.py | crowdhackathon-smartcity/CITIZEN17 | daeca11650f1198206a44a6fcd1aa229fb59b8cd | [
"MIT"
] | 5 | 2017-05-13T17:05:41.000Z | 2017-05-18T11:41:01.000Z | API/consumption/urls.py | crowdhackathon-smartcity/CITIZEN17 | daeca11650f1198206a44a6fcd1aa229fb59b8cd | [
"MIT"
] | null | null | null | API/consumption/urls.py | crowdhackathon-smartcity/CITIZEN17 | daeca11650f1198206a44a6fcd1aa229fb59b8cd | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from . import views
urlpatterns = [
url(
r'^municipality/',
views.MunicipalityView.as_view(),
name='municipality'
),
url(
r'^user/',
views.UserView.as_view(),
name='user'
),
url(
r'^sensor/',
csrf_exempt(views.SensorView.as_view()),
name='sensor'
),
url(
r'^payment/',
csrf_exempt(views.PaymentView.as_view()),
name='sensor'
),
]
| 19.5 | 52 | 0.554945 |
154e26ca840db1d0e684fd291b138d0a82f2c545 | 649 | py | Python | cymysql/tests/test_example.py | caty/manyuser | 438f1df02fc9a0faba97559ee1138061d4e28574 | [
"Apache-2.0"
] | 11 | 2018-05-22T03:02:15.000Z | 2021-02-17T06:43:10.000Z | cymysql/tests/test_example.py | caty/manyuser | 438f1df02fc9a0faba97559ee1138061d4e28574 | [
"Apache-2.0"
] | null | null | null | cymysql/tests/test_example.py | caty/manyuser | 438f1df02fc9a0faba97559ee1138061d4e28574 | [
"Apache-2.0"
] | 3 | 2020-11-04T08:37:23.000Z | 2022-03-28T15:49:40.000Z | import cymysql
from cymysql.tests import base
class TestExample(base.PyMySQLTestCase):
    """Smoke test: connect to a local MySQL server and run a simple query."""

    def test_example(self):
        conn = cymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='mysql')
        cur = conn.cursor()
        cur.execute("SELECT Host,User FROM user")
        # The connected user must appear somewhere in the user table.
        found = any(conn.user in row for row in cur.fetchall())
        self.assertTrue(found)
        cur.close()
        conn.close()
# Public surface for ``from cymysql.tests.test_example import *``.
__all__ = ["TestExample"]
if __name__ == "__main__":
    import unittest
    unittest.main()
af3d15fe1a84598f77357efa22ec814f0f979b5e | 3,493 | py | Python | rule_extraction.py | hayesall/bn-rule-extraction | fc9e55744d96afd18870a9660ae6d3e16c86c4da | [
"MIT"
] | 4 | 2021-08-17T18:58:27.000Z | 2021-12-11T18:20:22.000Z | rule_extraction.py | hayesall/bn-rule-extraction | fc9e55744d96afd18870a9660ae6d3e16c86c4da | [
"MIT"
] | null | null | null | rule_extraction.py | hayesall/bn-rule-extraction | fc9e55744d96afd18870a9660ae6d3e16c86c4da | [
"MIT"
] | 2 | 2021-08-17T18:59:48.000Z | 2021-12-11T18:20:25.000Z | # Copyright © 2020 Alexander L. Hayes
"""
Extracting decision rules from Bayesian Networks
"""
from pomegranate import BayesianNetwork
from pomegranate import DiscreteDistribution
from pomegranate import ConditionalProbabilityTable
from sklearn.preprocessing import OrdinalEncoder
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import accuracy_score
import numpy as np
# First CSV row holds the variable names; remaining rows are the samples.
names = np.loadtxt("toy_decision.csv", max_rows=1, delimiter=",", dtype=str)
data = np.loadtxt("toy_decision.csv", skiprows=1, delimiter=",", dtype=str)
# Encode each categorical column as ordinal floats (0.0, 1.0, ...).
enc = OrdinalEncoder(dtype=np.float32)
data = enc.fit_transform(data)
print(enc.categories_)
# TODO(hayesall): ``mapping`` is basically a "pretty-printer", this could
# probably be included as part of the ``print_rules`` function.
# Maps "var = 0.0" style strings back to their human-readable "var = value".
mapping = {}
for variable_name, possible_values in zip(names, enc.categories_):
    for i, value_name in enumerate(possible_values):
        from_this = variable_name + " = " + str(float(i))
        to_that = variable_name + " = " + value_name
        mapping[from_this] = to_that
# First column is the label; the rest are features.
y = data.T[0]
X = data.T[1:].T
def print_rules(pom_model, variable_mapping):
    """Print IF/THEN decision rules extracted from a fitted Bayesian network.

    For each CPT row with a confidence factor >= 1.0, prints a rule of the
    form ``IF (parent = v ^ ...) THEN (var = v)`` plus its CF.

    Bug fix: the body previously read the global ``model`` instead of the
    ``pom_model`` parameter, so the argument was silently ignored (it only
    worked because the caller happened to assign a global named ``model``).

    :param pom_model: fitted pomegranate BayesianNetwork
    :param dict variable_mapping: maps "var = 0.0" strings to readable labels
    """
    for i in range(len(pom_model.states)):
        distribution = pom_model.states[i].distribution
        if isinstance(distribution, DiscreteDistribution):
            # Root node: no parents, just show its marginal parameters.
            # NOTE: still reads the module-level ``names`` for labels.
            print(names[i], distribution.parameters)
        else:
            # Assume isinstance ConditionalProbabilityTable.
            cpt = np.array(distribution.parameters[0])
            print("\n\n")
            for row in cpt:
                # row layout: parent values..., own value, probability.
                parents = [names[p] for p in pom_model.structure[i]]
                antecedent = " ^ ".join(
                    variable_mapping.get(par + " = " + str(row[j]),
                                         par + " = " + str(row[j]))
                    for j, par in enumerate(parents)
                )
                consequent = names[i] + " = " + str(row[-2])
                consequent = variable_mapping.get(consequent, consequent)
                rule = "IF (" + antecedent + ") THEN (" + consequent + ")"
                # Odds-style confidence factor; only confident rules print.
                # NOTE(review): divides by (1 - p) — a probability of exactly
                # 1.0 would raise ZeroDivisionError; confirm inputs exclude it.
                _conf_factor = row[-1] / (1 - row[-1])
                if _conf_factor >= 1.0:
                    print(rule)
                    print("\tCF = {0:0.2f}".format(_conf_factor))
# Leave-one-out evaluation: learn a network per fold, predict the held-out
# label, then report overall accuracy.
loo = LeaveOneOut()
clf = BayesianNetwork()
# Edges that must appear in every learned structure (feature -> label).
required = [
    tuple([1, 0]),
    tuple([4, 0]),
]
predictions = []
for train_index, test_index in loo.split(X):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    # Label goes in column 0 so the required edges point at it.
    learning_data = np.c_[y_train, X_train]
    model = clf.from_samples(
        learning_data,
        algorithm='exact',
        include_edges=required,
        state_names=[str(name) for name in names],
        max_parents=-1,
    )
    print(model.structure)
    if test_index == 0:
        print("Decision rules extracted from the first test:\n")
        print_rules(model, mapping)
    # Mask the label column with NaN so predict_proba infers it.
    nan_column = np.empty(y_test.shape)
    nan_column[:] = np.nan
    test_data = np.c_[nan_column, X_test]
    pred = model.predict_proba(test_data)
    # Take P(label=1) from the single held-out row and threshold at 0.5.
    predictions.append(
        [item[0].items()[1][1] > 0.5 for item in pred][0]
    )
print(accuracy_score(np.array(predictions), y))
| 28.398374 | 80 | 0.610077 |
aa1132c21fbfb2f18a5d62fd4970c8118643acb4 | 536 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/aged-surf-29096 | 6e15acc1c420f1fc9439b3e87a78ebf74253271e | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/aged-surf-29096 | 6e15acc1c420f1fc9439b3e87a78ebf74253271e | [
"FTL",
"AML",
"RSA-MD"
] | 40 | 2021-07-23T23:14:22.000Z | 2021-07-23T23:15:12.000Z | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/aged-surf-29096 | 6e15acc1c420f1fc9439b3e87a78ebf74253271e | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.db import migrations
def create_site(apps, schema_editor):
    """Seed (or refresh) the default django.contrib.sites entry with pk=1."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "aged-surf-29096.botics.co"
    defaults = {
        "name": "Aged Surf",
    }
    if custom_domain:
        defaults["domain"] = custom_domain
    Site.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    # Must run after the sites framework made Site.domain unique.
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        migrations.RunPython(create_site),
    ]
| 20.615385 | 61 | 0.652985 |
10093a248164812afafeb3694bf26446195b1216 | 1,709 | py | Python | fixture/james.py | piersto/python_training_mantis | 627547e33a876a975ef54e640965866e5b096c19 | [
"Apache-2.0"
] | null | null | null | fixture/james.py | piersto/python_training_mantis | 627547e33a876a975ef54e640965866e5b096c19 | [
"Apache-2.0"
] | null | null | null | fixture/james.py | piersto/python_training_mantis | 627547e33a876a975ef54e640965866e5b096c19 | [
"Apache-2.0"
] | null | null | null | from telnetlib import Telnet
class JamesHelper:
    """Ensures mail users exist on an Apache James server via its telnet
    remote-manager interface."""

    def __init__(self, app):
        # ``app`` provides config['james'] with host/port/admin credentials.
        self.app = app

    def ensure_user_exists(self, username, password):
        """Create ``username`` with ``password``, or reset the password if
        the user is already registered."""
        james_config = self.app.config['james']
        session = JamesHelper.Session(
            james_config['host'], james_config['port'], james_config['username'], james_config['password'])
        if session.is_user_registered(username):
            session.reset_password(username, password)
        else:
            session.create_user(username, password)
        session.quit()

    class Session:
        # Thin line-oriented wrapper around the James remote-manager
        # protocol; every command is a write followed by an expected reply.

        def __init__(self, host, port, username, password):
            # 5-second timeout on connect and on each read_until below.
            self.telnet = Telnet(host, port, 5)
            self.read_until('Login id:')
            self.write(username + '\n')
            self.read_until('Password:')
            self.write(password + '\n')
            self.read_until('Welcome root. HELP for a list of commands')

        def read_until(self, text):
            self.telnet.read_until(text.encode('ascii'), 5)

        def write(self, text):
            self.telnet.write(text.encode('ascii'))

        def is_user_registered(self, username):
            self.write('verify %s\n' % username)
            # expect() returns the index of the matched pattern; 0 == 'exists'.
            res = self.telnet.expect([b'exists', b'does not exist'])
            return res[0] == 0

        def create_user(self, username, password):
            self.write('adduser %s %s\n' % (username, password))
            self.read_until('User %s added' % username)

        def reset_password(self, username, password):
            self.write('setpassword %s %s\n' % (username, password))
            self.read_until('Password for %s reset' % username)

        def quit(self):
            self.write('quit\n')
| 33.509804 | 107 | 0.592159 |
ae45d57034fa780b97a028cb1c830b76ed5ee366 | 7,077 | py | Python | laika/helpers.py | mfkiwl/laika-gnss | dc38f251dbc7ebb535a3c220de8424634d297248 | [
"MIT"
] | 365 | 2018-12-17T07:43:34.000Z | 2022-03-29T22:23:39.000Z | laika/helpers.py | mfkiwl/laika-gnss | dc38f251dbc7ebb535a3c220de8424634d297248 | [
"MIT"
] | 36 | 2019-07-24T10:20:45.000Z | 2022-02-14T22:11:24.000Z | laika/helpers.py | mfkiwl/laika-gnss | dc38f251dbc7ebb535a3c220de8424634d297248 | [
"MIT"
] | 156 | 2018-12-17T05:06:23.000Z | 2022-03-31T12:06:07.000Z | import warnings
import numpy as np
from .lib.coordinates import LocalCoord
# From https://gpsd.gitlab.io/gpsd/NMEA.html - Satellite IDs section
# From https://gpsd.gitlab.io/gpsd/NMEA.html - Satellite IDs section
# Ordered, non-overlapping nmea-id ranges; order matters for the offset
# bookkeeping in get_prn_from_nmea_id / get_nmea_id_from_prn.
NMEA_ID_RANGES = (
  {
    'range': (1, 32),
    'constellation': 'GPS'
  },
  {
    'range': (33, 54),
    'constellation': 'SBAS'
  },
  {
    'range': (55, 64),
    'constellation': 'SBAS'
  },
  {
    'range': (65, 88),
    'constellation': 'GLONASS'
  },
  {
    'range': (89, 96),
    'constellation': 'GLONASS'
  },
  {
    'range': (120, 151),
    'constellation': 'SBAS'
  },
  {
    'range': (152, 158),
    'constellation': 'SBAS'
  },
  {
    'range': (173, 182),
    'constellation': 'IMES'
  },
  {
    'range': (193, 197),
    'constellation': 'QZNSS'
  },
  {
    'range': (198, 200),
    'constellation': 'QZNSS'
  },
  {
    'range': (201, 235),
    'constellation': 'BEIDOU'
  },
  {
    'range': (301, 336),
    'constellation': 'GALILEO'
  },
  {
    'range': (401, 437),
    'constellation': 'BEIDOU'
  }
)

# Source: RINEX 3.04
RINEX_CONSTELLATION_IDENTIFIERS = {
  'GPS': 'G',
  'GLONASS': 'R',
  'SBAS': 'S',
  'GALILEO': 'E',
  'BEIDOU': 'C',
  'QZNSS': 'J',
  'IRNSS': 'I'
}

# Make above dictionary bidirectional map:
# Now you can ask for constellation using:
# >>> RINEX_CONSTELLATION_IDENTIFIERS['R']
# "GLONASS"
RINEX_CONSTELLATION_IDENTIFIERS.update(
  dict([reversed(i) for i in RINEX_CONSTELLATION_IDENTIFIERS.items()])  # type: ignore
)
def get_el_az(pos, sat_pos):
  """Elevation and azimuth (radians) of ``sat_pos`` as seen from the ECEF
  position ``pos``, computed via the local NED frame at ``pos``."""
  converter = LocalCoord.from_ecef(pos)
  sat_ned = converter.ecef2ned(sat_pos)
  sat_range = np.linalg.norm(sat_ned)
  # NED down component is negative altitude, hence the sign flip.
  el = np.arcsin(-sat_ned[2]/sat_range)  # pylint: disable=unsubscriptable-object
  az = np.arctan2(sat_ned[1], sat_ned[0])  # pylint: disable=unsubscriptable-object
  return el, az
def get_closest(time, candidates, recv_pos=None):
  """Pick the best candidate for ``time``.

  Without ``recv_pos``: the candidate whose ``epoch`` is nearest to
  ``time``. With ``recv_pos``: the candidate valid at ``time`` whose
  ``pos`` is nearest to ``recv_pos``. Returns None when nothing qualifies.
  """
  if recv_pos is None:
    # Candidates expose an epoch (GPSTime); minimize the time distance.
    def _time_dist(candidate):
      return abs(time - candidate.epoch)
    return min(candidates, key=_time_dist, default=None)

  def _space_dist(candidate):
    return np.linalg.norm(recv_pos - candidate.pos)
  valid = (c for c in candidates if c.valid(time, recv_pos))
  return min(valid, key=_space_dist, default=None)
def get_constellation(prn):
  """Map a RINEX PRN string (e.g. 'G01') to its constellation name, or
  None (with a warning) when the leading letter is unrecognized."""
  letter = prn[0]
  constellation = RINEX_CONSTELLATION_IDENTIFIERS.get(letter)
  if constellation is None:
    warnings.warn(f"Unknown constellation for PRN {prn}")
  return constellation
def get_unknown_prn_from_nmea_id(nmea_id):
  """Placeholder PRN for an nmea id with no known RINEX mapping, e.g. 24 -> '?24'."""
  return f"?{nmea_id:d}"
def get_nmea_id_from_unknown_prn(prn):
  """Inverse of get_unknown_prn_from_nmea_id: '?24' -> 24."""
  digits = prn[1:]
  return int(digits)
def is_unknown_prn(prn):
  """True for placeholder PRNs produced by get_unknown_prn_from_nmea_id."""
  return prn.startswith('?')
def get_prn_from_nmea_id(nmea_id):
  """Convert an NMEA satellite id to a RINEX3-style PRN (e.g. 1 -> 'G01').

  Walks NMEA_ID_RANGES in order, accumulating a per-constellation offset so
  that split ranges (e.g. the two GLONASS blocks) number contiguously.
  Unknown ids return a '?<id>' placeholder with a warning."""
  constellation_offsets = {}
  for entry in NMEA_ID_RANGES:
    start, end = entry['range']
    constellation = entry['constellation']
    # Ranges are sorted, so an id below the current start matches nothing.
    if nmea_id < start:
      warnings.warn("RINEX PRN for nmea id %i not known" % nmea_id)
      return get_unknown_prn_from_nmea_id(nmea_id)
    # Satellites already counted for this constellation in earlier ranges.
    constellation_offset = constellation_offsets.get(constellation, 0)
    if nmea_id <= end:
      if constellation is None:
        warnings.warn("Constellation for nmea id "
                      "%i not known" % nmea_id)
        return get_unknown_prn_from_nmea_id(nmea_id)
      identifier = RINEX_CONSTELLATION_IDENTIFIERS.get(constellation)
      if identifier is None:
        warnings.warn("RINEX3 constellation identifier for "
                      "constellation %s is not known" % constellation)
        return get_unknown_prn_from_nmea_id(nmea_id)
      number = nmea_id - start + 1 + constellation_offset
      return "%s%02d" % (identifier, number)
    else:
      range_width = end - start + 1
      constellation_offsets[constellation] = constellation_offset + range_width
  warnings.warn("RINEX PRN for nmea id %i not known" % nmea_id)
  return get_unknown_prn_from_nmea_id(nmea_id)
def get_nmea_id_from_prn(prn):
  """Convert a RINEX3-style PRN (e.g. 'G01') back to its NMEA satellite id.

  Mirrors get_prn_from_nmea_id: walks the ranges of the PRN's constellation,
  subtracting each range's width until the satellite number fits."""
  if is_unknown_prn(prn):
    return get_nmea_id_from_unknown_prn(prn)
  prn_constellation = get_constellation(prn)
  satellite_id = int(prn[1:])
  if satellite_id < 1:
    raise ValueError("PRN must contains number greater then 0")
  constellation_offset = 0
  for entry in NMEA_ID_RANGES:
    start, end = entry['range']
    constellation = entry['constellation']
    if constellation != prn_constellation:
      continue
    range_width = end - start + 1
    # Zero-based position of this satellite within the current range.
    index_in_range = satellite_id - constellation_offset - 1
    if range_width > index_in_range:
      return start + index_in_range
    else:
      constellation_offset += range_width
  raise NotImplementedError(f"NMEA ID not found for PRN {prn}")
def rinex3_obs_from_rinex2_obs(observable):
  """Translate a RINEX2 observable code to its RINEX3 equivalent.

  'P2' maps to 'C2P'; any other two-character code gets a 'C' (C/A
  tracking) suffix; anything else is unsupported."""
  if observable == 'P2':
    return 'C2P'
  if len(observable) != 2:
    raise NotImplementedError("Don't know this: " + observable)
  return observable + 'C'
class TimeRangeHolder:
  '''Class to support test if date is in any of the multiple, sparse ranges.

  Ranges are kept as a sorted list of (start, end) tuples; ``add`` merges
  or extends neighbouring ranges when the new interval touches them.
  '''

  def __init__(self):
    # Sorted list of (start, end) tuples, non-overlapping.
    self._ranges = []

  def _previous_and_contains_index(self, time):
    """Return (prev, current): index of the last range entirely before
    ``time``, and the index of the range containing it (or None)."""
    prev = None
    current = None
    for idx, (start, end) in enumerate(self._ranges):
      # Time may be in next range
      if time > end:
        continue
      # Time isn't in any next range
      if time < start:
        prev = idx - 1
        current = None
      # Time is in current range
      else:
        prev = idx - 1
        current = idx
      break
    # Break in last loop
    if prev is None:
      prev = len(self._ranges) - 1
    return prev, current

  def add(self, start_time, end_time):
    """Insert [start_time, end_time], merging with ranges that contain
    either endpoint.

    NOTE(review): ranges that lie strictly between the two endpoints are
    not removed here — presumably inputs never straddle more than two
    existing ranges; confirm before relying on arbitrary intervals.
    """
    prev_start, current_start = self._previous_and_contains_index(start_time)
    _, current_end = self._previous_and_contains_index(end_time)
    # Merge ranges
    if current_start is not None and current_end is not None:
      # If ranges are different then merge
      if current_start != current_end:
        new_start, _ = self._ranges[current_start]
        _, new_end = self._ranges[current_end]
        new_range = (new_start, new_end)
        # Required reversed order to correct remove
        del self._ranges[current_end]
        del self._ranges[current_start]
        self._ranges.insert(current_start, new_range)
    # Extend range - left
    elif current_start is not None:
      new_start, _ = self._ranges[current_start]
      new_range = (new_start, end_time)
      del self._ranges[current_start]
      self._ranges.insert(current_start, new_range)
    # Extend range - right
    elif current_end is not None:
      _, new_end = self._ranges[current_end]
      new_range = (start_time, new_end)
      del self._ranges[current_end]
      self._ranges.insert(prev_start + 1, new_range)
    # Create new range
    else:
      new_range = (start_time, end_time)
      self._ranges.insert(prev_start + 1, new_range)

  def __contains__(self, time):
    """True when ``time`` falls inside any stored range (inclusive ends)."""
    for start, end in self._ranges:
      # Time may be in next range
      if time > end:
        continue
      # Time isn't in any next range
      if time < start:
        return False
      # Time is in current range
      return True
    return False
| 26.605263 | 91 | 0.660167 |
85116ef020f70b14463bf3350ef42e1d68acf28d | 262 | py | Python | Django For APIs 3.0/library/api/views.py | ibnshayed/Python-Programming | a5c50b7ced5131b25260f4c3401f98d016ea8355 | [
"MIT"
] | null | null | null | Django For APIs 3.0/library/api/views.py | ibnshayed/Python-Programming | a5c50b7ced5131b25260f4c3401f98d016ea8355 | [
"MIT"
] | null | null | null | Django For APIs 3.0/library/api/views.py | ibnshayed/Python-Programming | a5c50b7ced5131b25260f4c3401f98d016ea8355 | [
"MIT"
] | null | null | null | from rest_framework import generics
from books.models import Book
from .serializers import BookSerializer
# Create your views here.
class BookAPIView(generics.ListAPIView):
queryset = Book.objects.all()
serializer_class = BookSerializer
| 23.818182 | 44 | 0.755725 |
e159068c2e0e14962fdb1843e5e3e2136dfbacb1 | 1,702 | py | Python | elaboorate/elaboo_methods/split.py | oterobravo/elaboo | b248269954ae4de07fcb714da265bf1aaa8ad3db | [
"MIT"
] | null | null | null | elaboorate/elaboo_methods/split.py | oterobravo/elaboo | b248269954ae4de07fcb714da265bf1aaa8ad3db | [
"MIT"
] | null | null | null | elaboorate/elaboo_methods/split.py | oterobravo/elaboo | b248269954ae4de07fcb714da265bf1aaa8ad3db | [
"MIT"
] | null | null | null | import logging
def draw_histogram(values, hist_file, threshold, breaks):
    """Save a histogram of ``values`` to ``hist_file`` with a vertical line
    marking the split ``threshold``. Silently skips (with a log message)
    when matplotlib is unavailable."""
    try:
        logging.getLogger('matplotlib').setLevel(logging.ERROR) #Disable matplotlib logging
        import matplotlib.pyplot as hs
    except ImportError:
        logging.info("Error importing pyplot. Cannot draw histogram.")
        return
    logging.info("Saving histogram to file %s" % hist_file)
    hs.hist(values, breaks)
    hs.axvline(x = threshold, color = 'b')
    hs.savefig(hist_file)
    return
def get_PCAs(alignment, statistics_array):
    """Run the alignment's PCA over the per-taxon statistics and log it.

    Returns whatever ``alignment.get_pca`` returns — presumably a pair of
    (taxon -> score mapping, explained-variance proportion); confirm
    against the alignment class."""
    PCA_results = alignment.get_pca(array = statistics_array)
    logging.info("PCA values:")
    logging.info("\n%s" % "\n".join([str(x) + "\t" + str(round(PCA_results[0][x], 5)) for x in PCA_results[0]]))
    logging.info("Proportion of variance explained by PCA %s" % PCA_results[1])
    return(PCA_results)
def split_taxa(alignment, outgroup, threshold = 0, hist_file = None, breaks = 20, PCA_results = None):
    """Split taxa into outliers (evaluated individually) and a base-tree
    group using their PCA score against ``threshold``.

    Writes the outlier names to elaboo_problematic_taxa.txt and returns
    them; raises when the outgroup itself lands in the outlier group."""
    logging.info("Begin SPLIT")
    get_different = alignment.get_different(pca = PCA_results, cutoff = threshold)
    logging.debug('Two classes identified:')
    logging.debug('First group (to be evaluated individually): %s' % get_different[0])
    logging.debug('Second group (to be used as the base tree): %s' % get_different[1])
    if hist_file is not None:
        draw_histogram([PCA_results[0][x] for x in PCA_results[0]], hist_file, threshold, breaks)
    # A diverged outgroup breaks the base tree, so fail fast here.
    if outgroup in get_different[0]:
        raise Exception("Outgroup %s was identified as an outlier. It is recommended to use a sequence that is not as diverged." % outgroup)
    with open("elaboo_problematic_taxa.txt", "w") as problematic_taxa:
        for taxon in get_different[0]:
            problematic_taxa.write("%s\n" % taxon)
    logging.info("SPLIT finalized.")
    return(get_different[0])
25a9668f83d6a95cc36c884c949ff9b17b6a3ce9 | 20,960 | py | Python | src/diamond/collector.py | devanshukoyalkar-rubrik/Diamond | c4c3f2e4723c2e4381b7bf5348cc3a25f321315d | [
"MIT"
] | null | null | null | src/diamond/collector.py | devanshukoyalkar-rubrik/Diamond | c4c3f2e4723c2e4381b7bf5348cc3a25f321315d | [
"MIT"
] | null | null | null | src/diamond/collector.py | devanshukoyalkar-rubrik/Diamond | c4c3f2e4723c2e4381b7bf5348cc3a25f321315d | [
"MIT"
] | null | null | null | # coding=utf-8
# Ignore lint errors as code is from github.com/python-diamond/Diamond
"""
The Collector class is a base class for all metric collectors.
"""
import logging
import os
import platform
import re
import socket
import subprocess
import time
import traceback
import configobj
from diamond.metric import Metric
from diamond.utils.config import load_config
from .error import DiamondException
# Detect the architecture of the system and set the counters for MAX_VALUES
# appropriately. Otherwise, rolling over counters will cause incorrect or
# negative values.
if platform.architecture()[0] == '64bit':
    MAX_COUNTER = (2 ** 64) - 1
else:
    MAX_COUNTER = (2 ** 32) - 1
# NOTE(review): presumably where per-plugin run counters persist — confirm.
DEFAULT_RUN_COUNT_DIR = "/etc/service/diamond/plugin_metrics"
def raw_hostname():
    """Return the Rubrik node id when present, otherwise the system hostname."""
    node_id_path = '/var/lib/rubrik/nodeId'
    if not os.path.exists(node_id_path):
        return socket.gethostname()
    with open(node_id_path) as node_id_file:
        return node_id_file.read().strip()
def get_hostname(config, method=None):
    """
    Returns a hostname as configured by the user.

    :param config: mapping that may contain 'hostname' and/or
        'hostname_method' keys
    :param method: override for config['hostname_method']; one of
        'shell', 'smart', 'fqdn_short', 'fqdn', 'fqdn_rev', 'uname_short',
        'uname_rev', 'hostname', 'hostname_short', 'hostname_rev', 'none'
    :raises DiamondException: if the resolved hostname is empty, or
        'shell' is requested without a 'hostname' command configured
    :raises NotImplementedError: for an unknown method

    Results are memoized per method in ``get_hostname.cached_results``.
    """
    method = method or config.get('hostname_method', 'smart')
    # case insensitive method
    method = method.lower()
    # A hardcoded 'hostname' wins over any method except 'shell',
    # where it is interpreted as the command to run instead.
    if 'hostname' in config and method != 'shell':
        return config['hostname']
    if method in get_hostname.cached_results:
        return get_hostname.cached_results[method]
    if method == 'shell':
        if 'hostname' not in config:
            raise DiamondException(
                "hostname must be set to a shell command for"
                " hostname_method=shell")
        else:
            proc = subprocess.Popen(config['hostname'],
                                    stdout=subprocess.PIPE)
            # NOTE(review): without text/universal_newlines mode,
            # communicate() returns bytes on Python 3 — confirm callers
            # expect a bytes hostname here.
            hostname = proc.communicate()[0].strip()
            if proc.returncode != 0:
                raise subprocess.CalledProcessError(proc.returncode,
                                                    config['hostname'])
            get_hostname.cached_results[method] = hostname
            return hostname
    if method == 'smart':
        # Try the short FQDN first; fall back to the raw hostname if it
        # resolves to the useless 'localhost'.
        hostname = get_hostname(config, 'fqdn_short')
        if hostname != 'localhost':
            get_hostname.cached_results[method] = hostname
            return hostname
        hostname = get_hostname(config, 'hostname_short')
        get_hostname.cached_results[method] = hostname
        return hostname
    if method == 'fqdn_short':
        # First label of the fully qualified domain name
        hostname = socket.getfqdn().split('.')[0]
        get_hostname.cached_results[method] = hostname
        if hostname == '':
            raise DiamondException('Hostname is empty?!')
        return hostname
    if method == 'fqdn':
        # Full FQDN with dots replaced, since '.' separates graphite paths
        hostname = socket.getfqdn().replace('.', '_')
        get_hostname.cached_results[method] = hostname
        if hostname == '':
            raise DiamondException('Hostname is empty?!')
        return hostname
    if method == 'fqdn_rev':
        # FQDN labels in reverse order (www.example.com -> com.example.www)
        hostname = socket.getfqdn().split('.')
        hostname.reverse()
        hostname = '.'.join(hostname)
        get_hostname.cached_results[method] = hostname
        if hostname == '':
            raise DiamondException('Hostname is empty?!')
        return hostname
    if method == 'uname_short':
        # First label of the uname nodename (similar to `uname -n`)
        hostname = os.uname()[1].split('.')[0]
        get_hostname.cached_results[method] = hostname
        if hostname == '':
            raise DiamondException('Hostname is empty?!')
        return hostname
    if method == 'uname_rev':
        # uname nodename labels in reverse order
        hostname = os.uname()[1].split('.')
        hostname.reverse()
        hostname = '.'.join(hostname)
        get_hostname.cached_results[method] = hostname
        if hostname == '':
            raise DiamondException('Hostname is empty?!')
        return hostname
    if method == 'hostname':
        # Node id file / OS hostname, unmodified
        hostname = raw_hostname()
        get_hostname.cached_results[method] = hostname
        if hostname == '':
            raise DiamondException('Hostname is empty?!')
        return hostname
    if method == 'hostname_short':
        # First label of the node id / OS hostname
        hostname = raw_hostname().split('.')[0]
        get_hostname.cached_results[method] = hostname
        if hostname == '':
            raise DiamondException('Hostname is empty?!')
        return hostname
    if method == 'hostname_rev':
        # Node id / OS hostname labels in reverse order
        hostname = raw_hostname().split('.')
        hostname.reverse()
        hostname = '.'.join(hostname)
        get_hostname.cached_results[method] = hostname
        if hostname == '':
            raise DiamondException('Hostname is empty?!')
        return hostname
    if method == 'none':
        # Explicitly no hostname component in metric paths
        get_hostname.cached_results[method] = None
        return None
    raise NotImplementedError(config['hostname_method'])
# Per-method memoization cache, shared module-wide.
get_hostname.cached_results = {}
def str_to_bool(value):
    """
    Converts truthy/falsey strings to a bool.

    Empty strings are false; non-string values pass through unchanged.
    Raises NotImplementedError for unrecognized strings.
    """
    if not isinstance(value, str):
        return value
    normalized = value.strip().lower()
    if normalized in ('true', 't', 'yes', 'y'):
        return True
    if normalized in ('false', 'f', 'no', 'n', ''):
        return False
    raise NotImplementedError("Unknown bool %s" % normalized)
class Collector(object):
    """
    The Collector class is a base class for all metric collectors.

    Subclasses implement :meth:`collect` and call :meth:`publish` /
    :meth:`publish_counter` to emit metrics to the configured handlers.
    """

    def __init__(self, config=None, handlers=None, name=None, configfile=None):
        """
        Create a new instance of the Collector class.

        :param config: optional override configuration, merged on top of
            the values read from ``configfile``
        :param handlers: handlers that receive published metrics
        :param name: collector name; defaults to the class name
        :param configfile: path to a configobj-style configuration file
        """
        # Initialize Logger
        self.log = logging.getLogger('diamond')
        # Initialize Members
        if name is None:
            self.name = self.__class__.__name__
        else:
            self.name = name
        # Avoid the mutable-default-argument pitfall: give each instance
        # its own list instead of sharing one ``[]`` default.
        self.handlers = handlers if handlers is not None else []
        self.last_values = {}
        self.configfile = None
        self.load_config(configfile, config)
        # Generate a metric-friendly collector name without spaces
        collector_name = self.name.replace(' ', '_')
        # Persist an incremented run count so restarts are observable
        run_count = self.read_run_counter(collector_name)
        run_count += 1
        self.write_run_counter(collector_name, run_count)

    def read_run_counter(self, collector_name):
        """
        Return the persisted run count for this collector.

        Any read/parse failure is logged, the counter file is reset,
        and 0 is returned.
        """
        filename = os.path.join(DEFAULT_RUN_COUNT_DIR, "%s_run_count" %
                                collector_name)
        count = 0
        try:
            with open(filename, 'r') as f:
                count = int(f.read().strip())
        except Exception:
            # Be extremely generous when it comes to handling file open or
            # read errors: log, then delete and recreate the counter file.
            # (Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.)
            self.log.error('Error when reading run count file for %s: %s' %
                           (self.name.replace(' ', '_'),
                            traceback.format_exc()))
            self.reset_run_counter(collector_name)
        return count

    def write_run_counter(self, collector_name, run_count):
        """Persist ``run_count`` for this collector."""
        filename = os.path.join(DEFAULT_RUN_COUNT_DIR, "%s_run_count" %
                                collector_name)
        with open(filename, 'w') as f:
            f.write("%d" % run_count)

    def reset_run_counter(self, collector_name):
        """Reset the run counter to 0 by recreating its file."""
        if not os.path.exists(DEFAULT_RUN_COUNT_DIR):
            os.makedirs(DEFAULT_RUN_COUNT_DIR)
        # Reset run counter by deleting the old file & creating a new one.
        filename = os.path.join(DEFAULT_RUN_COUNT_DIR, "%s_run_count" %
                                collector_name)
        if os.path.exists(filename):
            os.remove(filename)
        with open(filename, 'w') as f:
            f.write("0")

    def load_config(self, configfile=None, override_config=None):
        """
        Process a configfile, or reload if previously given one.

        Merge order (later wins): class defaults, then the config file's
        ``collectors.default`` and ``collectors.<name>`` sections, then
        the same sections of ``override_config``.
        """
        self.config = configobj.ConfigObj()
        # Load in the collector's defaults
        if self.get_default_config() is not None:
            self.config.merge(self.get_default_config())
        if configfile is not None:
            self.configfile = os.path.abspath(configfile)
        if self.configfile is not None:
            config = load_config(self.configfile)
            if 'collectors' in config:
                if 'default' in config['collectors']:
                    self.config.merge(config['collectors']['default'])
                if self.name in config['collectors']:
                    self.config.merge(config['collectors'][self.name])
        if override_config is not None:
            if 'collectors' in override_config:
                if 'default' in override_config['collectors']:
                    self.config.merge(override_config['collectors']['default'])
                if self.name in override_config['collectors']:
                    self.config.merge(override_config['collectors'][self.name])
        self.process_config()

    def process_config(self):
        """
        Intended to put any code that should be run after any config reload
        event.

        Normalizes string config values and compiles the metric
        whitelist/blacklist regexes.
        """
        if 'byte_unit' in self.config:
            if isinstance(self.config['byte_unit'], str):
                self.config['byte_unit'] = self.config['byte_unit'].split()
        if 'enabled' in self.config:
            self.config['enabled'] = str_to_bool(self.config['enabled'])
        if 'measure_collector_time' in self.config:
            self.config['measure_collector_time'] = str_to_bool(
                self.config['measure_collector_time'])
        # Raise an error if both whitelist and blacklist are specified
        if ((self.config.get('metrics_whitelist', None) and
                self.config.get('metrics_blacklist', None))):
            raise DiamondException(
                'Both metrics_whitelist and metrics_blacklist specified ' +
                'in file %s' % self.configfile)
        if self.config.get('metrics_whitelist', None):
            self.config['metrics_whitelist'] = re.compile(
                self.config['metrics_whitelist'])
        elif self.config.get('metrics_blacklist', None):
            self.config['metrics_blacklist'] = re.compile(
                self.config['metrics_blacklist'])

    def get_default_config_help(self):
        """
        Returns the help text for the configuration options for this collector
        """
        return {
            'enabled': 'Enable collecting these metrics',
            'byte_unit': 'Default numeric output(s)',
            'measure_collector_time': 'Collect the collector run time in ms',
            'metrics_whitelist': 'Regex to match metrics to transmit. ' +
                                 'Mutually exclusive with metrics_blacklist',
            'metrics_blacklist': 'Regex to match metrics to block. ' +
                                 'Mutually exclusive with metrics_whitelist',
        }

    def get_default_config(self):
        """
        Return the default config for the collector
        """
        return {
            # Defaults options for all Collectors
            # Uncomment and set to hardcode a hostname for the collector path
            # Keep in mind, periods are seperators in graphite
            # 'hostname': 'my_custom_hostname',
            # If you perfer to just use a different way of calculating the
            # hostname
            # Uncomment and set this to one of these values:
            # fqdn_short = Default. Similar to hostname -s
            # fqdn = hostname output
            # fqdn_rev = hostname in reverse (com.example.www)
            # uname_short = Similar to uname -n, but only the first part
            # uname_rev = uname -r in reverse (com.example.www)
            # 'hostname_method': 'fqdn_short',
            # All collectors are disabled by default
            'enabled': False,
            # Path Prefix
            'path_prefix': 'servers',
            # Path Prefix for Virtual Machine metrics
            'instance_prefix': 'instances',
            # Path Suffix
            'path_suffix': '',
            # Default run count file directory
            'run_count_dir': DEFAULT_RUN_COUNT_DIR,
            # Default Poll Interval (seconds)
            'interval': 300,
            # Default Event TTL (interval multiplier)
            'ttl_multiplier': 2,
            # Default numeric output
            'byte_unit': 'byte',
            # Collect the collector run time in ms
            'measure_collector_time': False,
            # Whitelist of metrics to let through
            'metrics_whitelist': None,
            # Blacklist of metrics to let through
            'metrics_blacklist': None,
        }

    def get_metric_path(self, name, instance=None):
        """
        Get metric path.

        Instance indicates that this is a metric for a
        virtual machine and should have a different
        root prefix.
        """
        if 'path' in self.config:
            path = self.config['path']
        else:
            path = self.__class__.__name__
        if instance is not None:
            if 'instance_prefix' in self.config:
                prefix = self.config['instance_prefix']
            else:
                prefix = 'instances'
            if path == '.':
                return '.'.join([prefix, instance, name])
            else:
                return '.'.join([prefix, instance, path, name])
        if 'path_prefix' in self.config:
            prefix = self.config['path_prefix']
        else:
            prefix = 'systems'
        if 'path_suffix' in self.config:
            suffix = self.config['path_suffix']
        else:
            suffix = None
        hostname = get_hostname(self.config)
        if hostname is not None:
            if prefix:
                prefix = ".".join((prefix, hostname))
            else:
                prefix = hostname
        # if there is a suffix, add after the hostname
        if suffix:
            prefix = '.'.join((prefix, suffix))
        is_path_invalid = path == '.' or not path
        if is_path_invalid and prefix:
            return '.'.join([prefix, name])
        elif prefix:
            return '.'.join([prefix, path, name])
        elif is_path_invalid:
            return name
        else:
            return '.'.join([path, name])

    def get_hostname(self):
        """Return the hostname as resolved from this collector's config."""
        return get_hostname(self.config)

    def collect(self):
        """
        Default collector method; subclasses must override.
        """
        raise NotImplementedError()

    def publish(self, name, value, raw_value=None, precision=0,
                metric_type='GAUGE', instance=None):
        """
        Publish a metric with the given name.

        Drops the metric when it does not match the configured
        whitelist / matches the blacklist.
        """
        # Check whitelist/blacklist
        if self.config['metrics_whitelist']:
            if not self.config['metrics_whitelist'].match(name):
                return
        elif self.config['metrics_blacklist']:
            if self.config['metrics_blacklist'].match(name):
                return
        # Get metric Path
        path = self.get_metric_path(name, instance=instance)
        # Get metric TTL
        ttl = float(self.config['interval']) * float(
            self.config['ttl_multiplier'])
        # Create Metric
        try:
            metric = Metric(path, value, raw_value=raw_value, timestamp=None,
                            precision=precision, host=self.get_hostname(),
                            metric_type=metric_type, ttl=ttl)
        except DiamondException:
            self.log.error(('Error when creating new Metric: path=%r, '
                            'value=%r'), path, value)
            raise
        # Publish Metric
        self.publish_metric(metric)

    def publish_metric(self, metric):
        """
        Publish a Metric object to every configured handler.
        """
        # Process Metric
        for handler in self.handlers:
            handler._process(metric)

    def publish_gauge(self, name, value, precision=0, instance=None):
        """Publish ``value`` as a GAUGE metric."""
        return self.publish(name, value, precision=precision,
                            metric_type='GAUGE', instance=instance)

    def publish_counter(self, name, value, precision=0, max_value=0,
                        time_delta=True, interval=None, allow_negative=False,
                        instance=None):
        """Publish ``value`` as a COUNTER metric, deriving its rate."""
        raw_value = value
        value = self.derivative(name, value, max_value=max_value,
                                time_delta=time_delta, interval=interval,
                                allow_negative=allow_negative,
                                instance=instance)
        return self.publish(name, value, raw_value=raw_value,
                            precision=precision, metric_type='COUNTER',
                            instance=instance)

    def derivative(self, name, new, max_value=0,
                   time_delta=True, interval=None,
                   allow_negative=False, instance=None):
        """
        Calculate the derivative of the metric.

        Uses the previous sample stored in ``self.last_values``;
        the first call for a given path returns 0. ``max_value``
        handles counter rollover.
        """
        # Format Metric Path
        path = self.get_metric_path(name, instance=instance)
        if path in self.last_values:
            old = self.last_values[path]
            # Check for rollover
            if new < old:
                old = old - max_value
            # Get Change in X (value)
            derivative_x = new - old
            # If we pass in a interval, use it rather then the configured one
            if interval is None:
                interval = float(self.config['interval'])
            # Get Change in Y (time)
            if time_delta:
                derivative_y = interval
            else:
                derivative_y = 1
            result = float(derivative_x) / float(derivative_y)
            if result < 0 and not allow_negative:
                result = 0
        else:
            result = 0
        # Store Old Value
        self.last_values[path] = new
        # Return result
        return result

    def _run(self):
        """
        Run the collector unless it's already running.

        Always flushes all handlers afterwards, even when collect() raises.
        """
        try:
            start_time = time.time()
            # Collect Data
            self.collect()
            end_time = time.time()
            collector_time = int((end_time - start_time) * 1000)
            self.log.debug('Collection took %s ms', collector_time)
            if 'measure_collector_time' in self.config:
                if self.config['measure_collector_time']:
                    metric_name = 'collector_time_ms'
                    metric_value = collector_time
                    self.publish(metric_name, metric_value)
        finally:
            # After collector run, invoke a flush
            # method on each handler.
            for handler in self.handlers:
                handler._flush()

    def find_binary(self, binary):
        """
        Scan and return the first path to a binary that we can find.

        Falls back to returning ``binary`` unchanged if nothing matches.
        """
        if os.path.exists(binary):
            return binary
        # Extract out the filename if we were given a full path
        binary_name = os.path.basename(binary)
        # Gather $PATH
        search_paths = os.environ['PATH'].split(':')
        # Extra paths to scan...
        # Bug fix: the original list was missing commas after '/bin' and
        # '/sbin', so adjacent string literals concatenated into the bogus
        # paths '/bin/usr/local/bin' and '/sbin/usr/local/sbin'.
        default_paths = [
            '/usr/bin',
            '/bin',
            '/usr/local/bin',
            '/usr/sbin',
            '/sbin',
            '/usr/local/sbin',
        ]
        for path in default_paths:
            if path not in search_paths:
                search_paths.append(path)
        for path in search_paths:
            if os.path.isdir(path):
                filename = os.path.join(path, binary_name)
                if os.path.exists(filename):
                    return filename
        return binary
class ProcessCollector(Collector):
    """
    Collector with helpers for handling running commands with/without sudo
    """

    def get_default_config_help(self):
        """Extend the base help text with the sudo-related options."""
        config_help = super(ProcessCollector, self).get_default_config_help()
        config_help.update({
            'use_sudo': 'Use sudo?',
            'sudo_cmd': 'Path to sudo',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(ProcessCollector, self).get_default_config()
        config.update({
            'use_sudo': False,
            'sudo_cmd': self.find_binary('/usr/bin/sudo'),
        })
        return config

    def run_command(self, args):
        """
        Run the configured binary with ``args``; returns the
        (stdout, stderr) tuple from Popen.communicate(), or None on
        OSError.

        :raises Exception: if no binary is configured or it is not
            executable
        """
        if 'bin' not in self.config:
            raise Exception('config does not have any binary configured')
        if not os.access(self.config['bin'], os.X_OK):
            raise Exception('%s is not executable' % self.config['bin'])
        # Build a fresh list instead of inserting into ``args``:
        # the original mutated the caller's list in place, so repeated
        # calls with the same list accumulated extra leading entries.
        command = [self.config['bin']] + list(args)
        if str_to_bool(self.config['use_sudo']):
            command.insert(0, self.config['sudo_cmd'])
        try:
            return subprocess.Popen(command,
                                    stdout=subprocess.PIPE).communicate()
        except OSError:
            self.log.exception("Unable to run %s", command)
            return None
| 33.269841 | 79 | 0.5677 |
3fb4cfcfec408da103431c1e2aa752e17f615b56 | 4,613 | py | Python | hip/utils.py | kasrsf/hip-tensorflow | 24d2a350f7ef8d4baf7fcc4bed4c5256a8fe039e | [
"MIT"
] | 2 | 2020-10-28T00:07:19.000Z | 2021-02-25T09:16:16.000Z | hip/utils.py | kasrsf/hip-tensorflow | 24d2a350f7ef8d4baf7fcc4bed4c5256a8fe039e | [
"MIT"
] | 7 | 2019-01-09T14:42:51.000Z | 2022-02-09T23:54:39.000Z | hip/utils.py | kasrsf/hip-tensorflow | 24d2a350f7ef8d4baf7fcc4bed4c5256a8fe039e | [
"MIT"
] | 3 | 2019-01-03T07:20:54.000Z | 2019-05-28T05:07:18.000Z | import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def load_data_from_csv(filename):
    """Load feature series and a target series from a CSV file.

    The last column of the CSV is always taken as the target series;
    every preceding column is a feature time series.

    Returns a 4-tuple:
        (features as array of shape (n_features, n_rows),
         target values as 1-D array,
         list of feature column names,
         target column name)
    """
    raw_df = pd.read_csv(filename)
    feature_df, target_df = np.split(raw_df, [-1], axis=1)
    target_column = list(target_df)[0]
    return (feature_df.values.T,
            target_df.values.T[0],
            list(feature_df),
            target_column)
def print_params_to_tsv(params, feature_name):
    """Print one tab-separated row of fitted parameters to stdout.

    Columns: feature_name, eta, mu, theta.

    :param params: dict with keys 'eta', 'theta', and 'mu' (where
        params['mu'][0][0] holds the scalar value)
    :param feature_name: label printed in the first column
    """
    # Removed dead code: an unused ``param_names`` list and a
    # commented-out header print.
    eta = params['eta']
    mu = params['mu'][0][0]
    theta = params['theta']
    print('\t'.join(str(x) for x in (feature_name, eta, mu, theta)))
def plot_predictions(y_truth, y_predictions, xs=None, train_test_split_point=0.8, legend=True):
    """
    Plot the current predictions from the fitted model.

    Draws one subplot per target series in a square grid, showing the
    observed series (dashed black), optional exogenous feature series
    (rainbow, faint), the model fit on the training region, the model
    predictions on the test region, and a vertical line at the
    train/test split.

    :param y_truth: sequence of observed target series (equal lengths)
    :param y_predictions: matching sequence of predicted series
    :param xs: optional per-series collection of exogenous series
    :param train_test_split_point: fraction of each series used as training
    :param legend: NOTE(review): accepted but never used in the body
    """
    num_of_series = len(y_truth)
    data_length = len(y_truth[0])
    data_test_split_point = (int)(data_length * train_test_split_point)
    # Square grid large enough to hold one axis per series
    srows = (int)(np.ceil(np.sqrt(num_of_series)))
    # NOTE(review): ``fig`` is unused beyond creation
    fig, axes = plt.subplots(srows, srows, sharex='all')
    for i in range(num_of_series):
        row = (int)(i / srows)
        col = (int)(i % srows)
        truth = y_truth[i]
        pred = y_predictions[i]
        # With a single series, plt.subplots returns a bare axis, so the
        # module-level pyplot interface is used instead of indexing axes.
        if num_of_series == 1:
            ax = plt
        else:
            ax = axes[row, col]
        ax.axvline(data_test_split_point, color='k')
        ax.plot(np.arange(data_length), truth, 'k--', label='Observed #views')
        if xs is not None:
            x = xs[i]
            # One distinct rainbow color per exogenous series
            colors = iter(plt.cm.rainbow(np.linspace(0, 1, len(x))))
            for index, exo_source in enumerate(x):
                c = next(colors)
                ax.plot(np.arange(data_length), exo_source, c=c, alpha=0.3)
        # plot predictions on training data with a different alpha to make the plot more clear
        ax.plot(
            np.arange(data_test_split_point+1),
            pred[:data_test_split_point+1],
            'b-',
            alpha=0.5,
            label='Model Fit'
        )
        ax.plot(
            np.arange(data_test_split_point, data_length),
            pred[data_test_split_point:],
            'b-',
            alpha=1,
            label='Model Predictions'
        )
    plt.show()
def get_test_rmse(truth, predictions, train_test_split=0.8):
    """Sum of per-series RMSE over the held-out (test) portion.

    :param truth: sequence of observed series (equal lengths)
    :param predictions: matching sequence of predicted series
    :param train_test_split: fraction of each series treated as training;
        everything after ``int(split * len) + 1`` is the test region
    :return: sum over series of sqrt(mean((pred - truth) ** 2))

    Bug fix: the original computed ``sqrt(sum(diff) ** 2) / n`` — it
    squared the *sum* of the errors, so positive and negative errors
    cancelled out. True RMSE squares each error before averaging.
    """
    loss = 0.0
    split_point = (int)(train_test_split * len(truth[0])) + 1
    for i in range(len(predictions)):
        y_truth = np.asarray(truth[i][split_point:])
        y_pred = np.asarray(predictions[i][split_point:])
        loss += np.sqrt(np.mean((y_pred - y_truth) ** 2))
    return loss
class TimeSeriesScaler():
    """Min-max scaler for collections of time series.

    Feature (x) series are scaled to [0, 1] statelessly; target (y)
    series scaling parameters are recorded per series so predictions
    can be mapped back with :meth:`invert_transform_ys`.
    """

    def __init__(self):
        # Per-series minima/maxima recorded by transform_ys, consumed
        # (by index) in invert_transform_ys.
        self.y_mins = []
        self.y_maxs = []

    def transform_x(self, x):
        """Scale a single series to [0, 1]; constant series unchanged.

        Bug fix: the original guarded on ``x_max > 0``, which (a) divided
        by zero for constant positive series (x_max == x_min) and (b)
        silently skipped scaling of non-constant all-negative series.
        Guarding on ``x_max > x_min`` fixes both.
        """
        x_min = np.min(x)
        x_max = np.max(x)
        if x_max > x_min:
            return (x - x_min) / (x_max - x_min)
        return x

    def transform_xs(self, xs):
        """Apply transform_x to every series of every series group."""
        scaled_xs = []
        for x_series in xs:
            scaled_x_series = [self.transform_x(x) for x in x_series]
            scaled_xs.append(scaled_x_series)
        return np.asarray(scaled_xs)

    def transform_add_y(self, y):
        """Scale one target series to [0, 1] and record its min/max.

        Bug fix: guards against division by zero for constant series;
        such series scale to all zeros, and invert_transform_ys still
        round-trips them correctly (0 * 0 + y_min == y_min).
        """
        y_min = np.min(y)
        y_max = np.max(y)
        if y_max > y_min:
            scaled_y = (y - y_min) / (y_max - y_min)
        else:
            scaled_y = y - y_min
        self.y_mins.append(y_min)
        self.y_maxs.append(y_max)
        return scaled_y

    def transform_ys(self, ys):
        """Scale all target series, resetting any previously stored params."""
        self.y_mins = []
        self.y_maxs = []
        scaled_ys = [self.transform_add_y(y) for y in ys]
        return np.asarray(scaled_ys)

    def invert_transform_ys(self, scaled_ys):
        """Map scaled target series back to their original ranges."""
        rescaled_ys = []
        for index, scaled_y in enumerate(scaled_ys):
            rescaled_y = (
                scaled_y * (self.y_maxs[index] - self.y_mins[index]) +
                self.y_mins[index]
            )
            rescaled_ys.append(rescaled_y)
        return np.asarray(rescaled_ys)
01236c452b8030b07466d4c3ea48eee0466be1da | 962 | py | Python | website/visualizeData.py | BornToDebug/homeStruction | 354e03c05cb363d8397d0e2d7afeb78a029266f9 | [
"Apache-2.0"
] | 6 | 2016-08-31T16:46:54.000Z | 2017-09-15T19:34:30.000Z | website/visualizeData.py | BornToDebug/homeStruction | 354e03c05cb363d8397d0e2d7afeb78a029266f9 | [
"Apache-2.0"
] | 4 | 2016-09-02T09:18:41.000Z | 2016-09-02T09:24:08.000Z | website/visualizeData.py | BornToDebug/homeStruction | 354e03c05cb363d8397d0e2d7afeb78a029266f9 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
from matplotlib import dates
import django
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "homeStruction.settings")
django.setup()
from project.models import Temperature
from django.utils import timezone
# Get temperature data from the last 24 hours
tempArray = Temperature.objects.order_by('-time_recorded').\
filter(time_recorded__gte=timezone.now() - timezone.timedelta(days=1))
print tempArray
dateTemp = []
valueTemp = []
for item in tempArray:
dateTemp.append(item.time_recorded)
valueTemp.append(item.value)
print dateTemp, valueTemp
formating = dates.DateFormatter('%H:%M')
print dates.date2num(dateTemp)
cmap = (0, 0, 0)
plt.plot(dateTemp, valueTemp, color=cmap, linewidth=2.0)
plt.ylabel('temperature C'), plt.xlabel('time')
plt.xticks(rotation='vertical')
plt.subplots_adjust(bottom=.3)
plt.savefig('image.png', bbox_inches='tight', transparent='true')
plt.show()
| 26 | 74 | 0.769231 |
51f073b60f5683c647da0bc2b1b6e5742b307cdf | 895 | py | Python | env/bin/rst2xetex.py | marcotroisi/zip3 | 43c3b0d4baf729405f2c5fdd580ab8ae7038fb6e | [
"MIT"
] | 3 | 2020-08-04T20:29:41.000Z | 2020-11-09T09:28:19.000Z | env/bin/rst2xetex.py | marcotroisi/zip3 | 43c3b0d4baf729405f2c5fdd580ab8ae7038fb6e | [
"MIT"
] | null | null | null | env/bin/rst2xetex.py | marcotroisi/zip3 | 43c3b0d4baf729405f2c5fdd580ab8ae7038fb6e | [
"MIT"
] | null | null | null | #!/Users/marco/Projects/zip/env/bin/python
# $Id: rst2xetex.py 7847 2015-03-17 17:30:47Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Lua/XeLaTeX code.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources for compilation with the Unicode-aware TeX variants '
'XeLaTeX or LuaLaTeX. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='xetex', description=description)
| 31.964286 | 77 | 0.673743 |
3be66f3b32dbeb404bc656a987f0c5ba7e046bac | 3,045 | py | Python | neo/Core/TX/PublishTransaction.py | volekerb/neo-python | 5bdded2c339219355cf1d31ae58653b0f94c6e51 | [
"MIT"
] | 387 | 2017-07-17T18:25:54.000Z | 2021-11-18T06:19:47.000Z | neo/Core/TX/PublishTransaction.py | volekerb/neo-python | 5bdded2c339219355cf1d31ae58653b0f94c6e51 | [
"MIT"
] | 967 | 2017-08-19T15:48:03.000Z | 2021-06-01T21:42:39.000Z | neo/Core/TX/PublishTransaction.py | volekerb/neo-python | 5bdded2c339219355cf1d31ae58653b0f94c6e51 | [
"MIT"
] | 286 | 2017-07-17T03:44:36.000Z | 2021-11-18T06:19:32.000Z | from neo.Core.TX.Transaction import Transaction, TransactionType
from neo.Core.FunctionCode import FunctionCode
from neo.Core.Size import GetVarSize
from neo.Core.Size import Size as s
from neo.logging import log_manager
logger = log_manager.getLogger()
class PublishTransaction(Transaction):
    """Transaction that publishes a smart contract to the blockchain.

    Serialization order and field widths are wire-format-critical and
    must not be changed.
    """

    def __init__(self, *args, **kwargs):
        """
        Create instance.

        Args:
            *args:
            **kwargs:
        """
        super(PublishTransaction, self).__init__(*args, **kwargs)
        self.Type = TransactionType.PublishTransaction
        # FunctionCode holding the contract script and parameter list
        self.Code = None
        # Whether the contract requires persistent storage (Version >= 1)
        self.NeedStorage = False
        # Contract metadata fields; populated as bytes by deserialization
        # (see the .decode('utf-8') calls in ToJson)
        self.Name = ''
        self.CodeVersion = ''
        self.Author = ''
        self.Email = ''
        self.Description = ''

    def Size(self):
        """
        Get the total size in bytes of the object.

        Returns:
            int: size.
        """
        # s.uint8 presumably accounts for the NeedStorage flag byte
        return super(PublishTransaction, self).Size() + GetVarSize(self.Code.Script) + GetVarSize(self.Code.ParameterList) + s.uint8 + GetVarSize(
            self.Name) + GetVarSize(self.CodeVersion) + GetVarSize(self.Author) + GetVarSize(self.Email) + GetVarSize(self.Description)

    def DeserializeExclusiveData(self, reader):
        """
        Deserialize full object.

        Args:
            reader (neo.IO.BinaryReader):
        """
        # NOTE(review): unsupported versions are only logged here, not
        # rejected — deserialization continues regardless. Confirm intended.
        if self.Version > 1:
            logger.error("format exception...")
        self.Code = FunctionCode()
        self.Code.Deserialize(reader)
        # The NeedStorage flag only exists on the wire from version 1 on
        if self.Version >= 1:
            self.NeedStorage = reader.ReadBool()
        else:
            self.NeedStorage = False
        self.Name = reader.ReadVarString()
        self.CodeVersion = reader.ReadVarString()
        self.Author = reader.ReadVarString()
        self.Email = reader.ReadVarString()
        self.Description = reader.ReadVarString()

    def SerializeExclusiveData(self, writer):
        """
        Serialize object.

        Args:
            writer (neo.IO.BinaryWriter):
        """
        self.Code.Serialize(writer)
        # Mirror of DeserializeExclusiveData: flag written from version 1 on
        if self.Version >= 1:
            writer.WriteBool(self.NeedStorage)
        writer.WriteVarString(self.Name)
        writer.WriteVarString(self.CodeVersion)
        writer.WriteVarString(self.Author)
        writer.WriteVarString(self.Email)
        writer.WriteVarString(self.Description)

    def ToJson(self):
        """
        Convert object members to a dictionary that can be parsed as JSON.

        Returns:
            dict:
        """
        jsn = super(PublishTransaction, self).ToJson()
        jsn['contract'] = {}
        jsn['contract']['code'] = self.Code.ToJson()
        jsn['contract']['needstorage'] = self.NeedStorage
        # Metadata fields are bytes after deserialization; decode for JSON
        jsn['contract']['name'] = self.Name.decode('utf-8')
        jsn['contract']['version'] = self.CodeVersion.decode('utf-8')
        jsn['contract']['author'] = self.Author.decode('utf-8')
        jsn['contract']['email'] = self.Email.decode('utf-8')
        jsn['contract']['description'] = self.Description.decode('utf-8')
        return jsn
| 31.071429 | 146 | 0.598686 |
3d26f4131b994b4522424383cd74bbd13e97c07d | 5,394 | py | Python | api/app.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | api/app.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | api/app.py | korenlev/calipso-cvim | 39278a5cf09c40b26a8a143ccc0c8d437961abc2 | [
"Apache-2.0"
] | null | null | null | ###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
import importlib
import falcon
from api.auth.token import Token
from api.backends import auth_backend
from api.backends.credentials_backend import CredentialsBackend
from api.backends.ldap_backend import LDAPBackend
from api.exceptions.exceptions import CalipsoApiException
from api.middleware import AuthenticationMiddleware, CORSMiddleware
from base.utils.inventory_mgr import InventoryMgr
from base.utils.logging.full_logger import FullLogger
from base.utils.mongo_access import MongoAccess
class App:
    """Falcon-based REST API application for Calipso.

    Builds the WSGI app, wires authentication (LDAP, credentials, or
    none) and registers all resource routes, including Grafana-proxy
    variants of the core endpoints.
    """

    # URL -> "module.path.ClassName" (relative to responders_path)
    CORE_ENDPOINTS = {
        "/aggregates": "resource.aggregates.Aggregates",
        "/clique_constraints": "resource.clique_constraints.CliqueConstraints",
        "/clique_types": "resource.clique_types.CliqueTypes",
        "/cliques": "resource.cliques.Cliques",
        "/connection_tests": "resource.connection_tests.ConnectionTests",
        "/constants": "resource.constants.Constants",
        "/environment_configs": "resource.environment_configs.EnvironmentConfigs",
        "/graph": "resource.graph.Graph",
        "/health": "resource.health.Health",
        "/inventory": "resource.inventory.Inventory",
        "/links": "resource.links.Links",
        "/messages": "resource.messages.Messages",
        "/monitoring_config_templates": "resource.monitoring_config_templates.MonitoringConfigTemplates",
        "/scans": "resource.scans.Scans",
        "/scheduled_scans": "resource.scheduled_scans.ScheduledScans",
        "/schema": "resource.schema.Schema",
        "/search": "resource.search.Search",
        "/timezone": "resource.timezone.Timezone",
    }
    # Endpoints consumed by the Grafana SimpleJSON-style datasource
    BASE_GRAFANA_ENDPOINTS = {
        "/grafana": "grafana.__init__.Health",
        "/grafana/search": "grafana.search.Search",
        "/grafana/query": "grafana.query.Query",
    }
    # Full routing table: auth, core, Grafana, plus every core endpoint
    # mirrored under the /grafana/query prefix.
    ROUTE_DECLARATIONS = {
        "/auth/tokens": "auth.tokens.Tokens",
        **CORE_ENDPOINTS,
        **BASE_GRAFANA_ENDPOINTS,
        **{"/grafana/query{}".format(k): v for k, v in CORE_ENDPOINTS.items()}
    }
    # Package prefix under which all responder classes live
    responders_path = "api.responders"

    def __init__(self, mongo_config: str = "", ldap_enabled: bool = True, ldap_config: str = "", auth_config: str = "",
                 log_level: str = "", log_file: str = "", inventory: str = "", token_lifetime: int = 86400):
        """Configure Mongo access, auth backend, middleware and routes.

        :param mongo_config: path to the MongoDB access config file
        :param ldap_enabled: prefer LDAP authentication when True
        :param ldap_config: path to the LDAP backend config file
        :param auth_config: path to the credentials backend config file
        :param log_level: logging level name for the API logger
        :param log_file: log file path for the API logger
        :param inventory: inventory collection name
        :param token_lifetime: auth token lifetime in seconds (default 24h)
        """
        MongoAccess.set_config_file(mongo_config)
        self.inv = InventoryMgr()
        self.inv.set_collections(inventory)
        self.log = FullLogger(name="API", log_file=log_file, level=log_level)
        self.setup_auth_backend(ldap_enabled=ldap_enabled, ldap_config=ldap_config, auth_config=auth_config,
                                log_file=log_file, log_level=log_level)
        Token.set_token_lifetime(token_lifetime)
        self.middleware = [
            AuthenticationMiddleware(log_file=log_file, log_level=log_level),
            CORSMiddleware()
        ]
        self.app = falcon.API(middleware=self.middleware)
        self.app.add_error_handler(CalipsoApiException)
        self.app.req_options.strip_url_path_trailing_slash = True
        self.set_routes(self.app)

    def get_app(self):
        """Return the configured falcon.API WSGI application."""
        return self.app

    def set_routes(self, app):
        """Instantiate each responder class and register it on its route."""
        for url in self.ROUTE_DECLARATIONS.keys():
            class_path = self.ROUTE_DECLARATIONS.get(url)
            # Split "pkg.module.ClassName" into module path and class name
            module = self.responders_path + "." + class_path[:class_path.rindex(".")]
            class_name = class_path.split('.')[-1]
            module = importlib.import_module(module)
            class_ = getattr(module, class_name)
            resource = class_()
            app.add_route(url, resource)

    def setup_auth_backend(self, ldap_enabled: bool, ldap_config: str, auth_config: str = "",
                           log_file: str = "", log_level: str = ""):
        """Install the authentication backend into api.backends.auth_backend.

        Preference order: LDAP (if enabled), then credentials file, then
        no authentication. Failure of an explicitly requested backend is
        fatal (raises ValueError) rather than silently falling through.
        """
        if ldap_enabled:
            try:
                auth_backend.ApiAuth = LDAPBackend(config_file_path=ldap_config, log_file=log_file, log_level=log_level)
                return
            except ValueError as e:
                self.log.error("Failed to setup LDAP access. Exception: {}".format(e))
                raise ValueError("LDAP authentication required.")
        elif auth_config:
            try:
                auth_backend.ApiAuth = CredentialsBackend(auth_config)
                self.log.info("Set up credentials authentication")
                return
            except ValueError as e:
                self.log.error("Failed to setup credentials access. Exception: {}".format(e))
                raise ValueError("Credentials authentication required.")
        else:
            self.log.info("Skipping LDAP authentication")
        # TODO: try mongo auth
        self.log.warning("Falling back to no authentication")
3011012c4861c5b6f58750a353b56dc0add4a2d9 | 14,885 | py | Python | Pyrado/pyrado/environments/mujoco/base.py | theogruner/SimuRLacra | 4893514ccdeb10a736c55de9aa7753fd51c5afec | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | null | null | null | Pyrado/pyrado/environments/mujoco/base.py | theogruner/SimuRLacra | 4893514ccdeb10a736c55de9aa7753fd51c5afec | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | null | null | null | Pyrado/pyrado/environments/mujoco/base.py | theogruner/SimuRLacra | 4893514ccdeb10a736c55de9aa7753fd51c5afec | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from abc import ABC, abstractmethod
from copy import deepcopy
from math import floor
from typing import Optional
import mujoco_py
import numpy as np
from init_args_serializer import Serializable
from mujoco_py.generated.const import RND_FOG
import pyrado
from pyrado.environments.sim_base import SimEnv
from pyrado.spaces.base import Space
from pyrado.tasks.base import Task
from pyrado.utils.data_types import RenderMode
from pyrado.utils.input_output import print_cbt
class MujocoSimEnv(SimEnv, ABC, Serializable):
    """
    Base class for MuJoCo environments.
    Uses Serializable to facilitate proper serialization.

    .. seealso::
        https://github.com/openai/gym/blob/master/gym/envs/mujoco/mujoco_env.py
    """

    def __init__(
        self,
        model_path: str,
        frame_skip: int = 1,
        dt: Optional[float] = None,
        max_steps: int = pyrado.inf,
        task_args: Optional[dict] = None,
    ):
        """
        Constructor

        :param model_path: path to the MuJoCo xml model config file
        :param frame_skip: number of simulation frames for which the same action is held, results in a multiplier of
                           the time step size `dt`
        :param dt: by default the time step size is the one from the mujoco config file multiplied by the number of
                   frame skips (legacy from OpenAI environments). By passing an explicit `dt` value, this can be
                   overwritten. Possible use case is if you know that you recorded a trajectory with a specific `dt`.
        :param max_steps: max number of simulation time steps
        :param task_args: arguments for the task construction, e.g `dict(fwd_rew_weight=1.)`
        :raises pyrado.ValueErr: if an explicit `dt` is smaller than the solver's time step size
        """
        Serializable._init(self, locals())

        # Initialize
        self.model_path = model_path
        self._domain_param = self.get_nominal_domain_param()

        if dt is None:
            # Specify the time step size as a multiple of MuJoCo's simulation time step size
            self.frame_skip = frame_skip
        else:
            # Specify the time step size explicitly
            with open(self.model_path, mode="r") as file_raw:
                xml_model_temp = file_raw.read()
                xml_model_temp = self._adapt_model_file(xml_model_temp, self.domain_param)
            # Create a dummy model to extract the solver's time step size
            model_tmp = mujoco_py.load_model_from_xml(xml_model_temp)
            frame_skip = dt / model_tmp.opt.timestep
            if frame_skip.is_integer():
                self.frame_skip = int(frame_skip)
            elif dt > model_tmp.opt.timestep:
                print_cbt(
                    f"The desired time step size is {dt} s, but solver's time step size in the MuJoCo config file is "
                    f"{model_tmp.opt.timestep} s. Thus, frame_skip is rounded down to {floor(frame_skip)}.",
                    "y",
                )
                self.frame_skip = floor(frame_skip)
            else:
                # The number of skipped frames must be >= 1, i.e. dt must not be smaller than the solver's step.
                # Bug fix: the exception was previously constructed but never raised, so an invalid dt fell
                # through silently and left frame_skip unset to a valid integer.
                raise pyrado.ValueErr(given=dt, ge_constraint=model_tmp.opt.timestep)

        # Create the MuJoCo model
        with open(self.model_path, mode="r") as file_raw:
            # Save raw (with placeholders) XML-file as attribute since we need it for resetting the domain params
            self.xml_model_template = file_raw.read()
        self._create_mujoco_model()

        # Call SimEnv's constructor
        super().__init__(dt=self.model.opt.timestep * self.frame_skip, max_steps=max_steps)

        # Memorize the initial states of the model from the xml (for fixed init space or later reset)
        self.init_qpos = self.sim.data.qpos.copy()
        self.init_qvel = self.sim.data.qvel.copy()

        # Initialize space (to be overwritten in constructor of subclasses)
        self._init_space = None

        # Create task
        if not (isinstance(task_args, dict) or task_args is None):
            raise pyrado.TypeErr(given=task_args, expected_type=dict)
        self.task_args = dict() if task_args is None else task_args
        self._task = self._create_task(self.task_args)

        # Visualization
        self.camera_config = dict()
        self.viewer = None
        self._curr_act = np.zeros(self.act_space.shape)

    @property
    @abstractmethod
    def state_space(self) -> Space:
        raise NotImplementedError

    @property
    @abstractmethod
    def obs_space(self) -> Space:
        raise NotImplementedError

    @property
    @abstractmethod
    def act_space(self) -> Space:
        raise NotImplementedError

    @property
    def init_space(self) -> Space:
        return self._init_space

    @init_space.setter
    def init_space(self, space: Space):
        if not isinstance(space, Space):
            raise pyrado.TypeErr(given=space, expected_type=Space)
        self._init_space = space

    @property
    def task(self) -> Task:
        return self._task

    @abstractmethod
    def _create_task(self, task_args: dict) -> Task:
        # Needs to be implemented by subclasses
        raise NotImplementedError

    @property
    def domain_param(self) -> dict:
        # Return a deep copy so callers cannot mutate the internal dict in place
        return deepcopy(self._domain_param)

    @domain_param.setter
    def domain_param(self, domain_param: dict):
        if not isinstance(domain_param, dict):
            raise pyrado.TypeErr(given=domain_param, expected_type=dict)
        # Update the parameters
        self._domain_param.update(domain_param)
        # Update MuJoCo model
        self._create_mujoco_model()
        if self.viewer is not None:
            # If the viewer already exists and we reset the domain parameters, we must also recreate the viewer
            # since it references the simulation object which gets reconstructed during _create_mujoco_model()
            import glfw

            glfw.destroy_window(self.viewer.window)
            self.viewer = None
        # Update task
        self._task = self._create_task(self.task_args)

    def _adapt_model_file(self, xml_model: str, domain_param: dict) -> str:
        """
        Changes the model's XML-file given the current domain parameters before constructing the MuJoCo simulation.
        One use case is for example the cup_scale for the `WAMBallInCupSim` where multiple values in the model's
        XML-file are changed based on one domain parameter.

        .. note::
            It is mandatory to call this function in case you modified the xml config file with tags like `[DP_NAME]`.

        :param xml_model: parsed model file
        :param domain_param: copy of the environments domain parameters
        :return: adapted model file where the placeholders are filled with numerical values
        """
        # The mesh dir is not resolved when later passed as a string, thus we do it manually
        xml_model = xml_model.replace("[ASSETS_DIR]", pyrado.MUJOCO_ASSETS_DIR)
        # Replace all occurrences of the domain parameter placeholder with its value
        for key, value in domain_param.items():
            xml_model = xml_model.replace(f"[{key}]", str(value))
        return xml_model

    @abstractmethod
    def _mujoco_step(self, act: np.ndarray) -> dict:
        """
        Apply the given action to the MuJoCo simulation. This executes one step of the physics simulation.

        :param act: action
        :return: dict with optional information from MuJoCo
        """

    def _create_mujoco_model(self):
        """
        Called to update the MuJoCo model by rewriting and reloading the XML file.

        .. note::
            This function is called from the constructor and from the domain parameter setter.
        """
        xml_model = self.xml_model_template  # don't change the template
        xml_model = self._adapt_model_file(xml_model, self.domain_param)
        # Create MuJoCo model from parsed XML file
        self.model = mujoco_py.load_model_from_xml(xml_model)
        self.sim = mujoco_py.MjSim(self.model, nsubsteps=self.frame_skip)

    def configure_viewer(self):
        """Configure the camera when the viewer is initialized. You need to set `self.camera_config` before."""
        # Render a fog around the scene by default
        if self.camera_config.pop("render_fog", True):
            self.viewer.scn.flags[RND_FOG] = 1
        # Parse all other options
        for key, value in self.camera_config.items():
            if isinstance(value, np.ndarray):
                # Array-valued settings (e.g. lookat) must be written element-wise into the camera attribute
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)

    def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
        """
        Reset the environment, optionally overriding the initial state and/or the domain parameters.

        :param init_state: explicit initial state; sampled from `init_space` if `None`
        :param domain_param: explicit domain parameters to set before resetting
        :return: observation of the initial state
        """
        # Reset time
        self._curr_step = 0

        # Reset the domain parameters
        if domain_param is not None:
            self.domain_param = domain_param

        # Sample or set the initial simulation state
        if init_state is None:
            # Sample init state from init state space
            init_state = self.init_space.sample_uniform()
        elif not isinstance(init_state, np.ndarray):
            # Make sure init state is a numpy array
            try:
                init_state = np.asarray(init_state)
            except Exception:
                raise pyrado.TypeErr(given=init_state, expected_type=np.ndarray)

        if not self.init_space.contains(init_state, verbose=True):
            raise pyrado.ValueErr(msg="The init state must be within init state space!")

        # Update the state attribute
        self.state = init_state.copy()

        # Reset the task which also resets the reward function if necessary
        self._task.reset(env_spec=self.spec, init_state=init_state.copy())

        # Reset MuJoCo simulation model (only reset the joint configuration)
        self.sim.reset()
        old_state = self.sim.get_state()
        nq = self.init_qpos.size
        if not init_state[:nq].shape == old_state.qpos.shape:  # check joint positions dimension
            raise pyrado.ShapeErr(given=init_state[:nq], expected_match=old_state.qpos)
        # Exclude everything that is appended to the state (at the end), e.g. the ball position for WAMBallInCupSim
        if not init_state[nq : 2 * nq].shape == old_state.qvel.shape:  # check joint velocities dimension
            raise pyrado.ShapeErr(given=init_state[nq : 2 * nq], expected_match=old_state.qvel)
        new_state = mujoco_py.MjSimState(
            # Exclude everything that is appended to the state (at the end), e.g. the ball position for WAMBallInCupSim
            old_state.time,
            init_state[:nq],
            init_state[nq : 2 * nq],
            old_state.act,
            old_state.udd_state,
        )
        self.sim.set_state(new_state)
        self.sim.forward()

        # Return an observation
        return self.observe(self.state)

    def step(self, act: np.ndarray) -> tuple:
        """
        Perform one environment step: compute the reward for the current state/action, apply the (limited)
        action to the simulation, and determine whether the rollout is done.

        :param act: action to apply
        :return: tuple of (observation, reward, done flag, info dict from MuJoCo)
        """
        # Current reward depending on the state (before step) and the (unlimited) action
        remaining_steps = self._max_steps - (self._curr_step + 1) if self._max_steps is not pyrado.inf else 0
        self._curr_rew = self.task.step_rew(self.state, act, remaining_steps)

        # Apply actuator limits
        act = self.limit_act(act)
        self._curr_act = act  # just for the render function

        # Apply the action and simulate the resulting dynamics
        info = self._mujoco_step(act)
        self._curr_step += 1

        # Check if the environment is done due to a failure within the mujoco simulation (e.g. bad inputs)
        mjsim_done = info.get("failed", False)
        # Check if the task is done
        task_done = self._task.is_done(self.state)

        # Handle done case
        done = mjsim_done or task_done
        if self._curr_step >= self._max_steps:
            done = True
        if done:
            # Add final reward if done
            self._curr_rew += self._task.final_rew(self.state, remaining_steps)

        return self.observe(self.state), self._curr_rew, done, info

    def render(self, mode: RenderMode = RenderMode(), render_step: int = 1):
        """
        Render the environment as text and/or video.

        :param mode: render mode selecting text and/or video output
        :param render_step: only render every `render_step`-th simulation step
        """
        if self._curr_step % render_step == 0:
            # Call base class
            super().render(mode)

            # Print to console
            if mode.text:
                print(
                    f"step: {self._curr_step:4d} | r_t: {self._curr_rew: 1.3f} | a_t: {self._curr_act} | s_t+1: {self.state}"
                )

            # Forward to MuJoCo viewer
            if mode.video:
                if self.viewer is None:
                    # Create viewer if not existent (see 'human' mode of OpenAI Gym's MujocoEnv)
                    self.viewer = mujoco_py.MjViewer(self.sim)

                    # Adjust window size and position to custom values
                    import glfw

                    glfw.make_context_current(self.viewer.window)
                    glfw.set_window_size(self.viewer.window, 1280, 720)
                    glfw.set_window_pos(self.viewer.window, 50, 50)

                    self.configure_viewer()
                self.viewer.render()
| 41.929577 | 131 | 0.656701 |
df063b4c5a9ce72a9057eaf28b19ff0527a426ca | 29,140 | py | Python | alert_bot.py | becca-mayers/alert-bot | c4abdc39038fdda97a08c650a64d231282ba0cce | [
"MIT"
] | null | null | null | alert_bot.py | becca-mayers/alert-bot | c4abdc39038fdda97a08c650a64d231282ba0cce | [
"MIT"
] | null | null | null | alert_bot.py | becca-mayers/alert-bot | c4abdc39038fdda97a08c650a64d231282ba0cce | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 2 15:23:03 2021
@author: beccamayers
"""
from jinja2 import Environment as JinjaEnvironment
from jinja2 import FileSystemLoader
from alert_variables import alert_variables
import plotly.express as px
from colour import Color
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
def _style_alert_section(dff_quant, color_df, dff_columns, alert_value, sort_col):
    """Build the per-cell CSS style frame for one alert section.

    Rows of `dff_quant` tagged with `alert_value` are ranked by `sort_col` (descending)
    and the rank-matched color from `color_df` is written into the `sort_col` cell;
    every other cell gets the neutral white/grey style.
    """
    section = dff_quant.loc[dff_quant['alert'] == alert_value]
    section = section.sort_values(by=sort_col, ascending=False)
    section = section.reset_index().drop('index', axis=1)
    # Rank-based join: row 0 (worst offender) gets the strongest shade
    section = section.merge(color_df, left_index=True, right_index=True, how='left')
    section[sort_col] = section['colors']
    section = section.drop('colors', axis=1)
    for c in [x for x in dff_columns if x != sort_col]:
        section[c] = "background-color:#ffffff; color:#6c757d; text-align:center; font-size:14px;"
    return section


def get_alert():
    """Build the monthly alert report.

    Computes per-facility metric variances (month-over-month, FYTD vs. prior FYTD,
    and a rolling trend), renders one sparkline SVG per facility/metric under
    `vis_path`, selects the top offenders per variance type, builds a color-coded
    style map, and writes the rendered Jinja template to ``html/alert.html``.

    Side effects: writes SVG files and the HTML report. Returns None.
    """
    (metrics,
     final_metrics,
     current_readable,
     last_month,
     four_month_filter,
     three_month_filter,
     fytd_list,
     last_fytd_list,
     metric_renamer,
     round_these,
     data,
     red_hex_shades,
     version,
     option,
     templates_path,
     vis_path,
     version_slicer,
     rolling_dict,
     css_file,
     facilities,
     headers) = alert_variables()

    # Per-report configuration derived from the selected rolling option
    extended_filter = rolling_dict[option]['extended_filter']
    exact_filter = rolling_dict[option]['filter']
    earliest_month = rolling_dict[option]['earliest_month']
    current_month = rolling_dict[option]['current_month']
    trend_label = rolling_dict[option]['trend_range']
    roll_integer = rolling_dict[option]['xtnd_trend_no']

    # Reshape the wide metric columns into long form: one (Facility, Month, Metric, Value) row each
    metric_holder = []
    for mc in metrics:
        tempdf = data[['Facility', 'Reporting Month', 'MonthYear']]
        tempdf.loc[:, 'Metric'] = mc
        tempdf.loc[:, 'Value'] = data[mc]
        metric_holder.append(tempdf)
    reformatted_df = pd.concat(metric_holder)

    calculated_values = []
    for facility in facilities:
        for metric in final_metrics:
            fig_title = facility + ' ' + metric

            if metric == 'LOS Ratio':
                # LOS Ratio is a derived metric: sum(LOS) / sum(GMLOS)
                # Monthly
                tempdff = reformatted_df.loc[(reformatted_df['Facility'] == facility) & (reformatted_df['Metric'] == metric)]
                most_recent_value = tempdff.loc[tempdff['MonthYear'] == current_readable]['Value'].reset_index(drop=True)[0]
                last_month_value = tempdff.loc[tempdff['Reporting Month'] == last_month]['Value'].reset_index(drop=True)[0]
                month_series = pd.Series([last_month_value, most_recent_value])
                month_var = month_series.pct_change()[1]
                month_var = round(month_var*100)
                if month_var == np.inf:
                    month_var = 0.00
                tempdff['Month_to_Month_Variance'] = f'{month_var:,}'

                # FYTD
                tempdfytd = reformatted_df.loc[(reformatted_df['Facility'] == facility) & (reformatted_df['Reporting Month'].isin(fytd_list))]
                temp_los = tempdfytd.loc[tempdfytd['Metric'] == 'LOS']
                temp_gmlos = tempdfytd.loc[tempdfytd['Metric'] == 'GMLOS']
                current_fytd = temp_los['Value'].sum()/temp_gmlos['Value'].sum()
                current_fytd = round(current_fytd)
                tempdff['FYTD'] = f'{current_fytd:,}'

                # Prior FYTD
                tempdfx = reformatted_df.loc[(reformatted_df['Facility'] == facility) & (reformatted_df['Reporting Month'].isin(last_fytd_list))]
                temp_los = tempdfx.loc[tempdfx['Metric'] == 'LOS']
                temp_gmlos = tempdfx.loc[tempdfx['Metric'] == 'GMLOS']
                prior_fytd = temp_los['Value'].sum()/temp_gmlos['Value'].sum()
                prior_fytd = round(prior_fytd)
                tempdff['Prior FYTD'] = f'{prior_fytd:,}'

                # FYTD Variance
                fytd_series = pd.Series([prior_fytd, current_fytd])
                fytd_var = fytd_series.pct_change()[1]
                fytd_var = round(fytd_var*100)
                if fytd_var == np.inf:
                    fytd_var = 0.00
                tempdff['FYTD_Variance'] = f'{fytd_var:,}'

                # Rolling trend, min_periods=1
                temp_frame = reformatted_df.loc[(reformatted_df['Facility'] == facility) & (reformatted_df['Metric'] == metric) & (reformatted_df['Reporting Month'].isin(extended_filter))]
                temp_frame_set = temp_frame.set_index(['Facility', 'Reporting Month', 'MonthYear', 'Metric'])
                temp = temp_frame_set.rolling(roll_integer, min_periods=1).mean()
                temp = temp.reset_index()
                most_recent_value = temp.loc[temp['Reporting Month'] == current_month]['Value'].reset_index(drop=True)[0]
                earliest_value = temp.loc[temp['Reporting Month'] == earliest_month]['Value'].reset_index(drop=True)[0]
                # NOTE: a redundant pct_change() that was immediately overwritten was removed here
                rolling_var = (most_recent_value-earliest_value)/earliest_value
                rolling_var = round(rolling_var*100)
                if rolling_var == np.inf:
                    rolling_var = 0.00
                tempdff[trend_label] = f'{rolling_var:,}'

                # Sparkline SVG of the rolling mean
                fig_df = temp.loc[temp['Reporting Month'].isin(exact_filter)]
                fig_df = fig_df.rename(columns={'Value': 'Rolling Sum'})
                fig = px.line(fig_df, x='MonthYear', y='Rolling Sum')
                fig.update_layout({
                    'plot_bgcolor': 'rgba(0, 0, 0, 0)',
                    'paper_bgcolor': 'rgba(0, 0, 0, 0)',
                })
                fig.update_yaxes(visible=False, fixedrange=True)
                fig.update_xaxes(visible=False, fixedrange=True)
                fig.update_traces(line_color='#e63674')
                fig_save = vis_path + fig_title + '.svg'
                fig.write_image(fig_save)
                tempdff['plotted'] = fig_save
                calculated_values.append(tempdff)

            elif metric == 'Observation Rate':
                # Observation Rate is a derived percentage: obs cases / (inpatient + obs cases)
                # Monthly
                tempdff = reformatted_df.loc[(reformatted_df['Facility'] == facility) & (reformatted_df['Metric'] == metric)]
                most_recent_value = tempdff.loc[tempdff['MonthYear'] == current_readable]['Value'].reset_index(drop=True)[0]
                last_month_value = tempdff.loc[tempdff['Reporting Month'] == last_month]['Value'].reset_index(drop=True)[0]
                month_series = pd.Series([last_month_value, most_recent_value])
                month_var = month_series.pct_change()[1]
                month_var = round(month_var*100)
                if month_var == np.inf:
                    month_var = 0.00
                tempdff['Month_to_Month_Variance'] = f'{month_var:,}' + '%'
                tempdff['Value'] = tempdff['Value'].astype(str) + '%'

                # FYTD
                tempdfytd = reformatted_df.loc[(reformatted_df['Facility'] == facility) & (reformatted_df['Reporting Month'].isin(fytd_list))]
                temp_obs_cases = tempdfytd.loc[tempdfytd['Metric'] == 'Obs_Cases']
                temp_obs_rate_inp = tempdfytd.loc[tempdfytd['Metric'] == 'Obs_Rate_Inp']
                current_fytd = temp_obs_cases['Value'].sum()/(temp_obs_rate_inp['Value'].sum()+temp_obs_cases['Value'].sum())
                current_fytd = round(current_fytd*100)
                tempdff['FYTD'] = f'{current_fytd:,}' + '%'

                # Prior FYTD
                tempdfx = reformatted_df.loc[(reformatted_df['Facility'] == facility) & (reformatted_df['Reporting Month'].isin(last_fytd_list))]
                prior_cases = tempdfx.loc[tempdfx['Metric'] == 'Obs_Cases']
                prior_inp = tempdfx.loc[tempdfx['Metric'] == 'Obs_Rate_Inp']
                prior_fytd = prior_cases['Value'].sum()/(prior_inp['Value'].sum()+prior_cases['Value'].sum())
                prior_fytd = round(prior_fytd*100)
                tempdff['Prior FYTD'] = f'{prior_fytd:,}' + '%'

                # FYTD Variance
                fytd_series = pd.Series([prior_fytd, current_fytd])
                fytd_var = fytd_series.pct_change()[1]
                fytd_var = round(fytd_var*100)
                if fytd_var == np.inf:
                    fytd_var = 0.00
                tempdff['FYTD_Variance'] = f'{fytd_var:,}' + '%'

                # Rolling trend, min_periods=1
                temp_frame = reformatted_df.loc[(reformatted_df['Facility'] == facility) & (reformatted_df['Metric'] == metric) & (reformatted_df['Reporting Month'].isin(extended_filter))]
                temp_frame_set = temp_frame.set_index(['Facility', 'Reporting Month', 'MonthYear', 'Metric'])
                temp = temp_frame_set.rolling(roll_integer, min_periods=1).mean()
                temp = temp.reset_index()
                most_recent_value = temp.loc[temp['Reporting Month'] == current_month]['Value'].reset_index(drop=True)[0]
                earliest_value = temp.loc[temp['Reporting Month'] == earliest_month]['Value'].reset_index(drop=True)[0]
                rolling_series = pd.Series([earliest_value, most_recent_value])
                rolling_var = rolling_series.pct_change()[1]
                rolling_var = round(rolling_var*100)
                if rolling_var == np.inf:
                    rolling_var = 0.00
                tempdff[trend_label] = f'{rolling_var:,}' + '%'

                # Sparkline SVG of the rolling mean
                fig_df = temp.loc[temp['Reporting Month'].isin(exact_filter)]
                fig_df = fig_df.rename(columns={'Value': 'Rolling Sum'})
                fig = px.line(fig_df, x='MonthYear', y='Rolling Sum')
                fig.update_layout({
                    'plot_bgcolor': 'rgba(0, 0, 0, 0)',
                    'paper_bgcolor': 'rgba(0, 0, 0, 0)',
                })
                fig.update_yaxes(visible=False, fixedrange=True)
                fig.update_xaxes(visible=False, fixedrange=True)
                fig.update_traces(line_color='#e63674')
                fig_save = vis_path + fig_title + '.svg'
                fig.write_image(fig_save)
                tempdff['plotted'] = fig_save
                calculated_values.append(tempdff)

            else:
                # Plain summable metrics (e.g. opportunity days, case counts)
                # Monthly
                tempdff = reformatted_df.loc[(reformatted_df['Facility'] == facility) & (reformatted_df['Metric'] == metric)]
                most_recent_value = tempdff.loc[tempdff['MonthYear'] == current_readable]['Value'].reset_index(drop=True)[0]
                last_month_value = tempdff.loc[tempdff['Reporting Month'] == last_month]['Value'].reset_index(drop=True)[0]
                month_series = pd.Series([last_month_value, most_recent_value])
                month_var = month_series.pct_change()[1]
                month_var = round(month_var*100)
                tempdff['Month_to_Month_Variance'] = f'{month_var:,}'

                # FYTD
                tempdfff = tempdff.loc[tempdff['Reporting Month'].isin(fytd_list)]
                current_fytd = tempdfff['Value'].sum()
                current_fytd = int(current_fytd)
                tempdff['FYTD'] = f'{current_fytd:,}'

                # Prior FYTD
                tempdfx = tempdff.loc[tempdff['Reporting Month'].isin(last_fytd_list)]
                prior_fytd = tempdfx['Value'].sum()
                prior_fytd = int(prior_fytd)
                tempdff['Prior FYTD'] = f'{prior_fytd:,}'

                # FYTD Variance
                fytd_series = pd.Series([prior_fytd, current_fytd])
                fytd_var = fytd_series.pct_change()[1]
                fytd_var = round(fytd_var*100)
                tempdff['FYTD_Variance'] = f'{fytd_var:,}'

                # Rolling trend, min_periods=1
                temp_frame = reformatted_df.loc[(reformatted_df['Facility'] == facility) & (reformatted_df['Metric'] == metric) & (reformatted_df['Reporting Month'].isin(extended_filter))]
                temp_frame_set = temp_frame.set_index(['Facility', 'Reporting Month', 'MonthYear', 'Metric'])
                temp = temp_frame_set.rolling(roll_integer, min_periods=1).mean()
                temp = temp.reset_index()
                most_recent_value = temp.loc[temp['Reporting Month'] == current_month]['Value'].reset_index(drop=True)[0]
                earliest_value = temp.loc[temp['Reporting Month'] == earliest_month]['Value'].reset_index(drop=True)[0]
                rolling_series = pd.Series([earliest_value, most_recent_value])
                rolling_var = rolling_series.pct_change()[1]
                rolling_var = round(rolling_var*100)
                tempdff[trend_label] = f'{rolling_var:,}'

                # Sparkline SVG of the rolling mean
                fig_df = temp.loc[temp['Reporting Month'].isin(exact_filter)]
                fig_df = fig_df.rename(columns={'Value': 'Rolling Sum'})
                fig = px.line(fig_df, x='MonthYear', y='Rolling Sum')
                fig.update_layout({
                    'plot_bgcolor': 'rgba(0, 0, 0, 0)',
                    'paper_bgcolor': 'rgba(0, 0, 0, 0)',
                })
                fig.update_yaxes(visible=False, fixedrange=True)
                fig.update_xaxes(visible=False, fixedrange=True)
                fig.update_traces(line_color='#e63674')
                fig_save = vis_path + fig_title + '.svg'
                fig.write_image(fig_save)
                tempdff['plotted'] = fig_save
                calculated_values.append(tempdff)

    dfff = pd.concat(calculated_values)
    dfff = dfff.replace(np.inf, 0.000)

    # Clean up the data
    dfff = dfff.replace({'Metric': metric_renamer})
    dff = dfff.loc[dfff['MonthYear'] == current_readable]
    dff = dff.rename(columns={trend_label: 'months_trend'})

    # Find the discerning variances: top `version_slicer` rows per variance type
    monthly_dff = dff.sort_values(by='Month_to_Month_Variance', ascending=False)[:version_slicer]
    monthly_dff['alert'] = 'Month Variance'
    fytd_dff = dff.sort_values(by='FYTD_Variance', ascending=False)[:version_slicer]
    fytd_dff['alert'] = 'FYTD Variance'
    trend_dff = dff.sort_values(by='months_trend', ascending=False)[:version_slicer]
    trend_dff['alert'] = trend_label
    dff_quant = pd.concat([monthly_dff, fytd_dff, trend_dff])
    dff_quant = dff_quant.reset_index().drop('index', axis=1)

    # Watchlist facility details: per-facility summary dicts for the template
    watchlist_facilities = dff_quant['Facility'].drop_duplicates().tolist()
    watchlist_dict = {}
    for f, facility in enumerate(watchlist_facilities):
        # Which metrics triggered which alert type for this facility
        mo_alert = dff_quant.loc[(dff_quant['Facility'] == facility) & (dff_quant['alert'] == 'Month Variance')]['Metric'].reset_index(drop=True).tolist()
        fytd_alert = dff_quant.loc[(dff_quant['Facility'] == facility) & (dff_quant['alert'] == 'FYTD Variance')]['Metric'].reset_index(drop=True).tolist()
        trend_alert = dff_quant.loc[(dff_quant['Facility'] == facility) & (dff_quant['alert'] == trend_label)]['Metric'].reset_index(drop=True).tolist()

        fac_df = data.loc[(data['Facility'] == facility) & (data['MonthYear'] == current_readable)]
        fytd_df = data.loc[(data['Facility'] == facility) & (data['Reporting Month'].isin(fytd_list))]
        trend_df = data.loc[(data['Facility'] == facility) & (data['Reporting Month'].isin(extended_filter))]

        ''' Opp Days '''
        opp_days = int(fac_df['OppDays'].sum())
        fytd_opp_days = int(fytd_df['OppDays'].sum())
        opp_trend_df = trend_df[['Facility', 'Reporting Month', 'MonthYear', 'OppDays']].set_index(['Facility', 'Reporting Month', 'MonthYear'])
        trend = opp_trend_df.rolling(roll_integer, min_periods=1).mean()
        trend = trend.reset_index()
        most_recent_value = trend.loc[trend['Reporting Month'] == current_month]['OppDays'].sum()
        earliest_value = trend.loc[trend['Reporting Month'] == earliest_month]['OppDays'].sum()
        trend_series = pd.Series([earliest_value, most_recent_value])
        trend_opp_days = trend_series.pct_change()[1]
        trend_opp_days = round(trend_opp_days*100)
        opp_days_dict = {'metric': '<b> Opportunity <br> Days </b>',
                         'monthly_value': f'{opp_days:,}',
                         'fytd_value': f'{fytd_opp_days:,}',
                         'trend_value': f'{trend_opp_days:,}',
                         'trend_type': trend_label}
        if 'Opportunity Days' in mo_alert:
            opp_days_dict['monthly_class'] = 'text-rose strong'
        else:
            opp_days_dict['monthly_class'] = 'text-secondary'
        if 'Opportunity Days' in fytd_alert:
            opp_days_dict['fytd_class'] = 'text-rose strong'
        else:
            opp_days_dict['fytd_class'] = 'text-secondary'
        if 'Opportunity Days' in trend_alert:
            opp_days_dict['trend_class'] = 'text-rose strong'
        else:
            opp_days_dict['trend_class'] = 'text-secondary'

        ''' Cases > 48 H '''
        cases_48h = int(fac_df['Obs_Hours_48'].sum())
        fytd_cases_48h = int(fytd_df['Obs_Hours_48'].sum())
        cases48_trend_df = trend_df[['Facility', 'Reporting Month', 'MonthYear', 'Obs_Hours_48']].set_index(['Facility', 'Reporting Month', 'MonthYear'])
        trend = cases48_trend_df.rolling(roll_integer, min_periods=1).mean()
        trend = trend.reset_index()
        most_recent_value = trend.loc[trend['Reporting Month'] == current_month]['Obs_Hours_48'].sum()
        earliest_value = trend.loc[trend['Reporting Month'] == earliest_month]['Obs_Hours_48'].sum()
        cases_series = pd.Series([earliest_value, most_recent_value])
        trend_cases_48h = cases_series.pct_change()[1]
        trend_cases_48h = round(trend_cases_48h*100)
        if trend_cases_48h == np.inf:
            trend_cases_48h = 0
        else:
            trend_cases_48h = int(trend_cases_48h)
        cases48_dict = {'metric': '<b> Observation <br> Cases > 48H </b>',
                        'monthly_value': f'{cases_48h:,}',
                        'fytd_value': f'{fytd_cases_48h:,}',
                        'trend_value': f'{trend_cases_48h:,}',
                        'trend_type': trend_label}
        if 'Observation Cases > 48 Hours' in mo_alert:
            cases48_dict['monthly_class'] = 'text-rose strong'
        else:
            cases48_dict['monthly_class'] = 'text-secondary'
        if 'Observation Cases > 48 Hours' in fytd_alert:
            cases48_dict['fytd_class'] = 'text-rose strong'
        else:
            cases48_dict['fytd_class'] = 'text-secondary'
        if 'Observation Cases > 48 Hours' in trend_alert:
            cases48_dict['trend_class'] = 'text-rose strong'
        else:
            cases48_dict['trend_class'] = 'text-secondary'

        '''Obs Rate'''
        obs_cases = fac_df['Obs_Cases'].sum()
        obs_rate_inp = fac_df['Obs_Rate_Inp'].sum()
        obs_rate = obs_cases/(obs_rate_inp+obs_cases)
        obs_rate = round(obs_rate*100)
        obs_rate_dict = {'metric': '<b> Observation <br> Rate </b>',
                         'monthly_value': f'{obs_rate:,}' + '%'}
        obs_cases = fytd_df['Obs_Cases'].sum()
        obs_rate_inp = fytd_df['Obs_Rate_Inp'].sum()
        obs_rate = obs_cases/(obs_rate_inp+obs_cases)
        obs_rate = round(obs_rate*100)
        obs_rate_dict['fytd_value'] = f'{obs_rate:,}' + '%'
        obs_rate_trend_df = trend_df[['Facility', 'Reporting Month', 'MonthYear', 'Obs_Cases', 'Obs_Rate_Inp']].set_index(['Facility', 'Reporting Month', 'MonthYear'])
        # Consistency fix: the window was hard-coded to 4 here while every other trend
        # computation uses roll_integer from the selected rolling option.
        obs_cases_trend = obs_rate_trend_df.rolling(roll_integer, min_periods=1).mean()
        obs_cases_trend = obs_cases_trend.reset_index()
        most_recent_cases_value = obs_cases_trend.loc[obs_cases_trend['Reporting Month'] == current_month]['Obs_Cases'].sum()
        most_recent_inp_value = obs_cases_trend.loc[obs_cases_trend['Reporting Month'] == current_month]['Obs_Rate_Inp'].sum()
        most_recent_obs_rate = most_recent_cases_value/(most_recent_inp_value+most_recent_cases_value)
        two_mo_ago_cases_value = obs_cases_trend.loc[obs_cases_trend['Reporting Month'] == earliest_month]['Obs_Cases'].sum()
        two_mo_ago_inp_value = obs_cases_trend.loc[obs_cases_trend['Reporting Month'] == earliest_month]['Obs_Rate_Inp'].sum()
        two_mo_ago_obs_rate = two_mo_ago_cases_value/(two_mo_ago_inp_value+two_mo_ago_cases_value)
        obs_series = pd.Series([two_mo_ago_obs_rate, most_recent_obs_rate])
        trend_obs_rate = obs_series.pct_change()[1]
        trend_obs_rate = round(trend_obs_rate*100)
        obs_rate_dict['trend_value'] = f'{trend_obs_rate:,}' + '%'
        obs_rate_dict['trend_type'] = trend_label
        if 'Observation Rate' in mo_alert:
            obs_rate_dict['monthly_class'] = 'text-rose strong'
        else:
            obs_rate_dict['monthly_class'] = 'text-secondary'
        if 'Observation Rate' in fytd_alert:
            obs_rate_dict['fytd_class'] = 'text-rose strong'
        else:
            obs_rate_dict['fytd_class'] = 'text-secondary'
        if 'Observation Rate' in trend_alert:
            obs_rate_dict['trend_class'] = 'text-rose strong'
        else:
            obs_rate_dict['trend_class'] = 'text-secondary'

        '''LOS Ratio'''
        los = fac_df['LOS'].sum()
        gmlos = fac_df['GMLOS'].sum()
        los_ratio = los/gmlos
        los_ratio = round(los_ratio)
        los_dict = {'metric': '<b> LOS Ratio </b>',
                    'monthly_value': f'{los_ratio:,}'}
        los = fytd_df['LOS'].sum()
        gmlos = fytd_df['GMLOS'].sum()
        los_ratio = los/gmlos
        los_ratio = round(los_ratio)
        los_dict['fytd_value'] = f'{los_ratio:,}'
        los_trend_df = trend_df[['Facility', 'Reporting Month', 'MonthYear', 'LOS', 'GMLOS']].set_index(['Facility', 'Reporting Month', 'MonthYear'])
        los_trend = los_trend_df.rolling(roll_integer, min_periods=1).mean()
        los_trend = los_trend.reset_index()
        most_recent_los_value = los_trend.loc[los_trend['Reporting Month'] == current_month]['LOS'].sum()
        most_recent_gmlos_value = los_trend.loc[los_trend['Reporting Month'] == current_month]['GMLOS'].sum()
        most_recent_los = most_recent_los_value/most_recent_gmlos_value
        two_mo_ago_los_value = los_trend.loc[los_trend['Reporting Month'] == earliest_month]['LOS'].sum()
        two_mo_ago_gmlos_value = los_trend.loc[los_trend['Reporting Month'] == earliest_month]['GMLOS'].sum()
        two_mo_ago_los = two_mo_ago_los_value/two_mo_ago_gmlos_value
        los_series = pd.Series([two_mo_ago_los, most_recent_los])
        trend_los = los_series.pct_change()[1]
        trend_los = round(trend_los*100)
        los_dict['trend_value'] = f'{trend_los:,}'
        los_dict['trend_type'] = trend_label
        if 'LOS Ratio' in mo_alert:
            los_dict['monthly_class'] = 'text-rose strong'
        else:
            los_dict['monthly_class'] = 'text-secondary'
        if 'LOS Ratio' in fytd_alert:
            los_dict['fytd_class'] = 'text-rose strong'
        else:
            los_dict['fytd_class'] = 'text-secondary'
        if 'LOS Ratio' in trend_alert:
            los_dict['trend_class'] = 'text-rose strong'
        else:
            los_dict['trend_class'] = 'text-secondary'

        watchlist_dict[f] = [facility, los_dict, opp_days_dict, obs_rate_dict, cases48_dict]

    # Make the color map
    if len(watchlist_facilities) < 7:
        # go with the pre-set color shade list
        color_df = pd.DataFrame(red_hex_shades, columns=['colors'])
    else:
        # generate a color shade list
        rose = Color('#e60250')
        white = Color('#ffffff')
        red_hex_shade_list = list(rose.range_to(white, len(watchlist_facilities)))
        red_hex_shades = []
        # Bug fix: the original loop indexed the list with the Color object itself
        # (red_hex_shade_list[i] where i was a Color), raising TypeError whenever
        # there were 7+ watchlist facilities.
        # NOTE(review): 'bgcolor:' differs from the 'background-color:' prefix used
        # elsewhere — kept as-is, but verify against the pre-set list's format.
        for shade in red_hex_shade_list:
            red_hex_shades.append('bgcolor:' + shade.hex_l)
        color_df = pd.DataFrame(red_hex_shades, columns=['colors'])
    color_df['colors'] = color_df['colors'] + "; color:#ffffff; text-align:center; font-size:14px; font-weight:bolder;"

    dff_columns = dff_quant.columns.values
    # One styled section per alert type, concatenated in display order
    color_map = pd.concat([
        _style_alert_section(dff_quant, color_df, dff_columns, 'Month Variance', 'Month_to_Month_Variance'),
        _style_alert_section(dff_quant, color_df, dff_columns, 'FYTD Variance', 'FYTD_Variance'),
        _style_alert_section(dff_quant, color_df, dff_columns, trend_label, 'months_trend'),
    ])
    dff_quant = dff_quant.drop('alert', axis=1)

    # Wrap metric labels for the HTML table
    dff_quant.loc[dff_quant['Metric'] == 'Observation Cases > 48 Hours', 'Metric'] = 'Observation Cases <br> > 48 Hours'
    dff_quant.loc[dff_quant['Metric'] == 'Observation Rate', 'Metric'] = 'Observation <br> Rate'
    dff_quant.loc[dff_quant['Metric'] == 'Opportunity Days', 'Metric'] = 'Opportunity Days'
    dff_quant.loc[dff_quant['Metric'] == 'LOS Ratio', 'Metric'] = 'LOS <br> Ratio'

    # Templating & rendering
    data_dict = dff_quant.to_dict('records')
    color_dict = color_map.to_dict('records')
    final_dict = zip(data_dict, color_dict)
    env = JinjaEnvironment(loader=FileSystemLoader('templates/'))
    template = env.get_template('alert_template.html')
    html = template.render(version=version,
                           current_readable=current_readable,
                           trend_label=trend_label,
                           headers=headers,
                           data=final_dict,
                           watchlist_data=watchlist_dict,
                           facilities=watchlist_facilities,
                           css_file=css_file,
                           icon_integers=['', 'one', 'two', '3', '4'])

    with open("html/alert.html", "w") as h:
        h.write(html)
| 52.410072 | 189 | 0.583699 |
ed423b0f27127ec4302f9936ead295c10ab3b90d | 226 | py | Python | frappe/core/doctype/navbar_settings/test_navbar_settings.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 3 | 2017-12-09T22:05:11.000Z | 2019-10-22T12:03:43.000Z | frappe/core/doctype/navbar_settings/test_navbar_settings.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 17 | 2021-03-22T18:47:14.000Z | 2022-03-15T12:21:00.000Z | frappe/core/doctype/navbar_settings/test_navbar_settings.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 2 | 2021-05-06T06:14:40.000Z | 2021-05-06T10:05:29.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestNavbarSettings(unittest.TestCase):
pass
| 20.545455 | 58 | 0.774336 |
d748c1574c0056ce281916dd5498ea5eeb811c35 | 469 | py | Python | packages/python/plotly/plotly/validators/densitymapbox/colorbar/_ticklen.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/densitymapbox/colorbar/_ticklen.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/densitymapbox/colorbar/_ticklen.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="ticklen", parent_name="densitymapbox.colorbar", **kwargs
):
super(TicklenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs,
)
| 31.266667 | 83 | 0.643923 |
876c16835fff29e93529890245154dc7d5c1f166 | 3,862 | py | Python | pos-tagging/torch_lstm.py | naetherm/NLP | c715e424e37f1a3a1bde28df430a2d2b30ef205a | [
"MIT"
] | 3 | 2020-08-11T12:33:48.000Z | 2020-12-29T11:37:38.000Z | pos-tagging/torch_lstm.py | naetherm/NLP | c715e424e37f1a3a1bde28df430a2d2b30ef205a | [
"MIT"
] | null | null | null | pos-tagging/torch_lstm.py | naetherm/NLP | c715e424e37f1a3a1bde28df430a2d2b30ef205a | [
"MIT"
] | null | null | null | import re
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
EPOCHS=20
def parse(file):
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART-' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[1])
return left, right
left_train, right_train = parse('eng.train')
left_test, right_test = parse('eng.testa')
def process_string(string):
string = re.sub('[^A-Za-z0-9\-\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
word2idx = {'PAD': 0,'NUM':1,'UNK':2}
tag2idx = {'PAD': 0}
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
char_idx = 1
def parse_XY(texts, labels):
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
text = text.lower()
tag = labels[no]
for c in text:
if c not in char2idx:
char2idx[c] = char_idx
char_idx += 1
if tag not in tag2idx:
tag2idx[tag] = tag_idx
tag_idx += 1
Y.append(tag2idx[tag])
if text not in word2idx:
word2idx[text] = word_idx
word_idx += 1
X.append(word2idx[text])
return X, np.array(Y)
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50
def iter_seq(x):
return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])
def to_train_seq(*args):
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
x = [[len(idx2word[i]) for i in k] for k in batch]
maxlen = max([j for i in x for j in i])
temp = np.zeros((batch.shape[0],batch.shape[1],maxlen),dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i,k]]):
temp[i,k,-1-no] = char2idx[c]
return temp
X_seq, Y_seq = to_train_seq(train_X, train_Y)
X_char_seq = generate_char_seq(X_seq)
X_seq.shape
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
X_seq_test.shape
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
from numpy.random import seed
seed(1)
torch.manual_seed(1)
output_dim = 64
class LSTMModel(nn.Module):
def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
super(LSTMModel, self).__init__()
self.hidden_dim = hidden_dim
self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim)
self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
def forward(self, sentence):
embeds = self.word_embeddings(sentence)
lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))
tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
tag_scores = F.log_softmax(tag_space, dim=1)
return tag_scores
model = LSTMModel(output_dim, output_dim, len(word2idx), len(tag2idx))
loss_function = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
for epoch in range(EPOCHS):
for s, t in zip(train_X, train_Y):
model.zero_grad()
tag_scores = model(torch.tensor(s, dtype=torch.int32))
loss = loss_function(tag_scores, torch.from_numpy(t))
loss.backward()
optimizer.step() | 29.257576 | 75 | 0.642672 |
7d20f7f2b7301640d0df448a3b8c1f18092dd902 | 7,575 | py | Python | qa/rpc-tests/txn_clone.py | knight2008/axe | 007bf4ff9605b4552810bcc4df73f4eb4fb011fa | [
"MIT"
] | 1 | 2019-05-16T09:00:00.000Z | 2019-05-16T09:00:00.000Z | qa/rpc-tests/txn_clone.py | knight2008/axe | 007bf4ff9605b4552810bcc4df73f4eb4fb011fa | [
"MIT"
] | null | null | null | qa/rpc-tests/txn_clone.py | knight2008/axe | 007bf4ff9605b4552810bcc4df73f4eb4fb011fa | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 12,500 AXE:
starting_balance = 12500
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 12190)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 290)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 12190 - 290 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 400, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 200, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 400 AXE serialized is 00902f5009000000
pos0 = 2*(4+1+36+1+4+1)
hex400 = "00902f5009000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 400 and clone_raw[pos0 : pos0 + 16] != hex400 or
rawtx1["vout"][0]["value"] != 400 and clone_raw[pos0 : pos0 + 16] == hex400):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 500AXE for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 500
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 12190 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 1000 AXE for 2 matured,
# less possible orphaned matured subsidy
expected += 1000
if (self.options.mine_block):
expected -= 500
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 12190 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 12190
+ fund_foo_tx["fee"]
- 290
+ fund_bar_tx["fee"]
+ 1000)
# Node1's "from0" account balance
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
| 47.34375 | 111 | 0.611221 |
ae7d532db354594e5670bc2ab5437189366a167e | 3,007 | py | Python | deploy-to-azure/azext_deploy_to_azure/dev/common/utils.py | dksimpson/deploy-to-azure-cli-extension | 250a5bae1088f8ea695bd13db2b48c889f93be62 | [
"MIT"
] | 1 | 2020-06-01T14:08:37.000Z | 2020-06-01T14:08:37.000Z | deploy-to-azure/azext_deploy_to_azure/dev/common/utils.py | dksimpson/deploy-to-azure-cli-extension | 250a5bae1088f8ea695bd13db2b48c889f93be62 | [
"MIT"
] | 12 | 2020-03-05T08:47:02.000Z | 2021-08-09T20:19:47.000Z | deploy-to-azure/azext_deploy_to_azure/dev/common/utils.py | dksimpson/deploy-to-azure-cli-extension | 250a5bae1088f8ea695bd13db2b48c889f93be62 | [
"MIT"
] | 10 | 2020-03-05T08:35:53.000Z | 2021-08-28T15:54:48.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import platform
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
FILE_ENCODING_TYPES = ['ascii', 'utf-16be', 'utf-16le', 'utf-8']
def read_file_content(file_path, encoding):
if not file_path or not encoding:
raise CLIError("File path {} or encoding {} is missing.".format(file_path, encoding))
if encoding not in FILE_ENCODING_TYPES:
raise CLIError("File encoding {encoding} is not supported.".format(encoding=encoding))
try:
import sys
if sys.version_info[0] < 3:
return _read_file_content_ver2(file_path, encoding)
return _read_file_content_ver3(file_path, encoding)
except UnicodeDecodeError as ex:
logger.debug(msg=ex)
raise CLIError("Unable to decode file '{}' with '{}' encoding.".format(
file_path, encoding))
def open_file(filepath):
"""
Opens a file in the default editor for the file type and exits.
"""
import subprocess
if platform.system() == 'Darwin': # macOS
subprocess.call(('open', filepath))
elif platform.system() == 'Windows': # Windows
os.system(filepath)
else: # linux variants
subprocess.call(('xdg-open', filepath))
def delete_dir(path):
import shutil
shutil.rmtree(path)
def time_now_as_string():
from datetime import datetime
now = datetime.utcnow().strftime("%H%M%S")
return now
def open_url(url):
"""Opens the url in new window in the default browser.
"""
from webbrowser import open_new
open_new(url=url)
# inspired from aks_preview
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
# Decorators
def singleton(myclass):
instance = [None]
def wrapper(*args, **kwargs):
if instance[0] is None:
instance[0] = myclass(*args, **kwargs)
return instance[0]
return wrapper
def _read_file_content_ver3(file_path, encoding):
logger.debug('inside read_file_content_ver3')
with open(file_path, 'r', encoding=encoding) as f:
return f.read()
def _read_file_content_ver2(file_path, encoding):
logger.debug('inside read_file_content_ver2')
with open(file_path) as f:
return f.read().decode(encoding)
| 29.194175 | 98 | 0.614566 |
aebede3db84bf6b44c81441e011d439cde497276 | 1,254 | py | Python | product/migrations/0016_auto_20170131_0045.py | skylifewww/pangolin-fog | b1fa4b51b5c6eb40ff5cfcdbb71a3f932235da94 | [
"MIT"
] | null | null | null | product/migrations/0016_auto_20170131_0045.py | skylifewww/pangolin-fog | b1fa4b51b5c6eb40ff5cfcdbb71a3f932235da94 | [
"MIT"
] | null | null | null | product/migrations/0016_auto_20170131_0045.py | skylifewww/pangolin-fog | b1fa4b51b5c6eb40ff5cfcdbb71a3f932235da94 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-01-31 00:45
from __future__ import unicode_literals
from django.db import migrations, models
import easy_thumbnails.fields
import product.models
class Migration(migrations.Migration):
dependencies = [
('product', '0015_auto_20170126_0132'),
]
operations = [
migrations.AlterModelOptions(
name='slideproduct',
options={'ordering': ['ordering'], 'verbose_name': 'Slide', 'verbose_name_plural': 'Slides'},
),
migrations.AddField(
model_name='accessory',
name='image',
field=easy_thumbnails.fields.ThumbnailerImageField(blank=True, upload_to=product.models.make_upload_path, verbose_name='Image'),
),
migrations.AddField(
model_name='product',
name='product_image',
field=easy_thumbnails.fields.ThumbnailerImageField(blank=True, upload_to=product.models.make_upload_path, verbose_name='Image'),
),
migrations.AddField(
model_name='slideproduct',
name='image',
field=models.ImageField(blank=True, upload_to=product.models.make_upload_path, verbose_name='Изображение'),
),
]
| 33.891892 | 140 | 0.648325 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.