Dataset schema (⌀ = column may be null):
hexsha: stringlengths 40 to 40
size: int64, 5 to 2.06M
ext: stringclasses, 10 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths 3 to 248
max_stars_repo_name: stringlengths 5 to 125
max_stars_repo_head_hexsha: stringlengths 40 to 78
max_stars_repo_licenses: listlengths 1 to 10
max_stars_count: int64, 1 to 191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24 to 24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24 to 24 ⌀
max_issues_repo_path: stringlengths 3 to 248
max_issues_repo_name: stringlengths 5 to 125
max_issues_repo_head_hexsha: stringlengths 40 to 78
max_issues_repo_licenses: listlengths 1 to 10
max_issues_count: int64, 1 to 67k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24 to 24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24 to 24 ⌀
max_forks_repo_path: stringlengths 3 to 248
max_forks_repo_name: stringlengths 5 to 125
max_forks_repo_head_hexsha: stringlengths 40 to 78
max_forks_repo_licenses: listlengths 1 to 10
max_forks_count: int64, 1 to 105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24 to 24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24 to 24 ⌀
content: stringlengths 5 to 2.06M
avg_line_length: float64, 1 to 1.02M
max_line_length: int64, 3 to 1.03M
alphanum_fraction: float64, 0 to 1
count_classes: int64, 0 to 1.6M
score_classes: float64, 0 to 1
count_generators: int64, 0 to 651k
score_generators: float64, 0 to 1
count_decorators: int64, 0 to 990k
score_decorators: float64, 0 to 1
count_async_functions: int64, 0 to 235k
score_async_functions: float64, 0 to 1
count_documentation: int64, 0 to 1.04M
score_documentation: float64, 0 to 1
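Each record below follows the schema above, one source file per row. As a rough, non-authoritative sketch only: the dump does not say how these rows are stored, so the file name rows.jsonl and the use of pandas here are assumptions for illustration of how rows with this schema could be filtered.

import pandas as pd

# Hypothetical JSONL export of the rows shown below; the real storage format
# is not stated anywhere in this dump.
df = pd.read_json("rows.jsonl", lines=True)

# Keep well-documented Python files that define at least one class.
subset = df[(df["lang"] == "Python")
            & (df["score_documentation"] > 0.4)
            & (df["count_classes"] > 0)]

# Inspect repository, path, and the first part of the source text.
print(subset[["max_stars_repo_name", "max_stars_repo_path"]].head())
if not subset.empty:
    print(subset["content"].iloc[0][:200])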
hexsha: b8cbfca6de86ee3ef9fe472b32eb107264c928c8 | size: 1,671 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: paleomau/MGOL_BOOTCAMP @ 8c2b018f49fd12a255ea6f323141260d04d4421d | path: EDA/src/utils/main_flask.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
from flask import Flask, request, render_template
from functions import read_json
import os
# Mandatory
app = Flask(__name__) # __name__ --> __main__
# ---------- Flask functions ----------
@app.route("/") # @ --> esto representa el decorador de la función
def home():
""" Default path """
#return app.send_static_file('greet.html')
return "Por defecto"
@app.route("/greet")
def greet():
username = request.args.get('name')
return render_template('index.html', name=username)
@app.route("/info")
def create_json():
import pandas as pd
df = pd.read_csv('lung_nn_outl.csv')
return df.to_json()
# localhost:6060/give_me_id?password=12345
@app.route('/give_me_id', methods=['GET'])
def give_id():
token_id = request.args['password']
if token_id == "p10875558":
return request.args
else:
return "No es la contraseña correcta"
@app.route("/recibe_informacion")
def recibe_info():
pass
# ---------- Other functions ----------
def main():
print("---------STARTING PROCESS---------")
print(__file__)
# Get the settings fullpath
# \\ --> WINDOWS
# / --> UNIX
    # For both: os.sep
settings_file = os.path.dirname(__file__) + os.sep + "settings.json"
print(settings_file)
# Load json from file
json_readed = read_json(fullpath=settings_file)
# Load variables from jsons
DEBUG = json_readed["debug"]
HOST = json_readed["host"]
PORT_NUM = json_readed["port"]
    # Two possibilities:
# HOST = "0.0.0.0"
# HOST = "127.0.0.1" --> localhost
app.run(debug=DEBUG, host=HOST, port=PORT_NUM)
if __name__ == "__main__":
main()
avg_line_length: 25.318182 | max_line_length: 72 | alphanum_fraction: 0.625972
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 708 | score_decorators: 0.423192
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 703 | score_documentation: 0.420203

hexsha: b8ccc7bb85dc9dad61097e465ec52bcbf128cb34 | size: 1,473 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: pecigonzalo/opta @ 0259f128ad3cfc4a96fe1f578833de28b2f19602 | path: opta/core/secrets.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
import os
from dotenv import dotenv_values
from opta.core.kubernetes import get_namespaced_secrets, update_secrets
from opta.exceptions import UserErrors
from opta.utils import deep_merge, logger
MANUAL_SECRET_NAME = "manual-secrets" # nosec
LINKED_SECRET_NAME = "secret" # nosec
def get_secrets(namespace: str, manual_secret_name: str) -> dict:
""":return: manual and linked secrets"""
manual_secrets = get_namespaced_secrets(namespace, manual_secret_name)
linked_secrets = get_namespaced_secrets(
namespace, LINKED_SECRET_NAME
) # Helm charts don't have linked secrets, but it'll just return an empty dict so no worries
for secret_name in manual_secrets.keys():
if secret_name in linked_secrets:
logger.warning(
f"# Secret {secret_name} found manually overwritten from linked value."
)
del linked_secrets[secret_name]
return deep_merge(manual_secrets, linked_secrets)
def bulk_update_manual_secrets(
namespace: str, manual_secret_name: str, env_file: str
) -> None:
"""
append the values from the env file to the existing data for this manual secret.
create the secret if it doesn't exist yet.
:raises UserErrors: if env_file is not found
"""
if not os.path.exists(env_file):
raise UserErrors(f"Could not find file {env_file}")
new_values = dotenv_values(env_file)
update_secrets(namespace, manual_secret_name, new_values)
avg_line_length: 35.071429 | max_line_length: 97 | alphanum_fraction: 0.728445
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 466 | score_documentation: 0.316361

hexsha: b8cdf4dde7f1aa6655db7010276c1247756180f9 | size: 5,114 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: EkremBayar/bayar @ aad1a32044da671d0b4f11908416044753360b39 | path: venv/Lib/site-packages/mpl_toolkits/axes_grid1/axes_rgb.py | licenses: ["MIT"]
max_stars_count: 603 (2020-12-23T13:49:32.000Z to 2022-03-31T23:38:03.000Z)
max_issues_count: 387 (2020-12-15T14:54:04.000Z to 2022-03-31T07:00:21.000Z)
max_forks_count: 35 (2021-03-26T03:12:04.000Z to 2022-03-23T10:15:10.000Z)
content:
import numpy as np
from matplotlib import _api
from .axes_divider import make_axes_locatable, Size
from .mpl_axes import Axes
@_api.delete_parameter("3.3", "add_all")
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True, **kwargs):
"""
Parameters
----------
pad : float
Fraction of the axes height.
"""
divider = make_axes_locatable(ax)
pad_size = pad * Size.AxesY(ax)
xsize = ((1-2*pad)/3) * Size.AxesX(ax)
ysize = ((1-2*pad)/3) * Size.AxesY(ax)
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
if axes_class is None:
try:
axes_class = ax._axes_class
except AttributeError:
axes_class = type(ax)
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(), ax.get_position(original=True),
sharex=ax, sharey=ax, **kwargs)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
t.set_visible(False)
try:
for axis in ax1.axis.values():
axis.major_ticklabels.set_visible(False)
except AttributeError:
pass
ax_rgb.append(ax1)
if add_all:
fig = ax.get_figure()
for ax1 in ax_rgb:
fig.add_axes(ax1)
return ax_rgb
@_api.deprecated("3.3", alternative="ax.imshow(np.dstack([r, g, b]))")
def imshow_rgb(ax, r, g, b, **kwargs):
return ax.imshow(np.dstack([r, g, b]), **kwargs)
class RGBAxes:
"""
4-panel imshow (RGB, R, G, B).
Layout:
+---------------+-----+
| | R |
+ +-----+
| RGB | G |
+ +-----+
| | B |
+---------------+-----+
Subclasses can override the ``_defaultAxesClass`` attribute.
Attributes
----------
RGB : ``_defaultAxesClass``
The axes object for the three-channel imshow.
R : ``_defaultAxesClass``
The axes object for the red channel imshow.
G : ``_defaultAxesClass``
The axes object for the green channel imshow.
B : ``_defaultAxesClass``
The axes object for the blue channel imshow.
"""
_defaultAxesClass = Axes
@_api.delete_parameter("3.3", "add_all")
def __init__(self, *args, pad=0, add_all=True, **kwargs):
"""
Parameters
----------
pad : float, default: 0
fraction of the axes height to put as padding.
add_all : bool, default: True
Whether to add the {rgb, r, g, b} axes to the figure.
This parameter is deprecated.
axes_class : matplotlib.axes.Axes
*args
Unpacked into axes_class() init for RGB
**kwargs
Unpacked into axes_class() init for RGB, R, G, B axes
"""
axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
self.RGB = ax = axes_class(*args, **kwargs)
if add_all:
ax.get_figure().add_axes(ax)
else:
kwargs["add_all"] = add_all # only show deprecation in that case
self.R, self.G, self.B = make_rgb_axes(
ax, pad=pad, axes_class=axes_class, **kwargs)
# Set the line color and ticks for the axes.
for ax1 in [self.RGB, self.R, self.G, self.B]:
ax1.axis[:].line.set_color("w")
ax1.axis[:].major_ticks.set_markeredgecolor("w")
@_api.deprecated("3.3")
def add_RGB_to_figure(self):
"""Add red, green and blue axes to the RGB composite's axes figure."""
self.RGB.get_figure().add_axes(self.R)
self.RGB.get_figure().add_axes(self.G)
self.RGB.get_figure().add_axes(self.B)
def imshow_rgb(self, r, g, b, **kwargs):
"""
Create the four images {rgb, r, g, b}.
Parameters
----------
r, g, b : array-like
The red, green, and blue arrays.
kwargs : imshow kwargs
kwargs get unpacked into the imshow calls for the four images.
Returns
-------
rgb : matplotlib.image.AxesImage
r : matplotlib.image.AxesImage
g : matplotlib.image.AxesImage
b : matplotlib.image.AxesImage
"""
if not (r.shape == g.shape == b.shape):
raise ValueError(
f'Input shapes ({r.shape}, {g.shape}, {b.shape}) do not match')
RGB = np.dstack([r, g, b])
R = np.zeros_like(RGB)
R[:, :, 0] = r
G = np.zeros_like(RGB)
G[:, :, 1] = g
B = np.zeros_like(RGB)
B[:, :, 2] = b
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
@_api.deprecated("3.3", alternative="RGBAxes")
class RGBAxesBase(RGBAxes):
pass
avg_line_length: 30.260355 | max_line_length: 79 | alphanum_fraction: 0.550841
count_classes: 3,372 | score_classes: 0.659366 | count_generators: 0 | score_generators: 0 | count_decorators: 3,102 | score_decorators: 0.60657
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,088 | score_documentation: 0.408291

hexsha: b8ce37a154e212778f695fcf9135c3e96507ff09 | size: 88 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: aries-zhang/flask-template @ 369d77f2910f653f46668dd9bda735954b6c145e | path: app/admin/controllers/__init__.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
from flask import Blueprint
admin = Blueprint('admin', __name__, url_prefix='/manage')
avg_line_length: 22 | max_line_length: 58 | alphanum_fraction: 0.761364
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 16 | score_documentation: 0.181818

hexsha: b8d03933a76fe421eb780621a4114e528f2cddbc | size: 535 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: wmoulin/chatterbot @ 075a4651227ad159e58a36fca5ea7456d9153653 | path: first.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
# The only required parameter for the ChatBot is a name. This can be anything you want.
chatbot = ChatBot("My First Chatbot")
# Training your ChatBot
conversation = [
"Hello",
"Hi there!",
"How are you doing?",
"I'm doing great.",
"That is good to hear",
"Thank you.",
"You're welcome."
]
trainer = ListTrainer(chatbot)
trainer.train(conversation)
# Get a response
response = chatbot.get_response("Good morning!")
print(response)
avg_line_length: 24.318182 | max_line_length: 87 | alphanum_fraction: 0.708411
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 266 | score_documentation: 0.497196

hexsha: b8d0ad22e9f860e320dd54fc175dce04ecd1af3d | size: 7,405 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: pnposch/runpandas @ 25388c18b52dfcc168e81922b8ba20ca93adad20 | path: runpandas/types/summary.py | licenses: ["MIT"]
max_stars_count: 11 (2020-12-04T20:43:23.000Z to 2022-03-16T19:19:12.000Z)
max_issues_count: 45 (2020-06-23T02:50:31.000Z to 2022-02-15T16:56:00.000Z)
max_forks_count: 4 (2021-11-11T15:23:04.000Z to 2022-02-02T13:02:12.000Z)
content:
"""
Helper module for evaluation and display of the summary of training sessions.
"""
import numpy as np
import pandas as pd
from runpandas._utils import convert_pace_secmeters2minkms
def _build_summary_statistics(obj):
"""
Generate session statistics from a given DataFrame.
Parameters
----------
obj: The DataFrame to generate basic commute statistics from.
Returns:
--------
A Dictionary containing the following statistics:
- Total moving time
- Average speed
- Max speed
- Average moving speed
- Average cadence running
- Average cadence running moving
- Max cadence
- Average heart rate
- Average heart rate moving
- Max heart rate
- Average pace (per 1 km)
- Average pace moving (per 1 km)
- Max pace
- Average temperature
- Max temperature
- Min temperature
- Total distance
- Total ellapsed time
"""
start = obj.start
try:
moving_time = obj.moving_time
except AttributeError:
moving_time = np.nan
try:
mean_speed = obj.mean_speed()
max_speed = obj["speed"].max()
mean_pace = convert_pace_secmeters2minkms(obj.mean_pace().total_seconds())
max_pace = convert_pace_secmeters2minkms(
obj["speed"].to_pace().min().total_seconds()
)
except AttributeError:
mean_speed = np.nan
max_speed = np.nan
mean_pace = np.nan
try:
mean_moving_speed = obj.mean_speed(only_moving=True)
mean_moving_pace = convert_pace_secmeters2minkms(
obj.mean_pace(only_moving=True).total_seconds()
)
except (AttributeError, KeyError):
mean_moving_speed = np.nan
mean_moving_pace = np.nan
try:
mean_cadence = obj.mean_cadence()
max_cadence = obj["cad"].max()
except AttributeError:
mean_cadence = np.nan
max_cadence = np.nan
try:
mean_moving_cadence = obj.mean_cadence(only_moving=True)
except (AttributeError, KeyError):
mean_moving_cadence = np.nan
try:
mean_heart_rate = obj.mean_heart_rate()
max_heart_rate = obj["hr"].max()
except AttributeError:
mean_heart_rate = np.nan
max_heart_rate = np.nan
try:
mean_moving_heart_rate = obj.mean_heart_rate(only_moving=True)
except (AttributeError, KeyError):
mean_moving_heart_rate = np.nan
try:
mean_temperature = obj["temp"].mean()
min_temperature = obj["temp"].min()
max_temperature = obj["temp"].max()
except KeyError:
mean_temperature = np.nan
min_temperature = np.nan
max_temperature = np.nan
total_distance = obj.distance
ellapsed_time = obj.ellapsed_time
row = {k: v for k, v in locals().items() if not k.startswith("__") and k != "obj"}
return row
def _build_session_statistics(obj):
"""
Generate session statistics from a given DataFrame.
Parameters
----------
obj: The DataFrame to generate basic commute statistics from.
Returns:
--------
A ``pandas.Dataframe`` containing the following statistics:
- Total moving time
- Average speed
- Max speed
- Average moving speed
- Average cadence running
- Average cadence running moving
- Max cadence
- Average heart rate
- Average heart rate moving
- Max heart rate
- Average pace (per 1 km)
- Average pace moving (per 1 km)
- Max pace
- Average temperature
- Max temperature
- Min temperature
- Total distance
- Total ellapsed time
"""
stats = {key: [value] for key, value in _build_summary_statistics(obj).items()}
return pd.DataFrame(stats).set_index("start")
def _build_activity_statistics(obj):
"""
Generate basic statistics from a given pandas Series.
Parameters
----------
obj: The DataFrame to generate basic commute statistics from.
Returns:
--------
A Series containing the following statistics:
- Session times
- Total distance
- Total ellapsed time
- Total moving time
- Total and average elevation gain
- Average speed
- Average moving speed
- Average pace (per 1 km)
- Average pace moving (per 1 km)
- Average cadence running
- Average cadence running moving
- Average heart rate
- Average heart rate moving
- Average temperature
"""
# special conditions for methods that raise Exceptions
stats = _build_summary_statistics(obj)
rows = {
"Session": "Running: %s" % stats["start"].strftime("%d-%m-%Y %H:%M:%S"),
"Total distance (meters)": stats["total_distance"],
"Total ellapsed time": stats["ellapsed_time"],
"Total moving time": stats["moving_time"],
"Average speed (km/h)": stats["mean_speed"] * 3.6,
"Average moving speed (km/h)": stats["mean_moving_speed"] * 3.6,
"Average pace (per 1 km)": stats["mean_pace"],
"Average pace moving (per 1 km)": stats["mean_moving_pace"],
"Average cadence": stats["mean_cadence"],
"Average moving cadence": stats["mean_moving_cadence"],
"Average heart rate": stats["mean_heart_rate"],
"Average moving heart rate": stats["mean_moving_heart_rate"],
"Average temperature": stats["mean_temperature"],
}
series = pd.Series(
rows,
index=[
"Session",
"Total distance (meters)",
"Total ellapsed time",
"Total moving time",
"Average speed (km/h)",
"Average moving speed (km/h)",
"Average pace (per 1 km)",
"Average pace moving (per 1 km)",
"Average cadence",
"Average moving cadence",
"Average heart rate",
"Average moving heart rate",
"Average temperature",
],
)
return series
def activity_summary(activity):
"""
Returns the pandas Dataframe with the common basic statistics for the
given activity.
Parameters
----------
activity: runpandas.types.Activity. Runpandas Activity to be computed the statistics
Returns
-------
pandas.Dataframe: A pandas DataFrame containing the summary statistics, which
    includes estimates of the total distance covered, the total duration,
the time spent moving, and many others.
"""
summary_statistics = _build_activity_statistics(activity)
return summary_statistics.T
def session_summary(session):
"""
    Returns a pandas DataFrame with the common basic statistics for the
given activity.
Parameters
----------
session: runpandas.types.Activity. Runpandas Activity with pandas.MultiIndex
to be computed the statistics
Returns
-------
pandas.Dataframe: A pandas DataFrame containing the summary statistics
    across all the activities, which includes estimates of the total distance covered,
the total duration, the time spent moving, and many others.
"""
frames = []
for index in session.index.unique(level="start"):
df = session.xs(index, level=0)
df.start = index
frames.append(_build_session_statistics(df))
session_summary = pd.concat(frames, axis=0, verify_integrity=True)
session_summary.sort_index(inplace=True)
return session_summary
avg_line_length: 28.480769 | max_line_length: 89 | alphanum_fraction: 0.637677
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 4,037 | score_documentation: 0.545172

hexsha: b8d180754d7fc90d954cb1d916a92cd2b5b1aea1 | size: 589 | ext: py | lang: Python
max_stars/max_issues repo: gonzalocasas/dribdat @ f8c326c96e851be199eb9f61daed6c8780e3bc27 | path: dribdat/decorators.py | licenses: ["MIT"]
max_forks repo: OpendataCH/dribdat @ 90d95a12c782dea7d284a4c454a06481e67c1e37 | path: dribdat/decorators.py | licenses: ["MIT"]
max_stars_count: 21 (2015-10-25T23:22:04.000Z to 2019-04-01T06:42:54.000Z)
max_issues_count: 108 (2020-02-11T10:07:53.000Z to 2021-06-19T20:30:03.000Z)
max_forks_count: 12 (2016-09-02T03:12:28.000Z to 2021-06-02T07:58:48.000Z)
content:
# -*- coding: utf-8 -*-
from functools import wraps
from flask import abort, jsonify
from flask_login import current_user
def admin_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not current_user.active or not current_user.is_admin:
abort(403)
return f(*args, **kwargs)
return decorated_function
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
if not current_user.is_allowed:
return jsonify(flag='fail', msg='Login required')
return f(*args, **kwargs)
return decorated
avg_line_length: 25.608696 | max_line_length: 64 | alphanum_fraction: 0.657046
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 357 | score_decorators: 0.606112
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 45 | score_documentation: 0.076401

hexsha: b8d3d6eef9923c53e2c72ef3ffa4d51959b6e188 | size: 263 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: alirezajahani60/FabFlee @ e2cfdb6efc758281e123f6acc1b06f93176dd756 | path: run_perf_benchmarks.py | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
from base.fab import *
from plugins.FabFlee.FabFlee import *
@task
def flee_get_perf(results_dir):
print("{}/{}".format(env.local_results,results_dir))
my_file = open("{}/{}/perf.log".format(env.local_results,results_dir), 'r')
print(my_file.read())
avg_line_length: 29.222222 | max_line_length: 79 | alphanum_fraction: 0.703422
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 200 | score_decorators: 0.760456
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 26 | score_documentation: 0.098859

hexsha: b8d3d895be119a8b71cde792e94daf1fc8fa955b | size: 479 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: Farfar/vwgbroker @ 9acc9f1a259e26aa830a9534a6dea3cee21c09ff | path: vwgconnect/account.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import time
import logging
import asyncio
import hashlib
import jwt
class Account:
    def __init__(self, username, password, brand, spin=None):  # fixed: a non-default argument cannot follow a default one
self._username = username
self._password = password
self._spin = spin
self._brand = brand
    async def login(self):  # fixed: method needs self to access the attributes below
if "@" in self._username:
if self._password is not None:
return True
return False
avg_line_length: 18.423077 | max_line_length: 61 | alphanum_fraction: 0.611691
count_classes: 349 | score_classes: 0.728601 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 144 | score_async_functions: 0.300626 | count_documentation: 48 | score_documentation: 0.100209

hexsha: b8d7cf7888021a157102a64b5a55477b57bc5fa9 | size: 3,263 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: group7BSE1/BSE-2021 @ 2553b12e5fd5d1015af4746bcf84a8ee7c1cb8e0 | path: src/project_02/project2_b.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null
max_forks_count: 1 (2021-04-07T14:49:04.000Z to 2021-04-07T14:49:04.000Z)
content:
def open_file():
while True:
file_name = input("Enter input file: ")
try:
measles = open(file_name, "r")
break
except:
print("File unable to open. Invalid name or file doesn't exist!")
continue # name it re-prompts for a write name
return measles
def process_file(measles):
while True:
year = input("Enter year: ")
if len(year) == 4: # this ensures that the year has four characters
break
else:
print("Invalid year. Year MUST be four digits")
continue
while True: # this loop assigns the income level
print("Income levels;\n Input 1 for WB_LI\n Input 2 for WB_LMI\n Input 3 for WB_UMI\n Input 4 for WB_HI")
income = input("Enter income level(1,2,3,4): ")
if income == "1":
income = "WB_LI"
break
elif income == "2":
income = "WB_LMI"
break
elif income == "3":
income = "WB_UMI"
break
elif income == "4":
income = "WB_HI"
break
else:
print("Invalid income level!") # an invalid input re-prompts till the right one is made
continue
count = 0
percentages = []
countries = []
for line in measles:
if (line[88:92] == year) and (line[51:56] == income or line[51:57] == income): # Ensures the criteria is met
count += 1
percentages.append(int(line[59:61])) # adds percentages to the list percentages
country = line[0:51]
country = str(country)
country = country.strip()
countries.append(country) # adds percentages to the list of countries
continue
country_percentage = dict(zip(countries, percentages)) # Creates a dictionary with country as the key and percentage as values
if count > 0:
percent_sum = sum(percentages)
percent_avg = percent_sum / count # average of percentages
max_percentage = max(percentages)
min_percentage = min(percentages)
# gets countries for maximum percentages to this list
max_country = [country for country, percentage in country_percentage.items() if percentage == max_percentage]
# gets countries for minimum percentages to this list
min_country = [country for country, percentage in country_percentage.items() if percentage == min_percentage]
print(f"Nunber of countries in the record: {count}")
print(f"Average percentage for {year} with {income} is {percent_avg:.1f}%")
print(f"Country(ies) have maximum percentage in {year} with {income} of {max_percentage}%")
        for i in max_country: # print countries with maximum percentages
print(" >", i)
print(f"Country(ies) have minimum percentage in {year} with {income} of {min_percentage}%")
        for i in min_country: # print countries with minimum percentages
print(" >", i)
else: # if there is no item in the list, it prints this
print(f"The year {year} does not exist in the record...")
def main():
measles = open_file()
process_file(measles)
measles.close()
main()
avg_line_length: 37.079545 | max_line_length: 131 | alphanum_fraction: 0.599142
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,295 | score_documentation: 0.396874

hexsha: b8d7d6b700479d42df11c33ef276f3c562f44f38 | size: 159 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: Yta-ux/python_algorithms @ 62dd2d897e2f2de8783e68df3022170a86e9132e | path: basic_algorithms/primeiro_ultimo_nome.py | licenses: ["MIT"]
max_stars_count: 1 (2022-01-26T22:15:17.000Z to 2022-01-26T22:15:17.000Z)
max_issues_count: null | max_forks_count: null
content:
nome = input('Nome Completo:').title().strip().split()
print(f"""Prazer em Conhece-lo
Seu Primeiro Nome e: {nome[0]}
Seu Ultimo Nome e: {nome[len(nome)-1]}""")
avg_line_length: 39.75 | max_line_length: 54 | alphanum_fraction: 0.666667
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 113 | score_documentation: 0.710692

hexsha: b8d7f25bc4dac9b169ae8981214f8ae8040f25ce | size: 3,193 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: vivian-rook/magnum @ 7acc6eeda44ce6ffcca8b7fc2e682f80403ac4b7 | path: magnum/conductor/k8s_api.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
# Copyright 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from magnum.conductor.handlers.common.cert_manager import create_client_files
class KubernetesAPI:
"""
Simple Kubernetes API client using requests.
This API wrapper allows for a set of very simple operations to be
performed on a Kubernetes cluster using the `requests` library. The
reason behind it is that the native `kubernetes` library does not
seem to be quite thread-safe at the moment.
Also, our interactions with the Kubernetes API are happening inside
Greenthreads so we don't need to use connection pooling on top of it,
in addition to pools not being something that you can disable with
the native Kubernetes API.
"""
def __init__(self, context, cluster):
self.context = context
self.cluster = cluster
# Load certificates for cluster
(self.ca_file, self.key_file, self.cert_file) = create_client_files(
self.cluster, self.context
)
def _request(self, method, url, json=True):
response = requests.request(
method,
url,
verify=self.ca_file.name,
cert=(self.cert_file.name, self.key_file.name)
)
response.raise_for_status()
if json:
return response.json()
else:
return response.text
def get_healthz(self):
"""
Get the health of the cluster from API
"""
return self._request(
'GET',
f"{self.cluster.api_address}/healthz",
json=False
)
def list_node(self):
"""
List all nodes in the cluster.
:return: List of nodes.
"""
return self._request(
'GET',
f"{self.cluster.api_address}/api/v1/nodes"
)
def list_namespaced_pod(self, namespace):
"""
List all pods in the given namespace.
:param namespace: Namespace to list pods from.
:return: List of pods.
"""
return self._request(
'GET',
f"{self.cluster.api_address}/api/v1/namespaces/{namespace}/pods"
)
def __del__(self):
"""
Close all of the file descriptions for the certificates, since they
are left open by `create_client_files`.
TODO(mnaser): Use a context manager and avoid having these here.
"""
if hasattr(self, 'ca_file'):
self.ca_file.close()
if hasattr(self, 'cert_file'):
self.cert_file.close()
if hasattr(self, 'key_file'):
self.key_file.close()
avg_line_length: 31 | max_line_length: 77 | alphanum_fraction: 0.630442
count_classes: 2,502 | score_classes: 0.783589 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,878 | score_documentation: 0.588162

hexsha: b8d95b42f671a377b5da5f2e5ac42f949f5f6c0c | size: 1,865 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: futurice/vault @ 6da5341804509b7984d0a5817bbd13d3477fe0bc | path: secret/secret.py | licenses: ["Apache-2.0"]
max_stars_count: 9 (2015-10-16T12:06:35.000Z to 2020-04-03T09:05:06.000Z)
max_issues_count: null
max_forks_count: 3 (2015-10-20T09:36:53.000Z to 2021-01-18T20:49:41.000Z)
content:
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import logging, os, sys
from pprint import pprint as pp
from secret.project import get_project
from secret.cli import prepare
def trollius_log(level=logging.CRITICAL):
os.environ['TROLLIUSDEBUG'] = "1" # more informative tracebacks
logging.basicConfig(level=level)
if sys.version_info.major == 2:
trollius_log()
from secret.storage import S3
from secret.output import prettyprint
import boto3
import trollius as asyncio
from trollius import From, Return
@asyncio.coroutine
def main(args):
project = get_project(args.datafile)
region = os.getenv("AWS_DEFAULT_REGION", args.region)
kw = {}
if not os.getenv("AWS_PROFILE"):
kw = dict(aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
aws_session_token=os.getenv('AWS_SESSION_TOKEN'),)
if args.debug:
boto3.set_stream_logger(name='botocore')
trollius_log(level=logging.DEBUG)
session = boto3.session.Session(region_name=region, **kw)
storage = S3(session=session,
vault=args.vault,
vaultkey=args.vaultkey,
env=args.env,
region=args.region,
prefix=args.project,
project=project,)
method = getattr(storage, args.action)
fn = lambda: method(**vars(args))
result = yield From(fn())
prettyprint(result, args)
def runner():
args = prepare()
loop = asyncio.get_event_loop()
# wrap asyncio to suppress stacktraces
if args.debug:
loop.run_until_complete(main(args))
else:
try:
loop.run_until_complete(main(args))
except Exception as e:
print(e.message)
loop.close()
if __name__ == '__main__':
runner()
avg_line_length: 27.028986 | max_line_length: 69 | alphanum_fraction: 0.676139
count_classes: 0 | score_classes: 0 | count_generators: 906 | score_generators: 0.485791 | count_decorators: 925 | score_decorators: 0.495979
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 220 | score_documentation: 0.117962

hexsha: b8da34c95a45838a0718da8340a3212acd784270 | size: 3,947 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: SaiKrishna1207/aos @ a55a1eed80dc9b21f7e295b265228c0d54072a66 | path: tests/test_data.py | licenses: ["Apache-2.0"]
max_stars_count: 3 (2020-03-03T08:35:42.000Z to 2020-09-03T09:30:37.000Z)
max_issues_count: 4 (2020-02-21T12:48:58.000Z to 2020-04-30T11:12:52.000Z)
max_forks_count: 5 (2020-03-01T04:14:32.000Z to 2021-12-11T15:20:42.000Z)
content:
def get_obj1():
obj = \
{
"sha": "d25341478381063d1c76e81b3a52e0592a7c997f",
"commit": {
"author": {
"name": "Stephen Dolan",
"email": "mu@netsoc.tcd.ie",
"date": "2013-06-22T16:30:59Z"
},
"committer": {
"name": "Stephen Dolan",
"email": "mu@netsoc.tcd.ie",
"date": "2013-06-22T16:30:59Z"
},
"message": "Merge pull request #162 from stedolan/utf8-fixes\n\nUtf8 fixes. Closes #161",
"tree": {
"sha": "6ab697a8dfb5a96e124666bf6d6213822599fb40",
"url": "https://api.github.com/repos/stedolan/jq/git/trees/6ab697a8dfb5a96e124666bf6d6213822599fb40"
},
"url": "https://api.github.com/repos/stedolan/jq/git/commits/d25341478381063d1c76e81b3a52e0592a7c997f",
"comment_count": 0
},
"url": "https://api.github.com/repos/stedolan/jq/commits/d25341478381063d1c76e81b3a52e0592a7c997f",
"html_url": "https://github.com/stedolan/jq/commit/d25341478381063d1c76e81b3a52e0592a7c997f",
"comments_url": "https://api.github.com/repos/stedolan/jq/commits/d25341478381063d1c76e81b3a52e0592a7c997f/comments",
"author": {
"login": "stedolan",
"id": 79765,
"avatar_url": "https://avatars.githubusercontent.com/u/79765?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/stedolan",
"html_url": "https://github.com/stedolan",
"followers_url": "https://api.github.com/users/stedolan/followers",
"following_url": "https://api.github.com/users/stedolan/following{/other_user}",
"gists_url": "https://api.github.com/users/stedolan/gists{/gist_id}",
"starred_url": "https://api.github.com/users/stedolan/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/stedolan/subscriptions",
"organizations_url": "https://api.github.com/users/stedolan/orgs",
"repos_url": "https://api.github.com/users/stedolan/repos",
"events_url": "https://api.github.com/users/stedolan/events{/privacy}",
"received_events_url": "https://api.github.com/users/stedolan/received_events",
"type": "User",
"site_admin": False
},
"committer": {
"login": "stedolan",
"id": 79765,
"avatar_url": "https://avatars.githubusercontent.com/u/79765?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/stedolan",
"html_url": "https://github.com/stedolan",
"followers_url": "https://api.github.com/users/stedolan/followers",
"following_url": "https://api.github.com/users/stedolan/following{/other_user}",
"gists_url": "https://api.github.com/users/stedolan/gists{/gist_id}",
"starred_url": "https://api.github.com/users/stedolan/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/stedolan/subscriptions",
"organizations_url": "https://api.github.com/users/stedolan/orgs",
"repos_url": "https://api.github.com/users/stedolan/repos",
"events_url": "https://api.github.com/users/stedolan/events{/privacy}",
"received_events_url": "https://api.github.com/users/stedolan/received_events",
"type": "User",
"site_admin": False
},
"parents": [
{
"sha": "54b9c9bdb225af5d886466d72f47eafc51acb4f7",
"url": "https://api.github.com/repos/stedolan/jq/commits/54b9c9bdb225af5d886466d72f47eafc51acb4f7",
"html_url": "https://github.com/stedolan/jq/commit/54b9c9bdb225af5d886466d72f47eafc51acb4f7"
},
{
"sha": "8b1b503609c161fea4b003a7179b3fbb2dd4345a",
"url": "https://api.github.com/repos/stedolan/jq/commits/8b1b503609c161fea4b003a7179b3fbb2dd4345a",
"html_url": "https://github.com/stedolan/jq/commit/8b1b503609c161fea4b003a7179b3fbb2dd4345a"
}
]
}
return obj
avg_line_length: 49.962025 | max_line_length: 123 | alphanum_fraction: 0.633392
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,014 | score_documentation: 0.763618

hexsha: b8dcb2e38617c441c3331cf21108a3eb3fba7a49 | size: 3,094 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: zenranda/proj10-gcalfinal @ ee32beb3ef570b23883d41f84394b28818e5a07c | path: test_main.py | licenses: ["Artistic-2.0"]
max_stars_count: null
max_issues_count: 2 (2021-02-08T20:17:57.000Z to 2021-04-30T20:38:59.000Z)
max_forks_count: null
content:
###
#Various nose tests. If you want to adapt this for your own use, be aware that the start/end block list has a very specific formatting.
###
import sys  # needed for sys.exit() in the except block below
import get_freebusy
import arrow
from operator import itemgetter
from pymongo import MongoClient
import secrets.admin_secrets
import secrets.client_secrets
MONGO_CLIENT_URL = "mongodb://{}:{}@localhost:{}/{}".format(
secrets.client_secrets.db_user,
secrets.client_secrets.db_user_pw,
secrets.admin_secrets.port,
secrets.client_secrets.db)
try:
dbclient = MongoClient(MONGO_CLIENT_URL)
db = getattr(dbclient, secrets.client_secrets.db)
collection = db.dated
base_size = collection.count() #current size of the db, for comparison later
except:
print("Failure opening database. Is Mongo running? Correct password?")
sys.exit(1)
def test_free_times(): #Given a sample list, check to see if it's getting free/busy blocks correctly
ranges = [['2016-11-20T08:30:00-08:00', '2016-11-20T010:30:00-08:00'], ['2016-11-20T11:00:00-08:00', '2016-11-20T15:00:00-08:00'], ['2016-11-20T16:30:00-08:00', '2016-11-20T19:00:00-08:00'], ['2016-11-24T13:30:00-08:00', '2016-11-24T16:00:00-08:00'], ['2016-11-21T15:00:00-08:00', '2016-11-21T18:30:00-08:00']]
start = '2016-11-20T8:00:00-08:00'
end = '2016-11-23T20:00:00-08:00'
assert get_freebusy.get_freebusy(ranges, start, end) == [['At 2016-11-20 from 08:00:00 to 08:30:00', 'At 2016-11-20 from 10:30:00 to 11:00:00', 'At 2016-11-20 from 15:00:00 to 16:30:00', 'At 2016-11-20 from 19:00:00 to 20:00:00', 'At 2016-11-21 from 08:00:00 to 15:00:00', 'At 2016-11-21 from 18:00:00 to 20:00:00', 'At 2016-11-24 from 08:00:00 to 13:30:00', 'At 2016-11-24 from 16:00:00 to 20:00:00'], ['At 2016-11-20 from 08:30:00 to 10:30:00', 'At 2016-11-20 from 11:00:00 to 15:00:00', 'At 2016-11-20 from 16:30:00 to 19:00:00', 'At 2016-11-21 from 15:00:00 to 18:00:00', 'At 2016-11-24 from 13:30:00 to 16:00:00']]
ranges = []
start = '2016-11-20T12:00:00-08:00'
end = '2016-11-23T20:00:00-08:00'
assert get_freebusy.get_freebusy(ranges, start, end) == [[], []]
def test_overlap(): #tests if the program can handle dates that overlap/intersect
ranges = [['2016-11-22T11:00:00-08:00', '2016-11-22T16:00:00-08:00'], ['2016-11-23T12:00:00-08:00', '2016-11-23T15:30:00-08:00']]
start = '2016-11-20T8:00:00-08:00'
end = '2016-11-23T20:00:00-08:00'
assert get_freebusy.get_freebusy(ranges, start, end) == [['At 2016-11-22 from 08:00:00 to 11:00:00', 'At 2016-11-22 from 16:00:00 to 20:00:00', 'At 2016-11-23 from 08:00:00 to 11:00:00', 'At 2016-11-23 from 18:30:00 to 20:00:00'], ['At 2016-11-22 from 11:00:00 to 16:00:00', 'At 2016-11-23 from 11:00:00 to 18:30:00']]
def test_db():
assert collection != None
collection.insert({"type" : "freebusy", "entry" : [["entry 1"], ["entry 2"]]})
assert base_size < collection.count()
collection.remove({"entry" : [["entry 1"], ["entry 2"]]})
assert base_size == collection.count()
avg_line_length: 55.25 | max_line_length: 624 | alphanum_fraction: 0.649968
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,811 | score_documentation: 0.585326

hexsha: b8dd4a9a3b779200a138616573ee9d9a08756937 | size: 2,664 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: lanl/scico @ 976c9e5833f8f67eed2eaa43460d89fb09bb9f78 | path: examples/scripts/ct_abel_tv_admm.py | licenses: ["BSD-3-Clause"]
max_stars_count: 18 (2021-09-21T18:55:11.000Z to 2022-03-21T20:13:05.000Z)
max_issues_count: 218 (2021-09-21T21:45:08.000Z to 2022-03-30T18:45:27.000Z)
max_forks_count: 2 (2021-09-23T22:44:47.000Z to 2021-12-18T16:01:43.000Z)
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SCICO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
r"""
Regularized Abel Inversion
==========================
This example demonstrates a TV-regularized Abel inversion using
an Abel projector based on PyAbel :cite:`pyabel-2022`
"""
import numpy as np
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import create_circular_phantom
from scico.linop.abel import AbelProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # phantom size
x_gt = create_circular_phantom((N, N), [0.4 * N, 0.2 * N, 0.1 * N], [1, 0, 0.5])
"""
Set up the forward operator and create a test measurement
"""
A = AbelProjector(x_gt.shape)
y = A @ x_gt
np.random.seed(12345)
y = y + np.random.normal(size=y.shape).astype(np.float32)
ATy = A.T @ y
"""
Set up ADMM solver object.
"""
λ = 1.9e1 # L1 norm regularization parameter
ρ = 4.9e1 # ADMM penalty parameter
maxiter = 100 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
# Note the use of anisotropic TV. Isotropic TV would require use of L21Norm.
g = λ * functional.L1Norm()
C = linop.FiniteDifference(input_shape=x_gt.shape)
f = loss.SquaredL2Loss(y=y, A=A)
x_inv = A.inverse(y)
x0 = snp.clip(x_inv, 0, 1.0)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 5},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
x_tv = snp.clip(solver.x, 0, 1.0)
"""
Show results.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1, vmax=1.2)
fig, ax = plot.subplots(nrows=2, ncols=2, figsize=(12, 12))
plot.imview(x_gt, title="Ground Truth", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(y, title="Measurement", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(
x_inv,
title="Inverse Abel: %.2f (dB)" % metric.psnr(x_gt, x_inv),
cmap=plot.cm.Blues,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
x_tv,
title="TV Regularized Inversion: %.2f (dB)" % metric.psnr(x_gt, x_tv),
cmap=plot.cm.Blues,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
fig.show()
input("\nWaiting for input to close figures and exit")
avg_line_length: 24.897196 | max_line_length: 95 | alphanum_fraction: 0.682432
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 996 | score_documentation: 0.373313

hexsha: b8ddae5f1b6f6079138cdb43e8d72e2e1ca77817 | size: 1,760 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: timleslie/pyblas @ 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | path: pyblas/level1/csrot.py | licenses: ["BSD-3-Clause"]
max_stars_count: null
max_issues_count: 1 (2020-10-10T23:23:06.000Z to 2020-10-10T23:23:06.000Z)
max_forks_count: null
content:
from ..util import slice_
def csrot(N, CX, INCX, CY, INCY, C, S):
"""Applies a Givens rotation to a pair of vectors x and y
Parameters
----------
N : int
Number of elements in input vector
CX : numpy.ndarray
A single precision complex array, dimension (1 + (`N` - 1)*abs(`INCX`))
INCX : int
Storage spacing between elements of `CX`
CY : numpy.ndarray
A single precision complex array, dimension (1 + (`N` - 1)*abs(`INCY`))
INCY : int
Storage spacing between elements of `CY`
C : numpy.single
The Givens parameter c, with value cos(theta)
S : numpy.single
The Givens parameter s, with value sin(theta)
Returns
-------
None
See Also
--------
srot : Single-precision real Givens rotation
crot : Single-precision complex Givens rotation
zdrot : Double-precision complex Givens rotation
Notes
-----
Online PyBLAS documentation: https://nbviewer.jupyter.org/github/timleslie/pyblas/blob/main/docs/csrot.ipynb
Reference BLAS documentation: https://github.com/Reference-LAPACK/lapack/blob/v3.9.0/BLAS/SRC/csrot.f
Examples
--------
>>> x = np.array([1+2j, 2+3j, 3+4j], dtype=np.complex64)
>>> y = np.array([6+7j, 7+8j, 8+9j], dtype=np.complex64)
>>> N = len(x)
>>> incx = 1
>>> incy = 1
>>> theta = np.pi/2
>>> csrot(N, x, incx, y, incy, np.cos(theta), np.sin(theta))
>>> print(x)
[6.+7.j 7.+8.j 8.+9.j]
>>> print(y)
[-1.-2.j -2.-3.j -3.-4.j]
"""
if N <= 0:
return
x_slice = slice_(N, INCX)
y_slice = slice_(N, INCY)
X_TEMP = C * CX[x_slice] + S * CY[y_slice]
CY[y_slice] = -S * CX[x_slice] + C * CY[y_slice]
CX[x_slice] = X_TEMP
avg_line_length: 29.333333 | max_line_length: 112 | alphanum_fraction: 0.580682
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,472 | score_documentation: 0.836364

hexsha: b8de8fb9e2f63a96dbca5bb30f4841f157b6ed7b | size: 160 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: yarnoiser/PyDungeon @ c37ad314605065194732202539db50eef94ea3da | path: items.py | licenses: ["BSD-3-Clause"]
max_stars_count: 1 (2018-05-15T01:26:04.000Z to 2018-05-15T01:26:04.000Z)
max_issues_count: null | max_forks_count: null
content:
from dice import *
class Item():
    def __init__(self, weight):
        self.weight = weight
class Weapon(Item):  # fixed: base class was misspelled as "item"
    def __init__(self, weight, damage_die, reach):
        # original file is truncated here; minimal completion storing the attributes
        super().__init__(weight)
        self.damage_die = damage_die
        self.reach = reach
avg_line_length: 14.545455 | max_line_length: 47 | alphanum_fraction: 0.69375
count_classes: 135 | score_classes: 0.84375 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

hexsha: b8df7da99167063e92023aa153878ad215a2e8ff | size: 2,476 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: blackcow/pytorch-cifar-master @ c571c8fd7fe521907755ca2eacb6aa877abe3493 | path: leet.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
# Question 1:
import io
import sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
#str = input()
#print(str)
class Solution(object):
def findMedium(l):
length = len(l)
l.sort()
        # if the length is odd, print the middle value
if length % 2 != 0:
print(l[length//2])
        # if even, print the mean of the two middle values
else:
print((l[length//2-1] + l[length//2])/2)
l = [1, 3, 5, 2, 8, 7]
Solution.findMedium(l)
# Question 2:
import io
import sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
# str = input()
# print(str)
class Solution:
def maxStr(str_in):
        # initialization
length = len(str_in)
count = [0 for i in range(26)]
char_a = ord('a')
        # count character occurrences
for i in range(length):
count[ord(str_in[i]) - char_a] += 1
last = str_in[0]
num = 1
res = 1
for m in range(1, length):
            # current character differs from the previous one
if last != str_in[m]:
tmp_idx = m
while (tmp_idx + 1 < length) and (last == str_in[tmp_idx + 1]):
num += 1
tmp_idx += 1
if count[ord(last) - char_a] > num:
num += 1
num, res = 1, max(num, res)
last = str_in[m]
            # same character, keep accumulating
else:
num += 1
if (num > 1) and (count[ord(last) - char_a] > num):
num += 1
        # after computing the max length, walk the string again
max_length = max(num, res)
str2ls = list(str_in)
for i in count:
if i != max_length:
str2ls = str2ls[i:]
else:
str2ls = str2ls[:max_length]
out = ''.join(str2ls)
print(out)
return (out)
text = 'abbbbcccddddddddeee'
Solution.maxStr(text)
# Question 3:
import io
import sys
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
#str = input()
#print(str)
class Solution:
def findMaxArray(l):
        # initialization
tmp = l[0]
max_val = tmp
length = len(l)
for i in range(1, length):
            # compute the current running sum and record the current maximum
if tmp + l[i] > l[i]:
max_val = max(max_val, tmp + l[i])
tmp = tmp + l[i]
            # otherwise the current sequence ends here; record the maximum at this point
else:
max_val = max(max_val, tmp, tmp+l[i], l[i])
tmp = l[i]
print(max_val)
return max_val
l = [1, -2, 4, 5, -1, 1]
Solution.findMaxArray(l)
avg_line_length: 23.358491 | max_line_length: 79 | alphanum_fraction: 0.468094
count_classes: 2,130 | score_classes: 0.796559 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 415 | score_documentation: 0.155198

hexsha: b8df9843139746c1adbc8ed57ae326c83672e193 | size: 1,091 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: omar00070/django-shopping-website @ af2741b900b60631349ea2e6de17586994e31680 | path: shop_website/users/views.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
from django.shortcuts import render
from .forms import RegistrationForm, UserUpdateForm, ProfileUpdateForm
from django.shortcuts import redirect
from .models import Profile
from django.contrib.auth.decorators import login_required
def registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
return redirect('login')
else:
form = RegistrationForm()
return render(request, 'users/register.html', {'form': form})
@login_required()
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
return redirect('profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
context = {'u_form':u_form, 'p_form':p_form}
return render(request, 'users/profile.html', context)
avg_line_length: 34.09375 | max_line_length: 88 | alphanum_fraction: 0.75802
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 548 | score_decorators: 0.502291
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 101 | score_documentation: 0.092576

hexsha: b8e0455d33253902aeabce67886870561b85812f | size: 2,685 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: Artificial-Brain/quantumcat @ eff99cac7674b3a1b7e1f752e7ebed2b960f85b3 | path: quantumcat/gates/custom_gates/cirq/__init__.py | licenses: ["Apache-2.0"]
max_stars_count: 20 (2021-05-10T07:04:41.000Z to 2021-12-13T17:12:05.000Z)
max_issues_count: 2 (2021-04-26T05:34:52.000Z to 2021-05-16T13:46:22.000Z)
max_forks_count: 17 (2021-04-02T18:09:33.000Z to 2022-02-10T16:38:57.000Z)
content:
# (C) Copyright Artificial Brain 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from quantumcat.gates.custom_gates.cirq.u_gate import UGate
from quantumcat.gates.custom_gates.cirq.u1_gate import U1Gate
from quantumcat.gates.custom_gates.cirq.u2_gate import U2Gate
from quantumcat.gates.custom_gates.cirq.u3_gate import U3Gate
from quantumcat.gates.custom_gates.cirq.sdg_gate import SDGGate
from quantumcat.gates.custom_gates.cirq.sxd_gate import SXDGate
from quantumcat.gates.custom_gates.cirq.td_gate import TDGate
from quantumcat.gates.custom_gates.cirq.rxx_gate import RXXGate
from quantumcat.gates.custom_gates.cirq.r_gate import RGate
from quantumcat.gates.custom_gates.cirq.rx_gate import RXGate
from quantumcat.gates.custom_gates.cirq.ry_gate import RYGate
from quantumcat.gates.custom_gates.cirq.ryy_gate import RYYGate
from quantumcat.gates.custom_gates.cirq.rz_gate import RZGate
from quantumcat.gates.custom_gates.cirq.rccx_gate import RCCXGate
from quantumcat.gates.custom_gates.cirq.rc3x_gate import RC3XGate
from quantumcat.gates.custom_gates.cirq.rzz_gate import RZZGate
from quantumcat.gates.custom_gates.cirq.rzx_gate import RZXGate
from quantumcat.gates.custom_gates.cirq.sx_gate import SXGate
from quantumcat.gates.custom_gates.cirq.cy_gate import CYGate
from quantumcat.gates.custom_gates.cirq.p_gate import PGate
from quantumcat.gates.custom_gates.cirq.cu_gate import CUGate
from quantumcat.gates.custom_gates.cirq.cu1_gate import CU1Gate
from quantumcat.gates.custom_gates.cirq.cu3_gate import CU3Gate
from quantumcat.gates.custom_gates.cirq.crx_gate import CRXGate
from quantumcat.gates.custom_gates.cirq.cry_gate import CRYGate
from quantumcat.gates.custom_gates.cirq.crz_gate import CRZGate
from quantumcat.gates.custom_gates.cirq.dcx_gate import DCXGate
from quantumcat.gates.custom_gates.cirq.c3x_gate import C3XGate
from quantumcat.gates.custom_gates.cirq.c4x_gate import C4XGate
from quantumcat.gates.custom_gates.cirq.c3sx_gate import C3SXGate
from quantumcat.gates.custom_gates.cirq.cphase_gate import CPhaseGate
from quantumcat.gates.custom_gates.cirq.csx_gate import CSXGate
from quantumcat.gates.custom_gates.cirq.ch_gate import CHGate
avg_line_length: 55.9375 | max_line_length: 75 | alphanum_fraction: 0.84581
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 581 | score_documentation: 0.216387

hexsha: b8e06a6109f1d799db4201a71cba9cf898507598 | size: 1,045 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: MarkFzp/ToM-Pragmatics @ 3de1956c36ea40f29a41e4c153c4b8cdc73afc15 | path: CL_Net/Referential_Game/Number_Set/info.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
import numpy as np
import scipy.stats as sp
from concept import Concept
def info_gain(prev_dist, new_dist):
return sp.entropy(prev_dist) - sp.entropy(new_dist)
def main():
attributes = range(10)
num_concepts = 5
concept_size = 4
concept_space = Concept(attributes, num_concepts, concept_size)
problem1 = [(1, 2, 3, 4), (3, 4, 5, 6), (2, 4, 5, 7), (2, 3, 5, 8), (2, 3, 4, 5)]
init_belief = np.ones(num_concepts) / num_concepts
for msg in [2, 3, 4, 5]:
new_belief = concept_space.bayesian_update(init_belief, problem1, msg)
print(info_gain(init_belief, new_belief))
init_belief = new_belief
print(info_gain(np.ones(num_concepts) / num_concepts, new_belief))
print('%%%%%%%%%%%%%%%%%%%%%%')
problem2 = [(0, 2, 3), (4, 7, 9), (4, 7), (0, 2, 4, 9)]
init_belief = np.ones(4) / 4
for msg in [7] * 8:
new_belief = concept_space.bayesian_update(init_belief, problem2, msg)
print(info_gain(init_belief, new_belief))
init_belief = new_belief
print(info_gain(np.ones(4) / 4, [0, 0, 1, 0]))
if __name__ == '__main__':
main()
avg_line_length: 30.735294 | max_line_length: 82 | alphanum_fraction: 0.67177
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0
count_async_functions: 0 | score_async_functions: 0 | count_documentation: 34 | score_documentation: 0.032536

hexsha: b8e0a7c86db8162077913d429a8e44b03bb440ed | size: 1,695 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: typhonshambo/TY-BOT-v3 @ eb192d495bf32ae3a56d4a60ec2aa4e1e6a7ef2c | path: commands/misc/github.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
import aiohttp
import discord
from discord.ext import commands
from discord.commands import Option, slash_command, SlashCommandGroup
import json
with open ('././config/guilds.json', 'r') as f:
data = json.load(f)
guilds = data['guilds']
with open ('././config/api.json', 'r') as f:
ApiData = json.load(f)
githubApi = ApiData['github']
class slashGithub(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.slash_command(description="Search any github user", guild_ids=guilds)
async def github(
self,
ctx,
username: Option(str, "Enter Github Username", required=True)
):
await ctx.response.defer()
url = str(githubApi)+ str(username)
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
r = await r.json()
try:
username = r["login"]
avatar = r["avatar_url"]
githuburl = r["html_url"]
name = r["name"]
location = r["location"]
email = r["email"]
company = r["company"]
bio = r["bio"]
repo = r["public_repos"]
embed = discord.Embed(
colour=0x00FFFF,
title=f"Github Profile",
description=f"""
> `Github username` : {username}
> `Github link` : {githuburl}
> `Name` : {name}
> `Location` : {location}
> `Email` : {email}
> `Company` : {company}
> `Bio` : {bio}
> `Repository` : {repo}
""")
embed.set_thumbnail(url=avatar)
await ctx.respond(embed=embed)
except:
embed = discord.Embed(
colour=0x983925,
description=f"> ⚠️Unable to find the github profile please check your spelling",
)
await ctx.respond(embed=embed)
def setup(bot):
bot.add_cog(slashGithub(bot))
avg_line_length: 23.219178 | max_line_length: 86 | alphanum_fraction: 0.629499
count_classes: 1,302 | score_classes: 0.766333 | count_generators: 0 | score_generators: 0 | count_decorators: 1,224 | score_decorators: 0.720424
count_async_functions: 1,143 | score_async_functions: 0.672749 | count_documentation: 535 | score_documentation: 0.314891

hexsha: b8e177cd51c2b5569754fe0293a60b5835aa4a05 | size: 1,126 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: zibuyu1995/Hardware @ 8461ebf9b04a603b397d8396ae14b359bd89a8cf | path: raspbeeryPi/smart-home-hubs/gy30.py | licenses: ["MIT"]
max_stars_count: 2 (2020-05-20T03:02:01.000Z to 2020-06-14T15:38:31.000Z)
max_issues_count: 3 (2018-08-05T04:38:56.000Z to 2019-11-25T07:02:15.000Z)
max_forks_count: 1 (2020-07-29T03:56:41.000Z to 2020-07-29T03:56:41.000Z)
content:
import json
import time
import smbus
from paho.mqtt import client as mqtt
# BH1750FVI config
DEVICE = 0x23 # Default device I2C address
POWER_DOWN = 0x00
POWER_ON = 0x01
RESET = 0x07
CONTINUOUS_LOW_RES_MODE = 0x13
CONTINUOUS_HIGH_RES_MODE_1 = 0x10
CONTINUOUS_HIGH_RES_MODE_2 = 0x11
ONE_TIME_HIGH_RES_MODE_1 = 0x20
ONE_TIME_HIGH_RES_MODE_2 = 0x21
ONE_TIME_LOW_RES_MODE = 0x23
bus = smbus.SMBus(1)
# MQTT Broker config
broker = '127.0.0.1'
port = 1883
topic = 'smartHomeHubs/light'
def read_light():
data = bus.read_i2c_block_data(DEVICE, ONE_TIME_HIGH_RES_MODE_1)
light_level = round((data[1] + (256 * data[0])) / 1.2, 2)
return light_level
def connect_mqtt():
client = mqtt.Client(client_id='light_01')
client.connect(host=broker, port=port)
return client
def run():
mqtt_client = connect_mqtt()
while True:
light_level = read_light()
publish_msg = {'lightLevel': light_level}
mqtt_client.publish(
topic,
payload=json.dumps(publish_msg)
)
print(publish_msg)
time.sleep(1)
if __name__ == "__main__":
run()
| 20.851852
| 68
| 0.694494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 130
| 0.115453
|
b8e1956c9e02704f82448e09bd95db729640c5f1
| 18,721
|
py
|
Python
|
python/temp/yolo_main.py
|
plasticanne/unity-object-detection-zoo
|
a436aec8fd6b9b4067aafc20706e7d1896223d64
|
[
"MIT"
] | null | null | null |
python/temp/yolo_main.py
|
plasticanne/unity-object-detection-zoo
|
a436aec8fd6b9b4067aafc20706e7d1896223d64
|
[
"MIT"
] | null | null | null |
python/temp/yolo_main.py
|
plasticanne/unity-object-detection-zoo
|
a436aec8fd6b9b4067aafc20706e7d1896223d64
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import os
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw, ImageFont
import cv2
from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model, Sequential, load_model
from keras.utils import multi_gpu_model
from tensorflow.image import ResizeMethod
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
from tensorflow.python.tools import optimize_for_inference_lib
import colorsys
from timeit import default_timer as timer
from yolo3 import utils
from yolo3.model import tiny_yolo_body, yolo_body, yolo_eval,yolo_eval2
class YOLO(object):
def __init__(self, classes_num, anchors_path, session):
#self.class_names = self._get_class(classes_path)
self.classes_num=classes_num
self.anchors = self._get_anchors(anchors_path)
#self._generate_colors()
self.sess = session
def _get_anchors(self, anchors_path_raw):
anchors_path = os.path.expanduser(anchors_path_raw)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
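# Illustrative note (not part of the original file): _get_anchors() expects the
# anchors file to contain one comma-separated line, e.g. the standard
# yolo_anchors.txt content
#   10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
# which np.array(...).reshape(-1, 2) turns into nine (width, height) anchor pairs.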
def load_model_by_h5(self, model_h5_path, model_score_threshold, iou_threshold, gpu_num):
model_path = os.path.expanduser(model_h5_path)
assert model_path.endswith(
'.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
yolo_model = load_model(model_path, compile=False)
assert yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(yolo_model.output) * (self.classes_num + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
if gpu_num >= 2:
yolo_model = multi_gpu_model(yolo_model, gpus=gpu_num)
self._generate_graph(yolo_model, self.classes_num,
model_score_threshold, iou_threshold)
def load_model_by_builder(self, weight_h5_path, model_score_threshold, iou_threshold, gpu_num):
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
is_tiny_version = num_anchors == 6 # default setting
self.yolo_model = tiny_yolo_body(Input(shape=(None, None, 3)), num_anchors//2, self.classes_num) \
if is_tiny_version else yolo_body(Input(shape=(None, None, 3)), num_anchors//3, self.classes_num)
# make sure model, anchors and classes match
self.yolo_model.load_weights(weight_h5_path)
print('{} model, anchors, and classes loaded.'.format(weight_h5_path))
if gpu_num >= 2:
    self.yolo_model = multi_gpu_model(self.yolo_model, gpus=gpu_num)
self._generate_graph(self.yolo_model, self.classes_num,
                     model_score_threshold, iou_threshold)
def _generate_graph(self, model_body, num_classes, model_score_threshold, iou_threshold):
# Generate output tensor targets for filtered bounding boxes.
#self.input_0 = K.placeholder(
# shape=(2), name="return_box_shape", dtype="int32")
self.input_1 = tf.placeholder(
shape=(None, None, 3), name="input_image",dtype="uint8")
new_img = tf.cast(self.input_1, tf.float32) /255.
new_img_dims = tf.expand_dims(new_img, 0)
out = model_body(new_img_dims)
boxes, scores, classes,num = yolo_eval2(out,
self.anchors,
num_classes,
#self.input_0,
score_threshold=model_score_threshold,
iou_threshold=iou_threshold)
self.output_nodes={}
self.output_nodes['boxes'] = tf.identity(boxes, name="output_boxes")
self.output_nodes['scores'] = tf.identity(scores, name="output_scores")
self.output_nodes['classes'] = tf.identity(classes, name="output_classes")
self.output_nodes['num'] = tf.identity(num, name="output_num")
def load_model_by_pb(self, model_pb_path):
model_path = os.path.expanduser(model_pb_path)
# Load model, or construct model and load weights.
with gfile.FastGFile(model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
yolo_model = tf.import_graph_def(graph_def, name='')
print('{} model, anchors, and classes loaded.'.format(model_path))
def write_pb(self, output_pb_path, output_pb_file):
self.input_nodes = [self.get_input["input_1"].name.split(":")[0]]
self.output_nodes = [self.get_output["boxes"].name.split(":")[0], self.get_output["scores"].name.split(":")[
0], self.get_output["classes"].name.split(":")[0]]
print("input nodes:", self.input_nodes)
print("output nodes:", self.output_nodes)
constant_graph = graph_util.convert_variables_to_constants(
self.sess, tf.get_default_graph().as_graph_def(), self.output_nodes)
optimize_Graph = optimize_for_inference_lib.optimize_for_inference(
constant_graph,
self.input_nodes, # an array of the input node(s)
self.output_nodes, # an array of output nodes
tf.float32.as_datatype_enum)
optimize_for_inference_lib.ensure_graph_is_valid(optimize_Graph)
with tf.gfile.GFile(os.path.join(output_pb_path, output_pb_file), "wb") as f:
f.write(constant_graph.SerializeToString())
def load_model_by_meta(self, model_meta_folder):
checkpoint = tf.train.get_checkpoint_state(
model_meta_folder).model_checkpoint_path
saver = tf.train.import_meta_graph(
checkpoint + '.meta', clear_devices=True)
saver.restore(self.sess, checkpoint)
yolo_model = tf.import_graph_def(self.sess.graph_def, name='')
print('{} model, anchors, and classes loaded.'.format(model_meta_folder))
def write_meta(self, meta_output_folder, meta_output_file_name):
saver = tf.train.Saver()
saver.save(self.sess, os.path.join(
meta_output_folder, meta_output_file_name+".ckpt"))
tf.train.write_graph(self.sess.graph_def,
meta_output_folder, meta_output_file_name+'.pb')
def get_nodes(self):
#num_anchors = len(self.anchors)
# is_tiny_version = num_anchors==6 # default setting
#self.input_0 = self.sess.graph.get_tensor_by_name("return_box_shape:0")
self.get_output={}
self.get_input={}
self.get_input["input_1"] = self.sess.graph.get_tensor_by_name("input_image:0")
self.get_output["boxes"] = self.sess.graph.get_tensor_by_name("output_boxes:0")
self.get_output["scores"] = self.sess.graph.get_tensor_by_name("output_scores:0")
self.get_output["classes"] = self.sess.graph.get_tensor_by_name("output_classes:0")
self.get_output["num"] = self.sess.graph.get_tensor_by_name("output_num:0")
def load_image_into_numpy_array(self,image):
(im_width, im_height) = image.size
return np.array(image, dtype='float32').astype(np.uint8)
#return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
def detect(self, image, force_image_resize):
image_data = pil_image_resize(force_image_resize, image)
image_data=self.load_image_into_numpy_array(image_data)
print("resize %s to %s" %
((image.size[1], image.size[0]), force_image_resize))
start = timer()
output_dict = self.sess.run(
self.get_output,
feed_dict={
#self.input_0: [image.size[1], image.size[0]],
self.get_input["input_1"]: image_data
})
#print(out_boxes, out_scores, out_classes)
end = timer()
print("detect time %s s" % (end - start))
print(output_dict)
output_dict["boxes"]=self.padding_boxes_reversize(output_dict["boxes"],force_image_resize,image.size)
return output_dict
def padding_boxes_reversize(self,boxes,in_shape,out_shape):
long_side = max( out_shape)
w_scale=long_side/in_shape[1]
h_scale=long_side/in_shape[0]
w_offset=(long_side-out_shape[0])/2.
h_offset=(long_side-out_shape[1])/2.
for box in boxes:
box[0] = box[0]*h_scale*in_shape[1] -h_offset
box[1] = box[1]*w_scale*in_shape[0] -w_offset
box[2] = box[2]*h_scale*in_shape[1] -h_offset
box[3] = box[3]*w_scale*in_shape[0] -w_offset
return boxes.astype('int32')
def get_class(classes_path_raw):
classes_path = os.path.expanduser(classes_path_raw)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def generate_colors(class_names):
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(class_names), 1., 1.)
for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
# Shuffle colors to decorrelate adjacent classes.
np.random.shuffle(colors)
np.random.seed(None) # Reset seed to default.
return colors
def draw(image,class_names,colors, draw_score_threshold, out_boxes, out_scores, out_classes):
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
if out_scores[i] >= draw_score_threshold:
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
return image
def tf_image_resize(target_size, image):
boxed_image = tf_letterbox_image(target_size, image)
return boxed_image
def tf_letterbox_image(size, image):
'''resize image with unchanged aspect ratio using padding'''
new_image = tf.image.resize_image_with_pad(
image,
target_height=size[1],
target_width=size[0],
method=ResizeMethod.BICUBIC
)
return new_image
def pil_image_resize(target_size, image):
if target_size != (None, None): # (height,width)
assert target_size[0] % 32 == 0, 'Multiples of 32 required'
assert target_size[1] % 32 == 0, 'Multiples of 32 required'
new_image = utils.letterbox_image(image, tuple(reversed(target_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
new_image = utils.letterbox_image(image, new_image_size)
return new_image
def cv2_letterbox_image(img_path, size):
'''resize image with unchanged aspect ratio using padding'''
im = cv2.imread(img_path)
old_size = im.shape[:2] # old_size is in (height, width) format
ratio_w = float(size[1])/old_size[1]
ratio_h = float(size[0])/old_size[0]
ratio=min(ratio_h,ratio_w)
new_size = tuple([int(x*ratio) for x in old_size])
# new_size should be in (width, height) format
im = cv2.resize(im, (new_size[1], new_size[0]))
delta_w = size[1] - new_size[1]
delta_h = size[0] - new_size[0]
top, bottom = delta_h//2, delta_h-(delta_h//2)
left, right = delta_w//2, delta_w-(delta_w//2)
color = [0, 0, 0]
new_image = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color)
return new_image
def detect_video(yolo, video_path,class_path, draw_score_threshold, force_image_resize, output_path=""):
vid = cv2.VideoCapture(video_path)
class_names =get_class(class_path)
colors=generate_colors(class_names)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(
video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
return_value, frame = vid.read()
image = Image.fromarray(frame)
output_dict = yolo.detect(
image, force_image_resize)
out_boxes=output_dict["boxes"]
out_scores=output_dict["scores"]
out_classes=output_dict["classes"]
image = draw(
image,class_names,colors, draw_score_threshold, out_boxes, out_scores, out_classes)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def detect_image(yolo, img_path,class_path, draw_score_threshold, force_image_resize):
image = Image.open(img_path)
output_dict = yolo.detect(
image, force_image_resize)
out_boxes=output_dict["boxes"]
out_scores=output_dict["scores"]
out_classes=output_dict["classes"]
class_names =get_class(class_path)
colors=generate_colors(class_names)
image = draw(image,class_names,colors, draw_score_threshold,
out_boxes, out_scores, out_classes)
image.show()
if __name__ == '__main__':
# loading model from:
# 0: h5
# 1: freezed unity interface pb
# 2: unity interface meta
# 3: builder & h5 weights
model_load_from = 0
# args
MODEL_h5_path = 'model_data/yolo.h5'
MODEL_pb_path = 'model_data/freezed_coco_yolo.pb'
ANCHORS_path = 'model_data/yolo_anchors.txt'
CLASSES_path = 'model_data/coco_classes.txt'
CLASSES_num = 80
MODEL_meta_folder = ""
MODEL_weight_h5_path = ""
# classify score threshold, value will be fixed to output freezed
MODEL_score_threshold = 0.1
IOU_threshold = 0.1 # yolo iou box filter, value will be fixed to output freezed
GPU_num = 1 # video cards count , cpu version or gpu version with counts will fixed after convert to pb graph
# doing detection:
# 0: no action
# 1: img
# 2: video
do_detect = 1
# args
IMG_path = 'demo/car_cat.jpg'
VIDEO_path = 'demo/Raccoon.mp4'
OUTPUT_video = ""
DRAW_score_threshold = 0.1 # score filter for draw boxes
# (height,width) 'Multiples of 32 required' , resize input to model
FORCE_image_resize = (416, 416)
# keras h5 convert to freezed graph output:
# 0: no action
# 1: h5-->freezed pb
# 2: h5-->meta
do_output_freezed_unity_interface = 0
# args
OUTPUT_pb_path = "./model_data"
OUTPUT_pb_file = "freezed_coco_yolo.pb"
OUTPUT_meta_folder = ""
OUTPUT_meta_file_name = ""
K.clear_session()
with K.get_session() as sess:
yolo = YOLO(CLASSES_num, ANCHORS_path, sess)
if model_load_from == 0:
yolo.load_model_by_h5(
MODEL_h5_path, MODEL_score_threshold, IOU_threshold, GPU_num)
elif model_load_from == 1:
yolo.load_model_by_pb(MODEL_pb_path)
elif model_load_from == 2:
yolo.load_model_by_meta(MODEL_meta_folder)
elif model_load_from == 3:
yolo.load_model_by_builder(MODEL_weight_h5_path, MODEL_score_threshold, IOU_threshold, GPU_num)
yolo.get_nodes()
if model_load_from == 0:
if do_output_freezed_unity_interface == 1:
yolo.write_pb(OUTPUT_pb_path, OUTPUT_pb_file)
elif do_output_freezed_unity_interface == 2:
yolo.write_meta(OUTPUT_meta_folder, OUTPUT_meta_file_name)
else:
if do_output_freezed_unity_interface != 0:
print("for output, model must loading from .h5")
if do_detect == 1:
detect_image(yolo, IMG_path,CLASSES_path, DRAW_score_threshold,
FORCE_image_resize)
elif do_detect == 2:
detect_video(yolo, VIDEO_path,CLASSES_path, DRAW_score_threshold,
FORCE_image_resize, OUTPUT_video)
| 41.418142
| 116
| 0.633834
| 8,307
| 0.443726
| 0
| 0
| 0
| 0
| 0
| 0
| 3,130
| 0.167192
|
b8e2aaafc2b4702776593b03b7fea1abb7e1b4d0
| 3,262
|
py
|
Python
|
src/extractor-lib/tests/csv_generation/test_normalized_directory_template.py
|
stephenfuqua/Ed-Fi-X-Fizz
|
94597eda585d4f62f69c12e2a58fa8e8846db11b
|
[
"Apache-2.0"
] | 3
|
2020-10-15T10:29:59.000Z
|
2020-12-01T21:40:55.000Z
|
src/extractor-lib/tests/csv_generation/test_normalized_directory_template.py
|
stephenfuqua/Ed-Fi-X-Fizz
|
94597eda585d4f62f69c12e2a58fa8e8846db11b
|
[
"Apache-2.0"
] | 40
|
2020-08-17T21:08:33.000Z
|
2021-02-02T19:56:09.000Z
|
src/extractor-lib/tests/csv_generation/test_normalized_directory_template.py
|
stephenfuqua/Ed-Fi-X-Fizz
|
94597eda585d4f62f69c12e2a58fa8e8846db11b
|
[
"Apache-2.0"
] | 10
|
2021-06-10T16:27:27.000Z
|
2021-12-27T12:31:57.000Z
|
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from os import path
from sys import platform
from edfi_lms_extractor_lib.csv_generation.write import (
_normalized_directory_template,
USERS_ROOT_DIRECTORY,
ASSIGNMENT_ROOT_DIRECTORY,
SUBMISSION_ROOT_DIRECTORY,
)
OUTPUT_DIRECTORY = "output_directory"
OUTPUT_DIRECTORY_WITH_SLASH = "output_directory/"
OUTPUT_DIRECTORY_WITH_BACKSLASH = "output_directory\\"
def describe_when_template_has_one_element():
EXPECTED_RESULT = f"{OUTPUT_DIRECTORY}{path.sep}{USERS_ROOT_DIRECTORY[0]}"
BACKSLASH_LINUX = f"{OUTPUT_DIRECTORY}\\{path.sep}{USERS_ROOT_DIRECTORY[0]}"
def it_should_join_bare_output_directory_correctly():
# arrange / act
result = _normalized_directory_template(OUTPUT_DIRECTORY, USERS_ROOT_DIRECTORY)
# assert
assert result == EXPECTED_RESULT
def it_should_join_output_directory_with_slash_correctly():
# arrange / act
result = _normalized_directory_template(
OUTPUT_DIRECTORY_WITH_SLASH, USERS_ROOT_DIRECTORY
)
# assert
assert result == EXPECTED_RESULT
def it_should_join_output_directory_with_backslash_correctly():
# arrange / act
result = _normalized_directory_template(
OUTPUT_DIRECTORY_WITH_BACKSLASH, USERS_ROOT_DIRECTORY
)
# assert
if platform == "win32":
assert result == EXPECTED_RESULT
else:
assert result == BACKSLASH_LINUX
def describe_when_template_has_two_elements():
EXPECTED_RESULT = (
f"{OUTPUT_DIRECTORY}{path.sep}"
f"{ASSIGNMENT_ROOT_DIRECTORY[0]}{path.sep}"
f"{ASSIGNMENT_ROOT_DIRECTORY[1]}"
)
def it_should_join_bare_output_directory_correctly():
# arrange / act
result = _normalized_directory_template(
OUTPUT_DIRECTORY, ASSIGNMENT_ROOT_DIRECTORY
)
# assert
assert result == EXPECTED_RESULT
def it_should_join_output_directory_with_slash_correctly():
# arrange / act
result = _normalized_directory_template(
OUTPUT_DIRECTORY_WITH_SLASH, ASSIGNMENT_ROOT_DIRECTORY
)
# assert
assert result == EXPECTED_RESULT
def describe_when_template_has_three_elements():
EXPECTED_RESULT = (
f"{OUTPUT_DIRECTORY}{path.sep}"
f"{SUBMISSION_ROOT_DIRECTORY[0]}{path.sep}"
f"{SUBMISSION_ROOT_DIRECTORY[1]}{path.sep}"
f"{SUBMISSION_ROOT_DIRECTORY[2]}"
)
def it_should_join_bare_output_directory_correctly():
# arrange / act
result = _normalized_directory_template(
OUTPUT_DIRECTORY, SUBMISSION_ROOT_DIRECTORY
)
# assert
assert result == EXPECTED_RESULT
def it_should_join_output_directory_with_slash_correctly():
# arrange / act
result = _normalized_directory_template(
OUTPUT_DIRECTORY_WITH_SLASH, SUBMISSION_ROOT_DIRECTORY
)
# assert
assert result == EXPECTED_RESULT
| 30.773585
| 87
| 0.701104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 857
| 0.262722
|
b8e2f0eed3c941ac36abbbe75adbed48e0a9d358
| 425
|
py
|
Python
|
python3-tutorial/02 Advanced/1216 UpdateMany.py
|
CoderDream/python-best-practice
|
40e6b5315daefb37c59daa1a1990ac1ae10f8cca
|
[
"MIT"
] | null | null | null |
python3-tutorial/02 Advanced/1216 UpdateMany.py
|
CoderDream/python-best-practice
|
40e6b5315daefb37c59daa1a1990ac1ae10f8cca
|
[
"MIT"
] | null | null | null |
python3-tutorial/02 Advanced/1216 UpdateMany.py
|
CoderDream/python-best-practice
|
40e6b5315daefb37c59daa1a1990ac1ae10f8cca
|
[
"MIT"
] | null | null | null |
# The update_one() method only modifies the first matching document; to modify every matching document, use update_many().
# The following example finds all documents whose name field starts with "F" and sets the alexa field of every match to "123":
import pymongo
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["runoobdb"]
mycol = mydb["sites"]
myquery = {"name": {"$regex": "^F"}}
newvalues = {"$set": {"alexa": "123"}}
x = mycol.update_many(myquery, newvalues)
print(x.modified_count, "documents modified")
# The output is:
#
# 1 documents modified
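# Hedged companion example (not part of the original tutorial): update_one() with
# the same query changes at most the first matching document.
y = mycol.update_one(myquery, newvalues)
print(y.modified_count, "documents modified") # prints 0 here, because update_many above already set alexa to "123"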
| 20.238095
| 63
| 0.694118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 388
| 0.658744
|
b8e31fa93df9ea85fa09d4f2fd6acdf91de443e9
| 789
|
py
|
Python
|
search/linear/linear_search.py
|
alfiejsmith/algorithms
|
c1d816aba932a1ae0664ff2a5b7784e2a01e1de2
|
[
"MIT"
] | null | null | null |
search/linear/linear_search.py
|
alfiejsmith/algorithms
|
c1d816aba932a1ae0664ff2a5b7784e2a01e1de2
|
[
"MIT"
] | null | null | null |
search/linear/linear_search.py
|
alfiejsmith/algorithms
|
c1d816aba932a1ae0664ff2a5b7784e2a01e1de2
|
[
"MIT"
] | null | null | null |
from random import shuffle
"""
Will search a list of integers for a value using a linear search algorithm.
Does not require a sorted list to be passed in.
Returns -1 if item is not found
Linear Search:
Best - O(1)
Worst - O(n)
Average - O(n)
Space Complexity - O(1)
"""
def search(data: list, value: int) -> int:
for i in range(len(data)):
if data[i] == value:
return i
return -1
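# Illustrative check (not part of the original module): search() returns the index
# of the first occurrence, or -1 when the value is absent.
assert search([4, 8, 15, 16, 23, 42], 23) == 4
assert search([4, 8, 15, 16, 23, 42], 7) == -1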
def run():
print("Linear Search")
data_size = int(input("Enter the max value: "))
data = list(range(data_size))
shuffle(data)
value = int(input("Enter value to search for: "))
print("Searching for {} in {}".format(value, data))
result = search(data, value)
print("Not found in list" if result == -1 else "Found at index {}".format(result))
| 22.542857
| 86
| 0.636248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 373
| 0.47275
|
b8e38e1d075d3a7559a30980f5c79e4ab5617467
| 3,657
|
py
|
Python
|
gitScrabber/scrabTasks/git/projectDates.py
|
Eyenseo/gitScrabber
|
e3f5ce1a7b034fa3e40a54577268228a3be2b141
|
[
"MIT"
] | null | null | null |
gitScrabber/scrabTasks/git/projectDates.py
|
Eyenseo/gitScrabber
|
e3f5ce1a7b034fa3e40a54577268228a3be2b141
|
[
"MIT"
] | null | null | null |
gitScrabber/scrabTasks/git/projectDates.py
|
Eyenseo/gitScrabber
|
e3f5ce1a7b034fa3e40a54577268228a3be2b141
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2017 Roland Jaeger
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from ..scrabTask import GitTask
import utils
name = "ProjectDates"
version = "1.1.0"
class ProjectDates(GitTask):
"""
Gets the first and last commit date in ISO format
Example:
ProjectDates:
first_change: '1998-12-21T10:52:45+00:00'
last_change: '2017-08-09T13:37:06+10:00'
:param parameter: Parameter given explicitly for this task, for all
projects, defined in the task.yaml
:param global_args: Arguments that will be passed to all tasks. They
_might_ contain something that is useful for the task,
but the task has to check if it is _there_ as these
are user provided. If they are needed to work that
check should happen in the argHandler.
"""
def __init__(self, parameter, global_args):
super(ProjectDates, self).__init__(name, version, parameter,
global_args)
self.__project = None
def __first_commit_date(self):
"""
The function will obtain the first commit date from the project
repository
:returns: The date of the first commit in the projects repository
(2005-04-16T15:20:36-07:00)
"""
return utils.run('git',
['log', '--all', '--format=%cI', '--first-parent',
'--reverse', '--max-parents=0'],
self.__project.location).splitlines()[0].rstrip()
def __last_commit_date(self):
"""
The function will obtain the last commit date from the project
repository
:returns: The date of the last commit in the projects repository
(2017-08-03T15:25:14-07:00)
"""
return utils.run('git', ['log', '--all', '-1', '--format=%cI'],
self.__project.location).rstrip()
def scrab(self, project):
"""
Gets the first and last commit date in ISO format
:param project: The project
:returns: The first and last commit date in ISO format
Example:
ProjectDates:
first_change: '1998-12-21T10:52:45+00:00'
last_change: '2017-08-09T13:37:06+10:00'
"""
self.__project = project
report = {}
report['first_change'] = self.__first_commit_date()
report['last_change'] = self.__last_commit_date()
return report
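# Hedged reference (not part of the original task): the two private helpers above
# are equivalent to running, inside the project checkout,
#   git log --all --format=%cI --first-parent --reverse --max-parents=0
#   git log --all -1 --format=%cI
# and taking the first line of the first command and the stripped output of the second.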
| 37.316327
| 80
| 0.623462
| 2,477
| 0.677331
| 0
| 0
| 0
| 0
| 0
| 0
| 2,787
| 0.7621
|
b8e396ee442faafcbc18f8f10aa0618271fca39e
| 3,526
|
py
|
Python
|
demo_maecce_for_pls.py
|
hkaneko1985/dcek
|
13d9228b2dc2fd87c2e08a01721e1b1b220f2e19
|
[
"MIT"
] | 25
|
2019-08-23T12:39:14.000Z
|
2022-03-30T08:58:15.000Z
|
demo_maecce_for_pls.py
|
hkaneko1985/dcek
|
13d9228b2dc2fd87c2e08a01721e1b1b220f2e19
|
[
"MIT"
] | 2
|
2022-01-06T11:21:21.000Z
|
2022-01-18T22:11:12.000Z
|
demo_maecce_for_pls.py
|
hkaneko1985/dcek
|
13d9228b2dc2fd87c2e08a01721e1b1b220f2e19
|
[
"MIT"
] | 16
|
2019-12-12T08:20:48.000Z
|
2022-01-26T00:34:31.000Z
|
# -*- coding: utf-8 -*-
# %reset -f
"""
@author: Hiromasa Kaneko
"""
# Demonstration of MAEcce in PLS modeling
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
from dcekit.validation import mae_cce
from sklearn import datasets
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import GridSearchCV, train_test_split
# settings
number_of_training_samples = 50 # 30, 50, 100, 300, 500, 1000, 3000, for example
number_of_test_samples = 10000
number_of_x_variables = 30 # 10, 30, 50, 100, 300, 500, 1000, 3000, for example
number_of_y_randomization = 50
max_pls_component_number = 20
fold_number = 5
# generate sample dataset
x, y = datasets.make_regression(n_samples=number_of_training_samples + number_of_test_samples,
n_features=number_of_x_variables, n_informative=10, noise=30,
random_state=number_of_training_samples + number_of_x_variables)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=number_of_test_samples, random_state=0)
# autoscaling
autoscaled_x_train = (x_train - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1)
autoscaled_y_train = (y_train - y_train.mean()) / y_train.std(ddof=1)
autoscaled_x_test = (x_test - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1)
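# Note added for clarity (not in the original demo): the test set is deliberately
# scaled with the *training* mean and standard deviation, so no information from
# the test samples leaks into the model.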
# cross-validation
pls_components = np.arange(1, max_pls_component_number + 1)
cv_model = GridSearchCV(PLSRegression(), {'n_components': pls_components}, cv=fold_number)
cv_model.fit(autoscaled_x_train, autoscaled_y_train)
# modeling and prediction
model = getattr(cv_model, 'estimator')
hyperparameters = list(cv_model.best_params_.keys())
for hyperparameter in hyperparameters:
setattr(model, hyperparameter, cv_model.best_params_[hyperparameter])
model.fit(autoscaled_x_train, autoscaled_y_train)
estimated_y_train = np.ndarray.flatten(model.predict(autoscaled_x_train))
estimated_y_train = estimated_y_train * y_train.std(ddof=1) + y_train.mean()
predicted_y_test = np.ndarray.flatten(model.predict(autoscaled_x_test))
predicted_y_test = predicted_y_test * y_train.std(ddof=1) + y_train.mean()
# MAEcce
mae_cce_train = mae_cce(cv_model, x_train, y_train, number_of_y_randomization=number_of_y_randomization, do_autoscaling=True, random_state=0)
# yy-plot for test data
plt.figure(figsize=figure.figaspect(1))
plt.scatter(y_test, predicted_y_test)
y_max = np.max(np.array([np.array(y_test), predicted_y_test]))
y_min = np.min(np.array([np.array(y_test), predicted_y_test]))
plt.plot([y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)],
[y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min)], 'k-')
plt.ylim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlim(y_min - 0.05 * (y_max - y_min), y_max + 0.05 * (y_max - y_min))
plt.xlabel('Actual Y')
plt.ylabel('Estimated Y')
plt.show()
# r2p, RMSEp, MAEp for test data
print('r2p: {0}'.format(float(1 - sum((y_test - predicted_y_test) ** 2) / sum((y_test - y_test.mean()) ** 2))))
print('RMSEp: {0}'.format(float((sum((y_test - predicted_y_test) ** 2) / len(y_test)) ** 0.5)))
mae_test = float(sum(abs(y_test - predicted_y_test)) / len(y_test))
print('MAEp: {0}'.format(mae_test))
# histogram of MAEcce
plt.rcParams["font.size"] = 18
plt.hist(mae_cce_train, bins=30)
plt.plot(mae_test, 0.2, 'r.', markersize=30)
plt.xlabel('MAEcce (histogram), MAEp (red point)')
plt.ylabel('frequency')
plt.show()
| 44.632911
| 142
| 0.723199
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 544
| 0.154282
|
b8e5c7f7a18f5689f0dfad89a71f45469022396b
| 151,828
|
py
|
Python
|
bot.py
|
admica/evediscobot
|
3ece4cd65718ba5d62ef0beab80f1793ac96aa3a
|
[
"MIT"
] | null | null | null |
bot.py
|
admica/evediscobot
|
3ece4cd65718ba5d62ef0beab80f1793ac96aa3a
|
[
"MIT"
] | null | null | null |
bot.py
|
admica/evediscobot
|
3ece4cd65718ba5d62ef0beab80f1793ac96aa3a
|
[
"MIT"
] | null | null | null |
#!/home/admica/python3/bin/python3
#Discord eve bot by admica
import asyncio, discord, time, threading, websocket, json
from discord.ext import commands
from discord.ext.commands import Bot
import aiohttp
import re
from queue import Queue
from datetime import timedelta
from datetime import datetime
import os, sys
import requests
from chatterbot import ChatBot
from ctypes.util import find_library
from random import randint
import pickle
from tensorflow.python.keras.layers import Dense, Reshape, Flatten, Dropout, Input, concatenate
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose, Activation
from keras.layers import Input, Embedding, LSTM, Dense, RepeatVector, Dropout, merge,concatenate
from keras.optimizers import Adam
from keras.models import Model, Sequential
from keras.layers import Activation, Dense
from keras.preprocessing import sequence
from six.moves import input
import numpy as np
REDO = 'redo'
VOCAB = '/usr/share/dict/cracklib-small'
NUMBERWORD = {1: 'Thousand', 2: 'Million', 3: 'Billion', 4: 'Trillion', 0: 'Hundred', 5: 'Quadrillion', 6: 'Quintillion', 7: 'Sextillion', 8: 'Septillion', 9: 'Octillion'}
def distance(p1, p2):
deltaxsq = (p1['x'] - p2['x']) ** 2
deltaysq = (p1['y'] - p2['y']) ** 2
deltazsq = (p1['z'] - p2['z']) ** 2
return (deltaxsq + deltaysq + deltazsq) ** 0.5
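# Illustrative usage (not part of the original bot): distance() expects two dicts
# with 'x', 'y' and 'z' keys and returns the Euclidean distance between them, e.g.
#   distance({'x': 0, 'y': 0, 'z': 0}, {'x': 3, 'y': 4, 'z': 0}) == 5.0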
def shorten_weapon(s):
s = re.sub('Light Missile','LM', s)
s = re.sub('Heavy Missile','HM', s)
s = re.sub('Republic Fleet','RF', s)
s = re.sub('Heavy Assault Missile','HAM', s)
s = re.sub('Autocannon','AC', s)
s = re.sub('AutoCannon','AC', s)
s = re.sub('Carbonized Lead', 'Lead', s)
s = re.sub('Depleted Uranium', 'Uranium', s)
s = re.sub('Missile Launcher', 'ML', s)
s = re.sub('Federation Navy', 'Fed Navy', s)
s = re.sub('Imperial Navy', 'Imp Navy', s)
s = re.sub('Howitzer Artillery', 'Arty', s)
s = re.sub('Neutralizer', 'Neut', s)
s = re.sub('Scrambler', 'Scram', s)
s = re.sub('Hobgoblin', 'Hobgob', s)
return s
def shorten_ship(s):
s = re.sub('Federation Navy', 'Fed Navy', s)
s = re.sub('Megathron', 'Megatron', s)
s = re.sub('Thrasher', 'Trasher', s)
s = re.sub('Scorpion', 'Scorp', s)
s = re.sub('Apocalypse', 'Apoc', s)
return s
class Zbot:
def __init__(self):
self.date_start = datetime.now()
self.count = 0 # global kill counter
self.qcounter = Queue(maxsize=1) # share counter between main and thread
self.cb_qin = Queue(maxsize=512) # share chatbot from thread to thread
self.cb_qout = Queue(maxsize=512)
cb_qthread = threading.Thread(target=self.cb_thread, args=(self.cb_qin, self.cb_qout))
cb_qthread.start() # chatbot
self.dir_fits = './fits/' # end with trailing slash
self.url_characters = 'https://esi.evetech.net/latest/characters/'
self.stations = []
t = threading.Thread(target=self.t_stations)
t.start()
self.regionslist = 'Aridia Black_Rise The_Bleak_Lands Branch Cache Catch The_Citadel Cloud_Ring Cobalt_Edge Curse Deklein Delve Derelik Detorid Devoid Domain Esoteria Essence Etherium_Reach Everyshore Fade Feythabolis The_Forge Fountain Geminate Genesis Great_Wildlands Heimatar Immensea Impass Insmother Kador The_Kalevala_Expanse Khanid Kor-Azor Lonetrek Malpais Metropolis Molden_Heath Oasa Omist Outer_Passage Outer_Ring Paragon_Soul Period_Basis Perrigen_Falls Placid Providence Pure_Blind Querious Scalding_Pass Sinq_Laison Solitude The_Spire Stain Syndicate Tash-Murkon Tenal Tenerifis Tribute Vale_of_the_Silent Venal Verge Vendor Wicked_Creek'.split(' ')
with open('regions.txt', 'r') as f:
raw = f.read()
self.regions = eval(raw)
with open('items.txt', 'r') as f:
raw = f.read()
self.items = eval(raw)
#self.items_display = self.items.copy()
#for i in _items:
# self.items_display[i] = shorten_weapon(self.items[i])
# self.items_display[i] = shorten_ship(self.items[i])
with open('systems.txt', 'r') as f:
raw = f.read()
self.systems = eval(raw)
with open('stargates.txt', 'r') as f:
raw = f.read()
self.stargates = eval(raw)
self.corps = []
with open('the.corps', 'r') as f:
for line in f.readlines():
self.corps.append(line.strip().split(":")[-1])
self.ch = {}
for name in ['main', 'debug']:
with open('the.channel_{}'.format(name), 'r') as f:
self.ch[name] = {}
line = f.readline().strip()
self.ch[name]['name'] = ':'.join(line.split(":")[:-1])
self.ch[name]['id'] = line.split(":")[-1]
self.ch_train = {}
with open('the.channel_train', 'r') as f:
for line in f.readlines():
line = line.strip()
name = ':'.join(line.split(":")[:-1])
ch_id = line.split(":")[-1]
self.ch_train[ch_id] = {}
self.ch_train[ch_id]['id'] = ch_id
self.ch_train[ch_id]['name'] = name
self.ch_train[ch_id]['in'] = Queue(maxsize=256)
self.ch_train[ch_id]['out'] = Queue(maxsize=256)
self.ch_train[ch_id]['pair'] = []
print(self.ch_train)
self.son = False
self.svol = 0.75
with open('the.sound_on', 'r') as f:
try:
volume = float(f.readline().strip())
if volume > 0:
self.son = True
self.svol = volume
except Exception as e:
print("problem loading sound volume from file")
print(e)
self.join_voice = None
with open('the.channel_voice', 'r') as f:
line = f.readline().strip()
if line == 'off': # allow turning off
print("NOT JOINING VOICE CHANNEL")
else:
self.join_voice = line.split(":")[-1]
self.join_voice = None # DISABLE VOICE CHANNEL JOINING WITH THIS
with open('the.key', 'r') as f:
self.private_key = f.readline().strip()
self.admins = []
with open('the.admins', 'r') as f:
for line in f.readlines():
self.admins.append(line.strip())
self.loop = asyncio.new_event_loop()
self.Bot = commands.Bot(command_prefix='#')
self.q = asyncio.Queue()
print("Startup complete.")
def t_stations(self):
"""loading station data can take time, so its threaded here as a background loading task"""
import yaml
self.stations = yaml.load( open('staStations.yaml','r') )
return False
def start_timer(self):
self.thread_timer = threading.Thread(target=self.timer_thread, args=(self.q,self.ch['main']))
self.thread_timer.daemon = True
self.thread_timer.start()
def start(self):
self.thread = threading.Thread(target=self.bot_thread, args=(self.bot_id,self.q,self.loop,self.Bot,self.ch['main'],self.admins,self.private_key,self.qcounter,self.ch,self.cb_qin,self.cb_qout,self.ch_train,self.join_voice,self.son,self.svol))
self.thread.daemon = True
self.thread.start()
def check_auth(self, _id):
if self.people.get(_id, None) == None:
return "<@{}> You need to be authenticated first. Use #get_auth, #set_auth, then #set_char. Then try this command.".format(_id)
if self.people[_id].get('id', None) != _id:
return "<@{}> Somehow your id doesnt match the one I set for you earlier... I am broken, the universe has exploded, everything bad.".format(_id)
the_char = self.people[_id].get('char', 'None')
the_char_id = self.people[_id].get('char_id', 'None')
the_token = self.people[_id].get('token', 'None')
the_expires = self.people[_id].get('expires', 'None')
time_left = 0
if the_expires != 'None':
the_expires = str(self.people[_id]['expires'])[:-10]
time_left = ( self.people[_id]['expires'] - datetime.utcnow() ).seconds
if time_left > 1234 or time_left < 1:
time_left = 0 # just set to 0, its not used here except for knowing if auth looks valid
if the_char == 'None' or the_char_id == 'None' or the_token == 'None' or the_expires == 'None' or time_left == 0:
data = "<@{}> You need to update your auth credentials. Check with the #get_auth command.".format(_id)
return data
else:
#print("CHECK AUTH SAYS GOOD: {} {} {} {}".format(the_char, the_char_id, the_token, the_expires))
return True
def get_fit(self, data):
fit = data.strip().split('\n')
ship = fit[0][fit[0].find('[')+1:fit[0].find(',')]
table = {}
ship_found = False
for ship_id in self.items:
if self.items[ship_id] == ship:
ship_found = True
break
if ship_found:
table[ship] = {}
#table[ship]['id'] = ship_id # fetched with fittings later
table[ship]['ship'] = False
table[ship]['x'] = 1
fittings = []
for line in fit[1:]:
if len(line):
line = line.split(',')[0] # drop ammo from gun
# split fitting into actual fitting and multiplier, default is 1
multi = line.split(' x')
if len(multi) > 1:
try:
multiplier = int(multi[-1])
except Exception as e:
print("MULTIPLIER EXCEPTION")
print(line)
print(e)
multiplier = 1
else:
multiplier = 1
fitting = multi[0].strip() # fitting
#print('[{}]'.format(fitting))
if fitting not in fittings:
fittings.append(fitting)
table[fitting] = {}
table[fitting]['x'] = multiplier # for price count
table[fitting]['ship'] = False
else:
table[fitting]['x'] += 1 # increment count
lookup = '' # coma delimited list of ids to search for
for fitting in table:
for item_id in self.items:
if fitting == self.items[item_id]:
lookup += '{},'.format(item_id)
table[fitting]['id'] = item_id
#print("ADDED LOOKUP {} FOR {}".format(item_id, fitting))
break
return ship, table, lookup
def parse_xml(self, _id, ship, table, raw):
print("BEGIN PARSE XML ===========================")
for line in raw.split('<row '):
if line.startswith('buysell='):
#print(line)
xml = line.split('"')
for i, p in enumerate(xml):
    if 'typeID' in p:
        type_id = xml[i+1]
    if 'price' in p:
        price = float(xml[i+1])
table[self.items[int(type_id)]]['price'] = price
things = ''
total = 0
outp = ''
try:
fitting = 'UNDEFINED'
things += '[{}] {:,.2f} ISK\n'.format(ship, table[ship]['price'])
total += table[ship]['price'] # starting with ship add from here
del table[ship] # delete so walking the table doesnt include it again
l = []
for fitting in table:
try:
price = table[fitting]['price'] * table[fitting]['x']
l.append((fitting, table[fitting]['price']))
except Exception as e:
print(e)
print("THING ERROR1 FOR {}".format(fitting))
l = sorted(l, key=lambda l: l[1], reverse=True) # sort by price descending
try:
for fitting, price in l:
print(fitting, price)
if table[fitting]['x'] > 1:
fitting_display = '{} x{}'.format(fitting, table[fitting]['x']) # include x
things += "[{}] {:,.2f} ISK ({:,.2f} ea)\n".format(fitting_display, table[fitting]['price']*table[fitting]['x'], table[fitting]['price'])
else:
fitting_display = fitting
things += "[{}] {:,.2f} ISK\n".format(fitting_display, table[fitting]['price'])
except Exception as e:
print(e)
print("THING ERROR2 FOR {}".format(fitting))
isk = '{:,.2f}'.format(total)
comma_count = isk.count(',')
if comma_count == 0:
    word = '{} {}'.format(isk, NUMBERWORD[0])
else:
    flip = isk[:isk.find(',')+2].replace(',','.') # e.g. 1,234,567.89 -> "1.2"
    word = '{} {}'.format(flip, NUMBERWORD[comma_count])
outp = '<@{}> **{}** [*{} ISK*]```css\n'.format(_id, word, isk)
outp += things.strip() + '```'
except Exception as e:
print(e)
print("ERROR BUILDING THINGS STRING FOR {}".format(fitting))
return total, outp
def bot_thread(self, bot_id, q, loop, bot, channel, admins, private_key, qcounter, ch, cbq_in, cbq_out, ch_train, join_voice, son, svol):
asyncio.set_event_loop(loop)
self.bot_id = bot_id
self.pause = False
self.pause_train = False
self.q = q
self.qthread = qcounter
self.ch = ch
self.dt_last = self.date_start
self.last = 0
self.flag_first_count = True
self.cbq_in = cbq_out
self.cbq_out = cbq_in
self.chtrain = ch_train
self.voice = [join_voice, None] # [id, <discord.voice_client.VoiceClient object >]
self.sound_on = son
self.sound_volume = float(svol)
self.status = 'Starting up....'
try: # load market orders
    self.market_buys = {} # buy orders are not persisted yet; default to empty so later lookups do not fail
    #self.market_buys = pickle.load(open('market_buys.pickle','rb'))
    self.market_sells = pickle.load(open('market_sells.pickle','rb'))
except Exception as e:
print("ERROR LOADING MARKET ORDERS: {}".format(e))
self.market_buys = {}
self.market_sells = {}
try: # load people
with open('people.pickle', 'rb') as f:
self.people = pickle.load(f)
except Exception as e:
print("ERROR LOADING PEOPLE: {}".format(e))
self.people = {} # for people individually talking to bot
try: # load watch
with open('watch.txt', 'r') as f:
self.watch = eval(f.read())
except:
self.watch = {} # no file, nothing to watch
@bot.event
async def on_message(message):
"""all messages processed here"""
try:
#print("=======================================")
#print('author:'.format(message.author))
#print('call: {}'.format(message.call))
#print('channel: {} id:{}'.format(message.channel, message.channel.id))
print('channel_mentions: {}'.format(message.channel_mentions))
print('clean_content: {}'.format(message.clean_content))
#print('content: {}'.format(message.content))
#print('edited_timestamp: {}'.format(message.edited_timestamp))
#print('embeds: {}'.format(message.embeds))
#print('id: {}'.format(message.id))
#print('mention_everyone: {}'.format(message.mention_everyone))
#print('mentions: {}'.format(message.mentions))
#print('nonce: {}'.format(message.nonce))
#print('pinned: {}'.format(message.pinned))
#print('raw_channel_mentions: {}'.format(message.raw_channel_mentions))
#print('raw_mentions: {}'.format(message.raw_mentions))
#print('raw_role_mentions: {}'.format(message.raw_role_mentions))
#print('reactions: {}'.format(message.reactions))
#print('role_mentions: {}'.format(message.role_mentions))
#print('server: {}'.format(message.server))
#print(dir(message.server))
#print('system_content: {}'.format(message.system_content))
#print('timestamp: {}'.format(message.timestamp))
#print('tts: {}'.format(message.tts))
#print('type: {}'.format(message.type))
#print("=======================================")
except:
pass
try:
parts = message.clean_content.split()
_id = message.author.id
if _id == self.bot_id:
pass # my own message
elif parts[0].lower().startswith('@killbot'):
print(parts)
msg = ' '.join(parts[1:])
#print("CB MESSAGE FOR ME: {}".format(msg))
self.cbq_in.put([msg])
#print("CB PUT MSG")
response = self.cbq_out.get()
#print("CB THOUGHT OF A RESPONSE")
print(response)
await bot.send_message(message.channel, '<@{}> {}'.format(_id, response))
elif parts[0].lower().startswith('#'):
pass # ignore commands
elif parts[0].find('[') >= 0 and message.clean_content.find(']') >= 0:
#print("Possible fit detected.")
ship, table, lookup = self.get_fit(message.clean_content.strip())
print(ship, table, lookup)
if lookup:
url = "https://api.eve-marketdata.com/item_prices.xml&char_name=admica&type_ids={}®ion_ids=10000002&buysell=s".format(lookup[:-1])
print(url)
try:
async with aiohttp.ClientSession() as session:
    raw_response = await session.get(url)
    response = await raw_response.text()
raw = response.replace('null','None').replace('true','True').replace('false','False')
_id = message.author.id
total, outp = self.parse_xml(_id, ship, table, raw)
except:
await asyncio.sleep(1)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
raw = response.replace('null','None').replace('true','True').replace('false','False')
_id = message.author.id
total, outp = self.parse_xml(_id, ship, table, raw)
if total:
await bot.send_message(message.channel, outp)
elif parts[0].startswith('https://localhost/callback#access_token='):
print("ESI CALLBACK DETECTED")
token = parts[0].split('#access_token=')[-1]
token = token.split('&token_type')[0]
if self.people.get(_id, None) is None:
self.people[_id] = {}
self.people[_id]['id'] = _id
self.people[_id]['token'] = token
self.people[_id]['expires'] = datetime.utcnow() + timedelta(minutes=20)
# save
with open('people.pickle', 'wb') as f:
pickle.dump(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)
await bot.send_message(message.channel, 'Token received. Expires {}'.format(str(self.people[_id]['expires'])[:-7]))
elif self.pause_train:
print("TRAINING PAUSED, IGNORING {}".format(message.clean_content))
elif message.channel.id in self.chtrain: # training channel ids are keys
cid = message.channel.id
if parts[0].lower().startswith('@'):
parts = parts[1:]
if len(self.chtrain[cid]['pair']) > 0:
pass
#self.chtrain[cid]['pair'] = [ self.chtrain[cid]['pair'][-1], ' '.join(parts) ]
#print("TRAIN[{}]>[{}]".format(self.chtrain[cid]['pair'][0], self.chtrain[cid]['pair'][-1]))
#self.cbq_in.put([ self.chtrain[cid]['pair'][0], self.chtrain[cid]['pair'][1] ])
#ret = self.cbq_out.get()
#if ret == 'TRAINED':
# pass
#else:
# print("Problem in training")
else:
self.chtrain[cid]['pair'] = [ ' '.join(parts) ]
except Exception as e:
print("killbot error: {}".format(e))
await bot.process_commands(message)
@bot.event
async def on_ready():
try:
discord.opus.load_opus(find_library("opus"))
await bot.change_presence(game=discord.Game(name='EVE Online'))
if self.voice[0]:
try:
self.voice[1] = await bot.join_voice_channel( bot.get_channel( self.voice[0] ) )
print("JOINED VOICE: {}".format(self.voice))
except Exception as e:
print("*** Failed to join voice channel: {}".format(self.voice))
while True:
data = await self.q.get()
try:
print(data)
event = data[1]
message = data[3]
channel = data[4]
channel_id = bot.get_channel(channel)
#print('bot.send_message({}, {})'.format(channel_id, message))
if message.startswith('#SECRET_STARTUP____'):
parts = message.split('____')
self.status = parts[-1].strip()
await bot.change_presence(game=discord.Game(name=self.status))
print("Status Updated: {}".format(self.status))
else:
try:
if self.sound_on and self.voice[1]:
if message.startswith("`Kill:"):
player = self.voice[1].create_ffmpeg_player('win{}.mp3'.format(randint(1,5)))
else:
player = self.voice[1].create_ffmpeg_player('lose{}.mp3'.format(randint(1,1)))
player.volume = self.sound_volume
player.start()
except Exception as e:
print("FAILED TO PLAY KILLMAIL SOUND, ERROR: {}".format(e))
await bot.send_message(channel_id, message)
#print('bot.send_message sent.')
except Exception as e:
print('Error in q: {}'.format(e))
event.set()
except Exception as e:
print("FATAL EXCEPTION: {}".format(e))
self.do_restart()
'''@bot.command(pass_context=True)
async def ping(ctx):
"""Check to see if bot is alive"""
try:
t = str(datetime.now()-self.date_start)[:-7]
except:
t = 'Unknown'
await bot.say("<@{}> :ping_pong: Running: {}".format(ctx.message.author.id, t))
'''
@bot.command(pass_context=True)
async def price(ctx):
"""Price check any item.
------------------------------
DESCRIPTION: Run a price check in The Forge on any item.
(region and station specific searches coming soon...)
------------------------------
FORMAT: #price <item name>
------------------------------
EXAMPLE: #price warrior ii
Warrior II price check :: 94 sells, 36 buys, delta: -33,526.93 ISK
Cheapest Sell Orders:
442,926.95 ISK 68 of 166 total (Jita)
442,926.96 ISK 5 of 5 total (Jita)
442,926.99 ISK 28 of 100 total (Jita)
Highest Buy Orders:
409,400.02 ISK 115 of 300 total (Perimeter)
409,000.01 ISK 87 of 500 total (Perimeter)
409,000.00 ISK 2000 of 2000 total (Perimeter)"""
_id = ctx.message.author.id
msg = ctx.message.content
parts = msg.split()
item = ' '.join(parts[1:]).lower()
match_flag = 0
item_id = None
for i in self.items:
item_name = self.items[i]
if item_name.lower() == item:
item_id = i
break
fuzzy = []
if item_id is None:
for i in self.items:
item_name = self.items[i]
if item_name.lower().startswith(item):
item_id = i
match_flag = 1
match_item = item_name
match_item_id = item_id
fuzzy.append(item_name)
if len(fuzzy):
print(', '.join(fuzzy))
if len(fuzzy) < 10:
await bot.say("<@{}> {} items fuzzy match '{}':```css\n{}```".format(_id, len(fuzzy), item, ', '.join(fuzzy)))
else:
await bot.say("<@{}> {} items fuzzy match '{}', showing 10 matches:```css\n{}```".format(_id, len(fuzzy), item, ', '.join(fuzzy[:10])))
if item_id is None:
for i in self.items:
item_name = self.items[i]
if item in item_name.lower():
item_id = i
match_flag = 2
match_item = item_name
match_item_id = item_id
break
region_name = 'The Forge'
region_id = 10000002
if item_id is None:
await bot.say('<@{}> Could not find "{}" in {}'.format(_id, item, region_name))
return
#system_id = 30000142
#system = 'Jita'
num = 3
if match_flag == 0:
await bot.say('<@{}> Found exact match. Checking {} prices, please wait.'.format(_id, region_name))
elif match_flag == 1:
await bot.say('<@{}> **{}** matches your request, checking {} prices, please wait.'.format(_id, match_item, region_name))
item_id = match_item_id
item_name = match_item
elif match_flag == 2:
await bot.say('<@{}> *Weak match* on **{}**, checking {} prices, please wait.'.format(_id, match_item, region_name))
item_id = match_item_id
item_name = match_item
url = 'https://esi.evetech.net/latest/markets/{}/orders/?datasource=tranquility&order_type=all&type_id={}'.format(region_id, item_id)
print('PRICE CHECK: {}'.format(url))
try:
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
data = eval(response.replace('null','None').replace('true','True').replace('false','False'))
except:
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
data = eval(response.replace('null','None').replace('true','True').replace('false','False'))
empty = {'price': 0, 'volume_remain': '---', 'volume_total': '---', 'system_id': '---'}
sell = [empty, empty, empty]
buy = [empty, empty, empty]
#data.reverse()
count_sell = 0
count_buy = 0
for i in data:
if i['is_buy_order']:
count_buy += 1
if buy[0] == empty:
buy[0] = i
else:
if i['price'] >= buy[0]['price']:
buy.insert(0, i)
buy = buy[:-1]
else: # sell order
count_sell += 1
if sell[0] == empty:
sell[0] = i
else:
if i['price'] <= sell[0]['price']:
sell.insert(0, i)
sell = sell[:-1]
sell_text = '''```css
Cheapest Sell Orders:\n'''
for x in sell[:num]:
if x['system_id'] == '---':
sell_text += '{:,.2f} ISK {} of {} total\n'.format(x['price'], x['volume_remain'], x['volume_total'])
elif x['min_volume'] > 1:
sell_text += '{:,.2f} ISK {} of {} total ({}) *WARNING Min Quantity: {}\n'.format(x['price'], x['volume_remain'], x['volume_total'], self.systems[x['system_id']]['name'], x['min_volume'])
else:
sell_text += '{:,.2f} ISK {} of {} total ({})\n'.format(x['price'], x['volume_remain'], x['volume_total'], self.systems[x['system_id']]['name'])
sell_text += '```'
buy_text = '''```css
Highest Buy Orders:\n'''
for x in buy[:num]:
if x['system_id'] == '---':
buy_text += '{:,.2f} ISK {} of {} total\n'.format(x['price'], x['volume_remain'], x['volume_total'])
elif x['min_volume'] > 1:
buy_text += '{:,.2f} ISK {} of {} total ({}) *WARNING Min Quantity: {}\n'.format(x['price'], x['volume_remain'], x['volume_total'], self.systems[x['system_id']]['name'], x['min_volume'])
else:
buy_text += '{:,.2f} ISK {} of {} total ({})\n'.format(x['price'], x['volume_remain'], x['volume_total'], self.systems[x['system_id']]['name'])
buy_text += '```'
if buy[0]['system_id'] == '---' or sell[0]['system_id'] == '---':
delta = '---'
else:
diff = 0 - (sell[0]['price'] - buy[0]['price'])
if diff > 0:
    delta = '**WARNING** ***{:,.2f}*** ISK'.format(diff)
else:
    delta = '{:,.2f} ISK'.format(diff)
await bot.say('<@{}> **{}** price check :: *{}* sells, *{}* buys, delta: {}{}\n{}'.format(_id, item_name, count_sell, count_buy, delta, sell_text, buy_text))
@bot.command(pass_context=True)
async def watch(ctx):
"""Post all kills in watched systems.
------------------------------
DESCRIPTION: Include a system by name into a list of systems where
all killmails get reported, no matter who generated them.
------------------------------
FORMAT: #watch <system>
------------------------------
EXAMPLE: #watch vlil
Vlillrier added to watchlist."""
_id = ctx.message.author.id
msg = ctx.message.content
parts = msg.split()
if len(parts) > 1:
_sys = ' '.join(parts[1:]).title() # Old Man Star
if len(_sys) < 3:
await bot.say('<@{}> Include at least 3 chars for a partial match.'.format(_id))
else:
if len(self.watch) == 0:
await bot.say('<@{}> The watchlist is empty.'.format(_id))
return
data = '**System :: Sec Status :: Region**```css\n'
for _sys in self.watch:
    data += '{} :: {} :: {}\n'.format(self.watch[_sys]['name'], self.watch[_sys]['sec'], self.watch[_sys]['region'])
data += '```'
await bot.say('<@{}>{}'.format(_id, data))
return
if _sys in self.watch:
await bot.say('<@{}> {} is already in the watchlist.'.format(_id, _sys))
return
match = False
for sys_id,d in self.systems.items():
if d['name'] == _sys:
_sys = d['name']
self.watch[_sys] = {}
self.watch[_sys]['id'] = sys_id
self.watch[_sys]['name'] = _sys
self.watch[_sys]['sec'] = round(d['security_status'],1)
self.watch[_sys]['constellation_id'] = d['constellation_id']
self.watch[_sys]['region'] = 'Unknown'
self.watch[_sys]['region_id'] = 0
for r in self.regions.values():
try:
if d['constellation_id'] in r['constellations']:
self.watch[_sys]['region'] = r['name']
try:
self.watch[_sys]['region_id'] = r['region_id']
except:
self.watch[_sys]['region_id'] = 0
break
except Exception as e:
print(e)
print(self.watch[_sys])
match = True
break
if not match:
await bot.say('<@{}> System not found, searching for best match...'.format(_id))
for sys_id,d in self.systems.items():
if d['name'].startswith(_sys):
_sys = d['name']
self.watch[_sys] = {}
self.watch[_sys]['id'] = sys_id
self.watch[_sys]['name'] = d['name']
self.watch[_sys]['sec'] = round(d['security_status'],1)
self.watch[_sys]['constellation_id'] = d['constellation_id']
self.watch[_sys]['region'] = 'Unknown'
self.watch[_sys]['region_id'] = 0
for r in self.regions.values():
try:
if d['constellation_id'] in r['constellations']:
self.watch[_sys]['region'] = r['name']
try:
    self.watch[_sys]['region_id'] = r['region_id']
except:
    self.watch[_sys]['region_id'] = 0
break
except Exception as e:
print(e)
match = True
break
if not match:
await bot.say("<@{}> Fail. No system name starting with '{}' found.".format(_id, _sys))
return
with open('watch.txt', 'w') as f:
f.write(str(self.watch))
await bot.say('<@{}> Added {} to watchlist. All killmails here will be reported.'.format(_id, _sys))
@bot.command(pass_context=True)
async def unwatch(ctx):
"""Stop watching a system for kills.
------------------------------
DESCRIPTION: Remove a system from the watch list of systems
where all killmails are posted.
------------------------------
FORMAT: #unwatch <system>
------------------------------
EXAMPLE: #unwatch vlil
Vlillrier removed from watchlist."""
_id = ctx.message.author.id
msg = ctx.message.content
parts = msg.split()
if len(parts) > 1:
_sys = ' '.join(parts[1:]).strip().title() # Old Man Star
else:
if len(self.watch) == 0:
await bot.say('<@{}> The watchlist is empty.'.format(_id))
return
else:
await bot.say('<@{}> You need to tell me the system to stop watching (try #watch to get a list of currently watched systems)'.format(_id))
return
flag_removed = False
for name in list(self.watch):
    if _sys == name:
        del self.watch[name]
        flag_removed = True
        break
if not flag_removed:
    for name in list(self.watch):
        if name.startswith(_sys):
            del self.watch[name]
            flag_removed = True
            break
if flag_removed:
with open('watch.txt', 'w') as f:
f.write(str(self.watch))
await bot.say("<@{}> {} removed from watchlist.".format(_id, name))
else:
await bot.say("<@{}> {} not found in the watchlist, doing nothing.".format(_id, _sys))
@bot.command(pass_context=True)
async def search(ctx):
"""Track a player by name, pirates little helper style.
------------------------------
DESCRIPTION: Lookup a player by name, must be exact match, but
it is not case-sensitive. Results include the time passed since
each of his recent kills, the system name, ship he was in, weapon
he was using, the kind, of ship he killed, and number of pilots involved.
------------------------------
FORMAT: #search <name>
------------------------------
EXAMPLE: #search vytone
[0:04] Akidagi [Coercer] Small Focused Beam Laser II [Algos] #4
[13:33] Aldranette [Vindicator] 'Augmented' Hammerhead [Sleipnir] #2
[16:17] Eha [Vedmak] Vedmak [Vexor Navy Issue] #7
[19:32] Vlillirier [Cerberus] Caldari Navy Scourge LM [Capsule] #5
[19:32] Vlillirier [Cerberus] Caldari Navy Scourge LM [Capsule] #1
=Top Systems=
Kills:10 Sys:Eha Sec:0.4, Black Rise
Kills:4 Sys:Vlillirier Sec:0.3, Placid
Kills:4 Sys:Tama Sec:0.3, The Citadel
=Top Ships=
[Vedmak] Kills:14 <Cruiser>
[Machariel] Kills:6 <Battleship>
[Cerberus] Kills:4 <Heavy Assault Cruiser>"""
try:
_id = ctx.message.author.id
msg = ctx.message.content
parts = msg.split()
if len(parts) == 1:
await bot.say("<@{}> Who do you want to search for? Tell me the exact name.".format(_id))
return
if len(parts) == 2:
name = parts[-1]
else:
name = '%20'.join(parts[1:])
url = "https://esi.evetech.net/latest/search/?categories=character&strict=true&search={}".format(name)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
response = eval(response.replace('null','None').replace('true','True').replace('false','False'))
character_id = response['character'][0]
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
response = eval(response.replace('null','None').replace('true','True').replace('false','False'))
character_id = response['character'][0]
flag_yes = True
if flag_yes:
await asyncio.sleep(0.25)
url = "https://zkillboard.com/api/stats/characterID/{}/".format(character_id)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
flag_yes = True
if flag_yes:
d = eval(response.replace('null','None').replace('true','True').replace('false','False'))
name = d['info']['name']
data = '<@{}> {} <https://zkillboard.com/character/{}/> Danger:**{}** Gang:**{}**\n'.format(_id, name, character_id, d.get('dangerRatio','?'), d.get('gangRatio','?'))
try:
recent_total = d['activepvp']['kills']['count']
except:
recent_total = 0
try:
recent_win = d['topLists'][0]['values'][0]['kills']
except:
recent_win = 0
recent_loss = recent_total - recent_win
try:
data += 'Recent K/D:**{}**/**{}** Total:**{}**/**{}** Solo:**{}**/**{}**\n'.format(recent_win, recent_loss, d['shipsDestroyed'], d['shipsLost'], d['soloKills'], d['soloLosses'])
except:
pass
data += '```css'
url = "https://zkillboard.com/api/kills/characterID/{}/".format(character_id)
try:
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
z = eval(response.replace('null','None').replace('true','True').replace('false','False'))
friends = {}
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
z = eval(response.replace('null','None').replace('true','True').replace('false','False'))
friends = {}
flag_yes = True
now = datetime.utcnow()
if flag_yes:
for kill in z[:5]:
_sys = self.systems[kill['solar_system_id']]['name']
try:
victim = self.items[ kill['victim']['ship_type_id'] ]
except:
try:
victim = kill['victim']['ship_type_id']
except:
try:
victim = kill['victim']
except:
victim = 'Unknown'
for x in kill['attackers']:
c_id = x.get('character_id', '_Impossible_321')
if c_id != character_id:
if friends.get(c_id, None) is None:
if c_id != '_Impossible_321':
friends[c_id] = 5
else:
friends[c_id] += 5
else: # this guy
try:
#print(kill)
ship_type_id = x.get('ship_type_id', None)
if ship_type_id is not None:
ship = self.items[x['ship_type_id']]
else:
ship = 'Unknown'
ship = shorten_ship(ship)
except:
ship = x['ship_type_id']
try:
weapon_type_id = x.get('weapon_type_id', None)
if weapon_type_id is not None:
weapon = self.items[x['weapon_type_id']]
weapon = shorten_weapon(weapon)
else:
weapon = 'Unknown'
except:
weapon = x['weapon_type_id']
# break if you dont care about friends
if str(ctx.message.author) not in admins:
break
ago = str(now-datetime.strptime( kill['killmail_time'],'%Y-%m-%dT%H:%M:%SZ'))[:-10].replace(' ','').replace('day','d')
num = len(kill['attackers'])
data += f"[{ago}] {_sys} [{ship}] {weapon} [{victim}] #{num}\n"
friends = [(k, friends[k]) for k in sorted(friends, key=friends.get, reverse=True)]
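# Sort shared-killmail counts descending so friends[:5] further down picks the five most frequent associates.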
data += '\nTop Systems:\n'
count = 0
for x in d['topLists'][4]['values']:
data += "Kills:{} Sys:{} Sec:{}, {}\n".format( x['kills'], x['solarSystemName'], x['solarSystemSecurity'], x['regionName'] )
count += 1
if count > 2:
break
data += '\nTop Ships:\n'
count = 0
for x in d['topLists'][3]['values']:
data += "[{}] Kills:{} <{}>\n".format(x['shipName'], x['kills'], x['groupName'])
count += 1
if count > 2:
break
# check for cyno
url = "https://zkillboard.com/api/losses/characterID/{}/".format(character_id)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
flag_yes = True
if flag_yes:
l = eval(response.replace('null','None').replace('true','True').replace('false','False'))
flag_cyno = False
cyno_dt = None
for loss in l:
for item in loss['victim']['items']:
if item['item_type_id'] in [ 28650, 21096, 2852 ]: # cyno
dt = now - datetime.strptime(loss['killmail_time'], '%Y-%m-%dT%H:%M:%SZ')
if cyno_dt is None or dt < cyno_dt:
cyno_dt = dt
flag_cyno = True
if flag_cyno:
data += '\n[LAST CYNO LOSS: {}]\n'.format(str(cyno_dt)[:-10])
data = data.strip() + '```'
await bot.say(data)
if str(ctx.message.author) not in admins:
return True
data = '<@{}> Calculating associates of {} (most shared killmails)'.format(_id, name)
await bot.say(data)
data = '<@{}>Associates and their latest kills:```css\n'.format(_id)
txt = ''
for f_id,n in friends[:5]:
try:
url = "https://esi.evetech.net/latest/characters/{}".format(f_id)
print(url)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
f = eval(response.strip().replace('null','None').replace('true','True').replace('false','False'))
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
f = eval(response.strip().replace('null','None').replace('true','True').replace('false','False'))
flag_yes = True
if flag_yes:
await asyncio.sleep(0.33)
url = "https://zkillboard.com/api/kills/characterID/{}/".format(f_id)
print(url)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
a = eval(response.strip().replace('null','None').replace('true','True').replace('false','False'))
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
a = eval(response.strip().replace('null','None').replace('true','True').replace('false','False'))
flag_yes = True
if flag_yes:
try:
victim_ship = self.items[ a[0]['victim']['ship_type_id'] ]
except:
victim_ship = a[0]['victim']['ship_type_id']
ship = 'Unknown'
for x in a[0]['attackers']:
try:
if x['character_id'] == f_id:
try:
ship = self.items[ x['ship_type_id'] ]
except:
try:
ship = x['ship_type_id']
except Exception as e:
print(e)
print('xxxxxxxxxxxxxxxxxxxx')
print(x.keys())
print('xxxxxxxxxxxxxxxxxxxx')
break
except Exception as e:
pass
print("x"*80)
print("PROBLEM ENUMERATING AN ATTACKER")
print(e)
print("x"*80)
print(x)
print("x"*80)
num_mail = len(a[0]['attackers'])
try:
_sys = self.systems[ a[0]['solar_system_id'] ]['name']
except:
try:
_sys = a[0]['solar_system_id']
except:
_sys = 'Unknown'
#try:
# sys_sec = round(self.systems[ a[0]['solar_system_id'] ]['security_status']),1)
#except:
# sys_sec = 'Unknown'
try:
since = a[0]['killmail_time']
ago = str(now-datetime.strptime(since,'%Y-%m-%dT%H:%M:%SZ'))[:-10].replace(' ','').replace('day','d')
except:
since = 'Unknown'
ago = 'Unknown'
pilot = f['name']
raw = f"{n} [{ago}] [{pilot}] {_sys} [{ship}] Kill:{victim_ship} #{num_mail}\n"
print(raw)
txt += raw
except Exception as e:
print("PROBLEM FETCHING FRIENDS")
print(e)
data += txt[:-1]
data = data.strip() + '```'
await bot.say(data)
except Exception as e:
print("ERROR IN SEARCH: {}".format(e))
return False
'''
@bot.command(pass_context=True)
async def play(ctx):
try:
_id = ctx.message.author.id
if str(ctx.message.author) not in admins:
await bot.say("<@{}> Sorry, you are not an admin.".format(_id))
return
if self.sound_on and self.voice[1]:
msg = ctx.message.content
parts = msg.split()
name = 'test'
if len(parts) == 2:
name = parts[1].lower()
player = self.voice.create_ffmpeg_player('{}.mp3'.format(name))
try:
player.volume = float(ctx.message.content.split()[-1])
except:
player.volume = self.sound_volume
player.start()
else:
await bot.say("<@{}> Sound is turned off.".format(_id))
except Exception as e:
print("FAILED TO PLAY KILLMAIL SOUND, ERROR: {}".format(e))
'''
@bot.command(pass_context=True)
async def pause(ctx):
"""Stop posting killmails."""
try:
if not self.pause:
self.pause = True
await bot.say("<@{}> :pause_button: ***Automatic killmail posting paused.***".format(ctx.message.author.id))
else:
await bot.say("<@{}> Already paused.".format(ctx.message.author.id))
except Exception as e:
print("FATAL in pause: {}".format(e))
self.do_restart()
@bot.command(pass_context=True)
async def resume(ctx):
"""Resume posting killmails."""
try:
if self.pause:
self.pause = False
await bot.say("<@{}> :bacon: ***Automatic killmail posting resumed.***".format(ctx.message.author.id))
else:
await bot.say("<@{}> Not paused.".format(ctx.message.author.id))
except Exception as e:
print("FATAL in resume: {}".format(e))
self.do_restart()
@bot.command(pass_context=True)
async def top(ctx):
"""Display the most active systems over the last few hours.
------------------------------
Finds all systems in eve with kill activity.
Filter by security status (high, low, null, all).
Sort into most active by type (ships, pods, npcs).
You can display up to 25 systems at a time.
(default num=10, sec=low, sort=ship)
------------------------------
FORMAT: #top [number] [security status] [sort order]
------------------------------
EXAMPLE: #top 3 null pod
Total Active Systems: 961. Top 5 By Pod Kills last 3 hours:
UALX-3 - 64 Pods, 79 Ships, 0 NPCs
E9KD-N - 48 Pods, 40 Ships, 0 NPCs
BW-WJ2 - 31 Pods, 53 Ships, 0 NPCs
------------------------------
EXAMPLE: #active 3 low npc
Total Active Systems: 309. Top 3 By NPC Kills last 3 hours:
Uemon - 719 NPCs, 0 Ships, 0 Pods (Trusec:0.1974467784)
Otosela - 372 NPCs, 0 Ships, 0 Pods (Trusec:0.2381571233)
Azedi - 193 NPCs, 0 Ships, 0 Pods (Trusec:0.2744148374)"""
try:
_id = ctx.message.author.id
msg = ctx.message.content
parts = msg.split()
num = 5
if len(parts) > 1:
try:
num = int(parts[1])
except Exception as e:
if parts[1] in ['null', 'high', 'low', 'all']:
parts = [ parts[0], num, parts[1] ]
if num > 25:
num = 25
await bot.say("<@{}> Nah, {} sounds better to me.".format(_id, num))
elif num < 1:
num = 3
await bot.say("<@{}> Nah, {} sounds better to me.".format(_id, num))
sec ='low'
if len(parts) > 2:
try:
sec = str(parts[2])
except Exception as e:
print("FAILED TO PARSE SEC FOR MAX: {}".format(e))
sec = sec.lower()
if sec not in ['low', 'null', 'high', 'all']:
sec = 'low'
#hr = 3
#if len(parts) > 3:
# try:
# n = int(parts[3])
# if n == 1 or n == 2:
# hr = n
# now = datetime.now()
# except:
# pass
await bot.say("<@{}> Finding top {} most active {} sec systems last 3 hours.".format(_id, num, sec))
url_kills = 'https://esi.evetech.net/latest/universe/system_kills/'
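# ESI system_kills: one entry per active system, e.g. {"system_id": 30002813, "ship_kills": 25, "pod_kills": 9, "npc_kills": 0} (values illustrative).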
#url_system = 'https://esi.evetech.net/latest/universe/systems/'
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url_kills)
response = await raw_response.text()
response = eval(response)
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url_kills)
response = await raw_response.text()
response = eval(response)
flag_yes = True
if flag_yes:
# decide what to sort by
typ = 'ship_kills'
typ_name = 'Ship'
if len(parts):
try:
if parts[3].lower().startswith('p'):
typ = 'pod_kills'
typ_name = 'Pod'
elif parts[3].lower().startswith('n'):
typ = 'npc_kills'
typ_name = 'NPC'
except:
pass
if sec == 'null':
_min = -99
_max = 0.0
elif sec == 'low':
_min = 0.1
_max = 0.4
elif sec == 'all':
_min = -99
_max = 100
else: # high
_min = 0.5
_max = 100
print("response starting length {}".format(len(response)))
if len(parts) > 1:
hiccup = str(parts[1]).lower()
if hiccup.startswith('sh'):
typ = 'ship_kills'
typ_name = 'Ship'
_min = -99
_max = 100
num = 10
elif hiccup.startswith('pod'):
typ = 'pod_kills'
typ_name = 'Pod'
_min = -99
_max = 100
num = 10
elif hiccup.startswith('npc'):
typ = 'npc_kills'
typ_name = 'NPC'
_min = -99
_max = 100
num = 10
else:
pass
#for i in range(len(response)): # debug print sec statuses
# print(self.systems[int(response[i]['system_id'])]['security_status'])
droplist = []
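# Filter out systems whose rounded security status falls outside the requested [_min, _max] band before sorting.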
for i in range(len(response)):
#print('---')
#print('----------1')
#print(response[i])
#print('----------2')
#print(int(response[i]['system_id']))
#print('----------3')
#print(self.systems[int(response[i]['system_id'])])
#print('----------4')
#print(response[i].keys())
#print('----------5')
#print(self.systems[int(response[i]['system_id'])]['security_status'])
trusec = self.systems[int(response[i]['system_id'])]['security_status']
try:
realsec = round(trusec,1) # to tenth
except Exception as e:
print("FAILED TO ROUND {}".format(trusec))
trusec = '{:.5f}'.format(float(trusec))
if realsec > _max or realsec < _min:
droplist.append(i)
print("droplist length {}".format(len(droplist)))
offset = 0
for i in droplist:
#print("Dropping {}".format(response[i-offset]))
del response[i-offset]
offset += 1
print("response length now {}".format(len(response)))
top = [i for i in response if self.systems[int(i['system_id'])]['security_status'] < _max and self.systems[int(i['system_id'])]['security_status'] > _min]
top = sorted(top, key=lambda k: k[typ])
kill_total = len(top)
top = top[0-num:] # truncate
top.reverse() # descending
data = '```Total Active Systems: {}. Top {} By {} Kills:\n'.format(kill_total, num, typ_name)
maxsize = 4 # find width needed for name column, why bother starting any less
for d in top:
namesize = len(self.systems[int(d['system_id'])]['name'])
if namesize > maxsize:
maxsize = namesize
maxsize += 1
for d in top:
#ship,pod,npc
#pod,ship,npc
#npc,ship,pod
print(d)
name = self.systems[int(d['system_id'])]['name']
data += name
data += ' ' * abs(maxsize-len(name))
if typ == 'ship_kills':
data += '- {:4d} Ships, {:4d} Pods, {:5d} NPCs'.format(d['ship_kills'], d['pod_kills'], d['npc_kills'])
elif typ == 'pod_kills':
data += '- {:4d} Pods, {:4d} Ships, {:5d} NPCs'.format(d['pod_kills'], d['ship_kills'], d['npc_kills'])
else:
trusec = self.systems[int(d['system_id'])]['security_status']
trusec = '{:.5f}'.format(float(trusec))
data += '- {:4d} NPCs, {:4d} Ships, {:5d} Pods (Trusec:{})'.format(d['npc_kills'], d['ship_kills'], d['pod_kills'], trusec)
try: # get region from constellation
region_text = ''
for r in self.regions:
if self.systems[d['system_id']]['constellation_id'] in self.regions[r]['constellations']:
region_text = self.regions[r]['name']
break
if len(region_text):
data += ', ({})'.format(region_text)
except Exception as e:
print("ERROR", e)
pass
num -= 1
if num < 1:
break
data += '\n'
data += '```'
await bot.say('<@{}> {}'.format(_id, data))
print(data)
time.sleep(0.05)
except Exception as e:
print("FATAL in activity: {}".format(e))
self.do_restart()
@bot.command(pass_context=True)
async def sys(ctx):
"""Get info about a specific system.
Any kill stat that is Unknown means EVE says that system is not active.
You can use partial matching for systems.
------------------------------
FORMAT: #sys <name>
------------------------------
EXAMPLE: #sys bwf
[ Ships/Pods/NPCs ] http://evemaps.dotlan.net/system/BWF-ZZ
Name: BWF-ZZ [ 25/9/0 ]
Security Status: -0.6 (Trusec: -0.5754449964)
Planets: 10
Gates: 4
Stargate to IOO-7O (Sec:-0.5) [ 0/0/249 ]
Stargate to 8MG-J6 (Sec:-0.6) [ 2/2/32 ]
Stargate to RLSI-V (Sec:-0.5) [ 0/0/199 ]
Stargate to Oijanen (Sec:0.4) [ 7/4/63 ]"""
_id = ctx.message.author.id
msg = ctx.message.content
parts = msg.split()
if len(parts) == 2:
_sys = parts[1].lower()
print(_sys)
else:
return
matches = {}
count = 0
for system_id, d in self.systems.items():
if _sys == d['name'].lower():
count += 1
matches[system_id] = d
if count == 1:
print("FOUND EXACT MATCH")
data = ''
for system_id, d in matches.items(): # one match
url_kills = 'https://esi.evetech.net/latest/universe/system_kills/'
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url_kills)
response = await raw_response.text()
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url_kills)
response = await raw_response.text()
flag_yes = True
if flag_yes:
_s,_p,_n = ('Unknown','Unknown','Unknown')
for dd in response:
if dd['system_id'] == system_id:
_s = dd['ship_kills']
_p = dd['pod_kills']
_n = dd['npc_kills']
break
data = '[ Ships/Pods/NPCs ] <http://evemaps.dotlan.net/system/{}>```'.format(d['name'].strip())
data += 'Name: {} [ {}/{}/{} ]\n'.format(d['name'], _s, _p, _n)
if d.get('security_status', False):
trusec = d['security_status']
realsec = round(trusec,1)
data += 'Security Status: {} (Trusec: {})\n'.format(realsec, trusec)
trusec = '{:.5f}'.format(float(trusec))
if d.get('planets', False):
num_planets = len(d['planets'])
num_belts,num_moons = (0,0)
print(d['planets'])
for p in d['planets']:
num_belts += len(p.get('asteroid_belts', []))
num_moons += len(p.get('moons', []))
data += 'Planets: {}, Belts: {}, Moons: {}\n'.format(num_planets, num_belts, num_moons)
if d.get('stargates', False):
gates = []
data += 'Gates: {}\n'.format(len(d['stargates']))
for gate in d['stargates']:
#print("Gate id: {}\n".format(gate))
stargate_id = self.stargates.get(gate, False)
if stargate_id:
dest = self.stargates[gate].get('destination', False)
#print("Dest: {}\n".format(dest))
if dest:
sys_id = dest['system_id']
name = self.systems[sys_id].get('name', False)
stat = self.systems[sys_id].get('security_status', False)
if name is not False and stat is not False:
_s,_p,_n = ('Unknown','Unknown','Unknown')
for dd in response:
if dd['system_id'] == sys_id:
_s = dd['ship_kills']
_p = dd['pod_kills']
_n = dd['npc_kills']
break
line = "Stargate to {} (Sec:{}) [ {}/{}/{} ]\n".format(name, round(stat,i-1), _s, _p, _n)
data += line
data += '```'
await bot.say('<@{}> {}'.format(_id, data))
elif count > 20:
await bot.say("<@{}> {} systems match that criteria, please be more specific.".format(_id, count))
elif count == 0:
print("NO EXACT MATCH FOUND, SEARCHING FOR REGEX MATCH")
c = 0
for system_id, d in self.systems.items():
if d['name'].lower().startswith(_sys):
c += 1
matches[system_id] = d
if c == 1:
for system_id, d in matches.items(): # one match
url_kills = 'https://esi.evetech.net/latest/universe/system_kills/'
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url_kills)
response = await raw_response.text()
response = eval(response)
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url_kills)
response = await raw_response.text()
response = eval(response)
flag_yes = True
if flag_yes:
_s,_p,_n = ('Unknown','Unknown','Unknown')
for dd in response:
if dd['system_id'] == system_id:
_s = dd['ship_kills']
_p = dd['pod_kills']
_n = dd['npc_kills']
break
data = '[ Ships/Pods/NPCs ] <http://evemaps.dotlan.net/system/{}>```'.format(d['name'].strip())
data += 'Name: {} [ {}/{}/{} ]\n'.format(d['name'], _s, _p, _n)
if d.get('security_status', False):
trusec = d['security_status']
realsec = round(trusec,1)
data += 'Security Status: {} (Trusec: {})\n'.format(realsec, trusec)
trusec = '{:.5f}'.format(float(trusec))
if d.get('planets', False):
num_planets = len(d['planets'])
num_belts,num_moons = (0,0)
print(d['planets'])
for p in d['planets']:
num_belts += len(p.get('asteroid_belts', []))
num_moons += len(p.get('moons', []))
data += 'Planets: {}, Belts: {}, Moons: {}\n'.format(num_planets, num_belts, num_moons)
if d.get('stargates', False):
gates = []
data += 'Gates: {}\n'.format(len(d['stargates']))
for gate in d['stargates']:
#print("Gate id: {}\n".format(gate))
stargate_id = self.stargates.get(gate, False)
if stargate_id:
dest = self.stargates[gate].get('destination', False)
#print("Dest: {}\n".format(dest))
if dest:
sys_id = dest['system_id']
name = self.systems[sys_id].get('name', False)
stat = self.systems[sys_id].get('security_status', False)
if name is not False and stat is not False:
_s,_p,_n = ('Unknown','Unknown','Unknown')
for dd in response:
if dd['system_id'] == sys_id:
_s = dd['ship_kills']
_p = dd['pod_kills']
_n = dd['npc_kills']
break
line = "Stargate to {} (Sec:{}) [ {}/{}/{} ]\n".format(name, round(stat,1), _s, _p, _n)
data += line
data += '```\n\r'
await bot.say('<@{}> {}'.format(_id, data))
elif c > 25:
await bot.say("<@{}> {} systems match that criteria, please be more specific.".format(_id, c))
elif c > 1:
multi = []
for k,d in matches.items():
multi.append(d['name'])
multi = ', '.join(multi)
print(multi)
await bot.say("<@{}> Multiple matches: {}. Please be more specific.".format(_id, multi))
else:
await bot.say('<@{}> No systems found matching "{}"'.format(_id, parts[1]))
elif count > 1:
await bot.say("<@{}> That's strange, multiple matches given a complete system name?!".format(_id))
@bot.command(pass_context=True)
async def save(ctx):
"""Save EFT ship fittings.
------------------------------
Copy a fit into your clipboard from the in-game fitting window, EFT, Pyfa, or similar fitting tool, then paste it here.
------------------------------
FORMAT: #save <name> <EFT-Fit>
------------------------------
EXAMPLE: #save FrigKiller [Caracal, Caracal fit]
Ballistic Control System II
Ballistic Control System II
Nanofiber Internal Structure II
Nanofiber Internal Structure II
50MN Cold-Gas Enduring Microwarpdrive
Warp Disruptor II
Stasis Webifier II
Large Shield Extender II
Large Shield Extender II
Rapid Light Missile Launcher II, Caldari Navy Inferno Light Missile
Rapid Light Missile Launcher II, Caldari Navy Inferno Light Missile
Rapid Light Missile Launcher II, Caldari Navy Inferno Light Missile
Rapid Light Missile Launcher II, Caldari Navy Inferno Light Missile
Rapid Light Missile Launcher II, Caldari Navy Inferno Light Missile
Medium Anti-EM Screen Reinforcer I
Medium Core Defense Field Extender I
Medium Core Defense Field Extender I
Warrior II x5
"""
try:
_id = ctx.message.author.id
msg = ctx.message.content
msg = msg[6:].strip()
parts = msg.split()
#print(msg)
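# EFT format note: the fit header is a bracketed '[Hull, Fit Name]' pair (see the #save docstring example); the loop below finds the '[' and ']' word positions to split the fit name from the hull and module list.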
register = ''
found_start = False
count = 0
count_ch = 0
fit_start = 0
found_end = False
for part in parts:
count += 1
count_ch += len(part)
if part.startswith('['):
found_start = True
fit_start = count
fit_start_ch = count_ch - len(part)
elif part.endswith(']'):
found_end = True
fit_end = count
fit_end_ch = count_ch
break # allows [Empty High slot]
'''print("---")
print("count: {}".format(count))
print("count_ch: {}".format(count_ch))
print("fit_start: {}".format(fit_start))
print("fit_end: {}".format(fit_end))
print("fit_start_ch: {}".format(fit_start_ch))
print("fit_end_ch: {}".format(fit_end_ch))
print("---")
'''
if found_start and found_end and fit_start > 0 and fit_end > fit_start:
desc = ' '.join(parts[fit_start-1:fit_end])
#print(desc)
group = str(desc.split(',')[0])
group = group[1:].replace(' ','_')
name = ' '.join(parts[:fit_start-1])
filename = self.fix_filename(name)
if not len(filename):
await bot.say("<@{}> Try saving with a different name.".format(_id))
return
await bot.say("<@{}> Saving {} as {}".format(_id, desc, name))
found_group = False
try:
for root, dirs, files in os.walk(self.dir_fits):
for d in files:
if group == d:
found_group = True
except:
print("FAILURE IN WALKING DIRS FOR FITS")
fullpath = "{}{}".format(self.dir_fits, group)
#print(fullpath)
if not found_group:
if not os.path.exists(fullpath):
os.mkdir(fullpath)
else:
print("ERROR CREATING DIRECTORY FOR GROUP {}".format(group))
ship = ''
for part in parts[fit_end:]:
ship = '{} {}'.format(ship, part)
ship = ship[1:]
if len(ship) > 0:
fullpath = '{}{}/{}'.format(self.dir_fits, group, filename)
with open(fullpath,'w') as f:
parts = msg.split('\n')
indexes = [0,1,2]
for i in range(0,len(parts)):
if parts[i].strip() == '' and i < len(parts)-1 and parts[i+1].strip() == '':
indexes.append(i)
decr = 0
for i in indexes:
del parts[i-decr]
decr += 1
data = '\n'.join(parts).strip()
print("=BEGIN FIT=")
print(data)
print("=END ALL FIT=")
f.write(data)
await bot.say('<@{}> Saved {}'.format(_id, fullpath[1:]))
return
# price check fit
ship, table, lookup = self.get_fit(data)
if len(lookup):
url = "https://api.eve-marketdata.com/api/item_prices&char_name=admica&type_ids={}®ion_ids=10000002&buysell=s".format(lookup[:-1])
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
raw = response.replace('null','None').replace('true','True').replace('false','False')
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
raw = response.replace('null','None').replace('true','True').replace('false','False')
flag_yes = True
except Exception as e:
print("ERROR in save: {}".format(e))
try:
await bot.say("<@{}> Failed to save.".format(_id))
except Exception as e:
print("FATAL in pause: {}".format(e))
self.do_restart()
@bot.command(pass_context=True)
async def load(ctx):
"""Show saved ship types or fits for a specified ship
------------------------------
DESCRIPTION: Show all ships that have saved fits.
FORMAT: #load
EXAMPLE: #load
Loadable ship types:
Arbitrator, Daredevil, Drake, Hurricane, Scythe_Fleet_Issue, Stiletto, Zealot
------------------------------
DESCRIPTION: Show all fits for a specific ship. (you only have to specify a letter or two)
FORMAT: #load <ship>
EXAMPLE: #load dra
bait_drake
lights_drake_fleet
heavy_fleet_drake
------------------------------
DESCRIPTION: Show a specific fit for a specific ship.
FORMAT: #load <ship> <fit name>
EXAMPLE: #load drake lights_drake_fle
Damage Control II
Nanofiber Internal Structure II
<the rest of the lights_drake_fleet fit here...>
"""
_id = ctx.message.author.id
msg = ctx.message.content
parts = msg.split()
cmd = parts[0]
if len(parts) == 1:
data = []
for root, dirs, files in os.walk(self.dir_fits):
for d in dirs:
data.append(d)
if len(data):
data.sort()
await bot.say("<@{}> Loadable ship types:\n{}".format(_id, ', '.join(data)))
return
if len(parts) > 1:
raw_group = self.fix_filename(parts[1])
group = ''
for word in raw_group.split('_'):
group += '{}_'.format(word.capitalize())
group = group[:-1]
if len(parts) > 1:
data = ''
fullpath = '{}{}'.format(self.dir_fits, group)
for root, dirs, files in os.walk(fullpath):
for fname in files:
data = "{}\n{}".format(data, fname)
data = data[1:]
if len(data) and len(parts) == 2:
await bot.say("<@{}> Loadable {} fits:\n{}".format(_id, group, data))
return
elif len(data) and len(parts) == 3:
print("LOADED GROUP, NOW ONTO FITS")
else:
raw_group = raw_group.lower()
found = False
for root, dirs, files in os.walk(self.dir_fits):
for d in dirs:
if raw_group == d.lower():
found = True
break
elif d.lower().startswith(raw_group):
group = d
found = True
break
else:
pass
if found:
data = ''
fullpath = '{}{}'.format(self.dir_fits, group)
for root, dirs, files in os.walk(fullpath):
for fname in files:
data = "{}\n{}".format(data, fname)
data = data[1:]
if len(data) and len(parts) == 2:
await bot.say("<@{}> Loadable {} fits:\n{}".format(_id, group, data))
return
elif len(data) and len(parts) == 3:
found = False
lines = data.split()
for line in lines:
if line == parts[-1]:
data = line
found = True
break
if not found:
for line in lines:
if line.startswith(parts[-1]):
data = line
break
else:
await bot.say("<@{}> No {} fits found.".format(_id, group))
return
if len(parts) >= 3:
filename = self.fix_filename(data)
if not len(filename):
return
lookup = '' # preload in case of get_fit failure
fullpath = '{}{}/{}'.format(self.dir_fits, group, filename)
if os.path.isfile(fullpath):
with open(fullpath,'r') as f:
data = f.read(4096).strip()
ship, table, lookup = self.get_fit(data)
else:
found = False
raw_filename = filename.lower()
for root, dirs, files in os.walk(self.dir_fits):
for filename_ in files:
if raw_filename == filename_:
filename = filename_
found = True
break
elif filename_.lower().startswith(raw_filename):
filename = filename_
found = True
break
else:
pass
if found:
break
if found:
fullpath = '{}{}/{}'.format(self.dir_fits, group, filename)
with open(fullpath,'r') as f:
data = f.read(4096).strip()
ship, table, lookup = self.get_fit(data)
#print(data)
else:
await bot.say("<@{}> Can't find that {} fit, try again.".format(_id, group))
return
if len(lookup):
url = "https://api.eve-marketdata.com/api/item_prices&char_name=admica&type_ids={}®ion_ids=10000002&buysell=s".format(lookup[:-1])
print(url)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
raw = response.replace('null','None').replace('true','True').replace('false','False')
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
raw = response.replace('null','None').replace('true','True').replace('false','False')
flag_yes = True
if flag_yes:
total, outp = self.parse_xml(_id, ship, table, raw)
if total:
await bot.say(outp)
else:
print("WARNING: ###############################################")
print("WARNING: Didn't find anything to lookup, skipping lookup.")
print("WARNING: ###############################################")
await bot.say("<@{}> {}{}/{}".format(_id, self.dir_fits[3:], group, data))
return
await bot.say("<@{}> I'm sorry Dave, I can't allow you to do that.".format(_id))
return
@bot.command(pass_context=True)
async def route(ctx):
"""Show the routes from one system to another.
------------------------------
DESCRIPTION: Route planning, from source to destination shows each hop.
Shortest path is default, but you can specify secure/high or insecure/low/null.
------------------------------
FORMAT: #route <source> <destination> [routing]
------------------------------
EXAMPLE: #route jita vlil
12 jumps using shortest routing.
Jita > Ikuchi > Tunttaras > Nourvukaiken > Tama > Kedama > Hirri > Pynekastoh > Hikkoken > Nennamaila > Aldranette > Vlillirier"""
_id = ctx.message.author.id
parts = ctx.message.content.split()
if len(parts) == 4:
sort = parts[3].lower()
if sort in ['shortest','secure','insecure']:
sort = parts[3].lower()
elif sort.startswith('sh'):
sort = 'shortest'
elif sort.startswith('sec'):
sort = 'secure'
elif sort.startswith('hi'):
sort = 'secure'
elif sort.startswith('in'):
sort = 'insecure'
elif sort.startswith('lo'):
sort = 'insecure'
elif sort.startswith('nu'):
sort = 'insecure'
elif sort.startswith('ze'):
sort = 'insecure'
else:
sort = 'shortest'
else:
sort = 'shortest'
if len(parts) < 3:
await bot.say('<@{}> Give me a source and destination system, ex. #route jita akora'.format(_id))
return
src = []
for system_id, d in self.systems.items():
if parts[1].lower() == d['name'].lower():
src.append( [d['name'], d['system_id']] )
break
if len(src) < 1:
for system_id, d in self.systems.items():
if d['name'].lower().startswith(parts[1].lower()):
src.append( [d['name'], d['system_id']] )
break
if len(src) < 1:
await bot.say("<@{}> Starting system '{}' not found.".format(_id, parts[1]))
return
dst = []
for system_id, d in self.systems.items():
if parts[2].lower() == d['name'].lower():
dst.append( [d['name'], d['system_id']] )
break
if len(dst) < 1:
for system_id, d in self.systems.items():
if d['name'].lower().startswith(parts[2].lower()):
dst.append( [d['name'], d['system_id']] )
break
if len(dst) < 1:
await bot.say("<@{}> Starting system found, but destination '{}' was not found.".format(_id, parts[1]))
return
url = 'https://esi.evetech.net/latest/route/{}/{}/?flag={}'.format(src[0][1], dst[0][1], sort)
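# ESI route endpoint: flag is one of shortest/secure/insecure and the response is the list of system ids from source to destination.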
print(url)
try:
flag_yes = False
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
response = eval(response)
flag_yes = True
except:
await asyncio.sleep(0.5)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
response = eval(response)
flag_yes = True
if flag_yes:
data = '<@{}> {} jumps using {} routing.```css\n'.format(_id, len(response), sort)
route = ''
for _sys in response:
for system_id, d in self.systems.items():
if _sys == d['system_id']:
sec = str(round(d['security_status'],1))
if sec[0:2] == '0.':
sec = sec[1:]
route += '{}({}) > '.format(d['name'], sec)
break
route = route[:-3]
data += route
data += '```'
await bot.say(data)
@bot.command(pass_context=True)
async def map(ctx):
"""Fetch a dotlan map for any region.
------------------------------
DESCRIPTION: Retrieve dotlan map link highlighting recent jumps.
------------------------------
FORMAT: #map <region>
------------------------------
EXAMPLE: #map the for
http://evemaps.dotlan.net/map/the_forge#jumps"""
_id = ctx.message.author.id
#http://evemaps.dotlan.net/map/Tribute/M-OEE8#jumps
url = 'http://evemaps.dotlan.net/map/'
try:
name = ctx.message.content.lower().split()
if len(name) > 2:
name = '_'.join(name[1:])
elif len(name) == 2:
name = name[1]
else:
await bot.say("<@{}> **Which region?** (partial match ok)```{}```".format(_id, ', '.join(self.regionslist)))
return
#print('Processing map request for {}'.format(name))
found = False
for region in self.regionslist:
if name == region.lower():
found = True
print('Exact match found! {}'.format(name))
break
if not found:
print("No exact match found, checking nicknames.")
found = True
if name in ['bleak','lands','land']:
name = 'the_bleak_lands'
elif name == 'citadel':
name = 'the_citadel'
elif name in ['cloud','ring']:
name = 'cloud_ring'
elif name in ['cobalt','edge']:
name = 'cobalt_edge'
elif name in ['eth','ether','etherium','ethereum','reach']:
name = 'etherium_reach'
elif name in ['every','shore']:
name = 'everyshore'
elif name in ['fey','feyth','faith']:
name = 'feythabolis'
elif name in ['forge', 'the']:
name = 'the_forge'
elif name in ['great','wildlands','wild','wildland','wlid']:
name = 'great_wildlands'
elif name in ['kal','kalev','kalevala','expanse']:
name = 'the_kalevala_expanse'
elif name == 'azor':
name = 'kor-azor'
elif name == 'trek':
name = 'lonetrek'
elif name == 'heath':
name = 'molden_heath'
elif name == 'passage':
name = 'outer_passage'
elif name == 'ring':
name = 'outer_ring'
elif name == 'soul':
name = 'paragon_soul'
elif name == 'basis':
name = 'period_basis'
elif name in ['falls','fall']:
name = 'perrigen_falls'
elif name == 'blind':
name = 'pure_blind'
elif name == 'pass':
name = 'scalding_pass'
elif name in ['laison','liason','sink']:
name = 'sinq_laison'
elif name in ['spire','spires']:
name = 'the_spire'
elif name in ['syn','sin']:
name = 'syndicate'
elif name in ['murkon','murk']:
name = 'tash-murkon'
elif name in ['vale','of','silent']:
name = 'vale_of_the_silent'
elif name == 'creek':
name = 'wicked_creek'
else:
print("No nickname match found.")
found = False
if not found:
for region in self.regionslist:
print("checking {} = {}".format(name,region.lower()))
if region.lower().startswith(name):
name = region
found = True
break
if found:
url = '<{}{}#jumps>'.format(url, name)
print('Sending link: {}'.format(url))
await bot.say("<@{} {}".format(_id, url))
else:
await bot.say("<@{}> No match found. **Which region?** (partial match ok)```{}```".format(_id, ', '.join(self.regionslist)))
except Exception as e:
print("Map failure: {}".format(e))
try:
await bot.say("<@{}> Hmm, something went wrong.".format(_id))
except Exception as e:
self.do_restart()
@bot.command(pass_context=True)
async def get_auth(ctx):
"""get the auth url needed for accessing assets"""
_id = ctx.message.author.id
url = 'https://login.eveonline.com/oauth/authorize?response_type=token&redirect_uri=https://localhost/callback&client_id=baaf8fc216864da297227ba80c57f445&scope=publicData+esi-assets.read_assets.v1'
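# Implicit-grant SSO flow: after sign-in the access token comes back in the redirect URL fragment (#access_token=...), which #set_auth parses out.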
await bot.say('<@{}> Sign in URL: {}'.format(_id, url))
the_id = self.people.get(_id, None)
if the_id is None:
the_token = 'None'
the_char = 'None'
the_char_id = 'None'
the_expires = 'None'
else:
the_token = self.people[_id].get('token', 'None')
the_char = self.people[_id].get('char', 'None')
the_char_id = self.people[_id].get('char_id', 'None')
the_expires = self.people[_id].get('expires', 'None')
if the_id is None or the_token == 'None':
await bot.say('<@{}> No token set. Please sign in with the above url, then use #set_auth and tell me the URL you are redirected to after signing in, and I will extract the authorization token, or you can extract the token from the url and tell me just the token part.'.format(_id))
return
if the_expires != 'None':
the_expires = str(self.people[_id]['expires'])[:-10]
time_left = ( self.people[_id]['expires'] - datetime.utcnow() ).seconds
if time_left > 1234 or time_left < 1:
time_left = "Expired"
else:
time_left = '{:.1f} min'.format(time_left / 60.0)
data = '<@{}> Auth Info:```css\n'.format(_id)
data += 'Character: {}\n'.format(the_char)
data += 'Character ID: {}\n'.format(self.people[_id]['char_id'])
data += 'Token: {}\n'.format(the_token)
data += 'Token expires: {} {}```'.format(time_left, the_expires)
await bot.say(data)
@bot.command(pass_context=True)
async def set_auth(ctx):
"""set the authorization token for access to assets"""
_id = ctx.message.author.id
parts = ctx.message.content.split()
try:
if len(parts) > 1 and parts[1].startswith('https://localhost/callback#access_token='):
token = parts[1].split('#access_token=')[-1]
token = token.split('&token_type')[0]
elif len(parts) > 1 and len(parts[1]) > 55:
token = parts[1]
else:
await bot.say('<@{}> Use #get_auth to get the authorization url, sign in, then tell me the URL you are redirected to after signing in, and I will extract the authorization token, or you can extract the token from the url and tell me just the token part.'.format(_id))
return
if self.people.get(_id, None) is None:
self.people[_id] = {}
self.people[_id]['id'] = _id
the_char = self.people[_id].get('char', 'None')
the_char_id = self.people[_id].get('char_id', 'None')
self.people[_id]['token'] = token
self.people[_id]['expires'] = datetime.utcnow() + timedelta(minutes=20)
data = '<@{}> Token received.```css\n'.format(_id)
data += 'Character: {}\n'.format(the_char)
data += 'Character ID: {}\n'.format(the_char_id)
data += 'Token: {}\n'.format(self.people[_id]['token'])
data += 'Token expires: 20 min ({})```'.format(str(self.people[_id]['expires'])[:-10])
# save
with open('people.pickle', 'wb') as f:
pickle.dump(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)
await bot.say(data)
except Exception as e:
print("X"*42)
print(e)
print("X"*42)
await bot.say("<@{}> That doesn't look like the returned URL or token to me.".format(_id))
await asyncio.sleep(0.25)
@bot.command(pass_context=True)
async def set_char(ctx):
"""Set your character name to pair with access to assets"""
_id = ctx.message.author.id
parts = ctx.message.content.split()
if self.people.get(_id, None) is None:
self.people[_id] = {}
self.people[_id]['id'] = _id
self.people[_id]['char'] = ' '.join(parts[1:])
await bot.say("<@{}> Searching for '{}', please wait...".format(_id, self.people[_id]['char']))
await asyncio.sleep(0.25)
flag_fail = False
url = 'https://esi.evetech.net/latest/search/?categories=character&strict=true&search={}'.format(self.people[_id]['char'].replace(' ','%20'))
print(url)
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
print("RESPONSE=[{}]END_RESPONSE".format(response))
d = eval(response)
try:
if d.get('character', None) is None:
flag_fail = True
else:
the_char_id = d['character'][0]
except:
try:
the_char_id = d['character'][0]
except:
flag_fail = True
if flag_fail:
self.people[_id]['char'] = 'None'
the_char_id = 'None'
self.people[_id]['char_id'] = the_char_id
the_token = self.people[_id].get('token', 'None')
the_expires = self.people[_id].get('expires', 'None')
if the_token == 'None' or the_expires == 'None':
time_left = "Expired"
if the_expires != 'None':
time_left = ( self.people[_id]['expires'] - datetime.utcnow() ).seconds
if time_left > 1234 or time_left < 1:
time_left = "Expired"
else:
time_left = '{:.1f} min'.format(time_left / 60.0)
if flag_fail:
data = "<@{}> Invalid character name! Did you spell it correctly?```css\n".format(_id)
else:
data = "<@{}> Character name set to: '{}'```css\n".format(_id, self.people[_id]['char'])
# save
with open('people.pickle', 'wb') as f:
pickle.dump(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)
data += 'Character: {}\n'.format(self.people[_id]['char'])
data += 'Character ID: {}\n'.format(self.people[_id]['char_id'])
data += 'Token: {}\n'.format(the_token)
data += 'Token expires: {} ({})```'.format(time_left, the_expires)
await bot.say(data)
#"""show your items sorted by market competition"""
@bot.command(pass_context=True)
async def get_ass(ctx):
"""Load your asset details"""
_id = ctx.message.author.id
parts = ctx.message.content.split()
ret = self.check_auth(_id)
if ret is not True:
await bot.say(ret)
return
the_char = self.people[_id].get('char', 'None')
the_char_id = self.people[_id].get('char_id', 'None')
the_token = self.people[_id].get('token', 'None')
the_expires = self.people[_id].get('expires', 'None')
url = "https://esi.evetech.net/latest/characters/{}/assets/?datasource=tranquility&page=1&token={}".format(the_char_id, the_token)
print(url)
r = requests.get(url)
last_page = int(r.headers['X-Pages']) # last page number in header
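# All subsequent pages are fetched below with aiohttp; asset page numbering starts at 1.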
if r.status_code != 200:
await bot.say('<@{}> HTTP Status code "{}" is not 200, try again in a minute.'.format(_id, r.status_code))
return
else:
await bot.say('<@{}> Fetching {} pages of assets, please wait.'.format(_id, last_page))
assets = {}
uniq_items = {}
for page in range(1, last_page+1):
url = "https://esi.evetech.net/latest/characters/{}/assets/?datasource=tranquility&page={}&token={}".format(the_char_id, page, the_token)
print(url)
async with aiohttp.ClientSession() as session:
await asyncio.sleep(0.77)
raw_response = await session.get(url)
response = await raw_response.text()
print("RESPONSE=[{}]END_RESPONSE".format(response))
l = eval(response.replace('null','None').replace('true','True').replace('false','False'))
try:
error = l.get('error',None)
if error:
await bot.say('<@{}> Token appears invalid or expired. Check with #get_auth'.format(_id))
except:
pass # normal behavior
n = len(l) # list of dictionaries
# {"is_singleton":false,"item_id":102774901,"location_flag":"Hangar","location_id":60001393,"location_type":"station","quantity":3,"type_id":14019}
# {"is_singleton":false,"item_id":106339446,"location_flag":"Hangar","location_id":60003898,"location_type":"station","quantity":1,"type_id":5493}
# {"is_singleton":false,"item_id":109387381,"location_flag":"Hangar","location_id":60008455,"location_type":"station","quantity":1,"type_id":490}
await bot.say("<@{}> Parsing page #{} with {} assets, please wait...".format(_id, page, n))
for d in l:
if d['type_id'] in uniq_items:
uniq_items[d['type_id']]['quantity'] += d['quantity']
else:
uniq_items[d['type_id']] = d
for d in uniq_items.values():
flag_found = False
loc = d.get('location_type', None)
if loc == 'station':
for sys_id in self.systems:
if self.systems[sys_id].get('stations', None):
for stat_id in self.systems[sys_id]['stations']:
try:
if d['location_id'] == stat_id:
item_name = self.items.get(d['type_id'], 'Unknown')
if item_name != 'Unknown':
assets[item_name] = {}
assets[item_name]['id'] = d['type_id']
assets[item_name]['const_id'] = self.systems[sys_id]['constellation_id']
assets[item_name]['sys_name'] = self.systems[sys_id]['name']
assets[item_name]['sys_id'] = sys_id
assets[item_name]['q'] = d['quantity'] # owned quantity, read later by #fine_ass and #most_ass
flag_found = True
break
except Exception as e:
print("Error: {}".format(e))
if flag_found:
break
# my assets
self.people[_id]['assets'] = assets
# save last lookup for debug
with open('assets.pickle', 'wb') as f:
pickle.dump(assets, f, protocol=pickle.HIGHEST_PROTOCOL)
# save
with open('people.pickle', 'wb') as f:
pickle.dump(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)
data = "<@{}> Done.".format(_id)
await bot.say(data)
@bot.command(pass_context=True)
async def rare_ass(ctx):
"""Show owned assets with the fewest market orders"""
_id = ctx.message.author.id
msg = ctx.message.content
parts = msg.split()
flag_num = False
if len(parts) > 1:
try:
num = int(parts[1])
if num > 40:
num = 40
flag_num = True
except:
num = 20
else:
num = 20
partial = None
if not flag_num:
if len(parts) > 1:
try:
partial = ' '.join(parts[1:]).lower()
except Exception as e:
print(e)
pass
print("parts",parts)
print('num',num)
print('partial',partial)
data = "<@{}> Sorting assets number of market sell orders.```css\n".format(_id)
assets_copy = self.people[_id]['assets'].copy()
for ass_id, ass in assets_copy.items():
#print(' * ',self.items[ass['id']])
count = 0
quant = 0
_max = 0
if ass['id'] in self.market_sells:
for order in self.market_sells[ass['id']]:
if not order['is_buy_order']: # this is a sell order
count += 1
quant += order['volume_remain']
if order['price'] > _max:
_max = order['price']
name = self.market_sells[ass['id']][0]['name']
self.people[_id]['assets'][name]['sell'] = _max
self.people[_id]['assets'][name]['count'] = count
self.people[_id]['assets'][name]['quant'] = quant
else:
self.people[_id]['assets'][self.items[ass['id']]]['sell'] = 0
self.people[_id]['assets'][self.items[ass['id']]]['count'] = 0
self.people[_id]['assets'][self.items[ass['id']]]['quant'] = 0
from collections import OrderedDict
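# Sort ascending by the number of competing sell orders so the rarest (least-listed) items come first.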
od = OrderedDict(sorted(self.people[_id]['assets'].items(), key=lambda x: x[1]['count'], reverse=False))
count = 0
for k,v in od.items():
if partial is None or partial in k.lower():
data += '{}: {} orders, #{}, {:,.2f} ISK: {}\n'.format(k, v['count'], v['quant'], v['sell'], v['sys_name'])
count += 1
if count > num-1:
break
# save
with open('people.pickle', 'wb') as f:
pickle.dump(self.people, f, protocol=pickle.HIGHEST_PROTOCOL)
data += '```' # end
await bot.say(data)
@bot.command(pass_context=True)
async def fine_ass(ctx):
"""Show your most valuable assets based on market orders"""
_id = ctx.message.author.id
await bot.say("<@{}> Sorting your assets, please wait...".format(_id))
if self.people.get(_id, 'None') == 'None':
ret = self.check_auth(_id)
if ret is not True:
await bot.say(ret)
return
msg = ctx.message.content
parts = msg.split()
flag_num = False
if len(parts) > 1:
try:
num = int(parts[1])
if num > 40:
num = 40
flag_num = True
except:
num = 20
else:
num = 20
partial = None
if not flag_num:
if len(parts) > 1:
try:
partial = ' '.join(parts[1:]).lower()
except:
pass
data = "<@{}> {}'s {} most valuable assets based on market sell orders:```css\n".format(_id, self.people[_id]['char'], num)
assets_copy = self.people[_id]['assets'].copy()
for ass_id, ass in assets_copy.items():
print(self.items[ass['id']])
_max = 0
_min = '' # to force type error on first try
if ass['id'] in self.market_sells:
for order in self.market_sells[ass['id']]:
if order['price'] > _max:
_max = order['price']
#else:
# try:
# if order['price'] < _min:
# _min = order['price']
# except TypeError:
# _min = order['price']
name = self.market_sells[ass['id']][0]['name']
self.people[_id]['assets'][name]['sell'] = _max
else:
self.people[_id]['assets'][self.items[ass['id']]]['sell'] = 0
from collections import OrderedDict
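# Sort descending by the highest observed sell price so the most valuable items come first.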
od = OrderedDict(sorted(self.people[_id]['assets'].items(), key=lambda x: x[1]['sell'], reverse=True))
count = 0
for k,v in od.items():
if partial is None or partial in k.lower():
data += '{}: {:,.2f} ISK x {}: {}\n'.format(k, v['sell'], v['q'], v['sys_name'])
count += 1
if count > num-1:
break
data += '```' # end
await bot.say(data)
@bot.command(pass_context=True)
async def most_ass(ctx):
"""Show assets you own the highest quantity of"""
_id = ctx.message.author.id
msg = ctx.message.content
parts = msg.split()
flag_num = False
if len(parts) > 1:
try:
num = int(parts[1])
if num > 40:
num = 40
flag_num = True
except:
num = 20
else:
num = 20
partial = None
if not flag_num:
if len(parts) > 1:
try:
partial = ' '.join(parts[1:]).lower()
except:
pass
from collections import OrderedDict
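# Sort descending by owned quantity so the most numerous items come first.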
od = OrderedDict(sorted(self.people[_id]['assets'].items(), key=lambda x: x[1]['q'], reverse=True))
data = "<@{}> {}'s top {} items by quantity:```css\n".format(_id, self.people[_id]['char'], num)
count = 1
for k,v in od.items():
if partial is None or partial in k.lower():
data += '{}: #{}: {}\n'.format(k, v['q'], v['sys_name'])
count += 1
if count > num:
break
data += '```'
print(data)
await bot.say(data)
@bot.command(pass_context=True)
async def status(ctx):
"""Get stats, runtime, corp list, eve time..."""
try:
_id = ctx.message.author.id
x = []
while not self.qthread.empty():
x.append(self.qthread.get_nowait())
if not len(x):
x = [self.last]
print("last: {}".format(self.last))
now = datetime.now()
dt = str(now - self.dt_last)[:-7]
self.dt_last = datetime.now()
data = "<@{}> ```Killmails post to channel: {}\n".format(_id, self.ch['main']['name'])
diff = x[-1] - self.last
if not self.flag_first_count:
data += "{} kills since last status check {} ago.\n".format(diff, dt)
else:
self.flag_first_count = False
if self.last < 0:
self.last = 0
else:
self.last = x[-1]
data += "{} kills since last restart at {}\n".format(x[-1], str(self.date_start)[:-7])
corps = []
count = 0
with open('the.corps','r') as f:
for line in f.readlines():
corps.append(line.strip().split(":")[0])
count += 1
corps = ', '.join(corps)
data += "Watching kills/losses for {} corps: {}\n".format(count, corps)
if self.pause:
data += "Killmail posting is currently paused. :pause_button:>\n"
try:
start = str(self.date_start)[:-7]
except:
start = 'Unknown'
try:
t = str(datetime.now()-self.date_start)[:-7]
except:
t = 'Unknown'
if self.sound_on:
print(type(self.sound_volume))
print(str(self.sound_volume))
print(float(self.sound_volume))
data += "Sound effects are On, volume at {}%\n".format(int(self.sound_volume*100))
else:
data += "Sound effects are Off.\n"
data += "Bot runtime: {} (Started {})\n".format(t, start)
data += "EVE Time is {}```".format(str(datetime.utcnow())[:-77].split(' ')[-1])
await bot.say(data)
except Exception as e:
print("ERROR in status: {}".format(e))
try:
await bot.say("<@{}> Error in status.".format(_id))
except Exception as e:
self.do_restart()
'''
@bot.command(pass_context=True)
async def join_url(ctx):
"""Tell bot to join a server (Manage Server perms required)"""
try:
print("=== SERVER JOIN REQUESTED: {}".format(ctx.message.content))
if str(ctx.message.author) not in admins:
await bot.say("<@{}> Sorry, you are not an admin.".format(_id))
return
url = ctx.message.content.split()[-1]
print("=== JOINING SERVER: {}".format(url))
invite = bot.get_invite(url)
print("=== JOINING INVITE: {}".format(invite))
await bot.accept_invite( invite )
print("=== JOINED.")
except Exception as e:
print("ERROR in join_url: {}".format(e))
try:
await bot.say("<@{}> Error in join_url.".format(_id))
except Exception as e:
self.do_restart()
'''
'''
@bot.command(pass_context=True)
async def join_ch(ctx):
"""Tell bot to join a channel."""
try:
print("--- CHANNEL JOIN REQUESTED: {}".format(ctx.message.content))
if ctx.message.author:
return
if str(ctx.message.author) not in admins:
await bot.say("<@{}> Sorry, you are not an admin.".format(_id))
return
_id = ctx.message.author.id
parts = ctx.message.content.split()
cid = parts[-1]
if len(parts) == 3:
if 'voi' in parts[1].lower(): # voice channel
await bot.say("<@{}> Joining voice channel {}".format(_id, cid))
await bot.join_voice_channel( bot.get_channel(cid) )
await bot.say("<@{}> Joined {}".format(_id, cid))
return
elif len(parts) != 2:
await bot.say("<@{}> Invalid request, try #help join_ch".format(_id))
return
await bot.say("<@{}> Joining channel {}".format(_id, cid))
await bot.join_channel(_id)
await bot.say("<@{}> Joined {}".format(_id, cid))
except Exception as e:
print("ERROR in join_ch: {}".format(e))
try:
await bot.say("<@{}> Error in join_ch.".format(_id))
except Exception as e:
self.do_restart()
'''
'''
@bot.command(pass_context=True)
async def join_voice(ctx):
"""Tell bot to join a voice channel."""
try:
print("--- VOICE CHANNEL JOIN REQUESTED: {}".format(ctx.message.content))
if str(self.tx.message.author) not in admins:
await bot.say("<@{}> Sorry, you are not an admin.".format(_id))
return
except Exception as e:
print("ERROR in join_voice: {}".format(e))
try:
await bot.say("<@{}> Error in join_voice.".format(_id))
except Exception as e:
self.do_restart()
'''
@bot.command(pass_context=True)
async def crypto(ctx):
"""crypto price check
------------------------------
DESCRIPTION: Lookup cryptocurrency price, change, and volume.
------------------------------
FORMAT: #crypto <currency>
------------------------------
EXAMPLE: #crypto iota
IOTA price: $0.7654222581
IOTA change last 1h: -3.93%
IOTA change last 24h: -10.7%
IOTA volume last 24h: $123,857,230.30"""
_id = ctx.message.author.id
msg = ctx.message.content
coin = msg.split()[-1]
url = 'https://api.coinmarketcap.com/v1/ticker/{}'.format(coin)
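# The coinmarketcap v1 ticker returns a JSON list; element [0] holds the price and percent-change fields used below.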
try:
async with aiohttp.ClientSession() as session:
raw_response = await session.get(url)
response = await raw_response.text()
response = eval(response)[0]
data = '```{} price: ${}\n'.format(coin.upper(), response['price_usd'])
data += '{} change last 1h: {}%\n'.format(coin.upper(), response['percent_change_1h'])
data += '{} change last 24h: {}%\n'.format(coin.upper(), response['percent_change_24h'])
try:
vol = '{:,.2f}'.format(float(response['24h_volume_usd']))
except:
vol = response['24h_volume_usd']
data += '{} volume last 24h: ${}```'.format(coin.upper(), vol)
await bot.say('<@{}> {}'.format(_id, data))
except Exception as e:
print("<@{}> Error in price command: {}".format(_id, e))
await bot.say("<@{}> Sorry, I don't know how to lookup {}.".format(_id, coin))
'''
@bot.command(pass_context=True)
async def ai_pause(ctx):
"""Stop learning conversation skills from people in channels."""
try:
if not self.pause_train:
self.pause_train = True
await bot.say("<@{}> :pause_button: ***Ignoring all conversations.***".format(ctx.message.author.id))
else:
await bot.say("<@{}> Already paused.".format(ctx.message.author.id))
except Exception as e:
print("FATAL in pause_train: {}".format(e))
self.do_restart()
@bot.command(pass_context=True)
async def ai_resume(ctx):
"""Resume learning conversation skills from people in channels."""
try:
if self.pause_train:
self.pause_train = False
for v in self.chtrain.values():
v['pair'] = []
await bot.say("<@{}> :bacon: ***Learning from conversations resumed.***".format(ctx.message.author.id))
else:
await bot.say("<@{}> Not paused.".format(ctx.message.author.id))
except Exception as e:
print("FATAL in resume_train: {}".format(e))
self.do_restart()
'''
@bot.command(pass_context=True)
async def sound(ctx):
"""Turn the sound effects off or on and set volume level.
------------------------------
DESCRIPTION: Get the current state of sound effects.
Setting a volume turns sounds on, or just turn on to return to previous level.
------------------------------
FORMAT: #sound [on|off|vol%]
------------------------------
EXAMPLE: #sound
Sound effects are turned off.
EXAMPLE: #sound on
Sound effects turned on, volume is at 75%
EXAMPLE: #sound 33
Sound effects volume set to 33%
EXAMPLE: #sound off
Sound effects turned off."""
_id = ctx.message.author.id
parts = ctx.message.content.split()
x = parts[-1].lower()
if len(parts) != 2:
if self.sound_on:
await bot.say("<@{}> Sound effects are on at {}%".format(_id, int(self.sound_volume*100)))
else:
await bot.say("<@{}> Sound effects are turned off.".format(_id))
return
if str(ctx.message.author) not in admins:
await bot.say("<@{}> You are not an admin, ignoring command.".format(_id))
return
if x.startswith('of'):
self.sound_on = False
await bot.say("<@{}> Sound effects turned off.".format(_id))
elif x.startswith('zer'):
self.sound_on = False
await bot.say("<@{}> Sound effects turned off.".format(_id))
elif x.startswith('no'):
self.sound_on = False
await bot.say("<@{}> Sound effects turned off.".format(_id))
elif x.startswith('on'):
self.sound_on = True
await bot.say("<@{}> Sound effects turned on, volume is at {}%".format(_id, int(self.sound_volume*100)))
elif x.startswith('y'):
self.sound_on = True
await bot.say("<@{}> Sound effects turned on, volume is at {}%".format(_id, int(self.sound_volume*100)))
else:
try:
self.sound_on = True
self.sound_volume = abs(float(x))
if self.sound_volume > 1.0:
if self.sound_volume > 100:
self.sound_volume = 1.0
else:
self.sound_volume = float(self.sound_volume / 100.0)
await bot.say("<@{}> Sound effects volume set to {}%".format(_id, int(self.sound_volume*100)))
except Exception as e:
print("FAILURE in sound: {}".format(e))
self.do_restart()
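    # Volume parsing sketch (hedged, based on the handler above): "#sound 33"
    # stores 0.33, any value above 100 is clamped to 1.0, and a non-numeric value
    # falls through to the exception handler and triggers a restart.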
@bot.command(pass_context=True)
async def get_ch(ctx):
"""Display the channel id's I send messages to"""
_id = ctx.message.author.id
for key in self.ch:
await bot.say("<@{}> {}: [{}] id: {}".format(_id, key, self.ch[key]['name'], self.ch[key]['id']))
@bot.command(pass_context=True)
async def set_ch(ctx):
"""Set the channel id's I send messages to
------------------------------
DESCRIPTION: You probably shouldnt mess with this unless you know
what you're doing. Key is an internal identifier, name is channel name.
Use the get_ch command for the list of all available keys.
------------------------------
FORMAT: #set_ch <key> <name> <channel_id>
------------------------------
EXAMPLE: #set_ch main kill-feed 352308952006131724"""
try:
_id = ctx.message.author.id
if str(ctx.message.author) in admins:
msg = ctx.message.content.split()
if len(msg) == 4:
                    key, name, channel_id = msg[1:]  # msg[0] is the command itself
if key in self.ch:
try:
key = self.fix_filename(key)
name = self.fix_filename(name)
channel_id = self.fix_filename(channel_id)
with open('the.channel_{}'.format(key),'w') as f:
f.write("{}:{}\n".format(name, channel_id))
self.ch[key]['name'] = name
self.ch[key]['id'] = channel_id
await bot.say("<@{}> {} output channel set to {} id: {}".format(_id, key, name, channel_id))
except Exception as e:
await bot.say("<@{}> Failed to set {} output channel.".format(_id, keys))
else:
await bot.say("<@{}> {} is an invalid key.".format(_id, keys))
else:
await bot.say("<@{}> Usage: {} <key> <name> <channel_id>".format(_id, msg[0]))
else:
await bot.say("<@{}> You are not an admin, ignoring command.".format(_id))
except Exception as e:
print("ERROR in set_channel: {}".format(e))
'''
@bot.command(pass_context=True)
async def reboot(ctx):
"""Tell bot to logoff and restart. (permissions required)"""
if str(ctx.message.author) in admins:
try:
await bot.say("Rebooting, please wait.")
except:
pass
try:
await bot.logout()
except:
pass
self.running = False
self.do_restart()
'''
@bot.command(pass_context=True)
async def die(ctx):
"""Tell bot to logoff. (permissions required)"""
_id = ctx.message.author.id
if str(ctx.message.author) in admins:
await bot.say("<@{}> Shutting down.".format(_id))
await bot.logout()
self.running = False
else:
await bot.say("<@{}> You are not an admin, ignoring command.".format(_id))
try:
bot.run(private_key)
except Exception as e:
print("FATAL in bot.run(): {}".format(e))
self.do_restart()
def send(self, channel, message):
event = threading.Event()
try:
channel = channel['id']
except:
pass
try:
            self.q.put_nowait([event, message, channel])
event.wait()
except Exception as e:
print("FATAL in send: {}".format(e))
self.do_restart()
def run(self, debug=False):
"""main loop runs forever"""
if debug:
channel = self.ch['debug']
else:
channel = self.ch['main']
while True:
try:
_url = 'wss://zkillboard.com:2092'
_msg = '{"action":"sub","channel":"killstream"}'
ws = websocket.create_connection(_url)
print('Main Connected to: {}'.format(_url))
ws.send(_msg)
print('Main Subscribed with: {}'.format(_msg))
inject = None
try:
inject = pickle.load(open(REDO,'rb')) # previous work ready for injection
os.remove(REDO)
print("INJECTION LOADED")
except:
pass
self.running = True
while self.running:
time.sleep(11.11)
if self.Bot._is_ready.is_set():
while True:
try:
time.sleep(0.15)
if inject is None:
raw = ws.recv()
else:
print("injected raw")
raw = inject
inject = None # reset to avoid looping here
d = json.loads(raw)
url = d['zkb']['url']
try:
system = self.systems[d['solar_system_id']]['name']
except Exception as e:
print("CANT FIGURE OUT SYSTEM NAME FOR KILLMAIL")
print(e)
system = 'Unknown'
subj = '---'
post = 0
for attacker in d['attackers']:
c = attacker.get('corporation_id','none')
                                    if str(c) in self.corps:
ship = d['victim'].get('ship_type_id', 'Unknown')
try:
ship = self.items[ship]
except Exception as e:
print("ERR1:{}".format(e))
pass
subj = '`Kill:`**{}** ***{}***'.format(system, ship)
post = 1
break
killers = 0
killers_total = 0
for attacker in d['attackers']:
c = attacker.get('corporation_id','none')
killers_total += 1
                                    if str(c) in self.corps:
killers += 1
if post == 0: # no attackers involved
c = d['victim'].get('corporation_id', 'none')
if str(c) in self.corps:
ship = d['victim'].get('ship_type_id', 'Unknown')
try:
ship = self.items[ship]
except Exception as e:
print("ERR2:{}".format(e))
pass
subj = '`Loss:`**{}** ***{}***'.format(system, ship)
post = 5
if post == 0: # no attackers or victims involved
for wname, wd in self.watch.items():
if wd['id'] == d['solar_system_id']:
ship = d['victim'].get('ship_type_id', 'Unknown')
try:
ship = self.items[ship]
except Exception as e:
print("ERR3:{}".format(e))
pass
subj = '`Watch:`**{}** ***{}***'.format(system, ship)
post = 3
break
self.count += 1
self.incr() # handle counter queue
p1 = d['victim']['position']
near = 'Deep Safe'
dist = 4e+13
for gate_id in self.systems[d['solar_system_id']].get('stargates', []):
dis = distance(p1, self.stargates[gate_id]['position'])
#print(gate_id, self.stargates[gate_id])
if dis < dist:
dist = dis
near = self.stargates[gate_id]['name']
for std in self.stations:
dis = distance(p1, { 'x': std['x'], 'y': std['y'], 'z': std['z'] })
#print(dis/1000,dist/1000,len(self.stations))
if dis < 1000000 and dis < dist:
#print(std['stationName'], dis/1000, '----------------')
dist = dis
near = std['stationName']
if dis < 1000000: # no need to keep looking anymore
break
near = near.replace('Stargate (','').replace(')','')
if dist == 4e+13:
x = ''
elif dist > 1.495e+9: # 0.01AU
x = '{:.1f}AU from {} '.format((dist/1.496e+11), near) # 1.496e+11 = 1AU
elif dist < 1000000:
x = '*{:.0f}km* from {} '.format((dist/1000), near)
else:
x = '{:.0f}km from {} '.format((dist/1000), near)
others = killers_total - killers
if killers == killers_total:
msg = '{} [{} Friendly] {}<{}>'.format(subj, killers, x, url)
else:
msg = '{} [{} Friendly +{}] {}<{}>'.format(subj, killers, others, x, url)
#for attacker in d['attackers']:
# c = attacker.get('corporation_id','none')
# if str(c) in self.corps:
# print("-------------")
# print(self.items[attacker['ship_type_id']])
# print(attacker)
#post = False ###### STOP POSTING DEBUG
print(msg)
                            except ZeroDivisionError as e:  # originally: Exception as e
print('Exception caught: {}'.format(e))
time.sleep(1)
self.do_restart()
except KeyboardInterrupt:
self.running = False
except Exception as e:
import sys
print(sys.exc_info())
print("Unknown Error {}".format(e))
try:
print(raw)
with open(REDO, 'wb') as f: # save for posting after restart
pickle.dump(raw, f, protocol=pickle.HIGHEST_PROTOCOL)
except:
pass
x = 3
print('Sleeping {} seconds...'.format(x))
time.sleep(x)
print('Restarting...')
self.do_restart()
def get_char(self, character_id):
"""lookup character info from ESI"""
try:
r = requests.get('{}{}'.format(self.url_characters, character_id))
d = eval(r.text)
return d
except Exception as e:
print("ERROR IN GET_CHAR: {}".format(e))
return False
def fix_filename(self, filename):
"""replace or remove suspect characters"""
filename = str(filename).strip()
filename = filename.replace(' ','_')
filename = filename.replace('-','_')
filename = filename.replace('/','_')
filename = filename.replace('\\','_')
filename = filename.replace('"','_')
        filename = filename.replace("'", '_')
filename = filename.replace('[','_')
filename = filename.replace(']','_')
filename = filename.replace('(','_')
filename = filename.replace(')','_')
filename = filename.replace('{','_')
filename = filename.replace('}','_')
filename = filename.replace('\`','_')
while filename.startswith('.'):
filename = filename[1:]
while filename.startswith('\`'):
filename = filename[1:]
return filename
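    # Example (sketch) of the sanitisation above:
    #   fix_filename("my file (v2).txt")  ->  "my_file__v2_.txt"
    # Leading dots are also stripped by the loops at the end.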
def incr(self):
"""queue the details from the last mails"""
try:
if self.qcounter.full():
junk = self.qcounter.get()
self.qcounter.put(self.count)
except Exception as e:
print("FATAL in incr: {}".format(e))
self.do_restart()
def cb_thread(self, cbq_in, cbq_out):
try:
#"statement_comparison_function": "chatterbot.comparisons.jaccard_similarity",
#"statement_comparison_function": "chatterbot.comparisons.levenshtein_distance",
cb = ChatBot('Killbot', trainer='chatterbot.trainers.ChatterBotCorpusTrainer', storage_adapter='chatterbot.storage.SQLStorageAdapter', database='../../database.sqlite3', logic_adapters=[
{
"import_path": "chatterbot.logic.BestMatch",
"statement_comparison_function": "chatterbot.comparisons.levenshtein_distance",
"response_selection_method": "chatterbot.response_selection.get_first_response"
},
{
'import_path': 'chatterbot.logic.MathematicalEvaluation',
'threshold': 0.85
}
])
#cb.train("chatterbot.corpus.english",
# "chatterbot.corpus.english.greetings",
# "chatterbot.corpus.english.conversations")
from chatterbot.trainers import ListTrainer
cb.set_trainer(ListTrainer)
print("cb done training.")
while True:
data = cbq_in.get()
if len(data) == 1:
response = cb.get_response(data[0])
cbq_out.put(response)
# learn?
#cb.output.process_response(data[0])
#cb.conversation_sessions.update(bot.default_session.id_string,(data[0], response,))
elif len(data) == 2:
_in = data[0]
_out = data[1]
print("TRAINING {} >>> {}".format(_in, _out))
cb.train([_in, _out])
cbq_out.put("TRAINED")
else:
pass
        except Exception as e:
print("Epic failure in cbq_thread: {}".format(e))
time.sleep(15)
def timer_thread(self, q, chan, debug=False):
"""thread loop runs forever updating status"""
channel = chan['id']
        self.running = True
self.message = 'Calculating...'
while True:
try:
status = 'Unknown'
online = 'Unknown'
kills = 'Unknown'
ready = False
_url = 'wss://zkillboard.com:2092'
_msg = '{"action":"sub","channel":"public"}'
wss = websocket.create_connection(_url)
print('Timer Thread Connected to: {}'.format(_url))
wss.send(_msg)
print('Timer Thread Subscribed with: {}'.format(_msg))
while self.running:
time.sleep(0.1)
raw = wss.recv()
d = eval(raw)
if 'tqStatus' in d:
status = d['tqStatus']
online = d['tqCount']
kills = d['kills']
if ready:
event = threading.Event()
self.message = '#SECRET_STATUP____{} {} {} Kills'.format(online, status, kills)
q.put_nowait([event, self.message, channel])
event.wait()
wss.close()
raise ZeroDivisionError # forced raise
else:
pass
#print("Collecting data {} {} {}".format(status, online, kills))
except Exception as e:
print("SLEEPING AFTER TIMER_THREAD {}".format(e))
time.sleep(900)
def do_restart(self):
try:
self.running = False
os.execv(__file__, sys.argv)
sys.exit(0)
except Exception as e:
print("Failing to restart")
time.sleep(15)
#############################################################
#############################################################
import time
time.sleep(1)
bot = Zbot()
try:
bot.start()
bot.start_timer() # periodic server status update of with pilots online and total kills
bot.run()
except Exception as e:
print("FATAILITY IN MAIN: {}".format(e))
bot.do_restart()
| 44.892963
| 671
| 0.42726
| 149,139
| 0.982289
| 0
| 0
| 114,897
| 0.756758
| 113,859
| 0.749921
| 43,301
| 0.285198
|
b8e66118386395c82079c492edb8b95513d242cf
| 18,796
|
py
|
Python
|
tests/help_text_test.py
|
equinor/osdu-cli
|
579922556925ea7ad759a6230498378cf724b445
|
[
"MIT"
] | 3
|
2021-08-19T05:59:39.000Z
|
2021-11-10T08:02:58.000Z
|
tests/help_text_test.py
|
equinor/osdu-cli
|
579922556925ea7ad759a6230498378cf724b445
|
[
"MIT"
] | 2
|
2021-09-13T11:10:15.000Z
|
2021-11-25T13:21:54.000Z
|
tests/help_text_test.py
|
equinor/osdu-cli
|
579922556925ea7ad759a6230498378cf724b445
|
[
"MIT"
] | null | null | null |
# -----------------------------------------------------------------------------
# Copyright (c) Equinor ASA. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""Tests that -h does not return error and has all required text.
This only tests for commands/subgroups which are specified in this file.
This does not test the correctness of help text content."""
import unittest
from subprocess import PIPE, Popen
class HelpTextTests(unittest.TestCase):
"""Tests that -h does not return error and includes all help text."""
def _validate_output_read_line(
self, # noqa: C901; pylint: disable=too-many-arguments
command_input,
line,
section,
subgroups,
commands,
subgroups_index,
commands_index,
):
"""Read a line of text and validates it for correctness.
Parameter line (string) should be unprocessed. For example, the line should not be
stripped of starting or trailing white spaces.
This method returns the updated values of subgroups_index and commands_index as a tuple.
Tuple has ordering (subgroups_index, commands_index).
If an error occurs during validation, an assert is called."""
line = line.strip()
if section in ("Command", "Group"):
# if the line starts with the inputted command, then it describes the command.
# make sure the line has text after it
if line.startswith(command_input):
self.assertGreater(
len(line),
len(command_input),
msg="Validating help output failed on line: " + line,
)
return subgroups_index, commands_index
if section == "Arguments":
# For lines that start with '--' (for argument descriptions), make sure that
# there is something after the argument declaration
if line.startswith("--") or line.startswith("-"):
# self.assertIn(": ", line, msg="Validating help output failed on line: " + line)
# Find the first ':' character and check that there are characters following it
first_index = line.find(" ")
# first_index = line.find(": ")
self.assertNotEqual(
-1, first_index, msg="Validating help output failed on line: " + line
)
self.assertGreater(
len(line), first_index + 1, msg="Validating help output failed on line: " + line
)
return subgroups_index, commands_index
if section in ("Commands",):
# Make sure that if the line starts with the command/group in
# the expected tuple, that a description follows it.
# The line will either start with the name provided in the expected tuple,
# or it will be a continuation line. Ignore continuation lines.
first_word_of_line = line.split()[0].rstrip(":")
# If we've reached the end of the commands tuple, then skip, since everything
# after this is a continuation line.
if len(commands) == commands_index and len(subgroups) == subgroups_index:
return subgroups_index, commands_index
self.assertGreater(
len(subgroups) + len(commands),
subgroups_index + commands_index,
msg="None or missing expected commands provided in test for " + command_input,
)
if commands_index < len(commands) and first_word_of_line == commands[commands_index]:
# make sure there is descriptive text in this line by checking
# that the line is longer than just the command.
self.assertGreater(
len(line.replace(first_word_of_line, "").lstrip()),
len(first_word_of_line),
msg='Missing help text in "Commands" on line: ' + line,
)
commands_index += 1
elif (
subgroups_index < len(subgroups)
and first_word_of_line == subgroups[subgroups_index]
):
# make sure there is descriptive text in this line
help_text = line.replace(first_word_of_line, "", 1).strip()
self.assertGreater(
len(help_text),
0,
msg='Missing help text in "Commands" section on line: ' + line,
)
subgroups_index += 1
else:
self.fail(f"Found unknown command {first_word_of_line}.")
return subgroups_index, commands_index
        # TODO - Commands and subgroups are both listed together. If we split, we might want to revisit the below.
# if section in ("Commands", "Subgroups"):
# # Make sure that if the line starts with the command/group in
# # the expected tuple, that a description follows it.
# # The line will either start with the name provided in the expected tuple,
# # or it will be a continuation line. Ignore continuation lines.
# first_word_of_line = line.split()[0].rstrip(":")
# if section == "Commands":
# # If we've reached the end of the commands tuple, then skip, since everything
# # after this is a continuation line.
# if len(commands) == commands_index:
# return subgroups_index, commands_index
# self.assertGreater(
# len(commands),
# commands_index,
# msg="None or missing expected commands provided in test for " + command_input,
# )
# if first_word_of_line == commands[commands_index]:
# # make sure there is descriptive text in this line by checking
# # that the line is longer than just the command.
# self.assertGreater(
# len(line),
# len(first_word_of_line),
# msg='Validating help text failed in "Commands" on line: ' + line,
# )
# commands_index += 1
# elif section == "Subgroups":
# # If we've reached the end of the commands tuple, then skip
# if len(subgroups) == subgroups_index:
# return subgroups_index, commands_index
# self.assertGreater(
# len(subgroups),
# subgroups_index,
# msg="None or missing expected subgroups provided in test for " + command_input,
# )
# if first_word_of_line == subgroups[subgroups_index]:
# # make sure there is descriptive text in this line
# self.assertGreater(
# len(line),
# len(first_word_of_line),
# msg='Validating help text failed in "Subgroups" on line: ' + line,
# )
# subgroups_index += 1
# return subgroups_index, commands_index
self.fail("Section name {0} is not supported".format(section))
        # The following line will never be reached. It is added so pylint does not complain
# about inconsistent-return-statements.
return subgroups_index, commands_index
@classmethod
def _validate_output_read_section_name(cls, line):
"""Read a given line and validate it for correctness based on the given section.
Parameter line (string) should be unprocessed. For example, the line should not be
stripped of starting or trailing white spaces.
Returns the section name if the given line designates the beginning of a new section.
Returns None if the line does not."""
if line.strip() and not line[0].isspace():
# Use these lines to set the 'section' variable and move on to the next line
line = line.strip().rstrip(":")
if line == "Commands":
return "Commands"
if line in ("Options", "Arguments", "Global Arguments"):
return "Arguments"
if line == "Group":
return "Group"
if line == "Subgroups":
return "Subgroups"
if line == "Command":
return "Command"
return None
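    # Section-name mapping sketch (hedged): "Commands:" -> "Commands",
    # "Options:"/"Arguments:"/"Global Arguments:" -> "Arguments", and any indented
    # line (e.g. "  --verbose ...") -> None, i.e. not a section header.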
def validate_output(
self, command_input, subgroups=(), commands=()
): # pylint: disable=too-many-locals
"""
This function verifies that the returned help text is correct, and that no exceptions
are thrown during invocation. If commands are provided, this function will call itself
recursively to verify the correctness of the commands. It verifies correctness by:
- All listed subgroups and commands appear in alphabetical order. We do not check for the
existence of extra subgroups and commands.
- If subgroups or commands are not provided, then we expect it not to appear in
the help text. If it does, there will be an assertion raised in this test.
- All listed groups/subgroups, commands, and arguments have descriptive text
Limitations: This test doesn't search for new commands which are added.
If a test entry is not added here, then that entry will not be
verified.
The first word of the line should not match a command name
command_input (string): This represents the command for which you want to get the help text.
For example, "osducli" or "osducli application" or "osducli application list".
Parameter command_input should not include the "-h" to get the help text, as this
method will take care of that.
subgroups (tuple of strings): This represents all of the subgroups expected in the
help text. This tuple must be in alphabetical order.
commands (tuple of strings): This represents all of the commands expected in the
help text. This tuple must be in alphabetical order.
Help text has two formats. One for groups, and one for commands.
"""
help_command = command_input + " -h"
err = None
returned_string = None
try:
# This variable tracks what sections of the help text we are in
# Possibilities are Group, Subgroups, Commands, Command, Arguments,
# and Global Arguments.
# Once we no longer support python 2, change section options of enums
section = "Start"
# A tracker to know how many subgroups or commands have appeared in help text so far
# We use this to make sure that all expected items are returned
subgroups_index = 0
commands_index = 0
# Call the provided command in command line
# Do not split the help_command, as that breaks behavior:
# Linux ignores the splits and takes only the first.
# pylint: disable=R1732
pipe = Popen(help_command, shell=True, stdout=PIPE, stderr=PIPE)
# returned_string and err are returned as bytes
(returned_string, err) = pipe.communicate()
if err:
err = err.decode("utf-8")
self.assertEqual(b"", err, msg="ERROR: in command: " + help_command)
if not returned_string:
self.fail("No help text in command: " + help_command)
returned_string = returned_string.decode("utf-8")
lines = returned_string.splitlines()
for line in lines:
if not line.strip():
continue
# Check if we want to mark the start of a new section
# Check this by seeing if the line is a top level description, ie: 'Commands:'
# These are characterized by a new line with text starting without white space.
read_section_output = self._validate_output_read_section_name(line)
if read_section_output is not None:
section = read_section_output
# If this line is a section start, no additional processing
# is required. Move on to the next line.
continue
# Don't check usage / intro text at this time.
if section == "Start":
continue
# If this line is not a section start, then validate the correctness of the line.
# This command returns a tuple which includes counters for subgroups and commands
# which count how many instances of each have been processed.
updated_indices = self._validate_output_read_line(
command_input,
line,
section,
subgroups,
commands,
subgroups_index,
commands_index,
)
subgroups_index = updated_indices[0]
commands_index = updated_indices[1]
            # If section is still 'Start', then something has gone wrong.
# It means that lines were not processed
# correctly, since we expect some sections to appear.
self.assertNotEqual(
"Start",
section,
msg="Command {0}: incomplete help text: {1}".format(help_command, returned_string),
)
# Check that we have traversed completely through both
# subgroups and commands
self.assertEqual(
len(commands),
commands_index,
msg=(
"Not all commands listed in help text for "
+ help_command
+ ". \nThis may be a problem due incorrect expected ordering. "
'I.e ("delete", "show", "list") != ("show", "delete", "list"). '
"\nFirst diagnosis should be to run the help cmd yourself. \n"
"If you passed in a single value to the tuple in validate "
"output: commands=(set-telemetry,), like the example shown, "
"you must pass in a comma after in the tuple, otherwise it "
"will not be recognized as a tuple."
),
)
self.assertEqual(
len(subgroups),
subgroups_index,
msg=(
"Not all subgroups listed in help text for "
+ help_command
+ ". This may be a problem due incorrect expected ordering. "
"First diagnosis should be to run the help cmd yourself."
),
)
except BaseException as exception: # pylint: disable=broad-except
if not err:
self.fail(
msg="ERROR: Command {0} returned error at execution. Output: {1} Error: {2}".format(
help_command, returned_string, str(exception)
)
)
else:
self.fail(
msg="ERROR: Command {0} returned error at execution. Output: {1} Error: {2}".format(
help_command, returned_string, err
)
)
# Once validation is done for the provided command_input,
# if there are any commands returned in the help text, validate those commands.
for command in commands:
self.validate_output(command_input + " " + command)
def test_help_documentation(self):
"""Tests all help documentation to ensure that all commands have help text.
This does not test for typos / correctness in the text itself.
This test calls validate_output on all commands which osducli has, without the
        '-h' flag included. The flag will be added by validate_output.
Note: validate_output expects subgroups and commands in order. If out of alphabetical
order, you will see an error for not all commands/subgroups being listed.
Note: you do not need to call individual commands. Commands listed in the
'commands' list will be called and verified automatically. You DO need
an entry for each subgroup."""
self.validate_output(
"osdu",
subgroups=(
"config",
"dataload",
"entitlements",
"legal",
"list",
"schema",
"search",
"unit",
"workflow",
),
commands=(
"status",
"version",
),
)
self.validate_output(
"osdu config",
commands=(
"default",
"list",
"update",
),
)
self.validate_output(
"osdu dataload",
commands=(
"ingest",
"status",
"verify",
),
)
self.validate_output(
"osdu entitlements",
subgroups=("groups", "members"),
commands=("mygroups",),
)
self.validate_output(
"osdu entitlements groups",
commands=("add", "delete", "members"),
)
self.validate_output(
"osdu entitlements members",
commands=("add", "list", "remove"),
)
self.validate_output(
"osdu legal",
commands=("listtags",),
)
self.validate_output(
"osdu list",
commands=("records",),
)
self.validate_output(
"osdu schema",
commands=(
"add",
"get",
"list",
),
)
self.validate_output(
"osdu search",
commands=("id", "query"),
)
self.validate_output(
"osdu unit",
commands=("list",),
)
self.validate_output(
"osdu workflow",
commands=("list",),
)
if __name__ == "__main__":
import nose2
nose2.main()
| 40.508621
| 114
| 0.546606
| 18,170
| 0.966695
| 0
| 0
| 1,023
| 0.054426
| 0
| 0
| 10,583
| 0.563045
|
b8e81060803693ffd42ace6d2aecd7a9dd90f046
| 417
|
py
|
Python
|
testing/resources/test_g.py
|
tongni1975/processing.py
|
0b9ad68a1dc289d5042d1d3b132c13cc157d3f88
|
[
"Apache-2.0"
] | null | null | null |
testing/resources/test_g.py
|
tongni1975/processing.py
|
0b9ad68a1dc289d5042d1d3b132c13cc157d3f88
|
[
"Apache-2.0"
] | 1
|
2021-06-25T15:36:38.000Z
|
2021-06-25T15:36:38.000Z
|
testing/resources/test_g.py
|
tongni1975/processing.py
|
0b9ad68a1dc289d5042d1d3b132c13cc157d3f88
|
[
"Apache-2.0"
] | null | null | null |
import processing.opengl.PGraphics3D
def setup():
size(100, 100, P3D)
def draw():
# check that "g" is defined and is the expected type
assert(isinstance(g, processing.opengl.PGraphics3D))
# check that the alias cameraMatrix->camera is working as expected
g.camera(0, 0, -10, 0, 0, 0, 0, 1, 0)
assert(g.cameraMatrix.m03 == 0)
assert(g.cameraMatrix.m23 == -10)
print 'OK'
exit()
| 26.0625
| 70
| 0.654676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 122
| 0.292566
|
b8e9a8b69a6237c573c52a972df1c7ef664eba25
| 4,811
|
py
|
Python
|
scripts/experiments/intrinsic_evaluations/exbert/server/data/processing/create_faiss.py
|
antoilouis/netbert
|
ccd37ef8a1727557de74498132eea24db2135940
|
[
"MIT"
] | 2
|
2021-01-29T01:30:51.000Z
|
2021-07-14T16:47:15.000Z
|
server/data/processing/create_faiss.py
|
CharlotteSean/exbert
|
75e6bb146ab799e3652a887562490d5f31357223
|
[
"Apache-2.0"
] | null | null | null |
server/data/processing/create_faiss.py
|
CharlotteSean/exbert
|
75e6bb146ab799e3652a887562490d5f31357223
|
[
"Apache-2.0"
] | 1
|
2020-03-04T14:02:28.000Z
|
2020-03-04T14:02:28.000Z
|
import faiss
import numpy as np
import utils.path_fixes as pf
from pathlib import Path
from data.processing.corpus_embeddings import CorpusEmbeddings
from functools import partial
import argparse
FAISS_LAYER_PATTERN = 'layer_*.faiss'
LAYER_TEMPLATE = 'layer_{:02d}.faiss'
NLAYERS = 12
NHEADS = 12
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", help="Path to the directory that contains the 'embeddings' and 'headContext' hdf5 files")
args = parser.parse_args()
return args
def train_indexes(ce:CorpusEmbeddings, stepsize=100):
"""
Parameters:
===========
- corpus_embedding: Wrapper around HDF5 file for easy access to data
- stepsize: How many sentences to train with at once
"""
indexes = [faiss.IndexFlatIP(ce.embedding_dim) for i in range(ce.n_layers)]
for ix in range(0, len(ce), stepsize):
cdata = ce[ix:ix+stepsize]
for i in range(ce.n_layers):
indexes[i].add(cdata[i])
return indexes
def save_indexes(idxs, outdir, base_name=LAYER_TEMPLATE):
"""Save the faiss index into a file for each index in idxs"""
out_name = str(Path(outdir) / base_name)
for i, idx in enumerate(idxs):
faiss.write_index(idx, out_name.format(i))
class Indexes:
"""Wrapper around the faiss indices to make searching for a vector simpler and faster.
Assumes there are files in the folder matching the pattern input
"""
def __init__(self, folder, pattern=FAISS_LAYER_PATTERN):
self.base_dir = Path(folder)
self.indexes = [None] * NLAYERS # Initialize empty list
self.pattern = pattern
self.__init_indexes()
def __getitem__(self, v):
"""Slices not allowed, but index only"""
return self.indexes[v]
def __init_indexes(self):
for fname in self.base_dir.glob(self.pattern):
print(fname)
idx = fname.stem.split('_')[-1]
self.indexes[int(idx)] = faiss.read_index(str(fname))
def search(self, layer, query, k):
"""Search a given layer for the query vector. Return k results"""
return self[layer].search(query, k)
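# Usage sketch (hedged; assumes layer_XX.faiss files already exist in the folder):
#   idxs = Indexes("embeddings/")
#   D, I = idxs.search(layer=0, query=q, k=5)  # distances and row ids from layer 0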
def create_mask(head_size, n_heads, selected_heads):
"""Create a mask that indicates how the size of the head and the number of those heads
in a transformer model.
This allows easy masking of heads you don't want to search for
"""
mask = np.zeros(n_heads)
for h in selected_heads:
mask[int(h)] = 1
return np.repeat(mask, head_size)
default_masks = {
'bert-base-uncased': partial(create_mask, 64, 12)
}
base_mask = default_masks['bert-base-uncased']
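# Mask sketch (hedged): with 12 heads of 64 dimensions each, selecting heads 0 and 5
# zeroes out every other head in a 768-long vector:
#   mask = base_mask([0, 5])               # same as create_mask(64, 12, [0, 5])
#   assert mask.shape == (768,) and mask.sum() == 2 * 64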
class ContextIndexes(Indexes):
"""Special index enabling masking of particular heads before searching"""
# Int -> [Int] -> np.Array -> Int -> (np.Array(), )
def search(self, layer:int, heads:list, query:np.ndarray, k:int):
"""Search the embeddings for the context layer, masking by selected heads"""
assert max(heads) < NHEADS # Heads should be indexed by 0
assert min(heads) >= 0
unique_heads = list(set(heads))
mask_vector = base_mask(unique_heads)
mask_vector = mask_vector.reshape(query.shape)
new_query = (query * mask_vector).astype(np.float32)
# print(new_query.dtype)
return self[layer].search(new_query, k)
def main(basedir):
base = Path(basedir)
# embeddings
embedding_dir = base / 'embeddings'
embedding_hdf5 = embedding_dir / 'embeddings.hdf5'
print(f"Creating Embedding faiss files in {embedding_dir} from {embedding_hdf5}")
embedding_ce = CorpusEmbeddings(str(embedding_hdf5))
embedding_idxs = train_indexes(embedding_ce)
save_indexes(embedding_idxs, embedding_dir)
## Test embedding search:
print("Testing embedding idxs:")
loaded_embedding_idxs = Indexes(embedding_dir)
q = np.random.randn(1, 768).astype(np.float32)
D, I = loaded_embedding_idxs.search(0, q, 5)
print(embedding_ce.find2d(I))
print("\n" + "=" * 50 + "\n")
# headContext
context_dir = base / 'headContext'
context_hdf5 = context_dir / 'contexts.hdf5'
print(f"Creating head context faiss files in {context_dir} from {context_hdf5}")
context_ce = CorpusEmbeddings(str(context_hdf5))
context_indexes = train_indexes(context_ce)
save_indexes(context_indexes, context_dir)
## Test context search:
loaded_context_idxs = Indexes(context_dir)
q = np.random.randn(1, 768).astype(np.float32)
D, I = loaded_context_idxs.search(0, q, 5)
print(context_ce.find2d(I))
if __name__ == "__main__":
# Creating the indices for both the context and embeddings
args = parse_args()
main(args.directory)
| 33.17931
| 134
| 0.672833
| 1,623
| 0.337352
| 0
| 0
| 0
| 0
| 0
| 0
| 1,537
| 0.319476
|
b8e9db6f289a79604e54db518d87b8a53a1a0672
| 504
|
py
|
Python
|
weasyl/test/test_http.py
|
hyena/weasyl
|
a43ad885eb07ae89d6639f289a5b95f3a177439c
|
[
"Apache-2.0"
] | 111
|
2016-05-18T04:18:18.000Z
|
2021-11-03T02:05:19.000Z
|
weasyl/test/test_http.py
|
hyena/weasyl
|
a43ad885eb07ae89d6639f289a5b95f3a177439c
|
[
"Apache-2.0"
] | 1,103
|
2016-05-29T05:17:53.000Z
|
2022-03-31T18:12:40.000Z
|
weasyl/test/test_http.py
|
TheWug/weasyl
|
a568a542cc58c11e30621fb672c701531d4306a8
|
[
"Apache-2.0"
] | 47
|
2016-05-29T20:48:37.000Z
|
2021-11-12T09:40:40.000Z
|
import pytest
from weasyl import http
@pytest.mark.parametrize(('wsgi_env', 'expected'), [
({}, {}),
({'PATH_INFO': '/search', 'QUERY_STRING': 'q=example'}, {}),
({'HTTP_ACCEPT': '*/*'}, {'Accept': '*/*'}),
(
{'CONTENT_LENGTH': '', 'HTTP_ACCEPT_ENCODING': 'gzip', 'HTTP_UPGRADE_INSECURE_REQUESTS': '1'},
{'Accept-Encoding': 'gzip', 'Upgrade-Insecure-Requests': '1'},
),
])
def test_get_headers(wsgi_env, expected):
assert http.get_headers(wsgi_env) == expected
| 29.647059
| 102
| 0.603175
| 0
| 0
| 0
| 0
| 462
| 0.916667
| 0
| 0
| 230
| 0.456349
|
b8ea0aefe02a0ac8e734a613a8836ee2fbeec6cf
| 421
|
py
|
Python
|
chords/neural_network/classifier.py
|
fernando-figueredo/ChordsWebApp
|
9bf983ab5579c36c75447c74eec0400d78ab49f9
|
[
"MIT"
] | 2
|
2021-03-30T01:09:51.000Z
|
2022-03-10T21:17:15.000Z
|
chords/neural_network/classifier.py
|
fernando-figueredo/ChordsWebApp
|
9bf983ab5579c36c75447c74eec0400d78ab49f9
|
[
"MIT"
] | null | null | null |
chords/neural_network/classifier.py
|
fernando-figueredo/ChordsWebApp
|
9bf983ab5579c36c75447c74eec0400d78ab49f9
|
[
"MIT"
] | null | null | null |
from neural_network.train import Trainer
class Classifier():
def __init__(self, train=False):
self.train = train
self.trainer = Trainer()
if not self.train:
self.trainer.load()
else:
self.trainer.train()
def classify(self, audio_file_path):
#prediction = self.trainer.predict(audio_file_path)
self.trainer.plot_prediction(audio_file_path)
| 28.066667
| 59
| 0.643705
| 379
| 0.900238
| 0
| 0
| 0
| 0
| 0
| 0
| 51
| 0.12114
|
b8ea2be5c0eee4133b1b628fc992cd2fbe84768f
| 556
|
py
|
Python
|
cybox/common/metadata.py
|
tirkarthi/python-cybox
|
a378deb68b3ac56360c5cc35ff5aad1cd3dcab83
|
[
"BSD-3-Clause"
] | 40
|
2015-03-05T18:22:51.000Z
|
2022-03-06T07:29:25.000Z
|
cybox/common/metadata.py
|
tirkarthi/python-cybox
|
a378deb68b3ac56360c5cc35ff5aad1cd3dcab83
|
[
"BSD-3-Clause"
] | 106
|
2015-01-12T18:52:20.000Z
|
2021-04-25T22:57:52.000Z
|
cybox/common/metadata.py
|
tirkarthi/python-cybox
|
a378deb68b3ac56360c5cc35ff5aad1cd3dcab83
|
[
"BSD-3-Clause"
] | 30
|
2015-03-25T07:24:40.000Z
|
2021-07-23T17:10:11.000Z
|
# Copyright (c) 2020, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities, fields
import cybox.bindings.cybox_common as common_binding
class Metadata(entities.Entity):
_binding = common_binding
_binding_class = common_binding.MetadataType
_namespace = 'http://cybox.mitre.org/common-2'
type_ = fields.TypedField("type_", key_name="type")
value = fields.TypedField("Value")
subdatum = fields.TypedField("SubDatum", type_="cybox.common.metadata.Metadata", multiple=True)
| 32.705882
| 99
| 0.753597
| 358
| 0.643885
| 0
| 0
| 0
| 0
| 0
| 0
| 197
| 0.354317
|
b8ecff777a101fecf5e77b7561d2d3b4b1ad0ea3
| 972
|
py
|
Python
|
src/app/main/routes.py
|
Abh4git/PythonMongoService
|
f64fcb7c4db0db41adb8b74736c82e8de5f6dbec
|
[
"MIT"
] | null | null | null |
src/app/main/routes.py
|
Abh4git/PythonMongoService
|
f64fcb7c4db0db41adb8b74736c82e8de5f6dbec
|
[
"MIT"
] | null | null | null |
src/app/main/routes.py
|
Abh4git/PythonMongoService
|
f64fcb7c4db0db41adb8b74736c82e8de5f6dbec
|
[
"MIT"
] | null | null | null |
#All Routes are defined here
from flask_cors import CORS, cross_origin
from app.main.controller.products import ProductController
from flask import request, jsonify
import json
#Test route without any connections
def test():
return "{testroutesuccess:'Test Route Success!'}"
api_v2_cors_config = {
"origins": [
'http://localhost:3000' # React
],
"methods": ["OPTIONS", "GET", "POST"],
"allow_headers": ["Authorization", "Content-Type"]
}
#route returning Products list
@cross_origin(**api_v2_cors_config)
def getProductsList():
productC = ProductController()
return productC.getAllProducts()
#route for adding a new product
@cross_origin(**api_v2_cors_config)
def addProduct():
body = request.get_json()
productController = ProductController()
print (body['productdetail'])
newproduct=productController.addProduct(body['id'], body['title'],body['productdetail'])
return jsonify(newproduct), 201
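# Example request body for addProduct (sketch; the URL rule for this handler is
# registered elsewhere, and the JSON field names are taken from the handler above):
#   POST JSON such as {"id": 1, "title": "Widget", "productdetail": "sample detail"}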
| 29.454545
| 92
| 0.737654
| 0
| 0
| 0
| 0
| 420
| 0.432099
| 0
| 0
| 345
| 0.354938
|
b8ed5ea88b3e1f4c3f96f668efbaca32325efa0f
| 6,850
|
py
|
Python
|
tests/test_user.py
|
ccfiel/fbchat-asyncio
|
4ba39a835c7374c2cbf2a34e4e4fbf5c60ce6891
|
[
"BSD-3-Clause"
] | 1
|
2019-11-02T14:44:05.000Z
|
2019-11-02T14:44:05.000Z
|
tests/test_user.py
|
ccfiel/fbchat-asyncio
|
4ba39a835c7374c2cbf2a34e4e4fbf5c60ce6891
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_user.py
|
ccfiel/fbchat-asyncio
|
4ba39a835c7374c2cbf2a34e4e4fbf5c60ce6891
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import datetime
from fbchat._user import User, ActiveStatus
def test_user_from_graphql():
data = {
"id": "1234",
"name": "Abc Def Ghi",
"first_name": "Abc",
"last_name": "Ghi",
"profile_picture": {"uri": "https://scontent-arn2-1.xx.fbcdn.net/v/..."},
"is_viewer_friend": True,
"url": "https://www.facebook.com/profile.php?id=1234",
"gender": "FEMALE",
"viewer_affinity": 0.4560002,
}
assert User(
uid="1234",
photo="https://scontent-arn2-1.xx.fbcdn.net/v/...",
name="Abc Def Ghi",
url="https://www.facebook.com/profile.php?id=1234",
first_name="Abc",
last_name="Ghi",
is_friend=True,
gender="female_singular",
) == User._from_graphql(data)
def test_user_from_thread_fetch():
data = {
"thread_key": {"thread_fbid": None, "other_user_id": "1234"},
"name": None,
"last_message": {
"nodes": [
{
"snippet": "aaa",
"message_sender": {"messaging_actor": {"id": "1234"}},
"timestamp_precise": "1500000000000",
"commerce_message_type": None,
"extensible_attachment": None,
"sticker": None,
"blob_attachments": [],
}
]
},
"unread_count": 0,
"messages_count": 1111,
"image": None,
"updated_time_precise": "1500000000000",
"mute_until": None,
"is_pin_protected": False,
"is_viewer_subscribed": True,
"thread_queue_enabled": False,
"folder": "INBOX",
"has_viewer_archived": False,
"is_page_follow_up": False,
"cannot_reply_reason": None,
"ephemeral_ttl_mode": 0,
"customization_info": {
"emoji": None,
"participant_customizations": [
{"participant_id": "4321", "nickname": "B"},
{"participant_id": "1234", "nickname": "A"},
],
"outgoing_bubble_color": None,
},
"thread_admins": [],
"approval_mode": None,
"joinable_mode": {"mode": "0", "link": ""},
"thread_queue_metadata": None,
"event_reminders": {"nodes": []},
"montage_thread": None,
"last_read_receipt": {"nodes": [{"timestamp_precise": "1500000050000"}]},
"related_page_thread": None,
"rtc_call_data": {
"call_state": "NO_ONGOING_CALL",
"server_info_data": "",
"initiator": None,
},
"associated_object": None,
"privacy_mode": 1,
"reactions_mute_mode": "REACTIONS_NOT_MUTED",
"mentions_mute_mode": "MENTIONS_NOT_MUTED",
"customization_enabled": True,
"thread_type": "ONE_TO_ONE",
"participant_add_mode_as_string": None,
"is_canonical_neo_user": False,
"participants_event_status": [],
"page_comm_item": None,
"all_participants": {
"nodes": [
{
"messaging_actor": {
"id": "1234",
"__typename": "User",
"name": "Abc Def Ghi",
"gender": "FEMALE",
"url": "https://www.facebook.com/profile.php?id=1234",
"big_image_src": {
"uri": "https://scontent-arn2-1.xx.fbcdn.net/v/..."
},
"short_name": "Abc",
"username": "",
"is_viewer_friend": True,
"is_messenger_user": True,
"is_verified": False,
"is_message_blocked_by_viewer": False,
"is_viewer_coworker": False,
"is_employee": None,
}
},
{
"messaging_actor": {
"id": "4321",
"__typename": "User",
"name": "Aaa Bbb Ccc",
"gender": "NEUTER",
"url": "https://www.facebook.com/aaabbbccc",
"big_image_src": {
"uri": "https://scontent-arn2-1.xx.fbcdn.net/v/..."
},
"short_name": "Aaa",
"username": "aaabbbccc",
"is_viewer_friend": False,
"is_messenger_user": True,
"is_verified": False,
"is_message_blocked_by_viewer": False,
"is_viewer_coworker": False,
"is_employee": None,
}
},
]
},
"read_receipts": ...,
"delivery_receipts": ...,
}
assert User(
uid="1234",
photo="https://scontent-arn2-1.xx.fbcdn.net/v/...",
name="Abc Def Ghi",
last_active=datetime.datetime(2017, 7, 14, 2, 40, tzinfo=datetime.timezone.utc),
message_count=1111,
url="https://www.facebook.com/profile.php?id=1234",
first_name="Abc",
last_name="Def Ghi",
is_friend=True,
gender="female_singular",
nickname="A",
own_nickname="B",
color=None,
emoji=None,
) == User._from_thread_fetch(data)
def test_user_from_all_fetch():
data = {
"id": "1234",
"name": "Abc Def Ghi",
"firstName": "Abc",
"vanity": "",
"thumbSrc": "https://scontent-arn2-1.xx.fbcdn.net/v/...",
"uri": "https://www.facebook.com/profile.php?id=1234",
"gender": 1,
"i18nGender": 2,
"type": "friend",
"is_friend": True,
"mThumbSrcSmall": None,
"mThumbSrcLarge": None,
"dir": None,
"searchTokens": ["Abc", "Ghi"],
"alternateName": "",
"is_nonfriend_messenger_contact": False,
"is_blocked": False,
}
assert User(
uid="1234",
photo="https://scontent-arn2-1.xx.fbcdn.net/v/...",
name="Abc Def Ghi",
url="https://www.facebook.com/profile.php?id=1234",
first_name="Abc",
is_friend=True,
gender="female_singular",
) == User._from_all_fetch(data)
@pytest.mark.skip(reason="can't gather test data, the pulling is broken")
def test_active_status_from_chatproxy_presence():
assert ActiveStatus() == ActiveStatus._from_chatproxy_presence(data)
@pytest.mark.skip(reason="can't gather test data, the pulling is broken")
def test_active_status_from_buddylist_overlay():
assert ActiveStatus() == ActiveStatus._from_buddylist_overlay(data)
| 35.128205
| 88
| 0.489927
| 0
| 0
| 0
| 0
| 390
| 0.056934
| 0
| 0
| 2,967
| 0.433139
|
b8ed8469a90e01bd0b314d93c23d97aa1b93965d
| 143
|
py
|
Python
|
(3)Algorithms/operator_boolean.py
|
mass9/Python
|
66499164e36a4fe9630029d34b292ab06f849b2f
|
[
"MIT"
] | null | null | null |
(3)Algorithms/operator_boolean.py
|
mass9/Python
|
66499164e36a4fe9630029d34b292ab06f849b2f
|
[
"MIT"
] | null | null | null |
(3)Algorithms/operator_boolean.py
|
mass9/Python
|
66499164e36a4fe9630029d34b292ab06f849b2f
|
[
"MIT"
] | null | null | null |
from operator import*
a = -1
b = 5
print('a= ',a)
print('b= ',b)
print()
print(not_(a))
print(truth(a))
print(is_(a,b))
print(is_not(a,b))
| 9.533333
| 21
| 0.594406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.06993
|
b8edaac684aec68ed9d6e7241e67d70248284354
| 1,903
|
py
|
Python
|
nicos_mlz/erwin/setups/system.py
|
ebadkamil/nicos
|
0355a970d627aae170c93292f08f95759c97f3b5
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
nicos_mlz/erwin/setups/system.py
|
ebadkamil/nicos
|
0355a970d627aae170c93292f08f95759c97f3b5
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1
|
2021-08-18T10:55:42.000Z
|
2021-08-18T10:55:42.000Z
|
nicos_mlz/erwin/setups/system.py
|
ISISComputingGroup/nicos
|
94cb4d172815919481f8c6ee686f21ebb76f2068
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
description = 'system setup'
group = 'lowlevel'
sysconfig = dict(
cache = 'localhost',
instrument = 'ErWIN',
experiment = 'Exp',
datasinks = ['conssink', 'dmnsink'],
notifiers = [],
)
modules = ['nicos.commands.standard']
devices = dict(
ErWIN = device('nicos.devices.instrument.Instrument',
description = 'ErWIN instrument',
instrument = 'ErWIN',
responsible = 'Michael Heere <michael.heere@kit.edu>',
website = 'https://mlz-garching.de/erwin',
operators = [
'Karlsruhe Institute of Technology (KIT)',
],
),
Sample = device('nicos.devices.sample.Sample',
description = 'sample object',
),
Exp = device('nicos_mlz.devices.experiment.Experiment',
description = 'experiment object',
dataroot = 'data',
sample = 'Sample',
reporttemplate = '',
sendmail = False,
serviceexp = 'p0',
mailsender = 'erwin@frm2.tum.de',
mailserver = 'mailhost.frm2.tum.de',
elog = True,
managerights = dict(
enableDirMode = 0o775,
enableFileMode = 0o644,
disableDirMode = 0o550,
disableFileMode = 0o440,
owner = 'erwin',
group = 'erwin'
),
),
filesink = device('nicos.devices.datasinks.AsciiScanfileSink'),
conssink = device('nicos.devices.datasinks.ConsoleScanSink'),
dmnsink = device('nicos.devices.datasinks.DaemonSink'),
Space = device('nicos.devices.generic.FreeSpace',
description = 'The amount of free space for storing data',
warnlimits = (5., None),
path = None,
minfree = 5,
),
LogSpace = device('nicos.devices.generic.FreeSpace',
description = 'Space on log drive',
path = 'log',
warnlimits = (.5, None),
minfree = 0.5,
lowlevel = True,
),
)
| 29.734375
| 67
| 0.575933
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 697
| 0.366264
|
b8eeeede3579cb2a1baac69df57edebe5d6b3dd1
| 1,771
|
py
|
Python
|
clustering_normalized_cuts/run.py
|
kiss2u/google-research
|
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
|
[
"Apache-2.0"
] | 7
|
2020-03-15T12:14:07.000Z
|
2021-12-01T07:01:09.000Z
|
clustering_normalized_cuts/run.py
|
Alfaxad/google-research
|
2c0043ecd507e75e2df9973a3015daf9253e1467
|
[
"Apache-2.0"
] | 25
|
2020-07-25T08:53:09.000Z
|
2022-03-12T00:43:02.000Z
|
clustering_normalized_cuts/run.py
|
Alfaxad/google-research
|
2c0043ecd507e75e2df9973a3015daf9253e1467
|
[
"Apache-2.0"
] | 4
|
2021-02-08T10:25:45.000Z
|
2021-04-17T14:46:26.000Z
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the code for Clustering using our CNC framework."""
from __future__ import division
import collections
import os
from absl import app
from absl import flags
from clustering_normalized_cuts import setup
from clustering_normalized_cuts.cnc_net import run_net
from clustering_normalized_cuts.data_loader import get_data
flags.adopt_module_key_flags(setup)
FLAGS = flags.FLAGS
# SELECT GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
def main(_):
params = collections.defaultdict(lambda: None)
# SET GENERAL HYPERPARAMETERS
general_params = {
'dset': FLAGS.dset, # dataset: reuters / mnist
'val_set_fraction': 0.1, # fraction of training set to use as validation
'siam_batch_size': 128, # minibatch size for siamese net
'main_path': FLAGS.main_path,
'result_path': FLAGS.result_path
}
params.update(general_params)
# SET DATASET SPECIFIC HYPERPARAMETERS
if FLAGS.dset == 'mnist':
mnist_params = setup.set_mnist_params()
params.update(mnist_params)
# LOAD DATA
setup.seed_init()
data = get_data(params)
# RUN EXPERIMENT
run_net(data, params)
if __name__ == '__main__':
app.run(main)
| 29.032787
| 79
| 0.749294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 973
| 0.549407
|
b8ef33ed1947340aa880647a993de9c30d1767e8
| 4,029
|
py
|
Python
|
remps/policy/gaussian.py
|
albertometelli/remps
|
d243d4f23c4b8de5220788853c8e2dd5852e593e
|
[
"MIT"
] | 6
|
2019-06-17T15:13:45.000Z
|
2020-08-27T10:09:16.000Z
|
remps/policy/gaussian.py
|
albertometelli/remps
|
d243d4f23c4b8de5220788853c8e2dd5852e593e
|
[
"MIT"
] | 13
|
2020-01-28T22:43:36.000Z
|
2022-03-11T23:46:19.000Z
|
remps/policy/gaussian.py
|
albertometelli/remps
|
d243d4f23c4b8de5220788853c8e2dd5852e593e
|
[
"MIT"
] | 1
|
2019-08-11T22:41:59.000Z
|
2019-08-11T22:41:59.000Z
|
import tensorflow as tf
from remps.policy.policy import Policy
from remps.utils.utils import get_default_tf_dtype
class Gaussian(Policy):
"""
Used for torcs
MultiLayerPerceptron Discrete policy.
Parametrized by the input space, the action space and the hidden layer size.
Basic policy network with only one hidden layer with sigmoid activation function
"""
def __init__(self, state_space, action_space, hidden_layer_size, name="policy"):
"""
Builds a policy network and returns a node for the gradient and a node for action selection
Simple network: from state space to action space
Start from a random policy, all weights equal to 0
@param state_space: dimension of state space
@param action_space: dimension of action space
"""
# net params
super().__init__(name)
self.hidden_layer_size = hidden_layer_size
self.state_space = state_space
self.action_space = action_space
self.sess = None
self.default_dtype = get_default_tf_dtype()
def __call__(self, state, taken_actions):
with tf.variable_scope(self.name):
# Net
self.eps = tf.constant(1e-24, dtype=self.default_dtype)
if self.hidden_layer_size > 0:
biases = tf.get_variable(
"b",
[self.hidden_layer_size],
initializer=tf.random_normal_initializer(
0, 0.001, dtype=self.default_dtype
),
dtype=self.default_dtype,
)
W = tf.get_variable(
"W",
[self.state_space, self.hidden_layer_size],
initializer=tf.random_normal_initializer(
0, 0.001, dtype=self.default_dtype
),
dtype=self.default_dtype,
)
h = tf.matmul(state, W)
h = tf.tanh(h + biases)
else:
h = state
steer = tf.layers.dense(
inputs=h, units=1, activation=tf.tanh, use_bias=True
)
acc = tf.layers.dense(
inputs=h, units=1, activation=tf.sigmoid, use_bias=True
)
brake = tf.layers.dense(
inputs=h, units=1, activation=tf.sigmoid, use_bias=True
)
v_steer = tf.exp(
tf.get_variable(
"v_steer",
1,
initializer=tf.random_normal_initializer(
0, 0.1, dtype=self.default_dtype
),
dtype=self.default_dtype,
)
)
v_acc = tf.exp(
tf.get_variable(
"v_acc",
1,
initializer=tf.random_normal_initializer(
0, 0.1, dtype=self.default_dtype
),
dtype=self.default_dtype,
)
)
v_brake = tf.exp(
tf.get_variable(
"v_brake",
1,
initializer=tf.random_normal_initializer(
0, 0.1, dtype=self.default_dtype
),
dtype=self.default_dtype,
)
)
            means = tf.concat([steer, acc, brake], axis=1)
            stds = tf.concat([v_steer, v_acc, v_brake], axis=0)
self.dist = tf.distributions.Normal(means, stds)
self._pi = self.dist.sample()
self._pi_prob = self.dist.prob(taken_actions)
self._log_pi = self.dist.log_prob(taken_actions)
return self._pi_prob, self._log_pi
def get_policy_network(self):
return self._pi
def initialize(self, sess):
self.sess = sess
init = tf.initialize_variables(self.trainable_vars)
self.sess.run(init)
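# Usage sketch (hedged; the dimensions below are illustrative, not from the repo):
#   policy = Gaussian(state_space=29, action_space=3, hidden_layer_size=64)
#   pi_prob, log_pi = policy(state_ph, action_ph)   # build the graph once
#   policy.initialize(sess)                         # then init its variables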
| 35.034783
| 99
| 0.516505
| 3,911
| 0.970712
| 0
| 0
| 0
| 0
| 0
| 0
| 633
| 0.157111
|
b8f05419337e887d574b7c6ff46bba2da204e4eb
| 921
|
py
|
Python
|
rrr.py
|
tutacat/beep-play
|
41b50ebb0250289616cf3a4839fd0097d524ebd7
|
[
"BSD-2-Clause"
] | null | null | null |
rrr.py
|
tutacat/beep-play
|
41b50ebb0250289616cf3a4839fd0097d524ebd7
|
[
"BSD-2-Clause"
] | null | null | null |
rrr.py
|
tutacat/beep-play
|
41b50ebb0250289616cf3a4839fd0097d524ebd7
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
import math, random, subprocess, time
sin=math.sin
commands=["/usr/bin/setterm","/usr/bin/xset"]
fname = ""
file = None
type = None
_test = ""
cmd = None
class SystemError(BaseException):
pass
for c in commands:
_test = subprocess.getoutput("setterm --blength 256")
if not _test:
raise SystemError(c+" error")
if _test.find("not support")<0 and _test.find("error")<0:
cmd=c
break
else:
setterm=False
setterm=cmd==commands[0]
if not cmd:
raise SystemError("No supported command ("+",".join(commands)+")")
i=0
while 1:
note=sin(i*.1)*9+60
subprocess.run(( cmd,"--bfreq" if setterm else "b", str(round(2**((note-69)/12)*440)), "--blength" if setterm else "", str(round(100)) ))
print(end="\a",flush=True)
time.sleep(0.1)
i+=1
subprocess.run(( cmd,"--bfreq" if setterm else "b", "400", "--blength" if setterm else "", "200" ))
| 28.78125
| 141
| 0.624321
| 42
| 0.045603
| 0
| 0
| 0
| 0
| 0
| 0
| 203
| 0.220413
|
b8f101cbd2a4876f4d335fd3cc77c990454b6aca
| 26,558
|
py
|
Python
|
pygamma_agreement/continuum.py
|
faroit/pygamma-agreement
|
fcfcfe7332be15bd97e71b9987aa5c6104be299e
|
[
"MIT"
] | null | null | null |
pygamma_agreement/continuum.py
|
faroit/pygamma-agreement
|
fcfcfe7332be15bd97e71b9987aa5c6104be299e
|
[
"MIT"
] | null | null | null |
pygamma_agreement/continuum.py
|
faroit/pygamma-agreement
|
fcfcfe7332be15bd97e71b9987aa5c6104be299e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2020 CoML
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Rachid RIAD & Hadrien TITEUX
"""
##########
Continuum and corpus
##########
"""
import csv
import logging
import random
from copy import deepcopy
from functools import total_ordering
from pathlib import Path
from typing import Optional, Tuple, List, Union, Set, Iterable, TYPE_CHECKING, Dict
import cvxpy as cp
import numpy as np
from dataclasses import dataclass
from pyannote.core import Annotation, Segment, Timeline
from pyannote.database.util import load_rttm
from sortedcontainers import SortedDict, SortedSet
from typing_extensions import Literal
from .dissimilarity import AbstractDissimilarity
from .numba_utils import chunked_cartesian_product
if TYPE_CHECKING:
from .alignment import UnitaryAlignment, Alignment
CHUNK_SIZE = 2 ** 25
# defining Annotator type
Annotator = str
PivotType = Literal["float_pivot", "int_pivot"]
PrecisionLevel = Literal["high", "medium", "low"]
# percentages for the precision
PRECISION_LEVEL = {
"high": 0.01,
"medium": 0.02,
"low": 0.1
}
@total_ordering
@dataclass(frozen=True, eq=True)
class Unit:
"""
Represents an annotated unit, e.g., a time segment and (optionally)
a text annotation. Can be sorted or used in a set. If two units share
the same time segment, they're sorted alphabetically using their
annotation. The `None` annotation is first in the "alphabet"
"""
segment: Segment
annotation: Optional[str] = None
def __lt__(self, other: 'Unit'):
if self.segment == other.segment:
if self.annotation is None:
return True
elif other.annotation is None:
return False
else:
return self.annotation < other.annotation
else:
return self.segment < other.segment
class Continuum:
"""Continuum
Parameters
----------
uri : string, optional
name of annotated resource (e.g. audio or video file)
"""
@classmethod
def from_csv(cls,
path: Union[str, Path],
discard_invalid_rows=True,
delimiter: str = ","):
"""
Load annotations from a CSV file , with structure
annotator, category, segment_start, segment_end.
.. warning::
The CSV file shouldn't have any header
Parameters
----------
path: path or str
Path to the CSV file storing annotations
discard_invalid_rows: bool
            If True, rows containing invalid annotations are discarded (with a message) instead of raising
delimiter: str, default ","
CSV delimiter
Returns
-------
continuum : Continuum
New continuum object loaded from the CSV
"""
if isinstance(path, str):
path = Path(path)
continuum = cls()
with open(path) as csv_file:
reader = csv.reader(csv_file, delimiter=delimiter)
for row in reader:
seg = Segment(float(row[2]), float(row[3]))
try:
continuum.add(row[0], seg, row[1])
except ValueError as e:
if discard_invalid_rows:
print(f"Discarded invalid segment : {str(e)}")
else:
raise e
return continuum
@classmethod
def from_rttm(cls, path: Union[str, Path]) -> 'Continuum':
"""
Load annotations from a RTTM file. The file name field will be used
as an annotation's annotator
Parameters
----------
path: Path or str
Path to the CSV file storing annotations
Returns
-------
continuum : Continuum
New continuum object loaded from the RTTM file
"""
annotations = load_rttm(str(path))
continuum = cls()
for uri, annot in annotations.items():
continuum.add_annotation(uri, annot)
return continuum
@classmethod
def sample_from_continuum(cls, continuum: 'Continuum',
pivot_type: PivotType = "float_pivot",
ground_truth_annotators: Optional[List[Annotator]] = None) -> 'Continuum':
"""Generate a new random annotation from a single continuum
Strategy from figure 12
>>> continuum.sample_from_continuum()
... <pygamma_agreement.continuum.Continuum at 0x7f5527a19588>
"""
assert pivot_type in ('float_pivot', 'int_pivot')
last_start_time = max(unit.segment.start for _, unit in continuum)
new_continuum = Continuum()
if ground_truth_annotators is not None:
assert set(continuum.annotators).issuperset(set(ground_truth_annotators))
annotators = ground_truth_annotators
else:
annotators = continuum.annotators
# TODO: why not sample from the whole continuum?
        # TODO: shouldn't the number of sampled annotators equal the number of annotators?
for idx in range(continuum.num_annotators):
if pivot_type == 'float_pivot':
pivot = random.uniform(continuum.avg_length_unit, last_start_time)
else:
                pivot = random.randint(int(np.floor(continuum.avg_length_unit)),
                                       int(np.ceil(last_start_time)))
rnd_annotator = random.choice(annotators)
units = continuum._annotations[rnd_annotator]
sampled_annotation = SortedSet()
for unit in units:
if pivot < unit.segment.start:
new_segment = Segment(unit.segment.start - pivot,
unit.segment.end - pivot)
else:
new_segment = Segment(unit.segment.start + pivot,
unit.segment.end + pivot)
sampled_annotation.add(Unit(new_segment, unit.annotation))
new_continuum._annotations[f'Sampled_annotation {idx}'] = sampled_annotation
return new_continuum
def __init__(self, uri: Optional[str] = None):
self.uri = uri
# Structure {annotator -> SortedSet[Unit]}
self._annotations: Dict[Annotator, Set[Unit]] = SortedDict()
        # these are instantiated when compute_disorders is called
self._chosen_alignments: Optional[np.ndarray] = None
self._alignments_disorders: Optional[np.ndarray] = None
def copy(self) -> 'Continuum':
"""
Makes a copy of the current continuum.
Returns
-------
continuum: Continuum
"""
continuum = Continuum(self.uri)
continuum._annotations = deepcopy(self._annotations)
return continuum
def __bool__(self):
"""Truthiness, basically tests for emptiness
>>> if continuum:
... # continuum is not empty
... else:
... # continuum is empty
"""
return len(self._annotations) > 0
def __len__(self):
return len(self._annotations)
@property
def num_units(self) -> int:
"""Number of units"""
return sum(len(units) for units in self._annotations.values())
@property
def categories(self) -> Set[str]:
return set(unit.annotation for _, unit in self
if unit.annotation is not None)
@property
def num_annotators(self) -> int:
"""Number of annotators"""
return len(self._annotations)
@property
def avg_num_annotations_per_annotator(self):
"""Average number of annotated segments per annotator"""
return self.num_units / self.num_annotators
@property
def max_num_annotations_per_annotator(self):
"""The maximum number of annotated segments an annotator has
in this continuum"""
max_num_annotations_per_annotator = 0
for annotator in self._annotations:
max_num_annotations_per_annotator = np.max(
[max_num_annotations_per_annotator,
len(self[annotator])])
return max_num_annotations_per_annotator
@property
def avg_length_unit(self) -> float:
"""Mean of the annotated segments' durations"""
return sum(unit.segment.duration for _, unit in self) / self.num_units
def add(self, annotator: Annotator, segment: Segment, annotation: Optional[str] = None):
"""
Add a segment to the continuum
Parameters
----------
annotator: str
The annotator that produced the added annotation
segment: `pyannote.core.Segment`
The segment for that annotation
annotation: optional str
That segment's annotation, if any.
"""
if segment.duration == 0.0:
raise ValueError("Tried adding segment of duration 0.0")
if annotator not in self._annotations:
self._annotations[annotator] = SortedSet()
self._annotations[annotator].add(Unit(segment, annotation))
# units array has to be updated, nullifying
if self._alignments_disorders is not None:
self._chosen_alignments = None
self._alignments_disorders = None
def add_annotation(self, annotator: Annotator, annotation: Annotation):
"""
Add a full pyannote annotation to the continuum.
Parameters
----------
annotator: str
A string id for the annotator who produced that annotation.
annotation: :class:`pyannote.core.Annotation`
A pyannote `Annotation` object. If a label is present for a given
segment, it will be considered as that label's annotation.
"""
for segment, _, label in annotation.itertracks(yield_label=True):
self.add(annotator, segment, label)
def add_timeline(self, annotator: Annotator, timeline: Timeline):
"""
Add a full pyannote timeline to the continuum.
Parameters
----------
annotator: str
A string id for the annotator who produced that timeline.
timeline: `pyannote.core.Timeline`
A pyannote `Annotation` object. No annotation will be attached to
segments.
"""
for segment in timeline:
self.add(annotator, segment)
def add_textgrid(self,
annotator: Annotator,
tg_path: Union[str, Path],
selected_tiers: Optional[List[str]] = None,
use_tier_as_annotation: bool = False):
"""
Add a textgrid file's content to the Continuum
Parameters
----------
annotator: str
A string id for the annotator who produced that TextGrid.
tg_path: `Path` or str
Path to the textgrid file.
selected_tiers: optional list of str
If set, will drop tiers that are not contained in this list.
use_tier_as_annotation: optional bool
If True, the annotation for each non-empty interval will be the name
of its parent Tier.
"""
from textgrid import TextGrid, IntervalTier
tg = TextGrid.fromFile(str(tg_path))
for tier_name in tg.getNames():
if selected_tiers is not None and tier_name not in selected_tiers:
continue
tier: IntervalTier = tg.getFirst(tier_name)
for interval in tier:
if not interval.mark:
continue
if use_tier_as_annotation:
self.add(annotator,
Segment(interval.minTime, interval.maxTime),
tier_name)
else:
self.add(annotator,
Segment(interval.minTime, interval.maxTime),
interval.mark)
def add_elan(self,
annotator: Annotator,
eaf_path: Union[str, Path],
selected_tiers: Optional[List[str]] = None,
use_tier_as_annotation: bool = False):
"""
Add an Elan (.eaf) file's content to the Continuum
Parameters
----------
annotator: str
A string id for the annotator who produced that ELAN file.
eaf_path: `Path` or str
Path to the .eaf (ELAN) file.
selected_tiers: optional list of str
If set, will drop tiers that are not contained in this list.
use_tier_as_annotation: optional bool
If True, the annotation for each non-empty interval will be the name
of its parent Tier.
"""
from pympi import Eaf
eaf = Eaf(eaf_path)
for tier_name in eaf.get_tier_names():
if selected_tiers is not None and tier_name not in selected_tiers:
continue
for start, end, value in eaf.get_annotation_data_for_tier(tier_name):
if use_tier_as_annotation:
self.add(annotator, Segment(start, end), tier_name)
else:
self.add(annotator, Segment(start, end), value)
def merge(self, continuum: 'Continuum', in_place: bool = False) \
-> Optional['Continuum']:
"""
        Merge two continua together. Units from the same annotators
        are also merged together.
Parameters
----------
continuum: Continuum
other continuum to merge the current one with.
in_place: optional bool
If set to true, the merge is done in place, and the current
continuum (self) is the one being modified.
Returns
-------
continuum: optional Continuum
Only returned if "in_place" is false
"""
current_cont = self if in_place else self.copy()
for annotator, unit in continuum:
current_cont.add(annotator, unit.segment, unit.annotation)
if not in_place:
return current_cont
def __add__(self, other: 'Continuum'):
"""
Same as a "not-in-place" merge.
Parameters
----------
other: Continuum
Returns
-------
continuum: Continuum
See also
--------
:meth:`pygamma_agreement.Continuum.merge`
"""
return self.merge(other, in_place=False)
    def __getitem__(self, keys: Union[Annotator, Tuple[Annotator, int]]) \
            -> Union[SortedSet, Unit]:
        """Get an annotator's annotations, or a single unit
        >>> annotation = continuum[annotator]
        >>> unit = continuum[annotator, 2]
        """
        # indexing with continuum[annotator, idx] reaches Python as a single
        # tuple argument, so unpack it here instead of relying on *keys
        if isinstance(keys, tuple) and len(keys) == 2 and isinstance(keys[1], int):
            annotator, idx = keys
            return self._annotations[annotator][idx]
        return self._annotations[keys]
def __iter__(self) -> Iterable[Tuple[Annotator, Unit]]:
for annotator, annotations in self._annotations.items():
for unit in annotations:
yield annotator, unit
@property
def annotators(self):
"""List all annotators in the Continuum
>>> continuum.annotators:
... ["annotator_a", "annotator_b", "annot_ref"]
"""
return list(self._annotations.keys())
    def iterunits(self, annotator: str):
        """Iterate over the units of one annotator (in chronological and
        alphabetical order if annotations are present)
        >>> for unit in continuum.iterunits("Max"):
        ...     # do something with the unit
        """
        return iter(self._annotations[annotator])
def compute_disorders(self, dissimilarity: AbstractDissimilarity):
assert isinstance(dissimilarity, AbstractDissimilarity)
assert len(self.annotators) >= 2
disorder_args = dissimilarity.build_args(self)
nb_unit_per_annot = [len(arr) + 1 for arr in self._annotations.values()]
all_disorders = []
all_valid_tuples = []
for tuples_batch in chunked_cartesian_product(nb_unit_per_annot, CHUNK_SIZE):
batch_disorders = dissimilarity(tuples_batch, *disorder_args)
# Property section 5.1.1 to reduce initial complexity
valid_disorders_ids, = np.where(batch_disorders < self.num_annotators * dissimilarity.delta_empty)
all_disorders.append(batch_disorders[valid_disorders_ids])
all_valid_tuples.append(tuples_batch[valid_disorders_ids])
disorders = np.concatenate(all_disorders)
possible_unitary_alignments = np.concatenate(all_valid_tuples)
# Definition of the integer linear program
num_possible_unitary_alignements = len(disorders)
x = cp.Variable(shape=num_possible_unitary_alignements, boolean=True)
true_units_ids = []
num_units = 0
for units in self._annotations.values():
true_units_ids.append(np.arange(num_units, num_units + len(units)).astype(np.int32))
num_units += len(units)
# Constraints matrix
A = np.zeros((num_units, num_possible_unitary_alignements))
for p_id, unit_ids_tuple in enumerate(possible_unitary_alignments):
for annotator_id, unit_id in enumerate(unit_ids_tuple):
if unit_id != len(true_units_ids[annotator_id]):
A[true_units_ids[annotator_id][unit_id], p_id] = 1
obj = cp.Minimize(disorders.T @ x)
constraints = [cp.matmul(A, x) == 1]
prob = cp.Problem(obj, constraints)
# we don't actually care about the optimal loss value
optimal_value = prob.solve()
        # compare against 0.9 because cvxpy returns values near 1.0 or near 0.0 (e.g. 1e-14)
chosen_alignments_ids, = np.where(x.value > 0.9)
self._chosen_alignments = possible_unitary_alignments[chosen_alignments_ids]
self._alignments_disorders = disorders[chosen_alignments_ids]
return self._alignments_disorders.sum() / len(self._alignments_disorders)
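    # Added note (not in the original code): x is a boolean selection over all
    # candidate unitary alignments; the constraint A @ x == 1 forces every real
    # unit to belong to exactly one selected alignment, and minimizing
    # disorders.T @ x therefore yields the best alignment. The value returned
    # above is that alignment's mean disorder per unitary alignment.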
def get_best_alignment(self, dissimilarity: Optional['AbstractDissimilarity'] = None):
if self._chosen_alignments is None or self._alignments_disorders is None:
if dissimilarity is not None:
self.compute_disorders(dissimilarity)
else:
raise ValueError("Best alignment disorder hasn't been computed, "
"a the dissimilarity argument is required")
from .alignment import UnitaryAlignment, Alignment
set_unitary_alignements = []
for alignment_id, alignment in enumerate(self._chosen_alignments):
u_align_tuple = []
for annotator_id, unit_id in enumerate(alignment):
annotator, units = self._annotations.peekitem(annotator_id)
try:
unit = units[unit_id]
u_align_tuple.append((annotator, unit))
except IndexError: # it's a "null unit"
u_align_tuple.append((annotator, None))
unitary_alignment = UnitaryAlignment(tuple(u_align_tuple))
unitary_alignment.disorder = self._alignments_disorders[alignment_id]
set_unitary_alignements.append(unitary_alignment)
return Alignment(set_unitary_alignements, continuum=self, check_validity=True)
def compute_gamma(self,
dissimilarity: 'AbstractDissimilarity',
n_samples: int = 30,
precision_level: Optional[Union[float, PrecisionLevel]] = None,
ground_truth_annotators: Optional[List[Annotator]] = None,
sampling_strategy: str = "single",
pivot_type: PivotType = "float_pivot",
random_seed: Optional[float] = 4577
) -> 'GammaResults':
"""
Parameters
----------
dissimilarity: AbstractDissimilarity
dissimilarity instance. Used to compute the disorder between units.
n_samples: optional int
number of random continuum sampled from this continuum used to
estimate the gamma measure
        precision_level: optional float or "high", "medium", "low"
            error percentage of the gamma estimation. If a literal
            precision level is passed (e.g. "medium"), the corresponding numerical
            value will be used (high: 1%, medium: 2%, low: 10%)
        ground_truth_annotators: optional list of str
            if set, the random continua will only be sampled from these
            annotators. This should be used when you want to compare a prediction
            against some ground truth annotation.
        pivot_type: 'float_pivot' or 'int_pivot'
            pivot type to be used when sampling continua
        random_seed: optional float, int or str
            random seed used to set up the random state before sampling the
            random continua
        Returns
        -------
        gamma_results: GammaResults
            holds the best alignment and the chance disorders from which the
            gamma value is derived
"""
assert sampling_strategy in ("single", "multi")
if sampling_strategy == "multi":
raise NotImplemented("Multi-continuum sampling strategy is not "
"supported for now")
if random_seed is not None:
random.seed(random_seed)
chance_disorders = []
for _ in range(n_samples):
sampled_continuum = Continuum.sample_from_continuum(self, pivot_type, ground_truth_annotators)
sample_disorder = sampled_continuum.compute_disorders(dissimilarity)
chance_disorders.append(sample_disorder)
if precision_level is not None:
if isinstance(precision_level, str):
precision_level = PRECISION_LEVEL[precision_level]
assert 0 < precision_level < 1.0
# taken from subsection 5.3 of the original paper
# confidence at 95%, i.e., 1.96
variation_coeff = np.std(chance_disorders) / np.mean(chance_disorders)
confidence = 1.96
required_samples = np.ceil((variation_coeff * confidence / precision_level) ** 2).astype(np.int32)
logging.debug(f"Number of required samples for confidence {precision_level}: {required_samples}")
if required_samples > n_samples:
for _ in range(required_samples - n_samples):
sampled_continuum = Continuum.sample_from_continuum(self, pivot_type, ground_truth_annotators)
sample_disorder = sampled_continuum.compute_disorders(dissimilarity)
chance_disorders.append(sample_disorder)
best_alignment = self.get_best_alignment(dissimilarity)
return GammaResults(
best_alignment=best_alignment,
pivot_type=pivot_type,
n_samples=n_samples,
chance_disorders=np.array(chance_disorders),
precision_level=precision_level
)
    def compute_gamma_cat(self):
        raise NotImplementedError()
def to_csv(self, path: Union[str, Path], delimiter=","):
if isinstance(path, str):
path = Path(path)
with open(path, "w") as csv_file:
writer = csv.writer(csv_file, delimiter=delimiter)
for annotator, unit in self:
writer.writerow([annotator, unit.annotation,
unit.segment.start, unit.segment.end])
def _repr_png_(self):
"""IPython notebook support
See also
--------
:mod:`pygamma_agreement.notebook`
"""
from .notebook import repr_continuum
return repr_continuum(self)
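# Illustrative sketch (added; not part of the original module): a typical
# end-to-end use of the Continuum class defined above. The concrete dissimilarity
# is passed in by the caller, since this module only exposes the
# AbstractDissimilarity interface.
def _example_gamma_workflow(dissimilarity: AbstractDissimilarity) -> float:
    continuum = Continuum(uri="toy-example")
    continuum.add("annotator_a", Segment(0.0, 5.0), "speech")
    continuum.add("annotator_a", Segment(6.0, 8.0), "noise")
    continuum.add("annotator_b", Segment(0.5, 5.5), "speech")
    continuum.add("annotator_b", Segment(6.5, 8.5), "noise")
    # compute_gamma samples random continua to estimate the expected (chance) disorder
    results = continuum.compute_gamma(dissimilarity, n_samples=10)
    return results.gamma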
@dataclass
class GammaResults:
"""
Gamma results object. Stores information about a gamma measure computation.
"""
best_alignment: 'Alignment'
pivot_type: PivotType
n_samples: int
chance_disorders: np.ndarray
precision_level: Optional[float] = None
@property
def alignments_nb(self):
return len(self.best_alignment.unitary_alignments)
@property
def observed_agreement(self) -> float:
"""Returns the disorder of the computed best alignment, i.e, the
observed agreement."""
return self.best_alignment.disorder
@property
def expected_disagreement(self) -> float:
"""Returns the expected disagreement for computed random samples, i.e.,
        the mean of the sampled continua's disorders"""
return self.chance_disorders.mean()
@property
def approx_gamma_range(self):
"""Returns a tuple of the expected boundaries of the computed gamma,
obtained using the expected disagreement and the precision level"""
if self.precision_level is None:
raise ValueError("No precision level has been set, cannot compute"
"the gamma boundaries")
return (1 - self.observed_agreement / (self.expected_disagreement *
(1 - self.precision_level)),
1 - self.observed_agreement / (self.expected_disagreement *
(1 + self.precision_level)))
@property
def gamma(self):
"""Returns the gamma value"""
return 1 - self.observed_agreement / self.expected_disagreement
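# Added note (not part of the original module): the gamma property above is just
# 1 - observed / expected, so an observed disorder of 0.2 against an expected
# (chance) disorder of 0.8 gives gamma = 1 - 0.2 / 0.8 = 0.75.
def _example_gamma_arithmetic() -> float:
    observed_disorder = 0.2
    expected_chance_disorder = 0.8
    return 1 - observed_disorder / expected_chance_disorder  # 0.75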
| 37.300562
| 114
| 0.613224
| 24,328
| 0.916033
| 195
| 0.007342
| 8,020
| 0.301981
| 0
| 0
| 9,989
| 0.37612
|
b8f295ce12bf7401ea1d40884fb3f417f25a7bfd
| 6,907
|
py
|
Python
|
stomasimulator/febio/xplt/xplt_calcs.py
|
woolfeh/stomasimulator
|
ead78b78809f35c17e2d784259bdeb56589a9d1c
|
[
"MIT"
] | 2
|
2017-07-27T12:57:26.000Z
|
2017-07-28T13:55:15.000Z
|
stomasimulator/febio/xplt/xplt_calcs.py
|
woolfeh/stomasimulator
|
ead78b78809f35c17e2d784259bdeb56589a9d1c
|
[
"MIT"
] | null | null | null |
stomasimulator/febio/xplt/xplt_calcs.py
|
woolfeh/stomasimulator
|
ead78b78809f35c17e2d784259bdeb56589a9d1c
|
[
"MIT"
] | 1
|
2020-06-02T15:31:04.000Z
|
2020-06-02T15:31:04.000Z
|
import stomasimulator.geom.geom_utils as geom
class AttributeCalculator(object):
""" Abstraction for calculations performed on XPLT state data """
def __init__(self, prefix, reference_data, dimensionality, lambda_fn=None):
self.prefix = '' if prefix is None else prefix
self.reference_data = reference_data
self.dimensionality = dimensionality
self.lambda_fn = (lambda x: x) if lambda_fn is None else lambda_fn
def calculate(self, nid_pt_dict, extras=None):
""" Perform the calculation
:param nid_pt_dict: dictionary of an integer 'node id' to a Point object
:param extras: passed on to the subclass
:return: a dictionary containing label-result pairs from the calculation
:rtype: dict
"""
data = self._calculate(nid_pt_dict, extras)
if self.dimensionality == 1:
data = (data,)
return {k: self.lambda_fn(v) for k, v in zip(self.labels(), data)}
def _calculate(self, nid_pt_dict, extras):
""" Calculation implementation - to be overridden in subclasses """
pass
def labels(self):
""" Get the labels for the calculation results """
suffices = self.calculation_suffices()
assert len(suffices) == self.dimensionality, 'Error! Data label dimensionality mismatch.'
fmt_string = '{}{}' if len(self.prefix) == 0 or len(suffices[0]) == 0 else '{}-{}'
return [fmt_string.format(self.prefix, suffix) for suffix in suffices]
def calculation_suffices(self):
""" These suffices are appended to the labels of the calculation result """
return ['', ] * self.dimensionality
def _get_point(ref_pt, id_pt_dict):
return id_pt_dict.get(ref_pt) if isinstance(ref_pt, int) else ref_pt
class DistanceCalculator(AttributeCalculator):
""" Distance between two points """
def __init__(self, prefix, node_pair, lambda_fn=None):
node_0 = node_pair[0]
node_1 = node_pair[1]
reference_data = (node_0 if node_0.id is None else node_0.id,
node_1 if node_1.id is None else node_1.id)
super(DistanceCalculator, self).__init__(prefix=prefix,
reference_data=reference_data,
dimensionality=1,
lambda_fn=lambda_fn)
def _calculate(self, nid_pt_dict, extras):
pt_0 = _get_point(self.reference_data[0], nid_pt_dict)
pt_1 = _get_point(self.reference_data[1], nid_pt_dict)
return pt_0.distance(pt_1)
class DirectionalDistanceCalculator(DistanceCalculator):
""" Signed distance calculator """
def __init__(self, prefix, node_pair, direction, lambda_fn=None):
""" Calculate a distance in a specified direction
:param prefix:
        :param node_pair: two Points; node_pair[0] should be further along 'direction' than
            node_pair[1], so that 'node_pair[0] - node_pair[1]' points along 'direction'
:param direction: the direction vector
:param lambda_fn:
"""
super(DirectionalDistanceCalculator, self).__init__(prefix=prefix,
node_pair=node_pair,
lambda_fn=lambda_fn)
self.direction = direction.unit()
def _calculate(self, nid_pt_dict, extras):
pt_0 = _get_point(self.reference_data[0], nid_pt_dict)
pt_1 = _get_point(self.reference_data[1], nid_pt_dict)
is_in_right_direction = (pt_0 - pt_1) * self.direction > 0.0
return pt_0.distance(pt_1) if is_in_right_direction else 0.0
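# Illustrative sketch (added; not part of the original module): a minimal
# stand-in point type is used here so the DistanceCalculator flow can be shown
# without assuming the exact API of stomasimulator's geometry Point class
# (only the 'id' attribute and 'distance' method used above are mimicked).
class _DemoPoint(object):
    def __init__(self, x, y, z, pid=None):
        self.coords = (x, y, z)
        self.id = pid
    def distance(self, other):
        return sum((a - b) ** 2 for a, b in zip(self.coords, other.coords)) ** 0.5
def _example_distance_calculator():
    pt_a = _DemoPoint(0.0, 0.0, 0.0)
    pt_b = _DemoPoint(3.0, 4.0, 0.0)
    calc = DistanceCalculator(prefix='tip-gap', node_pair=(pt_a, pt_b))
    # the single label is just the prefix (empty suffix), so this returns
    # {'tip-gap': 5.0}
    return calc.calculate(nid_pt_dict={})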
class AreaCalculator2D(AttributeCalculator):
""" Calculate area from a list of points (assumed to be in xy plane) """
def __init__(self, prefix, boundary_pts, lambda_fn=None):
super(AreaCalculator2D, self).__init__(prefix=prefix,
reference_data=boundary_pts,
dimensionality=1,
lambda_fn=lambda_fn)
def _calculate(self, nid_pt_dict, extras):
updated_pore_pts = [nid_pt_dict[pt.id] for pt in self.reference_data]
pore_area = geom.calculate_polygon_area(updated_pore_pts)
return pore_area
class AreaCalculator3D(AttributeCalculator):
""" Calculate an area from a list of facets """
def __init__(self, prefix, facet_list):
super(AreaCalculator3D, self).__init__(prefix=prefix,
reference_data=facet_list,
dimensionality=1)
def _calculate(self, nid_pt_dict, extras):
area = geom.calculate_surface_area(nid_pt_dict, self.reference_data)
return area
class AreaVolumeCalculator(AttributeCalculator):
""" Perform a combined calculation to get the surface area and volume given a list of facets """
def __init__(self, prefix, facet_list):
super(AreaVolumeCalculator, self).__init__(prefix=prefix,
reference_data=facet_list,
dimensionality=2)
def _calculate(self, nid_pt_dict, extras):
volume, area = geom.calculate_volume_and_area(nid_pt_dict, self.reference_data)
return area, volume
def calculation_suffices(self):
return 'area', 'volume'
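# Illustrative sketch (added; not part of the original module): the combined
# calculator reports two values, and its labels join the prefix with the
# 'area'/'volume' suffices defined above.
def _example_area_volume_labels():
    calc = AreaVolumeCalculator(prefix='stoma', facet_list=[])
    return calc.labels()  # ['stoma-area', 'stoma-volume']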
class XpltReaderMetrics(object):
""" Identify the metrics that will be calculated for the XpltReader """
def __init__(self, comparison_helper=None, is_mesh_calculation_on=False):
"""
        :param comparison_helper: Comparison helper for the stoma
        :type comparison_helper: ComparisonHelper
:param is_mesh_calculation_on: Whether to calculate the mesh metrics (or not)
:type is_mesh_calculation_on: bool
"""
self.comparison_helper = comparison_helper
self.is_mesh_calculation_on = is_mesh_calculation_on
@property
def is_compare_vs_open_stoma_on(self):
"""
:return: Whether or not to perform the comparison
:rtype: bool
"""
return self.comparison_helper is not None
def evaluate_metric(self, sim_state):
"""
Calculate the metric and percent difference vs. each measurement
:param sim_state: State object holding data from the simulation
:type sim_state: State
:return: Each item is a pair comprising a name (key) and its float value
:rtype: list of tuple
"""
result = self.comparison_helper.perform_comparison(state_pressure=sim_state.time,
state_data=sim_state.attributes)
return result
if __name__ == '__main__':
pass
| 37.538043
| 106
| 0.624005
| 6,691
| 0.968727
| 0
| 0
| 205
| 0.02968
| 0
| 0
| 1,936
| 0.280295
|
b8f30a5084a67468fea8c7e34b0fb7344b7f99fe
| 801
|
py
|
Python
|
ifplus/vfs/__init__.py
|
hitakaken/ifplus
|
8354eeceea8abcbcaeb5dcd1c11eef69cbef6557
|
[
"MIT"
] | null | null | null |
ifplus/vfs/__init__.py
|
hitakaken/ifplus
|
8354eeceea8abcbcaeb5dcd1c11eef69cbef6557
|
[
"MIT"
] | null | null | null |
ifplus/vfs/__init__.py
|
hitakaken/ifplus
|
8354eeceea8abcbcaeb5dcd1c11eef69cbef6557
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .helpers.vfs import VirtualFileSystem
from .views.files import ns
class VFS(object):
def __init__(self, app=None, mongo=None, **kwargs):
self.app = app
self.mongo = mongo
self.vfs = None
if app is not None:
self.app = app
self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
self.app = app
config = app.config.get('VFS', {})
self.vfs = VirtualFileSystem(app,
rid=config.get(u'RID', u'0000-0000-0000-0000'),
root=config.get(u'ROOT', None),
devices=config.get(u'DEVICES', None))
setattr(self.app, 'vfs', self.vfs)
self.app.api.add_namespace(ns)
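# Usage sketch (added; not part of the original module, and hedged because it
# assumes a Flask-style app exposing the 'config' and 'api' attributes used in
# init_app above):
#
#   vfs_ext = VFS()          # create the extension without an app
#   vfs_ext.init_app(app)    # later: reads app.config['VFS'] and registers 'ns'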
| 33.375
| 84
| 0.516854
| 703
| 0.877653
| 0
| 0
| 0
| 0
| 0
| 0
| 78
| 0.097378
|
b8f325c7a53b048ae96a1a8dd82c6640cb732eac
| 51,954
|
py
|
Python
|
fordclassifier/evaluator/evaluatorClass.py
|
Orieus/one_def_classification
|
3269290e1fa06ec104a38810c5dffa5401f34ef1
|
[
"MIT"
] | null | null | null |
fordclassifier/evaluator/evaluatorClass.py
|
Orieus/one_def_classification
|
3269290e1fa06ec104a38810c5dffa5401f34ef1
|
[
"MIT"
] | null | null | null |
fordclassifier/evaluator/evaluatorClass.py
|
Orieus/one_def_classification
|
3269290e1fa06ec104a38810c5dffa5401f34ef1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
@author: Angel Navia Vázquez
May 2018
'''
# import code
# code.interact(local=locals())
import os
import pickle
# from fordclassifier.classifier.classifier import Classifier
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc
import json
import matplotlib.pyplot as plt
import operator
import itertools
from sklearn.metrics import confusion_matrix
from collections import OrderedDict
import pyemd
# Local imports
from fordclassifier.evaluator.predictorClass import Predictor
from fordclassifier.evaluator.rbo import *
import pdb
class Evaluator(object):
'''
Class to evaluate the performance of the classifiers
============================================================================
Methods:
============================================================================
_recover: if a variable is not in memory, tries to recover it from disk
    _get_folder: returns full path to a subfolder
_exists_file: check if the file exists in disk
draw_rocs: draws the Xval ROCs and saves them as png files
load_Xtfidf: Loads from disk Xtfidf and tags
load_test_data: Loads from disk test Xtfidf and tags
load_train_data: Loads from disk train Xtfidf and tags
compute_average_xval_AUC: computes the average AUC on xval
compute_average_test_AUC: computes the average AUC on test
obtain_labels_from_Preds: Produces the multilabel tag prediction from
individual predictions of every classifier
    compute_confusion_matrix: computes the confusion matrix on test
        (multiclass case)
    compute_confusion_matrix_multilabel: computes the confusion matrix for a
        multilabel set (multilabel case)
    draw_confusion_matrix: draws the CM and saves it as a png file
draw_ROCS_tst: draws the ROC curves for the test data
draw_anyROC: draws the ROC curves
compute_thresholds: computes the thresholds
compute_cardinality: computes the cardinality of the tags
compute_label_density: Computes the label density
JaccardIndex: Computes the Jaccard index
compute_multilabel_threshold: Computes the multilabel threshold
draw_costs_on_test: draws the multilabel cost for the test data
load_multilabel_threshold: Loads the multilabel thresholds
Jaccard_RBO_cost: Computes a convex combination of the Jaccard and
RBO costs
align_strings: Aligns strings into columns
get_pred_weights: Returns the normalized predictions
write_prediction_report: writes a simple prediction report in text format
============================================================================
'''
def __init__(self, project_path, subfolders, categories=None, verbose=True):
'''
Initialization: Creates the initial object data
Inputs:
- project_path: path to the working project
- subfolders: subfolder structure
'''
self._project_path = project_path # working directory
self._verbose = verbose # messages are printed on screen when True
self.models2evaluate = None # models to evaluate (classif, params)
self._subfolders = None # subfolders structure
self.best_auc = None # Best AUC
self.best_models = None # Best models
self.Xtfidf_tr = None # Xtfidf for training
self.tags_tr = None # Training tags
self.tags = None # All tags
self.ths_dict = None # dict with the thresholds for every classifier
self.Preds = None # Prediction matrix, one column per category
self.Preds_tr = None # Pred. matrix, one column per category, train
self.Preds_tst = None # Pred. matrix, one column per category, test
self.index_tst = None # Index for tags test
self.categories = categories # List of categories
self.Xtfidf_tst = None # Xtfidf for test
self.tags_tst = None # Test tags
self.CONF = None # Confusion matrix
self.multilabel_th = None # Multilabel Threshold
self._subfolders = subfolders
def _get_folder(self, subfolder):
'''
gets full path to a folder
Inputs:
- subfolder: target subfolder
'''
return os.path.join(self._project_path, self._subfolders[subfolder])
def _exists_file(self, filename):
'''
Checks if the file exists
Inputs:
- filename
'''
try:
f = open(filename, 'r')
existe = True
f.close()
except:
existe = False
pass
return existe
def _recover(self, field):
'''
Loads from disk a previously stored variable, to avoid recomputing it
Inputs:
- field: variable to restore from disk
'''
if field == 'best_auc':
input_file = os.path.join(self._get_folder('results'),
'best_auc.json')
with open(input_file, 'r') as f:
self.best_auc = json.load(f)
if field == 'best_models':
try:
input_file = os.path.join(self._get_folder('results'),
'best_models.json')
with open(input_file, 'r') as f:
self.best_models = json.load(f)
except:
input_file = os.path.join(self._get_folder('export'),
'best_models.json')
with open(input_file, 'r') as f:
self.best_models = json.load(f)
pass
if field == 'Xtfidf_tr':
filetoload_Xtfidf = os.path.join(
self._project_path + self._subfolders['training_data'],
'train_data.pkl')
with open(filetoload_Xtfidf, 'rb') as f:
[self.Xtfidf_tr, tags_tr, self.tags_tr,
refs_tr] = pickle.load(f)
if field == 'Xtfidf_tst':
filetoload_Xtfidf = os.path.join(
self._project_path + self._subfolders['test_data'],
'test_data.pkl')
with open(filetoload_Xtfidf, 'rb') as f:
[self.Xtfidf_tst, tags_tst, self.tags_tst,
refs_tst] = pickle.load(f)
if field == 'tags':
filetoload_tags = os.path.join(
self._project_path + self._subfolders['training_data'],
'tags.pkl')
with open(filetoload_tags, 'rb') as f:
self.tags = pickle.load(f)
if field == 'ths_dict':
try:
filename = os.path.join(
self._project_path + self._subfolders['results'],
'ths_dict.pkl')
with open(filename, 'rb') as f:
self.ths_dict = pickle.load(f)
except:
filename = os.path.join(
self._project_path + self._subfolders['export'],
'ths_dict.pkl')
with open(filename, 'rb') as f:
self.ths_dict = pickle.load(f)
pass
if field == 'Preds':
filename = os.path.join(
self._project_path + self._subfolders['results'], 'Preds.pkl')
with open(filename, 'rb') as f:
self.Preds = pickle.load(f)
if field == 'Preds_tr':
filename = os.path.join(
self._project_path, self._subfolders['results'],
'Preds_tr.pkl')
with open(filename, 'rb') as f:
self.Preds_tr = pickle.load(f)
if field == 'Preds_tst':
filename = os.path.join(
self._project_path, self._subfolders['results'],
'Preds_test.pkl')
with open(filename, 'rb') as f:
self.Preds_tst = pickle.load(f)
if field == 'CONF':
filename = os.path.join(
self._project_path + self._subfolders['results'], 'CONF.pkl')
with open(filename, 'rb') as f:
self.CONF = pickle.load(f)
if field == 'tags_index':
filename = os.path.join(
self._project_path + self._subfolders['test_data'],
'tags_index.pkl')
with open(filename, 'rb') as f:
[self.tags_tst, self.index_tst] = pickle.load(f)
if field == 'categories':
try:
filename = os.path.join(
self._project_path + self._subfolders['training_data'],
'categories.pkl')
with open(filename, 'rb') as f:
self.categories = pickle.load(f)
except:
filename = os.path.join(
self._project_path + self._subfolders['export'],
'categories.pkl')
with open(filename, 'rb') as f:
self.categories = pickle.load(f)
pass
if field == 'models2evaluate':
try:
filename = os.path.join(
self._project_path + self._subfolders['training_data'],
'models2evaluate.pkl')
with open(filename, 'rb') as f:
self.models2evaluate = pickle.load(f)
except:
filename = os.path.join(
self._project_path + self._subfolders['export'],
'models2evaluate.pkl')
with open(filename, 'rb') as f:
self.models2evaluate = pickle.load(f)
pass
if field == 'multilabel_th':
try:
filename = os.path.join(
self._project_path + self._subfolders['training_data'],
'multilabel_th.pkl')
with open(filename, 'rb') as f:
self.multilabel_th = pickle.load(f)
except:
filename = os.path.join(
self._project_path + self._subfolders['export'],
'multilabel_th.pkl')
with open(filename, 'rb') as f:
self.multilabel_th = pickle.load(f)
pass
return
def draw_rocs(self, verbose=True):
'''
Draws the Xval ROCs and saves them as png files
Inputs:
- None, it operates on self values
'''
if verbose:
print("Saving ROC figures ...")
if self.categories is None:
self._recover('categories')
if self.models2evaluate is None:
self._recover('models2evaluate')
# get the evaluated models
models = list(self.models2evaluate.keys())
Nclass = len(models)
Ncats = len(self.categories)
for kcat in range(0, Ncats):
plt.figure(figsize=(15, 12))
aucs = []
cat = self.categories[kcat]
for kclass in range(0, Nclass):
try:
model_name = models[kclass]
file_input_ROC = os.path.join(
self._get_folder('eval_ROCs'),
'ROC_' + model_name + '_' + cat + '.pkl')
with open(file_input_ROC, 'rb') as f:
mdict = pickle.load(f)
auc = mdict['roc_auc_loo']
aucs.append((model_name, auc))
except:
pass
# Sorting by AUC
aucs.sort(key=operator.itemgetter(1), reverse=True)
colors = ['k', 'r', 'g', 'b', 'm', 'c', 'r--', 'g--', 'b--', 'm--',
'c--', 'k--']
# drawing the best 10 models
for k in range(0, 10):
try:
model_name = aucs[k][0]
auc = aucs[k][1]
file_input_ROC = os.path.join(
self._get_folder('eval_ROCs'),
'ROC_' + model_name + '_' + cat + '.pkl')
with open(file_input_ROC, 'rb') as f:
mdict = pickle.load(f)
fpr = mdict['fpr_loo']
tpr = mdict['tpr_loo']
text = model_name + ', AUC= ' + str(auc)[0:6]
if auc > 0.6:
if k == 0:
# drawing the best model with thicker line
plt.plot(fpr, tpr, colors[k], label=text,
linewidth=6.0)
else:
plt.plot(fpr, tpr, colors[k], label=text,
linewidth=2.0)
except:
pass
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC curves for category ' + cat)
plt.grid(True)
plt.legend(loc="lower right")
filename = os.path.join(self._get_folder('ROCS_tr'),
cat + '_ROC_xval.png')
plt.savefig(filename)
plt.close()
if verbose:
print(cat, )
return
def load_Xtfidf(self, verbose=True):
'''
Loads from disk Xtfidf and tags
Inputs:
- None, it operates on self values
'''
if self.Xtfidf is None:
self._recover('Xtfidf')
if self.tags is None:
self._recover('tags')
return self.Xtfidf, self.tags
def load_test_data(self, verbose=True):
'''
Loads from disk test Xtfidf and tags
Inputs:
- None, it operates on self values
'''
filename = os.path.join(
self._project_path + self._subfolders['test_data'],
'test_data.pkl')
with open(filename, 'rb') as f:
[self.Xtfidf_tst, self.tags_tst, refs_tst] = pickle.load(f)
new_tags_tst = []
for tags in self.tags_tst:
unique_tags = sorted(set(tags), key=tags.index)
new_tags_tst.append(unique_tags)
return self.Xtfidf_tst, new_tags_tst, refs_tst
def load_train_data(self, verbose=True):
'''
Loads from disk train Xtfidf and tags
Inputs:
- None, it operates on self values
'''
filename = os.path.join(
self._project_path + self._subfolders['training_data'],
'train_data.pkl')
with open(filename, 'rb') as f:
[self.Xtfidf_tr, self.tags_tr, refs_tr] = pickle.load(f)
new_tags_tr = []
for tags in self.tags_tr:
unique_tags = sorted(set(tags), key=tags.index)
new_tags_tr.append(unique_tags)
return self.Xtfidf_tr, new_tags_tr, refs_tr
def compute_average_xval_AUC(self, verbose=True):
'''
Computes the average AUC on xval
Inputs:
- None, it operates on self values
'''
if self.best_auc is None:
self._recover('best_auc')
aucs = list(self.best_auc.values())
average_auc = np.mean(aucs)
return average_auc
def obtain_labels_from_Preds(self, Preds, threshold,
categories=None, verbose=True):
'''
Produces the multilabel tag prediction from individual predictions of
every classifier
Inputs:
- Preds: predictions matrix, one column per category, as many rows
as patterns
- threshold: multilabel threshold
'''
if self.categories is None:
self._recover('categories')
labels_preds = []
Ndocs = Preds.shape[0]
for kdoc in range(0, Ndocs):
l = []
p = Preds[kdoc, :]
# Normalize individual predictions, the maximum becomes 1.0 in all
# cases
if max(p) > 0:
p = p / max(p)
orden = np.argsort(-p)
for index in orden:
if p[index] > threshold:
l.append(self.categories[index])
labels_preds.append(l)
return labels_preds
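    # Worked sketch (added for clarity; not in the original code): with
    # categories ['A', 'B', 'C'], a prediction row [0.2, 1.0, 0.5] and a
    # threshold of 0.4, the row is scaled by its maximum (unchanged here) and
    # categories are visited by decreasing score, keeping those above the
    # threshold, so the predicted label list is ['B', 'C'].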
def compute_confusion_matrix(self, orig_tags, best_pred_tags, filename,
sorted_categories=[], verbose=True):
'''
        computes the confusion matrix on test (multiclass case)
Inputs:
- orig_tags: original labels
- best_pred_tags: predicted labels
- filename: file to save results
- sorted_categories: categories to take into account, respecting
the order
'''
if self.categories is None:
self._recover('categories')
if len(sorted_categories) > 0:
labels_categories = sorted_categories
else:
labels_categories = self.categories
self.CONF = confusion_matrix(orig_tags, best_pred_tags,
labels=labels_categories)
pathfilename = os.path.join(
self._project_path + self._subfolders['results'], filename)
with open(pathfilename, 'wb') as f:
pickle.dump(self.CONF, f)
return self.CONF
def compute_confusion_matrix_multilabel(self, orig_tags, best_pred_tags,
filename, sorted_categories=[],
verbose=True):
'''
        computes the confusion matrix for a multilabel set (multilabel case)
Inputs:
- orig_tags: original labels
- best_pred_tags: predicted labels
- filename: file to save results
- sorted_categories: categories to take into account, respecting
the order
'''
if self.categories is None:
self._recover('categories')
if len(sorted_categories) > 0:
labels_categories = sorted_categories
else:
labels_categories = self.categories
Ncats = len(labels_categories)
self.CONF = np.zeros((Ncats, Ncats))
NP = len(orig_tags)
for k in range(0, NP):
cats_orig = orig_tags[k]
cats_pred = best_pred_tags[k]
for m in range(0, Ncats):
for n in range(0, Ncats):
cat_orig = labels_categories[m]
cat_pred = labels_categories[n]
if cat_orig in cats_orig and cat_pred in cats_pred:
self.CONF[m, n] += 1.0
# self.CONF = confusion_matrix(orig_tags, best_pred_tags,
# labels=labels_categories)
pathfilename = os.path.join(
self._project_path + self._subfolders['results'], filename)
with open(pathfilename, 'wb') as f:
pickle.dump(self.CONF, f)
return self.CONF
def compute_confusion_matrix_multilabel_v2(
self, orig_tags, best_pred_tags, filename, sorted_categories=[],
order_sensitive=False, verbose=True):
'''
computes the confusion matrix for a multilabel set
Inputs:
- orig_tags: original labels
- best_pred_tags: predicted labels
- filename: file to save results
- sorted_categories: categories to take into account, respecting
the order
- order_sensitive: indicates if the computation is order sensitive
or not
'''
# Set dump factor
if order_sensitive:
dump_factor = 0.5
else:
dump_factor = 1.0
# Take categories from the input arguments. If not, from the object.
# If not, from a file using the recover method.
if len(sorted_categories) > 0:
categories = sorted_categories
else:
# Get list of categories
if self.categories is None:
self._recover('categories')
categories = self.categories
# Validate true labels
n = len([x for x in orig_tags if len(x) == 0])
if n > 0:
print('---- WARNING: {} samples without labels '.format(n) +
'will be ignored.')
# Validate predicted labels
n = len([x for x in best_pred_tags if len(x) == 0])
if n > 0:
print('---- WARNING: {} samples without predictions '.format(n) +
'will be ignored.')
# Loop over the true and predicted labels
Ncats = len(categories)
self.CONF = np.zeros((Ncats, Ncats))
for cats_orig, cats_pred in zip(orig_tags, best_pred_tags):
if len(cats_orig) > 0 and len(cats_pred) > 0:
# Compute numerical true label vector
value_orig = 1.0
p = np.zeros(Ncats)
for c in cats_orig:
p[categories.index(c)] = value_orig
value_orig *= dump_factor
p = p / np.sum(p)
# Compute numerical prediction label vector
value_pred = 1.0
q = np.zeros(Ncats)
for c in cats_pred:
q[categories.index(c)] = value_pred
value_pred *= dump_factor
q = q / np.sum(q)
# Compute diagonal elements
min_pq = np.minimum(p, q)
M = np.diag(min_pq)
# Compute non-diagonal elements
p_ = p - min_pq
q_ = q - min_pq
z = 1 - np.sum(min_pq)
if z > 0:
M += (p_[:, np.newaxis] * q_) / z
self.CONF += M
pathfilename = os.path.join(
self._project_path, self._subfolders['results'], filename)
with open(pathfilename, 'wb') as f:
pickle.dump(self.CONF, f)
return self.CONF
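    # Worked sketch (added for clarity; not in the original code): with
    # categories ['A', 'B'], true labels ['A'] and predicted labels ['B'],
    # p = [1, 0] and q = [0, 1]; their element-wise minimum is zero, so nothing
    # goes to the diagonal and the off-diagonal term p_ q_ / z places one full
    # unit of mass in cell (A, B), i.e. one whole error from A towards B.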
def compute_EMD_error(self, orig_tags, best_pred_tags, fpath,
order_sensitive=False):
'''
computes the confusion matrix for a multilabel set
Args:
- orig_tags: original labels
- best_pred_tags: predicted labels
- fpath: path to the file with the similarity matrix
- order_sensitive: indicates if the computation is order sensitive
or not
'''
# ######################
# Load similarity values
if type(fpath) is str:
df_S = pd.read_excel(fpath)
# Compute cost matrix
C = 1 - df_S[df_S.columns].values
# WARNING: For later versions of pandas, you might need to use:
            # Note that df_S.columns should be taken from 1 onwards, because
            # the first column is taken as the index column.
# C = 1 - df_S[df_S.columns[1:]].to_numpy()
else:
# This is a combination of cost matrices that takes the
# component-wise minimum of the costs
C = 1
for fp in fpath:
df_S = pd.read_excel(fp)
# Compute cost matrix
Cf = 1 - df_S[df_S.columns].values
C = np.minimum(C, Cf)
# This combination of cost matrices takes each cost matrix with a
# different weights. Only for two Cost matrices.
# df_S = pd.read_excel(fpath[0])
# C1 = 1 - df_S[df_S.columns].values
# df_S = pd.read_excel(fpath[1])
# Cs = 1 - df_S[df_S.columns].values
# ncat = Cs.shape[0]
# C = np.minimum(1 - np.eye(ncat),
# np.minimum(0.25 + 0.75 * Cs, 0.5 + 0.5 * C1))
        # This is to make sure that C is "C-contiguous", a requirement of pyemd
C = np.ascontiguousarray(C, dtype=np.float64)
# Set dump factor
if order_sensitive:
dump_factor = 0.5
else:
dump_factor = 1.0
# Take categories in the order of the cost matrix
categories = df_S.columns.tolist()
# Validate true labels
n = len([x for x in orig_tags if len(x) == 0])
if n > 0:
print(f'---- WARNING: {n} samples without labels will be ignored')
# Validate predicted labels
n = len([x for x in best_pred_tags if len(x) == 0])
if n > 0:
print(f'---- WARNING: {n} samples without preds will be ignored')
# ##################
# Compute EMD errors
# Loop over the true and predicted labels
Ncats = len(categories)
self.emd = 0
count = 0
for cats_orig, cats_pred in zip(orig_tags, best_pred_tags):
if len(cats_orig) > 0 and len(cats_pred) > 0:
# Compute numerical true label vector
value_orig = 1.0
p = np.zeros(Ncats)
for c in cats_orig:
p[categories.index(c)] = value_orig
value_orig *= dump_factor
p = p / np.sum(p)
# Compute numerical prediction label vector
value_pred = 1.0
q = np.zeros(Ncats)
for c in cats_pred:
q[categories.index(c)] = value_pred
value_pred *= dump_factor
q = q / np.sum(q)
# Compute EMD distance for the given sample
emd_i = pyemd.emd(p, q, C)
self.emd += emd_i
count += 1
self.emd /= count
return self.emd
def compute_sorted_errors(self, CONF, categories):
eps = 1e-20
# Sample size per category
n_cat = len(categories)
ns_cat = CONF.sum(axis=1, keepdims=True)
# Total sample size
ns_tot = CONF.sum()
# Compute all-normalized confusion matrix
CONF_a = CONF / ns_tot
# Compute row-normalized confusion matrix
CONF_r = ((CONF.astype('float') + eps) /
(ns_cat + n_cat*eps))
        # Sort errors by absolute and by row-relative magnitude
unsorted_values = [(categories[i], categories[j], 100*CONF_a[i, j],
100*CONF_r[i, j], 100*ns_cat[i][0]/ns_tot)
for i in range(n_cat) for j in range(n_cat)]
sorted_values_a = sorted(unsorted_values, key=lambda x: -x[2])
sorted_values_r = sorted(unsorted_values, key=lambda x: -x[3])
# Remove diagonal elements
sorted_values_a = [x for x in sorted_values_a if x[0] != x[1]]
sorted_values_r = [x for x in sorted_values_r if x[0] != x[1]]
# Remove relative errors of categories with zero samples
sorted_values_r = [x for x in sorted_values_r
if ns_cat[categories.index(x[0])] > 0]
cols = ['Cat. real', 'Clasif', 'Err/total (%)', 'Error/cat (%)',
'Peso muestral']
df_ranked_abs = pd.DataFrame(sorted_values_a, columns=cols)
df_ranked_rel = pd.DataFrame(sorted_values_r, columns=cols)
f_path = os.path.join(self._project_path, self._subfolders['results'],
'ranked_abs_errors.xlsx')
df_ranked_abs.to_excel(f_path)
f_path = os.path.join(self._project_path, self._subfolders['results'],
'ranked_rel_errors.xlsx')
df_ranked_rel.to_excel(f_path)
return df_ranked_abs, df_ranked_rel
def compute_error_confusion_matrix(self, CONF, normalize=True,
verbose=True):
# Returns the ratio of elements outside the diagonal
allsum = np.sum(CONF)
diagsum = np.sum(np.diagonal(CONF))
offdiagsum = allsum - diagsum
error = offdiagsum / allsum
return error
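    # Worked sketch (added for clarity; not in the original code): for
    # CONF = [[8, 2], [1, 9]] the total mass is 20 and the diagonal holds 17,
    # so the returned error rate is (20 - 17) / 20 = 0.15.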
def draw_confusion_matrix(self, CONF, filename, sorted_categories=[],
verbose=True, normalize=True):
'''
draws the CM and saves it as a png file
Inputs:
- CONF: conf matrix to be stored
- filename: filename
- sorted_categories: list of sorted categories
- normalize: indicates to normalize CONF
'''
        # An extremely small value to avoid division by zero
eps = 1e-20
n_cat = len(sorted_categories)
if len(sorted_categories) > 0:
labels_categories = sorted_categories
else:
if self.categories is None:
self._recover('categories')
labels_categories = self.categories
if normalize:
# Normalize
CONF = ((CONF.astype('float') + eps) /
(CONF.sum(axis=1, keepdims=True) + n_cat*eps))
else:
CONF = CONF.astype('float')
plt.figure(figsize=(15, 12))
cmap = plt.cm.Blues
plt.imshow(CONF, interpolation='nearest', cmap=cmap)
plt.colorbar()
tick_marks = np.arange(len(labels_categories))
plt.xticks(tick_marks, labels_categories, rotation=90)
plt.yticks(tick_marks, labels_categories)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
pathfilename = os.path.join(self._get_folder('figures'), filename)
print(f"SALVADO EN {pathfilename}")
plt.savefig(pathfilename)
plt.clf()
return
def draw_ROCS_tst(self, Preds_tst, tags_tst):
'''
draws the ROC curves for the test data
Inputs:
- Preds_tst: predicted labels
- tags_tst: true labels
'''
if self.best_models is None:
self._recover('best_models')
if self.categories is None:
self._recover('categories')
colors = ['k', 'r', 'g', 'b', 'm', 'c', 'r--', 'g--', 'b--', 'm--',
'c--', 'k--']
# retain the first tag in the labels
tags = [t[0] if len(t) > 0 else '' for t in tags_tst]
for k in range(0, len(self.categories)):
cat = self.categories[k]
y_tst = [1.0 if p == cat else -1.0 for p in tags]
preds_tst = list(Preds_tst[:, k])
fpr_tst, tpr_tst, thresholds = roc_curve(y_tst, preds_tst)
roc_auc_tst = auc(fpr_tst, tpr_tst)
model_name = self.best_models[cat]
file_output_ROC = os.path.join(
self._get_folder('ROCS_tst'),
'ROC_' + model_name + '_' + cat + '.pkl')
mdict = {'fpr_tst': list(fpr_tst), 'tpr_tst': list(tpr_tst),
'roc_auc_tst': roc_auc_tst, 'y_tst': list(y_tst),
'preds_tst': list(preds_tst)}
with open(file_output_ROC, 'wb') as f:
pickle.dump(mdict, f)
plt.figure(figsize=(15, 12))
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC test curve for category ' + cat)
text = model_name + ', AUC= ' + str(roc_auc_tst)[0:6]
plt.plot(fpr_tst, tpr_tst, colors[3], label=text, linewidth=6.0)
plt.grid(True)
plt.legend(loc="lower right")
filename = os.path.join(
self._get_folder('ROCS_tst'), cat + '_ROC_test.png')
plt.savefig(filename)
plt.close()
return
def draw_anyROC(self, Preds_tst, tags_tst, case):
'''
draws the ROC curves
Inputs:
- Preds_tst: predicted labels
- tags_tst: true labels
'''
if self.categories is None:
self._recover('categories')
colors = ['k', 'r', 'g', 'b', 'm', 'c', 'r--', 'g--', 'b--', 'm--',
'c--', 'k--']
# retain the first tag in the labels
tags = [t[0] if len(t) > 0 else '' for t in tags_tst]
aucs = []
for k in range(0, len(self.categories)):
cat = self.categories[k]
y_tst = [1.0 if p == cat else -1.0 for p in tags]
preds_tst = list(Preds_tst[:, k])
fpr_tst, tpr_tst, thresholds = roc_curve(y_tst, preds_tst)
roc_auc_tst = auc(fpr_tst, tpr_tst)
aucs.append(roc_auc_tst)
file_output_ROC = os.path.join(
self._get_folder('ROCS_tst'),
cat + '_' + 'ROC_' + case + '.pkl')
mdict = {'fpr_tst': list(fpr_tst), 'tpr_tst': list(tpr_tst),
'roc_auc_tst': roc_auc_tst, 'y_tst': list(y_tst),
'preds_tst': list(preds_tst)}
with open(file_output_ROC, 'wb') as f:
pickle.dump(mdict, f)
plt.figure(figsize=(15, 12))
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC test curve for category ' + cat)
text = case + ', AUC= ' + str(roc_auc_tst)[0:6]
plt.plot(fpr_tst, tpr_tst, colors[3], label=text, linewidth=6.0)
plt.grid(True)
plt.legend(loc="lower right")
filename = os.path.join(self._get_folder('ROCS_tst'),
cat + '_' + 'ROC_' + case + '.png')
plt.savefig(filename)
plt.close()
average_auc = np.nanmean(aucs)
return average_auc
def compute_average_test_AUC(self, verbose=True):
'''
computes the average AUC on test
Inputs:
- None, it operates on self values
'''
if self.best_models is None:
self._recover('best_models')
if self.categories is None:
self._recover('categories')
aucs = []
for k in range(0, len(self.categories)):
cat = self.categories[k]
model_name = self.best_models[cat]
filename = os.path.join(
self._get_folder('ROCS_tst'),
'ROC_' + model_name + '_' + cat + '.pkl')
with open(filename, 'rb') as f:
mdict = pickle.load(f)
auc = mdict['roc_auc_tst']
aucs.append(auc)
average_auc = np.nanmean(aucs)
return average_auc
def compute_thresholds(self, verbose=True):
'''
computes the thresholds
Inputs:
- None, it operates on self values
'''
if self.categories is None:
self._recover('categories')
if self.best_models is None:
self._recover('best_models')
Ncats = len(self.categories)
ths_dict = {}
for kcat in range(0, Ncats):
try:
cat = self.categories[kcat]
model_name = self.best_models[cat]
file_input_ROC = os.path.join(
self._get_folder('eval_ROCs'),
'ROC_' + model_name + '_' + cat + '.pkl')
with open(file_input_ROC, 'rb') as f:
mdict = pickle.load(f)
fpr = mdict['fpr_loo']
tpr = mdict['tpr_loo']
ths = mdict['thresholds']
mix = []
for k in range(0, len(fpr)):
                    # We select the threshold maximizing this convex combination
mix.append(tpr[k] + (1 - fpr[k]))
cual = np.argmax(mix)
th = ths[cual]
ths_dict.update({cat: th})
print(cat, th, cual, tpr[cual], fpr[cual])
except:
print("Error in cat ", cat)
pass
filename = os.path.join(
self._project_path + self._subfolders['results'], 'ths_dict.pkl')
with open(filename, 'wb') as f:
pickle.dump(ths_dict, f)
return
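    # Added note (not in the original code): the threshold chosen above maximises
    # tpr + (1 - fpr) over the ROC points (a Youden-style operating point); for
    # instance tpr = 0.9, fpr = 0.2 scores 1.7 and beats tpr = 0.95, fpr = 0.4,
    # which scores 1.55.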
def compute_cardinality(self, tags):
'''
computes the cardinality of the tags
Inputs:
- tags: labels
'''
C = np.mean([len(set(l)) for l in tags])
return C
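    # Worked sketch (added for clarity; not in the original code): for
    # tags = [['A'], ['A', 'B']] the per-document label counts are 1 and 2,
    # so the returned cardinality is their mean, 1.5.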
def compute_label_density(self, tags):
'''
Computes the label density
Inputs:
- tags: labels
'''
# total number of possible labels
NL = len(set(itertools.chain.from_iterable(tags)))
D = np.mean([len(set(l)) / NL for l in tags])
return D
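    # Worked sketch (added for clarity; not in the original code): with the same
    # tags = [['A'], ['A', 'B']], the overall label set is {A, B} (NL = 2), the
    # per-document densities are 1/2 and 2/2, and the returned density is 0.75.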
def JaccardIndex(self, orig, pred):
'''
Computes the Jaccard index
Inputs:
- orig: original labels
- pred: predicted labels
'''
accs = []
for k in range(0, len(orig)):
l_orig = orig[k]
l_pred = pred[k]
num = len(set(l_orig).intersection(l_pred))
den = len(set(l_orig + l_pred))
acc = num / den
accs.append(acc)
JI = np.mean(accs)
return JI
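    # Worked sketch (added for clarity; not in the original code): for a single
    # document with orig = [['A', 'B']] and pred = [['B', 'C']], the
    # intersection holds 1 label and the union holds 3, so the Jaccard index
    # is 1/3.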
def compute_multilabel_threshold(self, p, alpha, option, th_values,
verbose=True):
'''
Computes the multilabel threshold
Inputs:
- p: RBO parameter, ``p`` is the probability of looking for overlap
at rank k + 1 after having examined rank k
- alpha: convex Jaccard-RBO combination parameter
- option: sorting option for multilabel prediction
- th_values: range of threshold values to be evaluated
'''
if self.Xtfidf_tr is None:
self._recover('Xtfidf_tr')
if self.Preds_tr is None:
self._recover('Preds_tr')
# Warning tags_tr may have duplicates...
self.tags_tr = [list(OrderedDict.fromkeys(l)) for l in self.tags_tr]
if verbose:
print('-' * 50)
COST = []
DENS_pred = []
DENS_true = []
COST_dens = []
density_true = self.compute_cardinality(self.tags_tr)
# to normalize Jaccard_RBO_cost, depends on p
baseline = [0]
for k in range(2, 50):
l = list(range(1, k))
baseline.append(rbo(l, l, p)['min'])
P = Predictor(self._project_path, self._subfolders, verbose=False)
for threshold in th_values:
multilabel_pred_tr, labels_pred_tr = P.obtain_multilabel_preds(
self.Preds_tr, option, threshold, verbose=True)
density_pred = self.compute_cardinality(labels_pred_tr)
DENS_pred.append(density_pred)
DENS_true.append(density_true)
dens_error = (density_pred - density_true) ** 2
COST_dens.append(dens_error)
            # Computing the Jaccard-RBO cost
jrbos = []
for k in range(0, len(self.tags_tr)):
values = []
for key in labels_pred_tr[k]:
values.append((key, multilabel_pred_tr[k][key]['p']))
values.sort(key=lambda x: x[1], reverse=True)
l_pred = []
for v in values:
l_pred.append(v[0])
jrbo = self.Jaccard_RBO_cost(
self.tags_tr[k], l_pred, baseline, p, alpha)
jrbos.append(jrbo)
cost_jrbo = np.mean(jrbos)
print(threshold, cost_jrbo, density_true, density_pred, )
COST.append(cost_jrbo)
max_cost = max(COST)
max_dens = max(COST_dens)
COST_dens = [x / max_dens * max_cost for x in COST_dens]
plt.figure(figsize=(15, 12))
plt.xlabel('Th')
plt.ylabel('Jackard-RBO cost')
plt.title('Jackard-RBO and Label Density costs for p =' + str(p) +
' and alpha= ' + str(alpha))
plt.plot(th_values, COST, 'b', label='Jackard-RBO cost', linewidth=3.0)
plt.plot(th_values, COST_dens, 'r', label='Labels Density cost',
linewidth=3.0)
cual_min = np.argmin(COST)
th_JRBO = th_values[cual_min]
plt.plot(th_values[cual_min], COST[cual_min], 'bo',
label='Minimum Jackard-RBO cost', linewidth=3.0)
cual_min = np.argmin(COST_dens)
th_DENS = th_values[cual_min]
plt.plot(th_values[cual_min], COST_dens[cual_min], 'ro',
label='Minimum Labels Density cost', linewidth=3.0)
plt.legend(loc="upper right")
plt.grid(True)
filename = os.path.join(
self._project_path + self._subfolders['results'],
'JRBO_COST_tr_p_' + str(p) + '_alpha_' + str(alpha) + '.png')
plt.savefig(filename)
plt.close()
self.multilabel_th = np.mean([th_JRBO, th_DENS])
filename = os.path.join(
self._project_path + self._subfolders['training_data'],
'multilabel_th.pkl')
with open(filename, 'wb') as f:
pickle.dump(self.multilabel_th, f)
filename = os.path.join(
self._project_path + self._subfolders['export'],
'multilabel_th.pkl')
with open(filename, 'wb') as f:
pickle.dump(self.multilabel_th, f)
return self.multilabel_th
def draw_costs_on_test(self, p, alpha, option, th_values, verbose=True):
'''
draws the multilabel cost for the test data
Inputs:
- p: RBO parameter, ``p`` is the probability of looking for
overlap at rank k + 1 after having examined rank k
- alpha: convex Jaccard-RBO combination parameter
- option: sorting option for multilabel prediction
- th_values: range of threshold values to be evaluated
'''
if self.Xtfidf_tst is None:
self._recover('Xtfidf_tst')
if self.Preds_tst is None:
self._recover('Preds_tst')
if self.multilabel_th is None:
self._recover('multilabel_th')
# Warning tags_tst may have duplicates...
self.tags_tst = [list(OrderedDict.fromkeys(l)) for l in self.tags_tst]
if verbose:
print('-' * 50)
COST = []
DENS_pred = []
DENS_true = []
COST_dens = []
density_true = self.compute_cardinality(self.tags_tst)
# to normalize Jaccard_RBO_cost, depends on p
baseline = [0]
for k in range(2, 50):
l = list(range(1, k))
baseline.append(rbo(l, l, p)['min'])
P = Predictor(self._project_path, self._subfolders, verbose=False)
for threshold in th_values:
multilabel_pred_tst, labels_pred_tst = P.obtain_multilabel_preds(
self.Preds_tst, option, threshold, verbose=True)
density_pred = self.compute_cardinality(labels_pred_tst)
DENS_pred.append(density_pred)
DENS_true.append(density_true)
dens_error = (density_pred - density_true) ** 2
COST_dens.append(dens_error)
            # Computing the Jaccard-RBO cost
jrbos = []
for k in range(0, len(self.tags_tst)):
values = []
for key in labels_pred_tst[k]:
values.append((key, multilabel_pred_tst[k][key]['p']))
values.sort(key=lambda x: x[1], reverse=True)
l_pred = []
for v in values:
l_pred.append(v[0])
jrbo = self.Jaccard_RBO_cost(
self.tags_tst[k], l_pred, baseline, p, alpha)
jrbos.append(jrbo)
cost_jrbo = np.mean(jrbos)
print(threshold, cost_jrbo, density_true, density_pred, )
COST.append(cost_jrbo)
max_cost = max(COST)
max_dens = max(COST_dens)
COST_dens = [x / max_dens * max_cost for x in COST_dens]
plt.figure(figsize=(15, 12))
plt.xlabel('Th')
plt.ylabel('Jackard-RBO cost')
plt.title('Jackard-RBO and Label Density costs for p =' + str(p) +
' and alpha= ' + str(alpha))
plt.plot(th_values, COST, 'b', label='Jackard-RBO cost', linewidth=3.0)
plt.plot(th_values, COST_dens, 'r', label='Labels Density cost',
linewidth=3.0)
cual_min = np.argmin(abs(th_values - self.multilabel_th))
plt.plot(th_values[cual_min], COST[cual_min], 'bo',
label='Jackard-RBO cost at threshold', linewidth=3.0)
plt.plot(th_values[cual_min], COST_dens[cual_min], 'ro',
label='Labels Density cost at threshold', linewidth=3.0)
plt.legend(loc="upper right")
plt.grid(True)
filename = os.path.join(
self._project_path + self._subfolders['results'],
'JRBO_COST_tst_p_' + str(p) + '_alpha_' + str(alpha) + '.png')
plt.savefig(filename)
plt.close()
return
def load_multilabel_threshold(self, path2export=''):
'''
Loads the multilabel thresholds
Inputs:
- path2export: export path
'''
if path2export != '':
print('Loading multilabel_th from export')
filename = os.path.join(path2export, 'multilabel_th.pkl')
with open(filename, 'rb') as f:
self.multilabel_th = pickle.load(f)
else:
if self.multilabel_th is None:
self._recover('multilabel_th')
return self.multilabel_th
def Jaccard_RBO_cost(self, l_orig, l_pred, baseline, p, alpha):
'''
Computes a convex combination of the Jaccard and RBO costs
Inputs:
- l_orig: original labels
- l_pred: predicted labels
- baseline: normalizing values
- p: RBO parameter, ``p`` is the probability of looking for overlap
at rank k + 1 after having examined rank k
- alpha: convex Jaccard-RBO combination parameter
'''
try:
if len(l_orig) > 0:
num = len(set(l_orig).intersection(l_pred))
den = len(set(l_orig + l_pred))
ji = 1.0 - num / den
else:
if len(l_pred) == 0:
# empty labels and empty predict means cost = 0.0
ji = 0
else:
# empty labels and non-empty predict means cost = 1.0
ji = 1.0
r = 0
L = min((len(l_orig), len(l_pred)))
if L > 0:
r = 1 - rbo(l_orig, l_pred, p)['min'] / baseline[L]
else:
r = 1.0
if len(l_orig) == 0 and len(l_pred) == 0:
r = 0.0
except:
print('Error in Jaccard_RBO_cost ' +
'----------------------------------------------------')
import code
code.interact(local=locals())
pass
jrbo = (alpha * ji + (1 - alpha) * r) / 2.0
return jrbo
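# Illustrative note (added for clarity, not part of the original code): for
# l_orig = ['a', 'b'] and l_pred = ['b', 'c'], the Jaccard part is
# ji = 1 - |{a,b} & {b,c}| / |{a,b,c}| = 1 - 1/3, the RBO part r is one minus the
# rank-overlap score normalized by the identity baseline of the same length, and
# the combined cost is jrbo = (alpha * ji + (1 - alpha) * r) / 2.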
def align_strings(self, string0, string1, string2, string3, L, M, N, P):
'''
Aligns strings into columns
'''
empty = ' '
# if len(string1) > M or len(string2) > N or len(string3) > P:
# import code
# code.interact(local=locals())
if L - len(string0) > 0:
string0 = string0 + empty[0: L - len(string0)]
if M - len(string1) > 0:
string1 = string1 + empty[0: M - len(string1)]
if N - len(string2) > 0:
string2 = string2 + empty[0: N - len(string2)]
if P - len(string3) > 0:
string3 = string3 + empty[0: P - len(string3)]
aligned_string = string0 + '| ' + string1 + '| ' + string2 + '| ' + string3 + '\r\n'
return aligned_string
def get_pred_weights(self, refs, label_preds, multilabel_preds):
'''
Returns the normalized prediction weights (apparently unused)
Inputs:
- refs: *unused*
- label_preds: predicted label keys for each sample
- multilabel_preds: per-sample dicts holding the probability 'p' of each predicted label
'''
weights = []
for k, labels in enumerate(label_preds):
w0 = [multilabel_preds[k][key]['p'] for key in labels]
scale = np.sum(w0)
weights.append([w / scale for w in w0])
return weights
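# Illustrative note (added for clarity): for a single sample with raw scores
# {'labelA': 0.6, 'labelB': 0.2}, the returned weights are [0.75, 0.25], i.e. the
# per-label scores normalized to sum to 1. The label names are hypothetical.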
def write_prediction_report(self, refs_tst, tags_tst, labels_pred_tst,
multilabel_pred_tst, filename_out):
'''
writes a simple prediction report in text format
Inputs:
- refs_tst: references
- tags_tst: original labels
- labels_pred_tst: predicted labels
- multilabel_pred_tst: multilabel predicted labels
- filename_out: file to save results
'''
# writing report for the best threshold value
string0 = 'PROJECT REFERENCE'
string1 = 'TARGET LABELS'
string2 = 'PREDICTED LABELS'
string3 = ' '
data = [self.align_strings(string0, string1, string2, string3, 20, 30,
50, 10)]
data.append('=' * 80 + '\r\n')
for k in range(0, len(tags_tst)):
string0 = refs_tst[k]
string1 = ''
for t in tags_tst[k]:
string1 += t + ', '
if len(tags_tst[k]) == 0:
string1 += '--------------'
values = labels_pred_tst[k]
if len(values) == 0:
string2 = '--------------'
else:
string2 = ''
pesos = []
for key in values:
pesos.append(multilabel_pred_tst[k][key]['p'])
for key in values:
weight = multilabel_pred_tst[k][key]['p'] / np.sum(pesos)
str_weight = str(weight)[0:5]
string2 += key + '(' + str_weight + '), '
string3 = ' '
cadena = self.align_strings(string0, string1, string2, string3,
20, 30, 50, 10)
data.append(cadena)
filename = os.path.join(self._project_path,
self._subfolders['results'], filename_out)
with open(filename, 'w') as f:
f.writelines(data)
print('Saved ', filename)
return
| 37.484848
| 100
| 0.508527
| 51,324
| 0.987817
| 0
| 0
| 0
| 0
| 0
| 0
| 15,467
| 0.297688
|
b8f4752d0093b3381dd899cada064a8f50a481ea
| 16
|
py
|
Python
|
cdn/__init__.py
|
Kingjmk/mlfaati
|
12c0dcbe0389c2c1da0bde80509fb3374955e293
|
[
"MIT"
] | 1
|
2021-01-04T07:34:34.000Z
|
2021-01-04T07:34:34.000Z
|
cdn/__init__.py
|
Kingjmk/mlfaati
|
12c0dcbe0389c2c1da0bde80509fb3374955e293
|
[
"MIT"
] | null | null | null |
cdn/__init__.py
|
Kingjmk/mlfaati
|
12c0dcbe0389c2c1da0bde80509fb3374955e293
|
[
"MIT"
] | null | null | null |
"""
CDN App
"""
| 4
| 7
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 1
|
b8f6634f75893c98121099a51543c4b0b9463dc6
| 2,722
|
py
|
Python
|
data/r_outletsdata.py
|
ljunhui/Koufu_SG_Map
|
8d440605cc90c49c6635f4d5202bd262e30b0efb
|
[
"MIT"
] | 1
|
2021-04-01T13:57:15.000Z
|
2021-04-01T13:57:15.000Z
|
data/r_outletsdata.py
|
ljunhui/Koufu_SG_Map
|
8d440605cc90c49c6635f4d5202bd262e30b0efb
|
[
"MIT"
] | null | null | null |
data/r_outletsdata.py
|
ljunhui/Koufu_SG_Map
|
8d440605cc90c49c6635f4d5202bd262e30b0efb
|
[
"MIT"
] | null | null | null |
# %% Import
import numpy as np
import pandas as pd
import requests
import os
from bs4 import BeautifulSoup
"""
Takes a dictionary of relevant brands and their URLs and returns a raw csv file
"""
# %% Functions
def outlets_crawl(brand, url):
"""
Returns a raw, unformatted df of outlets with its brand from the given url
"""
page = requests.get(url)
soup = BeautifulSoup(page.content, "lxml")
# ensure crawler had actual results to work with.
def _check_results(class_term, soup=soup):
results = soup.find_all(attrs={"class": class_term})
if len(results) == 0:
raise ValueError("No outlets found, check class_term or url.")
return results
try:
results = _check_results("outlet_item")
except ValueError:
results = _check_results("lease_item")
# continue
_ls = []
for result in results:
_ls.append([i for i in result.stripped_strings])
df = pd.DataFrame(_ls)
df.insert(0, "brand", brand, allow_duplicates=True)
return df
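# Example usage (illustrative only; the brand/URL pair is taken from the main() dict below):
#
#     df = outlets_crawl("Koufu", "https://www.koufu.com.sg/our-brands/food-halls/koufu/")
#     print(df.head())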
def loop_outlets_crawl(dict, outputfn):
"""
Loops the outlets_crawl func through a dictionary of brands and their urls. Returns a concatenated df and saves it as a temporary csv.
"""
_ls = []
for brand, url in dict.items():
_ls.append(outlets_crawl(brand, url))
print(f"{brand} done.")
df = pd.concat(_ls)
df.to_csv(outputfn, index=False)
def main():
url_dict = {
"Koufu": "https://www.koufu.com.sg/our-brands/food-halls/koufu/",
"Cookhouse": "https://www.koufu.com.sg/our-brands/food-halls/cookhouse/",
"Rasapura": "https://www.koufu.com.sg/our-brands/food-halls/rasapura-masters/",
"ForkSpoon": "https://www.koufu.com.sg/our-brands/food-halls/fork-spoon/",
"HappyHawkers": "https://www.koufu.com.sg/our-brands/food-halls/happy-hawkers/",
"Gourmet": "https://www.koufu.com.sg/our-brands/food-halls/gourmet-paradise/",
"R&B": "https://www.koufu.com.sg/our-brands/concept-stores/rb-tea/",
"1983NY": "https://www.koufu.com.sg/our-brands/concept-stores/1983-a-taste-of-nanyang/",
"Supertea": "https://www.koufu.com.sg/our-brands/concept-stores/supertea/",
"1983CT": "https://www.koufu.com.sg/our-brands/cafe-restaurants/1983-coffee-toast/",
"Elemen": "https://www.koufu.com.sg/our-brands/cafe-restaurants/elemen-%e5%85%83%e7%b4%a0/",
"Grove": "https://www.koufu.com.sg/our-brands/cafe-restaurants/grovecafe/",
}
outputfn = "./r_outletsdata.csv"
if os.path.isfile(outputfn):
os.remove(outputfn)
loop_outlets_crawl(url_dict, outputfn)
# %% Main
if __name__ == "__main__":
main()
os.system("pause")
| 33.604938
| 134
| 0.653564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,459
| 0.536003
|
b8f7dac938dacb0d70352e73d7ee85999cfcb966
| 5,918
|
py
|
Python
|
ue4docker/setup_cmd.py
|
Wadimich/ue4-docker
|
01ef4af09cf8e7b9e845203031b2bed3db06034b
|
[
"MIT"
] | 1
|
2021-05-19T16:41:04.000Z
|
2021-05-19T16:41:04.000Z
|
ue4docker/setup_cmd.py
|
Wadimich/ue4-docker
|
01ef4af09cf8e7b9e845203031b2bed3db06034b
|
[
"MIT"
] | null | null | null |
ue4docker/setup_cmd.py
|
Wadimich/ue4-docker
|
01ef4af09cf8e7b9e845203031b2bed3db06034b
|
[
"MIT"
] | null | null | null |
import docker, os, platform, requests, shutil, subprocess, sys
from .infrastructure import *
# Runs a command without displaying its output and returns the exit code
def _runSilent(command):
result = SubprocessUtils.capture(command, check=False)
return result.returncode
# Performs setup for Linux hosts
def _setupLinux():
# Pull the latest version of the Alpine container image
alpineImage = 'alpine:latest'
SubprocessUtils.capture(['docker', 'pull', alpineImage])
# Start the credential endpoint with blank credentials
endpoint = CredentialEndpoint('', '')
endpoint.start()
try:
# Run an Alpine container to see if we can access the host port for the credential endpoint
SubprocessUtils.capture([
'docker', 'run', '--rm', alpineImage,
'wget', '--timeout=1', '--post-data=dummy', 'http://{}:9876'.format(NetworkUtils.hostIP())
], check=True)
# If we reach this point then the host port is accessible
print('No firewall configuration required.')
except:
# The host port is blocked, so we need to perform firewall configuration
print('Creating firewall rule for credential endpoint...')
# Create the firewall rule
subprocess.run(['iptables', '-I', 'INPUT', '-p', 'tcp', '--dport', '9876', '-j', 'ACCEPT'], check=True)
# Ensure the firewall rule persists after reboot
# (Requires the `iptables-persistent` service to be installed and running)
os.makedirs('/etc/iptables', exist_ok=True)
subprocess.run('iptables-save > /etc/iptables/rules.v4', shell=True, check=True)
# Inform users of the `iptables-persistent` requirement
print('Firewall rule created. Note that the `iptables-persistent` service will need to')
print('be installed for the rule to persist after the host system reboots.')
finally:
# Stop the credential endpoint
endpoint.stop()
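# Note (added for illustration): on Debian-style hosts the persisted rule can be checked with
# `iptables -L INPUT -n | grep 9876`; the `iptables-persistent` service then reloads
# /etc/iptables/rules.v4 at boot so the credential endpoint stays reachable after a restart.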
# Performs setup for Windows Server hosts
def _setupWindowsServer():
# Check if we need to configure the maximum image size
requiredLimit = WindowsUtils.requiredSizeLimit()
if DockerUtils.maxsize() < requiredLimit:
# Attempt to stop the Docker daemon
print('Stopping the Docker daemon...')
subprocess.run(['sc.exe', 'stop', 'docker'], check=True)
# Attempt to set the maximum image size
print('Setting maximum image size to {}GB...'.format(requiredLimit))
config = DockerUtils.getConfig()
sizeOpt = 'size={}GB'.format(requiredLimit)
if 'storage-opts' in config:
config['storage-opts'] = list([o for o in config['storage-opts'] if o.lower().startswith('size=') == False])
config['storage-opts'].append(sizeOpt)
else:
config['storage-opts'] = [sizeOpt]
DockerUtils.setConfig(config)
# Attempt to start the Docker daemon
print('Starting the Docker daemon...')
subprocess.run(['sc.exe', 'start', 'docker'], check=True)
else:
print('Maximum image size is already correctly configured.')
# Determine if we need to configure Windows firewall
ruleName = 'Open TCP port 9876 for ue4-docker credential endpoint'
ruleExists = _runSilent(['netsh', 'advfirewall', 'firewall', 'show', 'rule', 'name={}'.format(ruleName)]) == 0
if ruleExists == False:
# Add a rule to ensure Windows firewall allows access to the credential helper from our containers
print('Creating firewall rule for credential endpoint...')
subprocess.run([
'netsh', 'advfirewall',
'firewall', 'add', 'rule',
'name={}'.format(ruleName), 'dir=in', 'action=allow', 'protocol=TCP', 'localport=9876'
], check=True)
else:
print('Firewall rule for credential endpoint is already configured.')
# Determine if the host system is Windows Server Core and lacks the required DLL files for building our containers
hostRelease = WindowsUtils.getWindowsRelease()
requiredDLLs = WindowsUtils.requiredHostDlls(hostRelease)
dllDir = os.path.join(os.environ['SystemRoot'], 'System32')
existing = [dll for dll in requiredDLLs if os.path.exists(os.path.join(dllDir, dll))]
if len(existing) != len(requiredDLLs):
# Determine if we can extract DLL files from the full Windows base image (version 1809 and newer only)
tags = requests.get('https://mcr.microsoft.com/v2/windows/tags/list').json()['tags']
if hostRelease in tags:
# Pull the full Windows base image with the appropriate tag if it does not already exist
image = 'mcr.microsoft.com/windows:{}'.format(hostRelease)
print('Pulling full Windows base image "{}"...'.format(image))
subprocess.run(['docker', 'pull', image], check=True)
# Start a container from which we will copy the DLL files, bind-mounting our DLL destination directory
print('Starting a container to copy DLL files from...')
mountPath = 'C:\\dlldir'
container = DockerUtils.start(
image,
['timeout', '/t', '99999', '/nobreak'],
mounts = [docker.types.Mount(mountPath, dllDir, 'bind')],
stdin_open = True,
tty = True,
remove = True
)
# Copy the DLL files to the host
print('Copying DLL files to the host system...')
DockerUtils.execMultiple(container, [['xcopy', '/y', os.path.join(dllDir, dll), mountPath + '\\'] for dll in requiredDLLs])
# Stop the container
print('Stopping the container...')
container.stop()
else:
print('The following DLL files will need to be manually copied into {}:'.format(dllDir))
print('\n'.join(['- {}'.format(dll) for dll in requiredDLLs if dll not in existing]))
else:
print('All required DLL files are already present on the host system.')
def setup():
# We don't currently support auto-config for VM-based containers
if platform.system() == 'Darwin' or (platform.system() == 'Windows' and WindowsUtils.isWindowsServer() == False):
print('Manual configuration is required under Windows 10 and macOS. Automatic configuration is not available.')
return
# Perform setup based on the host system type
if platform.system() == 'Linux':
_setupLinux()
else:
_setupWindowsServer()
| 38.679739
| 126
| 0.70784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,260
| 0.550862
|
b8f9dd022646dc722a37cd9325b2748aca492315
| 180
|
py
|
Python
|
src/lesson_developer_tools/unittest_truth.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | 3
|
2018-08-14T09:33:52.000Z
|
2022-03-21T12:31:58.000Z
|
src/lesson_developer_tools/unittest_truth.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | null | null | null |
src/lesson_developer_tools/unittest_truth.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | null | null | null |
import unittest
class TruthTest(unittest.TestCase):
def testAssertTrue(self):
self.assertTrue(True)
def testAssertFalse(self):
self.assertFalse(False)
| 15
| 35
| 0.694444
| 160
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b8f9fb55632e48828f82b3c4a79b4f130acc6705
| 6,570
|
py
|
Python
|
tia/trad/monitor_mainTr.py
|
jmakov/market_tia
|
0804fd82b4fb3ea52c171ea0759f0e10fc659bb2
|
[
"MIT"
] | 1
|
2020-07-24T04:18:57.000Z
|
2020-07-24T04:18:57.000Z
|
tia/trad/monitor_mainTr.py
|
jmakov/market_tia
|
0804fd82b4fb3ea52c171ea0759f0e10fc659bb2
|
[
"MIT"
] | null | null | null |
tia/trad/monitor_mainTr.py
|
jmakov/market_tia
|
0804fd82b4fb3ea52c171ea0759f0e10fc659bb2
|
[
"MIT"
] | 1
|
2020-07-24T04:22:14.000Z
|
2020-07-24T04:22:14.000Z
|
import sys
import time
from tia.trad.tools.io.follow import followMonitor
import tia.configuration as conf
from tia.trad.tools.errf import eReport
import ujson as json
import matplotlib.pyplot as plt
import math
import collections
import logging
from tia.trad.tools.ipc.processLogger import PROCESS_NAME
LOGGER_NAME = PROCESS_NAME + __file__.split("/")[-1]; logger = logging.getLogger(LOGGER_NAME)
reportFile = None
def pointDistance(initF, initI, point):
try:
t = initI[0]-initF[0], initI[1]-initF[1] # Vector ab
dd = math.sqrt(t[0]**2+t[1]**2) # Length of ab
t = t[0]/dd, t[1]/dd # unit vector of ab
n = -t[1], t[0] # normal unit vector to ab
ac = point[0]-initF[0], point[1]-initF[1] # vector ac
return math.fabs(ac[0]*n[0]+ac[1]*n[1]) # Projection of ac to n (the minimum distance)
except Exception: raise
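# Illustrative note (added for clarity): pointDistance returns the perpendicular distance
# from `point` to the line through initF and initI, e.g. the distance from (1, 1) to the
# line through (0, 0) and (2, 0) is 1.0:
#
#     assert abs(pointDistance((0, 0), (2, 0), (1, 1)) - 1.0) < 1e-9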
def getAvg(_list):
try:
return float(max(_list) + min(_list)) / float(2)
except Exception: raise
def shutdown():
try:
logger.debug("shutting down")
global reportFile
reportFile.close()
except Exception: raise
def run(**kwargs):
try:
global logger
global reportFile
logger = kwargs["processLogger"]
logger.debug("monitor_mainTr:hi")
_initFunds = kwargs["initFunds"]
_initItems = kwargs["initItems"]
plt.ion() # turn interactive on
fig = plt.figure()
fig.show()
# raw
ax = fig.add_subplot(221)
#hline = ax.axhline(y=_initFunds)
#vline = ax.axvline(x=_initItems)
#ax.set_xscale("log")
#ax.set_yscale("log")
data, = ax.plot([], [], 'b+')
data11, = ax.plot([], [], 'ro')
# value
ax2 = fig.add_subplot(222)
data2, = ax2.plot([], [], 'ro-')
# inside TM
ax3 = fig.add_subplot(223)
data3, = ax3.plot([], [], 'ro')
data4, = ax3.plot([],[], 'bo')
minBids, = ax3.plot([], [], "r>")
maxAsks, = ax3.plot([], [], "b>")
# top b/a
ax5 = fig.add_subplot(224)
dataI, = ax5.plot([], [], "o-")
dataF, = ax5.plot([], [], "ro-")
windowLength = 50
fundsHistory = collections.deque(maxlen=windowLength); itemsHistory = collections.deque(maxlen=windowLength)
valueHistory = collections.deque(maxlen=windowLength)
tmFundsHistory = collections.deque(maxlen=windowLength); tmItemsHistory = collections.deque(maxlen=windowLength)
tmIAHSum = collections.deque(maxlen=windowLength); tmFAHSum = collections.deque(maxlen=windowLength)
topAsksHistory = collections.deque(maxlen=10)
topBidsHistory = collections.deque(maxlen=10)
# touch report.json
#reportFile = open(conf.FN_REPORT, "w"); reportFile.close()
reportFile = open(conf.FN_REPORT, "r")
newline = followMonitor(reportFile, fig)
while 1:
try:
#for line in reportFile:
line = next(newline)
jsonObj = json.loads(line)
universeSize = float(jsonObj["universeSize"])
topAsks = jsonObj["topAsks"]; topBids = jsonObj["topBids"]
initInvF = float(_initFunds) * universeSize
initInvI = float(_initItems) * universeSize
cumulFunds = float(jsonObj["cumulFunds"])
cumulItems = float(jsonObj["cumulItems"])
#fundsHistory.append(funds); itemsHistory.append(items)
dist = pointDistance([0, initInvF], [initInvI, 0], [cumulFunds, cumulItems])
fundsHistory.append(dist)
#data.set_ydata(fundsHistory); data.set_xdata(itemsHistory)
data.set_ydata(fundsHistory); data.set_xdata(range(len(fundsHistory)))
#data11.set_ydata(funds); data11.set_xdata(items)
#data11.set_ydata(dist); data11.set_xdata(xrange(len(fundsHistory)))
ax.relim()
ax.autoscale_view(True,True,True)
tmFunds = jsonObj["tmFunds"]; tmItems = jsonObj["tmItems"]
tmFA = 0; tmIA = 0
tmFPH = collections.deque(); tmFAH = collections.deque()
tmIPH = collections.deque(); tmIAH = collections.deque()
for price in tmFunds:
amount = tmFunds[price]
tmFPH.append(price)
tmFAH.append(amount)
tmFA += amount
tmFAHSum.append(tmFA)
for price in tmItems:
amount = tmItems[price]
tmIPH.append(price)
tmIAH.append(amount)
tmIA += amount
tmIAHSum.append(tmIA)
dataI.set_ydata(tmIAHSum); dataI.set_xdata(range(len(tmIAHSum)))
dataF.set_ydata(tmFAHSum); dataF.set_xdata(range(len(tmFAHSum)))
ax5.relim()
ax5.autoscale_view(True,True,True)
value = float(jsonObj["value"]) / initInvF if initInvF else float(jsonObj["value"])
valueHistory.append(value)
data2.set_xdata(range(len(valueHistory)))
data2.set_ydata(valueHistory)
ax2.relim()
ax2.autoscale_view(True,True,True)
"""
TM stuff
"""
# make universe states pretty
tmpList = list(tmFAH) + list(tmIAH)
xDrawStart = min(tmpList)
drawedInterval = max(tmpList) - xDrawStart
spacing = float(drawedInterval) / float (len(topBids))
offset = float(spacing) / float(2)
xcords = collections.deque()
for index, bid in enumerate(topBids):
xcords.append(offset + xDrawStart + index * spacing)
minBids.set_ydata(topBids); minBids.set_xdata(xcords)
maxAsks.set_ydata(topAsks); maxAsks.set_xdata(xcords)
data3.set_xdata(tmFAH)
data3.set_ydata(tmFPH)
data4.set_xdata(tmIAH)
data4.set_ydata(tmIPH)
ax3.relim()
ax3.autoscale_view(True,True,True)
fig.canvas.draw()
#plt.savefig(conf.FN_PLOT_IMAGE)
except ValueError: continue
except Exception as ex:
eReport(__file__)
reportFile.close()
sys.exit()
| 37.118644
| 120
| 0.555403
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 933
| 0.142009
|
b8faedfafe51cef8b7826a43e9c04a44b4437054
| 1,975
|
py
|
Python
|
irocr/config.py
|
guidj/ir-orc
|
46476a847605d7d36deda5eb27d282eaa9e04d9a
|
[
"Apache-2.0"
] | 1
|
2016-04-05T15:46:28.000Z
|
2016-04-05T15:46:28.000Z
|
irocr/config.py
|
guidj/ir-orc
|
46476a847605d7d36deda5eb27d282eaa9e04d9a
|
[
"Apache-2.0"
] | null | null | null |
irocr/config.py
|
guidj/ir-orc
|
46476a847605d7d36deda5eb27d282eaa9e04d9a
|
[
"Apache-2.0"
] | null | null | null |
import os
import os.path
import ConfigParser
PROJECT_BASE = ''.join([os.path.dirname(os.path.abspath(__file__)), "/../"])
CONFIG_FILE = ''.join([PROJECT_BASE, 'config.ini'])
_UNSET = object()
class ConfigurationError(Exception):
pass
def get(section, option=None, type=None, fallback=_UNSET):
config = ConfigParser.ConfigParser()
with open(CONFIG_FILE, "r") as fp:
config.readfp(fp)
try:
if option:
if type:
if type in [str, int, float, complex]:
value = type(config.get(section, option))
elif type == bool:
value = config.getboolean(section, option)
else:
raise ConfigurationError(
'{0} is an invalid data type. `type` must be a basic data type: '
'str, bool, int, float or complex'.format(
str(type)
)
)
else:
value = config.get(section, option)
return value
else:
data = dict(config.items(section))
return data
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as exc:
if fallback is _UNSET:
raise ConfigurationError(exc)
else:
return fallback
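# Example usage (illustrative; the [server] section and its options are hypothetical):
#
#     port = get("server", "port", type=int, fallback=8080)
#     server_settings = get("server")  # whole section as a dict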
def save(section, option, value):
config = ConfigParser.ConfigParser()
if os.path.exists(CONFIG_FILE):
with open(CONFIG_FILE, "r") as fp:
config.readfp(fp)
with open(CONFIG_FILE, "w") as fp:
try:
if config.has_section(section) is False:
config.add_section(section)
config.set(section, option, str(value))
config.write(fp)
except ConfigParser.Error as exc:
raise ConfigurationError(exc)
| 25.320513
| 93
| 0.515443
| 45
| 0.022785
| 0
| 0
| 0
| 0
| 0
| 0
| 130
| 0.065823
|
b8fc2913caa7185f3d28c952db02652d27ed5b76
| 8,940
|
py
|
Python
|
mmtbx/ions/tst_environment.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/ions/tst_environment.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/ions/tst_environment.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
# -*- coding: utf-8; py-indent-offset: 2 -*-
from __future__ import division
from mmtbx.ions.environment import ChemicalEnvironment
import mmtbx.ions.identify
from mmtbx import ions
import mmtbx.monomer_library.pdb_interpretation
from mmtbx import monomer_library
from mmtbx.ions.environment import chem_carboxy, chem_amide, chem_backbone, \
chem_water, chem_phosphate, \
chem_nitrogen_primary, chem_nitrogen_secondary, \
chem_chloride, chem_oxygen, chem_nitrogen, chem_sulfur
import libtbx.load_env
from collections import OrderedDict, Counter
import os
import sys
def exercise () :
if not libtbx.env.has_module("phenix_regression"):
print "Skipping {}".format(os.path.split(__file__)[1])
return
models = OrderedDict([
("2qng", [
Counter({chem_oxygen: 7, chem_carboxy: 2, chem_water: 2,
chem_backbone: 3}),
Counter({chem_oxygen: 6, chem_carboxy: 3, chem_water: 1,
chem_backbone: 2}),
]),
("3rva", [
Counter({chem_oxygen: 6, chem_carboxy: 4, chem_water: 2}),
Counter({chem_nitrogen: 1, chem_oxygen: 4, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 1}),
Counter({chem_nitrogen: 4, chem_nitrogen_primary: 1,
chem_nitrogen_secondary: 3, chem_backbone: 3}),
]),
("1mjh", [
Counter({chem_oxygen: 6, chem_water: 3, chem_phosphate: 3}),
Counter({chem_oxygen: 6, chem_water: 3, chem_phosphate: 3}),
]),
("4e1h", [
Counter({chem_oxygen: 6, chem_carboxy: 4}),
Counter({chem_oxygen: 6, chem_carboxy: 3}),
Counter({chem_oxygen: 6, chem_carboxy: 3}),
]),
("2xuz", [
Counter({chem_oxygen: 6}),
]),
("3zli", [
Counter({chem_nitrogen: 2, chem_oxygen: 4, chem_nitrogen_secondary: 2,
chem_carboxy: 1, chem_water: 1}),
Counter({chem_sulfur: 4}),
Counter({chem_nitrogen: 2, chem_oxygen: 4, chem_nitrogen_secondary: 2,
chem_carboxy: 1, chem_water: 1}),
Counter({chem_sulfur: 4}),
]),
("3e0f", [
Counter({chem_nitrogen: 2, chem_oxygen: 4, chem_nitrogen_secondary: 2,
chem_carboxy: 2, chem_phosphate: 2}),
Counter({chem_nitrogen: 2, chem_oxygen: 2, chem_nitrogen_secondary: 2,
chem_carboxy: 1, chem_phosphate: 1}),
Counter({chem_nitrogen: 2, chem_oxygen: 3, chem_nitrogen_secondary: 2,
chem_carboxy: 2, chem_phosphate: 1}),
]),
("3dkq", [
Counter({chem_nitrogen: 4, chem_oxygen: 1, chem_nitrogen_secondary: 4,
chem_carboxy: 1}),
Counter({chem_nitrogen: 2, chem_oxygen: 1, chem_nitrogen_secondary: 2,
chem_carboxy: 1}),
Counter({chem_nitrogen: 4, chem_oxygen: 1, chem_nitrogen_secondary: 4,
chem_carboxy: 1}),
]),
("2o8q", [
Counter({chem_nitrogen: 3, chem_oxygen: 3, chem_nitrogen_secondary: 3,
chem_water: 3}),
Counter({chem_nitrogen: 3, chem_oxygen: 3, chem_nitrogen_secondary: 3,
chem_water: 3}),
]),
("1tgg", [
Counter({chem_oxygen: 5, chem_chloride: 1, chem_carboxy: 4,
chem_water: 1}),
Counter({chem_oxygen: 3, chem_chloride: 2, chem_carboxy: 3}),
Counter({chem_oxygen: 4, chem_chloride: 2, chem_carboxy: 4}),
]),
("3zu8", [
Counter({chem_oxygen: 7, chem_carboxy: 3, chem_water: 1,
chem_backbone: 2}),
Counter({chem_nitrogen: 4, chem_oxygen: 1, chem_nitrogen_primary: 1,
chem_nitrogen_secondary: 3, chem_carboxy: 1, chem_backbone: 3}),
]),
("1ofs", [
Counter({chem_nitrogen: 1, chem_oxygen: 4, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 1}),
Counter({chem_oxygen: 7, chem_amide: 1, chem_carboxy: 3, chem_water: 2,
chem_backbone: 1}),
Counter({chem_nitrogen: 1, chem_oxygen: 5, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 2}),
Counter({chem_oxygen: 7, chem_amide: 1, chem_carboxy: 3, chem_water: 2,
chem_backbone: 1}),
]),
("3ul2", [
Counter({chem_oxygen: 7, chem_amide: 1, chem_carboxy: 3, chem_water: 2,
chem_backbone: 1}),
Counter({chem_nitrogen: 1, chem_oxygen: 5, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 2}),
Counter({chem_oxygen: 7, chem_amide: 1, chem_carboxy: 3, chem_backbone: 1,
chem_water: 2}),
Counter({chem_nitrogen: 1, chem_oxygen: 5, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 2}),
Counter({chem_oxygen: 7, chem_amide: 1, chem_carboxy: 3, chem_water: 2,
chem_backbone: 1}),
Counter({chem_nitrogen: 1, chem_oxygen: 5, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 2}),
Counter({chem_oxygen: 7, chem_amide: 1, chem_carboxy: 3, chem_water: 2,
chem_backbone: 1}),
Counter({chem_nitrogen: 1, chem_oxygen: 5, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 2}),
]),
("3snm", [
Counter({chem_oxygen: 5, chem_amide: 1, chem_carboxy: 3,
chem_backbone: 1}),
Counter({chem_nitrogen: 1, chem_oxygen: 3, chem_nitrogen_secondary: 1,
chem_carboxy: 3}),
]),
("3qlq", [
Counter({chem_oxygen: 7, chem_amide: 1, chem_carboxy: 3, chem_water: 2,
chem_backbone: 1}),
Counter({chem_nitrogen: 1, chem_oxygen: 5, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 2}),
Counter({chem_nitrogen: 1, chem_oxygen: 5, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 2}),
Counter({chem_oxygen: 7, chem_amide: 1, chem_carboxy: 3, chem_water: 2,
chem_backbone: 1}),
Counter({chem_nitrogen: 1, chem_oxygen: 5, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 2}),
Counter({chem_oxygen: 7, chem_amide: 1, chem_carboxy: 3, chem_water: 2,
chem_backbone: 1}),
Counter({chem_oxygen: 7, chem_amide: 1, chem_carboxy: 3, chem_water: 2,
chem_backbone: 1}),
Counter({chem_nitrogen: 1, chem_oxygen: 5, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 2}),
]),
("2gdf", [
Counter({chem_nitrogen: 1, chem_oxygen: 4, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 1}),
Counter({chem_oxygen: 6, chem_amide: 1, chem_carboxy: 3, chem_water: 1,
chem_backbone: 1}),
Counter({chem_nitrogen: 1, chem_oxygen: 4, chem_nitrogen_secondary: 1,
chem_carboxy: 3, chem_water: 1}),
Counter({chem_oxygen: 6, chem_amide: 1, chem_carboxy: 3, chem_water: 1,
chem_backbone: 1}),
]),
("1q8h", [
Counter({chem_oxygen: 7, chem_carboxy: 6, chem_water: 1}),
Counter({chem_oxygen: 7, chem_carboxy: 4, chem_water: 3}),
Counter({chem_oxygen: 8, chem_carboxy: 6, chem_water: 2}),
]),
])
for model, expected_environments in models.items():
pdb_path = libtbx.env.find_in_repositories(
relative_path = os.path.join(
"phenix_regression", "mmtbx", "ions", model + ".pdb"),
test = os.path.isfile
)
mon_lib_srv = monomer_library.server.server()
ener_lib = monomer_library.server.ener_lib()
processed_pdb_file = monomer_library.pdb_interpretation.process(
mon_lib_srv = mon_lib_srv,
ener_lib = ener_lib,
file_name = pdb_path,
raw_records = None,
force_symmetry = True,
log = libtbx.utils.null_out()
)
geometry = \
processed_pdb_file.geometry_restraints_manager(show_energies = False)
xray_structure = processed_pdb_file.xray_structure()
pdb_hierarchy = processed_pdb_file.all_chain_proxies.pdb_hierarchy
connectivity = geometry.shell_sym_tables[0].full_simple_connectivity()
manager = mmtbx.ions.identify.manager(
fmodel = None,
pdb_hierarchy = pdb_hierarchy,
xray_structure = xray_structure,
connectivity = connectivity)
elements = set(ions.DEFAULT_IONS + ions.TRANSITION_METALS)
elements.difference_update(["CL"])
metals = [i_seq for i_seq, atom in enumerate(manager.pdb_atoms)
if atom.fetch_labels().resname.strip().upper() in elements]
assert len(metals) == len(expected_environments)
for index, metal, expected_environment in \
zip(xrange(len(metals)), metals, expected_environments):
env = ChemicalEnvironment(
metal,
manager.find_nearby_atoms(metal, filter_by_two_fofc = False),
manager
)
if env.chemistry != expected_environment:
print "Problem detecting chemistry environment in", model, index
print "Found: ", env.chemistry
print "Should be:", expected_environment
sys.exit()
print "OK"
if __name__ == "__main__":
exercise()
| 41.581395
| 80
| 0.631767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 302
| 0.033781
|
b8fde4b07b6cd3c768fcd79e7fc1ef7c9a747340
| 600
|
py
|
Python
|
extinfo/extractors/fileinfo_com.py
|
rpdelaney/extinfo
|
35463afe295b1bc83478960e67762ffb10915175
|
[
"Apache-2.0"
] | null | null | null |
extinfo/extractors/fileinfo_com.py
|
rpdelaney/extinfo
|
35463afe295b1bc83478960e67762ffb10915175
|
[
"Apache-2.0"
] | null | null | null |
extinfo/extractors/fileinfo_com.py
|
rpdelaney/extinfo
|
35463afe295b1bc83478960e67762ffb10915175
|
[
"Apache-2.0"
] | null | null | null |
import re
from ..utils import Report, fetch
SITE = "fileinfo.com"
PATH = "/extension"
def extract(extension: str) -> list[Report]:
soup = fetch(site=SITE, path=PATH, extension=extension)
description_short = soup.find_all("h2")[0].text.strip()
infoboxes = soup.find_all(attrs={"class": "infoBox"})
description_long = infoboxes[0].text.strip()
how_to_open = re.sub(r"\n+", "\n\n", infoboxes[1].text).strip()
report = Report(
description_short=description_short,
description_long=description_long,
how_to_open=how_to_open,
)
return [report]
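# Example usage (illustrative only; "png" is just a sample extension):
#
#     reports = extract("png")
#     print(reports[0].description_short)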
| 26.086957
| 67
| 0.67
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.096667
|
b8fdf6d347c186c16105c41f259ca397f53533cf
| 801
|
py
|
Python
|
style/api/routers/prediction.py
|
imagination-ai/kerem-side-projects-monorepo
|
3d9da9d57f305ac2d6a03bab3787acfbee7269ee
|
[
"MIT"
] | null | null | null |
style/api/routers/prediction.py
|
imagination-ai/kerem-side-projects-monorepo
|
3d9da9d57f305ac2d6a03bab3787acfbee7269ee
|
[
"MIT"
] | 2
|
2022-01-20T15:46:39.000Z
|
2022-02-16T20:51:47.000Z
|
style/api/routers/prediction.py
|
imagination-ai/kerem-side-projects-monorepo
|
3d9da9d57f305ac2d6a03bab3787acfbee7269ee
|
[
"MIT"
] | null | null | null |
from fastapi import APIRouter
from pydantic import BaseModel
from style.predict.servable.serve import get_servable
router = APIRouter()
class PredictionRequest(BaseModel):
text: str
model_name: str
@router.get("/")
async def index():
return {"success": True, "message": "Predictions Router is working!"}
@router.post("/predict")
async def predict(request: PredictionRequest):
servable = get_servable(request.model_name)
prediction = servable.run_inference(request.text)
return {"success": True, "prediction": prediction}
@router.post("/predicts")
async def predicts(request: PredictionRequest):
servable = get_servable(request.model_name)
predictions = servable.run_inference_multiclass(request.text)
return {"success": True, "predictions": predictions}
| 25.03125
| 73
| 0.746567
| 69
| 0.086142
| 0
| 0
| 581
| 0.725343
| 513
| 0.640449
| 117
| 0.146067
|
b8fe991a0b450794e796f906cb32a0c3c5911676
| 77
|
py
|
Python
|
pyrepl/iconsole.py
|
thinkle/snippets
|
a19fd709fc618cee9d76b7481b834c3e0d4ed397
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
pyrepl/iconsole.py
|
thinkle/snippets
|
a19fd709fc618cee9d76b7481b834c3e0d4ed397
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
pyrepl/iconsole.py
|
thinkle/snippets
|
a19fd709fc618cee9d76b7481b834c3e0d4ed397
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2019-08-28T22:06:53.000Z
|
2019-08-28T22:06:53.000Z
|
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
ipshell()
| 11
| 38
| 0.779221
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b8feb9d082e79ca3a8c079efe501a2cd98406b92
| 2,623
|
py
|
Python
|
src/tests/ftest/pool/create_capacity_test.py
|
berserk-fury/daos
|
e0a3249aa886962cef2345135b907b45f7109cae
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
src/tests/ftest/pool/create_capacity_test.py
|
berserk-fury/daos
|
e0a3249aa886962cef2345135b907b45f7109cae
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
src/tests/ftest/pool/create_capacity_test.py
|
berserk-fury/daos
|
e0a3249aa886962cef2345135b907b45f7109cae
|
[
"BSD-2-Clause-Patent"
] | 1
|
2021-11-03T05:00:42.000Z
|
2021-11-03T05:00:42.000Z
|
#!/usr/bin/python3
"""
(C) Copyright 2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
from pool_test_base import PoolTestBase
from server_utils import ServerFailed
class PoolCreateTests(PoolTestBase):
# pylint: disable=too-many-ancestors,too-few-public-methods
"""Pool create tests.
All of the tests verify pool create performance with 7 servers and 1 client.
Each server should be configured with a full complement of NVDIMMs and SSDs.
:avocado: recursive
"""
def test_create_pool_quantity(self):
"""JIRA ID: DAOS-5114 / SRS-2 / SRS-4.
Test Description:
Create 200 pools on all of the servers.
Perform an orderly system shutdown via cmd line (dmg).
Restart the system via cmd line tool (dmg).
Verify that DAOS is ready to accept requests within 2 minutes.
:avocado: tags=all,pr,daily_regression
:avocado: tags=hw,large
:avocado: tags=pool
:avocado: tags=pool_create_tests,create_performance
"""
# Create some number of pools, each using an equal share of 60% of the
# available capacity, e.g. 0.6% for 100 pools.
quantity = self.params.get("quantity", "/run/pool/*", 1)
self.add_pool_qty(quantity, create=False)
self.check_pool_creation(10)
# Verify DAOS can be restarted in less than 2 minutes
try:
self.server_managers[0].system_stop()
except ServerFailed as error:
self.fail(error)
start = float(time.time())
try:
self.server_managers[0].system_start()
except ServerFailed as error:
self.fail(error)
duration = float(time.time()) - start
self.assertLessEqual(
duration, 120,
"DAOS not ready to accept requests with in 2 minutes")
# Verify all the pools exists after the restart
detected_pools = [uuid.lower() for uuid in self.dmg.pool_list()]
missing_pools = []
for pool in self.pool:
pool_uuid = pool.uuid.lower()
if pool_uuid not in detected_pools:
missing_pools.append(pool_uuid)
if missing_pools:
self.fail(
"The following created pools were not detected in the pool "
"list after rebooting the servers:\n [{}]: {}".format(
len(missing_pools), ", ".join(missing_pools)))
self.assertEqual(
len(self.pool), len(detected_pools),
"Additional pools detected after rebooting the servers")
| 34.973333
| 80
| 0.626382
| 2,418
| 0.921845
| 0
| 0
| 0
| 0
| 0
| 0
| 1,336
| 0.50934
|
b8fecc2152a699d192482875bb377312659faf77
| 577
|
py
|
Python
|
async-utils/setup.py
|
goc9000/python-library
|
0a4a09278df6e84061baedda8997071e2201103f
|
[
"MIT"
] | null | null | null |
async-utils/setup.py
|
goc9000/python-library
|
0a4a09278df6e84061baedda8997071e2201103f
|
[
"MIT"
] | null | null | null |
async-utils/setup.py
|
goc9000/python-library
|
0a4a09278df6e84061baedda8997071e2201103f
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='atmfjstc-async-utils',
version='0.1.0',
author_email='atmfjstc@protonmail.com',
package_dir={'': 'src'},
packages=find_packages(where='src'),
install_requires=[
],
zip_safe=True,
description="Utilities for async code",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Framework :: AsyncIO",
"Typing :: Typed",
],
python_requires='>=3.9',
)
| 20.607143
| 49
| 0.60312
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 251
| 0.435009
|
b8fef77cc6fd6e6d00ddf3b311025b4035166678
| 5,865
|
py
|
Python
|
msg_scheduler/analyzer.py
|
buaales/tt_offline_scheduler
|
257d8e2c94fc896c891e7d2a014bb2eebde996ce
|
[
"MIT"
] | 5
|
2021-05-18T11:34:42.000Z
|
2022-02-24T03:33:43.000Z
|
msg_scheduler/analyzer.py
|
buaales/tt_offline_scheduler
|
257d8e2c94fc896c891e7d2a014bb2eebde996ce
|
[
"MIT"
] | null | null | null |
msg_scheduler/analyzer.py
|
buaales/tt_offline_scheduler
|
257d8e2c94fc896c891e7d2a014bb2eebde996ce
|
[
"MIT"
] | 3
|
2020-09-10T05:58:59.000Z
|
2022-02-25T01:50:25.000Z
|
import subprocess
import sys
from collections import defaultdict
import pandas as pd
import networkx
import random
import functools
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from .model import Network, Link, Frame, Node
import io
if sys.platform == 'darwin':
matplotlib.use("TkAgg")
class Analyzer:
def __init__(self, df: pd.DataFrame, network: Network, lcm: int):
self._df = df
self._network = network
self._graph = network.graph
self._lcm = lcm
def print_by_time(self):
print(self._df.sort_values(by='time_slot'))
def print_by_app(self):
res = self._df.sort_values(by='app')
print(res)
def _animate_update(self, ax, time_slot):
ax.clear()
ax.set_title(f'Time slot: {time_slot}')
edge_lable = dict()
pos = networkx.spring_layout(self._graph, seed=0, scale=3)
cur_table = self._df[self._df['time_slot'] == time_slot]
for idx, cur_row in cur_table.iterrows():
link = cur_row['link']
edge_lable[(link.node1.name, link.node2.name)] = cur_row['app'].name
networkx.draw_networkx_edges(self._graph, pos=pos, ax=ax, edge_color='gray')
nodes = networkx.draw_networkx_nodes(self._graph, pos=pos, ax=ax, node_color="white", node_size=1000,
node_shape='o')
nodes.set_edgecolor('black')
networkx.draw_networkx_labels(self._graph, pos=pos, ax=ax, font_size=8)
networkx.draw_networkx_edge_labels(self._graph, pos=pos, edge_labels=edge_lable, ax=ax)
ax.set_xticks([])
ax.set_yticks([])
def animate(self):
fig, ax = plt.subplots(figsize=(8, 8))
ani = animation.FuncAnimation(fig, functools.partial(self._animate_update, ax), frames=self._lcm, interval=650,
repeat=True)
# Set up formatting for the movie files
ani.save('/tmp/res.mov', fps=1, dpi=100)
plt.show()
pass
def export(self, hosts=("127.0.0.1",)):
exported = io.StringIO()
p = functools.partial(print, file=exported)
node_app_map = {}
for app in self._df['app'].unique():
node_app_map[app.node] = app
msg_core_app = defaultdict(list)
app_count = 0
for node in self._graph.nodes:
if node.startswith('msg'):
msg_core_app[node] = msg_core_app[node]
for nei in self._graph.neighbors(node):
if nei.startswith('app'):
app_count += 1
msg_core_app[node].append(nei)
p(len(msg_core_app), self._lcm)
for i, ma in enumerate(msg_core_app.keys()):
# inter msg server endpoint and app endpoint
ip = random.Random(200 + i).choice(hosts)
p(ma, ip, 10801 + i, 1 if i == 0 else 0, ip, 20801 + i)
p()
# in which time slot each app sends a message
for msg_node, app_nodes in msg_core_app.items():
for app_node in app_nodes:
app = node_app_map[app_node]
for idx, row in self._df[self._df['app'] == app].iterrows():
if row['link'].node1.name == app_node and int(row['time_slot']) < app.peroid:
p(':', app.name)
p(row['time_slot'], app.peroid, msg_node)
p()
# at what time each msg_core needs to forward a message, and from where to where
def find_next_node_not_switch(frame: Frame, n: Node) -> Node:
if not n.name.startswith('switch'):
return n
for _, r in self._df.iterrows():
if r['link'].node1 != n or r['frame'].id != frame.id:
continue
if not r['link'].node2.name.startswith('switch'):
return r['link'].node2
else:
return find_next_node_not_switch(frame, r['link'].node2)
def find_prev_node_not_switch(frame: Frame, n: Node) -> Node:
if not n.name.startswith('switch'):
return n
for _, r in self._df.iterrows():
if r['link'].node2 != n or r['frame'].id != frame.id:
continue
if not r['link'].node1.name.startswith('switch'):
return r['link'].node1
else:
return find_next_node_not_switch(frame, r['link'].node1)
def cvt_node(node: Node):
return node_app_map[node.name] if node.name.startswith('app') else node.name
for msg_node in msg_core_app.keys():
tlist = []
for _, row in self._df.iterrows():
if row['link'].node1 == msg_node:
# the msg node needs to forward this message
target_node = find_next_node_not_switch(row['frame'], row['link'].node2)
tlist.append((msg_node, 'send', cvt_node(target_node), row['frame'].id, row['time_slot']))
elif row['link'].node2 == msg_node:
target_node = find_prev_node_not_switch(row['frame'], row['link'].node1)
tlist.append((msg_node, 'recv', cvt_node(target_node), row['frame'].id, row['time_slot']))
tlist = sorted(tlist, key=lambda x: int(x[4]))
p(':', msg_node)
p(self._lcm, len(msg_core_app[msg_node]), len(tlist))
p('\n'.join(map(lambda xm: node_app_map[xm].name, msg_core_app[msg_node])))
for x in tlist:
for y in x[1:]:
p(y, end=' ')
p()
p()
with open('/tmp/tt.txt.tmp', 'w+') as f:
print(exported.getvalue(), file=f)
for ip in hosts:
subprocess.run(f'scp /tmp/tt.txt.tmp {ip}:/tmp/tt.txt'.split(' '))
# print(exported.getvalue())
| 38.333333
| 119
| 0.554135
| 5,598
| 0.942583
| 0
| 0
| 0
| 0
| 0
| 0
| 682
| 0.114834
|
b8ff8b94d402dcdb466c2d51a4b1cfbb02411cf0
| 3,286
|
py
|
Python
|
endpoints/cotect-endpoints/cotect_endpoints/security.py
|
JNKielmann/cotect
|
1b213459b41ef18119948633385ebad2cc16e9e2
|
[
"MIT"
] | 19
|
2020-03-18T15:49:58.000Z
|
2021-02-11T12:07:22.000Z
|
endpoints/cotect-endpoints/cotect_endpoints/security.py
|
JNKielmann/cotect
|
1b213459b41ef18119948633385ebad2cc16e9e2
|
[
"MIT"
] | 6
|
2020-03-21T18:50:29.000Z
|
2022-02-27T01:38:20.000Z
|
endpoints/cotect-endpoints/cotect_endpoints/security.py
|
JNKielmann/cotect
|
1b213459b41ef18119948633385ebad2cc16e9e2
|
[
"MIT"
] | 7
|
2020-03-24T14:42:35.000Z
|
2020-04-06T13:22:29.000Z
|
import logging
import os
import firebase_admin
from fastapi import HTTPException, Security, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from fastapi.security.api_key import APIKeyCookie, APIKeyHeader, APIKeyQuery
from firebase_admin import auth
from cotect_endpoints.utils import id_utils
from cotect_endpoints.schema import User
# Initialize logger
log = logging.getLogger(__name__)
firebase_app = None
firebase_credentials = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
if firebase_credentials and os.path.isfile(firebase_credentials):
# Initialize firebase
firebase_app = firebase_admin.initialize_app()
else:
log.warning(
"GOOGLE_APPLICATION_CREDENTIALS was not set with a valid path. Firebase will not be initalized."
)
API_KEY_NAME = "api_token"
api_key_bearer = HTTPBearer(auto_error=False)
api_key_query = APIKeyQuery(name=API_KEY_NAME, auto_error=False)
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
# Cookie security specification is not supported by swagger 2.0 specs
# api_key_cookie = APIKeyCookie(name=API_KEY_NAME, auto_error=False)
def get_active_user(
api_key_bearer: HTTPAuthorizationCredentials = Security(api_key_bearer),
api_key_query: str = Security(api_key_query),
api_key_header: str = Security(api_key_header),
# api_key_cookie: str = Security(api_key_cookie),
) -> User:
# https://medium.com/data-rebels/fastapi-authentication-revisited-enabling-api-key-authentication-122dc5975680
secret = id_utils.get_id_generation_secret()
api_key = None
if api_key_bearer:
api_key = api_key_bearer.credentials
elif api_key_query:
api_key = api_key_query
elif api_key_header:
api_key = api_key_header
else:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN, detail="No API Key was provided.",
)
#elif api_key_cookie:
# api_key = api_key_header
if api_key == "demo":
# TODO: remove this demo shortcut
return User(
user_id=id_utils.generate_user_id("+4917691377102", secret), verified=False,
)
if not firebase_app:
# firebase app was not initalized
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Failed to verify user.",
headers={"WWW-Authenticate": "Bearer"},
)
try:
decoded_token = auth.verify_id_token(
api_key, app=firebase_app, check_revoked=False
)
if "phone_number" in decoded_token and decoded_token["phone_number"]:
return User(
user_id=id_utils.generate_user_id(decoded_token["phone_number"], secret),
verified=True,
)
else:
# use uid as fallback or for anonymous users
return User(
user_id=id_utils.generate_user_id(decoded_token["uid"], secret),
verified=False,
)
except Exception as ex:
log.info("Failed to validate firebase token: " + str(ex.msg))
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Failed to validate the firebase token.",
headers={"WWW-Authenticate": "Bearer"},
)
| 33.530612
| 114
| 0.693244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 857
| 0.260803
|
77009347b5bee01d461e0bc59d8b6aa0208dc523
| 7,201
|
py
|
Python
|
ui/Pytest/test_Range.py
|
MoisesHenr/OCEAN
|
e99c853893adc89652794ace62fcc8ffa78aa7ac
|
[
"MIT"
] | 15
|
2021-06-15T13:48:03.000Z
|
2022-01-26T13:51:46.000Z
|
ui/Pytest/test_Range.py
|
MoisesHenr/OCEAN
|
e99c853893adc89652794ace62fcc8ffa78aa7ac
|
[
"MIT"
] | 1
|
2021-07-04T02:58:29.000Z
|
2021-07-04T02:58:29.000Z
|
ui/Pytest/test_Range.py
|
MoisesHenr/OCEAN
|
e99c853893adc89652794ace62fcc8ffa78aa7ac
|
[
"MIT"
] | 2
|
2021-06-21T20:44:01.000Z
|
2021-06-23T11:10:56.000Z
|
# Author: Moises Henrique Pereira
# This module handles the function tests of the controller of the numerical features component
import pytest
import sys
from PyQt5 import QtWidgets
from ui.mainTest import StaticObjects
@pytest.mark.parametrize('slider', [1, 2.9, False, ('t1', 't2'), None])
def test_CIR_setSlider_wrong_parameter(slider):
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(slider)
def test_CIR_setSlider_right_parameter():
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
def test_CIR_initializeRange_none_min_parameter():
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
rangeMin.initializeRange(None, 1, 0.5, 15)
def test_CIR_initializeRange_none_max_parameter():
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
rangeMin.initializeRange(0, None, 0.5, 15)
def test_CIR_initializeRange_none_value_parameter():
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
rangeMin.initializeRange(0, 1, None, 15)
def test_CIR_initializeRange_none_space_parameter():
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
rangeMin.initializeRange(0, 1, 0.5, None)
def test_CIR_initializeRange_right_parameters():
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
rangeMin.initializeRange(0, 1, 0.5, 15)
def test_CIR_updateRange_none_min_parameter():
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
rangeMin.initializeRange(0, 1, 0.5, 15)
rangeMin.updateRange(None, 1, 0.5)
def test_CIR_updateRange_none_max_parameter():
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
rangeMin.initializeRange(0, 1, 0.5, 15)
rangeMin.updateRange(0, None, 0.5)
def test_CIR_updateRange_none_value_parameter():
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
rangeMin.initializeRange(0, 1, 0.5, 15)
rangeMin.updateRange(0, 1, None)
def test_CIR_updateRange_right_parameters():
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
rangeMin.initializeRange(0, 1, 0.5, 15)
rangeMin.updateRange(0, 1, 0.3)
def test_CIR_setValue_none_parameter():
with pytest.raises(AssertionError):
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
rangeMin.initializeRange(0, 1, 0.5, 15)
rangeMin.setValue(None)
def test_CIR_setValue_right_parameters():
app = QtWidgets.QApplication(sys.argv)
counterfactualInterfaceSlider3RangesView = StaticObjects.staticCounterfactualInterfaceSlider3RangesView()
counterfactualInterfaceSlider3RangesView.labelSlider.initializeSlider(0, 1, 1)
rangeMin = counterfactualInterfaceSlider3RangesView.labelRangeMinimum
rangeMin.setSlider(counterfactualInterfaceSlider3RangesView.labelSlider)
rangeMin.initializeRange(0, 1, 0.5, 15)
rangeMin.setValue(0.3)
| 56.257813
| 113
| 0.805583
| 0
| 0
| 0
| 0
| 520
| 0.072212
| 0
| 0
| 147
| 0.020414
|
770214b97687e419b49ca7614e24a42a26a9954c
| 2,092
|
py
|
Python
|
tools/clean_autogen_protos.py
|
embeddery/stackrox
|
d653406651df4331a714839ec2c0a23a93425c64
|
[
"Apache-2.0"
] | 22
|
2022-03-31T14:32:18.000Z
|
2022-03-31T22:11:30.000Z
|
tools/clean_autogen_protos.py
|
embeddery/stackrox
|
d653406651df4331a714839ec2c0a23a93425c64
|
[
"Apache-2.0"
] | 5
|
2022-03-31T14:35:28.000Z
|
2022-03-31T22:40:13.000Z
|
tools/clean_autogen_protos.py
|
embeddery/stackrox
|
d653406651df4331a714839ec2c0a23a93425c64
|
[
"Apache-2.0"
] | 4
|
2022-03-31T16:33:58.000Z
|
2022-03-31T22:19:26.000Z
|
#!/usr/bin/env python3
import argparse
import pathlib
GENERATED_EXTENSIONS = ["pb.go", "pb.gw.go", "swagger.json"]
def find_files(path, fileglob):
files_full = list(path.glob(fileglob))
return files_full
def strip_path_extension(filelist):
# We cannot use Path.stem directly as it doesn't handle double extensions (.pb.go) correctly
files_extensionless = list(map(lambda f: (str(f).replace("".join(f.suffixes), "")), filelist))
files_name_only = list(map(lambda f: pathlib.Path(f).stem, files_extensionless))
return files_name_only
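# Illustrative example (added for clarity; the path is hypothetical): strip_path_extension
# removes every suffix, so ["proto/api/v1/alert.pb.go"] becomes ["alert"], which matches
# "alert.proto" reduced to "alert" on the proto side.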
def find_difference(generated_list, proto_list):
difference = set(generated_list) - set(proto_list)
return difference
def filter_only_gen_files(candidates):
return [x for x in candidates if any(str(x.name).endswith(extension) for extension in GENERATED_EXTENSIONS)]
def find_in_list(target_list, searchterms):
searchterms = [f"{x}." for x in searchterms] # Add a dot to only match full filenames
return [x for x in target_list if any(str(x.name).startswith(term) for term in searchterms)]
def remove_files(target_list):
for target in target_list:
target.unlink()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--protos", type=pathlib.Path, help="Path to proto dir")
parser.add_argument("--generated", type=pathlib.Path, help="Path to generated sources dir")
v = parser.parse_args()
proto_files = find_files(v.protos, "**/*.proto")
generated_files = [f
for file_list in (find_files(v.generated, f'**/*.{ext}') for ext in GENERATED_EXTENSIONS)
for f in file_list]
proto_stripped = strip_path_extension(proto_files)
generated_stripped = strip_path_extension(generated_files)
diff = find_difference(generated_stripped, proto_stripped)
full_paths = find_in_list(generated_files, diff)
final_diff = filter_only_gen_files(full_paths)
if len(final_diff) > 0:
print(f"Removing: {final_diff}")
remove_files(final_diff)
if __name__ == '__main__':
main()
| 31.223881
| 112
| 0.707935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 329
| 0.157266
|
7702c9e7da503201d8308cee20a4f5351db96b01
| 21,848
|
py
|
Python
|
skbl/helpers.py
|
spraakbanken/skblportalen
|
05d0113c9ca73f8092765a08597d23091ba3bc1f
|
[
"MIT"
] | 2
|
2018-03-15T16:19:36.000Z
|
2019-03-18T10:25:38.000Z
|
skbl/helpers.py
|
spraakbanken/skblportalen
|
05d0113c9ca73f8092765a08597d23091ba3bc1f
|
[
"MIT"
] | 3
|
2018-06-05T19:35:11.000Z
|
2019-03-18T10:26:50.000Z
|
skbl/helpers.py
|
spraakbanken/skblportalen
|
05d0113c9ca73f8092765a08597d23091ba3bc1f
|
[
"MIT"
] | 1
|
2018-06-05T19:07:56.000Z
|
2018-06-05T19:07:56.000Z
|
"""Define different helper functions."""
import datetime
import json
import re
import sys
import urllib.parse
from urllib.request import Request, urlopen
import icu
import markdown
from flask import current_app, g, make_response, render_template, request, url_for
from flask_babel import gettext
from . import static_info
VONAV_LIST = ["von", "af", "av"]
def set_language_switch_link(route, fragment=None, lang=""):
"""Fix address and label for language switch button."""
if not lang:
lang = g.language
if lang == "en":
g.switch_language = {"url": url_for("views." + route + "_sv"), "label": "Svenska"}
else:
g.switch_language = {"url": url_for("views." + route + "_en"), "label": "English"}
if fragment is not None:
g.switch_language["url"] += "/" + fragment
def cache_name(pagename, lang=""):
"""Get page from cache."""
if not lang:
lang = "sv" if "sv" in request.url_rule.rule else "en"
return "%s_%s" % (pagename, lang)
def karp_query(action, query, mode=None):
"""Generate query and send request to Karp."""
if not mode:
mode = current_app.config["KARP_MODE"]
query["mode"] = mode
query["resource"] = current_app.config["KARP_LEXICON"]
if "size" not in query:
query["size"] = current_app.config["RESULT_SIZE"]
params = urllib.parse.urlencode(query)
return karp_request("%s?%s" % (action, params))
def karp_request(action):
"""Send request to Karp backend."""
q = Request("%s/%s" % (current_app.config["KARP_BACKEND"], action))
if current_app.config["DEBUG"]:
log("%s/%s\n" % (current_app.config["KARP_BACKEND"], action), "REQUEST")
if current_app.config.get("USE_AUTH", False):
q.add_header("Authorization", "Basic %s" % (current_app.config["KARP_AUTH_HASH"]))
response = urlopen(q).read()
data = json.loads(response.decode("UTF-8"))
return data
def karp_fe_url():
"""Get URL for Karp frontend."""
return current_app.config["KARP_FRONTEND"] + "/#?mode=" + current_app.config["KARP_MODE"]
def serve_static_page(page, title=""):
"""Serve static html."""
set_language_switch_link(page)
with current_app.open_resource("static/pages/%s/%s.html" % (page, g.language)) as f:
data = f.read().decode("UTF-8")
return render_template("page_static.html",
content=data,
title=title)
def check_cache(page, lang=""):
"""
Check if page is in cache.
If the cache should not be used, return None.
"""
if current_app.config["TEST"]:
return None
try:
with g.mc_pool.reserve() as client:
# Look for the page, return if found
art = client.get(cache_name(page, lang))
if art is not None:
return art
except Exception:
# TODO what to do??
pass
# If nothing is found, return None
return None
def set_cache(page, name="", lang="", no_hits=0):
"""
Browser cache handling.
Add header to the response.
May also add the page to the memcache.
"""
    pagename = cache_name(name, lang=lang)
if no_hits >= current_app.config["CACHE_HIT_LIMIT"]:
try:
with g.mc_pool.reserve() as client:
client.set(pagename, page, time=current_app.config["LOW_CACHE_TIME"])
except Exception:
# TODO what to do??
pass
r = make_response(page)
r.headers.set("Cache-Control", "public, max-age=%s" %
current_app.config["BROWSER_CACHE_TIME"])
return r
def get_first_name(source):
"""Return the given name (first name)."""
return re.sub("/", "", source["name"].get("firstname", "")).strip()
def format_names(source, fmt="strong"):
"""Return the given name (first name), and the formatted callingname (tilltalsnamnet)."""
if fmt:
return re.sub("(.*)/(.+)/(.*)", r"\1<%s>\2</%s>\3" % (fmt, fmt), source["name"].get("firstname", ""))
else:
return re.sub("(.*)/(.+)/(.*)", r"\1\2\3", source["name"].get("firstname", ""))
def get_life_range(source):
"""
Return the birth and death year from _source (as a tuple).
Return empty strings if not available.
"""
years = []
for event in ["from", "to"]:
if source["lifespan"].get(event):
date = source["lifespan"][event].get("date", "")
if date:
date = date.get("comment", "")
if "-" in date and not re.search("[a-zA-Z]", date):
year = date[:date.find("-")]
else:
year = date
else:
year = ""
years.append(year)
return years[0], years[1]
def get_life_range_force(source):
"""
Return the birth and death year from _source (as a tuple).
Try to also parse non-dates like "ca. 1500-talet".
Return -1, 1000000 if not available.
"""
default_born = -1
default_died = 1000000
def convert(event, retval):
if source["lifespan"].get(event):
date = source["lifespan"][event].get("date", "")
if date:
date = date.get("comment", "")
match = re.search(r".*(\d{4}).*", date)
if match:
retval = int(match.group(1))
return retval
born = convert("from", default_born)
dead = convert("to", default_died)
    # Sorting hack: if there is no birth year, set it to dead - 100 (and vice versa)
    # to make it appear in a more reasonable position in the chronology
if born == default_born and dead != default_died:
born = dead - 100
if dead == default_died and born != default_born:
dead = born + 100
return born, dead
def get_date(source):
"""Get birth and death date if available. Return empty strings otherwise."""
dates = []
for event in ["from", "to"]:
if source["lifespan"][event].get("date"):
date = source["lifespan"][event]["date"].get("comment", "")
else:
date = ""
dates.append(date)
return dates[0], dates[1]
def get_current_date():
"""Get the current date."""
return datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d")
def markdown_html(text):
"""Convert markdown text to html."""
return markdown.markdown(text)
def group_by_type(objlist, name):
"""Group objects by their type (=name), e.g. 'othernames'."""
newdict = {}
for obj in objlist:
val = obj.get(name, "")
key_sv = obj.get("type", "Övrigt")
key_en = obj.get("type_eng", "Other")
if key_sv not in newdict:
newdict[key_sv] = (key_en, [])
newdict[key_sv][1].append(val)
result = []
for key, val in list(newdict.items()):
result.append({"type": key, "type_eng": val[0], name: ", ".join(val[1])})
return result
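# Illustrative only (hypothetical data, not taken from the SKBL backend): values
# sharing the same Swedish type label are merged and the English label is kept,
# so the call below should yield
# [{"type": "Smeknamn", "type_eng": "Nickname", "othernames": "A, B"}].
_example_grouped = group_by_type(
    [{"othernames": "A", "type": "Smeknamn", "type_eng": "Nickname"},
     {"othernames": "B", "type": "Smeknamn", "type_eng": "Nickname"}],
    "othernames")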
def make_alphabetical_bucket(result, sortnames=False, lang="sv"):
def processname(bucket, results):
vonaf_pattern = re.compile(r"^(%s) " % "|".join(VONAV_LIST))
name = re.sub(vonaf_pattern, r"", bucket[0])
results.append((name[0].upper(), bucket))
return make_alphabetic(result, processname, sortnames=sortnames, lang=lang)
def rewrite_von(name):
"""Move 'von' and 'av' to end of name."""
vonaf_pattern = re.compile(r"^(%s) (.+)$" % "|".join(VONAV_LIST))
return re.sub(vonaf_pattern, r"\2 \1", name)
def make_placenames(places, lang="sv"):
def processname(hit, results):
name = hit["name"].strip()
results.append((name[0].upper(), (name, hit)))
return make_alphabetic(places, processname, lang=lang)
def make_alphabetic(hits, processname, sortnames=False, lang="sv"):
"""
    Loop through hits, apply the function 'processname' to each object and then sort the result alphabetically.
    The function processname should append zero or more processed forms of
    the object to the result list.
    Each processed form should be a pair (first_letter, result),
    where first_letter is the first letter of the object (to sort on), and result
    is what the html template wants, e.g. a pair of (name, no_hits)
"""
def fix_lastname(name):
vonaf_pattern = re.compile(r"^(%s) " % "|".join(VONAV_LIST))
name = re.sub(vonaf_pattern, r"", name)
return name.replace(" ", "z")
results = []
for hit in hits:
processname(hit, results)
letter_results = {}
# Split the result into start letters
for first_letter, result in results:
if first_letter == "Ø":
first_letter = "Ö"
if first_letter == "Æ":
first_letter = "Ä"
if first_letter == "Ü":
first_letter = "Y"
if lang == "en" and first_letter == "Ö":
first_letter = "O"
if lang == "en" and first_letter in "ÄÅ":
first_letter = "A"
if first_letter not in letter_results:
letter_results[first_letter] = [result]
else:
letter_results[first_letter].append(result)
# Sort result dictionary alphabetically into list
if lang == "en":
collator = icu.Collator.createInstance(icu.Locale("en_EN.UTF-8"))
else:
collator = icu.Collator.createInstance(icu.Locale("sv_SE.UTF-8"))
for _n, items in list(letter_results.items()):
if sortnames:
items.sort(key=lambda x: collator.getSortKey(fix_lastname(x[0]) + " " + x[1]))
else:
items.sort(key=lambda x: collator.getSortKey(x[0]))
letter_results = sorted(list(letter_results.items()), key=lambda x: collator.getSortKey(x[0]))
return letter_results
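# Illustrative sketch only (hypothetical data, not from the SKBL backend): a
# minimal 'processname' callback following the contract documented above,
# appending one (first_letter, result) pair per hit.
def _example_processname(hit, results):
    name = hit["name"].strip()
    results.append((name[0].upper(), (name, hit.get("no_hits", 0))))
# Example call (left commented out to avoid running at import time):
# make_alphabetic([{"name": "Andersson"}, {"name": "Öberg"}], _example_processname)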
def make_simplenamelist(hits, search):
"""
    Create a list with links to the entries' url or _id.
Sort entries with names matching the query higher.
"""
results = []
used = set()
namefields = ["firstname", "lastname", "sortname"]
search_terms = [st.lower() for st in search.split()]
for hit in hits["hits"]:
# score = sum(1 for field in hit["highlight"] if field.startswith("name."))
hitname = hit["_source"]["name"]
score = sum(1 for nf in namefields if any(st in hitname.get(nf, "").lower() for st in search_terms))
if score:
name = join_name(hit["_source"], mk_bold=True)
liferange = get_life_range(hit["_source"])
subtitle = hit["_source"].get("subtitle", "")
subtitle_eng = hit["_source"].get("subtitle_eng", "")
subject_id = hit["_source"].get("url") or hit["_id"]
results.append((-score, name, liferange, subtitle, subtitle_eng, subject_id))
used.add(hit["_id"])
return sorted(results), used
def make_namelist(hits, exclude=set(), search=""):
"""
Split hits into one list per first letter.
Return only info necessary for listing of names.
"""
results = []
first_letters = [] # List only containing letters in alphabetical order
current_letterlist = [] # List containing entries starting with the same letter
current_total = 0
if search:
max_len = current_app.config["SEARCH_RESULT_SIZE"] - len(exclude)
else:
max_len = None
for hit in hits["hits"]:
if hit["_id"] in exclude:
continue
        # Separate names from linked names
is_link = hit["_index"].startswith(current_app.config["SKBL_LINKS"])
if is_link:
name = hit["_source"]["name"].get("sortname", "")
linked_name = join_name(hit["_source"])
else:
name = join_name(hit["_source"], mk_bold=True)
linked_name = False
liferange = get_life_range(hit["_source"])
subtitle = hit["_source"].get("subtitle", "")
subtitle_eng = hit["_source"].get("subtitle_eng", "")
subject_id = hit["_source"].get("url") or hit["_id"]
        # Get first letter from the sort key
firstletter = hit["sort"][1].upper()
if firstletter not in first_letters:
if current_letterlist:
results.append(current_letterlist)
current_letterlist = []
first_letters.append(firstletter)
current_letterlist.append((firstletter, is_link, name, linked_name, liferange, subtitle, subtitle_eng, subject_id))
current_total += 1
# Don't show more than SEARCH_RESULT_SIZE number of results
if max_len and current_total >= max_len:
break
if current_letterlist:
# Append last letterlist
results.append(current_letterlist)
return (first_letters, results)
def make_datelist(hits):
"""Extract information relevant for chronology list (same as make_namelist but without letter splitting)."""
result = []
for hit in hits:
is_link = hit["_index"].startswith(current_app.config["SKBL_LINKS"])
if is_link:
name = hit["_source"]["name"].get("sortname", "")
linked_name = join_name(hit["_source"])
else:
name = join_name(hit["_source"], mk_bold=True)
linked_name = False
liferange = get_life_range(hit["_source"])
subtitle = hit["_source"].get("subtitle", "")
subtitle_eng = hit["_source"].get("subtitle_eng", "")
subject_id = hit["_source"].get("url") or hit["_id"]
result.append((is_link, name, linked_name, liferange, subtitle, subtitle_eng, subject_id))
return result
def join_name(source, mk_bold=False):
"""Retrieve and format name from source."""
name = []
lastname = source["name"].get("lastname", "")
vonaf_pattern = re.compile(r"(%s |)(.*)" % " |".join(VONAV_LIST))
match = re.search(vonaf_pattern, lastname)
vonaf = match.group(1)
lastname = match.group(2)
if lastname:
if mk_bold:
name.append("<strong>%s</strong>," % lastname)
else:
name.append(lastname + ",")
if mk_bold:
name.append(format_names(source, fmt="strong"))
else:
name.append(source["name"].get("firstname", ""))
name.append(vonaf)
return " ".join(name)
def sort_places(stat_table, route):
"""Translate place names and sort list."""
# Work in progress! Waiting for translation list.
# Or should this be part of the data instead??
place_translations = {
"Göteborg": "Gothenburg"
}
if "place" in route.rule:
lang = "en"
else:
lang = "sv"
if lang == "en":
for d in stat_table:
d["display_name"] = place_translations.get(d["name"], d["name"])
else:
for d in stat_table:
d["display_name"] = d["name"]
stat_table.sort(key=lambda x: x.get("name").strip())
return stat_table
def mk_links(text):
"""Fix display of links within an article text."""
# TODO markdown should fix this itself
try:
text = re.sub(r"\[\]\((.*?)\)", r"[\1](\1)", text)
for link in re.findall(r"\]\((.*?)\)", text):
text = re.sub(r"\(%s\)" % link, "(%s)" % url_for("views.article_index_" + g.language, search=link), text)
except Exception:
        # If there are parentheses within the links, problems will occur.
text = text
return text
def unescape(text):
"""Unescape some html chars."""
text = re.sub(">", r">", text)
text = re.sub("'", r"'", text)
return text
def aggregate_by_type(items, use_markdown=False):
if not isinstance(items, list):
items = [items]
types = {}
for item in items:
if "type" in item:
t = item["type"]
if t:
if t not in types:
types[t] = []
if use_markdown and "description" in item:
item["description"] = markdown_html(item["description"])
item["description_eng"] = markdown_html(item.get("description_eng", ""))
types[t].append(item)
return list(types.items())
def collapse_kids(source):
unkown_kids = 0
for relation in source.get("relation", []):
if relation.get("type") == "Barn" and len(list(relation.keys())) == 1:
unkown_kids += 1
relation["hide"] = True
if unkown_kids:
source["collapsedrelation"] = [{"type": "Barn", "count": unkown_kids}]
def make_placelist(hits, placename, lat, lon):
grouped_results = {}
for hit in hits["hits"]:
source = hit["_source"]
hit["url"] = source.get("url") or hit["_id"]
placelocations = {gettext("Residence"): source.get("places", []),
gettext("Place of activity"): source.get("occupation", []),
gettext("Place of education"): source.get("education", []),
gettext("Contacts"): source.get("contact", []),
gettext("Birthplace"): [source.get("lifespan", {}).get("from", {})],
gettext("Place of death"): [source.get("lifespan", {}).get("to", {})]
}
for ptype, places in list(placelocations.items()):
names = dict([(place.get("place", {}).get("place", "").strip(),
place.get("place", {}).get("pin", {})) for place in places])
            # Check if the name and the lat, lon are correct
            # (We can't ask Karp about this, since it would be a nested query)
if placename in names:
# Coordinates! If coordinates are used, uncomment the two lines below
# if names[placename].get("lat") == float(lat)\
# and names[placename].get("lon") == float(lon):
if ptype not in grouped_results:
grouped_results[ptype] = []
grouped_results[ptype].append((join_name(hit["_source"], mk_bold=True), hit))
# else:
# # These two lines should be removed, but are kept for debugging
# if "Fel" not in grouped_results: grouped_results["Fel"] = []
# grouped_results["Fel"].append((join_name(source), hit))
# Sort result dictionary alphabetically into list
collator = icu.Collator.createInstance(icu.Locale("sv_SE.UTF-8"))
for _n, items in list(grouped_results.items()):
items.sort(key=lambda x: collator.getSortKey(x[0]))
grouped_results = sorted(list(grouped_results.items()), key=lambda x: collator.getSortKey(x[0]))
# These two lines should be removed, but are kept for debugging
# if not grouped_results:
# grouped_results = [("Fel", [(join_name(hit["_source"]), hit) for hit in hits["hits"]])]
return grouped_results
def is_email_address_valid(email):
"""
Validate the email address using a regex.
    It must not include any whitespace, must have exactly one "@" and at least one "." after the "@".
"""
if " " in email:
return False
# if not re.match("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$", email):
# More permissive regex: does allow non-ascii chars
if not re.match(r"[^@]+@[^@]+\.[^@]+", email):
return False
return True
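# Quick illustration of the rule above, using hypothetical addresses: whitespace
# is rejected outright, otherwise the permissive regex applies.
assert is_email_address_valid("anna@example.com") is True
assert is_email_address_valid("anna example@com") is False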
def is_ascii(s):
"""Check if s contains of ASCII-characters only."""
return all(ord(c) < 128 for c in s)
def get_lang_text(json_swe, json_eng, ui_lang):
"""Get text in correct language if available."""
if ui_lang == "en":
if json_eng:
return json_eng
else:
return json_swe
else:
return json_swe
def get_shorttext(text):
"""Get the initial 200 characters of text. Remove HTML and line breaks."""
shorttext = re.sub(r"<.*?>|\n|\t", " ", text)
shorttext = shorttext.strip()
shorttext = re.sub(r" ", " ", shorttext)
return shorttext[:200]
def get_org_name(organisation):
"""Get short name for organisation (--> org.)."""
if organisation.endswith("organisation") or organisation.endswith("organization"):
return organisation[:-9] + "."
else:
return organisation
def lowersorted(xs):
"""Sort case-insentitively."""
return sorted(xs, key=lambda x: x[0].lower())
def get_infotext(text, rule):
"""
Get infotext in correct language with Swedish as fallback.
text = key in the infotext dict
rule = request.url_rule.rule
"""
textobj = static_info.infotexter.get(text)
if "sv" in rule:
return textobj.get("sv")
else:
return textobj.get("en", textobj.get("sv"))
def log(data, msg=""):
"""Log data to stderr."""
if msg:
sys.stderr.write("\n" + msg + ": " + str(data) + "\n")
else:
sys.stderr.write("\n" + str(data) + "\n")
def swedish_translator(firstname, lastname):
"""Check if 'firstname lastname' is a Swedish translator."""
swedish_translators = [
"Linnea Åshede"
]
name = firstname + " " + lastname
if name in swedish_translators:
return True
return False
def get_littb_id(skbl_url):
"""Get Litteraturbanken ID for an article if available."""
if not skbl_url:
return None
littb_url = ("https://litteraturbanken.se/api/list_all/author?filter_and={%22wikidata.skbl_link%22:%20%22" +
skbl_url + "%22}&include=authorid")
try:
# Fake the user agent to avoid getting a 403
r = Request(littb_url, headers={"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"})
contents = urlopen(r).read()
except Exception as e:
log("Could not open URL %s. Error: %s" % (e, littb_url))
return None
resp = json.loads(contents)
if resp.get("data"):
return resp["data"][0]["authorid"]
return None
| 34.244514
| 123
| 0.592045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,173
| 0.328149
|
77054d9b1fb16933bc175b8744bb05cb5f7182d5
| 5,037
|
py
|
Python
|
boundaries/migrations/0001_initial.py
|
MinnPost/represent-boundaries
|
17f65d34a6ed761e72dbdf13ea78b64fdeaa356d
|
[
"MIT"
] | 20
|
2015-03-17T09:10:39.000Z
|
2020-06-30T06:08:08.000Z
|
boundaries/migrations/0001_initial.py
|
MinnPost/represent-boundaries
|
17f65d34a6ed761e72dbdf13ea78b64fdeaa356d
|
[
"MIT"
] | 14
|
2015-04-24T17:22:00.000Z
|
2021-06-22T16:50:24.000Z
|
boundaries/migrations/0001_initial.py
|
MinnPost/represent-boundaries
|
17f65d34a6ed761e72dbdf13ea78b64fdeaa356d
|
[
"MIT"
] | 16
|
2015-04-27T23:32:46.000Z
|
2020-07-05T11:18:04.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class JSONField(models.TextField):
"""Mocks jsonfield 0.92's column-type behaviour"""
def db_type(self, connection):
if connection.vendor == 'postgresql' and connection.pg_version >= 90300:
return 'json'
else:
return super(JSONField, self).db_type(connection)
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Boundary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('set_name', models.CharField(max_length=100, help_text='A generic singular name for the boundary.')),
('slug', models.SlugField(max_length=200, help_text="The boundary's unique identifier within the set, used as a path component in URLs.")),
('external_id', models.CharField(max_length=64, help_text='An identifier of the boundary, which should be unique within the set.')),
('name', models.CharField(db_index=True, max_length=192, help_text='The name of the boundary.')),
('metadata', JSONField(default=dict, help_text='The attributes of the boundary from the shapefile, as a dictionary.', blank=True)),
('shape', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326, help_text='The geometry of the boundary in EPSG:4326.')),
('simple_shape', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326, help_text='The simplified geometry of the boundary in EPSG:4326.')),
('centroid', django.contrib.gis.db.models.fields.PointField(srid=4326, help_text='The centroid of the boundary in EPSG:4326.', null=True)),
('extent', JSONField(blank=True, help_text='The bounding box of the boundary as a list like [xmin, ymin, xmax, ymax] in EPSG:4326.', null=True)),
('label_point', django.contrib.gis.db.models.fields.PointField(spatial_index=False, srid=4326, blank=True, help_text='The point at which to place a label for the boundary in EPSG:4326, used by represent-maps.', null=True)),
],
options={
'verbose_name_plural': 'boundaries',
'verbose_name': 'boundary',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BoundarySet',
fields=[
('slug', models.SlugField(primary_key=True, help_text="The boundary set's unique identifier, used as a path component in URLs.", serialize=False, max_length=200, editable=False)),
('name', models.CharField(max_length=100, help_text='The plural name of the boundary set.', unique=True)),
('singular', models.CharField(max_length=100, help_text='A generic singular name for a boundary in the set.')),
('authority', models.CharField(max_length=256, help_text='The entity responsible for publishing the data.')),
('domain', models.CharField(max_length=256, help_text='The geographic area covered by the boundary set.')),
('last_updated', models.DateField(help_text='The most recent date on which the data was updated.')),
('source_url', models.URLField(help_text='A URL to the source of the data.', blank=True)),
('notes', models.TextField(help_text='Free-form text notes, often used to describe changes that were made to the original source data.', blank=True)),
('licence_url', models.URLField(help_text='A URL to the licence under which the data is made available.', blank=True)),
('extent', JSONField(blank=True, help_text="The set's boundaries' bounding box as a list like [xmin, ymin, xmax, ymax] in EPSG:4326.", null=True)),
('start_date', models.DateField(blank=True, help_text="The date from which the set's boundaries are in effect.", null=True)),
('end_date', models.DateField(blank=True, help_text="The date until which the set's boundaries are in effect.", null=True)),
('extra', JSONField(blank=True, help_text='Any additional metadata.', null=True)),
],
options={
'ordering': ('name',),
'verbose_name_plural': 'boundary sets',
'verbose_name': 'boundary set',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='boundary',
name='set',
field=models.ForeignKey(related_name='boundaries', to='boundaries.BoundarySet', on_delete=models.CASCADE, help_text='The set to which the boundary belongs.'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='boundary',
unique_together=set([('slug', 'set')]),
),
]
| 65.415584
| 239
| 0.6351
| 4,884
| 0.969625
| 0
| 0
| 0
| 0
| 0
| 0
| 1,949
| 0.386937
|
7706515165e3817a767c32b6ac93a3b7c85f245e
| 1,267
|
py
|
Python
|
gitz/git/reference_branch.py
|
rec/gitz
|
cbb07f99dd002c85b5ca95896b33d03150bf9282
|
[
"MIT"
] | 24
|
2019-07-26T03:57:16.000Z
|
2021-11-22T22:39:13.000Z
|
gitz/git/reference_branch.py
|
rec/gitz
|
cbb07f99dd002c85b5ca95896b33d03150bf9282
|
[
"MIT"
] | 212
|
2019-06-13T13:44:26.000Z
|
2020-06-02T17:59:51.000Z
|
gitz/git/reference_branch.py
|
rec/gitz
|
cbb07f99dd002c85b5ca95896b33d03150bf9282
|
[
"MIT"
] | 2
|
2019-08-09T13:55:38.000Z
|
2019-09-07T11:17:59.000Z
|
from . import functions
from ..program import ARGS
from ..program import ENV
from ..program import PROGRAM
def reference_branch(remote_branches=None):
remote_branches = remote_branches or functions.remote_branches()
remote, *rest = ARGS.reference_branch.split('/', maxsplit=1)
if rest:
if remote not in remote_branches:
PROGRAM.exit('Unknown remote', remote)
branch = rest[0]
if branch not in remote_branches[remote]:
PROGRAM.exit(
'Unknown reference branch', branch, 'in remote', remote
)
return remote, branch
branches = [remote] if remote else ENV.reference_branches()
if len(remote_branches) == 1:
remotes = remote_branches
else:
remotes = [r for r in ENV.upstream() if r in remote_branches]
for remote in remotes:
for branch in branches:
if branch in remote_branches[remote]:
return remote, branch
PROGRAM.exit('Cannot determine upstream remote')
def add_arguments(parser):
parser.add_argument(
'-r', '--reference-branch', default='', help=_HELP_REFERENCE_BRANCH
)
_HELP_REFERENCE_BRANCH = (
'Branch to create from, in the form ``branch`` or ``remote/branch``'
)
| 28.155556
| 75
| 0.651144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 184
| 0.145225
|
7707130bae4f273be796d5022abf873f7542914d
| 89
|
py
|
Python
|
cookies/apps.py
|
hamishwillee/http_tester_site
|
5c9fa6840c7931f4a7dbd669616cb7b06e29c068
|
[
"MIT"
] | null | null | null |
cookies/apps.py
|
hamishwillee/http_tester_site
|
5c9fa6840c7931f4a7dbd669616cb7b06e29c068
|
[
"MIT"
] | 8
|
2021-03-19T10:14:39.000Z
|
2022-03-12T00:24:41.000Z
|
cookies/apps.py
|
ADpDinamo/site
|
d7313cd6c151a381ccc803b81768673587cb8d45
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class CookiesConfig(AppConfig):
name = 'cookies'
| 14.833333
| 33
| 0.752809
| 52
| 0.58427
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.101124
|
77076be0aee637dc1db01b51cb1e1bf652954a05
| 7,016
|
py
|
Python
|
src/single_pendulum.py
|
dpopchev/Computation_python
|
790bfc451b003ecbc626867035dc03a7b55d1fb9
|
[
"MIT"
] | null | null | null |
src/single_pendulum.py
|
dpopchev/Computation_python
|
790bfc451b003ecbc626867035dc03a7b55d1fb9
|
[
"MIT"
] | null | null | null |
src/single_pendulum.py
|
dpopchev/Computation_python
|
790bfc451b003ecbc626867035dc03a7b55d1fb9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# do not hesitate to debug
import pdb
# python computation modules and visualization
import numpy as np
import sympy as sy
import scipy as sp
import matplotlib.pyplot as plt
from sympy import Q as syQ
sy.init_printing(use_latex=True,forecolor="White")
def Lyapunov_stability_test_linear(ev):
''' test if a linear homogeneous system with constant coefficients is stable
in the sense of Lyapunov by checking the theorem conditions against the
provided eigenvalues
source https://www.math24.net/stability-theory-basic-concepts/
    TODO take eigenvalue multiplicity into account '''
# the criteria result will be saved here
r = None
# system is asymptotically stable if only if
# all eigenvalues have negative real parts
    r = 'asymptotically stable' if ( not r
        and all(sy.ask(syQ.negative(sy.re(_))) for _ in ev) ) else r
# system is stable if and only if
# all eigenvalues have nonpositive real parts
# TODO incorporate algebraic and geometric multiplicity criteria
    r = 'stable' if ( not r
        and all(sy.ask(syQ.nonpositive(sy.re(_))) for _ in ev) ) else r
# system is unstable if
# at least one eigenvalue has positive real part
# TODO incorporate algebraic and geometric multiplicity criteria
    r = 'unstable' if ( not r
        and any(sy.ask(syQ.positive(sy.re(_))) for _ in ev) ) else r
return r
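# Minimal usage sketch (added illustration, not part of the original script):
# eigenvalues with strictly negative real parts give 'asymptotically stable',
# while a single eigenvalue with positive real part gives 'unstable'.
_demo_stable = Lyapunov_stability_test_linear([sy.Integer(-1), sy.Integer(-2)])
_demo_unstable = Lyapunov_stability_test_linear([sy.Integer(-1), sy.Integer(3)])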
def Lyapunov_stability_test_nonlinear(ev):
    ''' test if the fixed point of a nonlinear, structurally stable system
    is stable, unstable, critical or impossible to decide using the first-order
    Lyapunov criteria, in which case other methods are needed
    TODO the tests only apply to structurally stable systems, i.e. fixed points
    with purely imaginary eigenvalues are not taken into account
source https://www.math24.net/stability-first-approximation/ '''
# the criteria result will be saved here
r = None
# system is asymptotically stable if only if
# all eigenvalues have negative real parts
    r = 'asymptotically stable' if ( not r
        and all(sy.ask(syQ.negative(sy.re(_))) for _ in ev) ) else r
# system is unstable if
# at least one eigenvalue has positive real part
    r = 'unstable' if ( not r
        and any(sy.ask(syQ.positive(sy.re(_))) for _ in ev) ) else r
# if all eigenvalues have non-positive real parts,
# and there is at least one eigenvalue with zero real part
# then fixed point can be stable or unstable and other methods should be
# used, thus mark the point critical
    r = 'critical' if ( not r
        and all(sy.ask(syQ.nonpositive(sy.re(_))) for _ in ev)
        and any(sy.re(_) == 0 for _ in ev)
        ) else r
return r if r else 'not decided'
def RouthHurwitz_Criterion(p):
''' return principal minors of Hurwitz matrix as sympy polynomials, which if
all are positive it is sufficient condition for asymptotic stability
NOTE: if all n-1 principal minors are positive, and nth minor is zero,
the system is at the boundary of stability, with two cases:
a_n = 0 -- one of the root is zero and system is on the boundary of
aperiodic stability
n-1 minor is zero -- there are two complex conjugate imaginary roots and
the system is at boundary of oscillatory stability
source https://www.math24.net/routh-hurwitz-criterion/ '''
# initial key and index pair needed to create Hurwitz matrix via sympy banded
# each entry is of the type [ dictionary key, coefficient slice ]
idxs = [ [ 1, 0 ] ]
    # generate the next key by decrementing by 1
genKey = lambda _: _ - 1
    # generate the next index by incrementing by 1 if the key is nonnegative,
    # or by 2 if the key is negative
genSlice = lambda _, __: __ + 1 if _ >= 0 else __ + 2
    # fill the remaining pairs w.r.t. the polynomial degree - 1, as we already have
# one entry
for _ in range(p.degree() - 1):
key = genKey(idxs[-1][0])
idxs.append( [ key, genSlice(key, idxs[-1][1] ) ] )
# create the matrix itself
H = sy.banded({ k: p.all_coeffs()[v:] for k, v in idxs })
return [ H[:_, :_].det() if _ > 0 else p.LC() for _ in range(0, p.degree()+1) ]
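# Minimal usage sketch (added illustration; assumes sy.banded is available,
# i.e. a reasonably recent sympy). For s**2 + 3*s + 2 every returned principal
# minor should be positive, which is sufficient for asymptotic stability.
_s_demo = sy.symbols('s')
_hurwitz_minors_demo = RouthHurwitz_Criterion(sy.Poly(_s_demo**2 + 3*_s_demo + 2, _s_demo))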
# define independent variable
t = sy.symbols('t', real=True)
# define dependent variables individually and pack them into a variable
theta, omega = sy.symbols(r'\theta, \omega', real = True)
Y = theta, omega
# define free parameters of the system and pack them into a variable
g, L = sy.symbols('g, L', positive = True)
parms = g, L
# create rhs as sympy expressions
theta_dt = omega
omega_dt = -(g/L)*sy.sin(theta)
rhs = {}
rhs['sympy'] = sy.Matrix([theta_dt, omega_dt])
# convert the sympy matrix function to numpy function with usual signature
rhs['numpy'] = sy.lambdify((t, Y, *parms), rhs['sympy'], 'numpy')
# create Jacobian matrix as sympy expression
J = {}
J['sympy'] = rhs['sympy'].jacobian(Y)
# convert the sympy Jacobian expression to numpy function with usual signature
J['numpy'] = sy.lambdify((t, Y, *parms), J['sympy'])
# calculate rhs fixed points
fixed_points = sy.solve(rhs['sympy'], Y)
# substitute each fixed point in the Jacobian
# and calculate the eigenvalues
J_fixed = {}
for i, fp in enumerate(fixed_points):
J_subs = J['sympy'].subs( [(y, v) for y, v in zip(Y, fp)])
#J_eigenvals = J_subs.eigenvals(multiple=True)
J_eigenvals = J_subs.eigenvals()
# save the fixed point results in more details
# most importantly the eigenvalues and their corresponding multiplicity
J_fixed[i] = {
'fixed point': fp,
'subs': J_subs,
'eigenvalues': list(J_eigenvals.keys()),
'multiplicity': list(J_eigenvals.values())
}
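# Added sketch (not in the original script): classify each fixed point with the
# first-order Lyapunov test defined above. For the pendulum this is expected to
# report 'critical' at (0, 0) (purely imaginary eigenvalues) and 'unstable' at
# (pi, 0) (one eigenvalue with positive real part).
for _idx, _fp_data in J_fixed.items():
    _fp_data['stability'] = Lyapunov_stability_test_nonlinear(_fp_data['eigenvalues'])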
def plot_phase_portrait(ax, rhs, section, args=(), n_points=25):
''' plot section of phase space of a field defined via its rhs '''
# create section grid
x_grid, y_grid = np.meshgrid(
np.linspace( section[0][0], section[0][1], n_points ),
np.linspace( section[1][0], section[1][1], n_points )
)
# calculate rhs on the grid
xx, yy = rhs(None, ( x_grid, y_grid ), *args)
# compute vector norms and make line width proportional to them
# i.e. greater the vector length, the thicker the line
# TODO not sure why rhs returns different shape
vector_norms = np.sqrt(xx[0]**2 + yy[0]**2)
lw = 0.25 + 3*vector_norms/vector_norms.max()
# plot the phase portrait
ax.streamplot(
x_grid, y_grid,
xx[0], yy[0],
linewidth = lw,
arrowsize = 1.2,
density = 1
)
return ax
def plot_main():
fig, ax = plt.subplots()
ax = plot_phase_portrait(
ax,
rhs['numpy'],
(
( -np.pi, np.pi ),
( -2*np.pi, 2*np.pi)
),
args = ( 5, 1 ),
)
if __name__ == '__main__':
plot_main()
| 34.392157
| 83
| 0.651511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,837
| 0.546893
|
770880f1a07d4982b42b16b52ebec66b0adb1c55
| 1,690
|
py
|
Python
|
web/accounts/views.py
|
drejkim/reading-quantified-server
|
54cf83629ae0139cbf4b9dc82b27a54056afef36
|
[
"MIT"
] | 2
|
2020-10-30T23:46:44.000Z
|
2021-02-17T09:11:52.000Z
|
web/accounts/views.py
|
estherjk/reading-quantified-server
|
54cf83629ae0139cbf4b9dc82b27a54056afef36
|
[
"MIT"
] | 7
|
2020-05-09T17:15:51.000Z
|
2021-09-22T18:16:55.000Z
|
web/accounts/views.py
|
drejkim/reading-quantified-server
|
54cf83629ae0139cbf4b9dc82b27a54056afef36
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework import mixins
from rest_framework import permissions
from rest_framework import viewsets
from rest_framework.decorators import action
from .models import User
from .serializers import UserSerializer
# Create your views here.
class UserViewSet(mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""
API endpoint for users. Can only view / edit yourself!
"""
queryset = User.objects.all()
serializer_class = UserSerializer
def get_permissions(self):
"""
Custom permissions. Only admins can view everyone.
"""
if self.action == 'list':
self.permission_classes = [permissions.IsAdminUser, ]
elif self.action == 'retrieve':
self.permission_classes = [permissions.IsAdminUser, ]
return super(self.__class__, self).get_permissions()
# Reference: https://stackoverflow.com/a/58168950/13279459
@action(detail=False, methods=['get', 'put', 'patch', 'delete'])
def me(self, request):
"""
Custom /users/me endpoint.
"""
self.kwargs['pk'] = request.user.pk
if request.method == 'GET':
return self.retrieve(request)
elif request.method == 'PUT':
return self.partial_update(request)
elif request.method == 'PATCH':
return self.partial_update(request)
elif request.method == 'DELETE':
return self.destroy(request)
else:
raise Exception('Not implemented')
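# Hedged sketch (the router below is illustrative; the project's real URL wiring
# is not shown here): a DRF router exposes the standard detail routes plus the
# custom /users/me action declared above.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register(r'users', UserViewSet, basename='user')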
| 32.5
| 68
| 0.63432
| 1,406
| 0.831953
| 0
| 0
| 590
| 0.349112
| 0
| 0
| 364
| 0.215385
|
77089cdd70ca47f3aa10526e20e9f8906eab1767
| 2,197
|
py
|
Python
|
fixit/common/pseudo_rule.py
|
sk-/Fixit
|
ee0c2c9699f3cf5557b7f1210447c68be1542024
|
[
"Apache-2.0"
] | 313
|
2020-09-02T20:35:57.000Z
|
2022-03-29T07:55:37.000Z
|
fixit/common/pseudo_rule.py
|
sk-/Fixit
|
ee0c2c9699f3cf5557b7f1210447c68be1542024
|
[
"Apache-2.0"
] | 93
|
2020-09-02T19:51:22.000Z
|
2022-01-19T18:29:46.000Z
|
fixit/common/pseudo_rule.py
|
sk-/Fixit
|
ee0c2c9699f3cf5557b7f1210447c68be1542024
|
[
"Apache-2.0"
] | 46
|
2020-09-02T21:16:57.000Z
|
2022-03-16T18:49:37.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import ast
import io
import tokenize
from pathlib import Path
from typing import Iterable, Optional
from fixit.common.report import BaseLintRuleReport
class PseudoContext:
"""
Contains information about the file that `PseudoLintRule.lint_file` should evaluate.
"""
def __init__(
self,
file_path: Path,
source: bytes,
tokens: Optional[Iterable[tokenize.TokenInfo]] = None,
ast_tree: Optional[ast.Module] = None,
) -> None:
self.file_path: Path = file_path
self.source: bytes = source
self._tokens: Optional[Iterable[tokenize.TokenInfo]] = tokens
self._ast_tree: Optional[ast.Module] = ast_tree
@property
def tokens(self) -> Iterable[tokenize.TokenInfo]:
tokens = self._tokens
if tokens is not None:
return tokens
tokens = tuple(tokenize.tokenize(io.BytesIO(self.source).readline))
self._tokens = tokens
return tokens
@property
def ast_tree(self) -> ast.Module:
ast_tree = self._ast_tree
if ast_tree is not None:
return ast_tree
ast_tree = ast.parse(self.source)
self._ast_tree = ast_tree
return ast_tree
class PseudoLintRule(abc.ABC):
"""
Represents a lint rule (or a group of lint rules) that can't be represented by a
normal lint rule. These "pseudo" lint rules receive information about the file from
    the `PseudoContext`.
This API is much more flexible than the normal lint rule API, but that comes at a
(potentially large) performance cost. Because the lint framework does not control
traversal of the syntax tree, it cannot batch the execution of these rules alongside
other lint rules.
This API is used for compatibility with Flake8 rules.
"""
def __init__(self, context: PseudoContext) -> None:
self.context: PseudoContext = context
@abc.abstractmethod
def lint_file(self) -> Iterable[BaseLintRuleReport]:
...
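# Hypothetical illustration (not part of Fixit itself): a no-op pseudo rule that
# exercises the lazily tokenized stream from PseudoContext but reports nothing,
# since constructing a concrete BaseLintRuleReport is outside the scope of this
# sketch.
class NoopPseudoLintRule(PseudoLintRule):
    def lint_file(self) -> Iterable[BaseLintRuleReport]:
        _token_count = sum(1 for _ in self.context.tokens)  # forces tokenization once
        return ()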
| 30.513889
| 88
| 0.680018
| 1,850
| 0.842057
| 0
| 0
| 608
| 0.276741
| 0
| 0
| 825
| 0.375512
|
770a2f395758f1a8fbdf72af2cefdb909802a41f
| 356
|
py
|
Python
|
homeschool/referrals/tests/test_models.py
|
chriswedgwood/homeschool
|
d5267b13154aaa52c9c3edbf06b251f123583ae8
|
[
"MIT"
] | 154
|
2019-12-24T17:45:44.000Z
|
2022-03-30T23:03:06.000Z
|
homeschool/referrals/tests/test_models.py
|
chriswedgwood/homeschool
|
d5267b13154aaa52c9c3edbf06b251f123583ae8
|
[
"MIT"
] | 397
|
2019-11-05T03:23:45.000Z
|
2022-03-31T04:51:55.000Z
|
homeschool/referrals/tests/test_models.py
|
chriswedgwood/homeschool
|
d5267b13154aaa52c9c3edbf06b251f123583ae8
|
[
"MIT"
] | 44
|
2020-02-24T13:08:52.000Z
|
2022-02-24T05:03:13.000Z
|
from homeschool.referrals.tests.factories import ReferralFactory
from homeschool.test import TestCase
class TestReferral(TestCase):
def test_factory(self):
referral = ReferralFactory()
assert referral.referring_user is not None
assert referral.created_at is not None
assert referral.status == referral.Status.PENDING
| 29.666667
| 64
| 0.752809
| 251
| 0.705056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
770aad7e1ff56e67c95983849d2bf6bbbc1649fe
| 284
|
py
|
Python
|
slackwebhook/__init__.py
|
FoundryGroup/Slack-Webhook
|
1a71f68eec876684ffaa7ba936bbc099f55dfb81
|
[
"MIT"
] | null | null | null |
slackwebhook/__init__.py
|
FoundryGroup/Slack-Webhook
|
1a71f68eec876684ffaa7ba936bbc099f55dfb81
|
[
"MIT"
] | null | null | null |
slackwebhook/__init__.py
|
FoundryGroup/Slack-Webhook
|
1a71f68eec876684ffaa7ba936bbc099f55dfb81
|
[
"MIT"
] | null | null | null |
################################################################################
# Python package __init__.py file.
#
# Author: Carl Cortright
# Date: 12/20/2016
#
################################################################################
from slackwebhook import slackwebhook
| 28.4
| 80
| 0.323944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 238
| 0.838028
|
770b052dd7eccaa42dd94c9096322a70a4b8491d
| 229
|
py
|
Python
|
scripts/fasta2vcf.py
|
jodyphelan/pathogenseq
|
2e04190f25063d722ef653e819b94eb66407ea8d
|
[
"MIT"
] | null | null | null |
scripts/fasta2vcf.py
|
jodyphelan/pathogenseq
|
2e04190f25063d722ef653e819b94eb66407ea8d
|
[
"MIT"
] | null | null | null |
scripts/fasta2vcf.py
|
jodyphelan/pathogenseq
|
2e04190f25063d722ef653e819b94eb66407ea8d
|
[
"MIT"
] | 1
|
2018-05-11T14:54:51.000Z
|
2018-05-11T14:54:51.000Z
|
#! /usr/bin/env python
import sys
import pathogenseq as ps
ref_file = sys.argv[1]
query_file = sys.argv[2]
prefix = sys.argv[3]
ps.mauve_call_variants(ref_file,query_file,prefix)
cmd = "bgzip -f %s.vcf" % prefix
ps.run_cmd(cmd)
| 20.818182
| 50
| 0.737991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.170306
|
770b263fbdf34c06e41fa87b5529ee3e705b5a07
| 20
|
py
|
Python
|
test/__init__.py
|
miguelcarrasco/anothercryptosolver
|
57ac6be024574a46492d1e84782ff02763e57010
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
miguelcarrasco/anothercryptosolver
|
57ac6be024574a46492d1e84782ff02763e57010
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
miguelcarrasco/anothercryptosolver
|
57ac6be024574a46492d1e84782ff02763e57010
|
[
"MIT"
] | null | null | null |
__author__ = 'deon'
| 10
| 19
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.3
|
770c52f41e079a4cb403bba6dcadc3852fc8a850
| 231
|
py
|
Python
|
job_scheduler/cache/__init__.py
|
konkolorado/job-scheduler
|
e76b24d0592d9d1f62b5a1525b6a152b9983b2fa
|
[
"MIT"
] | null | null | null |
job_scheduler/cache/__init__.py
|
konkolorado/job-scheduler
|
e76b24d0592d9d1f62b5a1525b6a152b9983b2fa
|
[
"MIT"
] | null | null | null |
job_scheduler/cache/__init__.py
|
konkolorado/job-scheduler
|
e76b24d0592d9d1f62b5a1525b6a152b9983b2fa
|
[
"MIT"
] | 1
|
2021-08-09T15:28:49.000Z
|
2021-08-09T15:28:49.000Z
|
from job_scheduler.cache.base import ScheduleCache
from job_scheduler.cache.fake import FakeScheduleCache
from job_scheduler.cache.redis import RedisScheduleCache
all = ["ScheduleCache", "RedisScheduleCache", "FakeScheduleCache"]
| 38.5
| 66
| 0.848485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 54
| 0.233766
|
770c61ce8220d1f9ab5e398ccfbfd93f6911fe13
| 317
|
py
|
Python
|
programming/python/ex004.py
|
Vinicius-Moraes20/personal-projects
|
c041909ab1c66eeca11768f8f7944eb351c8b8e7
|
[
"MIT"
] | null | null | null |
programming/python/ex004.py
|
Vinicius-Moraes20/personal-projects
|
c041909ab1c66eeca11768f8f7944eb351c8b8e7
|
[
"MIT"
] | null | null | null |
programming/python/ex004.py
|
Vinicius-Moraes20/personal-projects
|
c041909ab1c66eeca11768f8f7944eb351c8b8e7
|
[
"MIT"
] | null | null | null |
valor = input("Digite algo: ")
print("É do tipo", type(valor))
print("Valor numérico:", valor.isnumeric())
print("Valor Alfa:", valor.isalpha())
print("Valor Alfanumérico:", valor.isalnum())
print("Valor ASCII:", valor.isascii())
print("Valor Decimal", valor.isdecimal())
print("Valor Printavel", valor.isprintable())
| 39.625
| 45
| 0.712934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 126
| 0.39375
|
770d1178d917aa0b3ade69999920d0f07b37f63c
| 447
|
py
|
Python
|
backend/src/util/observable.py
|
r2binx/heimboard
|
42059d367e5b15c4910e61f4be0e3b462da8d5f7
|
[
"MIT"
] | 6
|
2021-12-20T21:36:03.000Z
|
2022-03-30T16:04:54.000Z
|
backend/src/util/observable.py
|
r2binx/heimboard
|
42059d367e5b15c4910e61f4be0e3b462da8d5f7
|
[
"MIT"
] | 16
|
2021-12-20T20:14:43.000Z
|
2022-01-26T12:43:59.000Z
|
backend/src/util/observable.py
|
r2binx/heimboard
|
42059d367e5b15c4910e61f4be0e3b462da8d5f7
|
[
"MIT"
] | 1
|
2022-01-25T20:59:35.000Z
|
2022-01-25T20:59:35.000Z
|
from typing import List
class Observable:
_observers: List = []
def __init__(self):
self._observers = []
def subscribe(self, observer):
self._observers.append(observer)
def notify_observers(self, *args, **kwargs):
for obs in self._observers:
obs.notify(self, *args, **kwargs)
def unsubscribe(self, observer):
self._observers.remove(observer)
def start(self):
pass
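# Hypothetical usage sketch (the subscriber below is illustrative, not part of
# heimboard): any object with a `notify(observable, *args, **kwargs)` method can
# subscribe, because that is exactly what notify_observers calls on it.
class PrintingObserver:
    def notify(self, observable, *args, **kwargs):
        print(f"notified by {observable!r}: args={args} kwargs={kwargs}")

# source = Observable()
# source.subscribe(PrintingObserver())
# source.notify_observers("changed", value=42)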
| 20.318182
| 48
| 0.621924
| 420
| 0.939597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
770d8aff527e695d052230658f4cc6a96df88def
| 26,579
|
py
|
Python
|
ae-tpcc-polyjuice-rl/training/PG.py
|
derFischer/Polyjuice
|
3ce467807822b5659efdd5759cae4563a9152b00
|
[
"Apache-2.0"
] | 23
|
2021-05-11T13:14:36.000Z
|
2022-03-23T05:59:07.000Z
|
ae-tpcc-polyjuice-rl/training/PG.py
|
derFischer/Polyjuice
|
3ce467807822b5659efdd5759cae4563a9152b00
|
[
"Apache-2.0"
] | 1
|
2021-08-16T07:37:18.000Z
|
2021-08-16T07:37:18.000Z
|
ae-tpcc-polyjuice-rl/training/PG.py
|
derFischer/Polyjuice
|
3ce467807822b5659efdd5759cae4563a9152b00
|
[
"Apache-2.0"
] | 1
|
2021-07-01T15:33:25.000Z
|
2021-07-01T15:33:25.000Z
|
#coding=utf-8
import numpy as np
import tensorflow as tf
import os
import sys
import time
import shutil
import re
import signal
import subprocess
import math
from Policy import *
np.set_printoptions(threshold=np.inf)
BASELINES = 1
class MultiBaseline:
def __init__(self, baseline_number):
self.baseline_number = baseline_number
self.baselines = [Baseline() for _ in range(baseline_number)]
self.reward_signal_access, self.reward_signal_wait, self.reward_signal_piece = [], [], []
self.reward_signal_wait_info1, self.reward_signal_wait_info2, self.reward_signal_wait_info3 = [], [], []
def __str__(self):
stri = ''
for i in range(self.baseline_number):
stri = stri + 'baseline number ' + str(i) + ' has reward ' + str(self.baselines[i].reward) + '\n'
stri = stri + str(self.baselines[i].sample) + '\n'
return stri
def insert_baseline(self, baseline):
if baseline > self.baselines[0]:
self.baselines[0].SetSampleWithAnotherBaseline(baseline)
self.baselines.sort()
def store_reward_signal(self, result):
self.reward_signal_access.extend(result[0])
self.reward_signal_wait.extend(result[1])
self.reward_signal_piece.extend(result[2])
self.reward_signal_wait_info1.extend(result[3])
self.reward_signal_wait_info2.extend(result[4])
self.reward_signal_wait_info3.extend(result[5])
def samples_different_action(self, access, wait, piece, waitinfo1, waitinfo2, waitinfo3):
        # get an all-True default form
result = Sample.default_different_action()
# get different actions
for j in range(self.baseline_number):
diff = self.baselines[j].different_action(\
access, wait, piece, waitinfo1, waitinfo2, waitinfo3)
for i in range(len(result)):
result[i] = result[i] & np.array(diff[i])
self.store_reward_signal(result)
def get_ratio(self, avg_reward):
rewards = []
for i in range(self.baseline_number):
reward_ = self.baselines[i].reward - avg_reward
if reward_ > 0:
rewards.append(reward_)
else:
rewards.append(0)
rewards = np.array(rewards)
if np.sum(rewards) == 0:
return [1 / self.baseline_number] * self.baseline_number
else:
return rewards / np.sum(rewards)
def calculate_reward(self, reward):
# ratio = self.get_ratio(np.mean(reward))
access_rs, wait_rs, piece_rs = \
[0] * (len(reward) * ACCESSE_SPACE), [0] * (len(reward) * WAIT_SPACE), [0] * (len(reward) * PIECE_SPACE)
waitinfo1_rs, waitinfo2_rs, waitinfo3_rs = \
[0] * (len(reward) * WAIT_SPACE), [0] * (len(reward) * WAIT_SPACE), [0] * (len(reward) * WAIT_SPACE)
# for i in range(self.baseline_number):
# calculate discount_reward for each slot
access_dr, wait_dr, piece_dr = [], [], []
waitinfo1_dr, waitinfo2_dr, waitinfo3_dr = [], [], []
for j in range(len(reward)):
for _ in range(ACCESSE_SPACE):
access_dr.append(reward[j])
for _ in range(PIECE_SPACE):
piece_dr.append(reward[j])
for _ in range(WAIT_SPACE):
wait_dr.append(reward[j])
waitinfo1_dr.append(reward[j])
waitinfo2_dr.append(reward[j])
waitinfo3_dr.append(reward[j])
avg_reward = np.mean(reward)
access_rs = np.array(access_dr) - avg_reward
wait_rs = np.array(wait_dr) - avg_reward
piece_rs = np.array(piece_dr) - avg_reward
waitinfo1_rs = (np.array(waitinfo1_dr) - avg_reward) * 5
waitinfo2_rs = (np.array(waitinfo2_dr) - avg_reward) * 2
waitinfo3_rs = (np.array(waitinfo3_dr) - avg_reward) * 2.5
# access_dr = np.array(access_dr) - self.baselines[i].reward
# wait_dr = np.array(wait_dr) - self.baselines[i].reward
# piece_dr = np.array(piece_dr) - self.baselines[i].reward
# waitinfo1_dr = np.array(waitinfo1_dr) - self.baselines[i].reward
# waitinfo2_dr = np.array(waitinfo2_dr) - self.baselines[i].reward
# waitinfo3_dr = np.array(waitinfo3_dr) - self.baselines[i].reward
# access_rs = access_rs + ratio[i] * access_dr * ((access_dr > 0) | self.reward_signal_access)
# wait_rs = wait_rs + ratio[i] * wait_dr * ((wait_dr > 0) | self.reward_signal_wait)
# piece_rs = piece_rs + ratio[i] * piece_dr * ((piece_dr > 0) | self.reward_signal_piece)
# waitinfo1_rs = waitinfo1_rs + ratio[i] * waitinfo1_dr * ((waitinfo1_dr > 0) | self.reward_signal_wait_info1)
# waitinfo2_rs = waitinfo2_rs + ratio[i] * waitinfo2_dr * ((waitinfo2_dr > 0) | self.reward_signal_wait_info2)
# waitinfo3_rs = waitinfo3_rs + ratio[i] * waitinfo3_dr * ((waitinfo3_dr > 0) | self.reward_signal_wait_info3)
return access_rs, wait_rs, piece_rs, waitinfo1_rs, waitinfo2_rs, waitinfo3_rs
def clear_signal(self):
self.reward_signal_access, self.reward_signal_wait, self.reward_signal_piece = [], [], []
self.reward_signal_wait_info1, self.reward_signal_wait_info2, self.reward_signal_wait_info3 = [], [], []
class Baseline:
def __init__(self, access = [], wait = [], piece = [], \
waitinfo1 = [], waitinfo2 = [], waitinfo3 = [], \
reward = 0):
if access == []:
self.set = False
else:
self.set = True
            # manually assign an opt setting for backoff
self.sample = Sample(access, wait, piece, waitinfo1, waitinfo2, waitinfo3, 6, [0,4,8,1,0,0,8,4,2,1,8,1,4,2,1,4,2,4])
self.reward = reward
def setSample(self, access, wait, piece, waitinfo1, waitinfo2, waitinfo3, reward):
self.set = True
self.sample.set_sample(access, wait, piece, waitinfo1, waitinfo2, waitinfo3)
self.reward = reward
def SetSampleWithAnotherBaseline(self, baseline):
self.setSample(baseline.sample.access, baseline.sample.wait, baseline.sample.piece, \
baseline.sample.wait_info1, baseline.sample.wait_info2, baseline.sample.wait_info3, \
baseline.reward)
def __lt__(self, r):
return self.reward < r.reward
def different_action(self, access, wait, piece, waitinfo1, waitinfo2, waitinfo3):
if self.set == False:
return Sample.default_different_action()
return self.sample.different_action(access, wait, piece, \
waitinfo1, waitinfo2, waitinfo3)
class PolicyGradient:
# initialize
def __init__(self, log_dir, kid_dir, learning_rate,rd,output_graph=False):
self.log_dir = log_dir
self.kid_dir = kid_dir
self.lr = learning_rate
self.reward_decay = rd
self.best_seen = 0
self.round_best = 0
self.round_mean = 0
self.round_worst = 0
self.round_std = 0
self.round_best_sample = None
self.baselines = MultiBaseline(BASELINES)
# to store observations, actions and corresponding rewards
self.access_p, self.wait_p, self.piece_p = [], [], []
self.wait_info1_p, self.wait_info2_p, self.wait_info3_p = [], [], []
self.ep_access_rs, self.ep_wait_rs, self.ep_piece_rs = [], [], []
self.ep_waitinfo1_rs, self.ep_waitinfo2_rs, self.ep_waitinfo3_rs = [], [], []
self.ep_access_act, self.ep_wait_act, self.ep_piece_act = [], [], []
self.ep_wait_info_act1, self.ep_wait_info_act2, self.ep_wait_info_act3 = [], [], []
self.samples_count = 0
self.policy = Policy()
self._build_net()
self.sess = tf.Session()
if output_graph:
tf.summary.FileWriter("logs/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.update_policy()
def clear_round_info(self):
self.round_best = 0
self.round_mean = 0
self.round_worst = 0
self.round_std = 0
self.round_best_sample = None
def _build_net(self):
with tf.name_scope('inputs'):
self.tf_access_vt = tf.placeholder(tf.float32, [None, ], name="access_value")
self.tf_wait_vt = tf.placeholder(tf.float32, [None, ], name="wait_value")
self.tf_piece_vt = tf.placeholder(tf.float32, [None, ], name="piece_value")
self.tf_wait_info_vt1 = tf.placeholder(tf.float32, [None, ], name="wait_info_value1")
self.tf_wait_info_vt2 = tf.placeholder(tf.float32, [None, ], name="wait_info_value2")
self.tf_wait_info_vt3 = tf.placeholder(tf.float32, [None, ], name="wait_info_value3")
self.tf_access_act = tf.placeholder(tf.int32, [None, ], name="access_act")
self.tf_wait_act = tf.placeholder(tf.int32, [None, ], name="wait_act")
self.tf_piece_act = tf.placeholder(tf.int32, [None, ], name="piece_act")
self.tf_wait_info_act1 = tf.placeholder(tf.int32, [None, ], name="wait_info_act1")
self.tf_wait_info_act2 = tf.placeholder(tf.int32, [None, ], name="wait_info_act2")
self.tf_wait_info_act3 = tf.placeholder(tf.int32, [None, ], name="wait_info_act3")
self.tf_samples_count = tf.placeholder(tf.float32, name='samples_count')
self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
self.access_action_v = tf.Variable(tf.random_normal(shape=[INPUT_SPACE, 2], mean=0, stddev=1), name='access_action_v')
self.wait_action_v = tf.Variable(tf.random_normal(shape=[WAIT_SPACE, 2], mean=0, stddev=1), name='wait_action_v')
self.piece_action_v = tf.Variable(tf.random_normal(shape=[PIECE_SPACE, 2], mean=0, stddev=1), name='piece_action_v')
self.wait_info_action1_v = tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[0]], mean=0, stddev=1), name='wait_info_action1_v')
self.wait_info_action2_v = tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[1]], mean=0, stddev=1), name='wait_info_action2_v')
self.wait_info_action3_v = tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[2]], mean=0, stddev=1), name='wait_info_action3_v')
self.access_action = tf.nn.softmax(self.access_action_v, axis = 1)
self.wait_action = tf.nn.softmax(self.wait_action_v, axis = 1)
self.piece_action = tf.nn.softmax(self.piece_action_v, axis = 1)
self.wait_info_action1 = tf.nn.softmax(self.wait_info_action1_v, axis = 1)
self.wait_info_action2 = tf.nn.softmax(self.wait_info_action2_v, axis = 1)
self.wait_info_action3 = tf.nn.softmax(self.wait_info_action3_v, axis = 1)
# self.access_action = tf.nn.softmax(tf.Variable(tf.random_normal(shape=[ACCESSE_SPACE, 2], mean=0, stddev=1), name='access_action'), axis = 1)
# self.wait_action = tf.nn.softmax(tf.Variable(tf.random_normal(shape=[WAIT_SPACE, 2], mean=0, stddev=1), name='wait_action'), axis = 1)
# self.piece_action = tf.nn.softmax(tf.Variable(tf.random_normal(shape=[PIECE_SPACE, 2], mean=0, stddev=1), name='piece_action'), axis = 1)
# self.wait_info_action1 = tf.nn.softmax(tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[0]], mean=0, stddev=1), name='wait_info_action1'), axis = 1)
# self.wait_info_action2 = tf.nn.softmax(tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[1]], mean=0, stddev=1), name='wait_info_action2'), axis = 1)
# self.wait_info_action3 = tf.nn.softmax(tf.Variable(tf.random_normal(shape=[WAIT_SPACE, wait_info_act_count[2]], mean=0, stddev=1), name='wait_info_action3'), axis = 1)
with tf.name_scope('reward'):
            # add a very small number to the probability to avoid taking the log of a very small number and outputting NaN
self.access_action = tf.add(self.access_action, 0.000001)
self.wait_action = tf.add(self.wait_action, 0.000001)
self.piece_action = tf.add(self.piece_action, 0.000001)
self.wait_info_action1 = tf.add(self.wait_info_action1, 0.000001)
self.wait_info_action2 = tf.add(self.wait_info_action2, 0.000001)
self.wait_info_action3 = tf.add(self.wait_info_action3, 0.000001)
access_act = tf.reshape(tf.one_hot(self.tf_access_act, 2), [-1, ACCESSE_SPACE * 2])
access_act_prob = tf.reshape((access_act * (tf.reshape(self.access_action, [ACCESSE_SPACE * 2]))), [-1 ,2])
access_act_prob = -tf.log(tf.reduce_sum(access_act_prob, axis = 1))
wait_act = tf.reshape(tf.one_hot(self.tf_wait_act, 2), [-1, WAIT_SPACE * 2])
wait_act_prob = tf.reshape((wait_act * (tf.reshape(self.wait_action, [WAIT_SPACE * 2]))), [-1 ,2])
wait_act_prob = -tf.log(tf.reduce_sum(wait_act_prob, axis = 1))
piece_act = tf.reshape(tf.one_hot(self.tf_piece_act, 2), [-1, PIECE_SPACE * 2])
piece_act_prob = tf.reshape((piece_act * (tf.reshape(self.piece_action, [PIECE_SPACE * 2]))), [-1 ,2])
piece_act_prob = -tf.log(tf.reduce_sum(piece_act_prob, axis = 1))
wait_info_act1 = tf.reshape((tf.one_hot(self.tf_wait_info_act1, wait_info_act_count[0])), [-1, WAIT_SPACE * wait_info_act_count[0]])
wait_info_act1_prob = tf.reshape((wait_info_act1 * (tf.reshape(self.wait_info_action1, [WAIT_SPACE * wait_info_act_count[0]]))), [-1, wait_info_act_count[0]])
wait_info_act1_prob = -tf.log(tf.reduce_sum(wait_info_act1_prob, axis = 1))
wait_info_act2 = tf.reshape((tf.one_hot(self.tf_wait_info_act2, wait_info_act_count[1])), [-1, WAIT_SPACE * wait_info_act_count[1]])
wait_info_act2_prob = tf.reshape((wait_info_act2 * (tf.reshape(self.wait_info_action2, [WAIT_SPACE * wait_info_act_count[1]]))), [-1, wait_info_act_count[1]])
wait_info_act2_prob = -tf.log(tf.reduce_sum(wait_info_act2_prob, axis = 1))
wait_info_act3 = tf.reshape((tf.one_hot(self.tf_wait_info_act3, wait_info_act_count[2])), [-1, WAIT_SPACE * wait_info_act_count[2]])
wait_info_act3_prob = tf.reshape((wait_info_act3 * (tf.reshape(self.wait_info_action3, [WAIT_SPACE * wait_info_act_count[2]]))), [-1, wait_info_act_count[2]])
wait_info_act3_prob = -tf.log(tf.reduce_sum(wait_info_act3_prob, axis = 1))
self.reward = tf.divide(tf.reduce_sum(access_act_prob * self.tf_access_vt) + \
tf.reduce_sum(piece_act_prob * self.tf_piece_vt) + \
tf.reduce_sum(wait_act_prob * self.tf_wait_vt) + \
tf.reduce_sum(wait_info_act1_prob * self.tf_wait_info_vt1) + \
tf.reduce_sum(wait_info_act2_prob * self.tf_wait_info_vt2) + \
tf.reduce_sum(wait_info_act3_prob * self.tf_wait_info_vt3), self.tf_samples_count)
with tf.name_scope('train'):
self.train_op = tf.train.GradientDescentOptimizer(learning_rate = self.learning_rate).minimize(self.reward)
def update_policy(self):
access_p, wait_p, piece_p, wait_info1_p, wait_info2_p, wait_info3_p = \
self.sess.run([self.access_action, self.wait_action, self.piece_action, \
self.wait_info_action1, self.wait_info_action2, self.wait_info_action3])
self.policy.set_prob(access_p, wait_p, piece_p, \
wait_info1_p, wait_info2_p, wait_info3_p)
# store corresponding reward
def record_reward(self, round_id, reward, previous_samples, idx):
access = self.ep_access_act[previous_samples * ACCESSE_SPACE : (previous_samples + 1) * ACCESSE_SPACE]
wait = self.ep_wait_act[previous_samples * WAIT_SPACE : (previous_samples + 1) * WAIT_SPACE]
piece = self.ep_piece_act[previous_samples * PIECE_SPACE : (previous_samples + 1) * PIECE_SPACE]
waitinfo1 = self.ep_wait_info_act1[previous_samples * WAIT_SPACE : (previous_samples + 1) * WAIT_SPACE]
waitinfo2 = self.ep_wait_info_act2[previous_samples * WAIT_SPACE : (previous_samples + 1) * WAIT_SPACE]
waitinfo3 = self.ep_wait_info_act3[previous_samples * WAIT_SPACE : (previous_samples + 1) * WAIT_SPACE]
if reward > self.baselines.baselines[0].reward:
baseline_ = Baseline(access, wait, piece, waitinfo1, waitinfo2, waitinfo3, reward)
self.baselines.insert_baseline(baseline_)
if reward > self.best_seen:
self.best_seen = reward
# save RL best seen result
print('Update rl best seen sample - {}'.format(reward))
kid_path = os.path.join(os.getcwd(), self.kid_dir + '/kid_' + str(idx) + '.txt')
log_path = os.path.join(os.getcwd(), self.log_dir + '/rl_best.txt')
shutil.copy(kid_path, log_path)
# save RL best seen result for every round
old_path = os.path.join(os.getcwd(), self.log_dir + '/rl_best.txt')
new_path = os.path.join(os.getcwd(), self.log_dir + '/rl_best_iter_' + str(round_id) + '.txt')
shutil.copy(old_path, new_path)
if reward > self.round_best:
self.round_best = reward
kid_path = os.path.join(os.getcwd(), self.kid_dir + '/kid_' + str(idx) + '.txt')
log_path = os.path.join(os.getcwd(), self.log_dir + '/round_best_' + str(round_id) + '.txt')
shutil.copy(kid_path, log_path)
# store round_best sample for EA future use
self.round_best_sample = Sample(access, wait, piece, \
waitinfo1, waitinfo2, waitinfo3, 6, [0,4,8,1,0,0,8,4,2,1,8,1,4,2,1,4,2,4])
if self.round_worst == 0:
self.round_worst = reward
if reward < self.round_worst:
self.round_worst = reward
self.round_mean = (self.round_mean * previous_samples + reward)/(previous_samples + 1)
# store reward for each sample
self.ep_rs.append(reward)
def Evaluate(self, command, round_id, samples_per_distribution, load_per_sample):
base_path = os.path.join(os.getcwd(), self.log_dir)
policy_path = os.path.join(base_path, 'Distribution.txt')
with open(policy_path, 'a+') as f:
f.write('RL at iter {}'.format(round_id) + '\n')
f.write(str(self.policy) + '\n')
self.ep_rs = []
self.ep_access_act, self.ep_wait_act, self.ep_piece_act, \
self.ep_wait_info_act1, self.ep_wait_info_act2, self.ep_wait_info_act3, \
= self.policy.table_sample_batch(self.kid_dir, samples_per_distribution)
policies_res = samples_eval(command, samples_per_distribution, load_per_sample)
reward_ = 0
fail_to_exe = 0
for idx in range(samples_per_distribution):
# if the execution has failed, rollback the ep_obs and ep_as, continue the training
if policies_res[idx][0] == 0.0 and policies_res[idx][1] == 1.0:
print("continue")
self.rollback(idx, fail_to_exe)
fail_to_exe += 1
continue
print("RL sample:" + str(idx) + " throughput:" + str(policies_res[idx][0]))
self.record_reward(round_id, policies_res[idx][0], idx - fail_to_exe, idx)
def set_baseline(self, access, wait, piece, \
wait_info1, wait_info2, wait_info3, \
reward_buffer):
samples = int(len(access) / ACCESSE_SPACE)
for i in range(samples):
r = reward_buffer[i]
if r > self.baselines.baselines[0].reward:
access_t = access[i * ACCESSE_SPACE : (i + 1) * ACCESSE_SPACE]
wait_t = wait[i * WAIT_SPACE : (i + 1) * WAIT_SPACE]
piece_t = piece[i * PIECE_SPACE : (i + 1) * PIECE_SPACE]
waitinfo1_t = wait_info1[i * WAIT_SPACE : (i + 1) * WAIT_SPACE]
waitinfo2_t = wait_info2[i * WAIT_SPACE : (i + 1) * WAIT_SPACE]
waitinfo3_t = wait_info3[i * WAIT_SPACE : (i + 1) * WAIT_SPACE]
baseline_ = Baseline(access_t, wait_t, piece_t, \
waitinfo1_t, waitinfo2_t, waitinfo3_t, \
r)
self.baselines.insert_baseline(baseline_)
print("access")
print(self.baselines.baselines[0].sample)
access, wait, piece, waitinfo1, waitinfo2, waitinfo3 = self.baselines.baselines[0].sample.get_actions()
assign_access = tf.assign(self.access_action_v, access)
assign_wait = tf.assign(self.wait_action_v, wait)
assign_piece = tf.assign(self.piece_action_v, piece)
assign_waitinfo1 = tf.assign(self.wait_info_action1_v, waitinfo1)
assign_waitinfo2 = tf.assign(self.wait_info_action2_v, waitinfo2)
assign_waitinfo3 = tf.assign(self.wait_info_action3_v, waitinfo3)
self.sess.run([assign_access, assign_wait, assign_piece, assign_waitinfo1, assign_waitinfo2, assign_waitinfo3])
self.update_policy()
def get_ic3_distribution(self, access_in, wait_in , piece_in, waitinfo1_in, waitinfo2_in, waitinfo3_in):
access, wait, piece, waitinfo1, waitinfo2, waitinfo3 = \
self.baselines.baselines[0].sample.get_actions(access_in, wait_in , piece_in, waitinfo1_in, waitinfo2_in, waitinfo3_in)
assign_access = tf.assign(self.access_action_v, access)
assign_wait = tf.assign(self.wait_action_v, wait)
assign_piece = tf.assign(self.piece_action_v, piece)
assign_waitinfo1 = tf.assign(self.wait_info_action1_v, waitinfo1)
assign_waitinfo2 = tf.assign(self.wait_info_action2_v, waitinfo2)
assign_waitinfo3 = tf.assign(self.wait_info_action3_v, waitinfo3)
self.sess.run([assign_access, assign_wait, assign_piece, assign_waitinfo1, assign_waitinfo2, assign_waitinfo3])
self.update_policy()
# preprocess the reward
def get_reward(self, access, wait, piece, \
wait_info1, wait_info2, wait_info3, \
reward_buffer):
samples = int(len(access) / ACCESSE_SPACE)
for i in range(samples):
access_t = access[i * ACCESSE_SPACE : (i + 1) * ACCESSE_SPACE]
wait_t = wait[i * WAIT_SPACE : (i + 1) * WAIT_SPACE]
piece_t = piece[i * PIECE_SPACE : (i + 1) * PIECE_SPACE]
waitinfo1_t = wait_info1[i * WAIT_SPACE : (i + 1) * WAIT_SPACE]
waitinfo2_t = wait_info2[i * WAIT_SPACE : (i + 1) * WAIT_SPACE]
waitinfo3_t = wait_info3[i * WAIT_SPACE : (i + 1) * WAIT_SPACE]
self.baselines.samples_different_action(access_t, wait_t, piece_t, \
waitinfo1_t, waitinfo2_t, waitinfo3_t)
self.ep_access_rs, self.ep_wait_rs, self.ep_piece_rs, \
self.ep_waitinfo1_rs, self.ep_waitinfo2_rs, self.ep_waitinfo3_rs, \
= self.baselines.calculate_reward(reward_buffer)
self.baselines.clear_signal()
def learn(self, idx, lr, generations):
if (len(self.ep_access_act) == 0):
print("useless round")
return
base_path = os.path.join(os.getcwd(), self.log_dir)
baseline_path = os.path.join(base_path, 'Baseline.txt')
with open(baseline_path, 'a+') as f:
f.write('RL at iter {}'.format(idx) + ', ')
f.write(str(self.baselines) + '\n')
self.get_reward(self.ep_access_act, self.ep_wait_act, self.ep_piece_act, \
self.ep_wait_info_act1, self.ep_wait_info_act2, self.ep_wait_info_act3, \
self.ep_rs)
self.lr = 0.5 * lr * (1 + math.cos(math.pi * idx / generations))
self.samples_count = len(self.ep_rs)
self.sess.run(self.train_op, feed_dict={
self.tf_access_act: self.ep_access_act,
self.tf_wait_act: self.ep_wait_act,
self.tf_piece_act: self.ep_piece_act,
self.tf_wait_info_act1: self.ep_wait_info_act1,
self.tf_wait_info_act2: self.ep_wait_info_act2,
self.tf_wait_info_act3: self.ep_wait_info_act3,
self.tf_access_vt: self.ep_access_rs,
self.tf_wait_vt: self.ep_wait_rs,
self.tf_piece_vt: self.ep_piece_rs,
self.tf_wait_info_vt1: self.ep_waitinfo1_rs,
self.tf_wait_info_vt2: self.ep_waitinfo2_rs,
self.tf_wait_info_vt3: self.ep_waitinfo3_rs,
self.tf_samples_count: self.samples_count,
self.learning_rate: self.lr,
})
self.update_policy()
# tool functions:
def get_prob(self):
self.access_p, self.wait_p, self.piece_p, \
self.wait_info1_p, self.wait_info2_p, self.wait_info3_p, \
= self.sess.run([self.access_action, self.wait_action, self.piece_action, \
self.wait_info_action1, self.wait_info_action2, self.wait_info_action3])
self.print_prob()
def print_prob(self):
stri = ""
stri += str(self.access_p) + " "
stri += str(self.wait_p) + " "
stri += str(self.piece_p) + " "
stri += str(self.wait_info1_p) + " "
stri += str(self.wait_info2_p) + " "
stri += str(self.wait_info3_p) + " "
print(stri + "\n")
def rollback(self, index, fail_to_exe):
self.ep_access_act = self.ep_access_act[:(index - fail_to_exe) * ACCESSE_SPACE] + self.ep_access_act[(index + 1 - fail_to_exe) * ACCESSE_SPACE :]
self.ep_wait_act = self.ep_wait_act[:(index - fail_to_exe) * WAIT_SPACE] + self.ep_wait_act[(index + 1 - fail_to_exe) * WAIT_SPACE :]
self.ep_piece_act = self.ep_piece_act[:(index - fail_to_exe) * PIECE_SPACE] + self.ep_piece_act[(index + 1 - fail_to_exe) * PIECE_SPACE :]
self.ep_wait_info_act1 = self.ep_wait_info_act1[:(index - fail_to_exe) * WAIT_SPACE] + self.ep_wait_info_act1[(index + 1 - fail_to_exe) * WAIT_SPACE :]
self.ep_wait_info_act2 = self.ep_wait_info_act2[:(index - fail_to_exe) * WAIT_SPACE] + self.ep_wait_info_act2[(index + 1 - fail_to_exe) * WAIT_SPACE :]
self.ep_wait_info_act3 = self.ep_wait_info_act3[:(index - fail_to_exe) * WAIT_SPACE] + self.ep_wait_info_act3[(index + 1 - fail_to_exe) * WAIT_SPACE :]
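The cosine learning-rate decay applied in learn() above is easy to check in isolation. A minimal sketch, assuming only the standard math module; the base rate, iteration index and horizon below are illustrative values, not taken from the original training setup.
import math

def cosine_lr(lr, idx, generations):
    # Same rule as `self.lr` in learn(): decays from lr at idx=0 down to 0 at idx=generations.
    return 0.5 * lr * (1 + math.cos(math.pi * idx / generations))

# cosine_lr(0.1, 0, 100)   -> 0.1
# cosine_lr(0.1, 50, 100)  -> 0.05
# cosine_lr(0.1, 100, 100) -> ~0.0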
| 53.051896
| 177
| 0.637533
| 26,309
| 0.989842
| 0
| 0
| 0
| 0
| 0
| 0
| 3,265
| 0.122841
|
770d8f29602f5abced8ace8b5ba5e47df2e792c0
| 335
|
py
|
Python
|
src/data/preprocessors/__init__.py
|
paulwarkentin/tf-ssd-vgg
|
f48e3ccbb8eb092d3cb82a9d90164c7328880477
|
[
"MIT"
] | 5
|
2021-09-26T07:19:42.000Z
|
2022-03-11T23:25:36.000Z
|
ssd/src/data/preprocessors/__init__.py
|
bharatmahaur/ComparativeStudy
|
2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d
|
[
"Apache-2.0"
] | null | null | null |
ssd/src/data/preprocessors/__init__.py
|
bharatmahaur/ComparativeStudy
|
2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d
|
[
"Apache-2.0"
] | null | null | null |
##
## /src/data/preprocessors/__init__.py
##
## Created by Paul Warkentin <paul@warkentin.email> on 15/07/2018.
## Updated by Paul Warkentin <paul@warkentin.email> on 15/07/2018.
##
from .bbox_preprocessor import BBoxPreprocessor
from .default_preprocessor import DefaultPreprocessor
from .image_preprocessor import ImagePreprocessor
| 30.454545
| 66
| 0.797015
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 176
| 0.525373
|
770e96f574a33ca2bee58218e94c93fab61c4349
| 4,775
|
py
|
Python
|
camera.py
|
chenhsuanlin/signed-distance-SRN
|
d47ecca9d048e29adfa7f5b0170d1daba897e740
|
[
"MIT"
] | 94
|
2020-10-26T17:32:32.000Z
|
2022-03-06T12:22:31.000Z
|
camera.py
|
albertotono/signed-distance-SRN
|
2e750d3fb71cf7570cf9be9f4a39040b5173795d
|
[
"MIT"
] | 15
|
2020-10-27T12:48:31.000Z
|
2022-01-22T02:29:48.000Z
|
camera.py
|
albertotono/signed-distance-SRN
|
2e750d3fb71cf7570cf9be9f4a39040b5173795d
|
[
"MIT"
] | 12
|
2020-10-26T20:26:07.000Z
|
2021-12-31T08:13:01.000Z
|
import numpy as np
import os,sys,time
import torch
import torch.nn.functional as torch_F
import collections
from easydict import EasyDict as edict
import util
class Pose():
def __call__(self,R=None,t=None):
assert(R is not None or t is not None)
if R is None:
if not isinstance(t,torch.Tensor): t = torch.tensor(t)
R = torch.eye(3,device=t.device).repeat(*t.shape[:-1],1,1)
elif t is None:
if not isinstance(R,torch.Tensor): R = torch.tensor(R)
t = torch.zeros(R.shape[:-1],device=R.device)
else:
if not isinstance(R,torch.Tensor): R = torch.tensor(R)
if not isinstance(t,torch.Tensor): t = torch.tensor(t)
assert(R.shape[:-1]==t.shape and R.shape[-2:]==(3,3))
R = R.float()
t = t.float()
pose = torch.cat([R,t[...,None]],dim=-1) # [...,3,4]
assert(pose.shape[-2:]==(3,4))
return pose
def invert(self,pose,use_inverse=False):
R,t = pose[...,:3],pose[...,3:]
R_inv = R.inverse() if use_inverse else R.transpose(-1,-2)
t_inv = (-R_inv@t)[...,0]
pose_inv = self(R=R_inv,t=t_inv)
return pose_inv
def compose(self,pose_list):
# pose_new(x) = poseN(...(pose2(pose1(x)))...)
pose_new = pose_list[0]
for pose in pose_list[1:]:
pose_new = self.compose_pair(pose_new,pose)
return pose_new
def compose_pair(self,pose_a,pose_b):
# pose_new(x) = pose_b(pose_a(x))
R_a,t_a = pose_a[...,:3],pose_a[...,3:]
R_b,t_b = pose_b[...,:3],pose_b[...,3:]
R_new = R_b@R_a
t_new = (R_b@t_a+t_b)[...,0]
pose_new = self(R=R_new,t=t_new)
return pose_new
pose = Pose()
def to_hom(X):
X_hom = torch.cat([X,torch.ones_like(X[...,:1])],dim=-1)
return X_hom
def world2cam(X,pose): # [B,N,3]
X_hom = to_hom(X)
return X_hom@pose.transpose(-1,-2)
def cam2img(X,cam_intr):
return X@cam_intr.transpose(-1,-2)
def img2cam(X,cam_intr):
return X@cam_intr.inverse().transpose(-1,-2)
def cam2world(X,pose):
X_hom = to_hom(X)
pose_inv = Pose().invert(pose)
return X_hom@pose_inv.transpose(-1,-2)
def angle_to_rotation_matrix(a,axis):
roll = dict(X=1,Y=2,Z=0)[axis]
O = torch.zeros_like(a)
I = torch.ones_like(a)
M = torch.stack([torch.stack([a.cos(),-a.sin(),O],dim=-1),
torch.stack([a.sin(),a.cos(),O],dim=-1),
torch.stack([O,O,I],dim=-1)],dim=-2)
M = M.roll((roll,roll),dims=(-2,-1))
return M
def get_camera_grid(opt,batch_size,intr=None):
# compute image coordinate grid
if opt.camera.model=="perspective":
y_range = torch.arange(opt.H,dtype=torch.float32,device=opt.device).add_(0.5)
x_range = torch.arange(opt.W,dtype=torch.float32,device=opt.device).add_(0.5)
Y,X = torch.meshgrid(y_range,x_range) # [H,W]
xy_grid = torch.stack([X,Y],dim=-1).view(-1,2) # [HW,2]
elif opt.camera.model=="orthographic":
assert(opt.H==opt.W)
y_range = torch.linspace(-1,1,opt.H,device=opt.device)
x_range = torch.linspace(-1,1,opt.W,device=opt.device)
Y,X = torch.meshgrid(y_range,x_range) # [H,W]
xy_grid = torch.stack([X,Y],dim=-1).view(-1,2) # [HW,2]
xy_grid = xy_grid.repeat(batch_size,1,1) # [B,HW,2]
if opt.camera.model=="perspective":
grid_3D = img2cam(to_hom(xy_grid),intr) # [B,HW,3]
elif opt.camera.model=="orthographic":
grid_3D = to_hom(xy_grid) # [B,HW,3]
return xy_grid,grid_3D
def get_center_and_ray(opt,pose,intr=None,offset=None): # [HW,2]
batch_size = len(pose)
xy_grid,grid_3D = get_camera_grid(opt,batch_size,intr=intr) # [B,HW,3]
# compute center and ray
if opt.camera.model=="perspective":
if offset is not None:
grid_3D[...,:2] += offset
center_3D = torch.zeros(batch_size,1,3,device=opt.device) # [B,1,3]
elif opt.camera.model=="orthographic":
center_3D = torch.cat([xy_grid,torch.zeros_like(xy_grid[...,:1])],dim=-1) # [B,HW,3]
# transform from camera to world coordinates
grid_3D = cam2world(grid_3D,pose) # [B,HW,3]
center_3D = cam2world(center_3D,pose) # [B,HW,3]
ray = grid_3D-center_3D # [B,HW,3]
return center_3D,ray
def get_3D_points_from_depth(opt,center,ray,depth,multi_samples=False):
if multi_samples: center,ray = center[:,:,None],ray[:,:,None]
# x = c+dv
points_3D = center+ray*depth # [B,HW,3]/[B,HW,N,3]/[N,3]
return points_3D
def get_depth_from_3D_points(opt,center,ray,points_3D):
# d = ||x-c||/||v|| (x-c and v should be in same direction)
depth = (points_3D-center).norm(dim=-1,keepdim=True)/ray.norm(dim=-1,keepdim=True) # [B,HW,1]
return depth
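A minimal usage sketch of the Pose helper above, assuming PyTorch is installed; the rotation and translation values are illustrative only.
import torch

p = Pose()
pose_a = p(t=torch.tensor([[1.0, 0.0, 0.0]]))          # identity rotation, translate along +x
pose_b = p(R=torch.eye(3)[None], t=torch.zeros(1, 3))  # identity pose
combined = p.compose([pose_a, pose_b])                  # pose_b(pose_a(x)), shape [1,3,4]
identity = p.compose_pair(pose_a, p.invert(pose_a))     # composing with the inverse gives ~identity
print(combined.shape, identity[..., 3])                 # torch.Size([1, 3, 4]), translation ~0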
| 37.598425
| 97
| 0.604188
| 1,577
| 0.330262
| 0
| 0
| 0
| 0
| 0
| 0
| 512
| 0.107225
|
7710dc16a8fbe11c81dbff2a20f74da32953814d
| 1,550
|
py
|
Python
|
solutions/python3/problem1265.py
|
tjyiiuan/LeetCode
|
abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e
|
[
"MIT"
] | null | null | null |
solutions/python3/problem1265.py
|
tjyiiuan/LeetCode
|
abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e
|
[
"MIT"
] | null | null | null |
solutions/python3/problem1265.py
|
tjyiiuan/LeetCode
|
abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
1265. Print Immutable Linked List in Reverse
You are given an immutable linked list, print out all values of each node in reverse with the help of the following
interface:
ImmutableListNode: An interface of immutable linked list, you are given the head of the list.
You need to use the following functions to access the linked list (you can't access the ImmutableListNode directly):
ImmutableListNode.printValue(): Print value of the current node.
ImmutableListNode.getNext(): Return the next node.
The input is only given to initialize the linked list internally.
You must solve this problem without modifying the linked list.
In other words, you must operate the linked list using only the mentioned APIs.
Constraints:
The length of the linked list is between [1, 1000].
The value of each node in the linked list is between [-1000, 1000].
Follow up:
Could you solve this problem in:
Constant space complexity?
Linear time complexity and less than linear space complexity?
"""
"""
This is the ImmutableListNode's API interface.
You should not implement it, or speculate about its implementation.
"""
class ImmutableListNode:
def printValue(self) -> None: # print the value of this node.
pass
def getNext(self) -> 'ImmutableListNode': # return the next node.
pass
class Solution:
def printLinkedListInReverse(self, head: 'ImmutableListNode') -> None:
if head is None:
return
self.printLinkedListInReverse(head.getNext())
head.printValue()
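For the follow-up question, a hedged sketch of an explicit-stack variant: it avoids Python's recursion limit on long lists while keeping the same O(n) time and space. SolutionIterative is a name introduced here for illustration, not part of the original problem.
class SolutionIterative:
    def printLinkedListInReverse(self, head: 'ImmutableListNode') -> None:
        stack = []
        while head is not None:          # walk the list once, remembering each node
            stack.append(head)
            head = head.getNext()
        while stack:                     # print back to front
            stack.pop().printValue()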
| 29.245283
| 116
| 0.74129
| 403
| 0.26
| 0
| 0
| 0
| 0
| 0
| 0
| 1,228
| 0.792258
|
771202ad53d30186bb1f539c888cffb5dbe12c2c
| 3,403
|
py
|
Python
|
standard.py
|
futureisatyourhand/self-supervised-learning
|
af8b18639c89d138dbc3490827f7fe867d38387b
|
[
"Apache-2.0"
] | 1
|
2022-02-09T10:14:12.000Z
|
2022-02-09T10:14:12.000Z
|
standard.py
|
futureisatyourhand/self-supervised-learning
|
af8b18639c89d138dbc3490827f7fe867d38387b
|
[
"Apache-2.0"
] | null | null | null |
standard.py
|
futureisatyourhand/self-supervised-learning
|
af8b18639c89d138dbc3490827f7fe867d38387b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# --------------------------------------
# @Time : 2021/5/12$ 12:12$
# @Author : Qian Li
# @Email : 1844857573@qq.com
# @File : network.py
# Description : details (i.e., online network, online projector, online predictor, classifier, target network, target projector) for self-supervised learning
import torch
from functools import wraps
from torch import nn
import numpy as np
from utils import MLP,ResNet50,accuracy
import copy
from torch.nn import init
from torchvision import models
def weigth_init(model,path):
from collections import OrderedDict
new_state_dict=OrderedDict()
state_dict=torch.load(path)["model"]
for k,v in state_dict.items():
if "target_" in k:
continue
new_state_dict[k]=v
model.load_state_dict(new_state_dict)
class VGG(nn.Module):
def __init__(self,num_classes=10,
projector_hidden_size=4096,
projector_output_size=256,
predictor_hidden_size=4096,
moving_average_decay=.9999,
eps=1e-5,use_momentum = True,mode="pre-train"):
##model:pre-train,fine-tune,test
super(VGG,self).__init__()
model=models.vgg16(pretrained=False)
print(model)
model.classifier=MLP(input_size=512,hidden_size=projector_hidden_size,output_size=projector_output_size)
model.avgpool=nn.Sequential()
self.mode=mode
model.classifier=nn.Sequential()
self.model=model
self.classifier=nn.Sequential(nn.Linear(512,4096),
nn.BatchNorm1d(4096),
nn.ReLU(inplace=True),
nn.Linear(4096,4096),
nn.BatchNorm1d(4096),
nn.ReLU(inplace=True),
nn.Linear(4096,num_classes)
)
self.model=model
self.cls_loss=nn.CrossEntropyLoss()
if self.classifier is not None:
for m in self.classifier.modules():
if isinstance(m,nn.Conv2d):
nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')
elif isinstance(m,nn.Linear):
init.normal_(m.weight, std=1e-3)
elif isinstance(m,nn.BatchNorm2d):
                    init.constant_(m.weight, 1)
                    init.constant_(m.bias, 0)
                elif isinstance(m,nn.BatchNorm1d):
                    init.constant_(m.weight, 1)
                    init.constant_(m.bias, 0)
def forward(self,image_one=None,image_two=None,labels=None):
#if not image_two:
        if self.mode == "test":
feature_view1=self.model(image_one)
logits_view1=nn.Softmax(dim=1)(self.classifier(feature_view1))
return logits_view1.argmax(dim=1),None,None
feature=self.model(image_one)
logit_view1=self.classifier(feature)
classifier_loss=self.cls_loss(logit_view1,labels)
logit_view1=nn.Softmax(dim=1)(logit_view1)
top1_acc,top5_acc=accuracy(logit_view1.data,labels, topk=(1, 5))
return classifier_loss.mean(),top1_acc.data.mean(),top5_acc.data.mean()
| 41
| 163
| 0.569498
| 2,577
| 0.757273
| 0
| 0
| 0
| 0
| 0
| 0
| 428
| 0.125771
|
771328ea922df3260ea4280307fa28df861e95c9
| 789
|
py
|
Python
|
aqualogic/frames.py
|
mj-sakellaropoulos/aqualogic
|
75a4803d36730eb634d4bb31de564e647ed40624
|
[
"MIT"
] | null | null | null |
aqualogic/frames.py
|
mj-sakellaropoulos/aqualogic
|
75a4803d36730eb634d4bb31de564e647ed40624
|
[
"MIT"
] | null | null | null |
aqualogic/frames.py
|
mj-sakellaropoulos/aqualogic
|
75a4803d36730eb634d4bb31de564e647ed40624
|
[
"MIT"
] | null | null | null |
from enum import Enum, unique
class Frames(Enum):
FRAME_DLE = 0x10
FRAME_STX = 0x02
FRAME_ETX = 0x03
# Local wired panel (black face with service button)
FRAME_TYPE_LOCAL_WIRED_KEY_EVENT = b'\x00\x02'
# Remote wired panel (white face)
FRAME_TYPE_REMOTE_WIRED_KEY_EVENT = b'\x00\x03'
# Wireless remote
FRAME_TYPE_WIRELESS_KEY_EVENT = b'\x00\x83'
FRAME_TYPE_ON_OFF_EVENT = b'\x00\x05' # Seems to only work for some keys
FRAME_TYPE_KEEP_ALIVE = b'\x01\x01'
FRAME_TYPE_LEDS = b'\x01\x02'
FRAME_TYPE_DISPLAY_UPDATE = b'\x01\x03'
FRAME_TYPE_LONG_DISPLAY_UPDATE = b'\x04\x0a'
FRAME_TYPE_PUMP_SPEED_REQUEST = b'\x0c\x01'
FRAME_TYPE_PUMP_STATUS = b'\x00\x0c'
#AquaPod mystery
FRAME_TYPE_AQUAPOD_KEY_EVENT = b'\x00\x8c'
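A minimal sketch of matching a received two-byte frame-type field against the enum above; identify_frame_type and the example bytes are illustrative additions, not part of the original module.
def identify_frame_type(type_bytes: bytes):
    # Return the matching Frames member, or None if the type field is unknown.
    for frame in Frames:
        if frame.value == type_bytes:
            return frame
    return None

# identify_frame_type(b'\x01\x03') -> Frames.FRAME_TYPE_DISPLAY_UPDATE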
| 30.346154
| 78
| 0.712294
| 756
| 0.958175
| 0
| 0
| 0
| 0
| 0
| 0
| 273
| 0.346008
|
77135615dccca76a8c5274c97ffda5de511d3e32
| 87
|
py
|
Python
|
Python/Sum/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 5
|
2020-08-29T15:15:31.000Z
|
2022-03-01T18:22:34.000Z
|
Python/Sum/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | null | null | null |
Python/Sum/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 1
|
2020-12-02T11:13:14.000Z
|
2020-12-02T11:13:14.000Z
|
num1 = input()
num2 = input()
num3 = input()
print(int(num1) + int(num2) + int(num3))
| 14.5
| 40
| 0.609195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
77139d03885bd7af5b622aa37432a424a7f5a2fe
| 5,525
|
py
|
Python
|
Python/scheduledEventsInteractiveTool.py
|
Azure-Samples/virtual-machines-python-scheduled-events-central-logging
|
d9028f296e4b78eb449e295b4e72a9204da84dcf
|
[
"MIT"
] | 7
|
2017-04-20T03:09:10.000Z
|
2021-02-08T17:07:54.000Z
|
Python/scheduledEventsInteractiveTool.py
|
Azure-Samples/virtual-machines-python-scheduled-events-central-logging
|
d9028f296e4b78eb449e295b4e72a9204da84dcf
|
[
"MIT"
] | 8
|
2017-04-19T17:57:48.000Z
|
2017-04-21T18:31:44.000Z
|
Python/scheduledEventsInteractiveTool.py
|
Azure-Samples/virtual-machines-python-scheduled-events-central-logging
|
d9028f296e4b78eb449e295b4e72a9204da84dcf
|
[
"MIT"
] | 4
|
2017-04-19T17:33:50.000Z
|
2021-02-10T11:21:01.000Z
|
#!/usr/bin/python
import json
import socket
import sys
import getopt
import logging
from enum import Enum
from datetime import datetime
import base64
import hmac
import hashlib
import time
import urllib.request
import urllib.parse
import configparser
metadata_url = 'http://169.254.169.254/metadata/scheduledevents?api-version=2017-03-01'
headers = {'Metadata': 'true'}
this_host = socket.gethostname()
log_format = '%(asctime)s [%(levelname)s] %(message)s'
logger = logging.getLogger('example')
logging.basicConfig(format=log_format, level=logging.DEBUG)
config_key_endpoint = 'Endpoint'
config_key_shared_access_key_name = 'SharedAccessKeyName'
config_key_shared_access_key = 'SharedAccessKey'
config_key_entity_path = 'EntityPath'
encoding = 'utf-8'
class EventHubMsgSender:
API_VERSION = '2016-07'
TOKEN_VALID_SECS = 10
TOKEN_FORMAT = 'SharedAccessSignature sig=%s&se=%s&skn=%s&sr=%s'
def __init__(self, connectionString=None):
if connectionString is None:
config = configparser.ConfigParser()
config.read('scheduledEventsInteractiveToolConfig.ini')
connectionString = config['DEFAULT']['connectionstring']
if connectionString is not None:
keyValues = dict((item.split('=', 1))
for item in connectionString.split(';'))
self.endPoint = keyValues[config_key_endpoint].replace('sb://', '')
self.keyName = keyValues[config_key_shared_access_key_name]
self.keyValue = keyValues[config_key_shared_access_key]
self.entityPath = keyValues[config_key_entity_path]
def _buildEventHubSasToken(self):
expiry = int(time.time() + 10000)
string_to_sign = '{}\n{}'.format(
urllib.parse.quote_plus(self.endPoint), expiry)
key = self.keyValue.encode(encoding)
string_to_sign = string_to_sign.encode(encoding)
signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
signature = signed_hmac_sha256.digest()
signature = base64.b64encode(signature)
token = 'SharedAccessSignature sr={}&sig={}&se={}&skn={}'.format(urllib.parse.quote_plus(
self.endPoint), urllib.parse.quote(signature), expiry, self.keyName)
return token
def sendD2CMsg(self, message):
sasToken = self._buildEventHubSasToken()
url = 'https://{}{}/messages?api-version={}'.format(
self.endPoint, self.entityPath, self.API_VERSION)
data = message.encode('ascii')
req = urllib.request.Request(
url, headers={'Authorization': sasToken}, data=data, method='POST')
        with urllib.request.urlopen(req) as f:
            # read the response while the connection is still open
            return f.read().decode(encoding)
def send_to_event_hub(eventHubMessage):
ehMsgSender = EventHubMsgSender()
messageAsJson = json.dumps(eventHubMessage, ensure_ascii=False)
result = ehMsgSender.sendD2CMsg(messageAsJson)
logger.debug('send_to_event_hub returned {}'.format(result))
def get_scheduled_events():
logger.debug('get_scheduled_events was called')
req = urllib.request.Request(url=metadata_url, headers=headers)
resp = urllib.request.urlopen(req)
data = json.loads(resp.read().decode(encoding))
return data
def ack_event(evt):
eventId = evt['EventId']
logger.info('ack_event was called with eventID {}'.format(eventId))
ack_msg = '{{"StartRequests":[{{"EventId":"{}"}}]}}'.format(eventId)
ack_msg = ack_msg.encode()
res = urllib.request.urlopen(url=metadata_url, data=ack_msg).read()
eventHubMessage = build_eventhub_message(
evt, 'Scheduled Event was acknowledged')
send_to_event_hub(eventHubMessage)
def build_eventhub_message(evt, message):
eventHubMessage = evt.copy()
eventHubMessage['Hostname'] = this_host
eventHubMessage['Time'] = datetime.now().strftime('%H:%M:%S')
eventHubMessage['Msg'] = message
if 'Resources' in evt:
eventHubMessage['Resources'] = evt['Resources'][0]
if 'NotBefore' in evt:
eventHubMessage['NotBefore'] = evt['NotBefore'].replace(' ', '_')
eventHubMessage['LogType'] = 'DEBUG'
return eventHubMessage
def handle_scheduled_events(data):
numEvents = len(data['Events'])
logger.info(
'handle_scheduled_events was called with {} events'.format(numEvents))
if numEvents == 0:
emptyEvent = {}
eventHubMessage = build_eventhub_message(
emptyEvent, 'No Scheduled Events')
send_to_event_hub(eventHubMessage)
return
for evt in data['Events']:
eventHubMessage = build_eventhub_message(
evt, 'Scheduled Event was detected')
logger.info(eventHubMessage)
send_to_event_hub(eventHubMessage)
if this_host in eventHubMessage['Resources']:
eventId = evt['EventId']
logger.info('THIS host ({}) is scheduled for {} not before {} (id: {})'.format(
this_host, eventHubMessage['EventType'], eventHubMessage['NotBefore'], eventId))
userAck = input('Are you looking to acknowledge the event (y/n)?')
if userAck == 'y':
logger.debug('Acknowledging {}'.format(eventId))
ack_event(evt)
else:
logger.debug('Ignoring {}'.format(eventId))
def main():
logger.debug('Azure Scheduled Events Interactive Tool')
data = get_scheduled_events()
handle_scheduled_events(data)
if __name__ == '__main__':
main()
sys.exit(0)
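EventHubMsgSender above falls back to scheduledEventsInteractiveToolConfig.ini when no connection string is passed in. A minimal sketch of writing that file with placeholder values; the namespace, policy name and key are assumptions, only the four key names (Endpoint, SharedAccessKeyName, SharedAccessKey, EntityPath) come from the code.
import configparser

cfg = configparser.ConfigParser()
cfg['DEFAULT']['connectionstring'] = (
    'Endpoint=sb://example.servicebus.windows.net/;'   # placeholder namespace
    'SharedAccessKeyName=send-policy;'                 # placeholder policy name
    'SharedAccessKey=<your-key>;'                      # placeholder key
    'EntityPath=scheduledevents'                       # placeholder event hub name
)
with open('scheduledEventsInteractiveToolConfig.ini', 'w') as f:
    cfg.write(f)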
| 36.833333
| 97
| 0.673122
| 1,991
| 0.360362
| 0
| 0
| 0
| 0
| 0
| 0
| 1,146
| 0.207421
|
7714068c84e56c46ce9cbe59a4ed57f2565d3970
| 1,750
|
py
|
Python
|
E2E_TOD/config.py
|
kingb12/pptod
|
4cc920494b663c5352a507ed1e32f1e2509a8c93
|
[
"Apache-2.0"
] | 54
|
2021-10-02T13:31:09.000Z
|
2022-03-25T03:44:54.000Z
|
E2E_TOD/config.py
|
programmeddeath1/pptod
|
52d26ddc7b917c86af721e810a202db7c7d3b398
|
[
"Apache-2.0"
] | 8
|
2021-11-10T06:05:20.000Z
|
2022-03-25T03:27:29.000Z
|
E2E_TOD/config.py
|
programmeddeath1/pptod
|
52d26ddc7b917c86af721e810a202db7c7d3b398
|
[
"Apache-2.0"
] | 14
|
2021-10-02T13:31:01.000Z
|
2022-03-27T15:49:33.000Z
|
import logging, time, os
class Config:
def __init__(self, data_prefix):
# data_prefix = r'../data/'
self.data_prefix = data_prefix
self._multiwoz_damd_init()
def _multiwoz_damd_init(self):
self.vocab_path_train = self.data_prefix + '/multi-woz-processed/vocab'
self.data_path = self.data_prefix + '/multi-woz-processed/'
self.data_file = 'data_for_damd.json'
self.dev_list = self.data_prefix + '/multi-woz/valListFile.json'
self.test_list = self.data_prefix + '/multi-woz/testListFile.json'
self.dbs = {
'attraction': self.data_prefix + '/db/attraction_db_processed.json',
'hospital': self.data_prefix + '/db/hospital_db_processed.json',
'hotel': self.data_prefix + '/db/hotel_db_processed.json',
'police': self.data_prefix + '/db/police_db_processed.json',
'restaurant': self.data_prefix + '/db/restaurant_db_processed.json',
'taxi': self.data_prefix + '/db/taxi_db_processed.json',
'train': self.data_prefix + '/db/train_db_processed.json',
}
self.domain_file_path = self.data_prefix + '/multi-woz-processed/domain_files.json'
self.slot_value_set_path = self.data_prefix + '/db/value_set_processed.json'
self.exp_domains = ['all'] # hotel,train, attraction, restaurant, taxi
self.enable_aspn = True
self.use_pvaspn = False
self.enable_bspn = True
self.bspn_mode = 'bspn' # 'bspn' or 'bsdx'
self.enable_dspn = False # removed
self.enable_dst = False
self.exp_domains = ['all'] # hotel,train, attraction, restaurant, taxi
self.max_context_length = 900
self.vocab_size = 3000
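A minimal usage sketch of the Config class above; '../data' is an assumed local prefix.
cfg = Config('../data')
print(cfg.data_path)                   # ../data/multi-woz-processed/
print(cfg.dbs['restaurant'])           # ../data/db/restaurant_db_processed.json
print(cfg.bspn_mode, cfg.vocab_size)   # bspn 3000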
| 42.682927
| 91
| 0.645714
| 1,722
| 0.984
| 0
| 0
| 0
| 0
| 0
| 0
| 634
| 0.362286
|
77147ffa79f630a4609f9a112ce607e6646e1ea3
| 6,438
|
py
|
Python
|
advanced_functionality/inference_pipeline_sparkml_xgboost_car_evaluation/preprocessor.py
|
jpmarques19/tensorflwo-test
|
0ff8b06e0415075c7269820d080284a42595bb2e
|
[
"Apache-2.0"
] | 5
|
2019-01-19T23:53:35.000Z
|
2022-01-29T14:04:31.000Z
|
advanced_functionality/inference_pipeline_sparkml_xgboost_car_evaluation/preprocessor.py
|
jpmarques19/tensorflwo-test
|
0ff8b06e0415075c7269820d080284a42595bb2e
|
[
"Apache-2.0"
] | 6
|
2020-01-28T22:54:35.000Z
|
2022-02-10T00:44:46.000Z
|
advanced_functionality/inference_pipeline_sparkml_xgboost_car_evaluation/preprocessor.py
|
jpmarques19/tensorflwo-test
|
0ff8b06e0415075c7269820d080284a42595bb2e
|
[
"Apache-2.0"
] | 8
|
2020-12-14T15:49:24.000Z
|
2022-03-23T18:38:36.000Z
|
from __future__ import print_function
import time
import sys
import os
import shutil
import csv
import boto3
from awsglue.utils import getResolvedOptions
import pyspark
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.sql.functions import *
from mleap.pyspark.spark_support import SimpleSparkSerializer
def toCSVLine(data):
r = ','.join(str(d) for d in data[1])
return str(data[0]) + "," + r
def main():
spark = SparkSession.builder.appName("PySparkTitanic").getOrCreate()
args = getResolvedOptions(sys.argv, ['s3_input_data_location',
's3_output_bucket',
's3_output_bucket_prefix',
's3_model_bucket',
's3_model_bucket_prefix'])
# This is needed to write RDDs to file which is the only way to write nested Dataframes into CSV.
spark.sparkContext._jsc.hadoopConfiguration().set("mapred.output.committer.class",
"org.apache.hadoop.mapred.FileOutputCommitter")
train = spark.read.csv(args['s3_input_data_location'], header=False)
oldColumns = train.schema.names
newColumns = ['buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety', 'cat']
train = reduce(lambda train, idx: train.withColumnRenamed(oldColumns[idx], newColumns[idx]), xrange(len(oldColumns)), train)
# dropping null values
train = train.dropna()
# Target label
catIndexer = StringIndexer(inputCol="cat", outputCol="label")
labelIndexModel = catIndexer.fit(train)
train = labelIndexModel.transform(train)
converter = IndexToString(inputCol="label", outputCol="cat")
    # Splitting into train and test sets. Beware: it sorts the dataset
(traindf, validationdf) = train.randomSplit([0.8, 0.2])
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
buyingIndexer = StringIndexer(inputCol="buying", outputCol="indexedBuying")
maintIndexer = StringIndexer(inputCol="maint", outputCol="indexedMaint")
doorsIndexer = StringIndexer(inputCol="doors", outputCol="indexedDoors")
personsIndexer = StringIndexer(inputCol="persons", outputCol="indexedPersons")
lug_bootIndexer = StringIndexer(inputCol="lug_boot", outputCol="indexedLug_boot")
safetyIndexer = StringIndexer(inputCol="safety", outputCol="indexedSafety")
# One Hot Encoder on indexed features
buyingEncoder = OneHotEncoder(inputCol="indexedBuying", outputCol="buyingVec")
maintEncoder = OneHotEncoder(inputCol="indexedMaint", outputCol="maintVec")
doorsEncoder = OneHotEncoder(inputCol="indexedDoors", outputCol="doorsVec")
personsEncoder = OneHotEncoder(inputCol="indexedPersons", outputCol="personsVec")
lug_bootEncoder = OneHotEncoder(inputCol="indexedLug_boot", outputCol="lug_bootVec")
safetyEncoder = OneHotEncoder(inputCol="indexedSafety", outputCol="safetyVec")
# Create the vector structured data (label,features(vector))
assembler = VectorAssembler(inputCols=["buyingVec", "maintVec", "doorsVec", "personsVec", "lug_bootVec", "safetyVec"], outputCol="features")
# Chain featurizers in a Pipeline
pipeline = Pipeline(stages=[buyingIndexer, maintIndexer, doorsIndexer, personsIndexer, lug_bootIndexer, safetyIndexer, buyingEncoder, maintEncoder, doorsEncoder, personsEncoder, lug_bootEncoder, safetyEncoder, assembler])
# Train model. This also runs the indexers.
model = pipeline.fit(traindf)
# Delete previous data from output
s3 = boto3.resource('s3')
bucket = s3.Bucket(args['s3_output_bucket'])
bucket.objects.filter(Prefix=args['s3_output_bucket_prefix']).delete()
# Save transformed training data to CSV in S3 by converting to RDD.
transformed_traindf = model.transform(traindf)
transformed_train_rdd = transformed_traindf.rdd.map(lambda x: (x.label, x.features))
lines = transformed_train_rdd.map(toCSVLine)
lines.saveAsTextFile('s3a://' + args['s3_output_bucket'] + '/' +args['s3_output_bucket_prefix'] + '/' + 'train')
# Similar data processing for validation dataset.
predictions = model.transform(validationdf)
transformed_train_rdd = predictions.rdd.map(lambda x: (x.label, x.features))
lines = transformed_train_rdd.map(toCSVLine)
lines.saveAsTextFile('s3a://' + args['s3_output_bucket'] + '/' +args['s3_output_bucket_prefix'] + '/' + 'validation')
# Serialize and store via MLeap
SimpleSparkSerializer().serializeToBundle(model, "jar:file:/tmp/model.zip", predictions)
# Unzipping as SageMaker expects a .tar.gz file but MLeap produces a .zip file.
import zipfile
with zipfile.ZipFile("/tmp/model.zip") as zf:
zf.extractall("/tmp/model")
# Writing back the content as a .tar.gz file
import tarfile
with tarfile.open("/tmp/model.tar.gz", "w:gz") as tar:
tar.add("/tmp/model/bundle.json", arcname='bundle.json')
tar.add("/tmp/model/root", arcname='root')
s3 = boto3.resource('s3')
file_name = args['s3_model_bucket_prefix'] + '/' + 'model.tar.gz'
s3.Bucket(args['s3_model_bucket']).upload_file('/tmp/model.tar.gz', file_name)
os.remove('/tmp/model.zip')
os.remove('/tmp/model.tar.gz')
shutil.rmtree('/tmp/model')
# Save postprocessor
SimpleSparkSerializer().serializeToBundle(converter, "jar:file:/tmp/postprocess.zip", predictions)
with zipfile.ZipFile("/tmp/postprocess.zip") as zf:
zf.extractall("/tmp/postprocess")
# Writing back the content as a .tar.gz file
import tarfile
with tarfile.open("/tmp/postprocess.tar.gz", "w:gz") as tar:
tar.add("/tmp/postprocess/bundle.json", arcname='bundle.json')
tar.add("/tmp/postprocess/root", arcname='root')
file_name = args['s3_model_bucket_prefix'] + '/' + 'postprocess.tar.gz'
s3.Bucket(args['s3_model_bucket']).upload_file('/tmp/postprocess.tar.gz', file_name)
os.remove('/tmp/postprocess.zip')
os.remove('/tmp/postprocess.tar.gz')
shutil.rmtree('/tmp/postprocess')
if __name__ == "__main__":
main()
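A quick sanity check of the toCSVLine() helper defined above, with an illustrative (label, features) record standing in for one row of the transformed RDD.
sample = (1.0, [0.25, 0.0, 1.0])       # (label, flattened feature values)
print(toCSVLine(sample))               # -> 1.0,0.25,0.0,1.0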
| 42.92
| 225
| 0.694626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,314
| 0.359428
|
7714bae382cfe5335e914024d6f5ee9028364bc3
| 1,350
|
py
|
Python
|
response/response.py
|
benyamin-7/simple-snmp-collector
|
f21dc75bc2a28af0ce1c881837166d0034cac213
|
[
"MIT"
] | null | null | null |
response/response.py
|
benyamin-7/simple-snmp-collector
|
f21dc75bc2a28af0ce1c881837166d0034cac213
|
[
"MIT"
] | null | null | null |
response/response.py
|
benyamin-7/simple-snmp-collector
|
f21dc75bc2a28af0ce1c881837166d0034cac213
|
[
"MIT"
] | null | null | null |
from datetime import datetime
__author__ = 'aGn'
__copyright__ = "Copyright 2018, Planet Earth"
class Response(object):
"""Response Class"""
def __init__(self):
self.socket = None
@staticmethod
def publisher(
module, meta_data,
**kwargs
):
"""
        Pack the JSON message in order to send it on the ZMQ pipeline.
:param module:
:param meta_data:
:param kwargs: SNMP values result.
:return:
"""
for name, data in kwargs.items():
if data != -8555:
meta_data['status'] = 200
else:
meta_data['status'] = 404
result = {
'data': {name: data},
'module': module,
'time': datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
'station': 'SNMP',
'tags': meta_data
}
print({name: data}, ' ', result['time'])
def publish(
self,
module, meta_data,
**kwargs
):
"""
        Call the publisher method to send the result to the subscriber servers via ZMQ.
:param module:
:param meta_data:
:param kwargs:
:return:
"""
self.publisher(
module, meta_data,
**kwargs
)
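A minimal usage sketch of the Response class above; the module name, tags and OID values are illustrative. The -8555 sentinel (taken from the code) marks a failed read and yields status 404, anything else yields 200.
resp = Response()
resp.publish('ifTable', {'device': 'router-1'}, ifInOctets=12345, ifOutOctets=-8555)
# prints one result per keyword value, tagged with status 200 or 404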
| 24.107143
| 86
| 0.474074
| 1,250
| 0.925926
| 0
| 0
| 764
| 0.565926
| 0
| 0
| 518
| 0.383704
|
77174314400427e0f14a7aea762b47ab497d31f3
| 1,399
|
py
|
Python
|
properjpg/filesmanager.py
|
vitorrloureiro/properjpg
|
4d68e4b9dc930f829d6f67b1d68e1018bdf6f87e
|
[
"MIT"
] | 3
|
2022-02-16T14:38:25.000Z
|
2022-02-18T12:20:19.000Z
|
properjpg/filesmanager.py
|
vitorrloureiro/properjpg
|
4d68e4b9dc930f829d6f67b1d68e1018bdf6f87e
|
[
"MIT"
] | 2
|
2022-02-21T05:54:14.000Z
|
2022-02-23T14:14:29.000Z
|
properjpg/filesmanager.py
|
vitorrloureiro/properjpg
|
4d68e4b9dc930f829d6f67b1d68e1018bdf6f87e
|
[
"MIT"
] | null | null | null |
import mimetypes
import os
from pathlib import Path
def ignore_files(dir: str, files: list[str]):
"""
Returns a list of files to ignore.
To be used by shutil.copytree()
"""
return [f for f in files if Path(dir, f).is_file()]
def get_input_images(input_folder: Path, output_path: Path):
"""
Get all images from a folder and it's subfolders.
Also outputs a save path to be used by the image.
:param input_folder: The folder to be scanned.
:param output_path: The root folder of the destination path.
"""
for root, _, files in os.walk(input_folder):
for file in files:
mime_type = mimetypes.guess_type(file)[0]
if isinstance(mime_type, str):
if "image" in mime_type:
image = Path(root, file)
relative_path = image.relative_to(input_folder)
save_path = Path(output_path, relative_path)
yield image, save_path
def generate_filename(input_path: Path) -> Path:
gen_counter = 1
gen_output = input_path.with_name(f"{input_path.stem}-{gen_counter}").with_suffix(
".jpg"
)
while gen_output.is_file():
gen_counter += 1
gen_output = input_path.with_name(
f"{input_path.stem}-{gen_counter}"
).with_suffix(".jpg")
output_path = gen_output
return output_path
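A minimal usage sketch of the two helpers above; 'photos' and 'out' are assumed local directories.
from pathlib import Path

for image, save_path in get_input_images(Path('photos'), Path('out')):
    target = generate_filename(save_path)   # e.g. out/trip/IMG_0001-1.jpg
    print(image, '->', target)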
| 27.98
| 86
| 0.623302
| 0
| 0
| 738
| 0.52752
| 0
| 0
| 0
| 0
| 411
| 0.293781
|
77176f91a315883bc70d79d05e8925871389967c
| 3,117
|
py
|
Python
|
mcoc/cdt_core/fetch_data.py
|
sumitb/mcoc-v3
|
93fa5d9d9b28541d19969765b6186072f0d747e7
|
[
"MIT"
] | null | null | null |
mcoc/cdt_core/fetch_data.py
|
sumitb/mcoc-v3
|
93fa5d9d9b28541d19969765b6186072f0d747e7
|
[
"MIT"
] | null | null | null |
mcoc/cdt_core/fetch_data.py
|
sumitb/mcoc-v3
|
93fa5d9d9b28541d19969765b6186072f0d747e7
|
[
"MIT"
] | null | null | null |
from ..abc import MixinMeta
import json
import re
class FetchData(MixinMeta):
"""CDT FetchData functions"""
## No cog dependencies##
# def __init__(self, bot: Red):
# """init"""
# self.bot = bot
async def aiohttp_http_to_text(ctx, url):
"""pull text from url, return pretty string"""
result = None
async with MixinMeta.session.get(url) as response:
if response.status != 200:
await ctx.send("Response Status: {response.status}")
filetext = await response.text()
filetext = FetchData.bcg_recompile(filetext) #cleanup the [15fkas] stuff
prettytext = FetchData.prettyprint(filetext)
if prettytext is not None:
return prettytext
else:
return filetext
async def aiohttp_http_to_json(ctx, url):
"""pull text from url, return pretty json"""
result = None
async with MixinMeta.session.get(url) as response:
if response.status != 200:
await ctx.send("Response Status: {response.status}")
filetext = await response.text()
filetext = FetchData.bcg_recompile(filetext)
prettytext = FetchData.prettyprint(filetext)
jsonfile = json.loads(prettytext)
return jsonfile
async def convert_kabamfile_to_json(ctx, kabamjson):
"""Convert Kabam's lists of k, v & vn to k: {v, vn}"""
# stringlist = kabamfile["strings"].keys() #list of strings
if isinstance(kabamjson, dict):
            pass  # already a dict, nothing to convert
elif isinstance(kabamjson, str):
kabamjson = json.loads(kabamjson)
else:
await ctx.send("dbg: kabam_to_json - not str or dict")
return None
snapshot_file = {"meta": {}, "strings": {}}
snapshot_file["meta"].update(kabamjson["meta"])
await ctx.send("dbg: text_to_json metacheck{}".format(snapshot_file["meta"]))
stringlist = kabamjson["strings"]
strings = {}
for item in stringlist:
if "vn" in item:
vn = item["vn"]
if isinstance(vn, int): #unlikely, but they might do it
vn = str(vn)
else:
vn = "0.0.0"
pkg = {item["k"] : {"v": item["v"], "vn": vn}}
print(pkg)
strings.update(pkg)
snapshot_file["strings"].update(strings)
return snapshot_file
def prettyprint(text_or_json):
"""Return prettyprint string of json file"""
jtext = None
if isinstance(text_or_json, str):
jtext = json.loads(text_or_json)
if isinstance(text_or_json, dict):
jtext = text_or_json
if jtext is not None:
result = json.dumps(jtext, indent=4, sort_keys=True)
return result
def bcg_recompile(str_data):
"""Scrape out the color decorators from Kabam JSON file"""
hex_re = re.compile(r'\[[0-9a-f]{6,8}\](.+?)\[-\]', re.I)
return hex_re.sub(r'**\1**', str_data)
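A quick illustration of bcg_recompile() above on a made-up Kabam string; the colour decorators are rewritten as bold markdown.
print(FetchData.bcg_recompile('Deal [ff0000]Direct[-] damage'))
# -> Deal **Direct** damage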
| 35.827586
| 85
| 0.5624
| 3,060
| 0.981713
| 0
| 0
| 0
| 0
| 2,237
| 0.717677
| 742
| 0.238049
|
77196d4e2e1432027536633a3f1233790aa78b63
| 7,175
|
py
|
Python
|
evaluate_network_example.py
|
VU-BEAM-Lab/DNNBeamforming
|
e8ee8c1e57188a795816b119279ac2e60e5c5236
|
[
"Apache-2.0"
] | 1
|
2021-04-12T19:52:43.000Z
|
2021-04-12T19:52:43.000Z
|
evaluate_network_example.py
|
VU-BEAM-Lab/DNNBeamforming
|
e8ee8c1e57188a795816b119279ac2e60e5c5236
|
[
"Apache-2.0"
] | null | null | null |
evaluate_network_example.py
|
VU-BEAM-Lab/DNNBeamforming
|
e8ee8c1e57188a795816b119279ac2e60e5c5236
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Jaime Tierney, Adam Luchies, and Brett Byram
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the license at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# INSTALL NECESSARY PACKAGES PRIOR TO RUNNING THIS NOTEBOOK
# (SEE README FOR INSTRUCTIONS)
# pytorch
# jupyter
# numpy
# scipy
# matplotlib
# pandas
# h5py
# IMPORT PYTHON PACKAGES
import torch
import os
import numpy as np
from torch import nn
import time
import argparse
import sys
import h5py
from scipy.io import loadmat
from scipy.io import savemat
from scipy.signal import hilbert
import matplotlib.pyplot as plt
# IMPORT FUNCTIONS FROM PROVIDED SOURCE CODE
sys.path.insert(0,'src')
from utils import read_model_params
from model import FullyConnectedNet
# In[ ]:
# SPECIFY PATH TO MODEL (THIS IS ALSO OUTPUT PATH)
model_path = 'models/model_1/k_8/'
# LOAD IN MODEL PARAMS
model_params = read_model_params(model_path+'model_params.txt')
# PROVIDE TEST DATA FILE INFO
test_data_path = 'test_data/'
test_data_name = 'chandat_phantom_5mm_70mm'
# In[ ]:
# SPECIFY CUDA AVAILABILITY
print('torch.cuda.is_available(): ' + str(torch.cuda.is_available()))
if model_params['cuda'] and torch.cuda.is_available():
print('Using ' + str(torch.cuda.get_device_name(0)))
else:
print('Not using CUDA')
model_params['cuda']=False
device = torch.device("cuda:0" if model_params['cuda'] else "cpu")
# In[ ]:
# LOAD IN THE TEST DATA AND REFORMAT FOR NETWORK PROCESSING
# load in delayed RF channel data
f = h5py.File(os.path.join(test_data_path,test_data_name+'.mat'),'r')
rf_data = np.asarray(f['chandat'])
f.close()
# get dimension info
[N_beams,N_elements,N_depths] = rf_data.shape
# get analytic data
analytic_data = hilbert(rf_data,axis=2)
del rf_data
# switch depth and channel axes
analytic_data = np.moveaxis(analytic_data,1,2)
# concatenate real and imaginary components into data variable
data_real = np.real(analytic_data)
data_imag = np.imag(analytic_data)
data = np.concatenate([data_real,data_imag],axis=2)
del analytic_data
# get conventional DAS B-mode data
env = np.sqrt(np.power(np.sum(data_real,axis=2),2)+
np.power(np.sum(data_imag,axis=2),2))
bmode = 20*np.log10(env)
del data_real, data_imag
# reshape data to flatten depth and beam axes
data = np.reshape(data,[N_beams*N_depths,2*N_elements])
# normalize data by L1 norm
data_norm = np.linalg.norm(data,ord=np.inf,axis=1)
data = data / data_norm[:,np.newaxis]
# load data into pytorch and onto gpu
data = torch.from_numpy(data).float()
data = data.to(device)
# In[ ]:
# PASS TEST DATA THROUGH NETWORK
# start timer
t0 = time.time()
# load the model
model = FullyConnectedNet(input_dim=model_params['input_dim'],
output_dim=model_params['output_dim'],
layer_width=model_params['layer_width'],
dropout=model_params['dropout'],
dropout_input=model_params['dropout_input'],
num_hidden=model_params['num_hidden'],
starting_weights=None,
batch_norm_enable=model_params['batch_norm_enable'])
print('Loading weights from: ' + str(os.path.join(model_params['save_dir'], 'model.dat')))
model.load_state_dict(torch.load(os.path.join(model_params['save_dir'],
'model.dat'), map_location='cpu'))
model.eval()
model = model.to(device)
# process test data with the model
with torch.set_grad_enabled(False):
data_dnn = model(data).to('cpu').data.numpy()
# stop timer
print('Processing time: {:.2f}'.format(time.time()-t0))
# clear the model and input data
del model, data
# In[ ]:
# REFORMAT PROCESSED TEST DATA
# scale back
data_dnn = data_dnn * data_norm[:,np.newaxis]
# unflatten depth and beam axes
data_dnn = np.reshape(data_dnn,[N_beams,N_depths,2*N_elements])
# split up real and imaginary
data_dnn_real = data_dnn[:,:,0:N_elements]
data_dnn_imag = data_dnn[:,:,N_elements:2*N_elements]
# get DNN beamformer B-mode data
env_dnn = np.sqrt(np.power(np.sum(data_dnn_real,axis=2),2)+
np.power(np.sum(data_dnn_imag,axis=2),2))
bmode_dnn = 20*np.log10(env_dnn)
# In[ ]:
# MAKE IMAGES AND COMPUTE IMAGE QUALITY METRICS
# load in params file
f = h5py.File(os.path.join(test_data_path,test_data_name+'_params.mat'),'r')
beam_position_x = np.asarray(f['beam_position_x'])
t = np.asarray(f['t'])
fs = np.asarray(f['fs'])
c = np.asarray(f['c'])
mask_in = np.asarray(f['mask_in'])
mask_out = np.asarray(f['mask_out'])
f.close()
depths = t/fs*c/2
# make DAS image
bmode_scaled = bmode - np.max(bmode)
fig,axs = plt.subplots(nrows=1,ncols=2,sharey=True)
das_img=axs[0].imshow(np.moveaxis(bmode_scaled,0,1),cmap='gray',
aspect='equal',vmin=-60,vmax=0,
extent=[beam_position_x[0][0]*1000,
beam_position_x[-1][0]*1000,
depths[0][-1]*1000,
depths[0][0]*1000])
axs[0].set_title('DAS')
axs[0].set_ylabel('Depth (mm)')
axs[0].set_xlabel('Lateral Pos. (mm)')
fig.colorbar(das_img,ax=axs[0])
# make DNN image
bmode_dnn_scaled = bmode_dnn - np.max(bmode_dnn)
dnn_img=axs[1].imshow(np.moveaxis(bmode_dnn_scaled,0,1),cmap='gray',
aspect='equal',vmin=-60,vmax=0,
extent=[beam_position_x[0][0]*1000,
beam_position_x[-1][0]*1000,
depths[0][-1]*1000,
depths[0][0]*1000])
axs[1].set_title('DNN')
axs[1].set_xlabel('Lateral Pos. (mm)')
# add colorbar and save figure
fig.colorbar(dnn_img,ax=axs[1])
fig.savefig(os.path.join(model_path,test_data_name+'_result.png'))
# find indicies corresponding to inside and outside of lesion
idx_in = np.where(mask_in==1)
idx_out = np.where(mask_out==1)
# compute mean and variance for DAS
mean_in = np.mean(env[idx_in])
mean_out = np.mean(env[idx_out])
var_in = np.var(env[idx_in])
var_out = np.var(env[idx_out])
# compute mean and variance for DNN
mean_in_dnn = np.mean(env_dnn[idx_in])
mean_out_dnn = np.mean(env_dnn[idx_out])
var_in_dnn = np.var(env_dnn[idx_in])
var_out_dnn = np.var(env_dnn[idx_out])
# compute image quality metrics
CNR = 20*np.log10(np.abs(mean_in-mean_out)/np.sqrt(var_in+var_out))
CNR_DNN = 20*np.log10(np.abs(mean_in_dnn-mean_out_dnn)/
np.sqrt(var_in_dnn+var_out_dnn))
CR = -20*np.log10(np.abs(mean_in/mean_out))
CR_DNN = -20*np.log10(np.abs(mean_in_dnn/mean_out_dnn))
print('CNR DAS: {:.2f}'.format(CNR))
print('CNR DNN: {:.2f}'.format(CNR_DNN))
print('CR DAS: {:.2f}'.format(CR))
print('CR DNN: {:.2f}'.format(CR_DNN))
# In[ ]:
| 28.137255
| 90
| 0.683902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,570
| 0.358188
|
771ab20147dc0551086f34101e79824ead557fa2
| 4,392
|
py
|
Python
|
nexus_constructor/geometry/slit/slit_geometry.py
|
ess-dmsc/nexus-geometry-constructor
|
c4d869b01d988629a7864357b8fc2f49a0325111
|
[
"BSD-2-Clause"
] | null | null | null |
nexus_constructor/geometry/slit/slit_geometry.py
|
ess-dmsc/nexus-geometry-constructor
|
c4d869b01d988629a7864357b8fc2f49a0325111
|
[
"BSD-2-Clause"
] | 62
|
2018-09-18T14:50:34.000Z
|
2019-02-05T15:43:02.000Z
|
nexus_constructor/geometry/slit/slit_geometry.py
|
ess-dmsc/nexus-geometry-constructor
|
c4d869b01d988629a7864357b8fc2f49a0325111
|
[
"BSD-2-Clause"
] | null | null | null |
from typing import List
from PySide2.QtGui import QVector3D
from nexus_constructor.common_attrs import SHAPE_GROUP_NAME, CommonAttrs
from nexus_constructor.model.component import Component
from nexus_constructor.model.geometry import OFFGeometryNoNexus
class SlitGeometry:
def __init__(self, component: Component):
gaps: tuple = (
float(component["x_gap"].values) if "x_gap" in component else None,
float(component["y_gap"].values) if "y_gap" in component else None,
)
self._units = self._get_units(component)
self.vertices: List[QVector3D] = []
self.faces: List[List[int]]
self._gaps: tuple = gaps
self._create_vertices()
self._create_faces()
def _get_units(self, component: Component):
if "x_gap" in component:
return component["x_gap"].attributes.get_attribute_value(CommonAttrs.UNITS)
elif "y_gap" in component:
return component["y_gap"].attributes.get_attribute_value(CommonAttrs.UNITS)
else:
return ""
def _create_vertices(self):
x_gap, y_gap = self._gaps
if x_gap:
x_1 = 0.0
x_2 = -1.0
half_side_length = x_gap * 2
dx = x_gap / 2 + half_side_length
else:
x_1 = -0.1
x_2 = -0.5
dx = 0
half_side_length = 0.05
if y_gap:
dy = y_gap / 2
slit_thickness = y_gap * 2
else:
slit_thickness = 0.02
dy = half_side_length
slit_matrix = [
[x_2, -1, 0.1],
[x_1, -1, 0.1],
[x_2, 1, 0.1],
[x_1, 1, 0.1],
[x_2, 1, -0.1],
[x_1, 1, -0.1],
[x_2, -1, -0.1],
[x_1, -1, -0.1],
]
# Left and right rectangle.
dimension_matrix = []
for column in slit_matrix:
dimension_matrix.append(
[
column[0] * half_side_length + dx,
column[1] * dy,
column[2] * half_side_length,
]
)
vertices_left_bank: List[QVector3D] = []
vertices_right_bank: List[QVector3D] = []
for column in dimension_matrix:
vertices_left_bank.append(QVector3D(column[0], column[1], column[2]))
vertices_right_bank.append(QVector3D(-column[0], -column[1], column[2]))
# Lower and upper rectangle.
x_dist = dx if x_gap else half_side_length / 2
slit_matrix = [
[1, dy, 0.1],
[-1, dy, 0.1],
[1, slit_thickness + dy, 0.1],
[-1, slit_thickness + dy, 0.1],
[1, slit_thickness + dy, -0.1],
[-1, slit_thickness + dy, -0.1],
[1, dy, -0.1],
[-1, dy, -0.1],
]
dimension_matrix = []
for column in slit_matrix:
dimension_matrix.append(
[column[0] * x_dist, column[1], column[2] * half_side_length]
)
vertices_lower_bank: List[QVector3D] = []
vertices_upper_bank: List[QVector3D] = []
for column in dimension_matrix:
vertices_lower_bank.append(QVector3D(column[0], column[1], column[2]))
vertices_upper_bank.append(QVector3D(column[0], -column[1], column[2]))
self.vertices = (
vertices_left_bank
+ vertices_right_bank
+ vertices_lower_bank
+ vertices_upper_bank
)
def _create_faces(self):
left_faces = [
[0, 1, 3, 2],
[2, 3, 5, 4],
[4, 5, 7, 6],
[6, 7, 1, 0],
[1, 7, 5, 3],
[6, 0, 2, 4],
]
right_faces = [
[col[0] + 8, col[1] + 8, col[2] + 8, col[3] + 8] for col in left_faces
]
lower_faces = [
[col[0] + 8, col[1] + 8, col[2] + 8, col[3] + 8] for col in right_faces
]
upper_faces = [
[col[0] + 8, col[1] + 8, col[2] + 8, col[3] + 8] for col in lower_faces
]
self.faces = left_faces + right_faces + lower_faces + upper_faces
def create_slit_geometry(self) -> OFFGeometryNoNexus:
geometry = OFFGeometryNoNexus(self.vertices, self.faces, SHAPE_GROUP_NAME)
geometry.units = self._units
return geometry
| 33.784615
| 87
| 0.523452
| 4,134
| 0.941257
| 0
| 0
| 0
| 0
| 0
| 0
| 113
| 0.025729
|
771bb5f41967c5159144e1d6ef84a2f513ef5409
| 5,029
|
py
|
Python
|
part4/test.py
|
willogy-team/insights--tensorflow
|
2d4885c99e7b550e94d679bed1f192f62f7e4139
|
[
"MIT"
] | null | null | null |
part4/test.py
|
willogy-team/insights--tensorflow
|
2d4885c99e7b550e94d679bed1f192f62f7e4139
|
[
"MIT"
] | null | null | null |
part4/test.py
|
willogy-team/insights--tensorflow
|
2d4885c99e7b550e94d679bed1f192f62f7e4139
|
[
"MIT"
] | null | null | null |
import os
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import matplotlib.pyplot as plt
from visualizations.manual_plot_by_matplotlib import plot_filters_of_a_layer
from visualizations.manual_plot_by_matplotlib import plot_feature_maps_of_a_layer, plot_feature_maps_of_multiple_layers
from visualizations.automatic_plot_by_tf_keras_vis import plot_activation_maximization_of_a_layer
from visualizations.automatic_plot_by_tf_keras_vis import plot_vanilla_saliency_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_smoothgrad_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_gradcam_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_gradcam_plusplus_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_scorecam_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_faster_scorecam_of_a_model
ap = argparse.ArgumentParser()
ap.add_argument("-trd", "--train_dir", required=True, help="Path to dataset train directory")
ap.add_argument("-mdp", "--model_path", required=True, help="Path to the folder for saving checkpoints")
args = vars(ap.parse_args())
def create_model():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(8, 7, activation='relu'),
tf.keras.layers.Conv2D(8, 5, activation='relu'),
tf.keras.layers.Conv2D(8, 3, activation='relu'),
tf.keras.layers.Flatten(input_shape=(32, 32, 3)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(3, activation='softmax')
])
input_shape = (None, 128, 128, 3)
model.build(input_shape)
model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=1e-4), metrics=['accuracy'])
return model
model = create_model()
checkpoint_path = os.path.join(args["model_path"], 'models')
model.load_weights(checkpoint_path)
for idx, layer in enumerate(model.layers):
print('[*] layer: ', layer)
if 'conv' not in layer.name:
print('No')
continue
filters_weights, biases_weights = layer.get_weights()
print('[**] id: {}, layer.name: {}, filters_weights.shape: {}, biases_weights.shape: {}'.format(idx, layer.name, filters_weights.shape, biases_weights.shape))
print('[**] layer.output.shape: {}'.format(layer.output.shape))
filters_max, filters_min = filters_weights.max(), filters_weights.min()
filters_weights = (filters_weights - filters_min)/(filters_max - filters_min)
print('[**] filters_weights: ', filters_weights)
plot_filters_of_a_layer(filters_weights, 3)
# === Output feature maps from a single layer ===
# A PIL object
img = load_img(os.path.join(args["train_dir"], 'n02085620-Chihuahua', 'n02085620_1558.jpg'), target_size=(128, 128))
# Convert to numpy array
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
# img = model.preprocess_input(img)
img = img/255
model_1 = Model(inputs=model.inputs, outputs=model.layers[0].output)
feature_maps_1 = model_1.predict(img)
print('[*] feature_maps_1.shape: ', feature_maps_1.shape)
plot_feature_maps_of_a_layer(feature_maps_1)
# === Output feature maps from multiple layers ===
list_of_outputs = [model.layers[idx].output for idx in range(3)]
model_2 = Model(inputs=model.inputs, outputs=list_of_outputs)
model_2.summary()
feature_maps_2 = model_2.predict(img)
for feature_map in feature_maps_2:
print('[*] feature_map.shape: ', feature_map.shape)
plot_feature_maps_of_multiple_layers(feature_maps_2)
# === Output activation maximization from a single layer ===
plot_activation_maximization_of_a_layer(model, 2)
# === GradCam++ from a single layer ===
# plot_gradcam_plusplus_of_a_layer(model, 2)
# === Attentions ===
image_titles = ['Chihuahua', 'Japanese_spaniel', 'Maltese_dog']
img1 = load_img(os.path.join(args["train_dir"], 'n02085620-Chihuahua', 'n02085620_1558.jpg'), target_size=(128, 128))
img2 = load_img(os.path.join(args["train_dir"], 'n02085782-Japanese_spaniel', 'n02085782_2874.jpg'), target_size=(128, 128))
img3 = load_img(os.path.join(args["train_dir"], 'n02085936-Maltese_dog', 'n02085936_4245.jpg'), target_size=(128, 128))
img1 = np.asarray(img1)
img2 = np.asarray(img2)
img3 = np.asarray(img3)
images = np.asarray([img1, img2, img3])
X = images/255
## Vanilla saliency
print('[*] Vanilla saliency')
plot_vanilla_saliency_of_a_model(model, X, image_titles)
## SmoothGrad
print('[*] SmoothGrad')
plot_smoothgrad_of_a_model(model, X, image_titles)
## GradCAM
print('[*] GradCAM')
plot_gradcam_of_a_model(model, X, image_titles, images)
## GradCAM++
print('[*] GradCAM++')
plot_gradcam_plusplus_of_a_model(model, X, image_titles, images)
## ScoreCAM
print('[*] ScoreCam')
plot_scorecam_of_a_model(model, X, image_titles, images)
## Faster-ScoreCAM
print('[*] Faster-ScoreCAM')
plot_faster_scorecam_of_a_model(model, X, image_titles, images)
| 39.912698
| 162
| 0.766156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,193
| 0.237224
|
771d0991f9537430f57ccbbc794e519d04ca435c
| 5,149
|
py
|
Python
|
tlg_bot.py
|
macrergate/PIK_monitor
|
06f337d9b07c63619f3d6bbed0bbac03a6db87b3
|
[
"MIT"
] | null | null | null |
tlg_bot.py
|
macrergate/PIK_monitor
|
06f337d9b07c63619f3d6bbed0bbac03a6db87b3
|
[
"MIT"
] | null | null | null |
tlg_bot.py
|
macrergate/PIK_monitor
|
06f337d9b07c63619f3d6bbed0bbac03a6db87b3
|
[
"MIT"
] | null | null | null |
import telegram
from flatten_dict import flatten
import os
import time
import datetime
from pik import PIKData
from helpers import hash_vals, dump_data, load_data, compare
class Credentials(object):
def __init__(self, credentials_json):
self.__credentials = load_data(credentials_json)
self.auth_token = self.__get_param('TLG_TOKEN', 'auth_token')
self.chat_id = self.__get_param('TLG_CHAT_ID', 'chat_id')
self.login = self.__get_param('PIK_LOGIN', 'pik_login')
self.password = self.__get_param('PIK_PASSWORD', 'pik_password')
def __get_param(self, ENV, key):
return os.environ.get(ENV, self.__credentials.get(key))
class TelegramSender(object):
def __init__(self, auth_token, chat_id, data_dir='data'):
self.tpath = os.path.join(data_dir, 'timemsg.json')
self.auth_token = auth_token
self.chat_id = chat_id
self.bot = telegram.Bot(self.auth_token)
def send_message(self, data):
template = '<b>Обнаружены изменения в личном кабинете!</b>\n'
for e in data:
template += '\nРаздел <b>"{}":</b>\n'.format(e['label'])
for val in e['values']:
template += ' <i>{}</i>\n'.format(val)
resp = self.bot.send_message(self.chat_id, template, parse_mode='html')
print(resp)
def send_init_message(self, data):
template = '<b>Инициализация мониторинга.</b>\n'
for e in data:
template += '\nСканирование раздела <b>"{}"...</b>\n'.format(e['label'])
vals = len(e['values'])
template += ' Обнаружено <b>{}</b> параметров для отслеживания.'.format(vals)
template += '\n'
resp = self.bot.send_message(self.chat_id, template, parse_mode='html')
print(resp)
def send_time_message(self, template):
timemsg = load_data(self.tpath)
data = self.bot.send_message(self.chat_id, template, disable_notification=True)
mid = data['message_id']
timemsg[self.chat_id] = mid
dump_data(timemsg, self.tpath)
def update_time_message(self):
        msg_id = load_data(self.tpath).get(self.chat_id)
        template = "Последняя проверка:\n{}".format((datetime.datetime.now().strftime("%d %b %H:%M:%S")))
        if msg_id:
            self.bot.editMessageText(template, self.chat_id, msg_id)
else:
self.send_time_message(template)
class Checker(object):
steps = [
{'label': 'Мои объекты/Главное',
'params': {
'new_data': 'flat_data',
'file': 'flat.json'
}
},
{'label': 'Мои объекты/Ход сделки/Выдача ключей',
'params': {
'new_data': 'keys_status',
'file': 'progress.json'
}
},
{'label': 'Сопровождение',
'params': {
'new_data': 'appointment',
'file': 'appointment.json'
}
},
]
def __init__(self,credentials, folder = 'data', silent = True):
self.credentials = credentials
self.silent = silent
self.folder = folder
if not self.silent:
self.bot = TelegramSender(self.credentials.auth_token, self.credentials.chat_id, folder)
def check(self):
        # Log in and fetch the data
pik_data = PIKData(self.credentials.login, self.credentials.password)
changes = []
init = False
for step in self.steps:
try:
params = step['params']
label = step['label']
print("Проверка '{}':".format(label))
                path = os.path.join(self.folder, params['file'])
initial_data = flatten(load_data(path), reducer='dot')
new_data = flatten(getattr(pik_data, params['new_data']), reducer='dot')
if not initial_data:
init = True
diffs = compare(initial_data, new_data)
if diffs:
print('Обнаружены изменения!')
print(diffs)
changes.append({'label': label, 'values': diffs})
dump_data(getattr(pik_data, params['new_data']), path)
else:
print(' Изменений нет!')
except Exception as e:
print('Exception:', str(e))
if changes and not self.silent:
if init:
self.bot.send_init_message(changes)
else:
self.bot.send_message(changes)
if not self.silent:
self.bot.update_time_message()
if __name__ == '__main__':
folder = os.environ.get('DATA_DIR', 'data')
mode = os.environ.get('MODE', 'single')
delay = int(os.environ.get('DELAY', 600))
credentials_json = os.path.join(folder, 'credentials.json')
credentials = Credentials(credentials_json)
checker = Checker(credentials, folder, silent = False)
if mode == 'single':
checker.check()
elif mode == 'loop':
while True:
checker.check()
print("Wait {} sec.".format(delay))
time.sleep(delay)
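# A minimal sketch of the credentials file read by Credentials above (values are
# placeholders); by default it is looked up at data/credentials.json, and the
# environment variables TLG_TOKEN, TLG_CHAT_ID, PIK_LOGIN and PIK_PASSWORD take
# precedence over the file values.
#
# {
#     "auth_token": "123456789:ABC-telegram-bot-token",
#     "chat_id": "-1001234567890",
#     "pik_login": "user@example.com",
#     "pik_password": "secret"
# }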
| 34.557047
| 105
| 0.571373
| 4,686
| 0.866494
| 0
| 0
| 0
| 0
| 0
| 0
| 1,209
| 0.223558
|
771d3fa0c3bd43d72d1bdf5d1c6f1888cb0021be
| 15,025
|
py
|
Python
|
CopyrightHeaderChecker.py
|
medazzo/CopyRitghHeaderChecker-
|
320642ebd9216338820b6876519e9fae69252dd7
|
[
"MIT"
] | 2
|
2019-01-07T14:42:44.000Z
|
2019-01-07T14:42:46.000Z
|
CopyrightHeaderChecker.py
|
medazzo/CopyRightHeaderChecker
|
320642ebd9216338820b6876519e9fae69252dd7
|
[
"MIT"
] | null | null | null |
CopyrightHeaderChecker.py
|
medazzo/CopyRightHeaderChecker
|
320642ebd9216338820b6876519e9fae69252dd7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# @author Mohamed Azzouni , Paris, France
#
import os
import time
import ntpath
import sys
import json
import argparse
from os.path import join, getsize
from shutil import copyfile
behaviour = """{
"reporting": true ,
"updatefiles": true ,
"excludeDirs" :[".git",".repo"],
"shebang":
{
"she":["#!/","#!/bin","#!/usr/bin"],
"check": true
},
"oldCopyright":
{
"lookforandwarn": true,
"forceNewCopyright": false,
"numberofline":6
},
"checks":
[
{
"brief":"C/C++ Code",
"extensions":[".c",".cpp",".h",".hpp"],
"names":[],
"copyright":[
"/// @author your $$CompanyName$$ , $$CompanyAddress$$, $$CompanyCountry$$",
"/// ",
"/// @copyright $$CompanyYear$$ $$CompanyName$$",
"/// All rights exclusively reserved for $$CompanyName$$,",
"/// unless otherwise expressly agreed",
""]
},
{
"brief":"bash/scripting Code",
"extensions":[".conf",".conf.sample",".bb",".inc",".service",".sh",".cfg",".m4" ,".init",".py",".pl"],
"names":["init","run-ptest","llvm-config","build-env-set","init-build-env","setup-build-env","Dockerfile"],
"copyright":[
"# @author your $$CompanyName$$ , $$CompanyAddress$$, $$CompanyCountry$$",
"#",
"# @copyright $$CompanyYear$$ $$CompanyName$$",
"# All rights exclusively reserved for $$CompanyName$$,",
"# unless otherwise expressly agreed",
""]
},
{
"brief":"html/js Code",
"extensions":[".html"],
"names":[],
"copyright":[
"<!-- @author your $$CompanyName$$ , $$CompanyAddress$$, $$CompanyCountry$$ -->",
"<!-- -->",
"<!-- @copyright $$CompanyYear$$ $$CompanyName$$ -->",
"<!-- All rights exclusively reserved for $$CompanyName$$ , -->",
"<!-- unless otherwise expressly agreed -->",
""]
},
{
"brief":"Markdown Code",
"extensions":[".md"],
"names":[],
"copyright":[
"[comment]: <> (@author your $$CompanyName$$ , $$CompanyAddress$$, $$CompanyCountry$$ )",
"[comment]: <> ( )",
"[comment]: <> (@copyright $$CompanyYear$$ $$CompanyName$$ )",
"[comment]: <> (All rights exclusively reserved for $$CompanyName$$, )",
"[comment]: <> (unless otherwise expressly agreed )",
""]
}
]
}"""
# Define
Debug = False
Outputfolder=""
Rbehaviour = json.loads(behaviour)
filesAlreadyCopyright = []
# Parameters :
# --dumpShebang : : dump the current list of managed shebang
# --dumpExtension : : dump the current list of managed files extensions
# -r --report [default: False]: if true, print a complete report of what has been done
# -u --update [default: False]: if true, files will be updated; otherwise a modified copy will be generated
# -w --warnOldHeader [default: False]: if true, warn in traces about old headers already present in files
# -f --forceOldHeader [default: False]: if true, replace the old header if it exists (exclusive with option warnOldHeader)
# -n --nameCompany : : string
# -a --adressCompany : : string
# -c --countryCompany : : string
# -y --yearCompany : : string
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Setup parser and command line arguments
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def SetupParserParameter( ):
""" this functions will setup parameter and parser for argument"""
parser = argparse.ArgumentParser(description='Checks sources code files for Copyright Header and add ours.',
prog='CopyrightHeaderChecker')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
parser.add_argument('--verbose', action='store_true', help='verbose mode ')
subparsers = parser.add_subparsers(help='sub command :')
parser_info = subparsers.add_parser('info', help='get checker informations ')
parser_info.add_argument('-s','--dumpShebang', dest='dumpShebang',action='store_true',
help='dump the current list of managed shebang')
parser_info.add_argument('-e', '--dumpExtension', dest='dumpExtension',action='store_true',
help='dump the current list of managed files extensions')
parser_process = subparsers.add_parser('process', help='process checker')
parser_process.add_argument('-r','--report', dest='report',action='store_true',
                                help='print a detailed report of what has been done')
parser_process.add_argument('-u','--update', dest='update',action='store_true',
help='update files in sources path')
parser_process.add_argument('-w','--warnOldHeader', dest='warnOldHeader',action='store_false',
                                help='warn in traces about old headers already present in files ')
parser_process.add_argument('-f','--forceOldHeader', dest='forceOldHeader',action='store_true',
                                help='replace the old header if it exists in files ')
parser_process.add_argument('-n','--nameCompany', dest='nameCompany',required=True,
help='company name to be used in copyright header')
parser_process.add_argument('-a','--adressCompany', dest='adressCompany',required=True,
help='company address to be used in copyright header')
parser_process.add_argument('-c','--countryCompany', dest='countryCompany',required=True,
help='company country to be used in copyright header')
parser_process.add_argument('-y','--yearCompany', dest='yearCompany',required=True,
help='years to be used in copyright header ')
    parser_process.add_argument('-i','--inputSourceCodeFolder', dest='inputFolder',required=True,
help='path to folder containing source code to operate on')
args = parser.parse_args()
return args
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Find all concerned Files
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def FindFiles(rootfolder, report ):
""" this functions will find files as defined up """
start = time.time()
for bhv in Rbehaviour["checks"]:
bhv["files"]=[]
for root, dirs,files in os.walk(rootfolder):
dirs[:] = [d for d in dirs if d not in Rbehaviour["excludeDirs"]]
for x in files :
sfileN = os.path.join(root, x)
            if Debug : print(' ==> Checking file --> {0}'.format(sfileN))
# check old copyright
if Rbehaviour["oldCopyright"]["lookforandwarn"]:
if checkfileCopyright(sfileN):
filesAlreadyCopyright.append(sfileN)
if not Rbehaviour["oldCopyright"]["forceNewCopyright"]:
                        continue
# checks
found = False
for bhv in Rbehaviour["checks"]:
# Check if file is in names
try:
bhv["names"].index(x)
except :
# Check if file is in extensions
if Debug :
print bhv["brief"]," extensions ==> Checking file --> ",
for x in bhv["extensions"]:
                            print dbg_ext,
print " "
for ext in bhv["extensions"] :
if x.endswith(ext):
bhv["files"].append(sfileN)
if Debug :
print bhv["brief"]," >> ",ext," extensions ==> Found file --> ",x
found = True
break
else:
bhv["files"].append(sfileN)
found = True
if Debug : print ("{} names ==> Found file -->",format(bhv["brief"],x))
if found:
break
end = time.time()
took = end - start
if(report):
print " - - - - - - Analyse ",bhv['brief']," took %.4f sec - - - - - - "% took
for bhv in Rbehaviour["checks"]:
print " - - - - - - ",len(bhv["files"])," ",bhv["brief"]," files."
if (Rbehaviour["oldCopyright"]["lookforandwarn"]):
print " - - - - - - ! ",len(filesAlreadyCopyright)," files are already with a Copyright Headers :"
for x in filesAlreadyCopyright:
print " - ",x
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# for Sfiles check shebang
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def checkfileShebang(filename):
""" return true if file has a shebang """
if Rbehaviour["shebang"]["check"]:
if Debug : print(" Will check shebang .. " )
infile = open(filename, 'r')
firstLine = infile.readline()
infile.close()
for she in Rbehaviour["shebang"]["she"]:
if Debug : print("?? did file ",filename," start with ",she ," [",firstLine,"] " )
if firstLine.startswith(she):
return True
return False
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# To check if file contain already a License Copyright Header
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def checkfileCopyright(filename):
    """ return true if file has already a Copyright in first X lines """
    infile = open(filename, 'r')
    found = False
    for _ in xrange(Rbehaviour["oldCopyright"]["numberofline"]):
        line = infile.readline()
        if "Copyright" in line or "copyright" in line:
            found = True
            break
    infile.close()
    return found
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Apply new Copyright to a file
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def ApplyCopyright( srcfile, dstfile , copyright, cname, ccontry, caddress, cyear):
""" will apply new Copyright on dst file then append the old src file """
    # apply company information
copyright = [w.replace('$$CompanyName$$', cname) for w in copyright]
copyright = [w.replace('$$CompanyCountry$$', ccontry) for w in copyright]
copyright = [w.replace('$$CompanyAddress$$', caddress) for w in copyright]
copyright = [w.replace('$$CompanyYear$$', cyear) for w in copyright]
if(srcfile != dstfile):
# create dir file if not exist
nbase = os.path.dirname(dstfile)
if not os.path.exists(nbase):
os.makedirs(nbase)
dst = open(dstfile, "w")
else:
tmp = "/tmp/tmp-fheadercopyrightLicense"
dst = open(tmp, "w")
isSheb = checkfileShebang(srcfile)
src = open(srcfile, "r")
if isSheb:
line = src.readline()
dst.write(line)
for cop in copyright:
dst.write(cop)
dst.write('\n')
# continue copy src file
while line:
line = src.readline()
dst.write(line)
else:
if Debug : print(" \t ==> file ",srcfile," DONT have shebang !" )
for cop in copyright:
dst.write(cop)
dst.write('\n')
dst.write(src.read())
dst.close()
src.close()
if(srcfile == dstfile):
copyfile(tmp, dstfile)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# To apply new Copyright headers in files
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def ApplyInTmp(OutDir,report, cname, ccontry, caddress, cyear):
""" will apply new Copyright on array of files into OutDir with Same tree as original """
global Outputfolder
# checks
for bhv in Rbehaviour["checks"]:
start = time.time()
for x in bhv["files"] :
# fix folder
p = os.path.dirname(x)
while p.startswith('../'):
p = p[3:]
if p.startswith('/'):
p = p[1:]
Outputfolder = OutDir+"/"+p
nfile = Outputfolder+"/"+ntpath.basename(x)
ApplyCopyright(x, nfile, bhv["copyright"], cname, ccontry, caddress, cyear)
end = time.time()
took = end - start
if(report):
print " - - - - - - Applying ",bhv['brief']," took %.4f sec - - - - - - "% took
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# To apply new Copyright headers in files
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def ApplyIn(report, cname, ccontry, caddress, cyear):
""" will apply new Copyright on array of files into original Dir"""
# checks
for bhv in Rbehaviour["checks"]:
start = time.time()
for x in bhv["files"] :
ApplyCopyright(x, x, bhv["copyright"], cname, ccontry, caddress, cyear)
end = time.time()
took = end - start
if(report):
print" - - - - - - Applying ",bhv['brief']," took %.4f sec - - - - - - "% took
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # M A I N # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
print("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -")
print("- - - - - - - - - - - - - - - - - - Copyright Header - - - - - - - - - - - - - - - - - - - - -")
args = SetupParserParameter()
Debug = args.verbose
if "dumpShebang" in args:
print("- - - - - - - Info - - - - - - ->")
if(args.dumpShebang == True):
print " Supportted shebang: ",
for x in Rbehaviour["shebang"]["she"]:
print x,
print " "
if(args.dumpExtension == True):
print " Supportted Extensions: "
for bhv in Rbehaviour["checks"]:
print " ",
print bhv["brief"]," : ",
for x in bhv["extensions"]:
print x,
print " "
else:
if not os.path.exists(args.inputFolder):
print(" - - - Bad parameter , source code path !! => ",args.inputFolder)
print(" - - - folder source did not exist ! - - - ")
exit(-2)
print("- - - - - - - Analyse - - - - - - ->")
FindFiles(args.inputFolder, args.report)
print("- - - - - - - Process - - - - - - ->")
if ( args.update == True):
ApplyIn(args.report,args.nameCompany, args.countryCompany, args.adressCompany, args.yearCompany)
else:
ApplyInTmp("/tmp", args.report, args.nameCompany, args.countryCompany, args.adressCompany, args.yearCompany)
print " Generated ", Outputfolder
print("<- - - - - - - Done - - - - - - - - - -")
print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # D O N E # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
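# Illustrative invocations (flag names come from SetupParserParameter above; paths and
# company details are placeholders):
#
#   python CopyrightHeaderChecker.py info -s -e
#   python CopyrightHeaderChecker.py process -r -n "Acme Corp" -a "1 Main Street, Paris" \
#       -c "France" -y "2019" -i ./src
#
# Without -u the annotated copies are written under /tmp, mirroring the source tree;
# with -u the source files are updated in place.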
| 43.175287
| 122
| 0.493178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8,916
| 0.593411
|
771d6750899b13f63733f55154de5c6a095ec756
| 2,132
|
py
|
Python
|
PYTHON/singly_linked_list.py
|
ceccs17d55/open-source-contribution
|
63d95a990cdcc1e31c5fca3cb61f2fa34dae9e1f
|
[
"MIT"
] | 2
|
2022-03-10T17:37:24.000Z
|
2022-03-10T17:40:05.000Z
|
PYTHON/singly_linked_list.py
|
ceccs17d55/open-source-contribution
|
63d95a990cdcc1e31c5fca3cb61f2fa34dae9e1f
|
[
"MIT"
] | 1
|
2021-10-03T19:52:07.000Z
|
2021-10-03T19:52:07.000Z
|
PYTHON/singly_linked_list.py
|
ceccs17d55/open-source-contribution
|
63d95a990cdcc1e31c5fca3cb61f2fa34dae9e1f
|
[
"MIT"
] | 1
|
2021-10-04T17:22:09.000Z
|
2021-10-04T17:22:09.000Z
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def print_list(self):
temp = self.head
linked_list = ''
while temp:
linked_list += str(temp.data) + " -> "
temp = temp.next
print(linked_list)
# lists start at 0
def insert_node(self, val, pos):
target = Node(val)
# specific case for replacing head
if pos == 0:
target.next = self.head
self.head = target
return
def get_prev(position):
temp = self.head
count = 1
while count < position:
temp = temp.next
count += 1
return temp
# Getting previous node
prev = get_prev(pos)
if prev.next:
# Temp variable for upcoming node
next_node = prev.next
# Set previous next to our target node
prev.next = target
# Set next node of target node from temp variable
target.next = next_node
def delete_node(self, key):
temp = self.head
if temp is None:
return
if temp.data == key:
self.head = temp.next
temp = None
return
while temp.next.data != key:
temp = temp.next
# Getting target node
target_node = temp.next
# Set previous node's next to target's next
temp.next = target_node.next
# Remove target node's pointer
target_node.next = None
# Nodes: 4 -> 5 -> 7 -> 2
link = LinkedList()
link.head = Node(4)
first_node = Node(5)
second_node = Node(7)
third_node = Node(2)
link.head.next = first_node
first_node.next = second_node
second_node.next = third_node
link.print_list()
# Nodes: 4 -> 5 -> 7 -> 2
# Insert 3 at index 2
# Nodes: 4 -> 5 -> 3 -> 7 -> 2
link.insert_node(3, 2)
link.print_list()
# Nodes: 4 -> 5 -> 3 -> 7 -> 2
# Delete 3
# Nodes: 4 -> 5 -> 7 -> 2
link.delete_node(3)
link.print_list()
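# Expected console output of the demo above, one line per print_list() call:
#   4 -> 5 -> 7 -> 2 ->
#   4 -> 5 -> 3 -> 7 -> 2 ->
#   4 -> 5 -> 7 -> 2 ->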
| 20.304762
| 61
| 0.533771
| 1,656
| 0.776735
| 0
| 0
| 0
| 0
| 0
| 0
| 463
| 0.217167
|
771de5725155e6d31fa7d7b90220c29436ed35b2
| 22,048
|
py
|
Python
|
addons/odoo_marketplace/models/res_config.py
|
marionumza/vocal_v12
|
480990e919c9410903e06e7813ee92800bd6a569
|
[
"Unlicense"
] | null | null | null |
addons/odoo_marketplace/models/res_config.py
|
marionumza/vocal_v12
|
480990e919c9410903e06e7813ee92800bd6a569
|
[
"Unlicense"
] | null | null | null |
addons/odoo_marketplace/models/res_config.py
|
marionumza/vocal_v12
|
480990e919c9410903e06e7813ee92800bd6a569
|
[
"Unlicense"
] | 1
|
2021-05-05T07:59:08.000Z
|
2021-05-05T07:59:08.000Z
|
# -*- coding: utf-8 -*-
#################################################################################
# Author : Webkul Software Pvt. Ltd. (<https://webkul.com/>)
# Copyright(c): 2015-Present Webkul Software Pvt. Ltd.
# License URL : https://store.webkul.com/license.html/
# All Rights Reserved.
#
#
#
# This program is copyright property of the author mentioned above.
# You can`t redistribute it and/or modify it.
#
#
# You should have received a copy of the License along with this program.
# If not, see <https://store.webkul.com/license.html/>
#################################################################################
from odoo import models, fields, api, _
from odoo.tools.translate import _
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
class ResConfigSettings(models.TransientModel):
_inherit = "res.config.settings"
@api.model
def _default_category(self):
obj = self.env["product.category"].search([('name', '=', _('All'))])
return obj[0] if obj else self.env["product.category"]
@api.model
def get_journal_id(self):
obj = self.env["account.journal"].search([('name', '=', _('Vendor Bills'))])
return obj[0] if obj else self.env["account.journal"]
auto_product_approve = fields.Boolean(string="Auto Product Approve")
internal_categ = fields.Many2one(
"product.category", string="Internal Category")
warehouse_location_id = fields.Many2one(
"stock.location", string="Warehouse Location", domain="[('usage', '=', 'internal')]")
mp_default_warehouse_id = fields.Many2one("stock.warehouse", string="Warehouse")
seller_payment_limit = fields.Integer(string="Seller Payment Limit")
next_payment_requset = fields.Integer(string="Next Payment Request")
group_mp_product_variant = fields.Boolean(
string="Allow sellers for several product attributes, defining variants (Example: size, color,...)",
group='odoo_marketplace.marketplace_seller_group',
implied_group='product.group_product_variant'
)
group_mp_shop_allow = fields.Boolean(
string="Allow sellers to manage seller shop.",
group='odoo_marketplace.marketplace_seller_group',
implied_group='odoo_marketplace.group_marketplace_seller_shop'
)
group_mp_product_pricelist = fields.Boolean(
string="Allow sellers for Advanced pricing on product using pricelist.",
group='odoo_marketplace.marketplace_seller_group',
implied_group='product.group_product_pricelist'
)
# Inventory related field
auto_approve_qty = fields.Boolean(string="Auto Quantity Approve")
# Seller related field
auto_approve_seller = fields.Boolean(string="Auto Seller Approve")
global_commission = fields.Float(string="Global Commission")
# Mail notification related fields
enable_notify_admin_4_new_seller = fields.Boolean(string="Enable Notification Admin For New Seller")
enable_notify_seller_4_new_seller = fields.Boolean(
string="Enable Notification Seller for Seller Request")
enable_notify_admin_on_seller_approve_reject = fields.Boolean(
string="Enable Notification Admin On Seller Approve Reject")
enable_notify_seller_on_approve_reject = fields.Boolean(string="Enable Notification Seller On Approve Reject")
enable_notify_admin_on_product_approve_reject = fields.Boolean(
string="Enable Notification Admin On Product Approve Reject")
enable_notify_seller_on_product_approve_reject = fields.Boolean(
string="Enable Notification Seller On Product Approve Reject")
enable_notify_seller_on_new_order = fields.Boolean(string="Enable Notification Seller On New Order")
notify_admin_4_new_seller_m_tmpl_id = fields.Many2one(
"mail.template", string="Mail Template to Notify Admin For New Seller", domain="[('model_id.model','=','res.partner')]")
notify_seller_4_new_seller_m_tmpl_id = fields.Many2one(
"mail.template", string="Mail Template to Notify Seller On Seller Request", domain="[('model_id.model','=','res.partner')]")
notify_admin_on_seller_approve_reject_m_tmpl_id = fields.Many2one(
"mail.template", string="Mail Template to Notify Admin on Seller Approve/Reject", domain="[('model_id.model','=','res.partner')]")
notify_seller_on_approve_reject_m_tmpl_id = fields.Many2one(
"mail.template", string="Mail Template to Notify Seller On Approve/Reject", domain="[('model_id.model','=','res.partner')]")
notify_admin_on_product_approve_reject_m_tmpl_id = fields.Many2one(
"mail.template", string="Mail Template to Notify Admin On Product Approve/Reject", domain="[('model_id.model','=','product.template')]")
notify_seller_on_product_approve_reject_m_tmpl_id = fields.Many2one(
"mail.template", string="Mail Template to Notify Seller On Product Approve/Reject", domain="[('model_id.model','=','product.template')]")
notify_seller_on_new_order_m_tmpl_id = fields.Many2one(
"mail.template", string="Mail Template to Notify Seller On New Order", domain="[('model_id.model','=','sale.order.line')]")
    # Seller shop/profile related fields
product_count = fields.Boolean(related="website_id.mp_product_count",
string="Show seller's product count on website.", readonly=False)
sale_count = fields.Boolean(related="website_id.mp_sale_count", string="Show seller's sales count on website.", readonly=False)
shipping_address = fields.Boolean(related="website_id.mp_shipping_address",
string="Show seller's shipping address on website.", readonly=False)
seller_since = fields.Boolean(related="website_id.mp_seller_since", string="Show seller since Date on website.", readonly=False)
seller_t_c = fields.Boolean(related="website_id.mp_seller_t_c",
string="Show seller's Terms & Conditions on website.", readonly=False)
seller_contact_btn = fields.Boolean(related="website_id.mp_seller_contact_btn",
string='Show "Contact Seller" Button on website.', readonly=False)
seller_review = fields.Boolean(related="website_id.mp_seller_review",
string='Show Seller Review on website.', readonly=False)
return_policy = fields.Boolean(related="website_id.mp_return_policy",
string="Show seller's Retrun Policy on website.", readonly=False)
shipping_policy = fields.Boolean(related="website_id.mp_shipping_policy",
string="Show Seller's Shipping Policy on website.", readonly=False)
recently_product = fields.Integer(related="website_id.mp_recently_product",
string="# of products for recently added products menu. ", readonly=False)
# Seller Review settings field
review_load_no = fields.Integer(related="website_id.mp_review_load_no",
string="No. of Reviews to load", help="Set default numbers of review to show on website.", readonly=False)
review_auto_publish = fields.Boolean(related="website_id.mp_review_auto_publish",
string="Auto Publish", help="Publish Customer's review automatically.", readonly=False)
show_seller_list = fields.Boolean(related="website_id.mp_show_seller_list",
string='Show Sellers List on website.', readonly=False)
show_seller_shop_list = fields.Boolean(related="website_id.mp_show_seller_shop_list",
string='Show Seller Shop List on website.', readonly=False)
show_become_a_seller = fields.Boolean(related="website_id.mp_show_become_a_seller",string="Show Become a Seller button on Account Home Page", readonly=False)
seller_payment_journal_id = fields.Many2one("account.journal", string="Seller Payment Journal", default=get_journal_id, domain="[('type', '=', 'purchase')]")
mp_currency_id = fields.Many2one('res.currency', "Marketplace Currency", readonly=False)
show_visit_shop = fields.Boolean("Show visit shop link on product page")
seller_payment_product_id = fields.Many2one("product.product", string="Seller Payment Product", domain="[('sale_ok', '=', False),('purchase_ok', '=', False),('type','=','service')]")
term_and_condition = fields.Html(string="Marketplace Terms & Conditions", related="website_id.mp_term_and_condition", readonly=False)
message_to_publish = fields.Text(
string="Review feedback message", help="Message to Customer on review publish.", related="website_id.mp_message_to_publish", readonly=False)
sell_page_label = fields.Char(
string="Sell Link Label", related="website_id.mp_sell_page_label", readonly=False)
sellers_list_label = fields.Char(
string="Seller List Link Label", related="website_id.mp_sellers_list_label", readonly=False)
seller_shop_list_label = fields.Char(
string="Seller Shop List Link Label", related="website_id.mp_seller_shop_list_label", readonly=False)
landing_page_banner = fields.Binary(string="Landing Page Banner", related="website_id.mp_landing_page_banner", readonly=False)
seller_new_status_msg = fields.Text(
string="For New Status", related="website_id.mp_seller_new_status_msg", readonly=False)
seller_pending_status_msg = fields.Text(
string="For Pending Status", related="website_id.mp_seller_pending_status_msg", readonly=False)
show_sell_menu_header = fields.Boolean(related="website_id.mp_show_sell_menu_header", string="Show Sell menu in header", readonly=False)
show_sell_menu_footer = fields.Boolean(related="website_id.mp_show_sell_menu_footer", string="Show Sell menu in footer", readonly=False)
# seller_denied_status_msg = fields.Text(
# string="For Denied Status", related="website_id.mp_seller_denied_status_msg")
@api.onchange("warehouse_location_id")
def on_change_location_id(self):
        if self.warehouse_location_id:
wl_obj = self.env["stock.location"].sudo().browse(
self.warehouse_location_id.id)
wh_obj = self.env["stock.warehouse"]
whs = wh_obj.search([('view_location_id', 'parent_of', wl_obj.ids)], limit=1)
if whs:
self.mp_default_warehouse_id = whs.id
@api.multi
def set_values(self):
super(ResConfigSettings, self).set_values()
self.env['ir.default'].sudo().set('res.config.settings', 'auto_product_approve', self.auto_product_approve)
self.env['ir.default'].sudo().set('res.config.settings', 'internal_categ', self.internal_categ.id)
self.env['ir.default'].sudo().set('res.config.settings', 'mp_default_warehouse_id', self.mp_default_warehouse_id.id)
self.env['ir.default'].sudo().set('res.config.settings', 'warehouse_location_id', self.warehouse_location_id.id)
self.env['ir.default'].sudo().set('res.config.settings', 'auto_approve_qty', self.auto_approve_qty)
self.env['ir.default'].sudo().set('res.config.settings', 'auto_approve_seller', self.auto_approve_seller)
self.env['ir.default'].sudo().set('res.config.settings', 'global_commission', self.global_commission)
self.env['ir.default'].sudo().set('res.config.settings', 'seller_payment_limit', self.seller_payment_limit)
self.env['ir.default'].sudo().set('res.config.settings', 'next_payment_requset', self.next_payment_requset)
self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_admin_4_new_seller', self.enable_notify_admin_4_new_seller)
self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_seller_4_new_seller', self.enable_notify_seller_4_new_seller)
self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_admin_on_seller_approve_reject', self.enable_notify_admin_on_seller_approve_reject)
self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_seller_on_approve_reject', self.enable_notify_seller_on_approve_reject)
self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_admin_on_product_approve_reject', self.enable_notify_admin_on_product_approve_reject)
self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_seller_on_product_approve_reject', self.enable_notify_seller_on_product_approve_reject)
self.env['ir.default'].sudo().set('res.config.settings', 'enable_notify_seller_on_new_order', self.enable_notify_seller_on_new_order)
self.env['ir.default'].sudo().set('res.config.settings', 'notify_admin_4_new_seller_m_tmpl_id', self.notify_admin_4_new_seller_m_tmpl_id.id)
self.env['ir.default'].sudo().set('res.config.settings', 'notify_seller_4_new_seller_m_tmpl_id', self.notify_seller_4_new_seller_m_tmpl_id.id)
self.env['ir.default'].sudo().set('res.config.settings', 'notify_admin_on_seller_approve_reject_m_tmpl_id', self.notify_admin_on_seller_approve_reject_m_tmpl_id.id)
self.env['ir.default'].sudo().set('res.config.settings', 'notify_seller_on_approve_reject_m_tmpl_id', self.notify_seller_on_approve_reject_m_tmpl_id.id)
self.env['ir.default'].sudo().set('res.config.settings', 'notify_admin_on_product_approve_reject_m_tmpl_id', self.notify_admin_on_product_approve_reject_m_tmpl_id.id)
self.env['ir.default'].sudo().set('res.config.settings', 'notify_seller_on_product_approve_reject_m_tmpl_id', self.notify_seller_on_product_approve_reject_m_tmpl_id.id)
self.env['ir.default'].sudo().set('res.config.settings', 'notify_seller_on_new_order_m_tmpl_id', self.notify_seller_on_new_order_m_tmpl_id.id)
self.env['ir.default'].sudo().set('res.config.settings', 'seller_payment_journal_id', self.seller_payment_journal_id.id)
seller_payment = self.env["seller.payment"].sudo().search([]) #For users who are not from marketplace group
if not seller_payment:
self.env['ir.default'].sudo().set('res.config.settings', 'mp_currency_id', self.mp_currency_id.id)
self.env['ir.default'].sudo().set('res.config.settings', 'show_visit_shop', self.show_visit_shop)
self.env['ir.default'].sudo().set('res.config.settings', 'seller_payment_product_id', self.seller_payment_product_id.id)
self.env['ir.default'].sudo().set('res.config.settings', 'group_mp_product_variant', self.group_mp_product_variant)
self.env['ir.default'].sudo().set('res.config.settings', 'group_mp_shop_allow', self.group_mp_shop_allow)
self.env['ir.default'].sudo().set('res.config.settings', 'group_mp_product_pricelist', self.group_mp_product_pricelist)
@api.model
def get_values(self):
res = super(ResConfigSettings, self).get_values()
auto_product_approve = self.env['ir.default'].get('res.config.settings', 'auto_product_approve')
internal_categ = self.env['ir.default'].get('res.config.settings', 'internal_categ') or self._default_category().id
mp_default_warehouse_id = self.env['ir.default'].get('res.config.settings', 'mp_default_warehouse_id')
warehouse_location_id = self.env['ir.default'].get('res.config.settings', 'warehouse_location_id') or self._default_location().id
auto_approve_qty = self.env['ir.default'].get('res.config.settings', 'auto_approve_qty')
auto_approve_seller = self.env['ir.default'].get('res.config.settings', 'auto_approve_seller')
global_commission = self.env['ir.default'].get('res.config.settings', 'global_commission')
seller_payment_limit = self.env['ir.default'].get('res.config.settings', 'seller_payment_limit')
next_payment_requset = self.env['ir.default'].get('res.config.settings', 'next_payment_requset')
enable_notify_admin_4_new_seller = self.env['ir.default'].get('res.config.settings', 'enable_notify_admin_4_new_seller')
enable_notify_seller_4_new_seller = self.env['ir.default'].get('res.config.settings', 'enable_notify_seller_4_new_seller')
enable_notify_admin_on_seller_approve_reject = self.env['ir.default'].get('res.config.settings', 'enable_notify_admin_on_seller_approve_reject')
enable_notify_seller_on_approve_reject = self.env['ir.default'].get('res.config.settings', 'enable_notify_seller_on_approve_reject')
enable_notify_admin_on_product_approve_reject = self.env['ir.default'].get('res.config.settings', 'enable_notify_admin_on_product_approve_reject')
enable_notify_seller_on_product_approve_reject = self.env['ir.default'].get('res.config.settings', 'enable_notify_seller_on_product_approve_reject')
enable_notify_seller_on_new_order = self.env['ir.default'].get('res.config.settings', 'enable_notify_seller_on_new_order')
notify_admin_4_new_seller_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_admin_4_new_seller_m_tmpl_id')
notify_seller_4_new_seller_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_seller_4_new_seller_m_tmpl_id')
notify_admin_on_seller_approve_reject_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_admin_on_seller_approve_reject_m_tmpl_id')
notify_seller_on_approve_reject_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_seller_on_approve_reject_m_tmpl_id')
notify_admin_on_product_approve_reject_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_admin_on_product_approve_reject_m_tmpl_id')
notify_seller_on_product_approve_reject_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_seller_on_product_approve_reject_m_tmpl_id')
notify_seller_on_new_order_m_tmpl_id = self.env['ir.default'].get('res.config.settings', 'notify_seller_on_new_order_m_tmpl_id')
seller_payment_journal_id = self.env['ir.default'].get('res.config.settings', 'seller_payment_journal_id') or self.get_journal_id().id
mp_currency_id = self.env['ir.default'].get('res.config.settings', 'mp_currency_id') or self.env.user.company_id.currency_id.id
show_visit_shop = self.env['ir.default'].get('res.config.settings', 'show_visit_shop')
group_mp_product_variant = self.env['ir.default'].get('res.config.settings', 'group_mp_product_variant')
group_mp_shop_allow = self.env['ir.default'].get('res.config.settings', 'group_mp_shop_allow')
group_mp_product_pricelist = self.env['ir.default'].get('res.config.settings', 'group_mp_product_pricelist')
seller_payment_product_id = self.env['ir.default'].get('res.config.settings', 'seller_payment_product_id')
res.update(
auto_product_approve = auto_product_approve,
internal_categ = internal_categ,
mp_default_warehouse_id = mp_default_warehouse_id,
warehouse_location_id = warehouse_location_id,
auto_approve_qty = auto_approve_qty,
auto_approve_seller = auto_approve_seller,
global_commission = global_commission,
seller_payment_limit = seller_payment_limit,
next_payment_requset = next_payment_requset,
enable_notify_admin_4_new_seller = enable_notify_admin_4_new_seller,
enable_notify_seller_4_new_seller = enable_notify_seller_4_new_seller,
enable_notify_admin_on_seller_approve_reject = enable_notify_admin_on_seller_approve_reject,
enable_notify_seller_on_approve_reject = enable_notify_seller_on_approve_reject,
enable_notify_admin_on_product_approve_reject = enable_notify_admin_on_product_approve_reject,
enable_notify_seller_on_product_approve_reject = enable_notify_seller_on_product_approve_reject,
enable_notify_seller_on_new_order = enable_notify_seller_on_new_order,
notify_admin_4_new_seller_m_tmpl_id = notify_admin_4_new_seller_m_tmpl_id,
notify_seller_4_new_seller_m_tmpl_id = notify_seller_4_new_seller_m_tmpl_id,
notify_admin_on_seller_approve_reject_m_tmpl_id = notify_admin_on_seller_approve_reject_m_tmpl_id,
notify_seller_on_approve_reject_m_tmpl_id = notify_seller_on_approve_reject_m_tmpl_id,
notify_admin_on_product_approve_reject_m_tmpl_id = notify_admin_on_product_approve_reject_m_tmpl_id,
notify_seller_on_product_approve_reject_m_tmpl_id = notify_seller_on_product_approve_reject_m_tmpl_id,
notify_seller_on_new_order_m_tmpl_id = notify_seller_on_new_order_m_tmpl_id,
seller_payment_journal_id = seller_payment_journal_id,
mp_currency_id = mp_currency_id,
show_visit_shop = show_visit_shop,
group_mp_product_variant = group_mp_product_variant,
group_mp_shop_allow = group_mp_shop_allow,
group_mp_product_pricelist = group_mp_product_pricelist,
seller_payment_product_id = seller_payment_product_id,
)
return res
@api.multi
def execute(self):
for rec in self:
if rec.recently_product < 1 or rec.recently_product > 20:
raise UserError(_("Recently Added Products count should be in range 1 to 20."))
if rec.review_load_no < 1:
raise UserError(_("Display Seller Reviews count should be more than 0."))
if rec.global_commission < 0 or rec.global_commission >= 100:
raise UserError(_("Global Commission should be greater than 0 and less than 100."))
if rec.seller_payment_limit < 0 :
raise UserError(_("Amount Limit can't be negative."))
if rec.next_payment_requset < 0:
raise UserError(_("Minimum Gap can't be negative."))
return super(ResConfigSettings, self).execute()
@api.model
def _default_location(self):
""" Set default location """
user_obj = self.env.user
if user_obj:
company_id = user_obj.company_id.id
location_ids = self.env["stock.location"].sudo().search(
[("company_id", '=', company_id), ("name", "=", "Stock"), ('usage', '=', 'internal')])
return location_ids[0] if location_ids else self.env["stock.location"]
return self.env["stock.location"].sudo().search([('usage', '=', 'internal')])[0]
| 73.249169
| 186
| 0.73408
| 21,235
| 0.963126
| 0
| 0
| 12,739
| 0.577785
| 0
| 0
| 9,232
| 0.418723
|
771e1c4b8e1935e576368e845f369c110a609b20
| 18,274
|
py
|
Python
|
igf_data/utils/tools/picard_util.py
|
imperial-genomics-facility/data-management-python
|
7b867d8d4562a49173d0b823bdc4bf374a3688f0
|
[
"Apache-2.0"
] | 7
|
2018-05-08T07:28:08.000Z
|
2022-02-21T14:56:49.000Z
|
igf_data/utils/tools/picard_util.py
|
imperial-genomics-facility/data-management-python
|
7b867d8d4562a49173d0b823bdc4bf374a3688f0
|
[
"Apache-2.0"
] | 15
|
2021-08-19T12:32:20.000Z
|
2022-02-09T19:52:51.000Z
|
igf_data/utils/tools/picard_util.py
|
imperial-genomics-facility/data-management-python
|
7b867d8d4562a49173d0b823bdc4bf374a3688f0
|
[
"Apache-2.0"
] | 2
|
2017-05-12T15:20:10.000Z
|
2020-05-07T16:25:11.000Z
|
import os,subprocess
from shlex import quote
import pandas as pd
from igf_data.utils.singularity_run_wrapper import execute_singuarity_cmd
from igf_data.utils.fileutils import check_file_path,get_temp_dir
class Picard_tools:
'''
A class for running picard tool
:param java_exe: Java executable path
:param picard_jar: Picard path
:param input_files: Input bam filepaths list
:param output_dir: Output directory filepath
:param ref_fasta: Input reference fasta filepath
:param picard_option: Additional picard run parameters as dictionary, default None
:param java_param: Java parameter, default '-Xmx4g'
:param strand_info: RNA-Seq strand information, default NONE
:param ref_flat_file: Input ref_flat file path, default None
:param output_prefix: Output prefix name, default None
:param threads: Number of threads to run for java, default 1
:param use_ephemeral_space: A toggle for temp dir setting, default 0
:param patterned_flowcell: Toggle for marking the patterned flowcell, default False
:param singularity_image: Singularity image path, default None
:param suported_commands: A list of supported picard commands
* CollectAlignmentSummaryMetrics
* CollectGcBiasMetrics
* QualityScoreDistribution
* CollectRnaSeqMetrics
* CollectBaseDistributionByCycle
* MarkDuplicates
* AddOrReplaceReadGroups
'''
def __init__(
self,java_exe,picard_jar,input_files,output_dir,ref_fasta,picard_option=None,
java_param='-Xmx4g',strand_info='NONE',threads=1,output_prefix=None,use_ephemeral_space=0,
ref_flat_file=None,ribisomal_interval=None,patterned_flowcell=False,singularity_image=None,
suported_commands=(
'CollectAlignmentSummaryMetrics',
'CollectGcBiasMetrics',
'QualityScoreDistribution',
'CollectRnaSeqMetrics',
'CollectBaseDistributionByCycle',
'MarkDuplicates',
'AddOrReplaceReadGroups')):
self.java_exe = java_exe
self.picard_jar = picard_jar
self.java_param = java_param
self.input_files = input_files
self.output_dir = output_dir
self.ref_fasta = ref_fasta
self.picard_option = picard_option
self.strand_info = strand_info
self.ref_flat_file = ref_flat_file
self.suported_commands = list(suported_commands)
self.ribisomal_interval = ribisomal_interval
self.output_prefix = output_prefix
self.threads = threads
self.use_ephemeral_space = use_ephemeral_space
self.patterned_flowcell = patterned_flowcell
self.singularity_image = singularity_image
def _get_param_for_picard_command(self,command_name):
'''
An internal method for configuring run parameters for picard commands
:param command_name: A picard command name
:returns: List of items to return
* A dictionary of picard run parameter if command is supported or None
* A list of output files or an empty list
* A list of metrics file for parsing
'''
try:
param_dict = None
output_list = list()
metrics_list = list()
input_list = self.input_files
if self.output_prefix is None:
output_prefix = \
os.path.join(
self.output_dir,
os.path.basename(input_list[0])) # set output file prefix
if output_prefix.endswith('.bam'):
output_prefix = output_prefix.replace('.bam','') # remove .bam from filepath prefix
else:
output_prefix = \
os.path.join(
self.output_dir,
self.output_prefix)
output_file = \
'{0}.{1}'.format(
output_prefix,
command_name) # set output path without any extension
chart_file = \
'{0}.{1}'.format(
output_file,
'pdf') # set chart filepath
metrics_file = \
'{0}.{1}'.format(
output_file,
'summary.txt') # set summary metrics path
if command_name=='CollectAlignmentSummaryMetrics':
if len(input_list)>1:
raise ValueError(
'More than one input file found for picard command {0}'.\
format(command_name))
output_file = \
'{0}.{1}'.format(
output_file,
'txt') # add correct extension for output file
param_dict = [{
'I':input_list[0],
'O':output_file,
'R':self.ref_fasta}]
output_list = [output_file]
metrics_list = [output_file]
elif command_name=='CollectGcBiasMetrics':
if len(input_list)>1:
raise ValueError(
'More than one input file found for picard command {0}'.\
format(command_name))
output_file = \
'{0}.{1}'.format(
output_file,
'txt') # add correct extension for output file
param_dict = [{
'I':input_list[0],
'O':output_file,
'R':self.ref_fasta,
'CHART':chart_file,
'S':metrics_file}]
output_list = [
output_file,
chart_file,
metrics_file]
metrics_list = [metrics_file]
elif command_name=='QualityScoreDistribution':
if len(input_list)>1:
raise ValueError(
'More than one input file found for picard command {0}'.\
format(command_name))
output_file = \
'{0}.{1}'.format(
output_file,
'txt') # add correct extension for output file
param_dict = [{
'I':input_list[0],
'O':output_file,
'CHART':chart_file}]
output_list = [
output_file,
chart_file]
elif command_name=='CollectRnaSeqMetrics':
if len(input_list)>1:
raise ValueError(
'More than one input file found for picard command {0}'.\
format(command_name))
if self.ref_flat_file is None:
raise ValueError(
'Missing refFlat annotation file for command {0}'.\
format(command_name))
check_file_path(file_path=self.ref_flat_file) # check refFlat file path
output_file = \
'{0}.{1}'.format(
output_file,
'txt') # add correct extension for output file
param_dict = [{
'I':input_list[0],
'O':output_file,
'R':self.ref_fasta,
'REF_FLAT':self.ref_flat_file,
'STRAND':self.strand_info,
'CHART':chart_file}]
if self.ribisomal_interval is not None:
check_file_path(file_path=self.ribisomal_interval)
param_dict.append({'RIBOSOMAL_INTERVALS':self.ribisomal_interval})
output_list = [
output_file,
chart_file]
metrics_list=[output_file]
elif command_name=='CollectBaseDistributionByCycle':
if len(input_list)>1:
raise ValueError(
'More than one input file found for picard command {0}'.\
format(command_name))
output_file = \
'{0}.{1}'.format(
output_file,
'txt') # add correct extension for output file
param_dict = [{
'I':input_list[0],
'O':output_file,
'CHART':chart_file}]
output_list = [
output_file,
chart_file]
elif command_name=='MarkDuplicates':
output_file = \
'{0}.{1}'.format(
output_file,
'bam') # add correct extension for output file
param_dict = [{
'O':output_file,
'M':metrics_file}]
if self.patterned_flowcell:
param_dict.append({'OPTICAL_DUPLICATE_PIXEL_DISTANCE':'2500'})
for file in input_list:
param_dict.append({'I':file})
output_list = [
output_file,
metrics_file]
metrics_list=[metrics_file]
elif command_name=='AddOrReplaceReadGroups':
if len(input_list)>1:
raise ValueError('More than one input file found for picard command {0}'.\
format(command_name))
required_RG_params = [
"RGID",
"RGLB",
"RGPL",
"RGPU",
"RGSM",
"RGCN"]
if not set(required_RG_params).issubset(set(self.picard_option.keys())):
raise ValueError(
'Missing required options for picard cmd {0}:{1}'.\
format(command_name,required_RG_params)) # check for required params
output_file = \
'{0}.{1}'.format(
output_file,
'bam') # add correct extension for output file
param_dict = [{
'I':input_list[0],
'O':output_file}] # not checking for other required inputs
output_list=[output_file]
return param_dict,output_list,metrics_list
except:
raise
@staticmethod
def _parse_picard_metrics(picard_cmd,metrics_list):
'''
An internal static method for parsing picard command specific metrics parsing
:param picard_cmd: Picard string command
:param metrics_list: List of picard metrics file
:returns: A list of dictionaries with the picard metrics
'''
try:
metrics_output_list=list()
for file in metrics_list:
try:
check_file_path(file) # check input path
if picard_cmd=='CollectAlignmentSummaryMetrics':
data = \
pd.read_csv(
file,
sep='\t',
dtype=object,
skiprows=6) # read alignment summary metrics, skip 6 lines
data.columns = \
list(map(lambda x: '{0}_{1}'.format(picard_cmd,x),
data.columns)) # append picard command name
category_col = \
'{0}_{1}'.format(picard_cmd,'CATEGORY') # get key for modified category column
data = \
data[(data.get(category_col)=='PAIR') | \
(data.get(category_col)=='UNPAIRED')].\
dropna(axis=1).\
to_dict(orient='records') # filter data and convert to list of dicts
metrics_output_list.extend(data) # append results
elif picard_cmd=='CollectGcBiasMetrics':
data = \
pd.read_csv(
file,
sep='\t',
dtype=object,
skiprows=6) # read GC bias metrics summary file
data.columns = \
list(map(lambda x: '{0}_{1}'.format(picard_cmd,x),
data.columns)) # append picard command name
data = \
data.\
dropna(axis=1).\
              to_dict(orient='records') # filter data and convert to list of dicts
metrics_output_list.extend(data) # append results
elif picard_cmd=='CollectRnaSeqMetrics':
data = \
pd.read_csv(
file,
sep='\t',
skiprows=6,
dtype=object,
nrows=1) # read rnaseq metrics, skip 6 lines and read only one line
data.columns = \
list(map(lambda x: '{0}_{1}'.format(picard_cmd,x),
data.columns)) # append picard command name
data = \
data.\
dropna(axis=1).\
              to_dict(orient='records') # filter data and convert to list of dicts
metrics_output_list.extend(data) # append results
elif picard_cmd=='MarkDuplicates':
data = \
pd.read_csv(
file,
sep='\t',
skiprows=6,
dtype=object,
nrows=1) # read markdup metrics, skip 6 lines and read only one line
data.columns = \
list(map(lambda x: '{0}_{1}'.format(picard_cmd,x),
data.columns)) # append picard command name
data = \
data.to_dict(orient='records') # convert to list of dicts
metrics_output_list.extend(data) # append results
except Exception as e:
raise ValueError('Failed to parse file {0}, got error {1}'.\
format(file,e))
return metrics_output_list
except Exception as e:
raise ValueError('Picard metrics: error: {0}'.format(e))
def run_picard_command(self,command_name,dry_run=False):
'''
A method for running generic picard command
:param command_name: Picard command name
:param dry_run: A toggle for returning picard command without the actual run, default False
:returns: A list of output files from picard run and picard run command and optional picard metrics
'''
try:
if self.singularity_image is None:
check_file_path(file_path=self.java_exe)
check_file_path(file_path=self.picard_jar)
check_file_path(file_path=self.ref_fasta)
if not isinstance(self.input_files, list) or \
len(self.input_files)==0:
raise ValueError('Missing input file list for picard run')
for file in self.input_files:
check_file_path(file_path=file)
picard_temp_run_dir = \
get_temp_dir(use_ephemeral_space=self.use_ephemeral_space)
command = [
self.java_exe,
'-XX:ParallelGCThreads={0}'.\
format(self.threads),
self.java_param,
'-Djava.io.tmpdir={0}'.format(picard_temp_run_dir),
'-jar',
self.picard_jar,
quote(command_name)]
if isinstance(self.picard_option,dict) and \
len(self.picard_option)>0:
picard_option = [
'{0}={1}'.format(quote(param),quote(val))
for param,val in self.picard_option.items()]
command.extend(picard_option) # additional picard params
picard_run_param,output_file_list,metrics_list = \
self._get_param_for_picard_command(
command_name=command_name) # get picard params and output list
if isinstance(picard_run_param,list) and \
len(picard_run_param)>0:
picard_option = [
'{0}={1}'.format(quote(param),quote(str(val)))
for param_dicts in picard_run_param
for param,val in param_dicts.items()]
command.extend(picard_option) # main picard params
if dry_run:
return command,output_file_list
if self.singularity_image is None:
subprocess.\
check_call(' '.join(command),shell=True) # run picard command
else:
bind_dir_list = [
os.path.dirname(f)
for f in self.input_files]
bind_dir_list.extend([
os.path.dirname(f)
for f in output_file_list])
bind_dir_list.extend([
os.path.dirname(f)
for f in metrics_list])
bind_dir_list.\
append(picard_temp_run_dir)
if self.ref_flat_file is not None:
bind_dir_list.\
append(os.path.dirname(self.ref_flat_file))
if self.ref_fasta is not None:
bind_dir_list.\
append(os.path.dirname(self.ref_fasta))
if self.ribisomal_interval is not None:
bind_dir_list.\
append(os.path.dirname(self.ribisomal_interval))
bind_dir_list = \
list(set(bind_dir_list)) # remove duplicates
_ = \
execute_singuarity_cmd(
image_path=self.singularity_image,
command_string=' '.join(command),
bind_dir_list=bind_dir_list)
picard_metrics = \
self._parse_picard_metrics(\
picard_cmd=command_name,
metrics_list=metrics_list) # parse picard metrics, if available
return output_file_list,command,picard_metrics
else:
raise ValueError('Picard command {0} not supported yet'.\
format(command_name))
except Exception as e:
raise ValueError(
'Failed to run picard command {0}, error {1}'.\
format(command_name,e))
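# A minimal usage sketch; the paths, file names and options below are placeholders and
# not part of this module. dry_run=True returns the assembled picard command line and
# the expected output files without executing anything.
#
#     picard = Picard_tools(
#         java_exe='/usr/bin/java',
#         picard_jar='/opt/picard/picard.jar',
#         input_files=['/data/sample1.bam'],
#         output_dir='/data/qc',
#         ref_fasta='/ref/genome.fa',
#         ref_flat_file='/ref/refFlat.txt',
#         output_prefix='sample1')
#     command, output_files = picard.run_picard_command(
#         command_name='CollectRnaSeqMetrics', dry_run=True)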
| 43.927885
| 140
| 0.525172
| 18,062
| 0.988399
| 0
| 0
| 4,294
| 0.234979
| 0
| 0
| 5,026
| 0.275036
|
771f7ee9bb91bc23000b0e85deecce770eb956d7
| 8,348
|
py
|
Python
|
app/utils/NetworkingUtils.py
|
DiegoSilva776/linkehub_insigth_api
|
1909a9c1b28901ab6dc0be6815741aed848b4363
|
[
"MIT"
] | 2
|
2018-06-25T03:07:28.000Z
|
2018-06-26T13:52:23.000Z
|
app/utils/NetworkingUtils.py
|
DiegoSilva776/linkehub_insigth_api
|
1909a9c1b28901ab6dc0be6815741aed848b4363
|
[
"MIT"
] | null | null | null |
app/utils/NetworkingUtils.py
|
DiegoSilva776/linkehub_insigth_api
|
1909a9c1b28901ab6dc0be6815741aed848b4363
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
import json
import http.client
import urllib
import time
sys.path.append("../")
from models.ApiInstance import ApiInstance
from utils.ConstantUtils import ConstantUtils
'''
NetworkingUtils is responsible for holding the external URLs and the default parameters
of each URL used by the API.
'''
class NetworkingUtils():
def __init__(self):
self.TAG = "NetworkingUtils"
self.PATH_SERVICES_CONFIG_FILE = "config/hosts.json"
self.constUtils = ConstantUtils()
self.apiInstances = []
self.initListApiInstances()
## --------------------##
## Requests management ##
## --------------------##
'''
Return a headers object used in requests to the service API
'''
def getRequestHeaders(self, headersType, token):
headers = ""
try:
if headersType == self.constUtils.HEADERS_TYPE_AUTH_TOKEN:
headers = {
"cache-control": "no-cache",
"User-Agent": "Linkehub-API-Manager",
"access_token": "{0}".format(token)
}
elif headersType == self.constUtils.HEADERS_TYPE_URL_ENCODED:
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"
}
elif headersType == self.constUtils.HEADERS_TYPE_NO_AUTH_TOKEN:
headers = {
"cache-control": "no-cache",
"User-Agent": "Linkehub-API-Manager"
}
except Exception as e:
print("{0} Failed to getRequestHeaders: {1}".format(self.TAG, e))
return headers
## ---------------------##
## Instances management ##
## ---------------------##
'''
Initialize the list of copies running the same version of the service API
'''
def initListApiInstances(self):
try:
fileData = open(self.PATH_SERVICES_CONFIG_FILE).read()
data = json.loads(fileData)
for idx, hostFromList in enumerate(data["hosts"]):
apiInstance = ApiInstance()
apiInstance.id = idx
if "url" in hostFromList:
apiInstance.url = hostFromList["url"]
if "name" in hostFromList:
apiInstance.name = hostFromList["name"]
self.apiInstances.append(apiInstance)
print("The list of API instances has been initialized: {0}".format(json.dumps(self.getSerializableApiInstances())))
except Exception as e:
print("{0}: Failed to initListApiInstances: {1}".format(self.TAG, e))
'''
Return the serializable version of the list of ApiInstances
'''
def getSerializableApiInstances(self):
sApiInstances = []
try:
for apiInstance in self.apiInstances:
sApiInstances.append(apiInstance.toJSON())
except Exception as e:
print("{0} Failed to getSerializableApiInstances : {1}".format(self.TAG, e))
return sApiInstances
'''
    Return the object that represents the main instance, which contains the same content as the others,
but it is the one used to generate the copies of the service.
'''
def getRootApiInstance(self):
try:
return self.apiInstances[0]
except Exception as e:
print("{0} Failed to getRootInstance: {1}".format(self.TAG, e))
'''
Verify how many requests an instance of the service API still has to the Github API before the
    limit of requests per hour gets exceeded.
'''
def updateListRemainingRequestsGithubAPI(self):
try:
print("\nVerify the number of remaining requests to the Github API for all instances: \n")
# Identify the number of remaining requests to the Github API for each instance of the API
if self.apiInstances is not None:
for apiInstance in self.apiInstances:
try:
# Make a request to the Github API and verify if the limit of requests per hour has been exceeded
connection = http.client.HTTPSConnection(apiInstance.getBaseUrl())
headers = self.getRequestHeaders(self.constUtils.HEADERS_TYPE_NO_AUTH_TOKEN, None)
endpoint = "/has_expired_requests_per_hour_github/"
connection.request("GET", endpoint, headers=headers)
res = connection.getresponse()
data = res.read()
githubApiResponse = json.loads(data.decode(self.constUtils.UTF8_DECODER))
# Process the response
if githubApiResponse is not None:
if "usage" in githubApiResponse:
usage = githubApiResponse["usage"]
if "remaining" in usage:
apiInstance.remainingCallsGithub = usage["remaining"]
except Exception:
print("{0} Failed to connect to a host ...".format(self.TAG))
print("{0} : {1}".format(apiInstance.getUrl(), apiInstance.remainingCallsGithub))
print("Total number available requests : {0}".format(self.getNumRemaningRequestToGithub()))
except ValueError as err2:
print("{0} Failed to updateListRemainingRequestsGithubAPI: {1}".format(self.TAG, err2))
'''
Returns the sum of the remaning requests to the Github API of each instance of the service
'''
def getNumRemaningRequestToGithub(self):
totalRemainingRequest = 0
try:
for apiInstance in self.apiInstances:
totalRemainingRequest += apiInstance.remainingCallsGithub
except Exception as e:
print("{0} Failed to getNumRemaningRequestToGithub: {1}".format(self.TAG, e))
return totalRemainingRequest
'''
Returns the instance of the service with the largest number of remaining requests to the Github API
'''
def getInstanceForRequestToGithubAPI(self):
selectedInstance = self.getRootApiInstance()
largestNumRemainingRequests = 0
try:
for apiInstance in self.apiInstances:
if apiInstance.remainingCallsGithub > largestNumRemainingRequests:
largestNumRemainingRequests = apiInstance.remainingCallsGithub
selectedInstance = apiInstance
except Exception as e:
print("{0} Failed to getInstanceForRequestToGithubAPI : {1}".format(self.TAG, e))
return selectedInstance
'''
    If the number of available requests to the Github API has been exceeded, wait until the instances get refueled
'''
def waitRequestGithubApiIfNeeded(self):
try:
numRequestsGithubApi = self.getNumRemaningRequestToGithub()
if numRequestsGithubApi == 0:
i = 0
print("\nThe maximum number of requests to the Github API has been exceeded for all instances of the service")
while i < self.constUtils.TIMEOUT_REQUEST_GITHUB_API:
time.sleep(1)
if i == 0:
print("\nYou'll have to wait {0} minutes until the next request:".format((self.constUtils.TIMEOUT_REQUEST_GITHUB_API - i) / 60))
elif i < self.constUtils.TIMEOUT_REQUEST_GITHUB_API:
if (self.constUtils.TIMEOUT_REQUEST_GITHUB_API / i) == 2:
print("\nWe are half way there, we still have to wait {0} minutes".format((self.constUtils.TIMEOUT_REQUEST_GITHUB_API - i) / 60))
else:
print(".", end="")
i += 1
self.updateListRemainingRequestsGithubAPI()
self.waitRequestGithubApiIfNeeded()
except Exception as e:
print("{0} Failed to waitRequestGithubApiIfNeeded : {1}".format(self.TAG, e))
| 36.295652
| 157
| 0.574868
| 7,995
| 0.957714
| 0
| 0
| 0
| 0
| 0
| 0
| 2,660
| 0.318639
|