hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38d68c3be83a3f35ee2c9d807898670a1ff4278e | 575 | py | Python | test/helpers/pylint/findWarnings.py | drewrisinger/pyGSTi | dd4ad669931c7f75e026456470cf33ac5b682d0d | [
"Apache-2.0"
] | 1 | 2021-12-19T15:11:09.000Z | 2021-12-19T15:11:09.000Z | test/helpers/pylint/findWarnings.py | drewrisinger/pyGSTi | dd4ad669931c7f75e026456470cf33ac5b682d0d | [
"Apache-2.0"
] | null | null | null | test/helpers/pylint/findWarnings.py | drewrisinger/pyGSTi | dd4ad669931c7f75e026456470cf33ac5b682d0d | [
"Apache-2.0"
] | null | null | null | from .helpers import get_pylint_output, write_output
from ..automation_tools import read_json
def find_warnings():
print('Generating warnings in all of pygsti. This takes around 30 seconds')
config = read_json('config/pylint_config.json')
blacklist = config['blacklisted-warnings']
commands = [config['pylint-version'],
'--disable=R,C,E,%s' % ','.join(blacklist),
'--rcfile=%s' % config['config-file'],
'--reports=n'] + config['packages']
output = get_pylint_output(commands, 'warnings')
| 44.230769 | 79 | 0.633043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.373913 |
38d7b65bf66287fe4e9a38e2e928310e83dfbed0 | 47,316 | py | Python | orangecontrib/esrf/syned/widgets/extension/ow_ebs.py | oasys-esrf-kit/OASYS1-ESRF-Extensions | 5df1ac01a1319d900380196c850504e09e36f9d6 | [
"MIT"
] | null | null | null | orangecontrib/esrf/syned/widgets/extension/ow_ebs.py | oasys-esrf-kit/OASYS1-ESRF-Extensions | 5df1ac01a1319d900380196c850504e09e36f9d6 | [
"MIT"
] | null | null | null | orangecontrib/esrf/syned/widgets/extension/ow_ebs.py | oasys-esrf-kit/OASYS1-ESRF-Extensions | 5df1ac01a1319d900380196c850504e09e36f9d6 | [
"MIT"
] | 1 | 2020-06-22T05:46:07.000Z | 2020-06-22T05:46:07.000Z | import os, sys
import numpy
import scipy.constants as codata
from syned.storage_ring.magnetic_structures.undulator import Undulator
from syned.storage_ring.magnetic_structures import insertion_device
from PyQt5.QtGui import QPalette, QColor, QFont
from PyQt5.QtWidgets import QMessageBox, QApplication
from PyQt5.QtCore import QRect
from orangewidget import gui
from orangewidget import widget
from orangewidget.settings import Setting
from oasys.widgets.widget import OWWidget
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.widgets.gui import ConfirmDialog
from syned.storage_ring.light_source import LightSource, ElectronBeam
from syned.beamline.beamline import Beamline
from oasys.widgets.gui import ConfirmDialog
import orangecanvas.resources as resources
m2ev = codata.c * codata.h / codata.e
VERTICAL = 1
HORIZONTAL = 2
BOTH = 3
class OWEBS(OWWidget):
name = "ESRF-EBS ID Light Source"
description = "Syned: ESRF-EBS ID Light Source"
icon = "icons/ebs.png"
priority = 1
maintainer = "Manuel Sanchez del Rio"
maintainer_email = "srio(@at@)esrf.eu"
category = "ESRF-EBS Syned Light Sources"
keywords = ["data", "file", "load", "read"]
outputs = [{"name":"SynedData",
"type":Beamline,
"doc":"Syned Beamline",
"id":"data"}]
want_main_area = 1
MAX_WIDTH = 1320
MAX_HEIGHT = 700
IMAGE_WIDTH = 860
IMAGE_HEIGHT = 645
CONTROL_AREA_WIDTH = 405
TABS_AREA_HEIGHT = 650
TABS_AREA_HEIGHT = 625
CONTROL_AREA_WIDTH = 450
electron_energy_in_GeV = Setting(6.0)
electron_energy_spread = Setting(0.001)
ring_current = Setting(0.2)
number_of_bunches = Setting(0.0)
moment_xx = Setting(0.0)
moment_xxp = Setting(0.0)
moment_xpxp = Setting(0.0)
moment_yy = Setting(0.0)
moment_yyp = Setting(0.0)
moment_ypyp = Setting(0.0)
electron_beam_size_h = Setting(0.0)
electron_beam_divergence_h = Setting(0.0)
electron_beam_size_v = Setting(0.0)
electron_beam_divergence_v = Setting(0.0)
electron_beam_emittance_h = Setting(0.0)
electron_beam_emittance_v = Setting(0.0)
electron_beam_beta_h = Setting(0.0)
electron_beam_beta_v = Setting(0.0)
electron_beam_alpha_h = Setting(0.0)
electron_beam_alpha_v = Setting(0.0)
electron_beam_eta_h = Setting(0.0)
electron_beam_eta_v = Setting(0.0)
electron_beam_etap_h = Setting(0.0)
electron_beam_etap_v = Setting(0.0)
type_of_properties = Setting(1)
auto_energy = Setting(0.0)
auto_harmonic_number = Setting(1)
K_horizontal = Setting(1.0)
K_vertical = Setting(1.0)
period_length = Setting(0.010)
number_of_periods = Setting(10)
ebs_id_index = Setting(0)
gap_mm = Setting(0.0)
gap_min = Setting(5.0)
gap_max = Setting(20.0)
harmonic_max = Setting(3)
a0 = Setting(20.0)
a1 = Setting(0.2)
a2 = Setting(0.0)
a3 = Setting(0.0)
a4 = Setting(0.0)
a5 = Setting(0.0)
a6 = Setting(0.0)
# data_url = 'ftp://ftp.esrf.eu/pub/scisoft/syned/resources/jsrund.csv'
# create it in nice with the ID app: /segfs/tango/bin/jsrund
data_url = os.path.join(resources.package_dirname("orangecontrib.esrf.syned.data"), 'jsrund.csv')
data_dict = None
def __init__(self):
    """Build the widget.

    Loads the ID database (CSV), creates the Send/Reset buttons, the
    "Light Source Setting" and "Settings" control tabs, the main-area
    plot tabs, and finally populates the derived fields and refreshes
    the display. Order matters: the plot tabs must exist
    (initializeTabs) before update() is called.
    """
    # Load the EBS insertion-device database into self.data_dict.
    self.get_data_dictionary_csv()
    # OLD FORMAT
    # self.data_url = "https://raw.githubusercontent.com/srio/shadow3-scripts/master/ESRF-LIGHTSOURCES-EBS/ebs_ids.json"
    # self.get_data_dictionary()
    # Menu/toolbar action equivalent to the "Send Data" button.
    self.runaction = widget.OWAction("Send Data", self)
    self.runaction.triggered.connect(self.send_data)
    self.addAction(self.runaction)
    # --- Send Data / Reset Fields buttons -------------------------------
    button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")
    button = gui.button(button_box, self, "Send Data", callback=self.send_data)
    font = QFont(button.font())
    font.setBold(True)
    button.setFont(font)
    palette = QPalette(button.palette()) # make a copy of the palette
    palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
    button.setPalette(palette) # assign new palette
    button.setFixedHeight(45)
    button = gui.button(button_box, self, "Reset Fields", callback=self.callResetSettings)
    font = QFont(button.font())
    font.setItalic(True)
    button.setFont(font)
    palette = QPalette(button.palette()) # make a copy of the palette
    palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
    button.setPalette(palette) # assign new palette
    button.setFixedHeight(45)
    button.setFixedWidth(150)
    gui.separator(self.controlArea)
    # --- Window geometry: ~95% of the available desktop, capped at MAX_* -
    geom = QApplication.desktop().availableGeometry()
    self.setGeometry(QRect(round(geom.width()*0.05),
                           round(geom.height()*0.05),
                           round(min(geom.width()*0.98, self.MAX_WIDTH)),
                           round(min(geom.height()*0.95, self.MAX_HEIGHT))))
    self.setMaximumHeight(self.geometry().height())
    self.setMaximumWidth(self.geometry().width())
    self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)
    # --- "Light Source Setting" tab -------------------------------------
    self.tabs_setting = oasysgui.tabWidget(self.controlArea)
    self.tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
    self.tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)
    self.tab_sou = oasysgui.createTabPage(self.tabs_setting, "Light Source Setting")
    # Combo listing all IDs from the database; index 0 is "<None>".
    gui.comboBox(self.tab_sou, self, "ebs_id_index", label="Load ID parameters from database list: ", labelWidth=350,
                 items=self.get_id_list(), callback=self.set_id, sendSelectedValue=False, orientation="horizontal")
    self.electron_beam_box = oasysgui.widgetBox(self.tab_sou, "Electron Beam/Machine Parameters", addSpace=False, orientation="vertical")
    oasysgui.lineEdit(self.electron_beam_box, self, "electron_energy_in_GeV", "Energy [GeV]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.electron_beam_box, self, "electron_energy_spread", "Energy Spread", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.electron_beam_box, self, "ring_current", "Ring Current [A]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    # Selector for how the electron-beam phase space is specified; the
    # three boxes below (left_box_2_1/2/3) are alternatives shown/hidden
    # by set_visible().
    gui.comboBox(self.electron_beam_box, self, "type_of_properties", label="Electron Beam Properties", labelWidth=350,
                 items=["From 2nd Moments", "From Size/Divergence", "From Twiss papameters","Zero emittance", "EBS (S28D)"],
                 callback=self.update_electron_beam,
                 sendSelectedValue=False, orientation="horizontal")
    # Option 0: second moments of the phase-space distribution.
    self.left_box_2_1 = oasysgui.widgetBox(self.electron_beam_box, "", addSpace=False, orientation="vertical", height=150)
    oasysgui.lineEdit(self.left_box_2_1, self, "moment_xx", "<x x> [m^2]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_1, self, "moment_xxp", "<x x'> [m.rad]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_1, self, "moment_xpxp", "<x' x'> [rad^2]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_1, self, "moment_yy", "<y y> [m^2]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_1, self, "moment_yyp", "<y y'> [m.rad]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_1, self, "moment_ypyp", "<y' y'> [rad^2]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    # Option 1: RMS sizes and divergences.
    self.left_box_2_2 = oasysgui.widgetBox(self.electron_beam_box, "", addSpace=False, orientation="vertical", height=150)
    oasysgui.lineEdit(self.left_box_2_2, self, "electron_beam_size_h", "Horizontal Beam Size \u03c3x [m]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_2, self, "electron_beam_size_v", "Vertical Beam Size \u03c3y [m]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_2, self, "electron_beam_divergence_h", "Horizontal Beam Divergence \u03c3'x [rad]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_2, self, "electron_beam_divergence_v", "Vertical Beam Divergence \u03c3'y [rad]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
    # Option 2: Twiss parameters (two columns, H on the left, V on the right).
    self.left_box_2_3 = oasysgui.widgetBox(self.electron_beam_box, "", addSpace=False, orientation="horizontal",height=150)
    self.left_box_2_3_l = oasysgui.widgetBox(self.left_box_2_3, "", addSpace=False, orientation="vertical")
    self.left_box_2_3_r = oasysgui.widgetBox(self.left_box_2_3, "", addSpace=False, orientation="vertical")
    oasysgui.lineEdit(self.left_box_2_3_l, self, "electron_beam_emittance_h", "\u03B5x [m.rad]",labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_3_l, self, "electron_beam_alpha_h", "\u03B1x", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_3_l, self, "electron_beam_beta_h", "\u03B2x [m]", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_3_l, self, "electron_beam_eta_h", "\u03B7x", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_3_l, self, "electron_beam_etap_h", "\u03B7'x", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_3_r, self, "electron_beam_emittance_v", "\u03B5y [m.rad]",labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_3_r, self, "electron_beam_alpha_v", "\u03B1y", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_3_r, self, "electron_beam_beta_v", "\u03B2y [m]", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_3_r, self, "electron_beam_eta_v", "\u03B7y", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(self.left_box_2_3_r, self, "electron_beam_etap_v", "\u03B7'y", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
    gui.rubber(self.controlArea)
    ###################
    # --- ID parameters and K / gap / photon-energy settings --------------
    left_box_1 = oasysgui.widgetBox(self.tab_sou, "ID Parameters", addSpace=True, orientation="vertical")
    oasysgui.lineEdit(left_box_1, self, "period_length", "Period Length [m]", labelWidth=260,
                      valueType=float, orientation="horizontal", callback=self.update)
    oasysgui.lineEdit(left_box_1, self, "number_of_periods", "Number of Periods", labelWidth=260,
                      valueType=float, orientation="horizontal", callback=self.update)
    # NOTE: left_box_1 is rebound here; the "ID Parameters" box above is
    # no longer referenced by this name.
    left_box_1 = oasysgui.widgetBox(self.tab_sou, "Setting", addSpace=True, orientation="vertical")
    # oasysgui.lineEdit(left_box_1, self, "K_horizontal", "Horizontal K", labelWidth=260, valueType=float, orientation="horizontal")
    oasysgui.lineEdit(left_box_1, self, "K_vertical", "Vertical K", labelWidth=260,
                      valueType=float, orientation="horizontal", callback=self.set_K)
    oasysgui.lineEdit(left_box_1, self, "gap_mm", "Undulator Gap [mm]",
                      labelWidth=250, valueType=float, orientation="horizontal",
                      callback=self.set_gap)
    left_box_2 = oasysgui.widgetBox(left_box_1, "", addSpace=False, orientation="vertical")
    oasysgui.lineEdit(left_box_2, self, "auto_energy", "Photon Energy [eV]",
                      labelWidth=250, valueType=float, orientation="horizontal",
                      callback=self.auto_set_undulator_V)
    oasysgui.lineEdit(left_box_2, self, "auto_harmonic_number", "Harmonic",
                      labelWidth=250, valueType=int, orientation="horizontal",
                      callback=self.auto_set_undulator_V)
    ####################################################
    # --- "Settings" tab: gap limits and gap->B parametrization a0..a6 ----
    tab_util = oasysgui.createTabPage(self.tabs_setting, "Settings")
    left_box_0 = oasysgui.widgetBox(tab_util, "Advanced settings",
                                    addSpace=False, orientation="vertical", height=450)
    oasysgui.lineEdit(left_box_0, self, "gap_min", "minimum gap",
                      labelWidth=260, valueType=float, orientation="horizontal",
                      callback=self.update)
    oasysgui.lineEdit(left_box_0, self, "gap_max", "maximum gap (for plots)",
                      labelWidth=260, valueType=float, orientation="horizontal",
                      callback=self.update)
    oasysgui.lineEdit(left_box_0, self, "harmonic_max", "maximum harmonic (for plots)",
                      labelWidth=260, valueType=int, orientation="horizontal",
                      callback=self.update)
    left_box_00 = oasysgui.widgetBox(left_box_0, "Gap parametrization", addSpace=False, orientation="vertical")
    oasysgui.lineEdit(left_box_00, self, "a0", "a0", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
    oasysgui.lineEdit(left_box_00, self, "a1", "a1", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
    oasysgui.lineEdit(left_box_00, self, "a2", "a2", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
    oasysgui.lineEdit(left_box_00, self, "a3", "a3", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
    oasysgui.lineEdit(left_box_00, self, "a4", "a4", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
    oasysgui.lineEdit(left_box_00, self, "a5", "a5", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
    oasysgui.lineEdit(left_box_00, self, "a6", "a6", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
    # Main-area tabs (info + 4 plots) must exist before update() runs.
    self.initializeTabs()
    # self.populate_gap_parametrization()
    # self.populate_electron_beam()
    # self.populate_magnetic_structure()
    # self.set_ebs_electron_beam()
    self.populate_settings_after_setting_K()
    self.set_visible()
    self.update()
def get_id_list(self):
    """Return the combo-box entries for the ID database.

    The first entry is "<None>", so ``ebs_id_index`` equals the database
    row index plus one.
    """
    labels = ["<None>"]
    for i, name in enumerate(self.data_dict["id_name"]):
        labels.append("ID%02d %s" % (self.data_dict["straight_section"][i], name))
    return labels
def titles(self):
    """Titles of the four gap-scan plots, in tab order."""
    return [
        "K vs Gap",
        "B vs Gap",
        "Gap vs resonance energy",
        "Power vs Gap",
    ]
def xtitles(self):
    """X-axis label (gap in mm), one entry per plot."""
    return ["Gap [mm]" for _ in self.titles()]
def ytitles(self):
    """Y-axis labels of the four gap-scan plots, in tab order."""
    return [
        "K",
        "B [T]",
        "Photon energy [eV]",
        "Power [W]",
    ]
def initializeTabs(self):
    """Create the main-area tabs: one "Info" text page plus one page per
    gap-scan plot, and instantiate the plot canvases."""
    self.tabs = oasysgui.tabWidget(self.mainArea)
    self.tab = [oasysgui.createTabPage(self.tabs, "Info",),
                oasysgui.createTabPage(self.tabs, "K vs Gap"),
                oasysgui.createTabPage(self.tabs, "B vs Gap"),
                oasysgui.createTabPage(self.tabs, "Resonance vs Gap"),
                oasysgui.createTabPage(self.tabs, "Power vs Gap"),
                ]
    for tab in self.tab:
        tab.setFixedHeight(self.IMAGE_HEIGHT)
        tab.setFixedWidth(self.IMAGE_WIDTH)
    # Read-only text area holding the formatted info report (see update_info).
    self.info_id = oasysgui.textArea(height=self.IMAGE_HEIGHT-5, width=self.IMAGE_WIDTH-5)
    profile_box = oasysgui.widgetBox(self.tab[0], "", addSpace=True, orientation="horizontal",
                                     height = self.IMAGE_HEIGHT, width=self.IMAGE_WIDTH-5)
    profile_box.layout().addWidget(self.info_id)
    # One plot canvas per title; self.plot_canvas keeps an extra None slot.
    n_plots = len(self.titles())
    self.plot_canvas = [None] * (1 + n_plots)
    for i in range(n_plots):
        self.plot_canvas[i] = oasysgui.plotWindow(roi=False, control=False, position=True)
        self.plot_canvas[i].setDefaultPlotLines(True)
        self.plot_canvas[i].setActiveCurveColor(color='blue')
        self.plot_canvas[i].setGraphXLabel(self.xtitles()[i])
        self.plot_canvas[i].setGraphYLabel(self.ytitles()[i])
        self.plot_canvas[i].setGraphTitle(self.titles()[i])
        self.plot_canvas[i].setInteractiveMode(mode='zoom')
    # Plot canvas i goes on tab i+1 (tab 0 is the Info page).
    for index in range(0, 4):
        self.tab[index + 1].layout().addWidget(self.plot_canvas[index])
    self.tabs.setCurrentIndex(1)
def check_magnetic_structure(self):
    """Validate the undulator parameters, raising (via congruence) on
    out-of-range values: K >= 0, period and number of periods > 0."""
    for validator, value, label in (
        (congruence.checkPositiveNumber, self.K_horizontal, "Horizontal K"),
        (congruence.checkPositiveNumber, self.K_vertical, "Vertical K"),
        (congruence.checkStrictlyPositiveNumber, self.period_length, "Period Length"),
        (congruence.checkStrictlyPositiveNumber, self.number_of_periods, "Number of Periods"),
    ):
        validator(value, label)
def get_magnetic_structure(self):
    """Return the ID as a syned InsertionDevice built from the widget settings.

    NOTE(review): this method is redefined later in the class (the later
    version returns a syned ``Undulator``), so this definition is shadowed
    and effectively dead code — confirm and remove one of the two.
    """
    return insertion_device.InsertionDevice(K_horizontal=self.K_horizontal,
                                            K_vertical=self.K_vertical,
                                            period_length=self.period_length,
                                            number_of_periods=self.number_of_periods)
def set_ebs_electron_beam(self):
    """Load the hard-coded EBS (S28D lattice) reference electron beam.

    Sets size/divergence values, then derives the second moments and
    Twiss-like quantities from the resulting syned electron beam, and
    finally overwrites the emittances with reference constants.
    Side effect: switches the display mode to "From Size/Divergence".
    """
    # Display the beam as size/divergence after loading the preset.
    self.type_of_properties = 1
    # EBS S28D reference sizes/divergences (m, rad).
    self.electron_beam_size_h = 30.1836e-6
    self.electron_beam_size_v = 3.63641e-6
    self.electron_beam_divergence_h = 4.36821e-6
    self.electron_beam_divergence_v = 1.37498e-6
    #
    # Derive the remaining representations from the syned beam built
    # from the values just set.
    eb = self.get_light_source().get_electron_beam()
    moment_xx, moment_xxp, moment_xpxp, moment_yy, moment_yyp, moment_ypyp = eb.get_moments_all()
    self.moment_xx = moment_xx
    self.moment_yy = moment_yy
    self.moment_xxp = moment_xxp
    self.moment_yyp = moment_yyp
    self.moment_xpxp = moment_xpxp
    self.moment_ypyp = moment_ypyp
    ex, ax, bx, ey, ay, by = eb.get_twiss_no_dispersion_all()
    self.electron_beam_beta_h = bx
    self.electron_beam_beta_v = by
    self.electron_beam_alpha_h = ax
    self.electron_beam_alpha_v = ay
    # NOTE(review): ex/ey from get_twiss_no_dispersion_all look like
    # emittances (cf. the variable naming), yet they are stored into the
    # eta (dispersion) fields here — confirm this is intentional.
    self.electron_beam_eta_h = ex
    self.electron_beam_eta_v = ey
    self.electron_beam_etap_h = 0.0
    self.electron_beam_etap_v = 0.0
    # Reference EBS emittances (m.rad); these override any derived values.
    self.electron_beam_emittance_h = 1.3166e-10
    self.electron_beam_emittance_v = 5e-12
def update_electron_beam(self):
    """Callback of the 'Electron Beam Properties' combo.

    Option 4 ("EBS (S28D)") loads the reference beam preset; in all
    cases the visible parameter box and the plots/info are refreshed.
    """
    if self.type_of_properties == 4:  # "EBS (S28D)" preset selected
        self.set_ebs_electron_beam()
    self.set_visible()
    self.update()
def update(self):
    """Refresh the info panel and then the gap-scan plots."""
    for refresh in (self.update_info, self.update_plots):
        refresh()
def update_info(self):
    """Recompute all derived undulator/beam quantities and render them
    into the Info tab via the info_template() format string."""
    syned_light_source = self.get_light_source()
    syned_electron_beam = syned_light_source.get_electron_beam()
    syned_undulator = syned_light_source.get_magnetic_structure()
    gamma = self.gamma()
    # Label of the currently selected ID (index 0 means none selected).
    if self.ebs_id_index == 0:
        id = "<None>"
    else:
        id = "ID%02d %s" % (self.data_dict["straight_section"][self.ebs_id_index-1], self.data_dict["id_name"][self.ebs_id_index-1])
    # Keys must match the placeholders in info_template().
    info_parameters = {
        "electron_energy_in_GeV":self.electron_energy_in_GeV,
        "gamma":"%8.3f"%self.gamma(),
        "ring_current":"%4.3f "%syned_electron_beam.current(),
        "K_horizontal":syned_undulator.K_horizontal(),
        "K_vertical": syned_undulator.K_vertical(),
        "period_length": syned_undulator.period_length(),
        "number_of_periods": syned_undulator.number_of_periods(),
        "undulator_length": syned_undulator.length(),
        "resonance_energy":"%6.3f"%syned_undulator.resonance_energy(gamma,harmonic=1),
        "resonance_energy3": "%6.3f" % syned_undulator.resonance_energy(gamma,harmonic=3),
        "resonance_energy5": "%6.3f" % syned_undulator.resonance_energy(gamma,harmonic=5),
        "B_horizontal":"%4.2F"%syned_undulator.magnetic_field_horizontal(),
        "B_vertical": "%4.2F" % syned_undulator.magnetic_field_vertical(),
        # Central-cone apertures in urad for harmonics 1/3/5.
        "cc_1": "%4.2f" % (1e6*syned_undulator.gaussian_central_cone_aperture(gamma,1)),
        "cc_3": "%4.2f" % (1e6*syned_undulator.gaussian_central_cone_aperture(gamma,3)),
        "cc_5": "%4.2f" % (1e6*syned_undulator.gaussian_central_cone_aperture(gamma,5)),
        # "cc_7": "%4.2f" % (self.gaussian_central_cone_aperture(7)*1e6),
        # Radiation sigma (um) and sigma' (urad) for harmonics 1/3/5.
        "sigma_rad": "%5.2f" % (1e6*syned_undulator.get_sigmas_radiation(gamma,harmonic=1)[0]),
        "sigma_rad_prime": "%5.2f" % (1e6*syned_undulator.get_sigmas_radiation(gamma,harmonic=1)[1]),
        "sigma_rad3": "%5.2f" % (1e6*syned_undulator.get_sigmas_radiation(gamma,harmonic=3)[0]),
        "sigma_rad_prime3": "%5.2f" % (1e6*syned_undulator.get_sigmas_radiation(gamma,harmonic=3)[1]),
        "sigma_rad5": "%5.2f" % (1e6 * syned_undulator.get_sigmas_radiation(gamma, harmonic=5)[0]),
        "sigma_rad_prime5": "%5.2f" % (1e6 * syned_undulator.get_sigmas_radiation(gamma, harmonic=5)[1]),
        "first_ring_1": "%5.2f" % (1e6*syned_undulator.get_resonance_ring(gamma, harmonic=1, ring_order=1)),
        "first_ring_3": "%5.2f" % (1e6*syned_undulator.get_resonance_ring(gamma, harmonic=3, ring_order=1)),
        "first_ring_5": "%5.2f" % (1e6*syned_undulator.get_resonance_ring(gamma, harmonic=5, ring_order=1)),
        # Convolved photon-source sizes/divergences at the 1st harmonic.
        "Sx": "%5.2f" % (1e6*syned_undulator.get_photon_sizes_and_divergences(syned_electron_beam)[0]),
        "Sy": "%5.2f" % (1e6*syned_undulator.get_photon_sizes_and_divergences(syned_electron_beam)[1]),
        "Sxp": "%5.2f" % (1e6*syned_undulator.get_photon_sizes_and_divergences(syned_electron_beam)[2]),
        "Syp": "%5.2f" % (1e6*syned_undulator.get_photon_sizes_and_divergences(syned_electron_beam)[3]),
        "und_power": "%5.2f" % syned_undulator.undulator_full_emitted_power(gamma,syned_electron_beam.current()),
        "CF_h": "%4.3f" % syned_undulator.approximated_coherent_fraction_horizontal(syned_electron_beam,harmonic=1),
        "CF_v": "%4.3f" % syned_undulator.approximated_coherent_fraction_vertical(syned_electron_beam,harmonic=1),
        "CF": "%4.3f" % syned_undulator.approximated_coherent_fraction(syned_electron_beam,harmonic=1),
        "url": self.data_url,
        "id": id,
        "gap": "%4.3f" % self.calculate_gap_from_K(),
        "a0": "%4.3f" % self.a0,
        "a1": "%4.3f" % self.a1,
        "a2": "%4.3f" % self.a2,
        "a3": "%4.3f" % self.a3,
        "a4": "%4.3f" % self.a4,
        "a5": "%4.3f" % self.a5,
        "a6": "%4.3f" % self.a6,
        }
    self.info_id.setText(self.info_template().format_map(info_parameters))
    # self.tabs[0].setText(self.info_template().format_map(info_parameters))
def info_template(self):
    """Return the Info-tab report template.

    Placeholders (``{name}``) are filled by ``str.format_map`` with the
    ``info_parameters`` dict built in update_info(); the two sets of keys
    must stay in sync.
    """
    return \
"""
data url: {url}
id_name: {id}
================ input parameters ===========
Electron beam energy [GeV]: {electron_energy_in_GeV}
Electron current: {ring_current}
Period Length [m]: {period_length}
Number of Periods: {number_of_periods}
Horizontal K: {K_horizontal}
Vertical K: {K_vertical}
==============================================
Electron beam gamma: {gamma}
Undulator Length [m]: {undulator_length}
Horizontal Peak Magnetic field [T]: {B_horizontal}
Vertical Peak Magnetic field [T]: {B_vertical}
Total power radiated by the undulator [W]: {und_power}
Gap in use: {gap} mm
Using gap parametrization:
a0: {a0}
a1: {a1}
a2: {a2}
a3: {a3}
a4: {a4}
a5: {a5}
a6: {a6}
Resonances:
Photon energy [eV]:
for harmonic 1 : {resonance_energy}
for harmonic 3 : {resonance_energy3}
for harmonic 5 : {resonance_energy5}
Central cone (RMS urad):
for harmonic 1 : {cc_1}
for harmonic 3 : {cc_3}
for harmonic 5 : {cc_5}
First ring at (urad):
for harmonic 1 : {first_ring_1}
for harmonic 3 : {first_ring_3}
for harmonic 5 : {first_ring_5}
Sizes and divergences of radiation :
at 1st harmonic: sigma: {sigma_rad} um, sigma': {sigma_rad_prime} urad
at 3rd harmonic: sigma: {sigma_rad3} um, sigma': {sigma_rad_prime3} urad
at 5th harmonic: sigma: {sigma_rad5} um, sigma': {sigma_rad_prime5} urad
Sizes and divergences of photon source (convolution) at resonance (1st harmonic): :
Sx: {Sx} um
Sy: {Sy} um,
Sx': {Sxp} urad
Sy': {Syp} urad
Approximated coherent fraction at 1st harmonic:
Horizontal: {CF_h}
Vertical: {CF_v}
Coherent fraction 2D (HxV): {CF}
"""
def get_magnetic_structure(self):
    """Build a syned Undulator from the current widget settings.

    This is the effective definition (it shadows an earlier one that
    returned an InsertionDevice).
    """
    undulator = Undulator(K_horizontal=self.K_horizontal,
                          K_vertical=self.K_vertical,
                          period_length=self.period_length,
                          number_of_periods=self.number_of_periods)
    return undulator
def check_magnetic_structure_instance(self, magnetic_structure):
    """Raise ValueError unless ``magnetic_structure`` is a syned Undulator.

    ValueError (rather than TypeError) is kept for backward
    compatibility with existing callers.
    """
    if not isinstance(magnetic_structure, Undulator):
        # Fixed grammar of the message ("a Undulator" -> "an Undulator").
        raise ValueError("Magnetic Structure is not an Undulator")
def populate_magnetic_structure(self):
    """Fill K, period and number of periods from the database entry
    currently selected in the combo (``ebs_id_index`` is the database
    row index plus one)."""
    row = self.ebs_id_index - 1
    self.K_horizontal = 0.0
    self.K_vertical = numpy.round(self.data_dict["Kmax"][row], 3)
    period = numpy.round(self.data_dict["id_period"][row], 3)
    self.period_length = period
    self.number_of_periods = numpy.round(self.data_dict["id_length"][row] / period, 3)
def populate_gap_parametrization(self):
    """Copy the gap->B fit coefficients a0..a6 of the selected ID from
    the database into the widget settings."""
    row = self.ebs_id_index - 1
    for coeff in ("a0", "a1", "a2", "a3", "a4", "a5", "a6"):
        setattr(self, coeff, self.data_dict[coeff][row])
def populate_settings_after_setting_K(self):
    """Recompute the derived fields after K changed: the resonant photon
    energy at the selected harmonic, and the corresponding gap."""
    undulator = self.get_light_source().get_magnetic_structure()
    resonance = undulator.resonance_energy(self.gamma(),
                                           harmonic=self.auto_harmonic_number)
    self.auto_energy = numpy.round(resonance, 3)
    self.gap_mm = numpy.round(self.calculate_gap_from_K(), 3)
def set_gap(self, which=VERTICAL):
    """Callback of the gap line edit: clamp the gap to [gap_min, gap_max]
    (asking the user first), derive K from the gap parametrization and
    assign it to the plane(s) selected by ``which``
    (VERTICAL/HORIZONTAL/BOTH), then refresh the derived fields.

    Raises if the gap is still out of range after the user declined the
    proposed clamp.
    """
    if self.gap_mm < self.gap_min:
        if ConfirmDialog.confirmed(self, message="Gap is smaller than minimum. Set to minimum?"):
            self.gap_mm = self.gap_min
    if self.gap_mm > self.gap_max:
        if ConfirmDialog.confirmed(self, message="Gap is larger than maximum. Set to maximum?"):
            self.gap_mm = self.gap_max
    # Re-check: the user may have declined the clamp above.
    if self.gap_mm < self.gap_min:
        raise Exception("Gap is smaller than minimum")
    if self.gap_mm > self.gap_max:
        raise Exception("Gap is larger than maximum")
    K = numpy.round(self.calculate_K_from_gap(), 3)
    if which == VERTICAL:
        self.K_vertical = K
        self.K_horizontal = 0.0
    if which == BOTH:
        # Split the deflection equally between the two planes.
        Kboth = round(K / numpy.sqrt(2), 6)
        self.K_vertical = Kboth
        self.K_horizontal = Kboth
    if which == HORIZONTAL:
        self.K_horizontal = K
        self.K_vertical = 0.0
    self.populate_settings_after_setting_K()
    self.update()
def set_id(self):
    """Combo callback: load gap parametrization, magnetic structure and
    minimum gap of the selected ID, then refresh. Index 0 ("<None>")
    leaves everything untouched."""
    if self.ebs_id_index == 0:
        return
    self.populate_gap_parametrization()
    self.populate_magnetic_structure()
    self.gap_min = self.data_dict["id_minimum_gap_mm"][self.ebs_id_index - 1]
    self.populate_settings_after_setting_K()
    self.update()
def set_K(self):
    """Callback for edits of K (or of the a0..a6 coefficients):
    recompute the derived fields and redraw."""
    self.populate_settings_after_setting_K()
    self.update()
def auto_set_undulator_V(self):
    """Tune K_vertical so the requested harmonic hits the target photon energy."""
    self.set_resonance_energy(which=VERTICAL)
def auto_set_undulator_H(self):
    """Tune K_horizontal so the requested harmonic hits the target photon energy."""
    self.set_resonance_energy(which=HORIZONTAL)
def auto_set_undulator_B(self):
    """Split K between both planes so the requested harmonic hits the target photon energy."""
    self.set_resonance_energy(which=BOTH)
def set_resonance_energy(self, which=VERTICAL):
    """Compute the K needed so that harmonic ``auto_harmonic_number``
    resonates at ``auto_energy``, clamp it to the [Kmin, Kmax] range
    allowed by the gap limits (asking the user before clamping), and
    assign it to the plane(s) selected by ``which``."""
    congruence.checkStrictlyPositiveNumber(self.auto_energy, "Set Undulator at Energy")
    congruence.checkStrictlyPositiveNumber(self.auto_harmonic_number, "As Harmonic #")
    congruence.checkStrictlyPositiveNumber(self.electron_energy_in_GeV, "Energy")
    congruence.checkStrictlyPositiveNumber(self.period_length, "Period Length")
    # Wavelength of the fundamental that puts the requested harmonic at
    # auto_energy; inverting the resonance relation gives K.
    wavelength = self.auto_harmonic_number*m2ev/self.auto_energy
    K = round(numpy.sqrt(2*(((wavelength*2*self.gamma()**2)/self.period_length)-1)), 6)
    # K range reachable within the gap limits (K decreases with gap).
    Kmax = self.calculate_K_from_gap(self.gap_min)
    Kmin = self.calculate_K_from_gap(self.gap_max)
    # sqrt of a negative argument (energy not reachable) yields NaN.
    if numpy.isnan(K):
        if ConfirmDialog.confirmed(self, message="Impossible configuration. Set to Kmin=%f?" % (Kmin)):
            K = numpy.round(Kmin,4)
    if (K > Kmax):
        if ConfirmDialog.confirmed(self, message="Needed K (%f) > Kmax (%f). Reset to Kmax?" % (K, Kmax)):
            K = numpy.round(Kmax,4)
    if (K < Kmin):
        if ConfirmDialog.confirmed(self, message="Needed K (%f) < Kmin (%f). Reset to Kmin?" % (K, Kmin)):
            K = numpy.round(Kmin,4)
    if which == VERTICAL:
        self.K_vertical = K
        self.K_horizontal = 0.0
    if which == BOTH:
        # Split the deflection equally between the two planes.
        Kboth = round(K / numpy.sqrt(2), 6)
        self.K_vertical = Kboth
        self.K_horizontal = Kboth
    if which == HORIZONTAL:
        self.K_horizontal = K
        self.K_vertical = 0.0
    self.populate_settings_after_setting_K()
    self.update()
def plot_graph(self, plot_canvas_index, curve_name, x_values, y_values, xtitle="", ytitle="",
               color='blue', replace=True):
    """Draw one curve on the selected plot canvas, set its axis labels
    and redraw."""
    canvas = self.plot_canvas[plot_canvas_index]
    canvas.addCurve(x_values, y_values, curve_name, symbol='', color=color, replace=replace)
    canvas.setGraphXLabel(xtitle)
    canvas.setGraphYLabel(ytitle)
    canvas.replot()
def update_plots(self):
    """Redraw the four gap-scan plots over [0.9*gap_min, 1.1*gap_max]:
    K vs gap, B vs gap, resonance energy vs gap (one curve per harmonic
    up to harmonic_max), and total emitted power vs gap."""
    gap_mm = numpy.linspace(self.gap_min * 0.9, self.gap_max * 1.1, 1000)
    Karray = self.calculate_K_from_gap(gap_mm)
    Karray_horizontal = numpy.zeros_like(Karray)
    # Invert K = B * period * e / (2 pi m_e c) to recover the peak field.
    Bfield = Karray / (self.period_length * codata.e / (2 * numpy.pi * codata.m_e * codata.c))
    E1_array = self.calculate_resonance_energy(Karray)
    # Total emitted power of a planar undulator (W).
    ptot = (self.number_of_periods /6) * codata.value('characteristic impedance of vacuum') * \
           self.ring_current * codata.e * 2 * numpy.pi * codata.c * self.gamma()**2 * \
           (Karray**2 + Karray_horizontal**2) / self.period_length
    self.plot_graph(0, self.titles()[0], gap_mm, Karray, xtitle=self.xtitles()[0], ytitle=self.ytitles()[0])
    self.plot_graph(1, self.titles()[1], gap_mm, Bfield, xtitle=self.xtitles()[1], ytitle=self.ytitles()[1])
    #
    # Resonance plot: gap as a function of photon energy, one curve per
    # harmonic (energy axis scaled by the harmonic number).
    #
    xtitle = "Photon energy [keV]"
    ytitle = "Gap [mm]"
    colors = ['green', 'black', 'red', 'brown', 'orange', 'pink'] * self.harmonic_max
    for i in range(1, self.harmonic_max+1):
        self.plot_canvas[2].addCurve(E1_array * i* 1e-3, gap_mm,
                                     "harmonic %d" % i,
                                     xlabel=xtitle, ylabel=ytitle,
                                     symbol='', color=colors[i-1])
    self.plot_canvas[2].getLegendsDockWidget().setFixedHeight(150)
    self.plot_canvas[2].getLegendsDockWidget().setVisible(True)
    self.plot_canvas[2].setActiveCurve("harmonic 1")
    self.plot_canvas[2].replot()
    #
    #
    #
    self.plot_graph(3, self.titles()[3], gap_mm, ptot, xtitle=self.xtitles()[3], ytitle=self.ytitles()[3])
def calculate_resonance_energy(self, Karray):
    """Return the on-axis resonance photon energy [eV] for harmonic
    ``self.auto_harmonic_number``, for the given vertical K value(s).

    Bug fix: the previous implementation applied the harmonic number
    three times (wavelength /= n, frequency *= n, energy *= n), which
    returned n**3 * E1 instead of the physical E_n = n * E1. The
    harmonic is now applied exactly once (unchanged for n == 1).

    Parameters
    ----------
    Karray : float or ndarray
        Vertical deflection parameter(s); horizontal K is taken as 0.

    Returns
    -------
    float or ndarray
        Photon energy in eV, same shape as ``Karray``.
    """
    theta_x = 0.0  # on-axis observation
    theta_z = 0.0
    K_vertical = Karray
    K_horizontal = numpy.zeros_like(K_vertical)
    # Fundamental (n=1) resonance wavelength of a planar undulator.
    wavelength = (self.period_length / (2.0 * self.gamma() ** 2)) * \
                 (1 + K_vertical ** 2 / 2.0 + K_horizontal ** 2 / 2.0 +
                  self.gamma() ** 2 * (theta_x ** 2 + theta_z ** 2))
    energy_in_ev = codata.h * codata.c / wavelength / codata.e
    # E_n = n * E_1: the harmonic number enters exactly once.
    return energy_in_ev * self.auto_harmonic_number
def calculate_K_from_gap(self, gap_mm=None):
    """Evaluate the gap->K parametrization.

    The peak field is modeled as
    ``B = sum_k a_k * exp(-k * pi * (gap - a0) / period_mm)`` for
    k = 1..6, and converted to the deflection parameter
    ``K = B * period * e / (2 pi m_e c)``.

    Parameters
    ----------
    gap_mm : float or ndarray, optional
        Gap value(s) in mm; defaults to the current widget gap.
    """
    if gap_mm is None:
        gap_mm = self.gap_mm
    # index = self.ebs_id_index - 1
    period_mm = self.period_length * 1e3  # xxxx data_dict["id_period_mm"][index]
    Bmax = numpy.zeros_like(gap_mm)
    for k, a_k in enumerate((self.a1, self.a2, self.a3, self.a4, self.a5, self.a6), start=1):
        Bmax = Bmax + a_k * numpy.exp(-k * numpy.pi * (gap_mm - self.a0) / period_mm)
    return Bmax * (period_mm * 1e-3) * codata.e / (2 * numpy.pi * codata.m_e * codata.c)
def calculate_gap_from_K(self, Kvalue=None):
    """Numerically invert calculate_K_from_gap(): return the gap (mm)
    producing the requested deflection parameter.

    Raises Exception when Kvalue is outside the K range reachable within
    [0.9 * gap_min, 1.1 * gap_max].
    """
    if Kvalue is None:
        Kvalue = self.K_vertical
    candidate_gaps = numpy.linspace(self.gap_min * 0.9, self.gap_max * 1.1, 1000)
    candidate_K = self.calculate_K_from_gap(candidate_gaps)
    if Kvalue < candidate_K.min() or Kvalue > candidate_K.max():
        raise Exception("Cannot interpolate...")
    # K decreases with gap, so flip both arrays to give numpy.interp the
    # ascending x-axis it requires.
    return numpy.interp(Kvalue, numpy.flip(candidate_K), numpy.flip(candidate_gaps))
def gamma(self):
    """Return the Lorentz factor: beam energy over the electron rest energy (eV)."""
    rest_energy_ev = codata.m_e * codata.c ** 2 / codata.e
    return 1e9 * self.electron_energy_in_GeV / rest_energy_ev
def set_visible(self):
    """Show only the property box matching the selected input mode
    (0 = moments, 1 = sizes/divergences, 2 = Twiss)."""
    boxes = (self.left_box_2_1, self.left_box_2_2, self.left_box_2_3)
    for mode, box in enumerate(boxes):
        box.setVisible(self.type_of_properties == mode)
def check_data(self):
    """Validate the user-entered electron beam parameters via the
    congruence helpers (which raise on invalid values), then validate the
    magnetic structure.

    Only the property group selected by ``type_of_properties`` is checked:
    0 = second moments, 1 = sizes/divergences, 2 = Twiss parameters.
    """
    congruence.checkStrictlyPositiveNumber(self.electron_energy_in_GeV, "Energy")
    congruence.checkStrictlyPositiveNumber(self.electron_energy_spread, "Energy Spread")
    congruence.checkStrictlyPositiveNumber(self.ring_current, "Ring Current")
    if self.type_of_properties == 0:
        congruence.checkPositiveNumber(self.moment_xx, "Moment xx")
        congruence.checkPositiveNumber(self.moment_xpxp, "Moment xpxp")
        congruence.checkPositiveNumber(self.moment_yy, "Moment yy")
        congruence.checkPositiveNumber(self.moment_ypyp, "Moment ypyp")
    elif self.type_of_properties == 1:
        # FIX: the labels previously mixed up size/divergence and
        # horizontal/vertical; each message now names the value checked.
        congruence.checkPositiveNumber(self.electron_beam_size_h, "Horizontal Beam Size")
        congruence.checkPositiveNumber(self.electron_beam_divergence_h, "Horizontal Beam Divergence")
        congruence.checkPositiveNumber(self.electron_beam_size_v, "Vertical Beam Size")
        congruence.checkPositiveNumber(self.electron_beam_divergence_v, "Vertical Beam Divergence")
    elif self.type_of_properties == 2:
        congruence.checkPositiveNumber(self.electron_beam_emittance_h, "Horizontal Beam Emittance")
        congruence.checkPositiveNumber(self.electron_beam_emittance_v, "Vertical Beam Emittance")
        congruence.checkNumber(self.electron_beam_alpha_h, "Horizontal Beam Alpha")
        congruence.checkNumber(self.electron_beam_alpha_v, "Vertical Beam Alpha")
        congruence.checkNumber(self.electron_beam_beta_h, "Horizontal Beam Beta")
        congruence.checkNumber(self.electron_beam_beta_v, "Vertical Beam Beta")
        congruence.checkNumber(self.electron_beam_eta_h, "Horizontal Beam Dispersion Eta")
        congruence.checkNumber(self.electron_beam_eta_v, "Vertical Beam Dispersion Eta")
        congruence.checkNumber(self.electron_beam_etap_h, "Horizontal Beam Dispersion Eta'")
        congruence.checkNumber(self.electron_beam_etap_v, "Vertical Beam Dispersion Eta'")
    self.check_magnetic_structure()
def send_data(self):
    """Validate the widget inputs and emit a SynedData beamline; on any
    failure show the reason in a critical dialog and reset the status bar."""
    try:
        self.check_data()
        beamline = Beamline(light_source=self.get_light_source())
        self.send("SynedData", beamline)
    except Exception as e:
        QMessageBox.critical(self, "Error", str(e.args[0]), QMessageBox.Ok)
        self.setStatusMessage("")
        self.progressBarFinished()
def get_light_source(self):
    """Build a syned LightSource from the widget fields.

    The electron beam is filled according to ``type_of_properties``:
    0 = second moments, 1 = sigmas, 2 = Twiss parameters, 3 = zero
    (filament) beam.
    """
    electron_beam = ElectronBeam(
        energy_in_GeV=self.electron_energy_in_GeV,
        energy_spread=self.electron_energy_spread,
        current=self.ring_current,
        number_of_bunches=self.number_of_bunches)
    mode = self.type_of_properties
    if mode == 0:
        electron_beam.set_moments_horizontal(self.moment_xx, self.moment_xxp, self.moment_xpxp)
        electron_beam.set_moments_vertical(self.moment_yy, self.moment_yyp, self.moment_ypyp)
    elif mode == 1:
        electron_beam.set_sigmas_all(
            sigma_x=self.electron_beam_size_h,
            sigma_y=self.electron_beam_size_v,
            sigma_xp=self.electron_beam_divergence_h,
            sigma_yp=self.electron_beam_divergence_v)
    elif mode == 2:
        electron_beam.set_twiss_horizontal(
            self.electron_beam_emittance_h,
            self.electron_beam_alpha_h,
            self.electron_beam_beta_h,
            self.electron_beam_eta_h,
            self.electron_beam_etap_h)
        electron_beam.set_twiss_vertical(
            self.electron_beam_emittance_v,
            self.electron_beam_alpha_v,
            self.electron_beam_beta_v,
            self.electron_beam_eta_v,
            self.electron_beam_etap_v)
    elif mode == 3:
        # zero-emittance (filament) beam
        electron_beam.set_moments_all(0, 0, 0, 0, 0, 0)
    return LightSource(name="EBS lightsource",
                       electron_beam=electron_beam,
                       magnetic_structure=self.get_magnetic_structure())
def callResetSettings(self):
    """Reset all widget fields to their defaults after user confirmation.

    Reset failures are deliberately ignored (best effort), but the former
    bare ``except`` is narrowed so KeyboardInterrupt/SystemExit still
    propagate.
    """
    if ConfirmDialog.confirmed(parent=self, message="Confirm Reset of the Fields?"):
        try:
            self.resetSettings()
        except Exception:
            pass
def populate_electron_beam(self, electron_beam=None):
    """Copy the parameters of *electron_beam* into the widget fields.

    When no beam is given, a default 6 GeV / 0.2 A beam with preset second
    moments is used.  ``type_of_properties`` is forced to 1
    (sizes/divergences).
    """
    if electron_beam is None:
        electron_beam = ElectronBeam(
            energy_in_GeV=6.0,
            energy_spread=0.001,
            current=0.2,
            number_of_bunches=1,
            moment_xx=(3.01836e-05) ** 2,
            moment_xxp=(0.0) ** 2,
            moment_xpxp=(4.36821e-06) ** 2,
            moment_yy=(3.63641e-06) ** 2,
            moment_yyp=(0.0) ** 2,
            moment_ypyp=(1.37498e-06) ** 2,
        )
    self.electron_energy_in_GeV = electron_beam._energy_in_GeV
    self.electron_energy_spread = electron_beam._energy_spread
    self.ring_current = electron_beam._current
    self.number_of_bunches = electron_beam._number_of_bunches
    self.type_of_properties = 1
    # mirror all six second moments of the beam onto the widget
    for moment in ('xx', 'xxp', 'xpxp', 'yy', 'yyp', 'ypyp'):
        setattr(self, 'moment_' + moment, getattr(electron_beam, '_moment_' + moment))
    size_h, divergence_h, size_v, divergence_v = electron_beam.get_sigmas_all()
    self.electron_beam_size_h = size_h
    self.electron_beam_size_v = size_v
    self.electron_beam_divergence_h = divergence_h
    self.electron_beam_divergence_v = divergence_v
def get_data_dictionary_csv(self):
    """Download and parse the undulator catalogue CSV at ``self.data_url``
    and store the result in ``self.data_dict``.

    On any failure (network, parsing) the dictionary is filled with empty
    lists for every expected key so the rest of the widget keeps working.

    Fixes vs the previous version: the bare ``except:`` is narrowed to
    ``Exception``, the duplicated "straight_section" assignment is gone,
    the six Bmax terms are a loop, and the ``deletechars`` string is a raw
    string (``"\\]"`` was an invalid escape sequence).
    """
    keys = ("straight_section", "id_name", "id_minimum_gap_mm", "Bmax", "Kmax",
            "id_period", "id_period_mm", "id_length",
            "a0", "a1", "a2", "a3", "a4", "a5", "a6")
    url = self.data_url

    def tofloat(s):
        # empty CSV cells become 0.0 instead of failing the float cast
        return numpy.array(['0.0' if v == '' else v for v in s]).astype(float)

    try:
        filename = url  # 'ftp://ftp.esrf.eu/pub/scisoft/syned/resources/jsrund.csv'
        ishift = 1  # column offset: the CSV has one extra leading column
        a = numpy.genfromtxt(filename, dtype=str, delimiter=',', skip_header=3, skip_footer=1,
                             converters=None, missing_values={0: "11.000"},
                             filling_values={0: "XXX"}, usecols=None, names=None,
                             excludelist=None,
                             deletechars=r" !#$%&'()*+, -./:;<=>?@[\]^{|}~", replace_space='',
                             autostrip=True, case_sensitive=True, defaultfmt='f%i', unpack=None,
                             usemask=False, loose=True, invalid_raise=True, max_rows=None,
                             encoding='bytes')
        straight_section = a[:, 0].astype(int)
        id_name = a[:, 1]
        id_period = 1e-3 * a[:, 2 + ishift].astype(float)
        id_period_mm = a[:, 2 + ishift].astype(float)
        id_length = 1e-3 * a[:, 3 + ishift].astype(float)
        id_minimum_gap_mm = tofloat(a[:, 4 + ishift])
        # field-expansion coefficients a0..a6 (a0 is the gap offset)
        coeffs = [tofloat(a[:, 5 + i + ishift]) for i in range(7)]
        a0 = coeffs[0]
        # peak field at minimum gap from the six-term exponential model
        Bmax = numpy.zeros_like(a0)
        for order in range(1, 7):
            Bmax += coeffs[order] * numpy.exp(
                -order * numpy.pi * (id_minimum_gap_mm - a0) / id_period_mm)
        Kmax = Bmax * id_period * codata.e / (2 * numpy.pi * codata.m_e * codata.c)
        columns = (straight_section, id_name, id_minimum_gap_mm, Bmax, Kmax,
                   id_period, id_period_mm, id_length) + tuple(coeffs)
        out_dict = {key: column.tolist() for key, column in zip(keys, columns)}
    except Exception:
        # network or parsing failure: fall back to an empty catalogue so the
        # widget still initialises
        out_dict = {key: [] for key in keys}
    self.data_dict = out_dict
# OLD data format...
def get_data_dictionary(self):
    """Fetch ``self.data_url`` as JSON (legacy catalogue format) and store
    the parsed dictionary in ``self.data_dict``."""
    import json
    from urllib.request import urlopen

    response = urlopen(self.data_url)
    payload = response.read().decode(encoding='UTF-8')
    self.data_dict = json.loads(payload)
if __name__ == "__main__":
    # Manual test: run the widget inside a bare Qt application.
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    widget = OWEBS()
    widget.show()
    app.exec_()
# print(data_dict_old["id_name"]) | 45.715942 | 207 | 0.622897 | 45,616 | 0.964071 | 0 | 0 | 0 | 0 | 0 | 0 | 9,115 | 0.192641 |
38d929d0301c7b6e0ff15401a2613c1800d9088f | 139 | py | Python | test_project/test_app/urls.py | ninemoreminutes/django-trails | fabe179fc4b5829f8c73805faa027f5e866ade7c | [
"BSD-3-Clause"
] | 2 | 2018-10-18T19:34:40.000Z | 2019-11-13T16:00:53.000Z | test_project/test_app/urls.py | ninemoreminutes/django-trails | fabe179fc4b5829f8c73805faa027f5e866ade7c | [
"BSD-3-Clause"
] | 4 | 2020-02-11T23:21:07.000Z | 2020-06-05T19:21:05.000Z | test_project/test_app/urls.py | ninemoreminutes/django-trails | fabe179fc4b5829f8c73805faa027f5e866ade7c | [
"BSD-3-Clause"
] | null | null | null | # Django
from django.urls import re_path
# Test App
from .views import index
urlpatterns = [
re_path(r'^$', index, name='index'),
]
| 12.636364 | 40 | 0.669065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.215827 |
38da00d41601d677e1dec72d26270b4ae74fa86c | 5,927 | py | Python | bin/analysis/ipa/constraints/node.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 6 | 2015-09-19T18:22:33.000Z | 2020-11-29T15:21:17.000Z | bin/analysis/ipa/constraints/node.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 1 | 2015-08-04T08:03:46.000Z | 2015-08-04T08:03:46.000Z | bin/analysis/ipa/constraints/node.py | ncbray/pystream | 70bba5646d6512adb6803564c22268d3424c66d8 | [
"Apache-2.0"
] | 1 | 2019-12-09T08:27:09.000Z | 2019-12-09T08:27:09.000Z | # Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from language.python import ast # Debugging
from . import split
from .. calling import cpa
from .. model import objectname
class Critical(object):
__slots__ = 'values', 'diff', 'isCritical', '_dirty', 'node'
def __init__(self, context, node):
self.node = node
cm = self.getManager(context)
self.values = cm.empty()
self.diff = cm.empty()
self.isCritical = False
self._dirty = False
def getManager(self, context):
return context.analysis.criticalmanager
def markDirty(self, context, node):
assert node is self.node
if not self._dirty:
assert node not in context.dirtycriticals
self._dirty = True
context.dirtyCritical(node, self)
def propagate(self, context, node):
assert node is self.node
assert node not in context.dirtycriticals
assert self._dirty, node
self._dirty = False
cm = self.getManager(context)
diff = self.diff
self.values = cm.inplaceUnion(self.values, diff)
self.diff = cm.empty()
for constraint in node.next:
constraint.criticalChanged(context, node, diff)
def updateValues(self, context, node, values):
assert node is self.node
cm = self.getManager(context)
diff = cm.tempDiff(values, self.values)
if diff:
if node.next:
self.diff = cm.inplaceUnion(self.diff, diff)
self.markDirty(context, node)
else:
assert not self.diff
self.values = cm.inplaceUnion(self.values, diff)
def updateSingleValue(self, context, node, value):
assert node is self.node
cm = self.getManager(context)
if value not in self.values and value not in self.diff:
diff = cm.coerce([value])
self.updateValues(context, node, diff)
def markCritical(self, context, node):
assert node is self.node
if not self.isCritical:
self.isCritical = True
self.updateSingleValue(context, node, node.name)
class ConstraintNode(object):
__slots__ = 'context', 'name', 'ci', 'values', 'valuediff', 'null', 'dirty', 'prev', 'next', 'typeSplit', 'exactSplit', 'flags', 'flagsdiff', 'critical'
def __init__(self, context, name, ci=False):
assert not isinstance(name, ast.DoNotCare), name
self.context = context
self.name = name
self.ci = ci
self.next = []
self.prev = []
# Value flow
self.values = context.analysis.valuemanager.empty()
self.valuediff = context.analysis.valuemanager.empty()
self.null = False
self.dirty = False
self.typeSplit = None
self.exactSplit = None
# Flag flow
self.flags = 0
self.flagsdiff = 0
self.critical = Critical(context, self)
def clearFlags(self):
self.flags = 0
self.flagsdiff = 0
def updateFlags(self, flags):
diff = ~self.flags & flags
new = self.flagsdiff | diff
if new != self.flagsdiff:
self.flagsdiff = new
if not self.dirty:
self.dirty = True
self.context.dirtyFlags(self)
def attachTypeSplit(self, callback):
if self.typeSplit is None:
self.typeSplit = split.TypeSplitConstraint(self)
self.context.constraint(self.typeSplit)
self.typeSplit.addSplitCallback(callback)
def getFiltered(self, typeFilter):
if typeFilter is cpa.anyType:
return self
else:
return self.typeSplit.objects[typeFilter]
def attachExactSplit(self, callback):
if self.exactSplit is None:
self.exactSplit = split.ExactSplitConstraint(self)
self.context.constraint(self.exactSplit)
self.exactSplit.addSplitCallback(callback)
def addNext(self, constraint):
self.next.append(constraint)
def addPrev(self, constraint):
self.prev.append(constraint)
def markDirty(self):
if not self.dirty:
self.dirty = True
self.context.dirtySlot(self)
def updateValues(self, values):
vm = self.context.analysis.valuemanager
# Not retained, so set manager is not used
diff = vm.tempDiff(values, self.values)
if diff:
for value in diff:
assert value.isObjectName(), value
if self.next:
self.valuediff = vm.inplaceUnion(self.valuediff, diff)
self.markDirty()
else:
assert not self.valuediff
self.values = vm.inplaceUnion(self.values, diff)
return True
else:
return False
def updateSingleValue(self, value):
assert value.isObjectName(), value
if value not in self.values and value not in self.valuediff:
vm = self.context.analysis.valuemanager
diff = vm.coerce([value])
if self.next:
self.valuediff = vm.inplaceUnion(self.valuediff, diff)
self.markDirty()
else:
assert not self.valuediff
self.values = vm.inplaceUnion(self.values, diff)
return True
else:
return False
def markNull(self):
if not self.null:
self.null = True
if self.next:
# HACK this is an expensive way of communicating with the
# few consumers that care. Fortunately, this is rare.
self.markDirty()
def clearNull(self):
# Can only be done before the node is observed.
assert not self.next
self.null = False
def propagate(self):
assert self.dirty
self.dirty = False
# Update the sets of objects
# Must be done before any callback is performed, as a
# cyclic dependency could update these values
vm = self.context.analysis.valuemanager
diff = self.valuediff
self.values = vm.inplaceUnion(self.values, diff)
self.valuediff = vm.empty()
for constraint in self.next:
constraint.changed(self.context, self, diff)
def __repr__(self):
return "slot(%r/%d)" % (self.name, id(self))
def isNode(self):
return True
| 26.698198 | 153 | 0.713683 | 5,222 | 0.881053 | 0 | 0 | 0 | 0 | 0 | 0 | 1,091 | 0.184073 |
38da46eb39dadf4a7638b1ed59d690feefc47f80 | 3,655 | py | Python | levels/7/initialize_db.py | industrydive/stripe-ctf-2-vm | 1987516471957a791ed02c70ac4ae6b7aeb72b69 | [
"Apache-2.0"
] | 36 | 2015-05-11T20:22:55.000Z | 2021-09-26T07:36:49.000Z | levels/7/initialize_db.py | industrydive/stripe-ctf-2-vm | 1987516471957a791ed02c70ac4ae6b7aeb72b69 | [
"Apache-2.0"
] | null | null | null | levels/7/initialize_db.py | industrydive/stripe-ctf-2-vm | 1987516471957a791ed02c70ac4ae6b7aeb72b69 | [
"Apache-2.0"
] | 16 | 2016-03-08T16:25:46.000Z | 2022-03-16T06:28:51.000Z | #!/usr/bin/env python
import sys
from datetime import datetime
from random import SystemRandom
import bcrypt
import sqlite3
import client
import db
import settings
# Handle to the level database configured in settings; the debug flag
# presumably makes the db layer echo statements -- confirm in the db module.
conn = db.DB(settings.database)
conn.debug = True
c = conn.cursor

# Regenerate the entropy file (semantics live in the db module).
db.rewrite_entropy_file(settings.entropy_file)

# Cryptographically secure RNG used for generated passwords and secrets.
rand = SystemRandom()
def rand_choice(alphabet, length):
    """Return a random string of *length* characters drawn with replacement
    from *alphabet*, using the module-level secure RNG."""
    picks = [rand.choice(alphabet) for _ in range(length)]
    return ''.join(picks)
# Alphabet for generated passwords and secrets: a-z, A-Z, 0-9.
alphanum = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'


def rand_alnum(length):
    """Return a random alphanumeric string of *length* characters."""
    return rand_choice(alphanum, length)
def main(level_password):
    """Seed the level database: create the schema, then insert the fixture
    users, the waffle catalogue (the 'liege' waffle's confirmation code is
    *level_password* -- see add_waffles) and some sample signed logs."""
    create_tables()
    add_users()
    add_waffles(level_password)
    add_logs()
def add_users():
    """Create the fixture accounts; larry and randall are premium, and the
    'ctf' account has the known password 'password'."""
    fixtures = [
        (1, 'larry', rand_alnum(16), 1),
        (2, 'randall', rand_alnum(16), 1),
        (3, 'alice', rand_alnum(16), 0),
        (4, 'bob', rand_alnum(16), 0),
        (5, 'ctf', 'password', 0),
    ]
    for uid, name, password, premium in fixtures:
        add_user(uid, name, password, premium)
def add_waffles(level_password):
    """Create the waffle catalogue.  The premium 'liege' waffle uses the
    level password as its confirmation code; all others get random codes."""
    add_waffle('liege', 1, level_password)
    others = [('dream', 1), ('veritaffle', 0), ('chicken', 1),
              ('belgian', 0), ('brussels', 0), ('eggo', 0)]
    for name, premium in others:
        add_waffle(name, premium, rand_alnum(14))
def add_logs():
    """Insert a few sample signed order requests into the logs table."""
    orders = [
        (1, {'waffle': 'eggo', 'count': 10, 'lat': 37.351, 'long': -119.827}, None),
        (1, {'waffle': 'chicken', 'count': 2, 'lat': 37.351, 'long': -119.827}, None),
        # one historical entry with an explicit timestamp
        (2, {'waffle': 'dream', 'count': 2, 'lat': 42.39561, 'long': -71.13051},
         datetime(2007, 9, 23, 14, 38, 00)),
        (3, {'waffle': 'veritaffle', 'count': 1, 'lat': 42.376, 'long': -71.116}, None),
    ]
    for user_id, params, date in orders:
        gen_log(user_id, '/orders', params, date=date)
def create_tables():
    """Drop and recreate the users, waffles and logs tables."""
    # users: login accounts; `secret` is used by the API client to sign
    # request bodies (see gen_log).
    c.execute('drop table if exists users')
    c.execute('''
        CREATE TABLE users(
            id int not null primary key,
            name varchar(255) not null,
            password varchar(255) not null,
            premium int not null,
            secret varchar(255) not null,
            unique (name)
        )
    ''')
    # waffles: the catalogue; premium waffles require a premium account.
    c.execute('drop table if exists waffles')
    c.execute('''
        CREATE TABLE waffles(
            name varchar(255) not null primary key,
            premium int not null,
            confirm varchar(255) not null
        )
    ''')
    # logs: raw signed request bodies, one row per API call.
    c.execute('drop table if exists logs')
    c.execute('''
        CREATE TABLE logs(
            user_id int not null,
            path varchar(255) not null,
            body text not null,
            date timestamp not null default current_timestamp
        )
    ''')
    c.execute('create index user_id on logs (user_id)')
    c.execute('create index date on logs (date)')
def add_user(uid, username, password, premium):
    """Insert one user row with a bcrypt-hashed password and a fresh random
    API secret."""
    hashed_password = bcrypt.hashpw(password, bcrypt.gensalt(10))
    row = {
        'id': uid,
        'name': username,
        'password': hashed_password,
        'premium': premium,
        'secret': rand_alnum(14),
    }
    conn.insert('users', row)
def get_user(uid):
    """Return the user row with the given id (whatever select_one yields
    when the row is missing is passed through unchanged)."""
    criteria = {'id': uid}
    return conn.select_one('users', criteria)
def add_waffle(name, premium, confirm):
    """Insert one waffle row into the catalogue."""
    conn.insert('waffles', {'name': name, 'premium': premium, 'confirm': confirm})
def gen_log(user_id, path, params, date=None):
    """Insert a pre-signed request log row for *user_id*.

    The body is produced by the API client library so its signature matches
    the user's stored secret.  *date* overrides the table's default
    timestamp when given.
    """
    user = get_user(user_id)
    # generate signature using client library
    signer = client.Client(None, user_id, user['secret'])
    row = {'user_id': user_id, 'path': path, 'body': signer._make_post(params)}
    if date:
        row['date'] = date
    conn.insert('logs', row)
if __name__ == '__main__':
    # The level password must be supplied on the command line; it becomes
    # the confirmation code of the premium 'liege' waffle.  (Python 2
    # script -- note the print statement.)
    if len(sys.argv) < 2:
        print 'usage: initialize_db.py LEVEL_PASSWORD'
        sys.exit(1)
    main(sys.argv[1])
| 28.115385 | 75 | 0.631464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,311 | 0.358687 |
38dbca903f2a85d1301cee64efa4e83e79d5c1ce | 112 | py | Python | jp.atcoder/abc251/abc251_a/31673235.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc251/abc251_a/31673235.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc251/abc251_a/31673235.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | def main() -> None:
s = input()
print(s * (6 // len(s)))
if __name__ == "__main__":
main()
| 14 | 29 | 0.446429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.089286 |
38dbed609de1a2695c5a854a2a4f693c2e1e7e38 | 286 | py | Python | day3/classwork/1.py | LencoDigitexer/AI | e22f5e63df46c56b57da5191d9536e00c5c50782 | [
"MIT"
] | null | null | null | day3/classwork/1.py | LencoDigitexer/AI | e22f5e63df46c56b57da5191d9536e00c5c50782 | [
"MIT"
] | null | null | null | day3/classwork/1.py | LencoDigitexer/AI | e22f5e63df46c56b57da5191d9536e00c5c50782 | [
"MIT"
] | null | null | null | '''
Вводится последовательность из N чисел. Найти произведение и
количество положительных среди них чисел
'''
proizvedenie = 1
a = input().split(" ")
for i in range(0, len(a)):
proizvedenie *= int(a[i])
print("Произведение " + str(proizvedenie))
print("Кол-во чисел " + str(len(a))) | 28.6 | 60 | 0.695804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 251 | 0.635443 |
38dcfaea3f6eeb906d708162cee888578263698d | 553 | py | Python | Codes/Liam/001_two_sum.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 256 | 2017-10-25T13:02:15.000Z | 2022-02-25T13:47:59.000Z | Codes/Liam/001_two_sum.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 56 | 2017-10-27T01:34:20.000Z | 2022-03-01T00:20:55.000Z | Codes/Liam/001_two_sum.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 83 | 2017-10-25T12:51:53.000Z | 2022-02-15T08:27:03.000Z | # 执行用时 : 348 ms
# 内存消耗 : 13 MB
# 方案:哈希表
class Solution(object):
    def twoSum(self, nums, target):
        """Return indices [i, j] (i < j) of the two entries of *nums* that
        sum to *target*, or None when no such pair exists.

        Single pass with a value -> index map, so each complement lookup
        is O(1) and the whole scan is O(n).

        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        seen = {}  # value -> index of its first occurrence
        for idx, value in enumerate(nums):
            complement = target - value
            # membership test directly on the dict (no .keys() detour)
            if complement in seen:
                return [seen[complement], idx]
            seen[value] = idx
        return None  # explicit: no valid pair found
| 23.041667 | 52 | 0.486438 | 601 | 0.893016 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.496285 |
38deced1bd0383a7623b3e0121291bf13f2bd046 | 9,939 | py | Python | ckanext/doi/lib/metadata.py | NINAnor/ckanext-doi | 2f93c03545885ae0795cea78f49aac66295df763 | [
"MIT"
] | 16 | 2015-01-15T17:20:33.000Z | 2021-04-28T04:26:02.000Z | ckanext/doi/lib/metadata.py | NINAnor/ckanext-doi | 2f93c03545885ae0795cea78f49aac66295df763 | [
"MIT"
] | 34 | 2015-02-18T16:44:06.000Z | 2022-03-03T19:05:11.000Z | ckanext/doi/lib/metadata.py | NINAnor/ckanext-doi | 2f93c03545885ae0795cea78f49aac66295df763 | [
"MIT"
] | 14 | 2015-04-17T10:36:42.000Z | 2022-03-01T08:13:55.000Z | #!/usr/bin/env python3
# encoding: utf-8
#
# This file is part of ckanext-doi
# Created by the Natural History Museum in London, UK
import logging
from ckan.lib.helpers import lang as ckan_lang
from ckan.model import Package
from ckan.plugins import PluginImplementations, toolkit
from ckanext.doi.interfaces import IDoi
from ckanext.doi.lib import xml_utils
from ckanext.doi.lib.errors import DOIMetadataException
from ckanext.doi.lib.helpers import date_or_none, get_site_url, package_get_year
log = logging.getLogger(__name__)
def build_metadata_dict(pkg_dict):
    '''
    Build/extract a basic dict of metadata that can then be passed to build_xml_dict.

    Required DataCite fields (creators, titles, publisher, publicationYear,
    resourceType) are extracted first; extraction errors are recorded rather
    than raised so that IDoi plugins get a chance to fill the gaps.  After the
    plugins have run, outstanding errors on required fields are fatal, while
    errors on optional fields are only logged.

    :param pkg_dict: dict of package details
    :return: dict of metadata ready for build_xml_dict
    :raises DOIMetadataException: if any required field is still missing or
        errored after all IDoi plugins have run
    '''
    metadata_dict = {}

    # collect errors instead of throwing them immediately; some data may not be correctly
    # handled by this base method but will be handled correctly by plugins that implement IDoi
    errors = {}

    # required fields first (identifier will be added later)
    required = {
        'creators': [],
        'titles': [],
        'publisher': None,
        'publicationYear': None,
        'resourceType': None
    }

    def _add_required(key, get_func):
        # evaluate get_func, storing either its value or the raised exception
        try:
            required[key] = get_func()
        except Exception as e:
            errors[key] = e

    # CREATORS
    _add_required('creators', lambda: [{
        'full_name': pkg_dict.get('author')
    }])

    # TITLES
    _add_required('titles', lambda: [{
        'title': pkg_dict.get('title')
    }])

    # PUBLISHER
    _add_required('publisher', lambda: toolkit.config.get('ckanext.doi.publisher'))

    # PUBLICATION YEAR
    _add_required('publicationYear', lambda: package_get_year(pkg_dict))

    # TYPE
    _add_required('resourceType', lambda: pkg_dict.get('type'))

    # now the optional fields
    optional = {
        'subjects': [],
        'contributors': [],
        'dates': [],
        'language': '',
        'alternateIdentifiers': [],
        'relatedIdentifiers': [],
        'sizes': [],
        'formats': [],
        'version': '',
        'rightsList': [],
        'descriptions': [],
        'geolocations': [],
        'fundingReferences': []
    }

    # SUBJECTS
    # use the tag list (both the raw tag_string and the structured tags)
    try:
        tags = pkg_dict.get('tag_string', '').split(',')
        tags += [tag['name'] if isinstance(tag, dict) else tag for tag in pkg_dict.get('tags', [])]
        optional['subjects'] = [{'subject': tag} for tag in sorted({t for t in tags if t != ''})]
    except Exception as e:
        errors['subjects'] = e

    # CONTRIBUTORS
    # use the author and maintainer; no splitting or parsing for either
    # no try/except for this because it's just a simple .get() and if that doesn't work then we
    # want to know
    author = pkg_dict.get('author')
    maintainer = pkg_dict.get('maintainer')
    if author is not None:
        optional['contributors'].append(
            {
                'contributor_type': 'Researcher',
                'full_name': author
            })
    if maintainer is not None:
        optional['contributors'].append({
            'contributor_type': 'DataManager',
            'full_name': maintainer
        })

    # DATES
    # created, updated, and doi publish date
    # NOTE(review): date_errors is collected here but never merged into
    # `errors`, so date extraction failures are silently dropped -- confirm
    # whether that is intentional.
    date_errors = {}
    try:
        optional['dates'].append({
            'dateType': 'Created',
            'date': date_or_none(pkg_dict.get('metadata_created'))
        })
    except Exception as e:
        date_errors['created'] = e
    try:
        optional['dates'].append({
            'dateType': 'Updated',
            'date': date_or_none(pkg_dict.get('metadata_modified'))
        })
    except Exception as e:
        date_errors['updated'] = e
    if 'doi_date_published' in pkg_dict:
        try:
            optional['dates'].append({
                'dateType': 'Issued',
                'date': date_or_none(pkg_dict.get('doi_date_published'))
            })
        except Exception as e:
            date_errors['doi_date_published'] = e

    # LANGUAGE
    # use language set in CKAN
    try:
        optional['language'] = ckan_lang()
    except Exception as e:
        errors['language'] = e

    # ALTERNATE IDENTIFIERS
    # add permalink back to this site
    try:
        permalink = f'{get_site_url()}/dataset/{pkg_dict["id"]}'
        optional['alternateIdentifiers'] = [{
            'alternateIdentifierType': 'URL',
            'alternateIdentifier': permalink
        }]
    except Exception as e:
        errors['alternateIdentifiers'] = e

    # RELATED IDENTIFIERS
    # nothing relevant in default schema

    # SIZES
    # sum up given sizes from resources in the package and convert from bytes to kilobytes
    try:
        resource_sizes = [r.get('size') or 0 for r in pkg_dict.get('resources', []) or []]
        total_size = [f'{int(sum(resource_sizes) / 1024)} kb']
        optional['sizes'] = total_size
    except Exception as e:
        errors['sizes'] = e

    # FORMATS
    # list unique formats from package resources
    try:
        formats = list(
            set(filter(None, [r.get('format') for r in pkg_dict.get('resources', []) or []])))
        optional['formats'] = formats
    except Exception as e:
        errors['formats'] = e

    # VERSION
    # doesn't matter if there's no version, it'll get filtered out later
    optional['version'] = pkg_dict.get('version')

    # RIGHTS
    # use the package license and get details from CKAN's license register
    license_id = pkg_dict.get('license_id', 'notspecified')
    try:
        if license_id != 'notspecified' and license_id is not None:
            license_register = Package.get_license_register()
            license = license_register.get(license_id)
            if license is not None:
                optional['rightsList'] = [
                    {
                        'url': license.url,
                        'identifier': license.id
                    }
                ]
    except Exception as e:
        errors['rightsList'] = e

    # DESCRIPTIONS
    # use package notes
    optional['descriptions'] = [
        {
            'descriptionType': 'Other',
            'description': pkg_dict.get('notes', '')
        }
    ]

    # GEOLOCATIONS
    # nothing relevant in default schema

    # FUNDING
    # nothing relevant in default schema

    metadata_dict.update(required)
    metadata_dict.update(optional)

    for plugin in PluginImplementations(IDoi):
        # implementations should remove relevant errors from the errors dict if they successfully
        # handle an item
        metadata_dict, errors = plugin.build_metadata_dict(pkg_dict, metadata_dict, errors)

    # any required key that is still None (and not already errored) is an error
    for k in required:
        if metadata_dict.get(k) is None and errors.get(k) is None:
            errors[k] = DOIMetadataException('Required field cannot be None')

    required_errors = {k: e for k, e in errors.items() if k in required}
    if len(required_errors) > 0:
        error_msg = f'Could not extract metadata for the following required keys: ' \
                    f'{", ".join(required_errors)}'
        log.exception(error_msg)
        for k, e in required_errors.items():
            log.exception(f'{k}: {e}')
        raise DOIMetadataException(error_msg)

    optional_errors = {k: e for k, e in errors.items() if k in optional}
    # FIX: this previously re-tested len(required_errors), which is always 0
    # here (a non-empty dict raised above), so optional-field errors were
    # never actually logged.
    if len(optional_errors) > 0:
        error_msg = f'Could not extract metadata for the following optional keys: ' \
                    f'{", ".join(optional_errors)}'
        log.debug(error_msg)
        for k, e in optional_errors.items():
            log.debug(f'{k}: {e}')

    return metadata_dict
def build_xml_dict(metadata_dict):
    '''
    Builds a dictionary that can be passed directly to datacite.schema42.tostring() to generate xml.
    Previously named metadata_to_xml but renamed as it's not actually producing any xml,
    it's just formatting the metadata so a separate function can then generate the xml.
    :param metadata_dict: a dict of metadata generated from build_metadata_dict
    :return: dict that can be passed directly to datacite.schema42.tostring()
    '''
    # required fields first (DOI will be added later)
    xml_dict = {
        'creators': [],
        'titles': metadata_dict.get('titles', []),
        'publisher': metadata_dict.get('publisher'),
        'publicationYear': str(metadata_dict.get('publicationYear')),
        'types': {
            'resourceType': metadata_dict.get('resourceType'),
            'resourceTypeGeneral': 'Dataset'
        },
        'schemaVersion': 'http://datacite.org/schema/kernel-4',
    }
    for creator in metadata_dict.get('creators', []):
        xml_dict['creators'].append(xml_utils.create_contributor(**creator))
    optional = [
        'subjects',
        'contributors',
        'dates',
        'language',
        'alternateIdentifiers',
        'relatedIdentifiers',
        'sizes',
        'formats',
        'version',
        'rightsList',
        'descriptions',
        'geolocations',
        'fundingReferences'
    ]
    for k in optional:
        v = metadata_dict.get(k)
        # Treat None, empty, and unsized values as "nothing to copy over".
        # NOTE: this was a bare `except:` upstream, which also swallowed
        # KeyboardInterrupt/SystemExit; len() on an unsized value raises
        # TypeError, which is the case we actually want to absorb.
        try:
            has_value = v is not None and len(v) > 0
        except TypeError:
            has_value = False
        if not has_value:
            continue
        if k == 'contributors':
            # Contributors need the same normalisation as creators above.
            xml_dict['contributors'] = [
                xml_utils.create_contributor(**contributor) for contributor in v
            ]
        elif k == 'dates':
            # Dates must be serialised as strings; copy each entry so the
            # caller's metadata_dict is not mutated in place.
            item = []
            for date_entry in v:
                date_entry_copy = dict(date_entry)
                date_entry_copy['date'] = str(date_entry_copy['date'])
                item.append(date_entry_copy)
            xml_dict[k] = item
        else:
            xml_dict[k] = v
    # Give plugins a final chance to adjust the xml-ready dict.
    for plugin in PluginImplementations(IDoi):
        xml_dict = plugin.build_xml_dict(metadata_dict, xml_dict)
    return xml_dict
| 32.06129 | 100 | 0.598652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,885 | 0.390884 |
38e09a14bcf404479b71e05553e37caf4d388eb7 | 3,417 | py | Python | oo/carros.py | AdnirJunior/pythonbirds | bdc848291c3c6f22e6647768103bf7bd2863145b | [
"MIT"
] | null | null | null | oo/carros.py | AdnirJunior/pythonbirds | bdc848291c3c6f22e6647768103bf7bd2863145b | [
"MIT"
] | null | null | null | oo/carros.py | AdnirJunior/pythonbirds | bdc848291c3c6f22e6647768103bf7bd2863145b | [
"MIT"
] | null | null | null |
"""Voce deve criar uma classe carro que vai assumir dois atributos compostos por duas classes:
1) Motor
2) Direcao
O motor terá a responsabilidade de controlar a velocidade.
Ele Oferece os seguintes atributos:
1) Atributo de dado velocidade
2) Método acelerar, que deverá incrementar 1 unidade
3) Método frear, que deverá decrementar a velocidade de 2 unidades
A Direcao tera a responsabilidade de controlar a direcao, ela oferece os seguintes atributos:
1) Valor de direcao com valores possiveis norte, sul, leste, oeste
2) Método girar a direita
3) Método girar a esquerda
N
O L
S
exemplo:
# Testando motor
>>> motor = Motor()
>>> motor.velocidade
0
>>> motor.acelerar()
>>> motor.velocidade
1
>>> motor.acelerar()
>>> motor.velocidade
2
>>> motor.acelerar()
>>> motor.velocidade
3
>>> motor.frear()
>>> motor.velocidade
1
>>> motor.frear()
>>> motor.velocidade
0
>>> #Testando direcao
>>> direcao = Direcao()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Norte'
>>> carro = Carro(direcao, motor)
>>> carro.calcular_velocidade()
0
>>> carro.acelerar()
>>> carro.calcular_velocidade()
1
>>> carro.acelerar()
>>> carro.calcular_velocidade()
2
>>> carro.frear()
>>> carro.calcular_velocidade()
0
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_direita()
>>> carro.calcular_direcao()
'Leste'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Oeste'
"""
NORTE = 'Norte'
LESTE = 'Leste'
SUL = 'Sul'
OESTE = 'Oeste'


class Direcao:
    """Tracks a cardinal heading, starting at north.

    Turning right cycles Norte -> Leste -> Sul -> Oeste -> Norte;
    turning left cycles in the opposite order.
    """

    # Successor tables: turning right follows the clockwise order,
    # turning left follows the counter-clockwise order.
    _DIREITA = {NORTE: LESTE, LESTE: SUL, SUL: OESTE, OESTE: NORTE}
    _ESQUERDA = {NORTE: OESTE, OESTE: SUL, SUL: LESTE, LESTE: NORTE}

    def __init__(self):
        # Every Direcao starts out facing north.
        self.valor = NORTE

    def girar_a_direita(self):
        """Rotate 90 degrees clockwise (unknown headings are left unchanged)."""
        self.valor = self._DIREITA.get(self.valor, self.valor)

    def girar_a_esquerda(self):
        """Rotate 90 degrees counter-clockwise (unknown headings are left unchanged)."""
        self.valor = self._ESQUERDA.get(self.valor, self.valor)
class Carro:
    """A car composed of a steering unit (direcao) and an engine (motor).

    Every operation is a straight delegation to the matching component.
    """

    def __init__(self, direcao, motor):
        self.direcao = direcao
        self.motor = motor

    # --- speed: delegated to the engine ---------------------------------
    def calcular_velocidade(self):
        """Current speed as reported by the engine."""
        return self.motor.velocidade

    def acelerar(self):
        """Ask the engine to speed up."""
        self.motor.acelerar()

    def frear(self):
        """Ask the engine to brake."""
        self.motor.frear()

    # --- heading: delegated to the steering unit ------------------------
    def calcular_direcao(self):
        """Current heading as reported by the steering unit."""
        return self.direcao.valor

    def girar_a_direita(self):
        """Ask the steering unit to turn right."""
        self.direcao.girar_a_direita()

    def girar_a_esquerda(self):
        """Ask the steering unit to turn left."""
        self.direcao.girar_a_esquerda()
class Motor:
    """Engine: speed starts at zero; accelerating adds one unit, braking
    removes two units but never lets the speed drop below zero."""

    def __init__(self):
        self.velocidade = 0

    def acelerar(self):
        """Increase speed by one unit."""
        self.velocidade = self.velocidade + 1

    def frear(self):
        """Decrease speed by two units, clamped at zero."""
        self.velocidade = max(0, self.velocidade - 2)
38e2780c1d8295a2aa1e5df333294d92a0dd54a1 | 3,482 | py | Python | src/icon/exception.py | goldworm-icon/gw-iconsdk | b54722b0d19f286c0d0b8d751ff8cbcabd29f86a | [
"Apache-2.0"
] | 2 | 2019-05-28T22:37:49.000Z | 2020-01-07T11:40:15.000Z | src/icon/exception.py | goldworm-icon/gw-iconsdk | b54722b0d19f286c0d0b8d751ff8cbcabd29f86a | [
"Apache-2.0"
] | 1 | 2020-02-04T12:05:31.000Z | 2020-02-04T12:05:31.000Z | src/icon/exception.py | goldworm-icon/gw-iconsdk | b54722b0d19f286c0d0b8d751ff8cbcabd29f86a | [
"Apache-2.0"
] | 1 | 2019-08-01T08:42:15.000Z | 2019-08-01T08:42:15.000Z | # -*- coding: utf-8 -*-
from enum import IntEnum, unique
from typing import Optional, Any
class SDKException(Exception):
    """Base class of the SDK exception hierarchy.

    Carries an error ``Code``, a human-readable message (falling back to the
    code's pretty name when none is supplied) and optional caller data.
    """

    @unique
    class Code(IntEnum):
        """Numeric error categories used by the SDK exceptions."""
        OK = 0
        KEY_STORE_ERROR = 1
        ADDRESS_ERROR = 2
        BALANCE_ERROR = 3
        DATA_TYPE_ERROR = 4
        JSON_RPC_ERROR = 5
        ZIP_MEMORY_ERROR = 6
        URL_ERROR = 7
        BUILDER_ERROR = 8
        ARG_ERROR = 9
        HOOK_ERROR = 10

        def __str__(self) -> str:
            # e.g. KEY_STORE_ERROR -> "Key store error"
            return self.name.capitalize().replace("_", " ")

    def __init__(self, code: Code, message: Optional[str], user_data: Any = None):
        self._code = code
        # Non-string messages (including None) fall back to the code's name.
        self._message = message if isinstance(message, str) else str(code)
        self._user_data = user_data

    @property
    def code(self) -> "SDKException.Code":
        return self._code

    @property
    def message(self) -> str:
        return self._message

    @property
    def user_data(self) -> Any:
        return self._user_data

    def __str__(self) -> str:
        return f"{self.message} ({self._code.value})"
class KeyStoreException(SDKException):
    """Raised when a keystore file cannot be created or loaded."""

    def __init__(self, message: Optional[str], user_data: Any = None):
        super().__init__(
            SDKException.Code.KEY_STORE_ERROR, message=message, user_data=user_data
        )
class AddressException(SDKException):
    """Raised when an address is malformed or otherwise invalid."""

    def __init__(self, message: Optional[str], user_data: Any = None):
        super().__init__(
            SDKException.Code.ADDRESS_ERROR, message=message, user_data=user_data
        )
class BalanceException(SDKException):
    """Raised when a balance value is invalid."""

    def __init__(self, message: Optional[str], user_data: Any = None):
        super().__init__(
            SDKException.Code.BALANCE_ERROR, message=message, user_data=user_data
        )
class DataTypeException(SDKException):
    """Raised when a value has an invalid data type."""

    def __init__(self, message: Optional[str], user_data: Any = None):
        super().__init__(
            SDKException.Code.DATA_TYPE_ERROR, message=message, user_data=user_data
        )
class ArgumentException(SDKException):
    """Raised when an argument passed to an SDK call is invalid."""

    def __init__(self, message: Optional[str], user_data: Any = None):
        super().__init__(
            SDKException.Code.ARG_ERROR, message=message, user_data=user_data
        )
class JSONRPCException(SDKException):
    """Raised when a JSON-RPC error response is received."""

    def __init__(self, message: Optional[str], user_data: Any = None):
        super().__init__(
            SDKException.Code.JSON_RPC_ERROR, message=message, user_data=user_data
        )
class ZipException(SDKException):
    """Raised when writing a zip archive in memory fails."""

    def __init__(self, message: Optional[str], user_data: Any = None):
        super().__init__(
            SDKException.Code.ZIP_MEMORY_ERROR, message=message, user_data=user_data
        )
class URLException(SDKException):
    """Raised for an invalid URL."""

    def __init__(self, message: Optional[str], user_data: Any = None):
        super().__init__(
            SDKException.Code.URL_ERROR, message=message, user_data=user_data
        )
class BuilderException(SDKException):
    """Raised when an SDK builder method is misused."""

    def __init__(self, message: Optional[str], user_data: Any = None):
        super().__init__(
            SDKException.Code.BUILDER_ERROR, message=message, user_data=user_data
        )
class HookException(SDKException):
    """Raised when a hook invocation fails."""

    def __init__(self, message: Optional[str], user_data: Any = None):
        super().__init__(
            SDKException.Code.HOOK_ERROR, message=message, user_data=user_data
        )
class TimeoutException(SDKException):
    """Raised when an operation times out."""

    def __init__(self, message: Optional[str], user_data: Any = None):
        # NOTE(review): reuses Code.HOOK_ERROR, identical to HookException —
        # this looks like a copy-paste; consider a dedicated TIMEOUT code in
        # SDKException.Code. Kept as-is to preserve behaviour.
        super().__init__(
            SDKException.Code.HOOK_ERROR, message=message, user_data=user_data
        )
| 30.017241 | 82 | 0.681505 | 3,355 | 0.963527 | 0 | 0 | 593 | 0.170304 | 0 | 0 | 397 | 0.114015 |
38e34f16224fab03d0d16ac00cd37fd07c95962e | 528 | py | Python | test/test_add_movie.py | Dananas732/php4dvd_lyzlov | 9fa3deb51eda7406475fdc4cf8faec8eb84aa61e | [
"Apache-2.0"
] | null | null | null | test/test_add_movie.py | Dananas732/php4dvd_lyzlov | 9fa3deb51eda7406475fdc4cf8faec8eb84aa61e | [
"Apache-2.0"
] | null | null | null | test/test_add_movie.py | Dananas732/php4dvd_lyzlov | 9fa3deb51eda7406475fdc4cf8faec8eb84aa61e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.movie import Movie
from model.user import User
from fixture.selenium_fixture import app
def test_add_movie(app):
    """Log in as admin, add a movie, and compare the movie list before/after.

    NOTE(review): ``assert old_list == new_list`` claims the list is
    *unchanged* after adding a movie, which looks inverted — presumably this
    should verify the new movie appears (e.g.
    ``len(old_list) + 1 == len(new_list)``). Confirm against the
    application's duplicate-handling behaviour before changing it.
    """
    app.session.login(User.Admin())
    old_list = app.movie.get_movie_list()
    app.movie.add_movie(Movie(film_name='name', film_year='2016'))
    new_list = app.movie.get_movie_list()
    assert old_list == new_list
    app.session.logout()
def test_add_movie_empty(app):
    """Log in as admin and exercise the empty "add movie" form.

    The actual field validation checks are delegated to
    ``app.movie.check_field_in_add_form()``.
    """
    app.session.login(User.Admin())
    app.movie.check_field_in_add_form()
    app.session.logout()
| 26.4 | 66 | 0.721591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.066288 |
38e58db88d17dcc51ab2794712747a7a6f27aee6 | 3,752 | py | Python | main.py | dashdeckers/Wildfire-Control-Python | 8ace34e389b4d177b84efe964e8d327a82eb3ed6 | [
"MIT"
] | 2 | 2019-03-21T12:37:00.000Z | 2021-04-11T13:38:57.000Z | main.py | dashdeckers/Wildfire-Control-Python | 8ace34e389b4d177b84efe964e8d327a82eb3ed6 | [
"MIT"
] | 3 | 2020-01-28T22:37:00.000Z | 2022-02-10T00:15:14.000Z | main.py | dashdeckers/Wildfire-Control-Python | 8ace34e389b4d177b84efe964e8d327a82eb3ed6 | [
"MIT"
] | null | null | null | # Argument handling
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--run", action="store_true",
help="Start the learning process")
parser.add_argument("-m", "--memories", type=int, default=100,
help="Number of runs of demonstration data to initialize with")
parser.add_argument("-e", "--episodes", type=int, default=10000,
help="Number of episodes / runs to learn for")
parser.add_argument("-t", "--type", type=str, default="DQN",
choices=["DQN", "SARSA", "DDQN", "BOTH", "Baseline", "Human"],
help="The algorithm to use")
parser.add_argument("-n", "--name", type=str, default="no_name",
help="A custom name to give the saved log and model files")
args = parser.parse_args()
if args.run and args.name == "no_name":
parser.error("You should provide a name when running a learning session")
if args.type in ["Baseline", "DQN", "SARSA", "DDQN", "BOTH"] and args.episodes == 0:
parser.error("You should specify the number of episodes for the algorithm")
# Suppress the many unnecessary TensorFlow warnings
import os, sys
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)
# Create the simulation
from Simulation.forest_fire import ForestFire
forestfire = ForestFire()
# Start learning straight away
if args.run:
if args.type == "Baseline":
print(f"Running Baseline with {args.episodes} episodes")
else:
print(f"Running {args.type} with {args.memories} "
f"memories and {args.episodes} episodes")
if args.type in ["DQN", "Baseline"]:
from DQN import DQN
if args.type == "SARSA":
from DQN_SARSA import DQN_SARSA as DQN
if args.type == "DDQN":
from DQN_DUEL import DQN_DUEL as DQN
if args.type == "BOTH":
from DQN_BOTH import DQN_BOTH as DQN
Agent = DQN(forestfire, args.name)
if args.type == "Baseline":
Agent.collect_memories(args.episodes, perform_baseline=True)
else:
Agent.collect_memories(args.memories)
Agent.learn(args.episodes)
# Don't start learning
else:
# Run the simulation in human mode
if args.type == "Human":
from misc import run_human
run_human(forestfire)
# Just import everything for interactive mode
else:
from misc import run_human, time_simulation_run
from DQN import DQN
from DQN_SARSA import DQN_SARSA
from DQN_DUEL import DQN_DUEL
from DQN_BOTH import DQN_BOTH
# Create the agents
DQN = DQN(forestfire, verbose=False)
DQN_SARSA = DQN_SARSA(forestfire, verbose=False)
DQN_DUEL = DQN_DUEL(forestfire, verbose=False)
DQN_BOTH = DQN_BOTH(forestfire, verbose=False)
# Get a list of imported algorithms to play with
options = [o for o in dir() \
if not o.startswith("__") \
and not o in ["os", "code", "tf", "argparse",
"args", "parser", "ForestFire"]]
# Display those algorithms for ease of use
msg = (
f"\nImported the following functions and algorithms for interactive mode:"
f"\n{[o for o in options]}\n"
f"Load a model with .load_model, play optimally with .play_optimal.\n"
)
# Drop the user in the interpreter, if the script is not already called with -i
if sys.flags.interactive:
print(msg)
else:
import code
code.interact(banner=msg, local=locals())
| 36.427184 | 88 | 0.61194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,366 | 0.364072 |
38e5c75c5a33d505486249e9804993dae69406b3 | 20,561 | py | Python | util/job_launching/procman.py | LauXy/accel-sim-framework | 4c2bf09a79d6b57bb10fe1898700930a5dd5531f | [
"BSD-2-Clause"
] | 88 | 2020-07-15T22:07:38.000Z | 2022-03-29T03:52:55.000Z | util/job_launching/procman.py | LauXy/accel-sim-framework | 4c2bf09a79d6b57bb10fe1898700930a5dd5531f | [
"BSD-2-Clause"
] | 68 | 2020-08-12T13:44:59.000Z | 2022-03-28T19:34:56.000Z | util/job_launching/procman.py | LauXy/accel-sim-framework | 4c2bf09a79d6b57bb10fe1898700930a5dd5531f | [
"BSD-2-Clause"
] | 54 | 2020-07-24T21:05:56.000Z | 2022-03-31T13:28:23.000Z | #!/usr/bin/env python
# 2020 (c) Tim Rogers, Purdue University
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution. Neither the name of
# The University of British Columbia nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# This file implements ProcMan:
# A simple process manager that allows you to queue up tasks and launch them
# once the previous tasks are done. This is useful if you want to launch
# a few hundred tests, but only have 40 cores, you can queue them all up
# using the ProcMan, then let it continuously run 40 at a time.
# This is essentially a poor-man's torque, slurm, condor, etc for a single node.
# As the goal of proc-man is a dependency-free single-node version of slurm/torque
# it is used in a similar way. Replace:
# sbatch <file decribing task>
# qsub <file describing task>
# with
# ./procmany.py <file describing task>
# After all the processes you want are submitted, start the procman using:
# ./procman.py -S
# This will create a new process that launchs and manages all the tasks you previously queued
# To avoid inter-process synchronization issues, procman operates using a
# producer/consumer model and once a procman has been started, new work cannot
# be added to that procman. However, more than one procman can run at once. That is,
# you can call:
# ./procman.py mybashA.sh
# ...
# ./procman.py mybashZ.sh
# ./procman.py -S
# ./procman.py mybash1.sh
# ...
# ./procman.py mybash26.sh
# ./procman.py -S
# And it will work, launching 2 procmans that loosely co-ordinate resource usage.
# By default procman will attempt to launch as many jobs as there are cores on the machine
# this can be changes with the "-c <numCores>" option.
#
# Some other useful commands:
# ./procman.py -s # launches a self-test to confirm that procman is working (takes 1-2 mins)
# ./procman.py -p # prints the state of all procmans and their jobs
# ./procman.py -k # kill all the jobs procman is running
# ./procman.py --help # prints all the options available
#
# NOTE: procman only works when jobs are submitted from one process. i.e.
# the user cannot spawn 2 processes and have each process concurrently
# attempt to enque work. Supporting such a system adds more complication
# and is not a common case. Also procman is designed for managing one user's
# processes and knows nothing about what other users are doing on the machine.
from optparse import OptionParser
import pickle
import subprocess
from subprocess import Popen, PIPE
import common
import os, signal
import stat
import time
import psutil
import shutil
import datetime
import re
import socket
import sys
import glob
import copy
this_directory = os.path.dirname(os.path.realpath(__file__)) + "/"
procManStateFolder = os.path.join(this_directory, "procman")
procManStateFile = os.path.join(procManStateFolder, "procman.{0}.pickle".format(socket.gethostname().strip()))
class Job:
    """A single managed task: the command to launch plus its bookkeeping."""

    def __init__(self, outF, errF, workingDir, command):
        # Launch parameters.
        self.outF = outF              # path that captures the job's stdout
        self.errF = errF              # path that captures the job's stderr
        self.workingDir = workingDir  # cwd the command is launched from
        self.command = command
        # Runtime bookkeeping, filled in by the ProcMan once launched.
        self.procId = None
        self.POpenObj = None
        self.maxVmSize = 0
        self.runningTime = 0
        self.status = "WAITING_TO_RUN"
        self.name = None
        self.id = None
        self.hostname = "UNKNOWN"

    def string(self):
        """Render the job's full state on a single line."""
        return (
            "status={status}: [name={name},procId={procId},"
            "maxVmSize={maxVmSize},runningTime={runningTime},"
            "outF={outF},errF={errF},workingDir={workingDir},"
            "command={command}]".format(
                status=self.status,
                name=self.name,
                procId=self.procId,
                maxVmSize=self.maxVmSize,
                runningTime=self.runningTime,
                outF=self.outF,
                errF=self.errF,
                workingDir=self.workingDir,
                command=self.command,
            )
        )

    def __str__(self):
        return self.string()

    def __repr__(self):
        return self.string()
class ProcMan:
def __init__(self, jobLimit, pickleFile):
self.initialize(jobLimit, pickleFile)
def __init__(self, jobLimit):
self.initialize(jobLimit, procManStateFile)
def saveState(self):
pickle.dump(self, open(self.pickleFile, "w+"))
def clear(self):
if not self.mutable or len(self.activeJobs) > 0 or len(self.completeJobs) > 0:
sys.exit("ProcMans that have been started should not be cleared")
del self.queuedJobs [:]
def initialize(self, jobLimit, pickleFile):
self.queuedJobs = []
self.activeJobs = {}
self.completeJobs = {}
self.jobLimit = jobLimit
self.nextJobId = 1
self.tickingProcess = None
self.mutable = True
self.pickleFile = pickleFile
def queueJob(self, job):
if not self.mutable:
sys.exit("This ProcMan has already been started. No new jobs can be queued.")
job.id = self.nextJobId
self.queuedJobs.append(job)
self.nextJobId += 1
return job.id
def spawnProcMan(self, sleepTime):
if not self.mutable:
sys.exit("This ProcMan has already been started. No new spawning can occur.")
shutil.copy(self.pickleFile, self.pickleFile + ".tmp")
p = Popen([__file__,"-f", self.pickleFile + ".tmp", "-t", str(sleepTime)],
cwd=this_directory
)
print "ProcMan spawned [pid={0}]".format(p.pid)
def killJobs(self):
print "Killing {0} jobs".format(len(self.activeJobs))
for jid, activeJob in self.activeJobs.iteritems():
try:
p = psutil.Process(activeJob.procId)
except (psutil.NoSuchProcess,psutil.AccessDenied) as e:
print e
continue
for child in p.children(recursive=True):
os.kill(child.pid,9)
os.kill(activeJob.procId,9)
def tick(self):
if self.tickingProcess == None:
self.tickingProcess = os.getpid()
self.pickleFile = self.pickleFile + ".{0}".format(self.tickingProcess)
elif self.tickingProcess != os.getpid():
sys.exit("To support concurrent ProcMans in different processes, each procman can only be ticked by one process")
self.mutable = False
# test jobs for completion
jobsMoved = set()
for jid, activeJob in self.activeJobs.iteritems():
jobActive = True
# for an active session, need to poll or else the thing
# never dies. If ProcMan is launched with just a file (and
# did not launch the processes itself, we can just use the kill
# with CONT signal to see if its still alive.
if activeJob.POpenObj != None:
activeJob.POpenObj.poll()
try:
os.kill(activeJob.procId,signal.SIGCONT)
except OSError:
jobActive = False
if jobActive:
try:
p = psutil.Process(activeJob.procId)
mem = p.memory_info().vms
for child in p.children(recursive=True):
mem += child.memory_info().vms
activeJob.maxVmSize = max(mem, activeJob.maxVmSize)
activeJob.runningTime = \
datetime.datetime.now() \
- datetime.datetime.fromtimestamp(p.create_time())
activeJob.runningTime = str(activeJob.runningTime).split('.')[0]
except (psutil.NoSuchProcess,psutil.AccessDenied) as e:
print e
else:
activeJob.status = "COMPLETE_NO_OTHER_INFO"
self.completeJobs[activeJob.id] = activeJob
jobsMoved.add(activeJob.id)
for jobId in jobsMoved:
del self.activeJobs[jobId]
othersCores = self.getCPUCountFromOtherProcMans()
# launch new jobs when old ones complete
while (len(self.activeJobs) + othersCores) < self.jobLimit and len(self.queuedJobs) > 0:
newJob = self.queuedJobs.pop(0)
newJob.POpenObj = Popen(newJob.command,
stdout=open(newJob.outF,"w+"),
stderr=open(newJob.errF,"w+"),
cwd=newJob.workingDir)
newJob.procId = newJob.POpenObj.pid
newJob.hostname = socket.gethostname().strip()
newJob.status = "RUNNING"
self.activeJobs[newJob.id] = newJob
def getCPUCountFromOtherProcMans(self):
othersCores = 0
for pickleFile in glob.glob(os.path.join(os.path.dirname(self.pickleFile),"*pickle*")):
if pickleFile != self.pickleFile:
otherProcMan = pickle.load(open(pickleFile))
othersCores += len(otherProcMan.activeJobs)
return othersCores
def getState(self):
string = "queuedJobs={0}, activeJobs={1}, completeJobs={2}\n"\
.format(len(self.queuedJobs), len(self.activeJobs), len(self.completeJobs))
string += "\nqueuedJobs:\n"
for job in self.queuedJobs:
string += "\t{0}\n".format(job)
string += "\nactiveJobs:\n"
for jid,job in self.activeJobs.iteritems():
string += "\t{0}\n".format(job)
string += "\ncompleteJobs:\n"
for jid,job in self.completeJobs.iteritems():
string += "\t{0}\n".format(job)
return string
def getJob(self, jobId):
if jobId in self.activeJobs:
return self.activeJobs[jobId]
elif jobId in self.completeJobs:
return self.completeJobs[jobId]
else:
for job in self.queuedJobs:
if jobId == job.id:
return job
return None
def complete(self):
return len(self.queuedJobs) == 0 and len(self.activeJobs) == 0
def selfTest():
testPath = os.path.join(this_directory, "test")
if not os.path.isdir(testPath):
os.makedirs(testPath)
jobScript = os.path.join(testPath, "testScript.sh")
open(jobScript,"w+").write("#!/bin/bash\nsleep 20s")
st = os.stat(jobScript)
os.chmod(jobScript, st.st_mode | stat.S_IEXEC)
print "Starting synchronous selfTest"
procMan = ProcMan(4)
for i in range(5):
procMan.queueJob(
Job(
outF=os.path.join(testPath, "out.{0}.txt".format(i)),
errF=os.path.join(testPath, "err.{0}.txt".format(i)),
workingDir=testPath,
command=jobScript
)
)
print procMan.getState()
while not procMan.complete():
procMan.tick()
print procMan.getState()
time.sleep(3)
print "Passed synchronous selfTest"
print "Starting asynchronous selfTest"
for i in range(int(psutil.cpu_count()*1.2)):
jobScript = os.path.join(testPath, "testSlurm.{0}.sh".format(i))
open(jobScript,"w+").write("#!/bin/bash\n"\
"#SBATCH -J test.{0}\n".format(i) +\
"#SBATCH --output={0}\n".format(os.path.join(testPath, "out.{0}.txt".format(i))) +\
"#SBATCH --error={0}\n".format(os.path.join(testPath, "out.{0}.txt".format(i))) +\
"sleep 20s")
st = os.stat(jobScript)
os.chmod(jobScript, st.st_mode | stat.S_IEXEC)
out, err = subprocess.Popen([os.path.join(this_directory, "procman.py"),\
jobScript], stdout=PIPE).communicate()
if err != None:
sys.exit(err)
print "Queued Job {0}".format(out)
print "Starting Jobs"
subprocess.Popen([os.path.join(this_directory, "procman.py"),\
"-S", "-t", "5"], stdout=PIPE)
out = ""
while out != "Nothing Active":
time.sleep(1)
out, err = subprocess.Popen([os.path.join(this_directory, "procman.py"),\
"-p"], stdout=PIPE).communicate()
out = out.strip()
if err != None:
sys.exit(err)
print out
print "Asynchronous test passed"
print "Starting multi ProcMan test"
JOBS_PER_PROCMAN = int(psutil.cpu_count()*1.2) / 4
for j in range(4):
for i in range(JOBS_PER_PROCMAN):
jobNum = j*JOBS_PER_PROCMAN + i
jobScript = os.path.join(testPath, "testSlurm.{0}.sh".format(jobNum))
open(jobScript,"w+").write("#!/bin/bash\n"\
"#SBATCH -J test.{0}\n".format(jobNum) +\
"#SBATCH --output={0}\n".format(os.path.join(testPath, "out.{0}.txt".format(jobNum))) +\
"#SBATCH --error={0}\n".format(os.path.join(testPath, "out.{0}.txt".format(jobNum))) +\
"sleep 20s")
st = os.stat(jobScript)
os.chmod(jobScript, st.st_mode | stat.S_IEXEC)
out, err = subprocess.Popen([os.path.join(this_directory, "procman.py"),\
jobScript], stdout=PIPE).communicate()
if err != None:
sys.exit(err)
print "ProcMan {0}: Queued Job {0}".format(j, out)
print "ProcMan {0}: Starting Jobs".format(j)
subprocess.Popen([os.path.join(this_directory, "procman.py"),\
"-S", "-t", "5"], stdout=PIPE)
out = ""
while out != "Nothing Active":
time.sleep(1)
out, err = subprocess.Popen([os.path.join(this_directory, "procman.py"),\
"-p"], stdout=PIPE).communicate()
out = out.strip()
if err != None:
sys.exit(err)
print out
print "Multi-ProcMan test passed"
shutil.rmtree(testPath)
def main():
parser = OptionParser()
parser.add_option("-s", "--selfTest", dest="selfTest",
help="launched the selftester.", action="store_true")
parser.add_option("-f", "--file", dest="file",
help="File with the processes to manage.", default=procManStateFile)
parser.add_option("-t", "--sleepTime", dest="sleepTime",
help="Tune how often. ProcMan looks for completed jobs",
type=int, default=30)
parser.add_option("-c", "--cores", dest="cores",
help="how many cores to use",
type=int, default=psutil.cpu_count())
parser.add_option("-S", "--start", dest="start",action="store_true",
help="Just spawn the manager")
parser.add_option("-p", "--printState", dest="printState",action="store_true",
help="Print the state of the manager")
parser.add_option("-k", "--kill", dest="kill",action="store_true",
help="Kill all managed processes")
parser.add_option("-j", "--procManForJob", dest="procManForJob",default=None, type=int,
help="Return the path of the pickle file for the ProcMan managing this job." )
(options, args) = parser.parse_args()
if options.selfTest:
selfTest()
elif options.kill:
procmanfiles = glob.glob(options.file + ".*")
for f in procmanfiles:
print "Killing active jobs in Procman: {0}".format(os.path.basename(f))
procMan = pickle.load(open(f))
procMan.killJobs()
elif options.printState:
numProcMans = 0
numQueued = 0
numActive = 0
numComplete = 0
procmanfiles = glob.glob(options.file + ".*")
if len(procmanfiles) == 0:
print "Nothing Active"
else:
for f in procmanfiles:
numProcMans += 1
procMan = pickle.load(open(f))
numQueued += len(procMan.queuedJobs)
numActive += len(procMan.activeJobs)
numComplete += len(procMan.completeJobs)
print "Procman: {0}".format(os.path.basename(f))
print procMan.getState()
print "Total Procmans={0}, Total Queued={1}, Total Running={2}, Total Complete={3}"\
.format(numProcMans, numQueued, numActive, numComplete)
elif options.start:
if not os.path.exists(options.file):
sys.exit("Nothing to start {0} does not exist".format(options.file))
procMan = pickle.load(open(options.file))
procMan.spawnProcMan(options.sleepTime)
procMan.clear()
procMan.saveState()
elif options.procManForJob != None:
procmanfiles = glob.glob(options.file + ".*")
for f in procmanfiles:
procMan = pickle.load(open(f))
j = procMan.getJob(options.procManForJob)
if j != None:
print procMan.pickleFile
break
elif len(args) == 1:
# To make this work the same as torque and slurm - if you just give it one argument,
# we assume it's a pointer to a job file you want to submit.
if os.path.exists(options.file):
procMan = pickle.load(open(options.file))
if not procMan.mutable:
sys.exit("Error - this procman has already started")
else:
procMan = ProcMan(options.cores)
exec_file = args[0]
st = os.stat(exec_file)
os.chmod(exec_file, st.st_mode | stat.S_IEXEC)
# slurmToJob
job = Job("","",os.getcwd(),exec_file)
job.id = procMan.queueJob(job)
contents = ""
for line in open(exec_file).readlines():
if line.startswith("#SBATCH"):
nameMatch = re.match(r"#SBATCH -J (.*)", line.strip())
if nameMatch:
job.name = nameMatch.group(1)
outFMatch = re.match(r"#SBATCH --output=(.*)", line.strip())
if outFMatch:
job.outF = outFMatch.group(1)
errFMatch = re.match(r"#SBATCH --error=(.*)", line.strip())
if errFMatch:
job.errF = errFMatch.group(1)
line = re.sub(r"\$SLURM_JOB_ID", str(job.id), line)
contents += line
with open(exec_file, "w+") as f:
f.write(contents)
job.outF = re.sub("\%j", str(job.id), job.outF)
job.errF = re.sub("\%j", str(job.id), job.errF)
procMan.saveState()
print job.id
else:
options.file = common.file_option_test( options.file, "", this_directory )
if options.file == "":
sys.exit("Please specify the file containing the processes to manage with -f.")
procMan = pickle.load(open(options.file))
if procMan.tickingProcess!= None:
sys.exit("This procman is already running {0}".format(os.path.basename(options.file)))
procMan.pickleFile = options.file
os.remove(options.file)
if len(procMan.queuedJobs) > 0:
while not procMan.complete():
procMan.tick()
procMan.saveState()
time.sleep(options.sleepTime)
os.remove(procMan.pickleFile)
if __name__ == '__main__':
    # Make sure the per-host state directory exists before any command runs.
    if not os.path.exists(procManStateFolder):
        os.makedirs(procManStateFolder)
    main()
| 41.453629 | 127 | 0.594961 | 6,914 | 0.336268 | 0 | 0 | 0 | 0 | 0 | 0 | 6,559 | 0.319002 |
38e6e2acda50235d67dfac9f5ec598d66c8ddcc1 | 2,005 | py | Python | nabu/processing/tfreaders/numpy_float_array_as_tfrecord_reader.py | Darleen2019/Nabu-MSSS | 5e862cbf846d45b8a317f87588533f3fde9f0726 | [
"MIT"
] | 18 | 2017-10-16T13:12:46.000Z | 2022-02-15T01:20:00.000Z | nabu/processing/tfreaders/numpy_float_array_as_tfrecord_reader.py | Darleen2019/Nabu-MSSS | 5e862cbf846d45b8a317f87588533f3fde9f0726 | [
"MIT"
] | null | null | null | nabu/processing/tfreaders/numpy_float_array_as_tfrecord_reader.py | Darleen2019/Nabu-MSSS | 5e862cbf846d45b8a317f87588533f3fde9f0726 | [
"MIT"
] | 9 | 2017-10-03T18:10:10.000Z | 2020-11-13T08:26:31.000Z | '''@file numpy_float_array_as_tfrecord_reader.py
contains the NumpyFloatArrayAsTfrecordReader class'''
import os
import numpy as np
import tensorflow as tf
import tfreader
import pdb
class NumpyFloatArrayAsTfrecordReader(tfreader.TfReader):
    '''Reader for tfrecords that store raw numpy float32 arrays.'''

    def _read_metadata(self, datadirs):
        '''Read the non-time dimensions of the data.

        Args:
            datadirs: the directories where the metadata was written

        Returns:
            the metadata as a dictionary (key 'nontime_dims')

        Raises:
            Exception: if the directories disagree on the dimensions
        '''
        def read_nontime_dims(directory):
            # Each datadir stores its dims as a comma-separated line.
            with open(os.path.join(directory, 'nontime_dims')) as fid:
                return [int(dim) for dim in fid.read().strip().split(',')]

        metadata = {'nontime_dims': read_nontime_dims(datadirs[0])}
        # Every directory must agree on the non-time dimensions.
        for datadir in datadirs:
            if read_nontime_dims(datadir) != metadata['nontime_dims']:
                raise Exception(
                    'all reader dimensions must be the same')
        return metadata

    def _create_features(self):
        '''Describe the serialized features.

        Returns:
            A dict mapping feature keys to FixedLenFeature, VarLenFeature,
            and SparseFeature values; here the array is one raw byte string.
        '''
        return {'data': tf.FixedLenFeature([], dtype=tf.string)}

    def _process_features(self, features):
        '''Decode the raw bytes back into a float array.

        Args:
            features: a dict mapping feature keys to Tensor and SparseTensor
                values

        Returns:
            a pair of (data tensor, sequence length), where the first
            dimension is time and the rest come from the stored metadata
        '''
        decoded = tf.decode_raw(features['data'], tf.float32)
        data = tf.reshape(decoded, [-1] + self.metadata['nontime_dims'])
        return data, tf.shape(data)[0]
| 30.378788 | 78 | 0.613965 | 1,819 | 0.907232 | 0 | 0 | 0 | 0 | 0 | 0 | 941 | 0.469327 |
38e9981ca280b378dc3584ec5d46dac7194e4dbd | 643 | py | Python | apps/telegram_bot/bot/filters.py | alena-kono/bot-valley | e8403f75bcd4e4b3f84bbe8c00a53d763e7c9776 | [
"MIT"
] | null | null | null | apps/telegram_bot/bot/filters.py | alena-kono/bot-valley | e8403f75bcd4e4b3f84bbe8c00a53d763e7c9776 | [
"MIT"
] | 1 | 2022-03-01T12:18:11.000Z | 2022-03-01T12:18:11.000Z | apps/telegram_bot/bot/filters.py | alena-kono/bot-valley | e8403f75bcd4e4b3f84bbe8c00a53d763e7c9776 | [
"MIT"
] | null | null | null | import telegram
from telegram.ext.filters import MessageFilter
from apps.telegram_bot.preferences import global_preferences
class CryptoCurrencyFilter(MessageFilter):
    """A custom MessageFilter that accepts telegram text messages whose text
    is one of the configured BUTTONS_CRYPTO_CURRENCIES_FROM currencies.
    """

    def filter(self, message: telegram.Message) -> bool:
        currencies_str = global_preferences.get("buttons__crypto_currencies_from")
        # str.replace returns a new string; the previous code discarded the
        # result, so spaces around the comma-separated entries were never
        # actually removed.  Rebind to apply the stripping.
        currencies_str = currencies_str.replace(" ", "")
        currencies_list = currencies_str.split(",")
        return message.text in currencies_list
| 33.842105 | 82 | 0.735614 | 515 | 0.800933 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.290824 |
c7f9c8239458c9257f86279f06d4314af6312cd7 | 2,664 | py | Python | functest/func_py_custom_renderer.py | mitsuba-rei/lightmetrica-v3 | db5b7d5a9a245fb7c0d25124433c38d09b62813e | [
"MIT"
] | null | null | null | functest/func_py_custom_renderer.py | mitsuba-rei/lightmetrica-v3 | db5b7d5a9a245fb7c0d25124433c38d09b62813e | [
"MIT"
] | null | null | null | functest/func_py_custom_renderer.py | mitsuba-rei/lightmetrica-v3 | db5b7d5a9a245fb7c0d25124433c38d09b62813e | [
"MIT"
] | null | null | null | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Custom renderer in Python
#
# This test demostrates how to create an custom renderer using component extension in Python.
# %load_ext autoreload
# %autoreload 2
import os
import imageio
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import lmfunctest as ft
import lmscene
import lightmetrica as lm
os.getpid()
# %load_ext lightmetrica_jupyter
@lm.pylm_component('renderer::ao')
class Renderer_AO(lm.Renderer):
    """Simple ambient occlusion renderer.

    For each pixel a primary ray is traced into the scene and the visibility
    at the hit point is estimated with `spp` short occlusion rays sampled via
    lm.math.sampleCosineWeighted.
    """
    def construct(self, prop):
        # 'output': film asset that receives the rendered image
        self.film = lm.Film.castFrom(lm.comp.get(prop['output']))
        if self.film is None:
            return False
        # 'spp': number of occlusion samples per pixel
        self.spp = prop['spp']
        return True
    def render(self, scene):
        w = self.film.size().w
        h = self.film.size().h
        rng = lm.Rng(42)  # fixed seed for reproducible output
        lm.progress.start(lm.progress.ProgressMode.Samples, w*h, 0)
        def process(index):
            # map the flat pixel index to pixel-center raster coordinates
            x = index % w
            y = int(index / w)
            rp = np.array([(x+.5)/w, (y+.5)/h])
            ray = scene.primaryRay(rp, self.film.aspectRatio())
            hit = scene.intersect(ray)
            if hit is None:
                return
            # estimate visibility V: fraction of sampled rays around the
            # shading basis that do not hit nearby geometry
            V = 0
            for i in range(self.spp):
                n, u, v = hit.geom.orthonormalBasis(-ray.d)
                d = lm.math.sampleCosineWeighted(rng)
                r = lm.Ray(hit.geom.p, np.dot(d, [u,v,n]))
                # occlusion test limited to the range [lm.Eps, .2] --
                # presumably a short distance in scene units; TODO confirm
                if scene.intersect(r, lm.Eps, .2) is None:
                    V += 1
            V /= self.spp
            self.film.setPixel(x, y, np.full(3, V))
            lm.progress.update(y*w+x)
        for i in range(w*h):
            process(i)
        lm.progress.end()
# Initialize the framework with single-threaded OpenMP parallelism and
# Jupyter-friendly logging/progress reporting
lm.init('user::default', {})
lm.parallel.init('parallel::openmp', {
    'numThreads': 1
})
lm.log.init('logger::jupyter', {})
lm.progress.init('progress::jupyter', {})
lm.info()
# Scene: create the output film, load the fireplace_room scene and build an
# SAH BVH acceleration structure
lm.asset('film_output', 'film::bitmap', {
    'w': 640,
    'h': 360
})
lmscene.load(ft.env.scene_path, 'fireplace_room')
lm.build('accel::sahbvh', {})
# Render with the custom Python AO renderer registered as 'renderer::ao'
lm.render('renderer::ao', {
    'output': lm.asset('film_output'),
    'spp': 5
})
# Display: flip vertically, gamma-correct (1/2.2) and clamp to [0, 1]
img = np.flip(np.copy(lm.buffer(lm.asset('film_output'))), axis=0)
f = plt.figure(figsize=(15,15))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(img,1/2.2),0,1))
plt.show()
| 24.218182 | 93 | 0.582958 | 1,268 | 0.475976 | 0 | 0 | 1,303 | 0.489114 | 0 | 0 | 764 | 0.286787 |
c7fa73d5a956f1ec56de8229d7e06f9ce5ec0668 | 668 | py | Python | Ingest/Reddit.py | andersonpaac/Credd | b22f86167d6a2ced080b159a5ab822eba5faa791 | [
"MIT"
] | null | null | null | Ingest/Reddit.py | andersonpaac/Credd | b22f86167d6a2ced080b159a5ab822eba5faa791 | [
"MIT"
] | 7 | 2020-03-24T17:05:52.000Z | 2022-03-11T23:47:42.000Z | Ingest/Reddit.py | andersonpaac/Creddit | b22f86167d6a2ced080b159a5ab822eba5faa791 | [
"MIT"
] | null | null | null | import json
import praw
from Analysis.Common import Constants
def get_reddit_instance(config_json_fname: str = Constants.CONFIG_FNAME):
    """
    Build a praw Reddit instance from a JSON credentials file.

    The file must contain the reddit API's ``client_id``, ``client_secret``
    and ``user_agent`` keys.

    :param config_json_fname: path to the JSON credentials file
    :return: the authenticated praw.Reddit instance
    """
    # the with-statement closes the file on exit; the previous explicit
    # json_data.close() inside the block was redundant
    with open(config_json_fname) as json_data:
        config_creds = json.load(json_data)
    return praw.Reddit(client_id=config_creds['client_id'],
                       client_secret=config_creds['client_secret'],
                       user_agent=config_creds['user_agent'])
| 33.4 | 116 | 0.684132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.347305 |
c7fbbec52e6a6987d6dd31cf694220a3b9b2bd09 | 301 | py | Python | web crawler functions/get_next_target.py | akshaynagpal/python_web_crawler | a74af25db4c9f819105621868a6a9a7337a2a770 | [
"MIT"
] | 1 | 2022-03-06T21:00:45.000Z | 2022-03-06T21:00:45.000Z | web crawler functions/get_next_target.py | akshaynagpal/python_web_crawler | a74af25db4c9f819105621868a6a9a7337a2a770 | [
"MIT"
] | null | null | null | web crawler functions/get_next_target.py | akshaynagpal/python_web_crawler | a74af25db4c9f819105621868a6a9a7337a2a770 | [
"MIT"
def get_next_target(page):
    """Return (url, end_pos) for the first '<a href=' anchor in page.

    If no anchor is present, return (None, 0).  end_pos is the index of the
    closing quote of the url, useful for resuming the scan from there.
    """
    anchor = page.find('<a href=')
    if anchor == -1:
        return None, 0
    open_quote = page.find('"', anchor)
    close_quote = page.find('"', open_quote + 1)
    return page[open_quote + 1:close_quote], close_quote
| 27.363636 | 51 | 0.58804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.053156 |
c7fbc823f8a546465b13ed0456c3ae2fd7b2cfaf | 3,249 | py | Python | botwinick_math/maths/aggregate.py | dbotwinick/python-botwinick-math | 52f038c277e27243f82aed2000137fb2ee6f1dae | [
"BSD-3-Clause"
] | null | null | null | botwinick_math/maths/aggregate.py | dbotwinick/python-botwinick-math | 52f038c277e27243f82aed2000137fb2ee6f1dae | [
"BSD-3-Clause"
] | null | null | null | botwinick_math/maths/aggregate.py | dbotwinick/python-botwinick-math | 52f038c277e27243f82aed2000137fb2ee6f1dae | [
"BSD-3-Clause"
] | null | null | null | # author: Drew Botwinick, Botwinick Innovations
# title: occasionally trivial support functions for aggregating data for python 2/3 [only numpy as dependency]
# NOTE: these functions are generally tested meant for 1D although they may apply or be easily extended to nd
# license: 3-clause BSD
import numpy as np
# thin aliases giving the module a consistent flat_* naming scheme for
# whole-array aggregates; each delegates directly to the numpy equivalent
flat_max = np.max
flat_min = np.min
flat_percentile = np.percentile
flat_mean = np.average
def flat_abs_maximum(data, preserve_sign=True):
    """
    Return the entry of ``data`` with the largest magnitude.

    With ``preserve_sign`` (the default) the original signed value is
    returned: for [-75, -25, 0, 25, 50] the result is -75 because it has the
    highest magnitude.  With ``preserve_sign=False`` this is simply a
    composite of abs and max.

    :param data: data array source
    :param preserve_sign: whether or not to preserve the sign of the output
    :return: largest absolute value in the data array
    """
    values = np.asarray(data)
    magnitudes = np.abs(values)
    location = np.unravel_index(np.argmax(magnitudes), values.shape)
    if preserve_sign:
        return values[location]
    return magnitudes[location]
def flat_abs_minimum(data, preserve_sign=True):
    """
    Return the entry of ``data`` with the smallest magnitude.

    With ``preserve_sign`` (the default) the original signed value is
    returned: [-100, -24, 1, 2] yields 1 and [-100, -50, -2, -1] yields -1.
    With ``preserve_sign=False`` this is simply a composite of abs and min.

    :param data: data array source
    :param preserve_sign: whether or not to preserve the sign of the output
    :return: smallest absolute value in the data array
    """
    values = np.asarray(data)
    magnitudes = np.abs(values)
    location = np.unravel_index(np.argmin(magnitudes), values.shape)
    if preserve_sign:
        return values[location]
    return magnitudes[location]
def partition_top(data, n, return_indices=False):
    """
    Return the top (largest) ``n`` values of an array, in no particular order.

    Uses ``np.argpartition`` for linear-time selection instead of a full sort.

    Note: the previous implementation selected the ``n - 1`` *smallest*
    values (``argpartition(data, n)[:n]`` after decrementing n), which
    contradicted this docstring and ``flat_top_average``; it also ignored
    ``return_indices`` on the degenerate path.  Both are fixed here.

    :param data: data array source
    :param n: the number of values of interest (n)
    :param return_indices: whether to also return the indices array
    :return: top n values if 0 < n < data.size, otherwise all values; a
        (values, indices) pair if `return_indices` is set
    """
    data = np.asarray(data)
    if n is None or n <= 0 or n >= data.size:
        # degenerate request: everything qualifies
        if return_indices:
            return data, np.arange(data.size)
        return data
    # negative kth selects so the last n positions hold the n largest values
    idx = np.argpartition(data, -n)[-n:]
    result = data[idx]
    if return_indices:
        return result, idx
    return result
def flat_top_average(data, n):
    """
    Return the average of the top n values in an array.

    :param data: data array source
    :param n: the number of values of interest (n)
    :return: average of top n values if n < data.size or average of data if n > data.size
    """
    top_values = partition_top(data, n, return_indices=False)
    return np.average(top_values)
| 39.144578 | 128 | 0.711296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,242 | 0.690058 |
c7fd3ba288c68945f082fd623cdb47c96e4e8977 | 1,485 | py | Python | leetcode-algorithms/107. Binary Tree Level Order Traversal II/107.binary-tree-level-order-traversal-ii.py | cnyy7/LeetCode_EY | 44e92f102b61f5e931e66081ed6636d7ecbdefd4 | [
"MIT"
] | null | null | null | leetcode-algorithms/107. Binary Tree Level Order Traversal II/107.binary-tree-level-order-traversal-ii.py | cnyy7/LeetCode_EY | 44e92f102b61f5e931e66081ed6636d7ecbdefd4 | [
"MIT"
] | null | null | null | leetcode-algorithms/107. Binary Tree Level Order Traversal II/107.binary-tree-level-order-traversal-ii.py | cnyy7/LeetCode_EY | 44e92f102b61f5e931e66081ed6636d7ecbdefd4 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=107 lang=python3
#
# [107] Binary Tree Level Order Traversal II
#
# https://leetcode.com/problems/binary-tree-level-order-traversal-ii/description/
#
# algorithms
# Easy (48.66%)
# Likes: 1105
# Dislikes: 201
# Total Accepted: 289.3K
# Total Submissions: 572.4K
# Testcase Example: '[3,9,20,null,null,15,7]'
#
# Given a binary tree, return the bottom-up level order traversal of its nodes'
# values. (ie, from left to right, level by level from leaf to root).
#
#
# For example:
# Given binary tree [3,9,20,null,null,15,7],
#
# 3
# / \
# 9 20
# / \
# 15 7
#
#
#
# return its bottom-up level order traversal as:
#
# [
# [15,7],
# [9,20],
# [3]
# ]
#
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
        """Return the node values level by level, deepest level first
        (bottom-up breadth-first traversal).
        """
        if not root:
            return []
        # BFS queue of the current level's nodes.  The original mixed the
        # bare name `deque` with `collections.deque`; use the qualified form
        # consistently so only one import is required.
        queue = collections.deque([root])
        # appendleft() builds the result bottom-up without a final reverse
        levels = collections.deque()
        while queue:
            level = []
            for _ in range(len(queue)):
                node = queue.popleft()
                level.append(node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            levels.appendleft(level)
        return list(levels)
# @lc code=end
| 21.521739 | 81 | 0.552862 | 568 | 0.378414 | 0 | 0 | 0 | 0 | 0 | 0 | 881 | 0.586942 |
c7feef185f3cd2a9de34cd9ca911002e06e8cf12 | 3,383 | py | Python | openrave/python/examples/tutorial_iklookat.py | jdsika/TUM_HOly | a2ac55fa1751a3a8038cf61d29b95005f36d6264 | [
"MIT"
] | 2 | 2015-11-13T16:40:57.000Z | 2017-09-15T15:37:19.000Z | openrave/python/examples/tutorial_iklookat.py | jdsika/holy | a2ac55fa1751a3a8038cf61d29b95005f36d6264 | [
"MIT"
] | 1 | 2016-06-13T01:29:51.000Z | 2016-06-14T00:38:27.000Z | openrave/python/examples/tutorial_iklookat.py | jdsika/holy | a2ac55fa1751a3a8038cf61d29b95005f36d6264 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2011 Rosen Diankov (rosen.diankov@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shows how to use lookat inverse kinematics to maintain line of sight with a moving object.
.. examplepre-block:: tutorial_iklookat
.. examplepost-block:: tutorial_iklookat
"""
from __future__ import with_statement # for python 2.5
__author__ = 'Rosen Diankov'
import time
import openravepy
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
def main(env,options):
    """Load the scene, build a lookat IK solver and repeatedly aim the
    selected manipulator at randomly sampled 3D targets.
    """
    env.Load(options.scene)
    robot = env.GetRobots()[0]
    robot.SetActiveManipulator(options.manipname)

    # generate the ik solver for the Lookat3D IK type (auto-generate and
    # cache it if it has not been built for this robot/manipulator yet)
    ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.Lookat3D)
    if not ikmodel.load():
        ikmodel.autogenerate()
    while True:
        with env:
            # sample a random target near the current end-effector position
            # until at least one collision-free lookat IK solution exists
            while True:
                target=ikmodel.manip.GetTransform()[0:3,3]+(random.rand(3)-0.5)
                solutions = ikmodel.manip.FindIKSolutions(IkParameterization(target,IkParameterization.Type.Lookat3D),IkFilterOptions.CheckEnvCollisions)
                if len(solutions) > 0:
                    break
        h=env.plot3(array([target]),20.0)  # draw the target point
        # show up to 100 of the found solutions in random order
        for i in random.permutation(len(solutions))[0:min(100,len(solutions))]:
            with env:
                robot.SetDOFValues(solutions[i],ikmodel.manip.GetArmIndices())
                T = ikmodel.manip.GetTransform()
                # draw a ray from the end effector along its direction; the
                # length is the distance to the target plus a small margin
                globaldir = numpy.dot(T[0:3,0:3],ikmodel.manip.GetDirection())
                dist = linalg.norm(T[0:3,3]-target)+0.4
                hray = env.drawlinelist(array([T[0:3,3], T[0:3,3]+dist*globaldir]),5,colors=[0.1,0.1,1])
            env.UpdatePublishedBodies()
            time.sleep(0.1)
from optparse import OptionParser
from openravepy.misc import OpenRAVEGlobalArguments
@openravepy.with_destroy
def run(args=None):
    """Command-line execution of the example.

    :param args: arguments for script to parse, if not specified will use sys.argv
    """
    # note: the description previously read "different IK solutions for arms
    # with few joints", copied from another example; this script demonstrates
    # lookat inverse kinematics (see the module docstring).
    parser = OptionParser(description='Shows how to use lookat inverse kinematics to maintain line of sight with a moving object.')
    OpenRAVEGlobalArguments.addOptions(parser)
    parser.add_option('--scene',action="store",type='string',dest='scene',default='data/pr2test1.env.xml',
                      help='Scene file to load (default=%default)')
    parser.add_option('--manipname',action="store",type='string',dest='manipname',default='head_torso',
                      help='name of manipulator to use (default=%default)')
    (options, leftargs) = parser.parse_args(args=args)
    # parse the common OpenRAVE options and run main() with a viewer thread
    OpenRAVEGlobalArguments.parseAndCreateThreadedUser(options,main,defaultviewer=True)
if __name__ == "__main__":
    # run the example with the command-line arguments from sys.argv
    run()
| 42.2875 | 153 | 0.688738 | 0 | 0 | 0 | 0 | 834 | 0.246527 | 0 | 0 | 1,368 | 0.404375 |
c7ff80b02f36412b36398e9a909bcd286ba8d17e | 2,328 | py | Python | notebooks/calcs/rating.py | pcejrowski/iwi | 89111b589e7d9ccf0b60269fc98daf7e50f7d95f | [
"MIT"
] | null | null | null | notebooks/calcs/rating.py | pcejrowski/iwi | 89111b589e7d9ccf0b60269fc98daf7e50f7d95f | [
"MIT"
] | null | null | null | notebooks/calcs/rating.py | pcejrowski/iwi | 89111b589e7d9ccf0b60269fc98daf7e50f7d95f | [
"MIT"
] | null | null | null |
import metrics
def rate(artToCatSim, label, membershipData, categoryTree, artNamesDict, catNamesDict):
    """Compute and print the rating metrics for an article-to-category
    similarity matrix.

    Selects the strongest connections, generates a sample for manual rating,
    and prints the manual, child/parent, existing-connection and variance
    metrics.  (Large blocks of commented-out exploratory code were removed
    from the original body; they had no effect at runtime.)

    :param artToCatSim: sparse article x category similarity matrix
    :param label: label identifying this run, used for the manual-rating data
    :param membershipData: membership records; appears to be lines of
        tab-separated 'articleId<TAB>catId...' -- confirm against the caller
    :param categoryTree: category hierarchy used for the child/parent metric
    :param artNamesDict: article id -> name mapping for the manual sample
    :param catNamesDict: category id -> name mapping for the manual sample
    """
    topConnections = getTopConnections(artToCatSim)
    metrics.generateSampleForManualRating(topConnections, artNamesDict, catNamesDict, label)

    manualMetric = metrics.fromManualRating(label)
    print("Manual: {}".format(manualMetric))

    childParentMetric = metrics.numberOfChildParentConnections(topConnections, categoryTree)
    print("ChildParent: {}".format(childParentMetric))

    # note: this label previously read "ExisitngConns" (typo)
    existingConnsMetric = metrics.numberOfExistingConnections(artToCatSim, membershipData)
    print("ExistingConns: {}".format(existingConnsMetric))

    variance = metrics.variance(artToCatSim)
    print("Variance: {}".format(variance))
def getTopConnections(artToCatSim, number = None):
    """Return the strongest article-to-category connections.

    :param artToCatSim: sparse article x category similarity matrix
    :param number: how many connections to return; defaults to the top 10%
    :return: list of (row, col, value) triples, ascending by value
    """
    # renamed the local from `sorted`, which shadowed the builtin
    ranked = sort_coo(artToCatSim.tocoo())
    if not number:
        # floor division keeps the count an int under Python 3; a plain `/`
        # would produce a float and break the slice below
        number = len(ranked) // 10
    return ranked[-number:]
def sort_coo(m):
    """Return (row, col, value) triples of a COO matrix sorted by value.

    :param m: a matrix in COO format (exposes ``row``, ``col`` and ``data``)
    :return: list of (row, col, value) tuples in ascending value order
    """
    # zip works on both Python 2 and 3; itertools.izip was removed in Py3
    tuples = zip(m.row, m.col, m.data)
    return sorted(tuples, key=lambda x: (x[2]))
2a0163abf280cb04a1d37410665435dd6e038977 | 495 | py | Python | tests/test_socfaker_application.py | priamai/soc-faker | 51b587f0cec52212136905280406e915006d2afc | [
"MIT"
] | 122 | 2020-02-21T16:06:54.000Z | 2022-03-21T13:53:03.000Z | tests/test_socfaker_application.py | priamai/soc-faker | 51b587f0cec52212136905280406e915006d2afc | [
"MIT"
] | 13 | 2020-01-29T16:37:05.000Z | 2022-01-27T21:30:10.000Z | tests/test_socfaker_application.py | priamai/soc-faker | 51b587f0cec52212136905280406e915006d2afc | [
"MIT"
def test_socfaker_application_status(socfaker_fixture):
    # generated status must be one of the three supported lifecycle values.
    # (The def line was corrupted by dataset-extraction junk; reconstructed.)
    assert socfaker_fixture.application.status in ['Active', 'Inactive', 'Legacy']
def test_socfaker_application_account_status(socfaker_fixture):
    # generated account_status must be one of the two supported values
    assert socfaker_fixture.application.account_status in ['Enabled', 'Disabled']
def test_socfaker_name(socfaker_fixture):
    # generated application name must be present (truthy)
    assert socfaker_fixture.application.name
def test_socfaker_application_logon_timestamp(socfaker_fixture):
    # generated logon timestamp must be present (truthy).
    # (The assert line was corrupted by trailing dataset-extraction junk;
    # reconstructed.)
    assert socfaker_fixture.application.logon_timestamp
2a029f302b32cd792495b8fa2cc70f93a578db3c | 1,241 | py | Python | test.py | OSSome01/ekalavya | 30933c94308afd86d2135ea9b131005c88042df4 | [
"MIT"
] | 2 | 2021-06-12T07:43:10.000Z | 2021-06-12T15:10:50.000Z | test.py | OSSome01/ekalavya | 30933c94308afd86d2135ea9b131005c88042df4 | [
"MIT"
] | null | null | null | test.py | OSSome01/ekalavya | 30933c94308afd86d2135ea9b131005c88042df4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('begineer_tutorial')
import sys
import rospy
import cv2
import numpy as np
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class image_converter:
def __init__(self):
#self.image_pub = rospy.Publisher("image_topic_2",Image,queue_size=10)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/camera/rgb/image_color",Image,self.callback)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
#cv2.imshow('cv_image', cv_image)
image = cv2.cvtColor(cv_image , cv2.COLOR_BGR2HSV)
lower_range = np.array([30,150,50])
upper_range = np.array([255,255,180])
mask = cv2.inRange(image , lower_range, upper_range)
res = cv2.bitwise_and(cv_image, cv_image, mask=mask)
cv2.imshow("Image window", res)
def main(args):
ic = image_converter()
rospy.init_node('image_converter', anonymous=True)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
| 25.326531 | 84 | 0.720387 | 705 | 0.56809 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.186946 |
2a0340d6bee8c182f82274f58caefa62290e5be1 | 461 | py | Python | configs/retinanet/retinanet_r50_fpn_1x_bdd100k.py | XDong18/mmdetection | 482ada168a644f350d51d85783d309677441eeb0 | [
"Apache-2.0"
] | null | null | null | configs/retinanet/retinanet_r50_fpn_1x_bdd100k.py | XDong18/mmdetection | 482ada168a644f350d51d85783d309677441eeb0 | [
"Apache-2.0"
] | null | null | null | configs/retinanet/retinanet_r50_fpn_1x_bdd100k.py | XDong18/mmdetection | 482ada168a644f350d51d85783d309677441eeb0 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/bdd100k_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# model
model = dict(
bbox_head=dict(
num_classes=10, # bdd100k class number
)
)
# data loader
data = dict(
samples_per_gpu=4, # TODO samples pre gpu
workers_per_gpu=2,
)
| 20.954545 | 72 | 0.665944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.490239 |
2a06d2484aaf8403e4a1aedc38951691db8afbda | 1,278 | py | Python | questions/max-number-of-k-sum-pairs/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 141 | 2017-12-12T21:45:53.000Z | 2022-03-25T07:03:39.000Z | questions/max-number-of-k-sum-pairs/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 32 | 2015-10-05T14:09:52.000Z | 2021-05-30T10:28:41.000Z | questions/max-number-of-k-sum-pairs/Solution.py | marcus-aurelianus/leetcode-solutions | 8b43e72fe1f51c84abc3e89b181ca51f09dc7ca6 | [
"MIT"
] | 56 | 2015-09-30T05:23:28.000Z | 2022-03-08T07:57:11.000Z | """
You are given an integer array nums and an integer k.
In one operation, you can pick two numbers from the array whose sum equals k and remove them from the array.
Return the maximum number of operations you can perform on the array.
Example 1:
Input: nums = [1,2,3,4], k = 5
Output: 2
Explanation: Starting with nums = [1,2,3,4]:
- Remove numbers 1 and 4, then nums = [2,3]
- Remove numbers 2 and 3, then nums = []
There are no more pairs that sum up to 5, hence a total of 2 operations.
Example 2:
Input: nums = [3,1,3,4,3], k = 6
Output: 1
Explanation: Starting with nums = [3,1,3,4,3]:
- Remove the first two 3's, then nums = [1,4,3]
There are no more pairs that sum up to 6, hence a total of 1 operation.
Constraints:
1 <= nums.length <= 105
1 <= nums[i] <= 109
1 <= k <= 109
"""
class Solution:
def maxOperations(self, nums: List[int], k: int) -> int:
c = collections.Counter(nums)
r = 0
for n, v in c.items():
t = k - n
if t not in c:
continue
if t == n:
m = v // 2
r += m
c[n] = v - m
continue
m = min(v, c[t])
r += m
c[n] = v - m
c[t] = c[t] - m
return r | 25.56 | 108 | 0.544601 | 478 | 0.373438 | 0 | 0 | 0 | 0 | 0 | 0 | 799 | 0.624219 |
2a0c441daafd819f5e8b9d51026c63ee758e4a18 | 8,377 | py | Python | tests/core/service_test.py | dnephin/Tron | bd0f763421c6de50551e9a4b0e4a1c0c8ceb450a | [
"Apache-2.0"
] | null | null | null | tests/core/service_test.py | dnephin/Tron | bd0f763421c6de50551e9a4b0e4a1c0c8ceb450a | [
"Apache-2.0"
] | null | null | null | tests/core/service_test.py | dnephin/Tron | bd0f763421c6de50551e9a4b0e4a1c0c8ceb450a | [
"Apache-2.0"
] | null | null | null | import mock
from testify import setup, assert_equal, TestCase, run
from testify.assertions import assert_not_equal
from tests.assertions import assert_mock_calls
from tests.testingutils import autospec_method
from tron.core import service, serviceinstance
from tron import node, command_context, event, eventloop
from tron.core.serviceinstance import ServiceInstance
class ServiceStateTestCase(TestCase):
@setup
def setup_service(self):
self.instances = mock.create_autospec(
serviceinstance.ServiceInstanceCollection)
self.service = mock.Mock(enabled=True, instances=self.instances)
def test_state_disabled(self):
self.service.enabled = False
state = service.ServiceState.from_service(self.service)
assert_equal(state, service.ServiceState.DISABLED)
def test_state_up(self):
self.service.enabled = True
state = service.ServiceState.from_service(self.service)
assert_equal(state, service.ServiceState.UP)
self.instances.is_up.assert_called_with()
def test_state_degraded(self):
self.service.enabled = True
self.instances.all.return_value = False
self.instances.is_starting.return_value = False
self.instances.is_up.return_value = False
state = service.ServiceState.from_service(self.service)
assert_equal(state, service.ServiceState.DEGRADED)
class ServiceTestCase(TestCase):
@setup
def setup_service(self):
self.config = mock.MagicMock()
self.instances = mock.create_autospec(
serviceinstance.ServiceInstanceCollection,
stop=mock.Mock(), start=mock.Mock(), state_data=mock.Mock(),
restore=mock.Mock())
self.service = service.Service(self.config, self.instances)
autospec_method(self.service.watch)
self.service.repair_callback = mock.create_autospec(
eventloop.UniqueCallback)
@mock.patch('tron.core.service.node')
def test_from_config(self, mock_node):
node_store = mock.create_autospec(node.NodePoolRepository)
mock_node.NodePoolRepository.get_instance.return_value = node_store
node_store.get_by_name.return_value = mock.create_autospec(node.Node)
context = mock.create_autospec(command_context.CommandContext)
service_inst = service.Service.from_config(self.config, context)
collection = service_inst.instances
assert_equal(service_inst.config, self.config)
assert_equal(collection.node_pool, node_store.get_by_name.return_value)
assert_equal(collection.context, context)
def test_enable(self):
autospec_method(self.service.repair)
self.service.enable()
assert self.service.enabled
self.service.repair.assert_called_with()
def test_disable(self):
self.service.disable()
assert not self.service.enabled
self.instances.stop.assert_called_with()
self.service.repair_callback.cancel.assert_called_with()
def test_repair(self):
autospec_method(self.service.notify)
count = 3
created_instances = [
mock.create_autospec(ServiceInstance) for _ in xrange(count)]
self.instances.create_missing.return_value = created_instances
self.service.repair()
self.instances.clear_failed.assert_called_with()
assert_equal(self.service.watch.mock_calls,
[mock.call(inst.get_observable(), True) for inst in created_instances])
self.instances.restore.assert_called_with()
self.instances.start.assert_called_with()
self.service.notify.assert_called_with(self.service.NOTIFY_STATE_CHANGE)
def test_handle_instance_state_change_down(self):
autospec_method(self.service.notify)
instance_event = serviceinstance.ServiceInstance.STATE_DOWN
self.service._handle_instance_state_change(mock.Mock(), instance_event)
self.service.notify.assert_called_with(self.service.NOTIFY_STATE_CHANGE)
self.service.instances.clear_down.assert_called_with()
def test_handle_instance_state_change_failed(self):
autospec_method(self.service.notify)
autospec_method(self.service.record_events)
instance_event = serviceinstance.ServiceInstance.STATE_FAILED
self.service._handle_instance_state_change(mock.Mock(), instance_event)
assert not self.service.notify.mock_calls
self.service.record_events.assert_called_with()
def test_handle_instance_state_change_starting(self):
autospec_method(self.service.notify)
autospec_method(self.service.record_events)
instance_event = serviceinstance.ServiceInstance.STATE_STARTING
self.service._handle_instance_state_change(mock.Mock(), instance_event)
assert not self.service.notify.mock_calls
assert not self.service.record_events.mock_calls
def test_record_events_failure(self):
autospec_method(self.service.get_state)
state = self.service.get_state.return_value = service.ServiceState.FAILED
self.service.event_recorder = mock.create_autospec(event.EventRecorder)
self.service.record_events()
self.service.event_recorder.critical.assert_called_with(state)
def test_record_events_up(self):
autospec_method(self.service.get_state)
state = self.service.get_state.return_value = service.ServiceState.UP
self.service.event_recorder = mock.create_autospec(event.EventRecorder)
self.service.record_events()
self.service.event_recorder.ok.assert_called_with(state)
def test_state_data(self):
expected = dict(enabled=False, instances=self.instances.state_data)
assert_equal(self.service.state_data, expected)
def test__eq__not_equal(self):
assert_not_equal(self.service, None)
assert_not_equal(self.service, mock.Mock())
other = service.Service(self.config, mock.Mock())
assert_not_equal(self.service, other)
def test__eq__(self):
other = service.Service(self.config, self.instances)
assert_equal(self.service, other)
def test_restore_state(self):
autospec_method(self.service.watch_instances)
autospec_method(self.service.enable)
state_data = {'enabled': True, 'instances': []}
self.service.restore_state(state_data)
self.service.watch_instances.assert_called_with(
self.instances.restore_state.return_value)
self.service.enable.assert_called_with()
class ServiceCollectionTestCase(TestCase):
    """Tests for service.ServiceCollection.

    NOTE: this module uses Python 2 idioms (``xrange``, ``dict.itervalues``);
    keep that in mind before modernizing.
    """

    @setup
    def setup_collection(self):
        # Fresh collection plus three autospec'd Service mocks per test.
        self.collection = service.ServiceCollection()
        self.service_list = [
            mock.create_autospec(service.Service) for _ in xrange(3)]

    def _add_service(self):
        # Helper: register every mock service in the collection by name.
        self.collection.services.update(
            (serv.name, serv) for serv in self.service_list)

    @mock.patch('tron.core.service.Service', autospec=True)
    def test_load_from_config(self, mock_service):
        """load_from_config() should build a Service per config entry and add
        each resulting service to the collection."""
        autospec_method(self.collection.get_names)
        autospec_method(self.collection.add)
        service_configs = {'a': mock.Mock(), 'b': mock.Mock()}
        context = mock.create_autospec(command_context.CommandContext)
        # load_from_config is lazy; list() forces the generator to run.
        result = list(self.collection.load_from_config(service_configs, context))
        expected = [mock.call(config, context)
            for config in service_configs.itervalues()]
        assert_mock_calls(expected, mock_service.from_config.mock_calls)
        expected = [mock.call(s) for s in result]
        assert_mock_calls(expected, self.collection.add.mock_calls)

    def test_add(self):
        """add() delegates to services.replace and returns its result."""
        self.collection.services = mock.MagicMock()
        service = mock.Mock()
        result = self.collection.add(service)
        self.collection.services.replace.assert_called_with(service)
        assert_equal(result, self.collection.services.replace.return_value)

    def test_restore_state(self):
        """restore_state() forwards each saved state dict to the matching
        service's own restore_state."""
        state_count = 2
        state_data = dict(
            (serv.name, serv) for serv in self.service_list[:state_count])
        self._add_service()
        self.collection.restore_state(state_data)
        for name in state_data:
            service = self.collection.services[name]
            service.restore_state.assert_called_with(state_data[name])
if __name__ == "__main__":
    # run() comes from the test framework imported at the top of this module
    # (not visible in this chunk) -- executes the whole suite when run directly.
    run()
| 42.308081 | 83 | 0.718515 | 7,962 | 0.95046 | 0 | 0 | 2,290 | 0.273368 | 0 | 0 | 87 | 0.010386 |
2a0c50108ad0d192b72ca77b3ab58cf9d7e8b047 | 113 | py | Python | constants.py | mattdeitke/allenact-1 | 70f106b32a38424e862399a76d84f607838063be | [
"MIT"
] | 1 | 2020-09-10T13:09:14.000Z | 2020-09-10T13:09:14.000Z | constants.py | mattdeitke/allenact-1 | 70f106b32a38424e862399a76d84f607838063be | [
"MIT"
] | null | null | null | constants.py | mattdeitke/allenact-1 | 70f106b32a38424e862399a76d84f607838063be | [
"MIT"
] | null | null | null | import os
from pathlib import Path

# Absolute path of the directory containing this constants module, used as the
# project's top-level directory anchor. (``os`` is imported on the line above.)
ABS_PATH_OF_TOP_LEVEL_DIR = os.path.abspath(os.path.dirname(Path(__file__)))
| 22.6 | 76 | 0.814159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2a0dc668b21a91832f8ec31e1a4ff4f191731611 | 4,558 | py | Python | Implementation/eshopee.py | AchuthaVVyas/259902_Ltts_StepIN- | 8dca06479a1c163e9bc971be97f34b09c9aa5aad | [
"Unlicense"
] | null | null | null | Implementation/eshopee.py | AchuthaVVyas/259902_Ltts_StepIN- | 8dca06479a1c163e9bc971be97f34b09c9aa5aad | [
"Unlicense"
] | null | null | null | Implementation/eshopee.py | AchuthaVVyas/259902_Ltts_StepIN- | 8dca06479a1c163e9bc971be97f34b09c9aa5aad | [
"Unlicense"
] | null | null | null | import tkinter as tk
from tkinter.font import Font
from tkinter import *
from tkinter import messagebox
from models.Store import Store
from models.ShoppingCart import ShoppingCart
import datetime
def viewStore():
    """Render the store inventory inside the global ``storeWindow``.

    Builds one row per store item (name, price, add-to-cart button) and a
    'Go To Cart' button at the bottom. Relies on module-level globals
    ``storeWindow`` and (indirectly) ``cart`` via addItemToCart.
    """
    global storeWindow
    storeLabelFrame = LabelFrame(storeWindow, text="Store Items",)
    storeLabelFrame.pack(fill="both", expand="yes", padx="20", pady="10")
    storeItemsFrame = Frame(storeLabelFrame)
    storeItemsFrame.pack(padx="10", pady="5")
    store = Store()
    storeItems = store.getStoreItems()
    for item in storeItems:
        itemFrame = Frame(storeItemsFrame, pady="5")
        itemFrame.pack(fill="both", expand="yes")
        nameLabel = Label(itemFrame, text=item.name,
                          font=("Candara", 15), fg="blue")
        priceLabel = Label(itemFrame, text="Rs.%s" %
                           item.price, font=("Candara", 13), fg="red")
        # Bind the current item via a default argument so each button adds
        # its own item (avoids the late-binding closure pitfall).
        addToCartBtn = Button(itemFrame, text="Add To Cart",
                              cursor="hand2", command=lambda i=item: addItemToCart(i))
        # NOTE(review): hard-coded, Windows-specific image path -- should be
        # made relative/configurable.
        btnImage = PhotoImage(file="F:\python_project\Eshopping\images\\addTocart.png")
        # Keep a reference on the widget; presumably this guards against the
        # PhotoImage being garbage-collected (common tkinter idiom) -- confirm.
        addToCartBtn.image = btnImage
        addToCartBtn.config( image=btnImage,width="40", height="40")
        nameLabel.pack(side="left")
        priceLabel.pack(side="left", fill="both", expand="yes")
        addToCartBtn.pack(side="right")
    btnGoCart = Button(storeWindow, text="Go To Cart", font=(
        "Candara", 15, "bold"), fg="red", bg="white", cursor="hand2", command=viewCart)
    btnGoCart.pack(pady="6")
def viewCart():
    """Open a modal Toplevel window listing the cart contents.

    Shows one row per cart item with a remove button, the running total,
    a 'Buy Now' button, and a button to close the window and return to the
    store. Reads the module-level global ``cart``.
    """
    cartWindow = Toplevel()
    cartWindow.title("The Cart")
    # grab_set() makes the cart window modal (it captures all input events).
    cartWindow.grab_set()
    global cart
    cartItems = cart.getCartItems()
    cartItemsLabelFrame = LabelFrame(cartWindow, text="Cart Items")
    cartItemsLabelFrame.pack(fill="both", expand="yes", padx="20", pady="10")
    cartItemsFrame = Frame(cartItemsLabelFrame, padx=3, pady=3)
    cartItemsFrame.pack()
    index = 0
    for item in cartItems:
        itemFrame = Frame(cartItemsFrame, pady="5")
        itemFrame.pack(fill="both", expand="yes")
        nameLabel = Label(itemFrame, text=item.name,
                          font=("Candara", 15), fg="blue")
        priceLabel = Label(itemFrame, text=" Rs.%s" %
                           item.price, font=("Candara", 13), fg="red")
        # Bind the row index via a default argument so each remove button
        # targets its own item (late-binding closure pitfall otherwise).
        addToCartBtn = Button(itemFrame, text="Remove From Cart", font=("Candara", 11, "bold"),
                              fg="red", bg="white", cursor="hand2", command=lambda i=index: removeFromCart(i, cartWindow))
        nameLabel.pack(side="left")
        priceLabel.pack(side="left")
        addToCartBtn.pack(side="right")
        index += 1
    checkOutFrame = Frame(cartWindow, pady="10")
    totalPriceLabel = Label(checkOutFrame, text="Total Price : Rs.%s" %
                            cart.getTotalPrice(), font=("Candara", 14, "bold"), fg="indigo")
    totalPriceLabel.pack(side="left")
    buyBtn = Button(checkOutFrame, text="Buy Now", font=("Candara", 15, "bold"),
                    fg="indigo", bg="white", cursor="hand2", command=lambda: buyCommand(cartWindow))
    buyBtn.pack(side="left", padx="10")
    checkOutFrame.pack()
    backToStoreBtn = Button(cartWindow, text="Back To Store", font=(
        "Candara", 15, "bold"), fg="red", bg="white", cursor="hand2", command=cartWindow.destroy)
    backToStoreBtn.pack(pady="6")
    cartWindow.mainloop()
def addItemToCart(item=None):
    """Add *item* to the shared cart and confirm with an info popup."""
    global cart
    cart.addToCart(item)
    confirmation = "Item %s Added To The Cart !!" % item.name
    messagebox.showinfo(title="Success", message=confirmation)
def removeFromCart(itemIndex=None, cartWindow=None):
    """Remove the cart entry at *itemIndex*, notify the user, then rebuild
    the cart window (destroy the stale one and re-open a fresh view)."""
    global cart
    cart.removeFromCart(itemIndex)
    messagebox.showinfo(title="success", message="Item Removed")
    # Re-render from scratch so the removed row disappears.
    cartWindow.destroy()
    viewCart()
def buyCommand(cartWindow):
    """Finalize the purchase.

    Appends a timestamped receipt (item names, prices, total) to the log
    file, empties the cart, closes the cart window, and shows a success
    popup. Reads/mutates the module-level global ``cart``.

    NOTE(review): the receipt path is hard-coded and Windows-specific --
    consider making it configurable.
    """
    global cart
    current_time = datetime.datetime.now()
    cartitems = cart.getCartItems()
    # Context manager guarantees the handle is closed even if a write fails
    # (the original opened/closed manually -- and shadowed the builtin `file`).
    with open("F:\python_project\\a.txt", "a") as receipt:
        receipt.write('\n\nDate:-' + str(current_time.date()) +
                      '\tTime:-' + str(current_time.time()) + '\n')
        for items in cartitems:
            receipt.write(str(items.name) + '\t' + str(items.price) + '\n')
        price = cart.getTotalPrice()
        receipt.write('Total_Price :- ' + str(price) + '\n')
    cart.emptyCart()
    cartWindow.destroy()
    messagebox.showinfo(
        title="success", message="Purchase Completed Successfully")
# Application entry point: build the root window, render the store, create
# the shared cart, and start the tkinter event loop.
storeWindow = tk.Tk()
storeWindow.title("The Store")
viewStore()
# Module-level cart shared by the add/remove/buy handlers above.
cart = ShoppingCart()
storeWindow.mainloop()
| 34.793893 | 122 | 0.630759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 775 | 0.170031 |
2a0dc67bade2117bf57ad1386621e32344ab2ab8 | 15,919 | py | Python | atomate/vasp/database.py | Zhuoying/atomate | 067023f0f740d3abac47b7ae7743c1c31eff8a06 | [
"BSD-3-Clause-LBNL"
] | null | null | null | atomate/vasp/database.py | Zhuoying/atomate | 067023f0f740d3abac47b7ae7743c1c31eff8a06 | [
"BSD-3-Clause-LBNL"
] | null | null | null | atomate/vasp/database.py | Zhuoying/atomate | 067023f0f740d3abac47b7ae7743c1c31eff8a06 | [
"BSD-3-Clause-LBNL"
] | null | null | null | """
This module defines the database classes.
"""
import json
import zlib
from typing import Any
import gridfs
from bson import ObjectId
from maggma.stores.aws import S3Store
from monty.dev import deprecated
from monty.json import MontyEncoder
from pymatgen.electronic_structure.bandstructure import (
BandStructure,
BandStructureSymmLine,
)
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.io.vasp import Chgcar
from pymongo import ASCENDING, DESCENDING
from atomate.utils.database import CalcDb
from atomate.utils.utils import get_logger
__author__ = "Kiran Mathew"
__credits__ = "Anubhav Jain"
__email__ = "kmathew@lbl.gov"

logger = get_logger(__name__)

# Names of the large calculation objects that are stripped out of task
# documents and stored separately (GridFS or a Maggma store). If Maggma
# stores are used, one store must be initialized per object type.
OBJ_NAMES = (
    "dos",
    "bandstructure",
    "chgcar",
    "locpot",
    "aeccar0",
    "aeccar1",
    "aeccar2",
    "elfcar",
)
class VaspCalcDb(CalcDb):
    """
    Class to help manage database insertions of Vasp drones.

    Large objects (DOS, band structure, charge densities, ...) are stripped
    from task documents and stored either in GridFS or in a Maggma store;
    only a reference (fs_id + compression type) is kept in the task document.
    """

    def __init__(
        self,
        host="localhost",
        port=27017,
        database="vasp",
        collection="tasks",
        user=None,
        password=None,
        **kwargs,
    ):
        super().__init__(host, port, database, collection, user, password, **kwargs)

    def build_indexes(self, indexes=None, background=True):
        """
        Build the indexes.

        Args:
            indexes (list): list of single field indexes to be built.
            background (bool): Run in the background or not.

        TODO: make sure that the index building is sensible and check for
            existing indexes.
        """
        _indices = (
            indexes
            if indexes
            else [
                "formula_pretty",
                "formula_anonymous",
                "output.energy",
                "output.energy_per_atom",
                "dir_name",
            ]
        )
        self.collection.create_index("task_id", unique=True, background=background)
        # build single field indexes
        for i in _indices:
            self.collection.create_index(i, background=background)
        # build compound indexes
        for formula in ("formula_pretty", "formula_anonymous"):
            self.collection.create_index(
                [
                    (formula, ASCENDING),
                    ("output.energy", DESCENDING),
                    ("completed_at", DESCENDING),
                ],
                background=background,
            )
            self.collection.create_index(
                [
                    (formula, ASCENDING),
                    ("output.energy_per_atom", DESCENDING),
                    ("completed_at", DESCENDING),
                ],
                background=background,
            )
        # TODO consider sensible index building for the maggma stores

    def insert_task(self, task_doc, use_gridfs=False):
        """
        Inserts a task document (e.g., as returned by Drone.assimilate()) into the database.
        Handles putting DOS, band structure and charge density into GridFS as needed.
        During testing, a percentage of runs on some clusters had corrupted AECCAR files
        when even if everything else about the calculation looked OK.
        So we do a quick check here and only record the AECCARs if they are valid

        Args:
            task_doc (dict): the task document
            use_gridfs (bool): store the data matching OBJ_NAMES to gridfs.
                if maggma_store_type is set (ex. "s3") this flag will be ignored

        Returns:
            (int) - task_id of inserted document
        """
        big_data_to_store = {}

        def extract_from_calcs_reversed(obj_key):
            """
            Grab the data from calcs_reversed.0.obj_key and store on gridfs directly or some Maggma store

            Args:
                obj_key: Key of the data in calcs_reversed.0 to store
            """
            calcs_r_data = task_doc["calcs_reversed"][0][obj_key]

            # remove the big object from all calcs_reversed
            # this can catch situations were the drone added the data to more than one calc.
            for i_calcs in range(len(task_doc["calcs_reversed"])):
                del task_doc["calcs_reversed"][i_calcs][obj_key]
            return calcs_r_data

        # drop the data from the task_document and keep them in a separate dictionary (big_data_to_store)
        if (
            self._maggma_store_type is not None or use_gridfs
        ) and "calcs_reversed" in task_doc:
            for data_key in OBJ_NAMES:
                if data_key in task_doc["calcs_reversed"][0]:
                    big_data_to_store[data_key] = extract_from_calcs_reversed(data_key)

        # insert the task document
        t_id = self.insert(task_doc)

        if "calcs_reversed" in task_doc:
            # upload the data to a particular location and store the reference to that location in the task database
            for data_key, data_val in big_data_to_store.items():
                fs_di_, compression_type_ = self.insert_object(
                    use_gridfs=use_gridfs,
                    d=data_val,
                    collection=f"{data_key}_fs",
                    task_id=t_id,
                )
                self.collection.update_one(
                    {"task_id": t_id},
                    {
                        "$set": {
                            f"calcs_reversed.0.{data_key}_compression": compression_type_
                        }
                    },
                )
                self.collection.update_one(
                    {"task_id": t_id},
                    {"$set": {f"calcs_reversed.0.{data_key}_fs_id": fs_di_}},
                )
        return t_id

    def retrieve_task(self, task_id):
        """
        Retrieves a task document and unpacks the band structure and DOS as dict

        Args:
            task_id: (int) task_id to retrieve

        Returns:
            (dict) complete task document with BS + DOS included
        """
        task_doc = self.collection.find_one({"task_id": task_id})
        calc = task_doc["calcs_reversed"][0]
        if "bandstructure_fs_id" in calc:
            bs = self.get_band_structure(task_id)
            calc["bandstructure"] = bs.as_dict()
        if "dos_fs_id" in calc:
            dos = self.get_dos(task_id)
            calc["dos"] = dos.as_dict()
        if "chgcar_fs_id" in calc:
            chgcar = self.get_chgcar(task_id)
            calc["chgcar"] = chgcar
        if "aeccar0_fs_id" in calc:
            aeccar = self.get_aeccar(task_id)
            calc["aeccar0"] = aeccar["aeccar0"]
            calc["aeccar2"] = aeccar["aeccar2"]
        return task_doc

    def insert_object(self, use_gridfs, *args, **kwargs):
        """Insert the object into big object storage, try maggma_store if
        it is availible, if not try storing directly to girdfs.

        Args:
            use_gridfs (bool): Whether to store on gridfs if maggma storage is not availible

        Returns:
            fs_id: The id of the stored object
            compression_type: The compress method of the stored object

        Note: returns None when neither a Maggma store is configured nor
        use_gridfs is set; callers are expected to guard against that.
        """
        if self._maggma_store_type is not None:
            return self.insert_maggma_store(*args, **kwargs)
        elif use_gridfs:
            return self.insert_gridfs(*args, **kwargs)

    def insert_gridfs(self, d, collection="fs", compress=True, oid=None, task_id=None):
        """
        Insert the given document into GridFS.

        Args:
            d (dict): the document
            collection (string): the GridFS collection name
            compress (bool): Whether to compress the data or not
            oid (ObjectId()): the _id of the file; if specified, it must not already exist in GridFS
            task_id(int or str): the task_id to store into the gridfs metadata

        Returns:
            file id, the type of compression used.
        """
        oid = oid or ObjectId()
        compression_type = None

        # always perform the string conversion when inserting directly to gridfs
        d = json.dumps(d, cls=MontyEncoder)
        if compress:
            # NOTE: `compress` (a bool) is passed as the zlib level, i.e. level 1.
            d = zlib.compress(d.encode(), compress)
            compression_type = "zlib"

        fs = gridfs.GridFS(self.db, collection)
        m_data = {"compression": compression_type}
        if task_id:
            m_data["task_id"] = task_id
        # Putting task id in the metadata subdocument as per mongo specs:
        # https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst#terms
        fs_id = fs.put(d, _id=oid, metadata=m_data)

        return fs_id, compression_type

    def insert_maggma_store(
        self, d: Any, collection: str, oid: ObjectId = None, task_id: Any = None
    ):
        """
        Insert the given document into a Maggma store, first check if the store is already

        Args:
            d: the document to be stored
            collection (string): the name prefix for the maggma store
            oid (ObjectId()): the _id of the file; if specified, it must not already exist in GridFS
            task_id(int or str): the task_id to store into the gridfs metadata

        Returns:
            file id, the type of compression used.
        """
        oid = oid or str(ObjectId())
        compression_type = None

        doc = {
            "fs_id": oid,
            "maggma_store_type": self.get_store(collection).__class__.__name__,
            "compression": compression_type,
            "data": d,
        }

        search_keys = [
            "fs_id",
        ]
        if task_id is not None:
            search_keys.append("task_id")
            doc["task_id"] = str(task_id)
        elif isinstance(d, dict) and "task_id" in d:
            search_keys.append("task_id")
            doc["task_id"] = str(d["task_id"])

        # make sure the store is availible
        with self.get_store(collection) as store:
            ping_ = store.index._collection.database.command("ping")
            if ping_.get("ok", 0) != 1.0:
                raise ConnectionError(
                    f"Not connected to the index store of {self.__name__}.maggma_store[{collection}]"
                )
            if isinstance(store, S3Store):
                # TODO find some way to ping the aws service
                # ping_ = self._maggma_stores[collection].s3_bucket._name
                pass

            if store.compress:
                compression_type = "zlib"
                doc["compression"] = "zlib"

            store.update([doc], search_keys)

        return oid, compression_type

    def get_data_from_maggma_or_gridfs(self, task_id, key):
        """
        look for a task, then the object of type key associated with that task

        Returns:
            The data stored on object storage, typically a dictionary
        """
        m_task = self.collection.find_one({"task_id": task_id}, {"calcs_reversed": 1})
        fs_id = m_task["calcs_reversed"][0][f"{key}_fs_id"]
        obj_dict = None
        if self._maggma_store_type is not None:
            with self.get_store(f"{key}_fs") as store:
                obj_dict = store.query_one({"fs_id": fs_id})["data"]

        # if the object cannot be found then try using the grid_fs method
        if obj_dict is not None:
            return obj_dict
        else:
            fs = gridfs.GridFS(self.db, f"{key}_fs")
            bs_json = zlib.decompress(fs.get(fs_id).read())
            obj_dict = json.loads(bs_json.decode())
        return obj_dict

    def get_band_structure(self, task_id):
        """
        Read the BS data into a PMG BandStructure or BandStructureSymmLine object

        Args:
            task_id(int or str): the task_id containing the data

        Returns:
            BandStructure or BandStructureSymmLine
        """
        obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="bandstructure")
        if obj_dict["@class"] == "BandStructure":
            return BandStructure.from_dict(obj_dict)
        elif obj_dict["@class"] == "BandStructureSymmLine":
            return BandStructureSymmLine.from_dict(obj_dict)
        else:
            raise ValueError(
                "Unknown class for band structure! {}".format(obj_dict["@class"])
            )

    def get_dos(self, task_id):
        """
        Read the DOS data into a PMG DOS object

        Args:
            task_id(int or str): the task_id containing the data

        Returns:
            CompleteDos object
        """
        obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="dos")
        return CompleteDos.from_dict(obj_dict)

    @deprecated("No longer supported, use get_chgcar instead")
    def get_chgcar_string(self, task_id):
        pass

    def get_chgcar(self, task_id):
        """
        Read the CHGCAR data into a PMG Chgcar object

        Args:
            task_id(int or str): the task_id containing the data

        Returns:
            chgcar: Chgcar object
        """
        obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="chgcar")
        return Chgcar.from_dict(obj_dict)

    def get_aeccar(self, task_id, check_valid=True):
        """
        Read the AECCAR0 + AECCAR2 grid_fs data into a Chgcar object

        Args:
            task_id(int or str): the task_id containing the gridfs metadata
            check_valid (bool): make sure that the aeccar is positive definite

        Returns:
            {"aeccar0" : Chgcar, "aeccar2" : Chgcar}: dict of Chgcar objects

        Raises:
            ValueError: if check_valid is True and the summed charge density
                has negative values (a sign of a corrupted AECCAR).
        """
        obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="aeccar0")
        aeccar0 = Chgcar.from_dict(obj_dict)
        obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="aeccar2")
        aeccar2 = Chgcar.from_dict(obj_dict)
        if check_valid and (aeccar0.data["total"] + aeccar2.data["total"]).min() < 0:
            # BUGFIX: the ValueError was previously constructed but never
            # raised, so corrupted AECCAR data was silently accepted.
            raise ValueError(f"The AECCAR seems to be corrupted for task_id = {task_id}")
        return {"aeccar0": aeccar0, "aeccar2": aeccar2}

    def reset(self):
        """Wipe the tasks collection, the task-id counter, and all derived
        GridFS collections (DOS, band structure, boltztrap), then rebuild
        the indexes. Intended for testing / starting from scratch."""
        self.collection.delete_many({})
        self.db.counter.delete_one({"_id": "taskid"})
        self.db.counter.insert_one({"_id": "taskid", "c": 0})
        self.db.boltztrap.delete_many({})
        self.db.dos_fs.files.delete_many({})
        self.db.dos_fs.chunks.delete_many({})
        self.db.dos_boltztrap_fs.files.delete_many({})
        self.db.dos_boltztrap_fs.chunks.delete_many({})
        self.db.bandstructure_fs.files.delete_many({})
        self.db.bandstructure_fs.chunks.delete_many({})
        self.build_indexes()
# TODO: @albalu, @matk86, @computron - add BoltztrapCalcDB management here -computron, matk86
def put_file_in_gridfs(
    file_path, db, collection_name=None, compress=False, compression_type=None
):
    """Store the file at *file_path* in GridFS and return its id.

    Args:
        file_path (str): path to the file that should be saved.
        db (CalcDb): the interface with the database.
        collection_name (str): optionally overrides the collection name
            taken from ``db``.
        compress (bool): if True the payload is zlib-compressed before upload.
        compression_type (str): if the file is already compressed, the
            compression type to record in the metadata.

    Returns:
        ObjectId: the mongodb id of the saved file.
    """
    target_collection = db.collection if collection_name is None else collection_name

    with open(file_path, "rb") as handle:
        payload = handle.read()

    if compress:
        # `compress` (bool True) doubles as the zlib level argument, i.e. level 1.
        payload = zlib.compress(payload, compress)
        compression_type = "zlib"

    grid = gridfs.GridFS(db.db, target_collection)
    return grid.put(payload, metadata={"compression": compression_type})
| 36.179545 | 116 | 0.597839 | 13,819 | 0.868082 | 0 | 0 | 113 | 0.007098 | 0 | 0 | 7,241 | 0.454865 |
2a0e1ca9345564e019c758529a1cb3634a3a9fae | 296 | py | Python | ergoservice/dashboard/models.py | damirmedakovic/ergoapp | 38dec32aa2dbdd3ec27a8e911fc233dff7c7c499 | [
"MIT"
] | null | null | null | ergoservice/dashboard/models.py | damirmedakovic/ergoapp | 38dec32aa2dbdd3ec27a8e911fc233dff7c7c499 | [
"MIT"
] | 5 | 2020-06-05T23:32:29.000Z | 2021-06-10T18:58:29.000Z | ergoservice/dashboard/models.py | damirmedakovic/ergoapp | 38dec32aa2dbdd3ec27a8e911fc233dff7c7c499 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Case(models.Model):
    """A support/contact case submitted through the dashboard."""
    # Submitter's display name.
    name = models.CharField(max_length =100)
    # Contact email; unique, so one open case record per address.
    email = models.EmailField(max_length=100, unique=True)
    # Free-text message body; optional.
    message = models.CharField(max_length=500, blank=True)
    # Set automatically when the row is first created.
    created_at = models.DateTimeField(auto_now_add=True)
| 22.769231 | 55 | 0.773649 | 233 | 0.787162 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.087838 |
2a0fe0131e7b0ce2b0125eb0c7c09663dccc2793 | 3,032 | py | Python | BaseFeaturizer.py | dsbrown1331/HardRLWithYoutube | 33f72a5473c46841b77468b6902489c0fd812795 | [
"MIT"
] | 34 | 2018-10-16T16:09:07.000Z | 2021-10-19T06:21:03.000Z | BaseFeaturizer.py | dsbrown1331/HardRLWithYoutube | 33f72a5473c46841b77468b6902489c0fd812795 | [
"MIT"
] | 5 | 2019-01-31T16:31:13.000Z | 2019-06-26T01:13:04.000Z | BaseFeaturizer.py | dsbrown1331/HardRLWithYoutube | 33f72a5473c46841b77468b6902489c0fd812795 | [
"MIT"
] | 9 | 2018-12-04T11:39:43.000Z | 2021-04-02T19:17:11.000Z | from abc import ABCMeta, abstractmethod # Required to create an abstract class
import numpy as np
from scipy import spatial # For nearest neighbour lookup
import tensorflow as tf
class BaseFeaturizer(metaclass=ABCMeta):
    """Interface for featurizers

    Featurizers take images that are potentially from different videos of the same game,
    and encode them into an aligned embedding (the same frames from different videos
    have similar embeddings.)

    The mapping between images and embeddings is learned by training with an unsupervised task.
    Each featurizer type uses a different unsupervised task.

    Subclasses are expected to provide ``self.sess`` (a tf.Session),
    ``self.graph`` (a dict with at least 'feature_vector', 'is_training' and
    'state' tensors) and ``self.saver`` -- TODO confirm against concrete
    implementations, which are not visible here.
    """

    @abstractmethod
    def __init__(self):
        raise NotImplementedError

    @abstractmethod
    def train(self, dataset, epochs, batch_size):
        """Trains the featurizer.

        Dataset is a list of ndarrays; overall shape = (videos, frames, width, height, 1)
        """
        raise NotImplementedError

    def featurize(self, data, batch_size=32):
        """Encodes the data into an embedding

        Data: ndarray with shape (-1, width, height, 1)
        """
        # Split into roughly batch_size-sized chunks; max(..., 1) guards
        # against inputs smaller than one batch.
        splitted_data = np.array_split(data, max(data.shape[0] // batch_size, 1))
        feature_vectors = []
        for batch in splitted_data:
            # Pixel values are scaled from [0, 255] to [0, 1] before inference.
            normalized_batch = batch / 255
            feature_vectors.append(self.sess.run(self.graph['feature_vector'], {
                self.graph['is_training']: False, self.graph['state']: normalized_batch
            }))
        feature_vectors = np.concatenate(feature_vectors)
        return feature_vectors

    def save(self, save_path='default'):
        # Checkpoints under ./featurizers/<name>/<name>.ckpt.
        return self.saver.save(self.sess, './featurizers/{}/{}.ckpt'.format(save_path, save_path))

    def load(self, load_path='default'):
        self.saver.restore(self.sess, './featurizers/{}/{}.ckpt'.format(load_path, load_path))

    def evaluate_cycle_consistency(self, data, sequence_length=1024):
        '''Cycle-consistency evaluation as in "Playing hard exploration games by watching YouTube"'''
        # Draw two disjoint random sequences from the data.
        shuffled_data = np.copy(data)
        np.random.shuffle(shuffled_data)
        first_sequence = shuffled_data[:sequence_length]
        second_sequence = shuffled_data[sequence_length : 2 * sequence_length]

        # featurize sequences
        first_sequence_features = self.featurize(first_sequence)
        second_sequence_features = self.featurize(second_sequence)

        # Use k-dimensional trees to do nearest-neighbor lookup
        first_sequence_kd_tree = spatial.KDTree(first_sequence_features)
        second_sequence_kd_tree = spatial.KDTree(second_sequence_features)

        # A frame is cycle-consistent when mapping to its nearest neighbour in
        # the other sequence and back returns the original frame.
        consistent_cycles = 0
        for i in range(sequence_length):
            v = first_sequence_features[i]
            _, w_index = second_sequence_kd_tree.query(v)
            w = second_sequence_features[w_index]
            _, v_prime_index = first_sequence_kd_tree.query(w)
            if i == v_prime_index:
                consistent_cycles += 1

        return consistent_cycles / sequence_length
2a101d90d02ecf24ffa04bb3238bffdbcd08a712 | 897 | py | Python | 101-200/101-110/106-binaryTreeFromInPostOrder/binaryTreeFromInPostOrder.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | 101-200/101-110/106-binaryTreeFromInPostOrder/binaryTreeFromInPostOrder.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | 101-200/101-110/106-binaryTreeFromInPostOrder/binaryTreeFromInPostOrder.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        # New nodes start as leaves.
        self.left = self.right = None
class Solution(object):
    def buildTree(self, inorder, postorder):
        """Reconstruct a binary tree from its inorder and postorder traversals.

        :type inorder: List[int]
        :type postorder: List[int]
        :rtype: TreeNode

        The original implementation located each root in the inorder list with
        a linear scan (O(n) per node, O(n^2) total); a value->index map makes
        the whole construction O(n). Values are assumed unique, as required
        for the traversal pair to define a unique tree.
        """
        in_index_of = {val: i for i, val in enumerate(inorder)}

        def build(in_lo, post_root, length):
            # Build the subtree whose inorder slice starts at `in_lo` and
            # whose root is postorder[post_root]; `length` is the subtree size.
            if length == 0:
                return None
            root_val = postorder[post_root]
            root = TreeNode(root_val)
            # Number of nodes in the left subtree = offset of the root
            # within this inorder slice.
            left_len = in_index_of[root_val] - in_lo
            root.left = build(in_lo, post_root - (length - left_len), left_len)
            root.right = build(in_lo + left_len + 1, post_root - 1,
                               length - left_len - 1)
            return root

        return build(0, len(postorder) - 1, len(postorder))
2a108ae249c4fe5e282fb02c6ca69143383b433a | 477 | py | Python | csv2ofx/mappings/starling.py | mibanescu/csv2ofx | df9328bbf39348ebd6781701855c0b71592bb7ea | [
"MIT"
] | 153 | 2015-03-27T05:44:45.000Z | 2022-03-07T04:15:22.000Z | csv2ofx/mappings/starling.py | mibanescu/csv2ofx | df9328bbf39348ebd6781701855c0b71592bb7ea | [
"MIT"
] | 74 | 2015-11-29T13:03:05.000Z | 2022-01-23T20:00:24.000Z | csv2ofx/mappings/starling.py | mibanescu/csv2ofx | df9328bbf39348ebd6781701855c0b71592bb7ea | [
"MIT"
] | 94 | 2015-03-10T17:54:32.000Z | 2022-03-27T09:50:44.000Z | from __future__ import (
absolute_import, division, print_function, unicode_literals)
from operator import itemgetter
def fixdate(ds):
    """Swap the first two '/'-separated fields of *ds* and re-join with dots
    (e.g. '25/12/1999' -> '12.25.1999')."""
    parts = ds.split('/')
    # BUG (!?): field swap inherited from ubs-ch-fr.py -- kept for parity.
    return "%s.%s.%s" % (parts[1], parts[0], parts[2])
# csv2ofx field mapping for Starling bank CSV exports: each value extracts
# one OFX field from a row dict keyed by the CSV header names.
mapping = {
    'has_header': True,
    # Dates arrive as 'D/M/Y'; fixdate (defined above) rewrites them.
    'date': lambda tr: fixdate(tr['Date']),
    'amount': itemgetter('Amount (GBP)'),
    'desc': itemgetter('Reference'),
    'payee': itemgetter('Counter Party')
}
| 25.105263 | 64 | 0.639413 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.310273 |
2a112329bea0c737e5460d8489595b646350f0f7 | 732 | py | Python | logtf_analyser/tests/test_convert_id3_to_id64.py | cob16/logtf_analyzer | d7398da4fcf0dc64e2dfa7078e036a8189c81b77 | [
"MIT"
] | null | null | null | logtf_analyser/tests/test_convert_id3_to_id64.py | cob16/logtf_analyzer | d7398da4fcf0dc64e2dfa7078e036a8189c81b77 | [
"MIT"
] | null | null | null | logtf_analyser/tests/test_convert_id3_to_id64.py | cob16/logtf_analyzer | d7398da4fcf0dc64e2dfa7078e036a8189c81b77 | [
"MIT"
] | null | null | null | from unittest import TestCase
from parameterized import parameterized
from logtf_analyser.utils import convert_id3_to_id64
class TestConvert_id3_to_id64(TestCase):
    """Tests for convert_id3_to_id64 (SteamID3 -> SteamID64 conversion)."""

    def test_convert_id3_to_id64(self):
        # Known-good pair: account id 22202 maps to this 64-bit SteamID.
        id64 = convert_id3_to_id64("[U:1:22202]")
        self.assertEqual(id64, 76561197960287930)

    @parameterized.expand([
        ("missing prefix", "22202]"),
        ("missing suffix", "[U:1:22202"),
    ])
    def test_rase_value_error_if(self, name, input):
        # Malformed id3 strings must raise ValueError.
        with self.assertRaises(ValueError):
            id64 = convert_id3_to_id64(input)

    def test_rase_if_not_string(self):
        # Non-string inputs must raise TypeError.
        with self.assertRaises(TypeError):
            convert_id3_to_id64(122434234)
            convert_id3_to_id64(True)
2a114ce8d5ef6e2e85c7b5e7e1ac2b3ca629ebdb | 14,191 | py | Python | aiida_lsmo/workchains/cp2k_binding_energy.py | mbercx/aiida-lsmo | 0999ccec3e445cfd0dfd37a65ab013299a5f7d51 | [
"MIT"
] | 2 | 2020-06-23T12:50:02.000Z | 2022-03-21T08:11:10.000Z | aiida_lsmo/workchains/cp2k_binding_energy.py | mbercx/aiida-lsmo | 0999ccec3e445cfd0dfd37a65ab013299a5f7d51 | [
"MIT"
] | 59 | 2019-11-26T15:25:36.000Z | 2022-02-22T12:37:18.000Z | aiida_lsmo/workchains/cp2k_binding_energy.py | mbercx/aiida-lsmo | 0999ccec3e445cfd0dfd37a65ab013299a5f7d51 | [
"MIT"
] | 6 | 2019-12-04T14:43:28.000Z | 2022-01-20T18:08:12.000Z | # -*- coding: utf-8 -*-
"""Binding energy workchain"""
from copy import deepcopy
from aiida.common import AttributeDict
from aiida.engine import append_, while_, WorkChain, ToContext
from aiida.engine import calcfunction
from aiida.orm import Dict, Int, SinglefileData, Str, StructureData, Bool
from aiida.plugins import WorkflowFactory
from aiida_lsmo.utils import HARTREE2EV, dict_merge, aiida_structure_merge
from aiida_lsmo.utils.cp2k_utils import (ot_has_small_bandgap, get_bsse_section)
from .cp2k_multistage_protocols import load_isotherm_protocol
from .cp2k_multistage import get_initial_magnetization
Cp2kBaseWorkChain = WorkflowFactory('cp2k.base') # pylint: disable=invalid-name
@calcfunction
def get_output_parameters(**cp2k_out_dict):
    """Extracts important results to include in the output_parameters."""
    geo_opt = cp2k_out_dict['final_geo_opt']
    motion_keys = (
        'dispersion_energy_au', 'energy_au', 'max_grad_au', 'max_step_au',
        'rms_grad_au', 'rms_step_au', 'scf_converged',
    )
    output = {
        'motion_step_info': {
            key: geo_opt['motion_step_info'][key] for key in motion_keys
        },
        'motion_opt_converged': geo_opt['motion_opt_converged'],
    }

    bsse_keys = (
        'binding_energy_raw', 'binding_energy_corr', 'binding_energy_bsse',
        'binding_energy_unit', 'binding_energy_dispersion',
    )
    bsse_dict = cp2k_out_dict['bsse'].get_dict()
    for key in bsse_keys:
        # "binding_energy_dispersion" may be missing from the BSSE output
        if key in bsse_dict:
            output[key] = cp2k_out_dict['bsse'][key]

    return Dict(dict=output)
@calcfunction
def get_loaded_molecule(loaded_structure, input_molecule):
    """Return only the molecule's atoms in the unit cell as a StructureData object."""
    # The molecule atoms sit at the tail of the combined structure, so slicing
    # off the last len(molecule) atoms recovers the molecule geometry.
    tail_length = len(input_molecule.get_ase())
    molecule_atoms = loaded_structure.get_ase()[-tail_length:]
    return StructureData(ase=molecule_atoms)
class Cp2kBindingEnergyWorkChain(WorkChain):
"""Submits Cp2kBase work chain for structure + molecule system, first optimizing the geometry of the molecule and
later computing the BSSE corrected interaction energy.
This work chain is inspired to Cp2kMultistage, and shares some logics and data from it.
"""
@classmethod
def define(cls, spec):
    """Declare inputs, outline, exit codes, and outputs of the work chain."""
    super().define(spec)
    # Expose the underlying Cp2kBaseWorkChain inputs, except those this
    # work chain sets itself (structure, parameters, parser).
    spec.expose_inputs(Cp2kBaseWorkChain,
                       namespace='cp2k_base',
                       exclude=['cp2k.structure', 'cp2k.parameters', 'cp2k.metadata.options.parser_name'])
    spec.input('structure', valid_type=StructureData, help='Input structure that contains the molecule.')
    spec.input('molecule', valid_type=StructureData, help='Input molecule in the unit cell of the structure.')
    spec.input('protocol_tag',
               valid_type=Str,
               default=lambda: Str('standard'),
               required=False,
               help='The tag of the protocol tag.yaml. NOTE: only the settings are read, stage is set to GEO_OPT.')
    spec.input('protocol_yaml',
               valid_type=SinglefileData,
               required=False,
               help='Specify a custom yaml file. NOTE: only the settings are read, stage is set to GEO_OPT.')
    spec.input('protocol_modify',
               valid_type=Dict,
               default=lambda: Dict(dict={}),
               required=False,
               help='Specify custom settings that overvrite the yaml settings')
    spec.input('starting_settings_idx',
               valid_type=Int,
               default=lambda: Int(0),
               required=False,
               help='If idx>0 is chosen, jumps directly to overwrite settings_0 with settings_{idx}')
    spec.input(
        'cp2k_base.cp2k.parameters',
        valid_type=Dict,
        required=False,
        help='Specify custom CP2K settings to overwrite the input dictionary just before submitting the CalcJob')

    # Workchain outline: geometry-optimize the molecule inside the framework
    # (retrying with more robust settings until SCF behaves), then run BSSE.
    spec.outline(
        cls.setup,
        while_(cls.should_run_geo_opt)(
            cls.run_geo_opt,
            cls.inspect_and_update_settings_geo_opt,
        ),
        cls.run_bsse,
        cls.results,
    )

    # Exit codes
    spec.exit_code(901, 'ERROR_MISSING_INITIAL_SETTINGS',
                   'Specified starting_settings_idx that is not existing, or any in between 0 and idx is missing')
    spec.exit_code(902, 'ERROR_NO_MORE_SETTINGS',
                   'Settings for Stage0 are not ok but there are no more robust settings to try')
    spec.exit_code(903, 'ERROR_PARSING_OUTPUT',
                   'Something important was not printed correctly and the parsing of the first calculation failed')

    # Outputs
    spec.expose_outputs(Cp2kBaseWorkChain, include=['remote_folder'])
    spec.output('loaded_molecule', valid_type=StructureData, help='Molecule geometry in the unit cell.')
    spec.output('loaded_structure', valid_type=StructureData, help='Geometry of the system with both fragments.')
    spec.output('output_parameters', valid_type=Dict, help='Info regarding the binding energy of the system.')
def setup(self):
    """Setup initial parameters."""
    # Read yaml file selected as SinglefileData or chosen with the tag, and overwrite with custom modifications
    if 'protocol_yaml' in self.inputs:
        self.ctx.protocol = load_isotherm_protocol(singlefiledata=self.inputs.protocol_yaml)
    else:
        self.ctx.protocol = load_isotherm_protocol(tag=self.inputs.protocol_tag.value)
    dict_merge(self.ctx.protocol, self.inputs.protocol_modify.get_dict())
    # Initialize
    self.ctx.settings_ok = False
    self.ctx.settings_idx = 0
    self.ctx.settings_tag = 'settings_{}'.format(self.ctx.settings_idx)
    # Merged system = framework + molecule; the atom counts are reused later
    # for the FIXED_ATOMS constraint and the BSSE fragment definition.
    self.ctx.system = aiida_structure_merge(self.inputs.structure, self.inputs.molecule)
    self.ctx.natoms_structure = len(self.inputs.structure.get_ase())
    self.ctx.natoms_molecule = len(self.inputs.molecule.get_ase())
    # Generate input parameters
    self.ctx.cp2k_param = deepcopy(self.ctx.protocol['settings_0'])
    while self.inputs.starting_settings_idx > self.ctx.settings_idx:
        # overwrite until the desired starting setting are obtained
        self.ctx.settings_idx += 1
        self.ctx.settings_tag = 'settings_{}'.format(self.ctx.settings_idx)
        if self.ctx.settings_tag in self.ctx.protocol:
            dict_merge(self.ctx.cp2k_param, self.ctx.protocol[self.ctx.settings_tag])
        else:
            # requested settings index (or one in between) is missing in the protocol
            return self.exit_codes.ERROR_MISSING_INITIAL_SETTINGS  # pylint: disable=no-member
    # handle starting magnetization
    results = get_initial_magnetization(self.ctx.system, Dict(dict=self.ctx.protocol), with_ghost_atoms=Bool(True))
    self.ctx.system = results['structure']
    dict_merge(self.ctx.cp2k_param, results['cp2k_param'].get_dict())
    # Force a GEO_OPT run with the framework atoms kept fixed.
    dict_merge(
        self.ctx.cp2k_param,
        {
            'GLOBAL': {
                'RUN_TYPE': 'GEO_OPT'
            },
            'FORCE_EVAL': {
                'DFT': {
                    'SCF': {
                        'SCF_GUESS': 'ATOMIC'
                    }
                }
            },
            'MOTION': {
                'GEO_OPT': {
                    'MAX_ITER': 200
                },  # Can be adjusted from builder.cp2k_base.cp2k.parameters
                'CONSTRAINT': {
                    'FIXED_ATOMS': {
                        'LIST': '1..{}'.format(self.ctx.natoms_structure)
                    }
                }
            }
        })
def should_run_geo_opt(self):
    """Loop condition: keep optimizing while the settings are not accepted.

    True on the first iteration (nothing checked yet) and whenever the
    previous inspection flagged the settings as bad.
    """
    settings_accepted = self.ctx.settings_ok
    return not settings_accepted
def run_geo_opt(self):
    """Prepare inputs, submit and direct output to context."""
    self.ctx.base_inp = AttributeDict(self.exposed_inputs(Cp2kBaseWorkChain, 'cp2k_base'))
    self.ctx.base_inp['cp2k']['structure'] = self.ctx.system
    # Overwrite the generated input with the custom cp2k/parameters, update metadata and submit
    if 'parameters' in self.exposed_inputs(Cp2kBaseWorkChain, 'cp2k_base')['cp2k']:
        dict_merge(self.ctx.cp2k_param,
                   self.exposed_inputs(Cp2kBaseWorkChain, 'cp2k_base')['cp2k']['parameters'].get_dict())
    self.ctx.base_inp['cp2k']['parameters'] = Dict(dict=self.ctx.cp2k_param)
    self.ctx.base_inp['metadata'].update({'label': 'geo_opt_molecule', 'call_link_label': 'run_geo_opt_molecule'})
    self.ctx.base_inp['cp2k']['metadata'].update({'label': 'GEO_OPT'})
    # advanced parser extracts motion_step_info/bandgaps needed by the inspection step
    self.ctx.base_inp['cp2k']['metadata']['options']['parser_name'] = 'lsmo.cp2k_advanced_parser'
    running_base = self.submit(Cp2kBaseWorkChain, **self.ctx.base_inp)
    self.report('Optimize molecule position in the structure.')
    # append the submitted workchain to ctx.stages for later inspection
    return ToContext(stages=append_(running_base))
def inspect_and_update_settings_geo_opt(self):  # pylint: disable=inconsistent-return-statements
    """Inspect the settings_{idx} calculation and check if it is
    needed to update the settings and resubmit the calculation.

    Marks the settings as bad (and advances to the next ``settings_{idx}``
    of the protocol) when the output could not be parsed, the SCF did not
    converge, or the OT band gap is below the protocol threshold.

    :returns: an exit code on fatal problems, otherwise ``None``.
    """
    self.ctx.settings_ok = True
    # Settings/structure are bad: there are problems in parsing the output file
    # and, most probably, the calculation didn't even start the scf cycles
    if 'output_parameters' in self.ctx.stages[-1].outputs:
        cp2k_out = self.ctx.stages[-1].outputs.output_parameters
    else:
        self.report('ERROR_PARSING_OUTPUT')
        return self.exit_codes.ERROR_PARSING_OUTPUT  # pylint: disable=no-member
    # Settings are bad: the SCF did not converge in the final step
    if not cp2k_out['motion_step_info']['scf_converged'][-1]:
        self.report('BAD SETTINGS: the SCF did not converge')
        self.ctx.settings_ok = False
        self.ctx.settings_idx += 1
    else:
        # SCF converged, but the computed bandgap needs to be checked
        self.report('Bandgaps spin1/spin2: {:.3f} and {:.3f} ev'.format(cp2k_out['bandgap_spin1_au'] * HARTREE2EV,
                                                                        cp2k_out['bandgap_spin2_au'] * HARTREE2EV))
        bandgap_thr_ev = self.ctx.protocol['bandgap_thr_ev']
        if ot_has_small_bandgap(self.ctx.cp2k_param, cp2k_out, bandgap_thr_ev):
            self.report('BAD SETTINGS: band gap is < {:.3f} eV'.format(bandgap_thr_ev))
            self.ctx.settings_ok = False
            self.ctx.settings_idx += 1
    # Update the settings tag, check if it is available and overwrite
    if not self.ctx.settings_ok:
        # BUGFIX: the label previously used self.ctx.stage_tag, which is never
        # set anywhere in this workchain (only settings_tag is initialized in
        # setup), so this line raised AttributeError on the bad-settings path.
        cp2k_out.label = '{}_discard'.format(self.ctx.settings_tag)
        next_settings_tag = 'settings_{}'.format(self.ctx.settings_idx)
        if next_settings_tag in self.ctx.protocol:
            self.ctx.settings_tag = next_settings_tag
            dict_merge(self.ctx.cp2k_param, self.ctx.protocol[self.ctx.settings_tag])
        else:
            return self.exit_codes.ERROR_NO_MORE_SETTINGS  # pylint: disable=no-member
def run_bsse(self):
    """Update parameters and run BSSE calculation. BSSE assumes that the molecule has no charge and unit
    multiplicity: this can be customized from builder.cp2k_base.cp2k.parameters.
    """
    self.ctx.cp2k_param['GLOBAL']['RUN_TYPE'] = 'BSSE'
    # Fragment A = framework, fragment B = molecule (neutral, singlet by default)
    dict_merge(
        self.ctx.cp2k_param,
        get_bsse_section(natoms_a=self.ctx.natoms_structure,
                         natoms_b=self.ctx.natoms_molecule,
                         mult_a=self.ctx.cp2k_param['FORCE_EVAL']['DFT']['MULTIPLICITY'],
                         mult_b=1,
                         charge_a=0,
                         charge_b=0))
    # Overwrite the generated input with the custom cp2k/parameters, update structure and metadata, and submit
    if 'parameters' in self.exposed_inputs(Cp2kBaseWorkChain, 'cp2k_base')['cp2k']:
        dict_merge(self.ctx.cp2k_param,
                   self.exposed_inputs(Cp2kBaseWorkChain, 'cp2k_base')['cp2k']['parameters'].get_dict())
    self.ctx.base_inp['cp2k']['parameters'] = Dict(dict=self.ctx.cp2k_param)
    # start the BSSE run from the geometry optimized by the previous stage
    self.ctx.base_inp['cp2k']['structure'] = self.ctx.stages[-1].outputs.output_structure
    self.ctx.base_inp['metadata'].update({'label': 'bsse', 'call_link_label': 'run_bsse'})
    self.ctx.base_inp['cp2k']['metadata'].update({'label': 'BSSE'})
    self.ctx.base_inp['cp2k']['metadata']['options']['parser_name'] = 'lsmo.cp2k_bsse_parser'
    running_base = self.submit(Cp2kBaseWorkChain, **self.ctx.base_inp)
    self.report('Run BSSE calculation to compute corrected binding energy.')
    return ToContext(stages=append_(running_base))
def results(self):
    """Gather final outputs of the workchain."""
    # Expose the loaded_structure remote_folder
    self.out_many(self.exposed_outputs(self.ctx.stages[-2], Cp2kBaseWorkChain))
    # Return parameters, loaded structure and molecule
    # stages[-2] is the last accepted GEO_OPT run, stages[-1] the BSSE run
    cp2k_out_dict = {
        'final_geo_opt': self.ctx.stages[-2].outputs.output_parameters,
        'bsse': self.ctx.stages[-1].outputs.output_parameters
    }
    self.out('output_parameters', get_output_parameters(**cp2k_out_dict))
    self.out('loaded_structure', self.ctx.stages[-2].outputs.output_structure)
    # molecule geometry is extracted from the already-emitted loaded_structure
    self.out('loaded_molecule', get_loaded_molecule(self.outputs['loaded_structure'], self.inputs['molecule']))
    self.report('Completed! Ouput Dict<{}>, loaded StructureData<{}>, loaded molecule StructureData<{}>'.format(
        self.outputs['output_parameters'].pk, self.outputs['loaded_structure'].pk,
        self.outputs['loaded_molecule'].pk))
| 50.682143 | 120 | 0.640758 | 12,212 | 0.860545 | 0 | 0 | 4,276 | 0.301318 | 0 | 0 | 5,191 | 0.365795 |
2a11e07673a89b3a3b7bcf3915b416ec6d3dad8d | 5,716 | py | Python | apps/blog/editer_user.py | jhjguxin/blogserver | 7873e1a94d05859650ce88d50e241456f4ab43ed | [
"MIT"
] | 3 | 2015-11-17T16:16:41.000Z | 2018-07-07T13:34:18.000Z | apps/blog/editer_user.py | jhjguxin/blogserver | 7873e1a94d05859650ce88d50e241456f4ab43ed | [
"MIT"
] | null | null | null | apps/blog/editer_user.py | jhjguxin/blogserver | 7873e1a94d05859650ce88d50e241456f4ab43ed | [
"MIT"
] | 1 | 2020-06-08T02:46:07.000Z | 2020-06-08T02:46:07.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.forms import PasswordResetForm,PasswordChangeForm,AdminPasswordChangeForm
#from django.contrib.auth.models import User
from dynamicresponse.response import *
from forms import *
from models import *
from django.views.decorators.csrf import csrf_exempt
import pdb
"""
def users(request):
users = Users.objects.all()
return render_to_response("users.html", {
'users': users },
RequestContext(request))
def test_js(request):
return render_to_response('test_js.html', {}, RequestContext(request))
"""
"""
@ csrf_exempt
def index_user(request):
"Lists all blog user."
if request.method == 'POST':
user = User.objects.create(title=request.POST.get("title"), reviewer=request.POST.get("reviewer"), email=request.POST.get("email"),content=request.POST.get("content") )
user.save()
form = RegisterForm(request.POST, instance=user)
#users = Users.objects.all()
else:
form = RegisterForm(instance=None)
users = User.objects.all()
#pdb.set_trace()
return SerializeOrRender('blog/index_user.html', { 'users': users }, extra={ 'form': form })
"""
def users_list(request):
    """Render (or serialize) the list of all blog users."""
    all_users = User.objects.all()
    return SerializeOrRender('blog/users_list.html', {'users': all_users})
"""
def delete_user(request, user_id):
"Deletes the blog user."
user = get_object_or_404(User.objects.all(), pk=user_id)
if request.method == 'POST':
user.delete()
return SerializeOrRedirect(reverse('list_users'), {}, status=CR_DELETED)
else:
return SerializeOrRender('blog/delete_user.html', { 'user': user }, status=CR_CONFIRM)
"""
def register(request, user_id=None):
    """Display a registration form; create or update a blog user on POST.

    With *user_id* the existing user is edited, otherwise a new one is
    created.  Invalid POST data re-renders the bound form with errors.
    """
    user = get_object_or_404(User.objects.all(), pk=user_id) if user_id else None
    if request.method == 'POST':
        form = RegisterForm(request.POST, instance=user)
        if form.is_valid():
            user = form.save()
            return SerializeOrRedirect(reverse('users_list'), {'user': user})
    else:
        form = RegisterForm(instance=user)
    return SerializeOrRender('blog/user.html', {'user': user}, extra={'form': form})
def u_change(request, user_id=None):
    """Display a change form; update an existing blog user on valid POST."""
    user = get_object_or_404(User.objects.all(), pk=user_id) if user_id else None
    if request.method == 'POST':
        form = U_ChangeForm(request.POST, instance=user)
        if form.is_valid():
            user = form.save()
            return SerializeOrRedirect(reverse('users_list'), {'user': user})
    else:
        form = U_ChangeForm(instance=user)
    return SerializeOrRender('blog/user.html', {'user': user}, extra={'form': form})
def passwordchange(request, user_id=None):
    """Display a password-change form for a user; apply it on a valid POST.

    :param request: current HTTP request.
    :param user_id: primary key of the user whose password is changed;
        ``None`` yields a form bound to no user.
    """
    password_change_form = PasswordChangeForm
    user = None
    if user_id:
        # BUGFIX: previously this passed the *instance* returned by
        # User.objects.get() as the queryset argument of get_object_or_404,
        # which both leaked DoesNotExist (no Http404) and broke the lookup.
        user = get_object_or_404(User.objects.all(), pk=user_id)
    if request.method == 'POST':
        # PasswordChangeForm takes the user as its first positional argument
        form = PasswordChangeForm(user, request.POST)
        if form.is_valid():
            user = form.save()
            return SerializeOrRedirect(reverse('list_users'), {'user': user})
    else:
        form = password_change_form(user)
    return SerializeOrRender('blog/user.html', {'user': user}, extra={'form': form})
"""
def passwordchange(request, user_id=None):
"Displays, creates or updates a blog users."
user = None
if user_id:
user = get_object_or_404(User.objects.all(), pk=user_id)
olduser=User.objects.get(id=user_id)
if request.method == 'POST':
form = U_PasswordChangeForm(request.POST, instance=user)
if form.is_valid():
user = form.save()
return SerializeOrRedirect(reverse('list_users'), { 'user': user })
# else:
# form = U_PasswordChangeForm(instance=user)
return SerializeOrRender('blog/user.html', { 'user': user }, extra={ 'form': form })
"""
"""
def passwordchange(request, is_admin_site=False, template_name='blog/user.html',
email_template_name='registration/password_reset_email.html',
password_reset_form=PasswordResetForm, token_generator=default_token_generator,
post_reset_redirect=None):
if post_reset_redirect is None:
post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {}
opts['use_https'] = request.is_secure()
opts['token_generator'] = token_generator
if is_admin_site:
opts['domain_override'] = request.META['HTTP_HOST']
else:
opts['email_template_name'] = email_template_name
if not Site._meta.installed:
opts['domain_override'] = RequestSite(request).domain
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
return render_to_response(template_name, {
'form': form,
}, context_instance=RequestContext(request))
"""
| 33.426901 | 172 | 0.646956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,490 | 0.610567 |
2a145ab11f78cc5615c8d4ed402ea232a50a31a2 | 297 | py | Python | exercises/ja/solution_01_03_02.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | [
"MIT"
] | 2 | 2020-07-07T01:46:37.000Z | 2021-04-20T03:19:43.000Z | exercises/ja/solution_01_03_02.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | [
"MIT"
] | null | null | null | exercises/ja/solution_01_03_02.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | [
"MIT"
] | null | null | null | # spaCyをインポートし、日本語のnlpオブジェクトを作成
import spacy
nlp = spacy.blank("ja")
# テキストを処理
doc = nlp("私はツリーカンガルーとイッカクが好きです。")
# 「ツリーカンガルー」のスライスを選択
tree_kangaroos = doc[2:4]
print(tree_kangaroos.text)
# 「ツリーカンガルーとイッカク」のスライスを選択
tree_kangaroos_and_narwhals = doc[2:6]
print(tree_kangaroos_and_narwhals.text)
| 18.5625 | 39 | 0.787879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.612159 |
2a15f841c908a1dfe49e0cb148931bd74de7aa9f | 841 | py | Python | Source/Tools/BindTool/writer.py | ssinai1/rbfx | e69a7093d153667e3d8dd3270449d3d594c1c1a8 | [
"MIT"
] | 441 | 2018-12-26T14:50:23.000Z | 2021-11-05T03:13:27.000Z | Source/Tools/BindTool/writer.py | ssinai1/rbfx | e69a7093d153667e3d8dd3270449d3d594c1c1a8 | [
"MIT"
] | 221 | 2018-12-29T17:40:23.000Z | 2021-11-06T21:41:55.000Z | Source/Tools/BindTool/writer.py | ssinai1/rbfx | e69a7093d153667e3d8dd3270449d3d594c1c1a8 | [
"MIT"
] | 101 | 2018-12-29T13:08:10.000Z | 2021-11-02T09:58:37.000Z |
class InterfaceWriter(object):
    """Routes interface-file snippets into per-subsystem ``.i`` files.

    Text written for (*key*, *subsystem*) lands in
    ``<output_path>/_<key>_<subsystem>.i`` where *key* is ``pre`` or
    ``post``; file handles are opened lazily on first write.
    """

    def __init__(self, output_path):
        self._output_path_template = output_path + '/_{key}_{subsystem}.i'
        # one lazily populated {subsystem: open file} map per section
        self._fp = {'pre': {}, 'post': {}}

    def _write(self, key, subsystem, text):
        """Append *text* plus a newline to the file for *key*/*subsystem*."""
        subsystem = subsystem.lower()
        handle = self._fp[key].get(subsystem)
        if handle is None:
            path = self._output_path_template.format(key=key, subsystem=subsystem)
            handle = self._fp[key][subsystem] = open(path, 'w+')
        handle.write(text)
        handle.write('\n')

    def write_pre(self, subsystem, text):
        """Write *text* into the 'pre' section of *subsystem*."""
        self._write('pre', subsystem, text)

    def write_post(self, subsystem, text):
        """Write *text* into the 'post' section of *subsystem*."""
        self._write('post', subsystem, text)

    def close(self):
        """Close every file opened so far."""
        for section in self._fp.values():
            for handle in section.values():
                handle.close()
2a186bff39afa1d9b54a01c7714b0ec21b096dfa | 10,757 | py | Python | setix/backends/b_numpy.py | dustymugs/python-setix | 1ea5e122c616abffcac64f770c5f6f5644240a28 | [
"MIT"
] | 3 | 2016-01-26T07:18:54.000Z | 2018-08-02T01:43:39.000Z | setix/backends/b_numpy.py | dustymugs/python-setix | 1ea5e122c616abffcac64f770c5f6f5644240a28 | [
"MIT"
] | null | null | null | setix/backends/b_numpy.py | dustymugs/python-setix | 1ea5e122c616abffcac64f770c5f6f5644240a28 | [
"MIT"
] | 1 | 2020-11-27T16:32:40.000Z | 2020-11-27T16:32:40.000Z | import numpy
import numbers
import math
import struct
from six.moves import zip
from .. import SetIntersectionIndexBase, SearchResults, EmptySearchResults
def _check_numpy ():
missing = []
for fn in ("zeros", "empty", "digitize", "resize", "concatenate", "unique", "bincount", "argsort"):
if not getattr (numpy, fn, False):
missing.append (fn)
if missing:
raise ImportError ("setix.backends.numpy: required functions not provided by installed numpy: " + ", ".join(missing))
_check_numpy ()
class SetIntersectionIndex(SetIntersectionIndexBase):
    """Numpy-backed implementation of a set intersection index.

    Every distinct symbol is interned to an integer id.  For each symbol a
    "bucket" ``[symbol_id, fill, array_of_set_ids]`` records the sets that
    contain it (the array is over-allocated; only the first ``fill`` entries
    are valid).  Sets with an identical symbol signature are deduplicated:
    they share one payload list, keyed by the packed binary form of their
    sorted symbol ids.
    """

    def __init__(self,
                 max_sets=2**32,
                 max_symbols=2**16,
                 init_bucket_size=16,
                 support_most_frequent=True,
                 support_find_similar=True):
        """Create an empty index.

        :param max_sets: upper bound on distinct sets; picks the narrowest
            sufficient numpy dtype for set ids.
        :param max_symbols: upper bound on distinct symbols; picks the dtype
            and struct format for symbol ids.
        :param init_bucket_size: initial capacity of each per-symbol bucket.
        :param support_most_frequent: maintain per-symbol counters for
            :meth:`most_frequent` (extra work on every add).
        :param support_find_similar: maintain per-set sizes for
            :meth:`find_similar`.
        """
        self._sets = numpy.empty(64, dtype="object")
        self._num_sets = 0
        self._symbols = []
        self._index = {}
        self._sets_by_sig = {}
        self._init_bs = init_bucket_size
        self._packers = {}
        self._support_most_frequent = bool(support_most_frequent)
        self._support_find_similar = bool(support_find_similar)

        if not isinstance(max_sets, numbers.Number):
            raise TypeError("max_sets")
        if not isinstance(max_symbols, numbers.Number):
            raise TypeError("max_symbols")
        if not isinstance(init_bucket_size, numbers.Number):
            raise TypeError("init_bucket_size")
        if max_sets < 1 or max_sets >= 2**64:
            raise ValueError("max_sets")
        if max_symbols < 1 or max_symbols >= 2**64:
            # BUGFIX: the message previously said "max_sets" for this check
            raise ValueError("max_symbols")
        if init_bucket_size < 4:
            raise ValueError("init_bucket_size")

        # Choose the narrowest integer dtype able to hold the id ranges.
        set_bits = int(round(math.log(max_sets, 2)))
        symbol_bits = int(round(math.log(max_symbols, 2)))
        sz = (9, 17, 33, 65)
        dt = (numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64)
        sf = ("B", "H", "I", "L")

        x = numpy.digitize([set_bits], sz)[0]
        self._dtype_sets = dt[x]
        self._max_sets = 2 ** (sz[x] - 1)

        x = numpy.digitize([symbol_bits], sz)[0]
        self._dtype_symbols = dt[x]
        self._max_symbols = 2 ** (sz[x] - 1)
        self._struct_symbols = sf[x]

        if support_find_similar:
            self._set_sizes = numpy.zeros(8 * init_bucket_size, dtype=self._dtype_symbols)
        if support_most_frequent:
            self._symbol_counts = numpy.zeros(8 * init_bucket_size, dtype=self._dtype_sets)

    @property
    def symbol_count(self):
        """Number of distinct symbols registered so far."""
        return len(self._symbols)

    @property
    def set_count(self):
        """Number of distinct sets (duplicate signatures are merged)."""
        return self._num_sets

    @property
    def symbols(self):
        """All known symbols as a tuple, in registration order."""
        return tuple(self._symbols)

    @property
    def payloads(self):
        """Iterate over every payload of every stored set."""
        # BUGFIX: _sets is over-allocated; the unused tail slots hold None
        # and are not iterable, so only the populated prefix may be visited.
        for s in self._sets[:self._num_sets]:
            for pl in s:
                yield pl

    @property
    def supports_most_frequent(self):
        return self._support_most_frequent

    @property
    def supports_find_similar(self):
        return self._support_find_similar

    @property
    def max_sets(self):
        return self._max_sets

    @property
    def max_symbols(self):
        return self._max_symbols

    def __getstate__(self):
        # the cached struct packers are not picklable; drop them and rebuild
        state = dict(self.__dict__)
        del state["_packers"]
        return state

    def __setstate__(self, state):
        # state becomes the instance __dict__, so mutating it below
        # re-creates the packer cache on the restored instance
        self.__dict__ = state
        state["_packers"] = {}

    def add(self, iterable, payload=SetIntersectionIndexBase._SENTINEL):
        """Add the symbol set *iterable* with an attached *payload*.

        When *payload* is omitted the iterable itself is stored.  Sets with
        an identical symbol signature share one entry; their payloads are
        appended to the same list.
        """
        if payload is self._SENTINEL:
            payload = iterable

        max_sets = self._max_sets
        max_symbols = self._max_symbols
        init_bs = self._init_bs
        symbols = self._symbols
        index = self._index

        buckets = []  # list of per-symbol buckets this set belongs in
        sig = set()   # set of symbol ids for identifying the set
        num_syms = len(symbols)

        for symbol in iterable:
            bucket = index.get(symbol)
            if bucket is None:
                # register new symbol
                id = len(symbols)
                if id >= max_symbols:
                    raise RuntimeError("index full: maximum number of symbols reached")
                bucket = index[symbol] = [id, 0, numpy.zeros(init_bs, dtype=self._dtype_sets)]
                symbols.append(symbol)
            buckets.append(bucket)
            sig.add(bucket[0])

        sig = sorted(sig)

        # packed signature used as a key in self._sets_by_sig;
        # this saves memory compared to a tuple of ints
        lsig = len(sig)
        packer = self._packers[lsig] = self._packers.get(lsig) or struct.Struct(self._struct_symbols * lsig).pack
        ssig = packer(*sig)

        S = self._sets_by_sig.get(ssig)
        if S is None:
            # register new set
            sid = self._num_sets
            if sid >= max_sets:
                raise RuntimeError("index full: maximum number of sets reached")

            self._num_sets += 1
            sets = self._sets
            if sid >= sets.size:
                sets = self._sets = numpy.resize(sets, int(sid * 1.25))

            S = self._sets_by_sig[ssig] = []
            sets[sid] = S

            if self._support_find_similar:
                if self._set_sizes.size <= sid:
                    self._set_sizes = numpy.resize(self._set_sizes, int(sid * 1.25))
                self._set_sizes[sid] = len(buckets)

            # add set to per-symbol buckets
            for bucket in buckets:
                arr = bucket[2]
                idx = bucket[1]
                if arr.size <= idx:
                    arr = bucket[2] = numpy.resize(arr, int(idx * 1.25))
                arr[idx] = sid
                bucket[1] += 1

            if self._support_most_frequent:
                # update counts of symbol occurrences
                symbol_counts = self._symbol_counts
                new_syms = len(symbols)
                if new_syms > num_syms:
                    if new_syms >= symbol_counts.size:
                        self._symbol_counts = symbol_counts = numpy.resize(symbol_counts, int(new_syms * 1.25))
                    # BUGFIX: clear the counters of newly registered symbols
                    # unconditionally.  numpy.resize fills the enlarged tail
                    # with repeated copies of the old data, so slots exposed
                    # by an earlier resize may contain stale non-zero counts.
                    symbol_counts[num_syms:new_syms] = 0
                if len(sig) == len(buckets):  # no repetitions
                    symbol_counts[numpy.array(sig, dtype=self._dtype_symbols)] += 1
                else:
                    for bucket in buckets:
                        symbol_counts[bucket[0]] += 1

        S.append(payload)

    def _find(self, iterable):
        """Collect candidate sets for the query symbols.

        :returns: ``(L, sids, indices, counts)``: query length, candidate
            set ids, inverse indices, and per-set shared-symbol counts.
        """
        buckets = []
        sig = set()
        occurrences = []
        L = 0

        for symbol in iterable:
            L += 1
            bucket = self._index.get(symbol)
            if bucket is not None:
                buckets.append(bucket)
                sig.add(bucket[0])
                if bucket[1]:
                    # only the filled prefix of the bucket array is valid
                    occurrences.append(bucket[2][0:bucket[1]])

        if occurrences:
            sids, indices = numpy.unique(numpy.concatenate(occurrences), return_inverse=True)
            counts = numpy.bincount(indices)
            return L, sids, indices, counts
        else:
            return L, [], [], []

    class SearchResults(SearchResults):
        """Lazily sorted result set yielding (score, payload_list) pairs."""

        def __init__(self, sids, scores, sets):
            self._sids = sids
            self._scores = scores
            self._sets = sets
            self._sort = None
            self._list = None
            self._list_for = None

        def get(self, max_results=None):
            """Return (score, payloads) pairs, best score first."""
            scores = self._scores
            # BUGFIX: caching was written as `self._sort or numpy.argsort(...)`,
            # which raises "truth value is ambiguous" once _sort is a cached
            # multi-element array; test for None explicitly instead.
            if self._sort is None:
                self._sort = numpy.argsort(scores)
            sort = self._sort
            if max_results is not None:
                sort = sort[-max_results:]
            sort = sort[::-1]
            r_sids = self._sids[sort]
            r_counts = scores[sort]
            return zip(r_counts, self._sets[r_sids])

        def __len__(self):
            return self._scores.size

    def find(self, iterable, threshold=1, max_results=None):
        """Find sets sharing at least *threshold* symbols with the query.

        A negative threshold is relative to the query length, e.g. ``-1``
        means "all but one query symbol must match".
        """
        if not isinstance(threshold, numbers.Number):
            raise TypeError("threshold")
        if threshold < 1 and threshold >= 0:
            raise ValueError("threshold")

        L, sids, indices, counts = self._find(iterable)

        if threshold < 0:
            threshold = L + threshold
            if threshold < 1:
                raise ValueError("threshold")

        if len(counts) == 0:
            return EmptySearchResults()

        mask = counts >= threshold
        counts = counts[mask]
        sids = sids[mask]

        return self.SearchResults(sids, counts, self._sets)

    def find_similar(self, iterable, threshold=0.3):
        """Find sets whose similarity to the query is at least *threshold*.

        Similarity is Jaccard-like: intersection size divided by
        ``|set| + |query| - intersection``.
        """
        if not isinstance(threshold, numbers.Number):
            raise TypeError("threshold")
        if threshold > 1 or not (threshold > 0):
            raise ValueError("threshold")
        if not self._support_find_similar:
            raise RuntimeError("find_similar support disabled")

        L, sids, indices, counts = self._find(iterable)
        if len(counts) == 0:
            return EmptySearchResults()

        # intersection / union; L counts the raw query symbols
        smls = counts / (self._set_sizes[sids] + (L * 1.0) - counts)
        mask = smls >= threshold
        smls = smls[mask]
        sids = sids[mask]

        return self.SearchResults(sids, smls, self._sets)

    def most_frequent(self, threshold=2.0/3.0, max_results=None, with_counts=False):
        """Yield the most frequently occurring symbols, most frequent first.

        Stops as soon as a symbol's count drops below ``threshold`` times
        the maximum count.  With *with_counts* yields (symbol, count) pairs.
        """
        if not self._support_most_frequent:
            raise RuntimeError("most_frequent support disabled")

        counts = self._symbol_counts
        if self._num_sets == 0:
            return

        sort = numpy.argsort(counts[0:len(self._symbols)])
        limit = counts[sort[-1]] * 1.0 * threshold
        symbols = self._symbols

        if max_results:
            sort = sort[-max_results:]

        if with_counts:
            for x in sort[::-1]:
                count = counts[x]
                if count < limit:
                    break
                yield (symbols[x], count)
        else:
            for x in sort[::-1]:
                count = counts[x]
                if count < limit:
                    break
                yield symbols[x]
| 32.59697 | 125 | 0.524217 | 10,223 | 0.950358 | 996 | 0.092591 | 635 | 0.059031 | 0 | 0 | 786 | 0.073069 |
2a1b32d6549e142ab87a8c6dd798f43bdbbe5e9f | 685 | py | Python | gitzer/director/migrations/0001_initial.py | IgnisDa/Gitzer | 6810ee7ca6b9d86d7d87745342850a8be58d1865 | [
"Apache-2.0"
] | 2 | 2021-01-24T07:44:28.000Z | 2021-01-30T22:12:43.000Z | gitzer/director/migrations/0001_initial.py | IgnisDa/Gitzer | 6810ee7ca6b9d86d7d87745342850a8be58d1865 | [
"Apache-2.0"
] | null | null | null | gitzer/director/migrations/0001_initial.py | IgnisDa/Gitzer | 6810ee7ca6b9d86d7d87745342850a8be58d1865 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-16 08:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration of the app: nothing to depend on yet.
    initial = True

    dependencies = [
    ]

    operations = [
        # Repository: records local git repositories tracked by Gitzer.
        migrations.CreateModel(
            name='Repository',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('path', models.CharField(help_text='The absolute path to the local git repository.', max_length=500)),
                ('created_on', models.DateTimeField(auto_now_add=True, help_text='The date and time this repository was logged on.')),
            ],
        ),
    ]
2a1c70b9d1e40d06c382598343320ee23443054f | 2,668 | py | Python | model_based/svms/test_model_compression.py | vohoaiviet/tag-image-retrieval | 0a257560581f702cd394f3f28c9e0f6202827ce8 | [
"MIT"
] | 50 | 2015-11-04T15:53:09.000Z | 2022-01-03T14:46:17.000Z | model_based/svms/test_model_compression.py | vohoaiviet/tag-image-retrieval | 0a257560581f702cd394f3f28c9e0f6202827ce8 | [
"MIT"
] | 2 | 2018-03-07T09:51:50.000Z | 2018-10-13T11:05:13.000Z | model_based/svms/test_model_compression.py | vohoaiviet/tag-image-retrieval | 0a257560581f702cd394f3f28c9e0f6202827ce8 | [
"MIT"
] | 17 | 2015-10-26T03:41:49.000Z | 2021-08-23T08:11:05.000Z |
import sys
import os
import time
from basic.common import ROOT_PATH,checkToSkip,makedirsforfile
from basic.util import readImageSet
from simpleknn.bigfile import BigFile, StreamFile
from basic.annotationtable import readConcepts,readAnnotationsFrom
from basic.metric import getScorer
if __name__ == "__main__":
    # Optional command-line override of the data root directory
    try:
        rootpath = sys.argv[1]
    except:
        rootpath = ROOT_PATH

    metric = 'AP'
    feature = "dsift"

    trainCollection = 'voc2008train'
    trainAnnotationName = 'conceptsvoc2008train.txt'
    testCollection = 'voc2008val'
    testAnnotationName = 'conceptsvoc2008val.txt'
    testset = testCollection

    modelName = 'fik50'
    # NOTE(review): immediately overridden — only 'fastlinear' is evaluated
    modelName = 'fastlinear'

    # Pick the model loader matching the selected model type
    if 'fastlinear' == modelName:
        from fastlinear.fastlinear import fastlinear_load_model as load_model
    else:
        from fiksvm.fiksvm import fiksvm_load_model as load_model

    scorer = getScorer(metric)

    imset = readImageSet(testCollection, testset, rootpath=rootpath)
    concepts = readConcepts(testCollection, testAnnotationName, rootpath=rootpath)
    feat_dir = os.path.join(rootpath, testCollection, "FeatureData", feature)
    feat_file = BigFile(feat_dir)

    # Load all test-image features into memory at once
    _renamed, _vectors = feat_file.read(imset)

    nr_of_images = len(_renamed)
    nr_of_concepts = len(concepts)

    mAP = 0.0  # NOTE(review): never updated below — appears to be leftover
    models = [None] * len(concepts)  # NOTE(review): never filled — leftover

    for i, concept in enumerate(concepts):
        # model1: plain per-concept model
        model_file_name = os.path.join(rootpath, trainCollection, 'Models', trainAnnotationName, feature, modelName, '%s.model' % concept)
        model1 = load_model(model_file_name)
        (pA, pB) = model1.get_probAB()
        # model2: same model fused with model1 (weights 0.8 / 1) to exercise add_fastsvm
        model2 = load_model(model_file_name)
        model2.add_fastsvm(model1, 0.8, 1)

        names, labels = readAnnotationsFrom(testCollection, testAnnotationName, concept, rootpath=rootpath)
        name2label = dict(zip(names, labels))

        ranklist1 = [(_id, model1.predict(_vec)) for _id, _vec in zip(_renamed, _vectors)]
        ranklist2 = [(_id, model2.predict(_vec)) for _id, _vec in zip(_renamed, _vectors)]

        # model3: model trained on the bagged annotation set
        model_file_name = os.path.join(rootpath, trainCollection, 'Models', 'bag' + trainAnnotationName, feature, modelName, '%s.model' % concept)
        model3 = load_model(model_file_name)
        ranklist3 = [(_id, model3.predict(_vec)) for _id, _vec in zip(_renamed, _vectors)]

        # Print the per-concept score (AP) of the three models (Python 2 print)
        print concept,
        for ranklist in [ranklist1, ranklist2, ranklist3]:
            ranklist.sort(key=lambda v: v[1], reverse=True)
            sorted_labels = [name2label[x[0]] for x in ranklist if x[0] in name2label]
            print '%.3f' % scorer.score(sorted_labels),
        print ''
2a1e4ea3df7a438ae1d54be3a54f23a971889c67 | 24,369 | py | Python | tonnikala/languages/python/generator.py | tetframework/Tonnikala | d69e7c71d323221ad749082c4e653654bd63e819 | [
"Apache-2.0"
] | 13 | 2015-11-09T21:11:49.000Z | 2021-12-26T20:36:02.000Z | tonnikala/languages/python/generator.py | tetframework/Tonnikala | d69e7c71d323221ad749082c4e653654bd63e819 | [
"Apache-2.0"
] | 3 | 2016-10-04T19:51:38.000Z | 2017-08-28T05:58:39.000Z | tonnikala/languages/python/generator.py | tetframework/Tonnikala | d69e7c71d323221ad749082c4e653654bd63e819 | [
"Apache-2.0"
] | 3 | 2015-09-06T13:44:43.000Z | 2021-01-25T10:04:26.000Z | import ast
from ast import *
from collections.abc import Iterable
from .astalyzer import FreeVarFinder
from ..base import LanguageNode, ComplexNode, BaseGenerator
from ...helpers import StringWithLocation
from ...runtime.debug import TemplateSyntaxError
import sys
# Detect debug (Py_DEBUG) interpreter builds; sysconfig may be unavailable
# in some embedded/stripped environments, hence the guard.
try:  # pragma: no cover
    import sysconfig

    HAS_ASSERT = bool(sysconfig.get_config_var('Py_DEBUG'))
    del sysconfig
except ImportError:  # pragma: no cover
    HAS_ASSERT = False

# Monotonic counter backing gen_name(); module-global so every generated
# identifier is unique within the process.
name_counter = 0

# Names always resolved as builtins, never as template variables.
ALWAYS_BUILTINS = '''
False
True
None
'''.split()
def simple_call(func, args=None):
return Call(func=func, args=args or [], keywords=[], starargs=None,
kwargs=None)
def create_argument_list(arguments):
    """Turn an iterable of parameter names into un-annotated ``arg`` nodes."""
    result = []
    for parameter_name in arguments:
        result.append(arg(arg=parameter_name, annotation=None))
    return result
def simple_function_def(name, arguments=()):
arguments = create_argument_list(arguments)
if sys.version_info >= (3, 8):
extra = {'posonlyargs': []}
else:
extra = {}
return FunctionDef(
name=name,
args=ast.arguments(
args=arguments,
vararg=None,
varargannotation=None,
kwonlyargs=[],
kwarg=None,
kwargannotation=None,
defaults=[],
kw_defaults=[],
**extra
),
body=[Pass()],
decorator_list=[],
returns=None
)
def NameX(id, store=False):
return Name(id=id, ctx=Load() if not store else Store())
def adjust_locations(ast_node, first_lineno, first_offset):
"""
Adjust the locations of the ast nodes, offsetting them
to the new lineno and column offset
"""
line_delta = first_lineno - 1
def _fix(node):
if 'lineno' in node._attributes:
lineno = node.lineno
col = node.col_offset
# adjust the offset on the first line
if lineno == 1:
col += first_offset
lineno += line_delta
node.lineno = lineno
node.col_offset = col
for child in iter_child_nodes(node):
_fix(child)
_fix(ast_node)
def get_fragment_ast(expression, mode='eval', adjust=(0, 0)):
    """Parse a template expression string into an AST fragment.

    Non-string inputs are assumed to be pre-built AST and returned as-is.
    Node locations are shifted to the expression's template position (plus
    *adjust*); syntax errors are re-raised as ``TemplateSyntaxError`` with
    the template line number.
    """
    if not isinstance(expression, str):
        # already an AST fragment: pass through untouched
        return expression

    row, col = getattr(expression, 'position', (1, 0))
    row += adjust[0]
    col += adjust[1]

    source = expression if expression[-1:] == '\n' else expression + '\n'

    parsed = None
    failure = None
    try:
        parsed = ast.parse(source, mode=mode)
    except SyntaxError as exc:
        # translate the parser's line number into template coordinates
        failure = TemplateSyntaxError(exc.msg, lineno=exc.lineno + row - 1)

    if parsed is None:
        raise failure

    adjust_locations(parsed, row, col)
    return parsed.body
def gen_name(typename=None):
global name_counter
name_counter += 1
if typename:
return "__TK__typed__%s__%d__" % (typename, name_counter)
else:
return "__TK_%d__" % (name_counter)
def static_eval(expr):
if isinstance(expr, UnaryOp) and isinstance(expr.op, Not):
return not static_eval(expr.operand)
return literal_eval(expr)
def static_expr_to_bool(expr):
try:
return bool(static_eval(expr))
except:
return None
class PythonNode(LanguageNode):
is_top_level = False
def generate_output_ast(self, code, generator, parent, escape=False,
position=None):
func = Name(id='__TK__output', ctx=Load())
if not isinstance(code, list):
code = [code]
rv = []
for i in code:
if position is not None:
i.lineno, i.col_offset = position
e = Expr(simple_call(func, [i]))
e.output_args = [i]
rv.append(e)
return rv
def make_buffer_frame(self, body):
new_body = []
new_body.append(Assign(
targets=[
NameX('__TK__output', store=True),
],
value=simple_call(
NameX('__TK__mkbuffer')
)
))
new_body.extend(body)
new_body.append(Return(value=NameX('__TK__output')))
return new_body
def make_function(self, name, body, add_buffer=False, arguments=()):
# ensure that the function name is an str
func = simple_function_def(str(name), arguments=arguments)
new_body = func.body = []
if add_buffer:
new_body.extend(self.make_buffer_frame(body))
else:
new_body.extend(body)
if not new_body:
new_body.append(Pass())
return func
def generate_varscope(self, body):
name = gen_name('variable_scope')
rv = [
self.make_function(name, body,
arguments=['__TK__output', '__TK__escape']),
Expr(simple_call(NameX(name),
[NameX('__TK__output'), NameX('__TK__escape')]))
]
return rv
class PyOutputNode(PythonNode):
    """Emits a literal chunk of template text."""

    def __init__(self, text):
        super(PyOutputNode, self).__init__()
        self.text = text

    def get_expression(self):
        """The literal text as a ``Str`` constant node."""
        return Str(s=self.text)

    def get_expressions(self):
        """Single-element list form of :meth:`get_expression`."""
        return [self.get_expression()]

    def generate_ast(self, generator, parent):
        """Emit ``__TK__output(<literal text>)``."""
        expression = self.get_expression()
        return self.generate_output_ast(expression, generator, parent)
class PyTranslatableOutputNode(PyOutputNode):
    """A constant text chunk routed through gettext (or egettext when escaping)."""

    def __init__(self, text, needs_escape=False):
        super(PyTranslatableOutputNode, self).__init__(text)
        self.needs_escape = needs_escape

    def get_expressions(self):
        return [self.get_expression()]

    def get_expression(self):
        # escape-aware variant calls egettext, plain text calls gettext
        func_name = 'egettext' if self.needs_escape else 'gettext'
        return simple_call(
            NameX(func_name),
            [Str(s=self.text)],
        )
class PyExpressionNode(PythonNode):
    """Emits the escaped value of an embedded template expression."""

    def __init__(self, expression):
        super(PyExpressionNode, self).__init__()
        self.expr = expression

    def get_expressions(self):
        return [self.get_expression()]

    def get_expression(self):
        # wrap the raw expression in the runtime escape function
        raw = self.get_unescaped_expression()
        return simple_call(NameX('__TK__escape'), [raw])

    def get_unescaped_expression(self):
        return get_fragment_ast(self.expr)

    def generate_ast(self, generator, parent):
        escaped = self.get_expression()
        return self.generate_output_ast(escaped, generator, parent)
class PyCodeNode(PythonNode):
    """Inlines a verbatim block of Python source into the generated module."""

    def __init__(self, source):
        super(PyCodeNode, self).__init__()
        self.source = source

    def generate_ast(self, generator, parent):
        # 'exec' mode: the source may contain arbitrary statements
        return get_fragment_ast(self.source, mode='exec')
def coalesce_strings(args):
    """Merge each run of adjacent Str nodes into the first node of the run
    (mutating that node's `.s` in place); non-Str items break the run."""
    merged = []
    current = None
    for node in args:
        if isinstance(node, Str):
            if current is not None:
                # extend the run: fold this literal into the run's head node
                current.s += node.s
                continue
            current = node
        else:
            current = None
        merged.append(node)
    return merged
class PyComplexNode(ComplexNode, PythonNode):
    """A Python IR node that owns child nodes."""

    def generate_child_ast(self, generator, parent_for_children):
        # concatenate the generated statements of every child, in order
        statements = []
        for child in self.children:
            statements.extend(child.generate_ast(generator, parent_for_children))
        return statements
class PyIfNode(PyComplexNode):
    """Conditional section; statically decidable conditions are folded away."""

    def __init__(self, expression):
        super(PyIfNode, self).__init__()
        self.expression = expression

    def generate_ast(self, generator, parent):
        test = get_fragment_ast(self.expression)
        # static_expr_to_bool returns a strict bool for static literals,
        # or None when the condition must be evaluated at render time
        boolean = static_expr_to_bool(test)
        if boolean is False:   # was `== False` — identity test is the idiom here
            return []          # dead branch: emit nothing at all
        if boolean is True:    # was `== True`
            # always-true: inline the children without an If wrapper
            return self.generate_child_ast(generator, parent)
        node = If(
            test=test,
            body=self.generate_child_ast(generator, self),
            orelse=[]
        )
        return [node]
def PyUnlessNode(self, expression):
    # NOTE: despite the class-like name this is a factory *function*. It is
    # assigned as `UnlessNode` on Generator, so when invoked through a
    # generator instance `self` receives the generator and is unused here.
    # It simply builds an if-node with the condition negated.
    expression = get_fragment_ast(expression)
    expression = UnaryOp(op=Not(), operand=expression)
    return PyIfNode(expression)
class PyImportNode(PythonNode):
    """Import node: binds the defs of another template to a local alias."""
    def __init__(self, href, alias):
        super(PyImportNode, self).__init__()
        self.href = str(href)    # path of the imported template
        self.alias = str(alias)  # local name the imported defs are bound to
    def generate_ast(self, generator, parent):
        # builds: alias = __TK__runtime.import_defs(__TK__original_context, href)
        node = Assign(
            targets=[NameX(str(self.alias), store=True)],
            value=
            simple_call(
                func=
                Attribute(value=NameX('__TK__runtime', store=False),
                          attr='import_defs', ctx=Load()),
                args=[
                    NameX('__TK__original_context'),
                    Str(s=self.href)
                ]
            )
        )
        if parent.is_top_level:
            # hoisted into the binder by the root node; nothing emitted inline
            generator.add_top_level_import(str(self.alias), node)
            return []
        return [node]
class PyAttributeNode(PyComplexNode):
    """An XML attribute whose value is assembled from its child nodes."""
    def __init__(self, name, value):
        # NOTE(review): `value` is accepted but never stored — the attribute
        # value appears to be delivered via the child nodes; confirm upstream.
        super(PyAttributeNode, self).__init__()
        self.name = name
    def get_expressions(self):
        """Collect the value expressions of all children, in document order."""
        rv = []
        for i in self.children:
            rv.extend(i.get_expressions())
        return rv
    def generate_ast(self, generator, parent):
        if len(self.children) == 1 and \
                isinstance(self.children[0], PyExpressionNode):
            # special case, the attribute contains a single
            # expression, these are handled by
            # __TK__output.output_boolean_attr,
            # given the name, and unescaped expression!
            return [Expr(simple_call(
                func=Attribute(
                    value=NameX('__TK__output'),
                    attr='output_boolean_attr',
                    ctx=Load()
                ),
                args=[
                    Str(s=self.name),
                    self.children[0].get_unescaped_expression()
                ]
            ))]
        # otherwise just return the output for the attribute code
        # like before
        return self.generate_output_ast(
            [Str(s=' %s="' % self.name)] +
            self.get_expressions() +
            [Str(s='"')],
            generator, parent
        )
class PyAttrsNode(PythonNode):
    """Renders a whole attribute mapping produced by one expression."""

    def __init__(self, expression):
        super(PyAttrsNode, self).__init__()
        self.expression = expression

    def generate_ast(self, generator, parent):
        attrs_expr = get_fragment_ast(self.expression)
        rendered = simple_call(
            NameX('__TK__output_attrs'),
            args=[attrs_expr]
        )
        return self.generate_output_ast(rendered, generator, parent)
class PyForNode(PyComplexNode):
    """A for-loop section; the loop header comes verbatim from template source."""
    def __init__(self, target_and_expression, parts):
        # NOTE(review): `parts` is accepted but unused — presumably consumed by
        # the parser before construction; confirm against the caller.
        super(PyForNode, self).__init__()
        self.target_and_expression = target_and_expression
    def generate_contents(self, generator, parent):
        lineno, col = getattr(self.target_and_expression, 'position', (1, 0))
        # parse "for <header>: pass" and then swap in the real body;
        # col - 4 compensates for the 'for ' prefix added here
        body = get_fragment_ast(
            StringWithLocation('for %s: pass' % self.target_and_expression,
                               lineno, col - 4),
            'exec',
        )
        for_node = body[0]
        for_node.body = self.generate_child_ast(generator, self)
        return [for_node]
    def generate_ast(self, generator, parent):
        # TODO: this could be needed to be reinstantiated
        # return self.generate_varscope(self.generate_contents())
        return self.generate_contents(generator, parent)
class PyDefineNode(PyComplexNode):
    """A reusable template function defined by a Python-style signature."""

    def __init__(self, funcspec):
        super(PyDefineNode, self).__init__()
        self.position = getattr(funcspec, 'position', (1, 0))
        # a bare name is normalized to a zero-argument signature
        if '(' not in funcspec:
            funcspec += '()'
        self.funcspec = funcspec

    def generate_ast(self, generator, parent):
        # parse "def <funcspec>: pass" and swap in the real body;
        # column - 4 compensates for the 'def ' prefix added here
        body = get_fragment_ast(
            StringWithLocation('def %s: pass' % self.funcspec,
                               self.position[0], self.position[1] - 4),
            "exec"
        )
        def_node = body[0]
        # (removed an unused local that re-derived the name from funcspec;
        # def_node.name already carries it)
        def_node.body = self.make_buffer_frame(
            self.generate_child_ast(generator, self)
        )
        # move the function out of the closure
        if parent.is_top_level:
            generator.add_top_def(def_node.name, def_node)
            return []
        return [def_node]
class PyComplexExprNode(PyComplexNode):
    """An output expression assembled from several child fragments."""

    def get_expressions(self):
        collected = []
        for child in self.children:
            # leaf nodes expose get_expression; containers expose get_expressions
            single = getattr(child, 'get_expression', None)
            if single is not None:
                collected.append(single())
            else:
                collected.extend(child.get_expressions())
        return collected

    def generate_ast(self, generator, parent=None):
        return self.generate_output_ast(self.get_expressions(),
                                        generator, parent)
class PyBlockNode(PyComplexNode):
    """A named, overridable block (template-inheritance unit)."""
    def __init__(self, name):
        super(PyBlockNode, self).__init__()
        self.name = name
    def generate_ast(self, generator, parent):
        # inside an extended template blocks are only registered, not called
        is_extended = isinstance(parent, PyExtendsNode)
        name = self.name
        blockfunc_name = '__TK__block__%s' % name
        position = getattr(name, 'position', (1, 0))
        # build "def __TK__block__<name>(): pass" and fill in the real body
        body = get_fragment_ast(
            StringWithLocation(
                'def %s():pass' % blockfunc_name,
                position[0], position[1] - 4),
            'exec'
        )
        def_node = body[0]
        def_node.body = self.make_buffer_frame(
            self.generate_child_ast(generator, self)
        )
        if not isinstance(name, str):  # pragma: python2
            name = name.encode('UTF-8')
        generator.add_block(str(name), def_node, blockfunc_name)
        if not is_extended:
            # call the block in place
            # NOTE(review): `self` is passed where other call sites pass the
            # generator; generate_output_ast ignores that argument, so harmless.
            return self.generate_output_ast(
                [simple_call(NameX(str(self.name)), [])],
                self,
                parent,
                position=position
            )
        else:
            return []
class PyWithNode(PyComplexNode):
    """Evaluates variable bindings and children in a fresh nested scope."""

    def __init__(self, vars):
        super(PyWithNode, self).__init__()
        self.vars = vars

    def generate_ast(self, generator, parent=None):
        bindings = get_fragment_ast(self.vars, 'exec')
        children = self.generate_child_ast(generator, self)
        # the bindings must not leak: run everything inside a var scope
        return self.generate_varscope(bindings + children)
class PyExtendsNode(PyComplexNode):
    """Marks this template as extending another one."""

    # children (blocks, defs) are hoisted to module top level
    is_top_level = True

    def __init__(self, href):
        super(PyExtendsNode, self).__init__()
        self.href = href

    def generate_ast(self, generator, parent=None):
        generator.make_extended_template(self.href)
        return self.generate_child_ast(generator, self)
def ast_equals(tree1, tree2):
    """True when the two AST trees are structurally identical (via ast.dump)."""
    return ast.dump(tree1) == ast.dump(tree2)
def coalesce_outputs(tree):
    """
    Coalesce the constant output expressions
    __output__('foo')
    __output__('bar')
    __output__(baz)
    __output__('xyzzy')
    into
    __output__('foobar', baz, 'xyzzy')

    Also simplifies __TK__output(__TK__escape(literal(x))) into __TK__output(x).
    Mutates *tree* in place.
    """
    coalesce_all_outputs = True
    if coalesce_all_outputs:
        should_coalesce = lambda n: True
    else:
        # alternative policy: only coalesce calls whose first arg is a Str
        should_coalesce = lambda n: n.output_args[0].__class__ is Str
    class OutputCoalescer(NodeVisitor):
        def visit(self, node):
            # if - else expression also has a body! it is not we want, though.
            if hasattr(node, 'body') and isinstance(node.body, Iterable):
                # coalesce continuous string output nodes
                new_body = []
                output_node = None
                def coalesce_strs():
                    # merge adjacent Str args of the current output call in place
                    if output_node:
                        output_node.value.args[:] = \
                            coalesce_strings(output_node.value.args)
                for i in node.body:
                    if hasattr(i, 'output_args') and should_coalesce(i):
                        if output_node:
                            # keep arg count below CPython's call-argument limit
                            if len(output_node.value.args) + len(i.output_args) > 250:
                                coalesce_strs()
                                output_node = i
                            else:
                                # merged into the running call: drop this node
                                output_node.value.args.extend(i.output_args)
                                continue
                        output_node = i
                    else:
                        coalesce_strs()
                        output_node = None
                    new_body.append(i)
                coalesce_strs()
                node.body[:] = new_body
            NodeVisitor.visit(self, node)
        def check(self, node):
            """
            Coalesce __TK__output(__TK__escape(literal(x))) into
            __TK__output(x).
            """
            if not ast_equals(node.func, NameX('__TK__output')):
                return
            for i in range(len(node.args)):
                arg1 = node.args[i]
                if not arg1.__class__.__name__ == 'Call':
                    continue
                if not ast_equals(arg1.func, NameX('__TK__escape')):
                    continue
                if len(arg1.args) != 1:
                    continue
                arg2 = arg1.args[0]
                if not arg2.__class__.__name__ == 'Call':
                    continue
                if not ast_equals(arg2.func, NameX('literal')):
                    continue
                if len(arg2.args) != 1:
                    continue
                node.args[i] = arg2.args[0]
        def visit_Call(self, node):
            self.check(node)
            self.generic_visit(node)
    OutputCoalescer().visit(tree)
def remove_locations(node):
    """
    Strip lineno/col_offset information from *node* and its entire subtree.
    """
    for current in ast.walk(node):
        attrs = current._attributes
        if 'lineno' in attrs and hasattr(current, 'lineno'):
            del current.lineno
        if 'col_offset' in attrs and hasattr(current, 'col_offset'):
            del current.col_offset
class PyRootNode(PyComplexNode):
    """Root of the IR tree; assembles the full template module AST."""
    def __init__(self):
        super(PyRootNode, self).__init__()
    # everything directly under the root is generated at module top level
    is_top_level = True
    def generate_ast(self, generator, parent=None):
        """Build the module: prologue, __main__ (unless extending), hoisted
        blocks/defs and the __TK__binder function that wires the context."""
        main_body = self.generate_child_ast(generator, self)
        extended = generator.extended_href
        toplevel_funcs = generator.blocks + generator.top_defs
        # do not generate __main__ for extended templates
        if not extended:
            main_func = self.make_function('__main__', main_body,
                                           add_buffer=True)
            generator.add_bind_decorator(main_func)
            toplevel_funcs = [main_func] + toplevel_funcs
        # analyze the set of free variables
        free_variables = set()
        for i in toplevel_funcs:
            fv_info = FreeVarFinder.for_ast(i)
            free_variables.update(fv_info.get_free_variables())
        # discard __TK__ variables, always builtin names True, False, None
        # from free variables.
        for i in list(free_variables):
            if i.startswith('__TK__') or i in ALWAYS_BUILTINS:
                free_variables.discard(i)
        # discard the names of toplevel funcs from free variables
        free_variables.difference_update(generator.top_level_names)
        # NOTE: the indentation inside these source strings is significant —
        # the string is parsed with ast.parse below (it had been mangled).
        code = '__TK__mkbuffer = __TK__runtime.Buffer\n'
        code += '__TK__escape = __TK__escape_g = __TK__runtime.escape\n'
        code += '__TK__output_attrs = __TK__runtime.output_attrs\n'
        if extended:
            code += '__TK__parent_template = __TK__runtime.load(%r)\n' % \
                    extended
        code += 'def __TK__binder(__TK__context):\n'
        code += '    __TK__original_context = __TK__context.copy()\n'
        code += '    __TK__bind = __TK__runtime.bind(__TK__context)\n'
        code += '    __TK__bindblock = __TK__runtime.bind(__TK__context, ' \
                'block=True)\n'
        # bind gettext early!
        for i in ['egettext']:
            if i in free_variables:
                free_variables.add('gettext')
                free_variables.discard(i)
        if 'gettext' in free_variables:
            code += '    def egettext(msg):\n'
            code += '        return __TK__escape(gettext(msg))\n'
            code += '    gettext = __TK__context["gettext"]\n'
            free_variables.discard('gettext')
        code += '    raise\n'  # a placeholder, replaced by the hoisted funcs below
        if extended:
            # an extended template does not have a __main__ (it is inherited)
            code += '    __TK__parent_template.binder_func(__TK__context)\n'
        for i in free_variables:
            code += '    if "%s" in __TK__context:\n' % i
            code += '        %s = __TK__context["%s"]\n' % (i, i)
        code += '    return __TK__context\n'
        tree = ast.parse(code)
        remove_locations(tree)
        class LocatorAndTransformer(ast.NodeTransformer):
            # finds the generated __TK__binder FunctionDef
            binder = None
            def visit_FunctionDef(self, node):
                if node.name == '__TK__binder' and not self.binder:
                    self.binder = node
                self.generic_visit(node)
                return node
        locator = LocatorAndTransformer()
        locator.visit(tree)
        # inject the other top level funcs in the binder (at the `raise` placeholder)
        binder = locator.binder
        for i, e in enumerate(binder.body):
            if isinstance(e, Raise):
                break
        binder.body[i:i + 1] = toplevel_funcs
        binder.body[i:i] = generator.imports
        coalesce_outputs(tree)
        return tree
# noinspection PyProtectedMember
class LocationMapper(object):
    """Compacts AST line numbers into consecutive ones, recording the mapping
    from generated line to original line in `lineno_map`."""

    def __init__(self):
        self.lineno_map = {1: 1}      # mapped (generated) line -> original line
        self.prev_original_line = 1
        self.prev_mapped_line = 1
        self.prev_column = 0

    def map_linenos(self, node):
        """Renumber *node* and its whole subtree in place (depth-first)."""
        attrs = node._attributes
        if 'lineno' in attrs and hasattr(node, 'lineno'):
            if node.lineno != self.prev_original_line:
                # a new original line starts: allocate the next mapped line
                self.prev_mapped_line += 1
                self.lineno_map[self.prev_mapped_line] = node.lineno
                self.prev_original_line = node.lineno
            node.lineno = self.prev_mapped_line
        if 'col_offset' in attrs and hasattr(node, 'col_offset'):
            self.prev_column = node.col_offset
            node.col_offset = self.prev_column
        for child in iter_child_nodes(node):
            self.map_linenos(child)
class Generator(BaseGenerator):
    """Python backend generator: maps IR node kinds to the Py* node classes
    and collects module-level pieces (blocks, defs, imports) while generating."""
    # IR node classes instantiated by BaseGenerator while walking the tree
    OutputNode = PyOutputNode
    TranslatableOutputNode = PyTranslatableOutputNode
    IfNode = PyIfNode
    ForNode = PyForNode
    DefineNode = PyDefineNode
    ComplexExprNode = PyComplexExprNode
    ExpressionNode = PyExpressionNode
    ImportNode = PyImportNode
    RootNode = PyRootNode
    AttributeNode = PyAttributeNode
    AttrsNode = PyAttrsNode
    UnlessNode = PyUnlessNode
    ExtendsNode = PyExtendsNode
    BlockNode = PyBlockNode
    CodeNode = PyCodeNode
    WithNode = PyWithNode
    def __init__(self, ir_tree):
        super(Generator, self).__init__(ir_tree)
        self.blocks = []              # hoisted block FunctionDefs
        self.top_defs = []            # hoisted <def> FunctionDefs
        self.top_level_names = set()  # names excluded from free-variable binding
        self.extended_href = None     # parent template href when extending
        self.imports = []             # hoisted top-level import assignments
        self.lnotab = None            # generated-line -> original-line map
    def add_bind_decorator(self, func, block=True):
        """Decorate *func* with __TK__bindblock (block=True) or __TK__bind."""
        binder_call = NameX('__TK__bind' + ('block' if block else ''))
        decors = [binder_call]
        func.decorator_list = decors
    def add_block(self, name, blockfunc, blockfunc_name):
        # NOTE(review): `name` is unused here; registration is by blockfunc_name
        self.top_level_names.add(blockfunc_name)
        self.add_bind_decorator(blockfunc, block=True)
        self.blocks.append(blockfunc)
    def add_top_def(self, name, defblock):
        """Register a hoisted def; it will be injected into the binder."""
        self.top_level_names.add(name)
        self.add_bind_decorator(defblock)
        self.top_defs.append(defblock)
    def add_top_level_import(self, name, node):
        """Register a hoisted template import assignment."""
        self.top_level_names.add(name)
        self.imports.append(node)
    def make_extended_template(self, href):
        # remember that this template extends *href*
        self.extended_href = href
    def lnotab_info(self):
        """Return the generated->original line-number map built by generate_ast."""
        return self.lnotab
    def generate_ast(self):
        tree = super(Generator, self).generate_ast()
        # compact line numbers and remember the mapping for traceback rewriting
        loc_mapper = LocationMapper()
        loc_mapper.map_linenos(tree)
        self.lnotab = loc_mapper.lineno_map
        return tree
| 28.369034 | 86 | 0.577414 | 19,536 | 0.801674 | 0 | 0 | 0 | 0 | 0 | 0 | 3,040 | 0.124749 |
2a1fc79d624fe31282ea8ff5726da6d9664343bd | 389 | py | Python | training_data.py | zestdoc/echoAI | 2ef96dbd156c94fff0c1d8ff90d0e1706864185e | [
"MIT"
] | null | null | null | training_data.py | zestdoc/echoAI | 2ef96dbd156c94fff0c1d8ff90d0e1706864185e | [
"MIT"
] | null | null | null | training_data.py | zestdoc/echoAI | 2ef96dbd156c94fff0c1d8ff90d0e1706864185e | [
"MIT"
] | null | null | null | # Format of training prompt
defaultPrompt = """I am a Cardiologist. My patient asked me what this means:
Input: Normal left ventricular size and systolic function. EF > 55 %. Normal right ventricular size and systolic function. Normal valve structure and function
Output: Normal pumping function of the left and right side of the heart. Heart valves are normal
-
Input: {}
Output:"""
| 29.923077 | 158 | 0.766067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.953728 |
2a20c2f2ce75008bda8bbed62280c06d1b9e0a6b | 682 | py | Python | db/src/scrape/programs.py | tomquirk/uq-api | 1a856d8b8e5fe8a94986d6c687e885bae642067b | [
"MIT"
] | 8 | 2017-06-14T00:11:49.000Z | 2021-02-03T23:58:10.000Z | db/src/scrape/programs.py | tomquirk/uq-api | 1a856d8b8e5fe8a94986d6c687e885bae642067b | [
"MIT"
] | 2 | 2020-07-16T15:44:23.000Z | 2021-05-08T00:42:12.000Z | db/src/scrape/programs.py | tomquirk/uq-catalogue-api | 1a856d8b8e5fe8a94986d6c687e885bae642067b | [
"MIT"
] | 7 | 2017-05-11T05:25:16.000Z | 2021-02-02T15:57:57.000Z | """
Programs scraper
"""
import re
import src.scrape.util.helpers as helpers
import src.settings as settings
from src.logger import get_logger
_LOG = get_logger("programs_scraper")
def programs():
    """
    Scrapes list of programs
    :return: List, of program codes
    """
    _LOG.debug("scraping list of programs")
    listing_url = f"{settings.UQ_FUTURE_BASE_URL}/study/find-a-program/listing/undergraduate"
    soup = helpers.get_soup(listing_url)
    program_links = soup.find_all("a", href=re.compile("/study/program"))
    # the program code is the last 4 characters of each program URL
    return [link["href"][-4:] for link in program_links]
| 22.733333 | 85 | 0.695015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 245 | 0.359238 |
2a2228848cab06332324e3506158b74866879860 | 2,219 | py | Python | wagtail_review/forms.py | jacobtoppm/wagtail-review | 23b81d7e5699ecb843a99da1aa207775a8b85bd6 | [
"BSD-3-Clause"
] | 44 | 2018-12-17T16:37:16.000Z | 2022-03-06T15:09:23.000Z | wagtail_review/forms.py | jacobtoppm/wagtail-review | 23b81d7e5699ecb843a99da1aa207775a8b85bd6 | [
"BSD-3-Clause"
] | 33 | 2019-01-07T18:03:14.000Z | 2021-12-15T08:46:57.000Z | wagtail_review/forms.py | jacobtoppm/wagtail-review | 23b81d7e5699ecb843a99da1aa207775a8b85bd6 | [
"BSD-3-Clause"
] | 19 | 2019-01-08T14:08:15.000Z | 2021-10-19T03:16:30.000Z | from django import forms
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.forms.formsets import DELETION_FIELD_NAME
from django.utils.module_loading import import_string
from django.utils.translation import ugettext
import swapper
from wagtail_review.models import Reviewer, Response
Review = swapper.load_model('wagtail_review', 'Review')
class CreateReviewForm(forms.ModelForm):
    """Default form for creating a Review; exposes no editable fields."""
    class Meta:
        model = Review
        fields = []
def get_review_form_class():
    """
    Get the review form class from the ``WAGTAILREVIEW_REVIEW_FORM`` setting.

    Defaults to CreateReviewForm; raises ImproperlyConfigured when the
    configured dotted path cannot be imported.
    """
    form_class_name = getattr(settings, 'WAGTAILREVIEW_REVIEW_FORM', 'wagtail_review.forms.CreateReviewForm')
    try:
        return import_string(form_class_name)
    except ImportError:
        raise ImproperlyConfigured(
            "WAGTAILREVIEW_REVIEW_FORM refers to a form '%s' that is not available" % form_class_name
        )
class BaseReviewerFormSet(forms.BaseInlineFormSet):
    """Inline formset for Reviewer rows: hides the delete widget and requires
    at least one non-deleted reviewer."""
    def add_fields(self, form, index):
        super().add_fields(form, index)
        # deletion is driven by the front-end, so the checkbox stays hidden
        form.fields[DELETION_FIELD_NAME].widget = forms.HiddenInput()
    def clean(self):
        # Confirm that at least one reviewer has been specified.
        # Do this as a custom validation step (rather than passing min_num=1 /
        # validate_min=True to inlineformset_factory) so that we can have a
        # custom error message.
        if (self.total_form_count() - len(self.deleted_forms) < 1):
            raise ValidationError(
                ugettext("Please select one or more reviewers."),
                code='too_few_forms'
            )
# Inline formset binding Reviewer rows to a Review; user/email are posted
# as hidden inputs populated by the front-end chooser.
ReviewerFormSet = forms.inlineformset_factory(
    Review, Reviewer,
    fields=['user', 'email'],
    formset=BaseReviewerFormSet,
    extra=0,
    widgets={
        'user': forms.HiddenInput,
        'email': forms.HiddenInput,
    }
)
class ResponseForm(forms.ModelForm):
    """Form used by a reviewer to submit their verdict and comments."""
    class Meta:
        model = Response
        fields = ['result', 'comment']
        widgets = {
            'result': forms.RadioSelect,
            'comment': forms.Textarea(attrs={
                'placeholder': 'Enter your comments',
            }),
        }
| 30.39726 | 109 | 0.671924 | 1,096 | 0.493916 | 0 | 0 | 0 | 0 | 0 | 0 | 613 | 0.276251 |
2a2538f0dcb641072614c3ea490a7e3d7705aa5a | 20,466 | py | Python | metrics.py | giacoballoccu/Explanation-Quality-CPGPR | 3ec199d17e24154f8766e3af760b1e1486d135a6 | [
"MIT"
] | null | null | null | metrics.py | giacoballoccu/Explanation-Quality-CPGPR | 3ec199d17e24154f8766e3af760b1e1486d135a6 | [
"MIT"
] | null | null | null | metrics.py | giacoballoccu/Explanation-Quality-CPGPR | 3ec199d17e24154f8766e3af760b1e1486d135a6 | [
"MIT"
] | null | null | null | import numpy as np
from myutils import *
from easydict import EasyDict as edict
def dcg_at_k(r, k, method=1):
    """
    Discounted cumulative gain of relevance scores *r*, truncated at rank *k*.

    method 0: r[0] + sum(r[1:] / log2(rank))  (no discount on the top item)
    method 1: sum(r / log2(rank + 1))         (standard DCG)
    Returns 0.0 for an empty relevance list; raises ValueError for other methods.
    """
    # np.asfarray was removed in NumPy 2.0; asarray with a float dtype is equivalent
    r = np.asarray(r, dtype=float)[:k]
    if r.size:
        if method == 0:
            return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
        elif method == 1:
            return np.sum(r / np.log2(np.arange(2, r.size + 2)))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.
def ndcg_at_k(r, k, method=0):
    """Normalized DCG@k: dcg_at_k(r) divided by the ideal DCG (r sorted descending)."""
    ideal = dcg_at_k(sorted(r, reverse=True), k, method)
    if not ideal:
        # no relevant items at all: NDCG is defined as 0
        return 0.
    return dcg_at_k(r, k, method) / ideal
def measure_rec_quality(path_data):
    """
    Compute NDCG, HR, recall and precision of the top-k recommendations in
    *path_data*, overall and grouped by every enabled sensitive attribute.

    Returns an edict: metric name -> {group label -> list of per-user scores}.
    """
    # Evaluate only the attributes that have been chosen and are avaiable in the chosen dataset
    flags = path_data.sens_attribute_flags
    attribute_list = get_attribute_list(path_data.dataset_name, flags)
    metrics_names = ["ndcg", "hr", "recall", "precision"]
    metrics = edict()
    for metric in metrics_names:
        metrics[metric] = {"Overall": []}
        for values in attribute_list.values():
            # with a single attribute only "Overall" buckets are kept
            if len(attribute_list) == 1: break
            attribute_to_name = values[1]
            for _, name in attribute_to_name.items():
                metrics[metric][name] = []
    topk_matches = path_data.uid_topk
    test_labels = path_data.test_labels
    test_user_idxs = list(test_labels.keys())
    invalid_users = []
    for uid in test_user_idxs:
        if uid not in topk_matches: continue
        if len(topk_matches[uid]) < 10:
            # users with fewer than 10 recommendations are excluded
            invalid_users.append(uid)
            continue
        pred_list, rel_set = topk_matches[uid], test_labels[uid]
        if len(pred_list) == 0:
            continue
        k = 0
        hit_num = 0.0
        hit_list = []
        for pid in pred_list:
            k += 1
            if pid in rel_set:
                hit_num += 1
                hit_list.append(1)
            else:
                hit_list.append(0)
        ndcg = ndcg_at_k(hit_list, k)
        recall = hit_num / len(rel_set)
        precision = hit_num / len(pred_list)
        hit = 1.0 if hit_num > 0.0 else 0.0
        # Based on attribute
        for attribute in attribute_list.keys():
            if uid not in attribute_list[attribute][0]: continue
            attr_value = attribute_list[attribute][0][uid]
            if attr_value not in attribute_list[attribute][1]: continue  # Few users may have the attribute missing (LASTFM)
            attr_name = attribute_list[attribute][1][attr_value]
            metrics["ndcg"][attr_name].append(ndcg)
            metrics["recall"][attr_name].append(recall)
            metrics["precision"][attr_name].append(precision)
            metrics["hr"][attr_name].append(hit)
        metrics["ndcg"]["Overall"].append(ndcg)
        metrics["recall"]["Overall"].append(recall)
        metrics["precision"]["Overall"].append(precision)
        metrics["hr"]["Overall"].append(hit)
    return metrics
def print_rec_metrics(dataset_name, flags, metrics):
    """Pretty-print the metrics built by measure_rec_quality: overall averages,
    then one line per sensitive-attribute group."""
    attribute_list = get_attribute_list(dataset_name, flags)
    print("\n---Recommandation Quality---")
    print("Average for the entire user base:", end=" ")
    for metric, values in metrics.items():
        print("{}: {:.3f}".format(metric, np.array(values["Overall"]).mean()), end=" | ")
    print("")
    for attribute_category, values in attribute_list.items():
        print("\n-Statistic with user grouped by {} attribute".format(attribute_category))
        for attribute in values[1].values():
            print("{} group".format(attribute), end=" ")
            for metric_name, groups_values in metrics.items():
                print("{}: {:.3f}".format(metric_name, np.array(groups_values[attribute]).mean()), end=" | ")
            print("")
    print("\n")
"""
Explanation metrics
"""
def topk_ETV(path_data):
    """
    Explanation Type Variety per user: Simpson's diversity index over the
    path types of the explanations attached to each user's top-k items.

    Returns {uid: ETV score in [0, 1]}.
    """
    dataset_name = path_data.dataset_name
    def simpson_index(topk):
        # count how many explanation paths fall into each known path pattern
        n_path_for_patterns = {k: 0 for k in set(PATH_TYPES[dataset_name])}
        N = 0
        for path in topk:
            # removed a dead `path = path` statement that was here
            path_type = get_path_type(path)
            if path_type == 'self_loop':
                # self loops are bucketed together with described_as
                path_type = 'described_as'
            n_path_for_patterns[path_type] += 1
            N += 1
        numerator = 0
        for path_type, n_path_type_ith in n_path_for_patterns.items():
            numerator += n_path_type_ith * (n_path_type_ith - 1)
        if N * (N - 1) == 0:
            # 0 or 1 paths: variety is undefined, report 0
            return 0
        return 1 - (numerator / (N * (N - 1)))
    ETVs = {}
    for uid, topk in path_data.uid_topk.items():
        if uid not in path_data.test_labels: continue
        ETV = simpson_index([path_data.uid_pid_explanation[uid][pid] for pid in topk])
        ETVs[uid] = ETV
    return ETVs
def avg_ETV(path_data):
    """
    Average Explanation Type Variety, overall and per sensitive-attribute group.

    Returns an edict with `avg_groups_ETV` (group label -> mean score) and
    `groups_ETV_scores` (group label -> list of per-user scores).
    """
    uid_ETVs = topk_ETV(path_data)
    # Evaluate only the attributes that have been chosen and are avaiable in the chosen dataset
    flags = path_data.sens_attribute_flags
    attribute_list = get_attribute_list(path_data.dataset_name, flags)
    avg_groups_ETV = {}
    groups_ETV_scores = {}
    # Initialize group scores with empty list
    for attribute in attribute_list.keys():
        for _, attribute_label in attribute_list[attribute][1].items():
            groups_ETV_scores[attribute_label] = []
    if "Overall" not in groups_ETV_scores:
        groups_ETV_scores["Overall"] = []
    for uid, ETV in uid_ETVs.items():
        for attribute in attribute_list.keys():
            if uid not in attribute_list[attribute][0]: continue
            attr_value = attribute_list[attribute][0][uid]
            if attr_value not in attribute_list[attribute][1]: continue  # Few users may have the attribute missing (LASTFM)
            attr_name = attribute_list[attribute][1][attr_value]
            groups_ETV_scores[attr_name].append(ETV)
        groups_ETV_scores["Overall"].append(ETV)
    # NOTE(review): an empty group yields numpy nan (mean of an empty array)
    for attribute_label, group_scores in groups_ETV_scores.items():
        avg_groups_ETV[attribute_label] = np.array(group_scores).mean()
    explanation_type_variety = edict(
        avg_groups_ETV=avg_groups_ETV,
        groups_ETV_scores=groups_ETV_scores
    )
    return explanation_type_variety
def avg_LID(path_data):
    """
    Average Linked Interaction Diversity, overall and per sensitive-attribute group.

    Returns an edict with `avg_groups_LID` (group label -> mean score) and
    `groups_LID_scores` (group label -> list of per-user scores).
    """
    uid_LIDs = topk_LID(path_data)
    # Evaluate only the attributes that have been chosen and are avaiable in the chosen dataset
    flags = path_data.sens_attribute_flags
    attribute_list = get_attribute_list(path_data.dataset_name, flags)
    avg_groups_LID = {}
    groups_LID_scores = {}
    # Initialize group scores with empty list
    for attribute in attribute_list.keys():
        for _, attribute_label in attribute_list[attribute][1].items():
            groups_LID_scores[attribute_label] = []
    if "Overall" not in groups_LID_scores:
        groups_LID_scores["Overall"] = []
    for uid, LID in uid_LIDs.items():
        for attribute in attribute_list.keys():
            if uid not in attribute_list[attribute][0]: continue
            attr_value = attribute_list[attribute][0][uid]
            if attr_value not in attribute_list[attribute][1]: continue  # Few users may have the attribute missing (LASTFM)
            attr_name = attribute_list[attribute][1][attr_value]
            groups_LID_scores[attr_name].append(LID)
        groups_LID_scores["Overall"].append(LID)
    # NOTE(review): an empty group yields numpy nan (mean of an empty array)
    for attribute_label, group_scores in groups_LID_scores.items():
        avg_groups_LID[attribute_label] = np.array(group_scores).mean()
    linked_interaction_diversity_results = edict(
        avg_groups_LID=avg_groups_LID,
        groups_LID_scores=groups_LID_scores
    )
    return linked_interaction_diversity_results
def topk_LID(path_data):
    """
    Linked Interaction Diversity per user: number of distinct interactions
    used as the linked interaction across the top-k explanations, divided
    by the size of the top-k.

    Returns {uid: LID score}.
    """
    LIDs = {}
    for uid, topk in path_data.uid_topk.items():
        if uid not in path_data.test_labels: continue
        unique_linked_interaction = set()
        for pid in topk:
            if pid not in path_data.uid_pid_explanation[uid]:
                continue
            current_path = path_data.uid_pid_explanation[uid][pid]
            li = get_linked_interaction_id(current_path)
            if current_path[1][0] == "mention":
                # offset word ids so they don't collide with item interaction ids
                li += 10000
            unique_linked_interaction.add(li)
        # removed a leftover debug counter/print; guard against an empty
        # top-k (previously a ZeroDivisionError) consistently with topk_SED
        LIDs[uid] = len(unique_linked_interaction) / len(topk) if len(topk) > 0 else 1
    return LIDs
def avg_SED(path_data):
    """
    Average Shared Entity Diversity, overall and per sensitive-attribute group.

    Returns an edict with `avg_groups_SED` (group label -> mean score) and
    `groups_SED_scores` (group label -> list of per-user scores).
    """
    uid_SEDs = topk_SED(path_data)
    # Evaluate only the attributes that have been chosen and are avaiable in the chosen dataset
    flags = path_data.sens_attribute_flags
    attribute_list = get_attribute_list(path_data.dataset_name, flags)
    avg_groups_SED = {}
    groups_SED_scores = {}
    # Initialize group scores with empty list
    for attribute in attribute_list.keys():
        for _, attribute_label in attribute_list[attribute][1].items():
            groups_SED_scores[attribute_label] = []
    if "Overall" not in groups_SED_scores:
        groups_SED_scores["Overall"] = []
    for uid, SED in uid_SEDs.items():
        for attribute in attribute_list.keys():
            if uid not in attribute_list[attribute][0]: continue
            attr_value = attribute_list[attribute][0][uid]
            if attr_value not in attribute_list[attribute][1]: continue  # Few users may have the attribute missing (LASTFM)
            attr_name = attribute_list[attribute][1][attr_value]
            groups_SED_scores[attr_name].append(SED)
        groups_SED_scores["Overall"].append(SED)
    # NOTE(review): an empty group yields numpy nan (mean of an empty array)
    for attribute_label, group_scores in groups_SED_scores.items():
        avg_groups_SED[attribute_label] = np.array(group_scores).mean()
    shared_entity_diversity_results = edict(
        avg_groups_SED=avg_groups_SED,
        groups_SED_scores=groups_SED_scores
    )
    return shared_entity_diversity_results
def topk_SED(path_data):
    """
    Shared Entity Diversity per user: distinct shared entities across the
    top-k explanation paths, divided by k (1 when the top-k is empty).
    """
    scores = {}
    for uid, topk in path_data.uid_topk.items():
        if uid not in path_data.test_labels: continue
        seen_entities = set()
        for pid in topk:
            if pid not in path_data.uid_pid_explanation[uid]:
                continue
            explanation = path_data.uid_pid_explanation[uid][pid]
            seen_entities.add(get_shared_entity_id(explanation))
        scores[uid] = len(seen_entities) / len(topk) if len(topk) > 0 else 1
    return scores
def topk_ETD(path_data):
    """
    Explanation Type Diversity per user: distinct path types in the top-k
    explanations divided by the dataset's total number of path types.

    Returns {uid: ETD score in [0, 1]}.
    """
    ETDs = {}
    for uid, topk in path_data.uid_topk.items():
        if uid not in path_data.test_labels: continue
        unique_path_types = set()
        for pid in topk:
            if pid not in path_data.uid_pid_explanation[uid]:
                continue
            current_path = path_data.uid_pid_explanation[uid][pid]
            path_type = get_path_type(current_path)
            unique_path_types.add(path_type)
        ETD = len(unique_path_types) / TOTAL_PATH_TYPES[path_data.dataset_name]
        ETDs[uid] = ETD
    return ETDs
def get_attribute_list(dataset_name, flags):
    """
    Build {attribute_name: [uid->value map, value->label map]} for every
    sensitive attribute that is both enabled in *flags* and available for
    *dataset_name*. Unsupported attributes are dropped.
    """
    attribute_list = {}
    for attribute, flag in flags.items():
        if flag and DATASET_SENSIBLE_ATTRIBUTE_MATRIX[dataset_name][attribute]:
            attribute_list[attribute] = []
    # iterate over a copy of the keys since unsupported entries are removed
    for attribute in list(attribute_list.keys()):
        if attribute == "Gender":
            user2attribute, attribute2name = get_kg_uid_to_gender_map(dataset_name)
        elif attribute == "Age":
            user2attribute, attribute2name = get_kg_uid_to_age_map(dataset_name)
        elif attribute == "Occupation":
            user2attribute, attribute2name = get_kg_uid_to_occupation_map(dataset_name)
        elif attribute == "Country":
            # BUG FIX: Country is not implemented; previously this branch fell
            # through and reused the maps of the *previous* loop iteration (or
            # raised NameError on the first one). Drop the entry instead.
            del attribute_list[attribute]
            continue
        else:
            print("Unknown attribute")
            del attribute_list[attribute]
            continue
        attribute_list[attribute] = [user2attribute, attribute2name]
    return attribute_list
def avg_ETD(path_data):
    """
    Average Explanation Type Diversity, overall and per sensitive-attribute group.

    Returns an edict with `avg_groups_ETD` (group label -> mean score) and
    `groups_ETD_scores` (group label -> list of per-user scores).
    """
    uid_ETDs = topk_ETD(path_data)
    # Evaluate only the attributes that have been chosen and are avaiable in the chosen dataset
    flags = path_data.sens_attribute_flags
    attribute_list = get_attribute_list(path_data.dataset_name, flags)
    avg_groups_ETD = {}
    groups_ETD_scores = {}
    # Initialize group scores with empty list
    for attribute in attribute_list.keys():
        for _, attribute_label in attribute_list[attribute][1].items():
            groups_ETD_scores[attribute_label] = []
    if "Overall" not in groups_ETD_scores:
        groups_ETD_scores["Overall"] = []
    for uid, ETD in uid_ETDs.items():
        for attribute in attribute_list.keys():
            if uid not in attribute_list[attribute][0]: continue
            attr_value = attribute_list[attribute][0][uid]
            if attr_value not in attribute_list[attribute][1]: continue  # Few users may have the attribute missing (LASTFM)
            attr_name = attribute_list[attribute][1][attr_value]
            groups_ETD_scores[attr_name].append(ETD)
        groups_ETD_scores["Overall"].append(ETD)
    # NOTE(review): an empty group yields numpy nan (mean of an empty array)
    for attribute_label, group_scores in groups_ETD_scores.items():
        avg_groups_ETD[attribute_label] = np.array(group_scores).mean()
    diversity_results = edict(
        avg_groups_ETD=avg_groups_ETD,
        groups_ETD_scores=groups_ETD_scores
    )
    return diversity_results
# Extract the value of LIR for the given user item path from the LIR_matrix
def LIR_single(path_data, path):
    """Linked Interaction Recency of one explanation path (0. when unknown)."""
    uid = int(path[0][-1])
    if uid not in path_data.uid_timestamp or uid not in path_data.LIR_matrix or len(path_data.uid_timestamp[uid]) <= 1: return 0.  # Should not enter there
    predicted_path = path
    linked_interaction = int(get_interaction_id(predicted_path))
    linked_interaction_type = get_interaction_type(predicted_path)
    # Handle the case of Amazon Dataset where a path may have different interaction types
    if linked_interaction_type == "mentions":
        # word-based interactions have their own recency matrix
        LIR = path_data.LIR_matrix_words[uid][linked_interaction]
    elif linked_interaction_type == "watched" or linked_interaction_type == "listened" or linked_interaction_type == "purchase":
        LIR = path_data.LIR_matrix[uid][linked_interaction]
    else:
        LIR = 0.
    return LIR
# Returns a dict where to every uid is associated a value of LIR calculated based on his topk
def topk_LIR(path_data):
    """
    Mean Linked Interaction Recency over each user's top-k explanations.

    Returns {uid: mean LIR} (0 when no explanations are available for the user).
    """
    LIR_topk = {}
    # Precompute user timestamps weigths
    LIR_matrix = path_data.LIR_matrix
    for uid in path_data.test_labels.keys():  # modified for pgpr labels
        LIR_single_topk = []
        if uid not in LIR_matrix or uid not in path_data.uid_topk:
            continue
        for pid in path_data.uid_topk[uid]:
            predicted_path = path_data.uid_pid_explanation[uid][pid]
            linked_interaction = int(get_interaction_id(predicted_path))
            linked_interaction_type = get_interaction_type(predicted_path)
            # Handle the case of Amazon Dataset where a path may have different interaction types
            if linked_interaction_type == "mentions":
                LIR = path_data.LIR_matrix_words[uid][linked_interaction]
            elif linked_interaction_type == "purchase" or linked_interaction_type == "watched" or linked_interaction_type == "listened":
                LIR = LIR_matrix[uid][linked_interaction]
            else:
                LIR = 0.
            LIR_single_topk.append(LIR)
        LIR_topk[uid] = np.array(LIR_single_topk).mean() if len(LIR_single_topk) != 0 else 0
    return LIR_topk
# Returns an avg value for the LIR of a given group
def avg_LIR(path_data):
    """
    Average Linked Interaction Recency, overall and per sensitive-attribute group.

    Returns an edict with `avg_groups_LIR` (group label -> mean score) and
    `groups_LIR_scores` (group label -> list of per-user scores).
    """
    uid_LIR_score = topk_LIR(path_data)
    avg_groups_LIR = {}
    groups_LIR_scores = {}
    # Evaluate only the attributes that have been chosen and are avaiable in the chosen dataset
    flags = path_data.sens_attribute_flags
    attribute_list = get_attribute_list(path_data.dataset_name, flags)
    # Initialize group scores with empty list
    for attribute in attribute_list.keys():
        for _, attribute_label in attribute_list[attribute][1].items():
            groups_LIR_scores[attribute_label] = []
    if "Overall" not in groups_LIR_scores:
        groups_LIR_scores["Overall"] = []
    for uid, LIR_score in uid_LIR_score.items():
        for attribute in attribute_list.keys():
            if uid not in attribute_list[attribute][0]: continue
            attr_value = attribute_list[attribute][0][uid]
            if attr_value not in attribute_list[attribute][1]: continue  # Few users may have the attribute missing (LASTFM)
            attr_name = attribute_list[attribute][1][attr_value]
            groups_LIR_scores[attr_name].append(LIR_score)
        groups_LIR_scores["Overall"].append(LIR_score)
    # NOTE(review): an empty group yields numpy nan (mean of an empty array)
    for attribute_label, group_scores in groups_LIR_scores.items():
        avg_groups_LIR[attribute_label] = np.array(group_scores).mean()
    LIR = edict(
        avg_groups_LIR=avg_groups_LIR,
        groups_LIR_scores=groups_LIR_scores,
    )
    return LIR
# Look up the SEP value of one explanation path from the SEP_matrix.
def SEP_single(path_data, path):
    """Return the SEP score of the entity shared along *path*."""
    entity_type, entity_id = get_shared_entity(path)
    return path_data.SEP_matrix[entity_type][entity_id]
def topks_SEP(path_data):
    """Return ``{uid: mean SEP over the user's top-k explanation paths}``.

    Users without a top-k, or whose top-k has no explained items, are omitted.
    """
    serendipity_matrix = path_data.SEP_matrix  # precomputed entity distribution
    scores_by_user = {}
    for uid in path_data.test_labels:
        if uid not in path_data.uid_topk:
            continue
        per_item = []
        for pid in path_data.uid_topk[uid]:
            if pid not in path_data.uid_pid_explanation[uid]:
                # print("strano 2")
                continue
            entity_type, entity_id = get_shared_entity(
                path_data.uid_pid_explanation[uid][pid])
            per_item.append(serendipity_matrix[entity_type][entity_id])
        if len(per_item) == 0:
            continue
        scores_by_user[uid] = np.array(per_item).mean()
    return scores_by_user
def avg_SEP(path_data):
    """Aggregate per-user SEP scores into per-group averages.

    Returns an edict with ``avg_groups_SEP`` (group -> mean) and
    ``groups_SEP_scores`` (group -> raw score list).
    """
    per_user = topks_SEP(path_data)
    # Evaluate only the attributes that were chosen and exist in this dataset.
    attribute_list = get_attribute_list(path_data.dataset_name,
                                        path_data.sens_attribute_flags)
    # Start every group label (and the catch-all "Overall") with an empty list.
    group_scores = {label: []
                    for attribute in attribute_list
                    for label in attribute_list[attribute][1].values()}
    group_scores.setdefault("Overall", [])
    for uid, score in per_user.items():
        for attribute in attribute_list:
            uid_to_value = attribute_list[attribute][0]
            value_to_label = attribute_list[attribute][1]
            if uid not in uid_to_value:
                continue
            value = uid_to_value[uid]
            if value not in value_to_label:
                continue  # Few users may have the attribute missing (LASTFM)
            group_scores[value_to_label[value]].append(score)
        group_scores["Overall"].append(score)
    averages = {label: np.array(scores).mean()
                for label, scores in group_scores.items()}
    return edict(
        avg_groups_SEP=averages,
        groups_SEP_scores=group_scores,
    )
def print_expquality_metrics(dataset_name, flags, metric_values):
    """Print overall and per-group averages of the explanation-quality metrics.

    Args:
        dataset_name: dataset identifier passed through to get_attribute_list.
        flags: sensitive-attribute flags selecting which groupings to report.
        metric_values: {metric_name: {group_label: value}} including "Overall".

    Fix: the original reused the name ``values`` both for the attribute data of
    the outer loop and for the metric dict of the inner loop (shadowing); the
    two roles now have distinct names.
    """
    attribute_list = get_attribute_list(dataset_name, flags)
    print("\n---Explanation Quality---")
    print("Average for the entire user base:", end=" ")
    for metric, scores in metric_values.items():
        print("{}: {:.3f}".format(metric, scores["Overall"]), end= " | ")
    print("")
    for attribute_category, category_data in attribute_list.items():
        attributes = category_data[1].values()
        print("\n-Statistic with user grouped by {} attribute".format(attribute_category))
        for attribute in attributes:
            print("{} group".format(attribute), end=" ")
            for metric, scores in metric_values.items():
                print("{}: {:.3f}".format(metric, scores[attribute]), end=" | ")
            print("")
| 38.908745 | 153 | 0.664077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,924 | 0.142871 |
2a2632dc493ea2bda2dbffcc144253e2dd95b604 | 987 | py | Python | config/setting.py | SihongHo/Vehicles-Dispatch-Simulator | 3ad80fb25fdce10db46a2b3825e45a28d7f72891 | [
"MIT"
] | 19 | 2020-11-24T08:52:24.000Z | 2022-03-05T15:38:02.000Z | config/setting.py | SihongHo/Vehicles-Dispatch-Simulator | 3ad80fb25fdce10db46a2b3825e45a28d7f72891 | [
"MIT"
] | 4 | 2020-12-24T07:21:03.000Z | 2022-02-26T16:57:52.000Z | config/setting.py | SihongHo/Vehicles-Dispatch-Simulator | 3ad80fb25fdce10db46a2b3825e45a28d7f72891 | [
"MIT"
] | 15 | 2020-12-23T03:18:27.000Z | 2022-03-29T09:31:27.000Z | import numpy as np
#Simulator Setting
#------------------------------
# One minute expressed in numpy timedelta64 nanoseconds.
MINUTES=60000000000
TIMESTEP = np.timedelta64(10*MINUTES)
PICKUPTIMEWINDOW = np.timedelta64(10*MINUTES)
#It can enable the neighbor car search system to determine the search range according to the set search distance and the size of the grid.
#It uses DFS to find the nearest idle vehicles in the area.
NeighborCanServer = False
#You can adjust the size of the experimental area by entering latitude and longitude.
#The order, road network and grid division will be adaptive. Adjust to fit selected area
FocusOnLocalRegion = False
# (west longitude, east longitude, south latitude, north latitude)
LocalRegionBound = (104.035,104.105,30.625,30.695)
if FocusOnLocalRegion == False:
    LocalRegionBound = (104.011, 104.125, 30.618, 30.703)
#Input parameters
VehiclesNumber = 6000
SideLengthMeter = 800
VehiclesServiceMeter = 800
DispatchMode = "Simulation"
DemandPredictionMode = "None"
#["TransportationClustering","KmeansClustering","SpectralClustering"]
ClusterMode = "Grid"
| 30.84375 | 138 | 0.763931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 528 | 0.534954 |
2a2702efe481d78052c5c978e6d0aa3a68bf3f5e | 4,716 | py | Python | samples/heksekraft.py | clayboone/ursina | 9aebd9403b924af260fbefbfd7cef5ad82feeff7 | [
"MIT"
] | 1 | 2020-09-04T14:32:33.000Z | 2020-09-04T14:32:33.000Z | samples/heksekraft.py | clayboone/ursina | 9aebd9403b924af260fbefbfd7cef5ad82feeff7 | [
"MIT"
] | null | null | null | samples/heksekraft.py | clayboone/ursina | 9aebd9403b924af260fbefbfd7cef5ad82feeff7 | [
"MIT"
] | 1 | 2020-09-04T14:32:41.000Z | 2020-09-04T14:32:41.000Z | from ursina import *
from copy import copy
class MinecraftClone(Entity):
    """Builds a hex-column voxel field, then adds sky, ground plane and player."""
    def __init__(self):
        super().__init__()
        # Base hexagonal prism; each voxel gets its own copy of this mesh.
        c = Cylinder(6, height=1, start=-.5)
        verts = c.vertices
        tris = c.triangles
        vertices = list()
        triangles = list()
        colors = list()
        # for z in range(16):
        #     # for x in range(4):
        #     #     triangles += [(t[0]+(i*len(triangles)), t[1]+(i*len(triangles)), t[2]+(i*len(triangles))) for t in tris]
        #     vertices += [Vec3(v) + Vec3(i,0,i) for v in verts]
        #     colors += (color.random_color(),) * len(verts)
        i = 0
        size = 16
        # verts = list()
        # One entity per grid cell; odd rows are shifted half a hex width.
        for z in range(size):
            for x in range(size):
                for y in range(1):
                    x_pos = x * .8660025
                    if z%2 == 0:
                        x_pos += .5*.8660025
                    # extra_height = 0
                    # if random.random() < .2:
                    #     extra_height = random.uniform(1, 3) * .2
                    # vertices += [Vec3(v) + Vec3(x_pos,0,z*.75) + Vec3(0,extra_height,0) for v in verts]
                    # col = lerp(color.lime, color.random_color(), .2)
                    # colors += (lerp(color.yellow, color.random_color(), .2).tint(-.2), ) * len(verts)
                    #
                    # i += 1
                    voxel = Entity(
                        parent = self,
                        name = 'voxel',
                        model = copy(c),
                        origin_y = -.5,
                        double_sided = True,
                        color = lerp(lerp(color.brown, color.dark_gray, .5), color.random_color(), .1),
                        index = (x,y,z),
                        position=(x*.8660025,y,z*.75)
                        )
                    voxel.collider = MeshCollider(voxel, mesh=c, center=-voxel.origin)
                    # Thin, grass-colored cap on top of each column.
                    top = Entity(parent=voxel, model=copy(c), y=1.01, scale_y=.01)
                    top.color = lerp(color.lime, color.random_color(), .2)
                    if z%2 == 0:
                        voxel.x += .5*.8660025
                    # ~20% of columns get a random extra height.
                    if random.random() < .2:
                        voxel.scale_y += random.uniform(1, 3) * .2
        m = Mesh(vertices, colors=colors)
        # Entity(model=m, scale=2, collider='mesh')
        sky = Sky()
        Entity(model='quad', scale=99999, color=color.azure.tint(-.1), rotation_x=90)
        player = FirstPersonController()
class Magician(Entity):
    """Grows/shrinks the entity under the cursor while a mouse button is held."""
    def update(self):
        target = mouse.hovered_entity
        if not target:
            return
        if mouse.left:
            target.scale_y += 1.5 * time.dt
        if mouse.right:
            target.scale_y -= 1.5 * time.dt
Magician()
class FirstPersonController(Entity):
    """WASD walker with mouse look; E/Q move the player up/down.

    The camera is reparented onto this entity and the mouse is locked.
    """
    def __init__(self):
        super().__init__()
        self.speed = 3
        # Frame counter; input handling is skipped for the first
        # update_interval frames (see update()).
        self.i = 0
        self.update_interval = 30
        # Small rotated quad acting as a crosshair on the UI layer.
        self.cursor = Entity(
            parent = camera.ui,
            model = 'quad',
            color = color.pink,
            scale = .008,
            rotation_z = 45
            )
        self.graphics = Entity(
            name = 'player_graphics',
            parent = self,
            model = 'cube',
            origin = (0, -.5, 0),
            scale = (1, 1.8, 1),
            )
        # Forward-pointing marker attached to the body graphics.
        self.arrow = Entity(
            parent = self.graphics,
            model = 'cube',
            color = color.blue,
            position = (0, .5, .5),
            scale = (.1, .1, .5)
            )
        camera.parent = self
        self.position = (0, 1, 1)
        camera.rotation = (0,0,0)
        camera.position = (0,1.5,0)
        camera.fov = 90
        mouse.locked = True
    def update(self):
        # Ignore input during the first update_interval frames.
        if self.i < self.update_interval:
            self.i += 1
            return
        self.direction = (
            self.forward * (held_keys['w'] - held_keys['s'])
            + self.right * (held_keys['d'] - held_keys['a'])
            )
        # Only move when nothing is hit within .5 units ahead at head height.
        if not raycast(self.world_position + self.up, self.direction, .5).hit:
            self.position += self.direction * self.speed * time.dt
        self.rotation_y += mouse.velocity[0] * 40
        camera.rotation_x -= mouse.velocity[1] * 40
        camera.rotation_x = clamp(camera.rotation_x, -90, 90)
        self.y += held_keys['e']
        self.y -= held_keys['q']
if __name__ == '__main__':
    # Create the app, build the scene and enter the ursina main loop.
    app = Ursina()
    # window.size = (450, (450/window.aspect_ratio))
    # window.center_on_screen()
    MinecraftClone()
    # vr = VideoRecorder(name='minecraft_clone', duration=6)
    # invoke(setattr, vr, 'recording', True, delay=2)
    app.run()
| 31.44 | 120 | 0.47201 | 4,371 | 0.926845 | 0 | 0 | 0 | 0 | 0 | 0 | 910 | 0.19296 |
2a291bb3879db1c009a0ad3fab6dd256c7e79e33 | 1,646 | py | Python | gemini/bin/gemini_python.py | codes1gn/gemini | 4b173ea583f2578244d1d0fb482ccb77818f7558 | [
"MIT"
] | null | null | null | gemini/bin/gemini_python.py | codes1gn/gemini | 4b173ea583f2578244d1d0fb482ccb77818f7558 | [
"MIT"
] | null | null | null | gemini/bin/gemini_python.py | codes1gn/gemini | 4b173ea583f2578244d1d0fb482ccb77818f7558 | [
"MIT"
] | null | null | null | import sys
import traceback
import copy
import importlib
from gemini.gemini_compiler import *
from gemini.utils import *
def main(argv=None):
    """Gemini compiler driver: parse, shard and try-run a source file.

    Args:
        argv: command-line arguments; ``argv[0]`` is the source file, the
            rest are forwarded. Defaults to ``sys.argv[1:]`` evaluated at
            call time (the old ``argv=sys.argv[1:]`` default was frozen at
            import time — a classic default-argument pitfall).
    """
    if argv is None:
        argv = sys.argv[1:]
    print('gemini compiler entry point')
    filename = copy.deepcopy(argv[0])
    compiler = GeminiCompiler()
    src_code = read_src(filename)
    # Sanity check: this driver is meant to be launched via gemini_python.
    assert 'gemini_python' in sys.argv[0]
    # step 1, parse src code
    compiler.parse(src_code, filename=filename)
    compiler.dump(pretty=True, prefix='src_parse')
    assert 1, 'step 1 parse src'
    # patch, fix import gemini
    # _plugin = importlib.import_module('gemini.plugins.bert_plugin')
    # sys.modules['gemini'] = _plugin
    # step 2, parse modules
    compiler.parse_modules()
    compiler.dump(pretty=True, prefix='parse_module')
    assert 1, 'step 2 parse module'
    # TODO(albert) construct config, use dummy string instead
    config = Configuration()
    print(config)
    compiler.apply_model_parallel(config)
    compiler.dump(pretty=True, prefix='apply_{}_passes'.format(config.mode))
    assert 1, 'step 3 apply sharding mode'
    use_ast = False
    # TODO(albert) have bug when not use_ast
    # The two former if/else branches were identical except for the flag and
    # the label; collapsed into one try/except.
    mode = 'ast' if use_ast else 'src'
    try:
        compiler.compile_and_run(use_ast=use_ast)
        print('try run {} success'.format(mode))
    except Exception:
        print('try run {} fail'.format(mode))
        traceback.print_exc()
if __name__ == '__main__':
    main()
| 27.433333 | 76 | 0.655529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 509 | 0.309235 |
2a29410e6bfdb975ab115a17e1c90cb3b363e11d | 4,513 | py | Python | modules/quiet-scan-threaded.py | ChristopherBilg/cse-545-PCTF-team-31 | 24061a26b1d239605720a5e31d3cb1bfc288317a | [
"MIT"
] | null | null | null | modules/quiet-scan-threaded.py | ChristopherBilg/cse-545-PCTF-team-31 | 24061a26b1d239605720a5e31d3cb1bfc288317a | [
"MIT"
] | 1 | 2021-02-07T19:51:32.000Z | 2021-02-07T19:51:32.000Z | modules/quiet-scan-threaded.py | ChristopherBilg/cse-545-PCTF-team-31 | 24061a26b1d239605720a5e31d3cb1bfc288317a | [
"MIT"
] | 3 | 2021-02-17T15:44:39.000Z | 2022-01-29T03:18:49.000Z | #!/usr/bin/env python3
import subprocess
import sys, getopt, os
import threading
import argparse
# All scanner files live under <cwd>/port_scanner_files/.
cwd = os.getcwd()
path = cwd + "/port_scanner_files/"
# One target IP per line.
ip_list_file = path + "input/IP_list.txt"
# Per-host raw nmap output prefix; "_<ip>.txt" is appended by the workers.
nmap_output_file = path + "temp_output/nmap_output"
#the scorchedearth option runs every nmap scan that doesn't require an additional host OR standard ping scan
scorchedearth = ["-sS", "-sT", "-sA", "-sW", "-sM", "-sU", "-sN", "-sF", "-sX", "-sY","-sZ", "-sO"]
def module_name():
    """Identifier under which this tool is registered."""
    return "quiet-scan-threaded"
def module_help():
    """One-line description shown in help listings."""
    return "quiet port scanner tool (threaded)"
def module_usage():
    """Usage string — just the module name (no extra arguments)."""
    return module_name()
def system_call(program_name, args = None, privilege = False):
    """Run *program_name* through the shell, echoing the command first.

    Args:
        program_name: executable (plus baked-in flags) to run.
        args: extra argument strings appended to the command line.
        privilege: when True, prefix the command with ``sudo``.

    Fixes: the old mutable default ``args=[]`` is replaced with None.
    NOTE(review): the command is built by string concatenation and run with
    ``shell=True`` so callers can use redirections like ``>``; the arguments
    are therefore shell-interpreted — injection risk with untrusted input.
    """
    if args is None:
        args = []
    call = program_name
    if privilege:
        call = "sudo " + call
    for arg in args:
        call = call + " " + arg
    print(call)
    subprocess.call(call, shell=True)
def parse_nmap_output(ipAddr, scantype, nmap_output):
    """Extract open-port lines from a raw nmap report into the per-host services file.

    Skips forward to the table header, bails out early when nmap reported no
    open ports, drops "closed" rows, and appends each remaining row (tagged
    with the scan type that produced it) to scan_output/services_list_<ip>.txt.
    """
    services_list = path + "scan_output/services_list" + "_" + ipAddr + ".txt"
    nmap_fp = open(nmap_output, 'r')
    line_in = nmap_fp.readline()
    # Skip the banner until the port-table header line.
    while(line_in.find("STATE") == -1 and line_in.find("SERVICE") == -1): #Fixed a bug in this line as it was looping infinitely because the previous check was "STATE SERVICE", but sometimes, we endup in getting an extra space and none of the lines match
        line_in = nmap_fp.readline()
        if(line_in.lower().find("nmap done") != -1): #when no ports are open we should exit
            return
    #changed to append, so we can get results from all scan types
    services_fp = open(services_list, 'a')
    line_in = nmap_fp.readline()
    # Table rows look like "<port>/<proto> <state> <service>"; stop at blank line.
    while (line_in and line_in.strip() != ''):
        if(line_in.lower().find("closed") != -1): #IF port is closed, continue parsing the next line
            line_in = nmap_fp.readline()
            continue
        str_split = line_in.split('/')
        str_split_2 = str_split[-1].split(' ')
        line_out_list = []
        line_out_list.append(str_split[0])
        line_out_list.extend(str_split_2)
        line_out = ' '.join(line_out_list)
        services_fp.write("Scanned with: " + scantype + "\n")#write what scan produced these results
        services_fp.write(line_out)
        line_in = nmap_fp.readline()
    services_fp.close()
    nmap_fp.close()
def buildArgs(argv, line, fileName):
    """Assemble the nmap argument vector for one target host.

    Order: stripped host/IP, extra options, -Pn, then ``> fileName``.
    """
    result = [line.strip()]
    if argv:
        result.extend(argv)
    result.append("-Pn") #controversial addition: we know all the hosts are going to be online because of the game rules, so adding this skips the host discovery
    result.extend([">", fileName])
    return result
def nmap_worker(line, scantype, standard_args):
    """Thread body: run every requested scan type against one host.

    Truncates the host's services file, expands the "scortchedearth"/"se"
    alias to the full scan list, then runs nmap once per scan type and
    parses each report.
    """
    #since we are appending multiple scan results to one file, zero out the file before start
    line = line.strip()
    services_list = path + "scan_output/services_list" + "_" + line + ".txt"
    services_fp = open(services_list, 'w')
    services_fp.close()
    if ("scortchedearth" in scantype) or ("se" in scantype):
        scantype = scorchedearth
    for scan in scantype:
        print("Starting NMAP Thread for " + line +":" + " Scantype: " + scan)
        fileName = nmap_output_file + "_" + line + ".txt"
        # Runs under sudo; buildArgs appends "> fileName" so output lands on disk.
        system_call(("nmap " + scan), buildArgs(standard_args, line, fileName), True) #-p- to scan all the ports
        parse_nmap_output(line, scan, fileName)
def main():
    """Parse CLI options and spawn one nmap worker thread per target IP."""
    with open(ip_list_file ,'r') as file:
        line = file.readline()
        #New calling convention is two lists of arguments: the scan type and whatever other arguments there are
        #example python3 quiet-scan-threaded.py --scantype "-sS -sN" --options "-p 80 -sV"
        parser = argparse.ArgumentParser()
        parser.add_argument('--scantype', dest= 'scantype', required=True, help="List all scan types here. Format as if callingi n nmap ie -sS -Su -sM' etc. To automatically run all scan types enter 'scortchedearth' or 'se'")
        parser.add_argument('--options', dest= 'standard_args', nargs='+', required=True, help="All options other than scan type listed here, just as if calling nmap from the commandline")
        args = parser.parse_args()
        standard_args = args.standard_args
        scans = args.scantype
        # --scantype is a single space-separated string; split into a list.
        scantype = scans.split(' ')
        # One thread per IP line; threads are not joined, the process exits
        # when all of them finish.
        while line:
            t = threading.Thread(target=nmap_worker, args=(line, scantype, standard_args))
            t.start()
            line = file.readline()
if __name__ == "__main__":
    main()
| 35.535433 | 254 | 0.645025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,669 | 0.369821 |
2a2949f385ec93a057a10d2a09d0bd12596b5253 | 1,535 | py | Python | veselin-angelov-ekatte/create_database.py | veselin-angelov/training-projects | f02a29eed57d60720fe66e76af483d2b1f689bfd | [
"Apache-2.0"
] | null | null | null | veselin-angelov-ekatte/create_database.py | veselin-angelov/training-projects | f02a29eed57d60720fe66e76af483d2b1f689bfd | [
"Apache-2.0"
] | null | null | null | veselin-angelov-ekatte/create_database.py | veselin-angelov/training-projects | f02a29eed57d60720fe66e76af483d2b1f689bfd | [
"Apache-2.0"
] | null | null | null | from helpers import create_connection, execute_query
connection = create_connection(
"postgres", "postgres", "admin", "127.0.0.1", "5432"
)
create_database_query = "CREATE DATABASE ekatte"
execute_query(connection, create_database_query)
connection = create_connection(
"ekatte", "postgres", "admin", "127.0.0.1", "5432"
)
create_area_query = '''
CREATE TABLE IF NOT EXISTS "areas" (
"id" serial NOT NULL UNIQUE,
"name" TEXT NOT NULL UNIQUE,
"code" TEXT NOT NULL UNIQUE,
CONSTRAINT "area_pk" PRIMARY KEY ("code")
);
'''
create_municipality_query = '''
CREATE TABLE IF NOT EXISTS "municipalities" (
"id" serial NOT NULL UNIQUE,
"name" TEXT NOT NULL,
"code" TEXT NOT NULL UNIQUE,
"area_code" TEXT NOT NULL,
CONSTRAINT "municipalities_pk" PRIMARY KEY ("code"),
CONSTRAINT "municipalities_fk0" FOREIGN KEY ("area_code") REFERENCES "areas"("code")
);
'''
create_settlements_query = '''
CREATE TABLE IF NOT EXISTS "settlements" (
"id" serial NOT NULL,
"ekatte" TEXT NOT NULL UNIQUE,
"type" TEXT NOT NULL,
"name" TEXT NOT NULL,
"municipality_code" TEXT NOT NULL,
CONSTRAINT "settlements_pk" PRIMARY KEY ("id"),
CONSTRAINT "settlements_fk0" FOREIGN KEY ("municipality_code") REFERENCES "municipalities"("code")
);
'''
execute_query(connection, create_area_query)
execute_query(connection, create_municipality_query)
execute_query(connection, create_settlements_query)
| 31.326531 | 106 | 0.672964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,079 | 0.702932 |
2a294c8c332d0308530bc30b08c40aaea110d61c | 1,964 | py | Python | src/isaw.events/setup.py | isawnyu/isaw.web | 604499f9fa55d1ce9698ca05f85ddb54a88f1cab | [
"CC-BY-3.0"
] | null | null | null | src/isaw.events/setup.py | isawnyu/isaw.web | 604499f9fa55d1ce9698ca05f85ddb54a88f1cab | [
"CC-BY-3.0"
] | 405 | 2015-03-12T18:20:25.000Z | 2022-03-07T18:44:16.000Z | src/isaw.events/setup.py | isawnyu/isaw.web | 604499f9fa55d1ce9698ca05f85ddb54a88f1cab | [
"CC-BY-3.0"
] | 1 | 2016-11-07T21:18:49.000Z | 2016-11-07T21:18:49.000Z | # -*- coding: utf-8 -*-
"""
This module contains the tool of Events
"""
import os
from setuptools import setup, find_packages
def read(*rnames):
    """Return the text of a file addressed relative to this setup script.

    Fix: uses a context manager so the file handle is closed promptly
    instead of being left open until garbage collection.
    """
    with open(os.path.join(os.path.dirname(__file__), *rnames)) as f:
        return f.read()
version = '0.2'
# Long PyPI description stitched together from the project's text files.
long_description = (
    read('README.txt')
    + '\n' +
    'Change history\n'
    '**************\n'
    + '\n' +
    read('CHANGES.txt')
    + '\n' +
    'Detailed Documentation\n'
    '**********************\n'
    + '\n' +
    read('isaw', 'events', 'README.txt')
    + '\n' +
    'Contributors\n'
    '************\n'
    + '\n' +
    read('CONTRIBUTORS.txt')
    + '\n' +
    'Download\n'
    '********\n'
    )
# Shared between tests_require and extras_require below.
tests_require=['zope.testing']
setup(name='isaw.events',
version=version,
description="",
long_description=long_description,
# Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Framework :: Plone',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: GNU General Public License (GPL)',
],
keywords='events isaw schedule calendar',
author='Christopher Warner',
author_email='christopher.warner@nyu.edu',
url='http://github.com/christophwarner/isaw.events',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['isaw', ],
include_package_data=True,
zip_safe=False,
dependency_links=['http://code.google.com/p/python-twitter/'],
install_requires=[
'setuptools',
'tweepy >= 1.5',
'simplejson >= 2.0.9',
'tinyurl >= 0.1.0',
],
tests_require=tests_require,
extras_require=dict(tests=tests_require),
test_suite='isaw.events.tests.test_docs.test_suite',
entry_points="""
# -*- entry_points -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
| 26.540541 | 83 | 0.571283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 987 | 0.502546 |
2a29f26fbea0a3dc55f5442506c8f8e9d6c38803 | 1,053 | py | Python | api/model.py | thomashusken/img-class | 0ceead70f4e30face409069fb49d02fb927abc03 | [
"MIT"
] | null | null | null | api/model.py | thomashusken/img-class | 0ceead70f4e30face409069fb49d02fb927abc03 | [
"MIT"
] | 2 | 2022-01-13T01:20:18.000Z | 2022-03-11T23:50:06.000Z | api/model.py | thomashusken/img-class | 0ceead70f4e30face409069fb49d02fb927abc03 | [
"MIT"
] | null | null | null | from torchvision import models
import json
import numpy as np
import torch
from collections import OrderedDict
from operator import itemgetter
import os
def return_top_5(processed_image):
    """Classify *processed_image* with Inception v3 and collect the top-5 labels.

    Returns an OrderedDict of {label: percentage} sorted by score descending.
    NOTE(review): weights and the ImageNet class index are loaded from the
    local ``data/`` directory on every call; assumes *processed_image* is a
    preprocessed tensor accepted by torchvision's Inception v3 — confirm.
    """
    # inception = models.inception_v3(pretrained=True)
    inception = models.inception_v3()
    inception.load_state_dict(torch.load("data/inception_v3_google-1a9a5a14.pth"))
    inception.eval()
    result = inception(processed_image)
    #load imagenet classes
    class_idx = json.load(open('data/imagenet_class_index.json'))
    idx2label = [class_idx[str(k)][1] for k in range(len(class_idx))]
    # Indices of the 5 highest scores (sort() returns (values, indices)).
    result_idx = result.sort()[1][0][-5:]
    #exponentiate and get probabilities
    exps = np.exp(result.detach().numpy()[0])
    exps_sum = np.sum(exps)
    # Softmax expressed as percentages rounded to 2 decimals.
    softmax = [np.round((j / exps_sum)*100, 2) for j in exps]
    out = []
    for idx in result_idx:
        out.append((idx2label[idx], softmax[idx]))
    # out = {k: v for k, v in dict(out).items()}
    result = OrderedDict(sorted(dict(out).items(), key=itemgetter(1), reverse=True))
return result | 31.909091 | 84 | 0.698955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.213675 |
2a2a7803fb377912b3cfe716d9faf1491cd16671 | 816 | py | Python | 2015/day-09/day9.py | smolsbs/aoc | 558cc68b94ead332190e14ad7a9ecd6ca5c4aa5a | [
"Unlicense"
] | 1 | 2019-12-02T20:20:41.000Z | 2019-12-02T20:20:41.000Z | 2015/day-09/day9.py | smolsbs/aoc | 558cc68b94ead332190e14ad7a9ecd6ca5c4aa5a | [
"Unlicense"
] | null | null | null | 2015/day-09/day9.py | smolsbs/aoc | 558cc68b94ead332190e14ad7a9ecd6ca5c4aa5a | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
from collections import defaultdict
from itertools import permutations
def main():
    """AoC 2015 day 9: print the shortest and longest route visiting every city once.

    Input lines look like ``"<A> to <B> = <dist>"``; distances are symmetric.
    Improvements over the original: the permutation size follows the actual
    number of cities instead of being hard-coded to 8, blank lines in the
    input are tolerated, and the min/max bookkeeping is collapsed.
    """
    # places[a][b] = distance between cities a and b.
    places = defaultdict(lambda: {})
    with open('input', 'r') as fp:
        for line in fp.read().split('\n'):
            if not line.strip():
                continue  # tolerate a trailing/blank line
            a = line.split(' ')
            places[a[0]][a[2]] = int(a[4])
            places[a[2]][a[0]] = int(a[4])
    shortest = None
    longest = None
    # Brute force: every ordering of all cities (generalized from the fixed 8).
    for trip in permutations(places.keys()):
        total = sum(places[a][b] for a, b in zip(trip, trip[1:]))
        if shortest is None or total < shortest:
            shortest = total
        if longest is None or total > longest:
            longest = total
    print(shortest)
    print(longest)
if __name__ == '__main__':
main() | 27.2 | 66 | 0.541667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.060049 |
2a2b13313d1d275974a44eab41c48c672b2ad06a | 504 | py | Python | tests/test_generator.py | reesmanp/typedpy | eca9e002b6c9edae19f803cef55447926f502309 | [
"MIT"
] | null | null | null | tests/test_generator.py | reesmanp/typedpy | eca9e002b6c9edae19f803cef55447926f502309 | [
"MIT"
] | null | null | null | tests/test_generator.py | reesmanp/typedpy | eca9e002b6c9edae19f803cef55447926f502309 | [
"MIT"
] | null | null | null | import pickle
from pytest import raises
from typedpy import Structure, serialize
from typedpy.fields import Generator
class Foo(Structure):
g: Generator
def test_generator_wrong_type():
with raises(TypeError):
Foo(g=[])
def test_generator():
foo = Foo(g=(i for i in range(5)))
assert sum(foo.g) == 10
def test_generator_err():
foo = Foo(g=(i for i in range(5)))
with raises(TypeError):
pickle.dumps(foo)
with raises(TypeError):
serialize(foo) | 18 | 40 | 0.666667 | 38 | 0.075397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2a2c9906dc961361309d725a8c8106f33e523ad9 | 5,624 | py | Python | stateflow/stateclass.py | frolenkov-nikita/django-stateflow | 0a5d75e42606b662c7d510b5d5ef7cb996cf185b | [
"BSD-3-Clause"
] | null | null | null | stateflow/stateclass.py | frolenkov-nikita/django-stateflow | 0a5d75e42606b662c7d510b5d5ef7cb996cf185b | [
"BSD-3-Clause"
] | null | null | null | stateflow/stateclass.py | frolenkov-nikita/django-stateflow | 0a5d75e42606b662c7d510b5d5ef7cb996cf185b | [
"BSD-3-Clause"
] | null | null | null | """
Workflow based on Python classes
"""
class StateMetaclass(type):
    """Metaclass giving every concrete State class its own transition lists.

    Classes with ``abstract = True`` in their body are skipped (the flag is
    popped so it does not become a class attribute). Python 2 style: applied
    via ``__metaclass__``.
    """
    def __init__(cls, name, bases, dict):
        super(StateMetaclass, cls).__init__(name, bases, dict)
        abstract = dict.pop('abstract', False)
        if not abstract:
            # Fresh per-class lists, populated later by TransitionMetaclass.
            cls.forward_transitions = []
            cls.backward_transitions = []

    # TODO: This method don't belong to 'general' part
    # But it's here because of metaclass conflict.
    # Something should be done about it
    def __str__(cls):
        return cls.get_title()

    def __repr__(cls):
        return "<State: '%s'>" % cls.get_title()
class TransitionMetaclass(type):
    """Metaclass registering each concrete Transition with its income states.

    NOTE(review): both ``forward_transitions`` and ``backward_transitions``
    are appended on the *income* states; outcome states are untouched here —
    confirm this is intended.
    """
    def __init__(cls, name, bases, dict):
        super(TransitionMetaclass, cls).__init__(name, bases, dict)
        abstract = dict.pop('abstract', False)
        if not abstract:
            for klass in dict['income']:
                forward_transitions = getattr(klass, 'forward_transitions')
                forward_transitions.append(cls)
                getattr(klass, 'backward_transitions').append(cls)

    def __str__(cls):
        return cls.get_title()

    def __repr__(cls):
        return "<Transition: '%s'>" % cls.get_title()
class FlowMetaclass(type):
    """Metaclass back-linking a Flow's states and transitions to the flow class."""
    def __init__(cls, name, bases, attrs):
        super(FlowMetaclass, cls).__init__(name, bases, attrs)
        # Give every member state/transition a reference to its owning flow.
        for state in cls.states:
            state.flow = cls
        for transition in cls.transitions:
            transition.flow = cls

    def __str__(cls):
        return ".".join([cls.__module__, cls.__name__])
class State(object):
    """Abstract base for workflow states (Python 2 ``__metaclass__`` style)."""
    __metaclass__ = StateMetaclass
    abstract = True
class Transition(object):
    """Abstract base for workflow transitions (Python 2 ``__metaclass__`` style)."""
    __metaclass__ = TransitionMetaclass
    abstract = True

    @classmethod
    def apply(cls, obj, *args, **kwargs):
        """Perform the transition on *obj*; must be overridden."""
        raise NotImplementedError(
            "Apply method should be defined in subclasses")
class Flow(object):
    """A workflow: the set of states/transitions plus the initial state."""
    __metaclass__ = FlowMetaclass
    states = []
    transitions = []
    initial_state = None

    @classmethod
    def get_state(cls, value=None):
        """Resolve a stored *value* to its State class.

        None/empty string map to the initial state; unknown values raise
        ValueError.
        """
        if value is None or value == '':
            return cls.initial_state
        for item in cls.states:
            if item.get_value() == value:
                return item
        raise ValueError('Cannot find state %r' % value)

    @classmethod
    def state_choices(cls):
        """(value, title) pairs for all states, e.g. for Django field choices."""
        return [state.as_tuple() for state in cls.states]
class DjangoItem(object):
    """Shared accessors for DjangoState and DjangoTransition.

    Supplies the name/value/title lookups plus the (value, title) tuple and
    SQL-placeholder representations.
    """
    @classmethod
    def get_name(cls):
        """Class name of the item."""
        return cls.__name__

    @classmethod
    def get_value(cls):
        """Explicit ``value`` attribute, else the upper-cased class name."""
        try:
            return cls.value
        except AttributeError:
            return cls.get_name().upper()

    @classmethod
    def get_title(cls):
        """Explicit truthy ``title`` attribute, else the class name."""
        return getattr(cls, 'title', None) or cls.get_name()

    @classmethod
    def as_tuple(cls):
        """(value, title) pair, e.g. for Django choices."""
        return (cls.get_value(), cls.get_title())

    @classmethod
    def as_sql(cls):
        """(placeholder, params) pair for raw SQL comparisons."""
        return ('%s', (cls.get_value(),))
class DjangoState(State, DjangoItem):
    """Concrete workflow state with role-based and graph queries."""

    def __str__(self):
        # We need to keep this instance method in order to solve a specific
        # problem with Django templates.
        # When django template variable resolver encounters a callable it
        # always tries to call it. And since class is a callable (and call to
        # it returns a new instance), we end up having instances, not classes
        # rendered in the template
        return str(self.__class__)

    @classmethod
    def forward_allowed_transitions(cls, roles):
        """Transitions out of this state permitted for at least one of *roles*."""
        return [trans for trans in cls.forward_transitions
                if set(trans.permissions) & set(roles)]

    @classmethod
    def forward_states(cls):
        """Outcome states of this state's forward-marked transitions."""
        return [trans.outcome for trans in cls.forward_transitions
                if trans.forward]

    @classmethod
    def all_forward_states(cls):
        """All states transitively reachable through forward transitions.

        Fix: the DFS now skips already-visited states, so a wrongly-set
        'forward' flag producing a cycle no longer recurses forever (the
        original TODO warned about exactly this).
        """
        def get_states(state, lst):
            if state in lst:
                return  # already visited — guards against cycles
            lst.add(state)
            for st in state.forward_states():
                get_states(st, lst)
        result = set([])
        get_states(cls, result)
        return list(result)
class IncorrectStateError(ValueError):
    """Workflow-specific ValueError for incorrect states (not raised in this chunk)."""
    pass
class TransitionFailedError(Exception):
    """Workflow-specific error for failed transitions (not raised in this chunk)."""
    pass
class AdminAction(object):
    """Django admin action that applies one workflow transition to a queryset."""

    def __init__(self, transition):
        self.transition = transition
        # Text shown in the admin action dropdown.
        self.short_description = '%s selected' % transition
        self.__name__ = str(transition)

    def __call__(self, modeladmin, request, queryset):
        """Admin-action signature; applies the transition to every object."""
        for item in queryset:
            self.transition.apply(item)
class DjangoTransition(Transition, DjangoItem):
    """Concrete workflow transition with Django admin integration."""
    abstract = True # This transition is not the part of workflow
    # By default transitions are considered 'forward', i.e.
    # workflow doesn't return to previous state
    forward = True

    def __str__(self):
        # We need to keep this instance method in order to solve a specific
        # problem with Django templates.
        # When django template variable resolver encounters a callable it
        # always tries to call it. And since class is a callable (and call to
        # it returns a new instance), we end up having instances, not classes
        # rendered in the template
        return str(self.__class__)

    @classmethod
    def all(cls):
        """Deprecated: use ``flow.transitions`` directly."""
        import warnings
        warnings.warn("transition.all is deprecated, "
                      "use flow.transitions instead",
                      DeprecationWarning)
        return cls.flow.transitions

    @classmethod
    def admin_actions(cls):
        """Wrap every transition of the flow as a Django admin action."""
        return [AdminAction(trans) for trans in cls.all()]
| 27.038462 | 77 | 0.634068 | 5,549 | 0.986664 | 0 | 0 | 2,026 | 0.360242 | 0 | 0 | 1,370 | 0.243599 |
2a2d6edb0fce651f5c81ecdcc1654544c63224b4 | 254 | py | Python | src/app/admin/__init__.py | schwetzen/liblr | 408235a4f539a05f54f0376dbf9dbcd83957db03 | [
"Apache-2.0"
] | null | null | null | src/app/admin/__init__.py | schwetzen/liblr | 408235a4f539a05f54f0376dbf9dbcd83957db03 | [
"Apache-2.0"
] | 1 | 2018-12-07T22:15:28.000Z | 2018-12-07T22:15:28.000Z | src/app/admin/__init__.py | schwetzen/liblr | 408235a4f539a05f54f0376dbf9dbcd83957db03 | [
"Apache-2.0"
] | 2 | 2018-12-07T20:59:53.000Z | 2018-12-17T21:02:21.000Z | from django.contrib import admin
from app.models import *
from app.admin.tip import ReadingTipAdmin
from app.admin.user import UserAdmin
# Register the app's models with Django's admin site, each with its
# dedicated ModelAdmin configuration.
admin.site.register(User, UserAdmin)
admin.site.register(ReadingTip, ReadingTipAdmin)
| 25.4 | 48 | 0.818898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.110236 |
2a2ef0f35e15ed911d8da568d2fb240bfd2a6fd8 | 5,551 | py | Python | ppobyter/marketplace/abilities.py | graatje/highscoresbot | 26207b8191ed6c9a3d7ecd49fea482e6d3603c36 | [
"MIT"
] | null | null | null | ppobyter/marketplace/abilities.py | graatje/highscoresbot | 26207b8191ed6c9a3d7ecd49fea482e6d3603c36 | [
"MIT"
] | null | null | null | ppobyter/marketplace/abilities.py | graatje/highscoresbot | 26207b8191ed6c9a3d7ecd49fea482e6d3603c36 | [
"MIT"
# Ability id -> human-readable ability name.
# NOTE(review): ids 266 and 267 both map to 'As One' — presumably the two
# in-game variants of that ability; confirm against the source data.
abilities = {1: 'Stench', 2: 'Drizzle', 3: 'Speed Boost', 4: 'Battle Armor', 5: 'Sturdy', 6: 'Damp', 7: 'Limber',
             8: 'Sand Veil', 9: 'Static', 10: 'Volt Absorb', 11: 'Water Absorb', 12: 'Oblivious', 13: 'Cloud Nine',
             14: 'Compound Eyes', 15: 'Insomnia', 16: 'Color Change', 17: 'Immunity', 18: 'Flash Fire',
             19: 'Shield Dust', 20: 'Own Tempo', 21: 'Suction Cups', 22: 'Intimidate', 23: 'Shadow Tag',
             24: 'Rough Skin', 25: 'Wonder Guard', 26: 'Levitate', 27: 'Effect Spore', 28: 'Synchronize',
             29: 'Clear Body', 30: 'Natural Cure', 31: 'Lightning Rod', 32: 'Serene Grace', 33: 'Swift Swim',
             34: 'Chlorophyll', 35: 'Illuminate', 36: 'Trace', 37: 'Huge Power', 38: 'Poison Point', 39: 'Inner Focus',
             40: 'Magma Armor', 41: 'Water Veil', 42: 'Magnet Pull', 43: 'Soundproof', 44: 'Rain Dish',
             45: 'Sand Stream', 46: 'Pressure', 47: 'Thick Fat', 48: 'Early Bird', 49: 'Flame Body',
             50: 'Run Away', 51: 'Keen Eye', 52: 'Hyper Cutter', 53: 'Pickup', 54: 'Truant', 55: 'Hustle',
             56: 'Cute Charm', 57: 'Plus', 58: 'Minus', 59: 'Forecast', 60: 'Sticky Hold', 61: 'Shed Skin',
             62: 'Guts', 63: 'Marvel Scale', 64: 'Liquid Ooze', 65: 'Overgrow', 66: 'Blaze', 67: 'Torrent',
             68: 'Swarm', 69: 'Rock Head', 70: 'Drought', 71: 'Arena Trap', 72: 'Vital Spirit', 73: 'White Smoke',
             74: 'Pure Power', 75: 'Shell Armor', 76: 'Air Lock', 77: 'Tangled Feet', 78: 'Motor Drive', 79: 'Rivalry',
             80: 'Steadfast', 81: 'Snow Cloak', 82: 'Gluttony', 83: 'Anger Point', 84: 'Unburden', 85: 'Heatproof',
             86: 'Simple', 87: 'Dry Skin', 88: 'Download', 89: 'Iron Fist', 90: 'Poison Heal', 91: 'Adaptability',
             92: 'Skill Link', 93: 'Hydration', 94: 'Solar Power', 95: 'Quick Feet', 96: 'Normalize', 97: 'Sniper',
             98: 'Magic Guard', 99: 'No Guard', 100: 'Stall', 101: 'Technician', 102: 'Leaf Guard', 103: 'Klutz',
             104: 'Mold Breaker', 105: 'Super Luck', 106: 'Aftermath', 107: 'Anticipation', 108: 'Forewarn',
             109: 'Unaware', 110: 'Tinted Lens', 111: 'Filter', 112: 'Slow Start', 113: 'Scrappy',
             114: 'Storm Drain', 115: 'Ice Body', 116: 'Solid Rock', 117: 'Snow Warning', 118: 'Honey Gather',
             119: 'Frisk', 120: 'Reckless', 121: 'Multitype', 122: 'Flower Gift', 123: 'Bad Dreams', 124: 'Pickpocket',
             125: 'Sheer Force', 126: 'Contrary', 127: 'Unnerve', 128: 'Defiant', 129: 'Defeatist', 130: 'Cursed Body',
             131: 'Healer', 132: 'Friend Guard', 133: 'Weak Armor', 134: 'Heavy Metal', 135: 'Light Metal',
             136: 'Multiscale', 137: 'Toxic Boost', 138: 'Flare Boost', 139: 'Harvest', 140: 'Telepathy', 141: 'Moody',
             142: 'Overcoat', 143: 'Poison Touch', 144: 'Regenerator', 145: 'Big Pecks', 146: 'Sand Rush',
             147: 'Wonder Skin', 148: 'Analytic', 149: 'Illusion', 150: 'Imposter', 151: 'Infiltrator', 152: 'Mummy',
             153: 'Moxie', 154: 'Justified', 155: 'Rattled', 156: 'Magic Bounce', 157: 'Sap Sipper', 158: 'Prankster',
             159: 'Sand Force', 160: 'Iron Barbs', 161: 'Zen Mode', 162: 'Victory Star', 163: 'Turboblaze',
             164: 'Teravolt', 165: 'Aroma Veil', 166: 'Flower Veil', 167: 'Cheek Pouch', 168: 'Protean',
             169: 'Fur Coat', 170: 'Magician', 171: 'Bulletproof', 172: 'Competitive', 173: 'Strong Jaw',
             174: 'Refrigerate', 175: 'Sweet Veil', 176: 'Stance Change', 177: 'Gale Wings', 178: 'Mega Launcher',
             179: 'Grass Pelt', 180: 'Symbiosis', 181: 'Tough Claws', 182: 'Pixilate', 183: 'Gooey', 184: 'Aerilate',
             185: 'Parental Bond', 186: 'Dark Aura', 187: 'Fairy Aura', 188: 'Aura Break', 189: 'Primordial Sea',
             190: 'Desolate Land', 191: 'Delta Stream', 192: 'Stamina', 193: 'Wimp Out', 194: 'Emergency Exit',
             195: 'Water Compaction', 196: 'Merciless', 197: 'Shields Down', 198: 'Stakeout', 199: 'Water Bubble',
             200: 'Steelworker', 201: 'Berserk', 202: 'Slush Rush', 203: 'Long Reach', 204: 'Liquid Voice',
             205: 'Triage', 206: 'Galvanize', 207: 'Surge Surfer', 208: 'Schooling', 209: 'Disguise',
             210: 'Battle Bond', 211: 'Power Construct', 212: 'Corrosion', 213: 'Comatose', 214: 'Queenly Majesty',
             215: 'Innards Out', 216: 'Dancer', 217: 'Battery', 218: 'Fluffy', 219: 'Dazzling', 220: 'Soul-Heart',
             221: 'Tangling Hair', 222: 'Receiver', 223: 'Power of Alchemy', 224: 'Beast Boost', 225: 'RKS System',
             226: 'Electric Surge', 227: 'Psychic Surge', 228: 'Misty Surge', 229: 'Grassy Surge',
             230: 'Full Metal Body', 231: 'Shadow Shield', 232: 'Prism Armor', 233: 'Neuroforce', 234: 'Intrepid Sword',
             235: 'Dauntless Shield', 236: 'Libero', 237: 'Ball Fetch', 238: 'Cotton Down', 239: 'Propeller Tail',
             240: 'Mirror Armor', 241: 'Gulp Missile', 242: 'Stalwart', 243: 'Steam Engine', 244: 'Punk Rock',
             245: 'Sand Spit', 246: 'Ice Scales', 247: 'Ripen', 248: 'Ice Face', 249: 'Power Spot', 250: 'Mimicry',
             251: 'Screen Cleaner', 252: 'Steely Spirit', 253: 'Perish Body', 254: 'Wandering Spirit',
             255: 'Gorilla Tactics', 256: 'Neutralizing Gas', 257: 'Pastel Veil', 258: 'Hunger Switch',
             259: 'Quick Draw', 260: 'Unseen Fist', 261: 'Curious Medicine', 262: 'Transistor', 263: "Dragon's Maw",
             264: 'Chilling Neigh', 265: 'Grim Neigh', 266: 'As One', 267: 'As One'}
| 108.843137 | 120 | 0.566384 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,140 | 0.565664 |
2a2fc5a472dc2313204790421e750c0201901553 | 1,371 | py | Python | handler.py | jocafneto/Insurance-All-Cross-Sell | ab67200854a70d39c85459fab9b441fbfc31c7e4 | [
"MIT"
] | null | null | null | handler.py | jocafneto/Insurance-All-Cross-Sell | ab67200854a70d39c85459fab9b441fbfc31c7e4 | [
"MIT"
] | null | null | null | handler.py | jocafneto/Insurance-All-Cross-Sell | ab67200854a70d39c85459fab9b441fbfc31c7e4 | [
"MIT"
] | null | null | null | from crypt import methods
import os
import pickle
import pandas as pd
import lightgbm
from flask import Flask, request, Response
from healthinsurance.HealthInsurance import HealthInsurance
# Load the trained LightGBM model once at import time.
# Fix: use a context manager so the model file handle is closed
# (the previous bare open() call was never closed — a resource leak).
with open('model/LGBM_Model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
# Initialize the Flask API.
app = Flask(__name__)
@app.route('/predict', methods=['POST'])
def healthinsurance_predict():
    """Score the posted JSON example(s) with the cross-sell model.

    Accepts either a single JSON object or a list of objects; returns the
    pipeline's prediction payload, or an empty JSON object for empty bodies.
    """
    payload = request.get_json()
    if not payload:
        # No data posted: reply with an empty JSON object.
        return Response('{}', status=200, mimetype='application/json')
    if isinstance(payload, dict):
        # A single example.
        raw_data = pd.DataFrame(payload, index=[0])
    else:
        # Multiple examples: column names come from the first one.
        raw_data = pd.DataFrame(payload, columns=payload[0].keys())
    pipeline = HealthInsurance()
    features = pipeline.data_cleaning(raw_data)
    features = pipeline.feature_engineering(features)
    features = pipeline.data_preparation(features)
    return pipeline.get_prediction(model, raw_data, features)
if __name__ == '__main__':
    # Fix: environment variables are strings — cast PORT explicitly so the
    # bound port is numeric regardless of how the app is served.
    port = int(os.environ.get('PORT', 5000))
    app.run('0.0.0.0', port=port)
| 28.5625 | 77 | 0.623632 | 0 | 0 | 0 | 0 | 951 | 0.693654 | 0 | 0 | 271 | 0.197666 |
2a3122b28ce578dad33591fd4205e3edee9aea4c | 2,490 | py | Python | python_scripts/video_pipeline.py | MichlF/scikit-learn-mooc | 31fd171c9e3f75bd19f3709e1f5345a75a7d22ab | [
"CC-BY-4.0"
] | 1 | 2022-01-25T19:20:21.000Z | 2022-01-25T19:20:21.000Z | python_scripts/video_pipeline.py | gmash24/scikit-learn-mooc | b58f051efb591a38859a4242369c9494ccac6a17 | [
"CC-BY-4.0"
] | null | null | null | python_scripts/video_pipeline.py | gmash24/scikit-learn-mooc | b58f051efb591a38859a4242369c9494ccac6a17 | [
"CC-BY-4.0"
] | null | null | null | # %% [markdown]
# # How to define a scikit-learn pipeline and visualize it
# %% [markdown]
# The goal of keeping this notebook is to:
# - make it available for users that want to reproduce it locally
# - archive the script in the event we want to rerecord this video with an
# update in the UI of scikit-learn in a future release.
# %% [markdown]
# ### First we load the dataset
# %% [markdown]
# We need to define our data and target. In this case we will build a classification model
# %%
import pandas as pd

ames_housing = pd.read_csv("../datasets/house_prices.csv", na_values='?')
target_name = "SalePrice"
data, target = ames_housing.drop(columns=target_name), ames_housing[target_name]
# Binarise the target: predict whether the sale price exceeds 200,000.
target = (target > 200_000).astype(int)

# %% [markdown]
# We inspect the first rows of the dataframe

# %%
data  # bare expression: rendered as cell output in the notebook UI

# %% [markdown]
# We can cherry-pick some features and only retain this subset of data

# %%
numeric_features = ['LotArea', 'FullBath', 'HalfBath']
categorical_features = ['Neighborhood', 'HouseStyle']
data = data[numeric_features + categorical_features]

# %% [markdown]
# ### Then we create the pipeline

# %% [markdown]
# The first step is to define the preprocessing steps

# %%
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder

# Numeric columns: impute missing values with the median, then standardise.
numeric_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='median')),
    ('scaler', StandardScaler(),
    )])

# Categorical columns: one-hot encode, ignoring categories unseen at fit time.
categorical_transformer = OneHotEncoder(handle_unknown='ignore')

# %% [markdown]
# The next step is to apply the transformations using `ColumnTransformer`

# %%
from sklearn.compose import ColumnTransformer

preprocessor = ColumnTransformer(transformers=[
    ('num', numeric_transformer, numeric_features),
    ('cat', categorical_transformer, categorical_features),
])

# %% [markdown]
# Then we define the model and join the steps in order

# %%
from sklearn.linear_model import LogisticRegression

model = Pipeline(steps=[
    ('preprocessor', preprocessor),
    ('classifier', LogisticRegression()),
])

# %% [markdown]
# Let's visualize it!

# %%
from sklearn import set_config
set_config(display='diagram')
model  # bare expression: shows the interactive pipeline diagram

# %% [markdown]
# ### Finally we score the model

# %%
from sklearn.model_selection import cross_validate

cv_results = cross_validate(model, data, target, cv=5)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
      f"{scores.mean():.3f} +/- {scores.std():.3f}")
| 25.151515 | 90 | 0.725703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,284 | 0.515663 |
2a32589106ff6dd9c86ebca030f230c0c1716f14 | 23,423 | py | Python | generator_configuration.py | mariaangelapellegrino/virtual_assistant_generator | b8b31c38797ded888f201c30b38771e861747eff | [
"MIT"
] | 2 | 2020-08-10T23:05:20.000Z | 2021-06-09T05:46:08.000Z | generator_configuration.py | lewismc/virtual_assistant_generator | b8b31c38797ded888f201c30b38771e861747eff | [
"MIT"
] | 5 | 2020-08-10T23:05:16.000Z | 2021-08-31T21:35:59.000Z | generator_configuration.py | lewismc/virtual_assistant_generator | b8b31c38797ded888f201c30b38771e861747eff | [
"MIT"
] | 3 | 2020-08-10T23:05:25.000Z | 2021-05-19T17:14:13.000Z | from nltk.corpus import wordnet as wn
import json
from pyinflect import getAllInflections, getInflection
import re
import inflect
import urllib.parse
from SPARQLWrapper import SPARQLWrapper, JSON
from urllib.parse import quote
from py_thesaurus import Thesaurus
import sys, getopt
# WordNet part-of-speech tags (as used by nltk.corpus.wordnet).
WN_NOUN = 'n'
WN_VERB = 'v'
WN_ADJECTIVE = 'a'
WN_ADJECTIVE_SATELLITE = 's'
WN_ADVERB = 'r'
# Module-level accumulators filled by the query_* helpers:
# label -> {"urls": [...], "synonyms": [...]}.
entities = {}
properties = {}
def nounify(verb_word):
    """Transform a verb to the closest noun: die -> death.

    Returns a list of (noun, probability) tuples sorted by decreasing
    probability, where the probability is the relative frequency of the noun
    among all derivationally related noun lemmas.  Returns [] when
    *verb_word* is unknown to WordNet.
    """
    verb_synsets = wn.synsets(verb_word, pos="v")
    # Word not found.
    if not verb_synsets:
        return []
    # All verb lemmas of the word.
    # Fix: removed a leftover debug print() of the lemma list.
    verb_lemmas = [l for s in verb_synsets for l in s.lemmas()
                   if s.name().split('.')[1] == 'v']
    # Derivationally related forms, keeping only the noun lemmas.
    derivationally_related_forms = [(l, l.derivationally_related_forms())
                                    for l in verb_lemmas]
    related_noun_lemmas = [l for drf in derivationally_related_forms
                           for l in drf[1]
                           if l.synset().name().split('.')[1] == 'n']
    # Score each distinct noun by its relative frequency.
    words = [l.name() for l in related_noun_lemmas]
    len_words = len(words)
    result = [(w, float(words.count(w)) / len_words) for w in set(words)]
    result.sort(key=lambda w: -w[1])
    return result
def convert(word, from_pos, to_pos):
    """Transform words given from/to POS tags.

    Returns (word, probability) tuples sorted by decreasing probability, or
    [] when *word* is unknown to WordNet for *from_pos*.
    """
    adjective_tags = (WN_ADJECTIVE, WN_ADJECTIVE_SATELLITE)

    def _pos_matches(pos_tag, wanted):
        # 'a' (adjective) and 's' (satellite adjective) are interchangeable.
        if pos_tag == wanted:
            return True
        return wanted in adjective_tags and pos_tag in adjective_tags

    synsets = wn.synsets(word, pos=from_pos)
    if not synsets:
        return []
    # Lemmas of the word in the source part of speech.
    lemmas = [l for s in synsets for l in s.lemmas()
              if _pos_matches(s.name().split('.')[1], from_pos)]
    # Derivationally related lemmas restricted to the target part of speech.
    related = [(l, l.derivationally_related_forms()) for l in lemmas]
    target_lemmas = [l for _, forms in related for l in forms
                     if _pos_matches(l.synset().name().split('.')[1], to_pos)]
    # Score each distinct word by its relative frequency.
    words = [l.name() for l in target_lemmas]
    total = len(words)
    scored = [(w, float(words.count(w)) / total) for w in set(words)]
    scored.sort(key=lambda item: -item[1])
    return scored
def clean_value(value):
    """Normalise a raw label into plain lowercase words.

    The steps are order-dependent: percent-decoding happens before leftover
    '%xx' removal, punctuation stripping before number spelling, and
    whitespace collapsing last.
    """
    value = value.lower()
    value = value.replace("$", "dollar ")
    #value = value.replace("€", "euro ")
    # Percent-decode words that look URL-encoded.
    temp = ''
    for word in value.split():
        if word.startswith('%'):
            word = urllib.parse.unquote(word)
        temp = temp + " " + word
    value = temp
    # Drop any remaining percent-escapes that unquoting did not resolve.
    value = re.sub(r"[%][a-zA-Z0-9]+", "", value)
    value = value.replace("&", "and")
    value = value.replace("/", " or ")
    # Remove parenthesised qualifiers, then stray punctuation.
    value = re.sub(r"([(].*[)])", "", value)
    value = value.replace("'", "")
    value = value.replace('"', "")
    value = value.replace(':', "")
    value = value.replace(',', "")
    value = value.replace('<', "")
    value = value.replace('>', "")
    value = value.replace('(', "")
    value = value.replace(')', "")
    value = value.replace('!', "")
    value = value.replace('\\', "")
    value = value.replace('+', " ")
    value = value.replace("_", " ")
    # Spell out purely numeric words (e.g. "42" -> "forty-two").
    p = inflect.engine()
    temp = ''
    for word in value.split():
        if word.isdigit() or bool(re.match('^([0-9]|[,]|[.])*$',word)):
            word = p.number_to_words(word)
        temp = temp + " " + word
    value = temp
    # Collapse any run of whitespace into a single space.
    value = value.strip()
    value = re.sub(r"[ \s\n\t]+", " ", value)
    return value
def contains_special_characters(value):
    """Return True when *value* holds anything beyond ASCII letters, spaces
    and hyphens."""
    return re.match(r'[a-zA-Z -]*$', value) is None
def remove_ambiguities_slot_properties():
    """Drop synonyms colliding with an existing property label or with a
    synonym already claimed by an earlier property (first come, first served,
    in dict iteration order).
    """
    global properties
    #with open('./augmented_slot_properties.json') as f:
    #    properties = json.load(f)
    all_properties_value = list(properties.keys())
    for key in properties:
        if 'synonyms' in properties[key]:
            synonyms = properties[key]['synonyms']
            new_synoynms= []
            for synonym in synonyms:
                if not synonym in all_properties_value:
                    # Claim the synonym so later properties cannot reuse it.
                    all_properties_value.append(synonym)
                    new_synoynms.append(synonym)
            properties[key]['synonyms'] = new_synoynms
    #with open("./augmented_slot_properties.json", "w") as write_file:
    #    json.dump(properties, write_file, indent=4)
def augment_slot_properties():
    """Extend each property label with verb forms (plus their inflections)
    and adjective forms derived via WordNet, stored under 'synonyms'."""
    global properties
    #with open('./cleaned_slot_properties.json') as f:
    #    properties = json.load(f)
    for key in properties:
        # nouns to verbs
        verb_tuples = convert(key, 'n', 'v')
        verb_form = []
        for verb_tuple in verb_tuples:
            value = verb_tuple[0]
            value = clean_value(value)
            verb_form.append(value)
        verb_form = set(verb_form)
        # add verb inflections
        for verb in verb_form:
            temp = getAllInflections(verb)
            inflections = []
            for t in temp:
                value = temp[t][0]
                value = clean_value(value)
                inflections.append(value)
            inflections = set(inflections)
            # Rebinding verb_form here is safe: the for-loop keeps
            # iterating over the original set object.
            verb_form = verb_form.union(inflections)
        verb_form = set(verb_form)
        if key in verb_form:
            verb_form.remove(key)
        verb_form = list(verb_form)
        # nouns to adjectives
        adjective_tuples = convert(key, 'n', 'a')
        adjective_form = []
        for adjective_tuple in adjective_tuples:
            value = adjective_tuple[0]
            value = clean_value(value)
            adjective_form.append(value)
        adjective_form = set(adjective_form)
        if key in adjective_form:
            adjective_form.remove(key)
        adjective_form = list(adjective_form)
        '''
        # noun synonyms
        synonyms = [clean_value(l.name()) for synset in wn.synsets(key) for l in synset.lemmas()]
        synonyms = set(synonyms)
        temp = set()
        for s in synonyms:
            if not s in all_augmented_value:
                temp.add(s)
        #if key in temp:
        #    temp.remove(key)
        synonyms = list(temp)
        # combine all
        extended_synonyms = list(set(verb_form + synonyms + adjective_form))
        '''
        extended_synonyms = list(set(verb_form + adjective_form))
        if extended_synonyms:
            properties[key]["synonyms"] = extended_synonyms
    #with open("./augmented_slot_properties.json", "w") as write_file:
    #    json.dump(properties, write_file, indent=4)
def clean_slot_properties():
    """Normalise property labels in the global *properties* map, merging the
    URLs and synonyms of labels that collapse onto the same cleaned key."""
    global properties
    #with open('./slot_properties.json') as f:
    #    properties = json.load(f)
    cleaned_properties = {}
    for key in properties:
        if contains_special_characters(key):
            new_key = clean_value(key)
        else:
            new_key = key
        # Keep only non-empty keys that are fully clean after normalisation.
        if new_key and len(new_key.strip())>0 and not contains_special_characters(new_key):
            if not new_key in cleaned_properties:
                cleaned_properties[new_key] = {'urls':[]}
            cleaned_properties[new_key]['urls'] = list(set(cleaned_properties[new_key]['urls'] + properties[key]['urls']))
            if 'synonyms' in properties[key]:
                if not 'synonyms' in cleaned_properties[new_key]:
                    cleaned_properties[new_key]['synonyms'] = []
                cleaned_properties[new_key]['synonyms'] = list(set(cleaned_properties[new_key]['synonyms'] + properties[key]['synonyms']))
    #with open("./cleaned_slot_properties.json", "w") as write_file:
    #    json.dump(cleaned_properties, write_file, indent=4)
    properties = cleaned_properties
def augment_slot_entities():
    """Add WordNet lemma synonyms to each entity label.

    NOTE(review): unlike its siblings (clean_slot_entities and the
    *_properties helpers, which operate on the module-level dicts with the
    file I/O commented out), this function still round-trips through JSON
    files, and main() has its call commented out — confirm whether it should
    be migrated to the global-state style.
    """
    with open('./cleaned_slot_entities.json') as f:
        entities = json.load(f)
    for key in entities:
        synonyms = [clean_value(l.name()) for synset in wn.synsets(key) for l in synset.lemmas()]
        synonyms = set(synonyms)
        # Do not list the label itself as its own synonym.
        if key in synonyms:
            synonyms.remove(key)
        synonyms = list(synonyms)
        if synonyms:
            entities[key]["synonyms"] = synonyms
    with open("./augmented_slot_entities.json", "w") as write_file:
        json.dump(entities, write_file, indent=4)
def clean_slot_entities():
    """Normalise entity labels in the global *entities* map.

    Labels containing characters other than ASCII letters, spaces and hyphens
    are rewritten through clean_value(); labels that end up empty or still
    dirty are discarded.  Entries whose labels collapse onto the same cleaned
    key have their URLs (and synonyms, when present) merged.
    """
    global entities
    cleaned = {}
    for label, data in entities.items():
        new_label = clean_value(label) if contains_special_characters(label) else label
        if not new_label or not new_label.strip() or contains_special_characters(new_label):
            continue
        entry = cleaned.setdefault(new_label, {'urls': []})
        entry['urls'] = list(set(entry['urls'] + data['urls']))
        if 'synonyms' in data:
            entry['synonyms'] = list(set(entry.get('synonyms', []) + data['synonyms']))
    entities = cleaned
def generate_entity_label(slot):
    """Derive a readable label from the ?class URI: the last path (or
    fragment) segment, CamelCase split into words, lowercased."""
    uri = slot["class"]["value"]
    label = uri.rsplit("/", 1)[-1]
    if "#" in label:
        label = label.rsplit("#", 1)[-1]
    spaced = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', label)
    spaced = re.sub('([a-z0-9])([A-Z])', r'\1_\2', spaced).lower()
    return spaced.replace("_", " ")
def store_entities(result):
    """Record each ?class binding of a SPARQL JSON *result* in the global
    entity map.

    The label comes from the optional ?label binding, falling back to a
    label derived from the URI; labels of 140+ characters are skipped.
    """
    global entities
    for binding in result["results"]["bindings"]:
        if "label" in binding and "value" in binding["label"]:
            label = binding["label"]["value"]
        else:
            label = generate_entity_label(binding)
        label = label.lower()
        if len(label) >= 140:
            continue
        entry = entities.setdefault(label, {"urls": set()})
        entry["urls"].add("<" + binding["class"]["value"] + ">")
def query_skosConcepts(sparql_endpoint, defaultGraph = "", lang="en"):
    """Collect skos:Concept instances (with *lang* prefLabels when present)
    into the global entity map via store_entities()."""
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX skos: <http://www.w3.org/2004/02/skos/core#> "
             "SELECT DISTINCT ?class ?label WHERE { "
             "?class a skos:Concept."
             "OPTIONAL{ "
             "?class skos:prefLabel ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "} ")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    # Fix: the query used to be executed once more *before* the try block,
    # so it ran twice and endpoint failures escaped the handler entirely.
    # Also narrowed the bare except to Exception.
    try:
        result = sparql.query().convert()
        store_entities(result)
        print("OK skos:concepts query")
    except Exception:
        print("Failed skos:concepts query")
def query_rdfsClasses(sparql_endpoint, defaultGraph = "", lang="en"):
    """Collect rdfs:Class instances (with *lang* labels when present) into
    the global entity map via store_entities()."""
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
             "SELECT DISTINCT ?class ?label WHERE { "
             "?class a rdfs:Class. "
             "OPTIONAL{ "
             "?class rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "} ")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    # Fix: the query used to be executed once more *before* the try block,
    # so it ran twice and endpoint failures escaped the handler entirely.
    # Also narrowed the bare except to Exception.
    try:
        result = sparql.query().convert()
        store_entities(result)
        print("OK rdfs:classes query")
    except Exception:
        print("Failed rdfs:classes query")
def query_owlClasses(sparql_endpoint, defaultGraph = "", lang="en"):
    """Collect owl:Class instances (with *lang* labels when present) into
    the global entity map via store_entities()."""
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX owl: <http://www.w3.org/2002/07/owl#> "
             "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
             "SELECT DISTINCT ?class ?label WHERE { "
             "?class a owl:Class. "
             "OPTIONAL{ "
             "?class rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "}")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    # Fix: the query used to be executed once more *before* the try block,
    # so it ran twice and endpoint failures escaped the handler entirely.
    # Also narrowed the bare except to Exception.
    try:
        result = sparql.query().convert()
        store_entities(result)
        print("OK owl classes query")
    except Exception:
        print("Failed owl classes query")
def query_usedClasses(sparql_endpoint, defaultGraph = "", lang="en"):
    """Collect every class actually used in the data ([] a ?class) into the
    global entity map via store_entities()."""
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
             "SELECT DISTINCT ?class ?label WHERE { "
             "[] a ?class. "
             "OPTIONAL{ "
             "?class rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "} ")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_entities(result)
        print("OK used classes query")
    except Exception:
        # Fix: narrowed from a bare except (which also swallowed
        # KeyboardInterrupt/SystemExit).
        print("Failed used classes query")
def query_entities(sparql_endpoint, defaultGraph = "", lang="en"):
    """Populate the global entity map from every class-discovery query, then
    freeze each URL set into a list (the JSON-serialisable form)."""
    global entities
    query_usedClasses(sparql_endpoint, defaultGraph=defaultGraph, lang=lang)
    query_skosConcepts(sparql_endpoint, defaultGraph=defaultGraph, lang=lang)
    query_rdfsClasses(sparql_endpoint, defaultGraph=defaultGraph, lang=lang)
    query_owlClasses(sparql_endpoint, defaultGraph=defaultGraph, lang=lang)
    for entry in entities.values():
        entry["urls"] = list(entry["urls"])
def generate_property_label(slot):
    """Derive a readable label from the ?p URI: the last path (or fragment)
    segment, CamelCase split into words, lowercased."""
    uri = slot["p"]["value"]
    label = uri.rsplit("/", 1)[-1]
    if "#" in label:
        label = label.rsplit("#", 1)[-1]
    spaced = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', label)
    spaced = re.sub('([a-z0-9])([A-Z])', r'\1_\2', spaced).lower()
    return spaced.replace("_", " ")
def store_properties(result):
    """Record each ?p binding of a SPARQL JSON *result* in the global
    property map.

    The label comes from the optional ?label binding, falling back to a
    label derived from the URI; labels of 140+ characters are skipped.
    """
    global properties
    for binding in result["results"]["bindings"]:
        if "label" in binding and "value" in binding["label"]:
            label = binding["label"]["value"]
        else:
            label = generate_property_label(binding)
        label = label.lower()
        if len(label) >= 140:
            continue
        entry = properties.setdefault(label, {"urls": set()})
        entry["urls"].add("<" + binding["p"]["value"] + ">")
def query_rdfProperty(sparql_endpoint, defaultGraph = "", lang="en"):
    """Collect rdf:Property instances (with *lang* labels when present) into
    the global property map via store_properties()."""
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    # Fix: the rdf prefix used "https://", which is not the RDF namespace —
    # the namespace URI is the literal string "http://www.w3.org/1999/02/
    # 22-rdf-syntax-ns#", so the query matched nothing on standard data.
    query = ("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
             "PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>"
             "SELECT DISTINCT ?p ?label WHERE { "
             "?p rdf:type rdf:Property. "
             "OPTIONAL{ "
             "?p rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "}")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_properties(result)
        print("OK rdf:Property query")
    except Exception:
        # Fix: narrowed from a bare except.
        print("failed rdf:Property query")
def query_owlDatatypeProperties(sparql_endpoint, defaultGraph = "", lang="en"):
    """Collect owl:DatatypeProperty instances (with *lang* labels when
    present) into the global property map via store_properties()."""
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>"
             "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>"
             "PREFIX owl: <http://www.w3.org/2002/07/owl#>"
             "SELECT DISTINCT ?p ?label WHERE { "
             "?p rdf:type owl:DatatypeProperty. "
             "OPTIONAL{ "
             "?p rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "}")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_properties(result)
        print("OK owl:DatatypeProperty query")
    except Exception:
        # Fix: narrowed from a bare except.
        print("failed owl:DatatypeProperty query")
def query_owlObjectProperties(sparql_endpoint, defaultGraph = "", lang="en"):
    """Collect owl:ObjectProperty instances (with *lang* labels when
    present) into the global property map via store_properties()."""
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>"
             "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>"
             "PREFIX owl: <http://www.w3.org/2002/07/owl#>"
             "SELECT DISTINCT ?p ?label WHERE { "
             "?p rdf:type owl:ObjectProperty. "
             "OPTIONAL{ "
             "?p rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "}")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_properties(result)
        print("OK owl:ObjectProperty query")
    except Exception:
        # Fix: narrowed from a bare except.
        print("failed owl:ObjectProperty query")
def query_usedPropertiesWithoutLabels(sparql_endpoint, defaultGraph = "", lang="en"):
    """Collect every predicate used in the data; labels are later derived
    from the URIs by store_properties().

    *lang* is unused here but kept for signature symmetry with the other
    query helpers.
    """
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("SELECT DISTINCT ?p WHERE { ?s ?p ?o. }")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_properties(result)
        print("OK used property without labels query")
    except Exception:
        # Fix: narrowed from a bare except.
        print("failed used property without labels query")
def query_usedProperties(sparql_endpoint, defaultGraph = "", lang="en"):
    """Collect up to 500 properties used in the data (with *lang* labels
    when present); on failure fall back to the label-less variant."""
    sparql = SPARQLWrapper(sparql_endpoint, defaultGraph=defaultGraph)
    query = ("PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
             "SELECT DISTINCT ?p ?label WHERE { "
             "?s ?p ?o. "
             "OPTIONAL{ "
             "?p rdfs:label ?label. "
             "FILTER(LANG(?label)='"+lang+"')"
             "}"
             "} LIMIT 500")
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    try:
        result = sparql.query().convert()
        store_properties(result)
        print("OK used property with labels query")
    except Exception:
        # Fix: narrowed from a bare except.
        print("failed used property with labels query")
        query_usedPropertiesWithoutLabels(sparql_endpoint, defaultGraph, lang)
def query_properties(sparql_endpoint, defaultGraph = "", lang="en"):
    """Populate the global property map from every property-discovery query,
    then freeze each URL set into a list (the JSON-serialisable form)."""
    global properties
    query_usedPropertiesWithoutLabels(sparql_endpoint, defaultGraph=defaultGraph, lang=lang)
    query_owlObjectProperties(sparql_endpoint, defaultGraph=defaultGraph, lang=lang)
    query_owlDatatypeProperties(sparql_endpoint, defaultGraph=defaultGraph, lang=lang)
    query_rdfProperty(sparql_endpoint, defaultGraph=defaultGraph, lang=lang)
    for entry in properties.values():
        entry["urls"] = list(entry["urls"])
def main(argv):
    """CLI entry point: harvest classes and properties from a SPARQL
    endpoint and write the assistant configuration to ./conf.json.

    Options:
        -e/--endpoint          SPARQL endpoint URL (required)
        -g/--graph             default graph URI
        -l/--lang              label language, 'en' or 'it' (default 'en')
        -i/--invocation_name   assistant invocation name
    """
    endpoint = ""  # e.g., "http://dbpedia.org/sparql"
    defaultGraph = ""  # e.g., "http://dbpedia.org"
    lang = None  # defaults to "en" below
    invocation_name = None  # defaults to "my personal assistant" below
    intents = [
        "getAllResultsPreviousQuery",
        "getQueryExplanation",
        "getFurtherDetails",
        "getPropertyObject",
        "getDescription",
        "getNumericFilter",
        "getNumericFilterByClass",
        "getClassInstances",
        "getTripleVerification",
        "getLocation",
        "getSuperlative",
        "getPropertySubjectByClass",
        "getPropertySubject"
    ]
    result_limit = 5
    usage = 'generator_configuration.py -e SPARQL_endpoint -g default graph [-l lang -i invocation name]'
    if len(argv) == 0:
        print(usage)
        sys.exit(2)
    try:
        # Fix: '-l' and the long options previously did not declare their
        # arguments ("he:g:li:" / ["lang", "invocation_name"]), so a
        # language or invocation-name value could never actually be parsed.
        opts, args = getopt.getopt(argv, "he:g:l:i:",
                                   ["endpoint=", "graph=", "lang=", "invocation_name="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt in ("-e", "--endpoint"):
            endpoint = arg
        elif opt in ("-g", "--graph"):
            defaultGraph = arg
        elif opt in ("-l", "--lang") and (arg == "en" or arg == "it"):
            lang = arg
        elif opt in ("-i", "--invocation_name"):
            invocation_name = arg
    if lang is None:
        lang = "en"
    if invocation_name is None:
        invocation_name = "my personal assistant"
    print('SPARQL endpoint: ', endpoint)
    print('Graph: ', defaultGraph)
    print('Lang: ', lang)
    print('Invocation name: ', invocation_name)
    print("Querying entities...")
    query_entities(endpoint, defaultGraph=defaultGraph, lang=lang)
    print("Cleaning class labels...")
    clean_slot_entities()
    #print("Augmenting class labels...")
    #augment_slot_entities()
    print("Querying properties...")
    query_properties(endpoint, defaultGraph=defaultGraph, lang=lang)
    print("Cleaning property labels...")
    clean_slot_properties()
    print("Augmenting property labels...")
    augment_slot_properties()
    remove_ambiguities_slot_properties()
    # When several URIs collapsed onto the label "label", keep only the
    # most frequently used one.
    if "label" in properties and len(properties["label"]["urls"]) > 1:
        dict_label = {}
        for prop_label in properties["label"]["urls"]:
            sparql = SPARQLWrapper(endpoint, defaultGraph=defaultGraph)
            query = ("SELECT COUNT(*) as ?count WHERE { "
                     "?s " + prop_label + " ?o. "
                     "}")
            sparql.setQuery(query)
            sparql.setReturnFormat(JSON)
            try:
                result = sparql.query().convert()
                result = result['results']['bindings'][0]
                # Fix: SPARQL JSON binding values are strings — compare the
                # usage counts numerically, not lexicographically.
                dict_label[prop_label] = int(result['count']['value'])
            except Exception:
                # Fix: narrowed from a bare except.
                pass
        # Fix: guard against every count query failing before calling max().
        if dict_label:
            key_max = max(dict_label, key=lambda x: dict_label[x])
            properties["label"]["urls"] = [key_max]
    conf = {
        "invocation_name": invocation_name,
        "intents": intents,
        "lang": lang,
        "result_limit": result_limit,
        "endpoint": endpoint,
        "entity": entities,
        "property": properties
    }
    with open("./conf.json", "w") as write_file:
        json.dump(conf, write_file, indent=4)
# Script entry point: forward the CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| 31.652703 | 187 | 0.597191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,169 | 0.306041 |
2a326bdb7af710f5433ff63b32a58d49b24260c8 | 3,951 | py | Python | strictdoc/backend/sdoc/grammar/grammar.py | BenGardiner/strictdoc | c927872a8f080ca1f86cdb7e99767fb25398dbd5 | [
"Apache-2.0"
] | null | null | null | strictdoc/backend/sdoc/grammar/grammar.py | BenGardiner/strictdoc | c927872a8f080ca1f86cdb7e99767fb25398dbd5 | [
"Apache-2.0"
] | 3 | 2022-02-21T10:57:01.000Z | 2022-02-21T13:22:23.000Z | strictdoc/backend/sdoc/grammar/grammar.py | BenGardiner/strictdoc | c927872a8f080ca1f86cdb7e99767fb25398dbd5 | [
"Apache-2.0"
# textX grammar describing the StrictDoc (.sdoc) format: a [DOCUMENT] header
# with optional config and custom [GRAMMAR] sections, followed by nested
# [SECTION]s, requirements, composite requirements and [FREETEXT] blocks.
# NOTE(review): the 'ReservedKeyword' rule is defined twice below with
# different alternatives -- confirm which definition textX actually applies.
] | null | null | null | STRICTDOC_GRAMMAR = r"""
Document[noskipws]:
'[DOCUMENT]' '\n'
// NAME: is deprecated. Both documents and sections now have TITLE:.
(('NAME: ' name = /.*$/ '\n') | ('TITLE: ' title = /.*$/ '\n')?)
(config = DocumentConfig)?
('\n' grammar = DocumentGrammar)?
free_texts *= SpaceThenFreeText
section_contents *= SectionOrRequirement
;
ReservedKeyword[noskipws]:
'DOCUMENT' | 'GRAMMAR'
;
DocumentGrammar[noskipws]:
'[GRAMMAR]' '\n'
'ELEMENTS:' '\n'
elements += GrammarElement
;
GrammarElement[noskipws]:
'- TAG: ' tag = RequirementType '\n'
' FIELDS:' '\n'
fields += GrammarElementField
;
GrammarElementField[noskipws]:
GrammarElementFieldString |
GrammarElementFieldSingleChoice |
GrammarElementFieldMultipleChoice |
GrammarElementFieldTag
;
GrammarElementFieldString[noskipws]:
' - TITLE: ' title=FieldName '\n'
' TYPE: String' '\n'
' REQUIRED: ' (required = BooleanChoice) '\n'
;
GrammarElementFieldSingleChoice[noskipws]:
' - TITLE: ' title=FieldName '\n'
' TYPE: SingleChoice'
'(' ((options = ChoiceOption) (options *= ChoiceOptionXs)) ')' '\n'
' REQUIRED: ' (required = BooleanChoice) '\n'
;
GrammarElementFieldMultipleChoice[noskipws]:
' - TITLE: ' title=FieldName '\n'
' TYPE: MultipleChoice'
'(' ((options = ChoiceOption) (options *= ChoiceOptionXs)) ')' '\n'
' REQUIRED: ' (required = BooleanChoice) '\n'
;
GrammarElementFieldTag[noskipws]:
' - TITLE: ' title=FieldName '\n'
' TYPE: Tag' '\n'
' REQUIRED: ' (required = BooleanChoice) '\n'
;
BooleanChoice[noskipws]:
('True' | 'False')
;
DocumentConfig[noskipws]:
('VERSION: ' version = /.*$/ '\n')?
('NUMBER: ' number = /.*$/ '\n')?
('OPTIONS:' '\n'
(' MARKUP: ' (markup = MarkupChoice) '\n')?
(' AUTO_LEVELS: ' (auto_levels = AutoLevelsChoice) '\n')?
)?
;
MarkupChoice[noskipws]:
'RST' | 'Text' | 'HTML'
;
AutoLevelsChoice[noskipws]:
'On' | 'Off'
;
Section[noskipws]:
'[SECTION]'
'\n'
('UID: ' uid = /.+$/ '\n')?
('LEVEL: ' level = /.*/ '\n')?
'TITLE: ' title = /.*$/ '\n'
free_texts *= SpaceThenFreeText
section_contents *= SectionOrRequirement
'\n'
'[/SECTION]'
'\n'
;
SectionOrRequirement[noskipws]:
'\n' (Section | Requirement | CompositeRequirement)
;
SpaceThenRequirement[noskipws]:
'\n' (Requirement | CompositeRequirement)
;
SpaceThenFreeText[noskipws]:
'\n' (FreeText)
;
ReservedKeyword[noskipws]:
'DOCUMENT' | 'GRAMMAR' | 'SECTION' | 'FREETEXT'
;
Requirement[noskipws]:
'[' !CompositeRequirementTagName requirement_type = RequirementType ']' '\n'
fields *= RequirementField
;
CompositeRequirementTagName[noskipws]:
'COMPOSITE_'
;
RequirementType[noskipws]:
!ReservedKeyword /[A-Z]+(_[A-Z]+)*/
;
RequirementField[noskipws]:
(
field_name = 'REFS' ':' '\n'
(field_value_references += Reference)
) |
(
field_name = FieldName ':'
(
((' ' field_value = SingleLineString | field_value = '') '\n') |
(' ' (field_value_multiline = MultiLineString) '\n')
)
)
;
CompositeRequirement[noskipws]:
'[COMPOSITE_' requirement_type = RequirementType ']' '\n'
fields *= RequirementField
requirements *= SpaceThenRequirement
'\n'
'[/COMPOSITE_REQUIREMENT]' '\n'
;
ChoiceOption[noskipws]:
/[\w\/-]+( *[\w\/-]+)*/
;
ChoiceOptionXs[noskipws]:
/, /- ChoiceOption
;
RequirementStatus[noskipws]:
'Draft' | 'Active' | 'Deleted';
RequirementComment[noskipws]:
'COMMENT: ' (
comment_single = SingleLineString | comment_multiline = MultiLineString
) '\n'
;
FreeText[noskipws]:
'[FREETEXT]' '\n'
parts+=TextPart
FreeTextEnd
;
FreeTextEnd: /^/ '[/FREETEXT]' '\n';
TextPart[noskipws]:
(InlineLink | NormalString)
;
NormalString[noskipws]:
(!SpecialKeyword !FreeTextEnd /(?ms)./)*
;
SpecialKeyword:
InlineLinkStart // more keywords are coming later
;
InlineLinkStart: '[LINK: ';
InlineLink[noskipws]:
InlineLinkStart value = /[^\]]*/ ']'
;
"""
| 20.365979 | 78 | 0.638826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,930 | 0.994685 |
2a33d0eec1f385f2483bfafb27af77afb0c18924 | 1,683 | py | Python | app/extensions/api/__init__.py | ssfdust/full-stack-flask-smorest | a0bdbd3a7d314b82bb43b265578aba7bbd175e51 | [
"Apache-2.0"
] | 33 | 2019-09-25T02:19:43.000Z | 2022-03-25T01:58:19.000Z | app/extensions/api/__init__.py | ssfdust/full-stack-flask-rest-api | a0bdbd3a7d314b82bb43b265578aba7bbd175e51 | [
"Apache-2.0"
] | null | null | null | app/extensions/api/__init__.py | ssfdust/full-stack-flask-rest-api | a0bdbd3a7d314b82bb43b265578aba7bbd175e51 | [
"Apache-2.0"
] | 6 | 2020-01-12T15:18:07.000Z | 2021-06-01T16:30:26.000Z | # Copyright 2019 RedLotus <ssfdust@gmail.com>
# Author: RedLotus <ssfdust@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_smorest import Api as BaseApi
from app.extensions.marshal import UploadField
class Api(BaseApi):
    """flask-smorest ``Api`` subclass that supports an extra URL prefix
    when registering blueprints."""

    def register_blueprint(self, blp, base_prefix=None, **options):
        """Register a blueprint on the Flask app and in the OpenAPI spec.

        Combines ``base_prefix`` with the blueprint's own default prefix.

        :param blp: the Blueprint to register
        :param base_prefix: optional prefix prepended to the blueprint's
            own ``url_prefix``
        :param options: keyword arguments forwarded to
            ``Flask.register_blueprint``

        Must be called after the app has been initialized.
        """
        url_prefix = options.get("url_prefix", blp.url_prefix)
        if base_prefix is not None:
            options["url_prefix"] = base_prefix + url_prefix
        self._app.register_blueprint(blp, **options)
        # Also expose the blueprint's views in the OpenAPI document and tag
        # them under the blueprint's name/description.
        blp.register_views_in_doc(self._app, self.spec)
        self.spec.tag({"name": blp.name, "description": blp.description})
# HTTP bearer (JWT) security schemes advertised in the OpenAPI document:
# one for access tokens, one for refresh tokens.
spec_kwargs = {
    "components": {
        "securitySchemes": {
            "api_key": {"type": "http", "scheme": "bearer", "bearerFormat": "JWT"},
            "refresh_key": {"type": "http", "scheme": "bearer", "bearerFormat": "JWT"},
        }
    }
}
# Module-level Api singleton; the Flask app binds to it later (init_app).
api = Api(spec_kwargs=spec_kwargs)
# Teach apispec how to render the custom upload field (OpenAPI string/binary).
api.register_field(UploadField, "string", "binary")
| 30.6 | 87 | 0.674985 | 686 | 0.391329 | 0 | 0 | 0 | 0 | 0 | 0 | 1,063 | 0.606389 |
2a3542de3a20f0796eef0e90a172db6862e950d4 | 1,236 | py | Python | bot.py | donmbelembe/dino-auto-play | bfcdb31696b7a2bfa25fd43377732967062fa07c | [
"MIT"
] | null | null | null | bot.py | donmbelembe/dino-auto-play | bfcdb31696b7a2bfa25fd43377732967062fa07c | [
"MIT"
] | 4 | 2021-03-19T01:40:43.000Z | 2022-03-11T23:50:45.000Z | bot.py | donmbelembe/dino-auto-play | bfcdb31696b7a2bfa25fd43377732967062fa07c | [
"MIT"
] | null | null | null | # https://www.youtube.com/watch?v=bf_UOFFaHiY
# http://www.trex-game.skipser.com/
from PIL import ImageGrab, ImageOps
import pyautogui
import time
from numpy import *
class Cordinates():
    """Screen coordinates (pixels) used by the dino auto-player.

    NOTE(review): values are hard-coded for one specific screen size and
    browser layout -- confirm them for your setup.
    """
    replayBtn = (962, 530)  # "replay" button of the Chrome dino game
    dinosaur = (664, 536) # dinosaur standing
    # dinosaur = (686, 548) # dinosaur ducking
    #730= x cordinate to check for tree
    #y cordinate = 565
def restartGame():
    """Click the replay button to (re)start the game."""
    pyautogui.click(Cordinates.replayBtn)
    # pyautogui.keyDown('down')
def pressSpace():
    """Tap the space bar to make the dinosaur jump."""
    # pyautogui.keyUp('down')
    pyautogui.keyDown('space')
    print("Jump")
    # Hold the key briefly so the browser registers the press.
    time.sleep(0.18)
    pyautogui.keyUp('space')
    # pyautogui.keyDown('down')
def imageGrab():
    """Sample the area just ahead of the dinosaur and return a pixel checksum.

    Grabs a small box in front of the dinosaur, converts it to grayscale and
    sums the (count, value) histogram entries.  The caller compares this sum
    against a magic constant (3047) that corresponds to an empty road --
    NOTE(review): that constant depends on screen/theme; confirm locally.
    """
    x1 = Cordinates.dinosaur[0] + 50 # 70 is the distance
    y1 = Cordinates.dinosaur[1]
    x2 = x1 + 80
    y2 = Cordinates.dinosaur[1] + 35
    box = (x1, y1, x2, y2)
    image = ImageGrab.grab(box)
    grayImage = ImageOps.grayscale(image)
    # `array` comes from `from numpy import *`; getcolors() yields
    # (count, value) pairs whose total acts as a cheap fingerprint.
    a = array(grayImage.getcolors())
    print(a.sum())
    return a.sum()
def main():
    """Run the auto-player until the user interrupts with Ctrl-C."""
    restartGame()
    try:
        while True:
            # Anything other than the empty-road checksum means an obstacle.
            obstacle_ahead = imageGrab() != 3047
            if obstacle_ahead:
                pressSpace()
            time.sleep(0.1)
    except KeyboardInterrupt:
        print("Program stopped")
# Only auto-run when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
2a37076e9a1081b1644f5c10b693cd623705219d | 735 | py | Python | cybox/objects/win_event_object.py | Mattlk13/python-cybox | ee82c7da40ca4638e3ca8d70766150c0dace1b55 | [
"BSD-3-Clause"
] | 40 | 2015-03-05T18:22:51.000Z | 2022-03-06T07:29:25.000Z | cybox/objects/win_event_object.py | Mattlk13/python-cybox | ee82c7da40ca4638e3ca8d70766150c0dace1b55 | [
"BSD-3-Clause"
] | 106 | 2015-01-12T18:52:20.000Z | 2021-04-25T22:57:52.000Z | cybox/objects/win_event_object.py | Mattlk13/python-cybox | ee82c7da40ca4638e3ca8d70766150c0dace1b55 | [
"BSD-3-Clause"
] | 30 | 2015-03-25T07:24:40.000Z | 2021-07-23T17:10:11.000Z | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities
from mixbox import fields
import cybox.bindings.win_event_object as win_event_binding
from cybox.objects.win_handle_object import WinHandle
from cybox.common import ObjectProperties, String
class WinEvent(ObjectProperties):
    """CybOX object properties for a Windows event object.

    Maps the ``WindowsEventObjectType`` XML binding onto typed fields.
    """
    _binding = win_event_binding
    _binding_class = win_event_binding.WindowsEventObjectType
    _namespace = "http://cybox.mitre.org/objects#WinEventObject-2"
    _XSI_NS = "WinEventObj"
    _XSI_TYPE = "WindowsEventObjectType"
    # TypedFields are (de)serialized to/from the XML binding by mixbox.
    name = fields.TypedField("Name", String)
    handle = fields.TypedField("Handle", WinHandle)
    type_ = fields.TypedField("Type", String)
| 33.409091 | 66 | 0.779592 | 408 | 0.555102 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.282993 |
2a3762ff9f2942b988ba9f4529ccc38f94cea111 | 1,883 | py | Python | cfgov/cfgov/settings/production.py | cyVR/aur | 269dad2e659f7366e6eea037110d38ab41e3ad53 | [
"CC0-1.0"
] | null | null | null | cfgov/cfgov/settings/production.py | cyVR/aur | 269dad2e659f7366e6eea037110d38ab41e3ad53 | [
"CC0-1.0"
] | null | null | null | cfgov/cfgov/settings/production.py | cyVR/aur | 269dad2e659f7366e6eea037110d38ab41e3ad53 | [
"CC0-1.0"
] | null | null | null | from .base import *
# Sends an email to developers in the ADMIN_EMAILS list if Debug=False for errors
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        # E-mails site admins on ERROR-level records (active when DEBUG=False).
        'mail_admins': {
            'level': 'ERROR',
            'filters': [],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        }
    },
    'loggers': {
        # Request errors go both to admin e-mail and to the console.
        'django.request': {
            'handlers': ['mail_admins', 'console'],
            'level': 'WARNING',
            'propagate': True,
        },
        'django': {
            'level': 'ERROR',
            'propagate': False,
        }
    }
}
# Outgoing mail over SMTP; the host comes from the environment.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.getenv('EMAIL_HOST')
# Database settings are skipped during `collectstatic`, which needs no DB.
# NOTE(review): indentation in this archived copy is ambiguous -- DATABASES is
# assumed to sit inside `if not COLLECTSTATIC`; confirm against upstream.
if not COLLECTSTATIC:
    if os.environ.get('DATABASE_ROUTING', False):
        DATABASE_ROUTERS = ['v1.db_router.CFGOVRouter', 'v1.db_router.LegacyRouter']
    DATABASES = {
        'default': {
            'ENGINE': MYSQL_ENGINE,
            'NAME': os.environ.get('MYSQL_NAME', ''),
            'USER': os.environ.get('MYSQL_USER', ''),
            'PASSWORD': os.environ.get('MYSQL_PW', ''),
            'HOST': os.environ.get('MYSQL_HOST', ''),
            'PORT': os.environ.get('MYSQL_PORT', ''),
        },
        'legacy': {
            'ENGINE': MYSQL_ENGINE,
            'NAME': os.environ.get('LEGACY_MYSQL_NAME', ''),
            'USER': os.environ.get('LEGACY_MYSQL_USER', ''),
            'PASSWORD': os.environ.get('LEGACY_MYSQL_PW', ''),
            'HOST': os.environ.get('LEGACY_MYSQL_HOST', ''),
            'PORT': os.environ.get('LEGACY_MYSQL_PORT', ''),
        },
    }
# Hashed filenames for cache busting of static assets.
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
| 31.915254 | 85 | 0.51248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 838 | 0.445035 |
2a377186c5cffd5cbf92c4375d940a280d7978d4 | 163 | py | Python | Lab5/try.py | HelloYeew/helloyeew-lab-computer-programming- | 92f643deadcec39ffdf2af2cba43626d40f925cd | [
"MIT"
] | 1 | 2022-01-02T00:47:28.000Z | 2022-01-02T00:47:28.000Z | Lab5/try.py | HelloYeew/helloyeew-lab-computer-programming-ii | 92f643deadcec39ffdf2af2cba43626d40f925cd | [
"MIT"
] | null | null | null | Lab5/try.py | HelloYeew/helloyeew-lab-computer-programming-ii | 92f643deadcec39ffdf2af2cba43626d40f925cd | [
"MIT"
] | null | null | null | import tkinter as tk
root = tk.Tk()
def motion(event):
    """Print the pointer coordinates of a mouse-motion event."""
    print(f'{event.x}, {event.y}')
# Report every mouse movement over the window, then enter the Tk event loop.
root.bind('<Motion>', motion)
root.mainloop()
2a37a23903a55c44659c456a27c899c5c9c44baa | 3,552 | py | Python | deprecated/nexus_server/nexus_db.py | utarsuno/quasar_source | 634fcd71ad78cd99cca4f1852693a02acae3107d | [
"MIT"
] | 7 | 2017-07-11T04:40:49.000Z | 2020-10-22T04:43:08.000Z | deprecated/nexus_server/nexus_db.py | utarsuno/quasar_source | 634fcd71ad78cd99cca4f1852693a02acae3107d | [
"MIT"
] | 2 | 2020-07-07T19:52:42.000Z | 2021-03-09T03:26:35.000Z | deprecated/nexus_server/nexus_db.py | utarsuno/quasar_source | 634fcd71ad78cd99cca4f1852693a02acae3107d | [
"MIT"
] | 1 | 2018-11-14T06:08:50.000Z | 2018-11-14T06:08:50.000Z | # coding=utf-8
"""This module, nexus_db.py, defines a basic started database for the Nexus Server."""
import pika
import json
import time
from scripts.docker.wait_for_rabbit_host import WaitForRabbitMQHost
from libraries.database_abstraction.sql.sqlite import sqlite_db
from libraries.database_abstraction.sql.sqlite import table_abstraction
from libraries.database_abstraction.sql.query_abstraction import sql_query
######################################################################
# Module-level scratch schema: a SQLite file with two to-do tables.
# NOTE(review): created at import time against a fixed path ('/v/db.sqlite');
# confirm this side effect is intended in all deployment contexts.
db = sqlite_db.SQLiteDB('/v/db.sqlite', False, True)
# One row per to-do board; px..pz / nx..nz presumably hold a 3D position and
# normal vector (stored as strings) -- confirm against the client code.
todo_lists = table_abstraction.TableAbstraction('todo_lists')
todo_lists.add_column_string('table_id', nullable=False, unique=True)
todo_lists.add_column_string('px', nullable=False, unique=False)
todo_lists.add_column_string('py', nullable=False, unique=False)
todo_lists.add_column_string('pz', nullable=False, unique=False)
todo_lists.add_column_string('nx', nullable=False, unique=False)
todo_lists.add_column_string('ny', nullable=False, unique=False)
todo_lists.add_column_string('nz', nullable=False, unique=False)
# One row per to-do entry, keyed by (table_id, row_id).
todo_rows = table_abstraction.TableAbstraction('todo_rows')
todo_rows.add_column_string('table_id', nullable=False, unique=False)
todo_rows.add_column_string('row_id', nullable=False, unique=False)
todo_rows.add_column_string('description', nullable=False, unique=False)
todo_rows.add_column_string('time', nullable=False, unique=False)
todo_rows.add_column_string('difficulty', nullable=False, unique=False)
todo_rows.add_column_string('importance', nullable=False, unique=False)
todo_rows.add_column_string('completed', nullable=False, unique=False)
######################################################################
# NOTE(review): developer-local absolute path -- looks like leftover debug
# state; confirm nothing reads it before removing.
TEMP = '/Users/utarsuno/git_repos/quasar_source/generated_output/local/personal'
class NexusDatabase(object):
    """API for the Nexus Server's DB.

    Holds one SQLite connection plus the table schemas for users, worlds
    and entities.  Schemas are built eagerly in __init__.
    """
    def __init__(self, path: str):
        # SQLiteDB(path, False, True): the positional flags mirror the
        # module-level `db` above -- NOTE(review): confirm their meaning
        # in sqlite_db before changing.
        self._db = sqlite_db.SQLiteDB(path, False, True)
        self._schema_users = self._load_schema_users()
        self._schema_worlds = self._load_schema_worlds()
        self._schema_entities_root = self._load_schema_entities_root()
        self._schema_entities_data = self._load_schema_entities_data()
    def _load_schema_users(self):
        """Loads the users table schema."""
        users = table_abstraction.TableAbstraction('user')
        users.add_column_row_id_alias()
        users.add_column_string('email', nullable=False, unique=True, indexed=True)
        # NOTE(review): stored as a plain string column -- confirm the value
        # is hashed before it reaches this layer.
        users.add_column_string('password', nullable=False, unique=False, indexed=False)
        #users.add_column_string('meta_data', nullable=True, unique=False, indexed=False)
        return users
    def _load_schema_worlds(self):
        """Loads the worlds table schema."""
        worlds = table_abstraction.TableAbstraction('world')
        worlds.add_column_row_id_alias()
        #worlds.add_column_string('meta_data', nullable=True, unique=False, indexed=False)
        return worlds
    def _load_schema_entities_root(self):
        """Loads the entities root table schema."""
        entities_root = table_abstraction.TableAbstraction('entity_root')
        entities_root.add_column_row_id_alias()
        # Each root entity belongs to exactly one world.
        entities_root.add_column_foreign_key(self._schema_worlds.primary_key)
        return entities_root
    def _load_schema_entities_data(self):
        """Loads the entities data table schema."""
        entities_data = table_abstraction.TableAbstraction('entity_data')
        entities_data.add_column_row_id_alias()
        return entities_data
| 47.36 | 90 | 0.728604 | 1,777 | 0.500282 | 0 | 0 | 0 | 0 | 0 | 0 | 868 | 0.244369 |
2a37bb94944073ebefda7ff352df1e712e016066 | 7,429 | py | Python | tdcsm/cli.py | tdcoa/usage | 408091f77360fe29e14186b60746fd7d60713e42 | [
"MIT"
] | null | null | null | tdcsm/cli.py | tdcoa/usage | 408091f77360fe29e14186b60746fd7d60713e42 | [
"MIT"
] | 4 | 2020-07-21T18:42:22.000Z | 2020-10-14T00:50:45.000Z | tdcsm/cli.py | tdcoa/usage | 408091f77360fe29e14186b60746fd7d60713e42 | [
"MIT"
] | 1 | 2020-08-05T20:09:41.000Z | 2020-08-05T20:09:41.000Z | #! /usr/bin/env python
"tdcsm command-line interface"
import argparse
from pathlib import Path
from typing import Any, Sequence, Callable, List, Optional
from logging import getLogger
from .tdgui import coa as tdgui
from .tdcoa import tdcoa
from .model import load_filesets, load_srcsys, dump_srcsys, SrcSys, FileSet, SQLFile
logger = getLogger(__name__)
# Mutable module state, rebound by run() from the CLI options: the COA
# working directory and the secrets file name (relative to it).
apppath = Path.cwd()
secrets = 'secrets.yaml'
def start_gui() -> None:
    "invoke the tdcsm GUI against the current approot/secrets"
    tdgui(str(apppath), secrets)
def tabulate(rows: Sequence[Sequence[str]], headers: Sequence[str]) -> None:
    "Render *rows* under *headers* as a left-aligned, space-separated table."
    # Column widths start at the header widths and grow to fit the widest cell.
    widths = [len(header) for header in headers]
    if rows:
        column_maxes = (max(len(cell) for cell in column) for column in zip(*rows))
        widths = [max(cell_max, width) for cell_max, width in zip(column_maxes, widths)]
    row_format = ' '.join(f'{{:{width}}}' for width in widths).format
    print(row_format(*headers))
    print(row_format(*('-' * width for width in widths)))
    for row in rows:
        print(row_format(*row))
def show_systems(name: Optional[List[str]] = None, verbose: bool = False, active: bool = False) -> None:
    "list source systems, optionally filtered by name and/or active flag"
    def summary(label: str, system: SrcSys) -> List[str]:
        return [label, 'Yes' if system.active else 'No']

    def details(label: str, system: SrcSys) -> List[str]:
        active_sets = ','.join(fs_name for fs_name, fs in system.filesets.items() if fs.active)
        return summary(label, system) + [system.siteid, active_sets]

    systems = load_srcsys(approot=apppath)
    # Filters are independent; apply whichever were requested.
    if active:
        systems = {label: sys_ for label, sys_ in systems.items() if sys_.active}
    if name:
        systems = {label: sys_ for label, sys_ in systems.items() if label in name}
    if verbose:
        tabulate([details(label, sys_) for label, sys_ in systems.items()], ["System", "Enabled", "Site ID", "Filesets"])
    else:
        tabulate([summary(label, sys_) for label, sys_ in systems.items()], ["System", "Enabled"])
def enable_systems(systems: Sequence[str], enable: bool = True) -> None:
    "set the active flag on the named source systems and persist the change"
    srcsys = load_srcsys(approot=apppath)
    changed = 0
    for sys_name in systems:
        try:
            entry = srcsys[sys_name]
        except KeyError:
            logger.error("'%s' is not a valid source system name", sys_name)
            continue
        # Count only real transitions so an unchanged file is not rewritten.
        if entry.active != enable:
            entry.active = enable
            changed += 1
    if changed:
        dump_srcsys(srcsys, approot=apppath)
    else:
        logger.warning('No source systems were changed')
def activate_filesets(system: str, filesets: Sequence[str], activate: bool = True) -> None:
    "toggle the active flag on the named filesets of one source system"
    srcsys = load_srcsys(approot=apppath)
    if system not in srcsys:
        raise SystemExit(f"'{system}' is not a valid source system name")
    available = srcsys[system].filesets
    changed = 0
    for fs_name in filesets:
        try:
            entry = available[fs_name]
        except KeyError:
            logger.error("'%s' is not a valid fileset name", fs_name)
            continue
        # Count only real transitions so an unchanged file is not rewritten.
        if entry.active != activate:
            entry.active = activate
            changed += 1
    if changed:
        dump_srcsys(srcsys, approot=apppath)
    else:
        logger.warning('No filesets were changed')
def show_filesets(name: List[str], verbose: bool, active: bool) -> None:
    "list filesets, optionally filtered by name and/or active flag"
    filesets = load_filesets()

    def summary(label: str, fileset: FileSet) -> List[str]:
        version = '' if fileset.fileset_version is None else fileset.fileset_version
        return [label, 'Yes' if fileset.active else 'No', version]

    def details(label: str, fileset: FileSet, sql_file: SQLFile) -> List[str]:
        return summary(label, fileset) + [sql_file.gitfile]

    if active:
        filesets = {label: fs for label, fs in filesets.items() if fs.active}
    if name:
        filesets = {label: fs for label, fs in filesets.items() if label in name}
    if verbose:
        # One output row per SQL file within each fileset.
        tabulate(
            [details(label, fs, sql_file) for label, fs in filesets.items() for sql_file in fs.files.values()],
            ["System", "Active", "Version", "GIT File"])
    else:
        tabulate([summary(label, fs) for label, fs in filesets.items()], ["System", "Active", "Version"])
def run_sets(action: Sequence[str]) -> None:
    "run the requested pipeline steps, always in canonical order"
    app = tdcoa(str(apppath), secrets)
    # Order matters: download -> prepare -> execute -> upload.
    steps = (
        ('download', app.download_files),
        ('prepare', app.prepare_sql),
        ('execute', app.execute_run),
        ('upload', app.upload_to_transcend),
    )
    for step_name, step in steps:
        if step_name in action:
            step()
def first_time() -> None:
    "Initialize a folder for the first time"
    # Instantiating tdcoa scaffolds the default config files into apppath.
    _ = tdcoa(str(apppath))
def run(approot: Path, secfile: str, cmd: Callable, **kwargs: Any) -> None:
    "run script with given validated parameters"
    # Rebind module-level state so every sub-command sees the chosen paths.
    global apppath, secrets
    apppath = approot
    # Every command except `init` and `gui` needs an already-initialized folder.
    if not (apppath / 'source_systems.yaml').exists() and cmd not in [first_time, start_gui]:
        raise SystemExit("Missing source_systems.yaml file, please use init or gui")
    secrets = secfile
    cmd(**kwargs)
def main(argv: Sequence[str] = None) -> None:
    "script entry-point"
    def add_subp_systems(parser: argparse.ArgumentParser) -> None:
        # `systems` sub-commands; a bare `systems` falls back to a listing.
        subp = parser.add_subparsers(help='Choose one sub-command', dest='cmd')
        parser.set_defaults(cmd=show_systems)
        p = subp.add_parser('list', help='list source systems')
        p.set_defaults(cmd=show_systems)
        p.add_argument('name', nargs='*', help='show only named systems')
        p.add_argument('-a', '--active', action='store_true', help='show only active (enabled) entries')
        p.add_argument('-v', '--verbose', action='store_true', help='also include active filesets')
        p = subp.add_parser('enable', help='enable source system')
        p.set_defaults(cmd=enable_systems)
        p.add_argument('systems', nargs='+', help='source system names')
        p = subp.add_parser('disable', help='disable source system')
        # `disable` reuses enable_systems with enable=False.
        p.set_defaults(cmd=lambda systems: enable_systems(systems, enable=False))
        p.add_argument('systems', nargs='+', help='source system names')
        p = subp.add_parser('activate', help='activate filesets for a source system')
        p.set_defaults(cmd=activate_filesets)
        p.add_argument('system', help='source system name')
        p.add_argument('filesets', nargs='+', help='source system name')
        p = subp.add_parser('deactivate', help='deactivate filesets for a source system')
        # `deactivate` reuses activate_filesets with activate=False.
        p.set_defaults(cmd=lambda system, filesets: activate_filesets(system, filesets, activate=False))
        p.add_argument('system', help='source system name')
        p.add_argument('filesets', nargs='+', help='source system name')
    def folder(v: str) -> Path:
        "returns Path from string if it exists and a folder"
        if Path(v).is_dir():
            return Path(v)
        raise argparse.ArgumentTypeError("'%s' does not exist or is not a directory" % v)
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--approot', type=folder, metavar='DIR', default=Path.cwd(),
                        help='directory of the approot (working directory) in which to house all COA collateral')
    parser.add_argument('--secrets', dest='secfile', metavar='FILE', default=secrets,
                        help='name of a YAML file, relative to APPROOT, containing secrets. default: secrents.yaml')
    # With no sub-command at all, fall through to the GUI.
    parser.set_defaults(cmd=start_gui)
    subp = parser.add_subparsers(help='Choose one sub-command')
    p = subp.add_parser('gui', help='Start a GUI session')
    p.set_defaults(cmd=start_gui)
    p = subp.add_parser('init', help='Initialize COA folder for the first-time')
    p.set_defaults(cmd=first_time)
    p = subp.add_parser('systems', help='Source Systems')
    add_subp_systems(p)
    p = subp.add_parser('filesets', help='Filesets information')
    p.set_defaults(cmd=show_filesets)
    p.add_argument('name', nargs='*', help='show only named filesets')
    p.add_argument('-a', '--active', action='store_true', help='show only active entries')
    p.add_argument('-v', '--verbose', action='store_true', help='also include gitfile names')
    p = subp.add_parser('run', help='Run actions against filesets')
    p.set_defaults(cmd=run_sets)
    p.add_argument('action', nargs='+', choices=['download', 'prepare', 'execute', 'upload'], help='actions to run')
    # Everything argparse collected (approot, secfile, cmd, per-command args)
    # is forwarded to run(), which dispatches to the chosen cmd.
    run(**vars(parser.parse_args(argv)))
# Standard script entry point.
if __name__ == '__main__':
    main()
| 34.235023 | 146 | 0.70171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,112 | 0.284291 |
2a38d7dfa0cafc127e2193d495cf4f0ef572b6e1 | 12,247 | py | Python | cogs/Musica.py | Borgotto/discord_bot | dd86d5a321e1aa9bf0bfc84a70f2174c35fcfdd2 | [
"MIT"
] | null | null | null | cogs/Musica.py | Borgotto/discord_bot | dd86d5a321e1aa9bf0bfc84a70f2174c35fcfdd2 | [
"MIT"
] | null | null | null | cogs/Musica.py | Borgotto/discord_bot | dd86d5a321e1aa9bf0bfc84a70f2174c35fcfdd2 | [
"MIT"
] | 1 | 2021-11-09T11:52:17.000Z | 2021-11-09T11:52:17.000Z | import itertools
import asyncio
from async_timeout import timeout
from functools import partial
from youtube_dl import YoutubeDL
from discord.ext import commands
from discord import Embed, FFmpegPCMAudio, HTTPException, PCMVolumeTransformer, Color
# youtube-dl extraction options: best audio only, no playlists, quiet logging.
ytdlopts = {
    'format': 'bestaudio/best',
    'extractaudio': True,
    'audioformat': 'mp3',
    'outtmpl': 'downloads/%(extractor)s-%(id)s-%(title)s.%(ext)s',
    'restrictfilenames': True,
    'noplaylist': True,
    'nocheckcertificate': True,
    'ignoreerrors': False,
    'logtostderr': False,
    'quiet': True,
    'no_warnings': True,
    'default_search': 'auto',
    'source_address': '0.0.0.0' # ipv6 addresses cause issues sometimes
}
# ffmpeg flags: auto-reconnect on dropped streams, discard any video track.
ffmpegopts = {
    'before_options': '-nostdin -reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 60',
    'options': '-vn'
}
# Shared extractor instance used by YTDLSource below.
ytdl = YoutubeDL(ytdlopts)
# CommandError subclasses so discord.py's error handlers can catch
# voice-connection failures distinctly from other command errors.
class VoiceConnectionError(commands.CommandError):
    """Custom Exception class for connection errors."""
class InvalidVoiceChannel(VoiceConnectionError):
    """Exception for cases of invalid Voice Channels."""
class YTDLSource(PCMVolumeTransformer):
    """Volume-adjustable audio source backed by a youtube-dl stream."""
    def __init__(self, source, *, data, requester):
        super().__init__(source)
        self.requester = requester  # the Member who queued the track (ctx.author)
        self.title = data.get('title')
        self.web_url = data.get('webpage_url')
    # https://github.com/rg3/youtube-dl/blob/master/README.md
    def __getitem__(self, item: str):
        """Allow dict-style access to attributes (source['title'])."""
        return self.__getattribute__(item)
    @classmethod
    async def create_source(cls, ctx, search: str, *, loop, download=False, add_to_q=True):
        """Resolve *search* with youtube-dl and return a lightweight track dict.

        Only metadata is kept here; the actual stream is fetched lazily by
        regather_stream() when the track reaches the front of the queue.
        """
        loop = loop or asyncio.get_event_loop()
        # extract_info is blocking -> run it in the default executor.
        to_run = partial(ytdl.extract_info, url=search, download=download)
        data = await loop.run_in_executor(None, to_run)
        if 'entries' in data:
            # Search results come back playlist-shaped; take the first hit.
            data = data['entries'][0]
        if add_to_q is True:
            embed = Embed(title="Aggiunto alla coda:", description=f'[{data["title"]}]({data["webpage_url"]}) [{ctx.author.mention}]', color=0xfefefe)
            await ctx.send(embed=embed)
        return {'webpage_url': data['webpage_url'], 'requester': ctx.author, 'title': data['title']}
    @classmethod
    async def regather_stream(cls, data, *, loop):
        """Re-resolve a queued track dict into a playable FFmpeg source.

        Needed because extracted stream URLs expire; called just before playback.
        """
        loop = loop or asyncio.get_event_loop()
        requester = data['requester']
        to_run = partial(ytdl.extract_info, url=data['webpage_url'], download=False)
        data = await loop.run_in_executor(None, to_run)
        return cls(FFmpegPCMAudio(data['url'], before_options=ffmpegopts['before_options'], options=ffmpegopts['options']), data=data, requester=requester)
class MusicPlayer(commands.Cog):
    """Per-guild playback loop: consumes the queue and drives the voice client.

    One instance per guild, created by Musica.get_player().
    NOTE(review): subclassing commands.Cog looks unnecessary -- the class is
    never registered as a cog; confirm before changing.
    """
    __slots__ = ('bot', '_guild', '_channel', '_cog', 'queue', 'next', 'current', 'np', 'volume')
    def __init__(self, ctx):
        self.bot = ctx.bot
        self._guild = ctx.guild
        self._channel = ctx.channel
        self._cog = ctx.cog
        self.queue = asyncio.Queue()
        self.next = asyncio.Event()  # set when the current track finishes
        self.np = None # Now playing message
        self.volume = .3
        self.current = None
        # Fire-and-forget background consumer for this guild's queue.
        ctx.bot.loop.create_task(self.player_loop())
    async def player_loop(self):
        """Main loop: wait for a queued track, play it, wait for it to end."""
        await self.bot.wait_until_ready()
        while not self.bot.is_closed():
            self.next.clear()
            try:
                # Tear the player down after 120s with an empty queue.
                async with timeout(120):
                    source = await self.queue.get()
            except asyncio.TimeoutError:
                return self.destroy(self._guild)
            if not isinstance(source, YTDLSource):
                # Queued entries are plain dicts; resolve into a stream now.
                try:
                    source = await YTDLSource.regather_stream(source, loop=self.bot.loop)
                except Exception as e:
                    await self._channel.send(f"C'è stato un errore nella richiesta della canzone.\n"f'```css\n[{e}]\n```')
                    continue
            source.volume = self.volume
            self.current = source
            # `after` runs in the player thread -> hop back to the event loop.
            self._guild.voice_client.play(source, after=lambda _: self.bot.loop.call_soon_threadsafe(self.next.set))
            embed = Embed(title="Ora in riproduzione:", description=f'[{source.title}]({source.web_url}) [{source.requester.mention}]', color=0xfefefe)
            self.np = await self._channel.send(embed=embed)
            await self.next.wait()
            source.cleanup()
            self.current = None
            #try:
                #await self.np.delete()
            #except HTTPException:
                #pass
    def destroy(self, guild):
        """Tear down the player and disconnect (delegates to the cog's cleanup)."""
        return self.bot.loop.create_task(self._cog.cleanup(guild))
class Musica(commands.Cog):
    """Music cog: voice-channel connection and queue-based playback commands.

    Command names and all user-facing strings are Italian (kept as-is --
    they are runtime output).
    """
    __slots__ = ('bot', 'players')
    def __init__(self, bot):
        self.bot = bot
        self.players = {}  # guild.id -> MusicPlayer
    @commands.Cog.listener()
    async def on_ready(self):
        print("Musica caricata!")
    async def cleanup(self, guild):
        """Disconnect the guild's voice client and drop its MusicPlayer."""
        try:
            await guild.voice_client.disconnect()
        except AttributeError:
            # No voice client for this guild.
            pass
        try:
            del self.players[guild.id]
        except KeyError:
            pass
    async def __local_check(self, ctx):
        # NOTE(review): double-underscore check/error hooks are the old
        # discord.py 0.x/1.0 convention -- confirm they still fire on the
        # installed discord.py version.
        if not ctx.guild:
            raise commands.NoPrivateMessage
        return True
    async def __error(self, ctx, error):
        if isinstance(error, commands.NoPrivateMessage):
            try:
                return await ctx.send('Questo comando non può essere usato nei messaggi privati')
            except HTTPException:
                pass
        elif isinstance(error, InvalidVoiceChannel):
            await ctx.send('Devi essere in un canale per mettere la musica', reference=ctx.message, mention_author=False)
    def get_player(self, ctx):
        """Return the guild's MusicPlayer, creating it on first use."""
        try:
            player = self.players[ctx.guild.id]
        except KeyError:
            player = MusicPlayer(ctx)
            self.players[ctx.guild.id] = player
        return player
    @commands.command(name='connetti', aliases=['join','entra','connect','connettiti'], help="Fai connettere il bot al canale vocale")
    async def connect_(self, ctx):
        """Join (or move to) the author's voice channel."""
        try:
            channel = ctx.author.voice.channel
        except AttributeError:
            # Author is not in voice: tell them, then abort the command.
            if ctx.author.voice is None:
                await ctx.send(f"{ctx.author.mention} Devi essere in un canale vocale per mettere la musica", reference=ctx.message, mention_author=False)
            raise InvalidVoiceChannel(f'Nessun canale in cui entrare.')
        vc = ctx.voice_client
        if vc:
            if vc.channel.id == channel.id:
                # Already in the right channel.
                return
            try:
                await vc.move_to(channel)
            except asyncio.TimeoutError:
                raise VoiceConnectionError(f'Spostamento canale: <{channel}> timed out.')
        else:
            try:
                await channel.connect()
            except asyncio.TimeoutError:
                raise VoiceConnectionError(f'Connessione al canale: <{channel}> timed out.')
        embed = Embed(title="Entrato in chiamata", color=0xfefefe)
        embed.add_field(name="Connesso a:", value=channel, inline=True)
        #await ctx.send(embed=embed)
    @commands.command(name='play', aliases=['riproduci', 'p'], help='Riproduci una canzone')
    async def play_(self, ctx, *, search: str):
        """Resolve *search* and enqueue it, joining voice first if needed."""
        await ctx.trigger_typing()
        if ctx.author.voice is None:
            return await ctx.send(f"{ctx.author.mention} Devi essere in un canale vocale per mettere la musica", reference=ctx.message, mention_author=False)
        vc = ctx.voice_client
        if not vc:
            await ctx.invoke(self.connect_)
        player = self.get_player(ctx)
        # Only announce "added to queue" when something is already playing.
        source = await YTDLSource.create_source(ctx, search, loop=self.bot.loop, download=False, add_to_q=(player.current is not None))
        await player.queue.put(source)
    @commands.command(name='pausa', aliases=['pause'], help="Pausa la canzone in riproduzione")
    async def pause_(self, ctx):
        """Pause playback; if already paused, act as resume."""
        vc = ctx.voice_client
        if not vc or not vc.is_playing():
            return await ctx.send("Non c'è niente in riproduzione!", reference=ctx.message, mention_author=False)
        elif vc.is_paused():
            await ctx.invoke(self.resume_)
            return
        vc.pause()
        await ctx.message.add_reaction("🆗")
    @commands.command(name='riprendi', aliases=['unpause', 'resume'], help="Riprendi la riproduzione della canzone")
    async def resume_(self, ctx):
        """Resume a paused track."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send("Non c'è niente in riproduzione!", reference=ctx.message, mention_author=False)
        elif not vc.is_paused():
            return
        vc.resume()
        await ctx.message.add_reaction("🆗")
    @commands.command(name='skip', aliases=['next','skippa'], help="Salta la canzone corrente")
    async def skip_(self, ctx):
        """Stop the current track so the player loop advances to the next."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send("Non c'è niente in riproduzione!", reference=ctx.message, mention_author=False)
        if vc.is_paused():
            pass
        elif not vc.is_playing():
            return
        vc.stop()
        await ctx.message.add_reaction("🆗")
    @commands.command(name='coda', aliases=['q', 'playlist', 'queue'], help="Mostra la coda delle canzoni")
    async def queue_info(self, ctx):
        """Show the pending tracks as a code-block listing."""
        vc = ctx.voice_client
        player = self.get_player(ctx)
        if player.queue.empty():
            return await ctx.send('Non ci sono canzoni nella coda.', reference=ctx.message, mention_author=False)
        #upcoming = list(itertools.islice(player.queue._queue, 0, 5))
        #fmt = '\n'.join(f'[{_["title"]}]({_["webpage_url"]}) [{_["requester"].mention}]' for _ in player.queue._queue)
        #embed = Embed(title=f'Canzoni in coda:', color=0xfefefe, description=fmt)
        # Reads asyncio.Queue's private _queue deque to peek without consuming.
        x=0
        description = f"```ml\nCoda Canzoni:\n\n\t⬐ prossima traccia\n"
        for song in player.queue._queue:
            x+=1
            description += f'{x}) {song["title"]}\n'
        description += '\n\tFine della coda!```'
        await ctx.send(description, reference=ctx.message, mention_author=False)
    @commands.command(name='in_riproduzione', aliases=['np', 'current', 'currentsong', 'playing', 'ir'], help="Mostra la canzone in riproduzione")
    async def now_playing_(self, ctx):
        """Re-post the now-playing embed for the current track."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send('Non sono connesso a un canale vocale!', reference=ctx.message, mention_author=False)
        player = self.get_player(ctx)
        if not player.current:
            return await ctx.send("Non c'è niente in riproduzione!", reference=ctx.message, mention_author=False)
        try:
            await player.np.delete()
        except HTTPException:
            pass
        # NOTE(review): mentions ctx.author (command invoker), not the track's
        # requester -- confirm that is the intended attribution.
        embed = Embed(title="Ora in riproduzione:", description=f'{vc.source.title} [{ctx.author.mention}]', color=0xfefefe)
        player.np = await ctx.send(embed=embed)
    @commands.command(name='volume', aliases=['vol','v'], help="Cambia il volume della musica")
    async def change_volume(self, ctx, *, vol: float=None):
        """Set (or, with no argument, report) the playback volume (1-100)."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send('Non sono connesso a un canale vocale!', reference=ctx.message, mention_author=False)
        player = self.get_player(ctx)
        if vol is None:
            # No argument: just report the stored volume.
            vol = player.volume*100
        elif not 0 < vol < 101:
            return await ctx.send('Inserisci un valora compreso tra 1 e 100', reference=ctx.message, mention_author=False)
        elif vc.source:
            vc.source.volume = vol / 100
        # Persist so the next track starts at this volume.
        player.volume = vol / 100
        if vol >= 80:
            emoji = ':loud_sound:'
        elif 30 < vol < 80:
            emoji = ':sound:'
        elif vol <=30:
            emoji = ':speaker:'
        embed = Embed(title=f'**Volume:** {int(vol)} {emoji}:', color=0xfefefe)
        await ctx.send(embed=embed)
    @commands.command(name='esci', aliases=['stop','leave','fuori'], help="Stoppa la musica (rimuove la coda)")
    async def stop_(self, ctx):
        """Disconnect and discard this guild's player (and its queue)."""
        vc = ctx.voice_client
        await self.cleanup(ctx.guild)
        await ctx.message.add_reaction("🆗")
def setup(bot):
    # discord.py extension entry point: attach the music cog to the bot.
    # NOTE(review): Musica is presumably the cog class defined above -- confirm.
    bot.add_cog(Musica(bot))
2a394b04ac8e33befbcd55f3ea716e30d44e735b | 2,584 | py | Python | kipy/fileobjs/sch/sheet.py | Arie001/klonor-kicad | c916ae4cdac5072803e12f530ef32230732b30d7 | [
"MIT"
] | 2 | 2016-05-17T13:10:52.000Z | 2018-09-03T15:18:30.000Z | kipy/fileobjs/sch/sheet.py | Arie001/klonor-kicad | c916ae4cdac5072803e12f530ef32230732b30d7 | [
"MIT"
] | 1 | 2016-08-03T09:31:39.000Z | 2016-08-03T11:19:19.000Z | kipy/fileobjs/sch/sheet.py | Arie001/klonor-kicad | c916ae4cdac5072803e12f530ef32230732b30d7 | [
"MIT"
] | 1 | 2018-09-04T16:32:13.000Z | 2018-09-04T16:32:13.000Z | '''
Classes based on SchItem for parsing and rendering $Sheet sub-sheets
inside a .sch file.
'''
from .schitem import SchItem
class Sheet(SchItem):
    """Parser/renderer for a `$Sheet` block of a .sch schematic file."""
    # Keyword that opens this block in the file format.
    keyword = '$Sheet'
    # Registry of sub-item classes keyed by keyword.
    # NOTE(review): presumably populated/consumed by SchItem machinery -- confirm.
    _by_keyword = {}
    def render(self, linelist):
        # Emit the sheet as text lines: header, geometry (S), timestamp (U),
        # numbered fields, then the $EndSheet terminator.
        # NOTE(review): self.S / self.U / self.EndSheet appear to resolve to the
        # sibling classes below, presumably attached via SchItem -- confirm.
        linelist.append(self.keyword)
        self.S.render(self, linelist)
        self.U.render(self, linelist)
        for i, f in enumerate(self.fields):
            f.render(i, linelist)
        self.EndSheet.render(self, linelist)
class S(Sheet):
    """`S` line: sheet position and size."""
    @classmethod
    def subparse(cls, sheet, tokens, lineiterator):
        # NOTE(review): checkstate semantics come from SchItem; the numbers
        # look like expected/next parse states -- confirm.
        cls.checkstate(sheet, -1, 2)
        # The file stores x, y, width, height; convert width/height into
        # absolute end coordinates for internal use.
        sheet.startx, sheet.starty, sheet.endx, sheet.endy = tokens[1:]
        sheet.endx += sheet.startx
        sheet.endy += sheet.starty
    @staticmethod
    def render(sheet, linelist):
        # Convert the end coordinates back to width/height for the file format.
        linelist.append('S %-4s %-4s %-4s %-4s' % (sheet.startx, sheet.starty,
                sheet.endx - sheet.startx, sheet.endy - sheet.starty))
class U(Sheet):
    """`U` line: sheet timestamp (unique id)."""
    @classmethod
    def subparse(cls, sheet, tokens, lineiterator):
        cls.checkstate(sheet, 2, 1)
        # A timestamp may only appear once per sheet.
        assert not hasattr(sheet, 'timestamp')
        sheet.timestamp, = tokens[1:]
        # The timestamp precedes the field list; start collecting fields now.
        sheet.fields = []
    @staticmethod
    def render(sheet, linelist):
        # The U line is optional; omit it when no timestamp was parsed.
        if sheet.timestamp is not None:
            linelist.append('U %s' % (sheet.timestamp))
class F(Sheet):
    """Numbered `Fn` field lines of a sheet."""
    # Fields are matched by their F<digit> prefix rather than a fixed keyword.
    keyword = None
    @classmethod
    def subparse(cls, sheet, tokens, lineiterator):
        # The U (timestamp) line is optional: if we are still in state 2,
        # synthesize the state transition U.subparse would have performed.
        if sheet.parsestate == 2:
            sheet.parsestate = 1
            sheet.timestamp = None
            sheet.fields = []
        cls.checkstate(sheet, 1, 1)
        # Instantiating the field appends it to sheet.fields (see __init__).
        cls(sheet, tokens)
    def __init__(self, sheet, tokens):
        # Token 0 is e.g. "F0"; fields must arrive in ascending index order.
        index = tokens[0]
        assert index[0] == 'F'
        index = int(index[1:])
        assert index == len(sheet.fields)
        sheet.fields.append(self)
        if index < 2:
            # F0 and F1 carry only a value and a size.
            self.name, self.size = tokens[1:]
        else:
            # Higher-numbered fields additionally carry form, side and position.
            # NOTE(review): exact meaning of form/side not visible here -- confirm.
            self.name, self.form, self.side, self.posx, self.posy, self.size = tokens[1:]
    def render(self, index, linelist):
        tokens = ['F%d "%s"' % (index, self.name)]
        if index >= 2:
            # The trailing '' produces the trailing space the format expects.
            tokens.extend((self.form, self.side, self.posx, self.posy, self.size, ''))
        else:
            tokens.append(self.size)
        linelist.append(' '.join((str(x) for x in tokens)))
class EndSheet(Sheet):
    """`$EndSheet` terminator line."""
    keyword = '$EndSheet'
    @classmethod
    def subparse(cls, sheet, tokens, lineiterator):
        cls.checkstate(sheet, 1, 0)
        # A well-formed sheet must have at least the two mandatory fields
        # (the ones parsed with the index < 2 rule in F).
        assert len(sheet.fields) >= 2
    @classmethod
    def render(cls, schpage, linelist):
        linelist.append(cls.keyword)
| 31.13253 | 90 | 0.592879 | 2,447 | 0.946981 | 0 | 0 | 1,276 | 0.493808 | 0 | 0 | 173 | 0.06695 |
2a3aa00e1e605b1a991cede3b58b1dc8bfec0c42 | 1,988 | py | Python | PyBullet_experiments/experiments/plot_return.py | AnonymousLaBER/LaBER | af9da8ffda0654e2021de20cb162ef71dc9b9d6c | [
"MIT"
] | 3 | 2021-10-11T22:25:02.000Z | 2022-03-04T20:00:56.000Z | PyBullet_experiments/experiments/plot_return.py | AnonymousLaBER/LaBER | af9da8ffda0654e2021de20cb162ef71dc9b9d6c | [
"MIT"
] | null | null | null | PyBullet_experiments/experiments/plot_return.py | AnonymousLaBER/LaBER | af9da8ffda0654e2021de20cb162ef71dc9b9d6c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import pickle
import json
import argparse
from collections import defaultdict
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# CLI: positional path to a TF event file (or a directory to scan), optional
# output directory (-w) and a flag to dump JSON instead of pickle (-j).
parser = argparse.ArgumentParser()
parser.add_argument('path', help="Tf path to event files from which to extract variables")
parser.add_argument('-w', '--write', default=None, type=str, dest='write_dir')
parser.add_argument('-j', '--json', action='store_true')
args = parser.parse_args()
def extract_file(file, values):
    """Accumulate every summary value from one TF event *file* into *values*.

    *values* maps tag name -> list of simple values; it is mutated in place
    and also returned for convenience.
    """
    for event in tf.compat.v1.train.summary_iterator(file):
        for entry in event.summary.value:
            values[entry.tag].append(entry.simple_value)
    return values
# Validate the input path and collect TensorBoard event files from it.
if not os.path.exists(args.path):
    print("No such file or directory")
    exit()
if os.path.isfile(args.path):
    files = [args.path]
elif os.path.isdir(args.path):
    files = []
    # Recursively gather event files under the directory.
    for directory, _, file_list in os.walk(args.path):
        for file in file_list:
            if file.startswith('events.out.tfevents.'):
                files.append(os.path.join(directory, file))
    if not files:
        print("No event file found")
        exit()
else:
    print("Invalid file type")
    exit()
if args.write_dir and not os.path.exists(args.write_dir):
    os.makedirs(args.write_dir)
# Accumulate tag -> list of scalar values across all event files.
values = defaultdict(list)
for file in files:
    values = extract_file(file, values)
    if args.write_dir:
        # Event file names end in <number>.<hostname>; use those last two
        # dotted components to build the dump file name.
        save_file_nb, computer_name = file.split('.')[-2:]
        extension = '.json' if args.json else '.pkl'
        save_file_name = 'Events_' + str(save_file_nb) + '_' + computer_name + extension
        mode = 'w' if args.json else 'wb'
        with open(os.path.join(args.write_dir, save_file_name), mode) as save_file:
            if args.json:
                json.dump(values, save_file)
            else:
                pickle.dump(values, save_file)
# Plot the mean-episode-return curve(s) collected so far.
for k, v in values.items():
    if 'rollout/ep_rew_mean' in k:
        plt.plot(v, label=k)
plt.legend()
plt.show()
2a3b07acb290992d78e2cb48fb5c7452dc6ca1c9 | 551 | py | Python | helper/policy.py | icesiv/webstaurantstore | 79b5f32625a6be38e42d525c74537d01fde82e26 | [
"MIT"
] | null | null | null | helper/policy.py | icesiv/webstaurantstore | 79b5f32625a6be38e42d525c74537d01fde82e26 | [
"MIT"
] | null | null | null | helper/policy.py | icesiv/webstaurantstore | 79b5f32625a6be38e42d525c74537d01fde82e26 | [
"MIT"
] | null | null | null | from rotating_proxies.policy import BanDetectionPolicy
class MyPolicy(BanDetectionPolicy):
    """Ban-detection policy that extends the default rules with a captcha check."""

    def response_is_ban(self, request, response):
        # Keep the default rules, but also treat an HTTP 200 response as a
        # ban when the word 'captcha' appears in its body.
        return (
            super(MyPolicy, self).response_is_ban(request, response)
            or b'captcha' in response.body
        )

    def exception_is_ban(self, request, exception):
        # Override completely: exceptions never count as bans.
        return None
2a3cf567574bc6e1ccbbf93b58791ade2b96a58a | 8,162 | py | Python | training/scripts/trainer/src/convert/convert.py | TommyTeaVee/training | ecbd86d5137d5b639d79824661c58792d4ce177c | [
"MIT"
] | 2,442 | 2019-02-10T03:37:16.000Z | 2021-05-23T05:33:10.000Z | training/scripts/trainer/src/convert/convert.py | TommyTeaVee/training | ecbd86d5137d5b639d79824661c58792d4ce177c | [
"MIT"
] | 189 | 2019-02-11T11:34:27.000Z | 2021-04-23T19:05:49.000Z | training/scripts/trainer/src/convert/convert.py | TommyTeaVee/training | ecbd86d5137d5b639d79824661c58792d4ce177c | [
"MIT"
] | 459 | 2019-02-10T09:34:55.000Z | 2021-05-25T14:20:12.000Z | import os
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from tensorflow.python.framework import dtypes
from tensorflow.python.tools import strip_unused_lib
tf.enable_eager_execution()
parser = argparse.ArgumentParser()
# export types: which target formats to produce
parser.add_argument("--coreml", action="store_true")
parser.add_argument("--tflite", action="store_true")
parser.add_argument("--tfjs", action="store_true")
# "localization" (object detection) or "classification" (see sections below)
parser.add_argument("--model-type", type=str)
# import paths
parser.add_argument("--saved-model", type=str)
# export paths
parser.add_argument("--mlmodel-path", type=str)
parser.add_argument("--tflite-path", type=str)
parser.add_argument("--tfjs-path", type=str)
args = parser.parse_args()
def print_header(msg):
    """Print *msg* beneath a blank spacer line and an 80-character rule."""
    for line in (" " * 80, "_" * 80, msg):
        print(line)
def print_footer(msg):
    """Print *msg* followed by an 80-character rule and a blank spacer line."""
    for line in (msg, "_" * 80, " " * 80):
        print(line)
def attempt_conversion(model_type, model_format):
    """Build a decorator that runs the conversion immediately and logs the outcome.

    The decorated function is executed at decoration time; any exception is
    printed and swallowed so one failing conversion does not abort the rest
    of the script.  The decorated name ends up bound to None (the wrapper's
    return value), which this script never uses again.
    """
    # Renamed the inner function: it previously shadowed the outer
    # `attempt_conversion`, which was confusing to read.
    def run_conversion(convert):
        try:
            print_header(f"Converting {model_type} model to {model_format}")
            convert()
            print_footer(f"Successfully converted to {model_format}")
        except Exception as e:
            # Best-effort by design: report and continue with other formats.
            print(e)
            print_footer(f"Unable to convert to {model_format}")
    return run_conversion
def get_anchors(graph):
"""
Computes the list of anchor boxes by sending a fake image through the graph.
Outputs an array of size (4, num_anchors) where each element is an anchor box
given as [ycenter, xcenter, height, width] in normalized coordinates.
"""
with tf.Session(graph=graph) as sess:
anchors_tensor = "Concatenate/concat:0"
image_tensor = graph.get_tensor_by_name("image_tensor:0")
box_corners_tensor = graph.get_tensor_by_name(anchors_tensor)
box_corners = sess.run(
box_corners_tensor, feed_dict={image_tensor: np.zeros((1, 300, 300, 3))}
)
# The TensorFlow graph gives each anchor box as [ymin, xmin, ymax, xmax].
# Convert these min/max values to a center coordinate, width and height.
ymin, xmin, ymax, xmax = np.transpose(box_corners)
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.0
xcenter = xmin + width / 2.0
return np.stack([ycenter, xcenter, height, width])
def strip_and_freeze_model(
    saved_model, output_path, input_node_names=None, output_node_names=None
):
    """Freeze a TF SavedModel into a stripped GraphDef written to *output_path*.

    Loads the SavedModel, removes ops not needed to compute
    *output_node_names* from *input_node_names*, folds variables into
    constants, and serializes the result.  Returns the tf.Graph the model
    was loaded into (used e.g. by get_anchors()).
    """
    # None sentinels replace the previous mutable default lists ([]), which
    # would be shared across calls.
    if input_node_names is None:
        input_node_names = []
    if output_node_names is None:
        output_node_names = []
    graph = tf.Graph()
    with tf.Session(graph=graph) as sess:
        print("loading model...")
        tf.saved_model.loader.load(sess, [tf.saved_model.SERVING], saved_model)
        print("stripping unused ops...")
        # Keep only the subgraph between the named inputs and outputs;
        # replacement placeholders are typed float32.
        gdef = strip_unused_lib.strip_unused(
            input_graph_def=tf.get_default_graph().as_graph_def(),
            input_node_names=input_node_names,
            output_node_names=output_node_names,
            placeholder_type_enum=dtypes.float32.as_datatype_enum,
        )
        gdef = tf.graph_util.convert_variables_to_constants(
            sess, gdef, output_node_names
        )
        with gfile.GFile(output_path, "wb") as f:
            print("writing frozen model...")
            f.write(gdef.SerializeToString())
        return graph
os.makedirs(".tmp", exist_ok=True)
################################################################################
# Object Detection
################################################################################
if args.model_type == "localization":
labels_path = os.path.join(args.saved_model, "labels.json")
@attempt_conversion("object detection", "Core ML")
def convert_object_detection_coreml():
if args.coreml:
from convert.convert_to_core_ml import convert_localization
frozen_model = ".tmp/coreml_frozen_model.pb"
graph = strip_and_freeze_model(
saved_model=args.saved_model,
output_path=frozen_model,
input_node_names=["Preprocessor/sub"],
output_node_names=["Squeeze", "Postprocessor/convert_scores"],
)
anchors = get_anchors(graph)
convert_localization(
frozen_model=frozen_model,
labels_path=labels_path,
output_path=args.mlmodel_path,
anchors=anchors,
)
@attempt_conversion("object detection", "TensorFlow Lite")
def convert_object_detection_tflite():
if args.tflite:
from convert.convert_to_tflite import convert_localization
frozen_model = ".tmp/tflite_frozen_model.pb"
graph = strip_and_freeze_model(
saved_model=args.saved_model,
output_path=frozen_model,
input_node_names=["Preprocessor/sub"],
output_node_names=["Squeeze", "Postprocessor/convert_scores"],
)
anchors = get_anchors(graph)
convert_localization(
frozen_model=frozen_model,
labels_path=labels_path,
output_path=args.tflite_path,
anchors=anchors,
)
@attempt_conversion("object detection", "TensorFlow.js")
def convert_object_detection_tfjs():
if args.tfjs:
from convert.convert_to_tfjs import convert_localization
frozen_model = ".tmp/tfjs_frozen_model.pb"
strip_and_freeze_model(
saved_model=args.saved_model,
output_path=frozen_model,
input_node_names=[],
output_node_names=["Postprocessor/ExpandDims_1", "Postprocessor/Slice"],
)
convert_localization(
frozen_model=frozen_model,
labels_path=labels_path,
output_path=args.tfjs_path,
)
################################################################################
# Classification
################################################################################
if args.model_type == "classification":
    # Classification models keep a plain-text label list next to the SavedModel.
    labels_path = os.path.join(args.saved_model, "labels.txt")
    # Each decorated function below runs immediately at decoration time
    # (see attempt_conversion) and is a no-op unless its CLI flag was passed.
    @attempt_conversion("classification", "Core ML")
    def convert_classification_coreml():
        if args.coreml:
            from convert.convert_to_core_ml import convert_classification
            frozen_model = ".tmp/coreml_frozen_model.pb"
            strip_and_freeze_model(
                saved_model=args.saved_model,
                output_path=frozen_model,
                input_node_names=["Placeholder"],
                output_node_names=["final_result"],
            )
            convert_classification(
                frozen_model=frozen_model,
                labels_path=labels_path,
                output_path=args.mlmodel_path,
            )
    @attempt_conversion("classification", "TensorFlow Lite")
    def convert_classification_tflite():
        if args.tflite:
            from convert.convert_to_tflite import convert_classification
            frozen_model = ".tmp/tflite_frozen_model.pb"
            strip_and_freeze_model(
                saved_model=args.saved_model,
                output_path=frozen_model,
                input_node_names=["Placeholder"],
                output_node_names=["final_result"],
            )
            convert_classification(
                frozen_model=frozen_model,
                labels_path=labels_path,
                output_path=args.tflite_path,
            )
    @attempt_conversion("classification", "TensorFlow.js")
    def convert_classification_tfjs():
        if args.tfjs:
            from convert.convert_to_tfjs import convert_classification
            frozen_model = ".tmp/tfjs_frozen_model.pb"
            strip_and_freeze_model(
                saved_model=args.saved_model,
                output_path=frozen_model,
                input_node_names=["Placeholder"],
                output_node_names=["final_result"],
            )
            convert_classification(
                frozen_model=frozen_model,
                labels_path=labels_path,
                output_path=args.tfjs_path,
            )
| 32.779116 | 88 | 0.604019 | 0 | 0 | 0 | 0 | 4,262 | 0.522176 | 0 | 0 | 1,838 | 0.22519 |
2a3d04876b6b65c8918ff882255667977b529bb1 | 3,594 | py | Python | packages/Python/lldbsuite/test/lang/swift/private_decl_name/TestSwiftPrivateDeclName.py | enterstudio/swift-lldb | af85d636d230da2460f91938b1ff734b0fb64b42 | [
"Apache-2.0"
] | 2 | 2019-05-24T14:10:24.000Z | 2019-05-24T14:27:38.000Z | packages/Python/lldbsuite/test/lang/swift/private_decl_name/TestSwiftPrivateDeclName.py | enterstudio/swift-lldb | af85d636d230da2460f91938b1ff734b0fb64b42 | [
"Apache-2.0"
] | null | null | null | packages/Python/lldbsuite/test/lang/swift/private_decl_name/TestSwiftPrivateDeclName.py | enterstudio/swift-lldb | af85d636d230da2460f91938b1ff734b0fb64b42 | [
"Apache-2.0"
] | null | null | null | # TestSwiftPrivateDeclName.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that we correctly find private decls
"""
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.decorators as decorators
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftPrivateDeclName(TestBase):
    """lldb test: verify that private Swift declarations resolve correctly."""
    mydir = TestBase.compute_mydir(__file__)
    def setUp(self):
        # Pre-compute SBFileSpecs for the two test source files so the
        # breakpoint helpers below can target them.
        TestBase.setUp(self)
        self.a_source = "a.swift"
        self.a_source_spec = lldb.SBFileSpec(self.a_source)
        self.b_source = "b.swift"
        self.b_source_spec = lldb.SBFileSpec(self.b_source)
    @decorators.swiftTest
    @decorators.expectedFailureAll(bugnumber="rdar://23236790")
    def test_swift_private_decl_name(self):
        """Test that we correctly find private decls"""
        self.build()
        # NOTE(review): do_test() is not defined in this class or visible in
        # this file; the body below may have been meant to live in it -- confirm
        # against TestBase / the original lldb test layout.
        self.do_test()
        exe_name = "a.out"
        exe = self.getBuildArtifact(exe_name)
        # Create the target
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        # Set the breakpoints
        a_breakpoint = target.BreakpointCreateBySourceRegex(
            'break here', self.a_source_spec)
        self.assertTrue(a_breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)
        b_breakpoint = target.BreakpointCreateBySourceRegex(
            'break here', self.b_source_spec)
        self.assertTrue(b_breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)
        process = target.LaunchSimple(None, None, os.getcwd())
        self.assertTrue(process, PROCESS_IS_VALID)
        # Stop at the breakpoint in a.swift and inspect variable `a`.
        threads = lldbutil.get_threads_stopped_at_breakpoint(
            process, a_breakpoint)
        self.assertTrue(len(threads) == 1)
        self.thread = threads[0]
        self.frame = self.thread.frames[0]
        self.assertTrue(self.frame, "Frame 0 is valid.")
        var = self.frame.FindVariable("a")
        child_a = var.GetChildMemberWithName("a")
        child_b = var.GetChildMemberWithName("b")
        child_c = var.GetChildMemberWithName("c")
        lldbutil.check_variable(self, var, False, typename="a.S.A")
        lldbutil.check_variable(self, child_a, False, value="1")
        lldbutil.check_variable(self, child_b, False, '"hello"')
        lldbutil.check_variable(self, child_c, False, value='1.25')
        process.Continue()
        # Stop at the breakpoint in b.swift and check the other private `a.S.A`.
        threads = lldbutil.get_threads_stopped_at_breakpoint(
            process, b_breakpoint)
        self.assertTrue(len(threads) == 1)
        self.thread = threads[0]
        self.frame = self.thread.frames[0]
        self.assertTrue(self.frame, "Frame 0 is valid.")
        var = self.frame.FindVariable("a")
        child_a = var.GetChildMemberWithName("a")
        child_b = var.GetChildMemberWithName("b")
        child_c = var.GetChildMemberWithName("c")
        lldbutil.check_variable(self, var, False, typename="a.S.A")
        lldbutil.check_variable(self, child_a, False, value="3")
        lldbutil.check_variable(self, child_b, False, '"goodbye"')
        lldbutil.check_variable(self, child_c, False, value='1.25')
if __name__ == '__main__':
    # Standalone entry point: initialize the lldb debugger, ensure it is torn
    # down at interpreter exit, then run the test suite.
    import atexit
    lldb.SBDebugger.Initialize()
    atexit.register(lldb.SBDebugger.Terminate)
    unittest2.main()
2a3de723ee6980cbdbd514d26ca2d35bda76c165 | 195 | py | Python | tests/test_input_laplace_time_ebcs.py | Gkdnz/SfePy | a3a39d4e087705e9e0e8884cbf63513a2ded2108 | [
"BSD-3-Clause"
] | null | null | null | tests/test_input_laplace_time_ebcs.py | Gkdnz/SfePy | a3a39d4e087705e9e0e8884cbf63513a2ded2108 | [
"BSD-3-Clause"
] | null | null | null | tests/test_input_laplace_time_ebcs.py | Gkdnz/SfePy | a3a39d4e087705e9e0e8884cbf63513a2ded2108 | [
"BSD-3-Clause"
] | 2 | 2019-01-14T03:12:34.000Z | 2021-05-25T11:44:50.000Z | input_name = '../examples/diffusion/laplace_time_ebcs.py'
output_name_trunk = 'test_laplace_time_ebcs'
from tests_basic import TestInputEvolutionary
class Test(TestInputEvolutionary):
pass
| 24.375 | 57 | 0.825641 | 43 | 0.220513 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.348718 |
2a3f420858525afd3b6e496ae0a3136e8d138627 | 518 | py | Python | boto3_exceptions/iot1click-devices.py | siteshen/boto3_exceptions | d6174c2577c9d4b17a09a89cd0e4bd1fe555b26b | [
"MIT"
] | 2 | 2021-06-22T00:00:35.000Z | 2021-07-15T03:25:52.000Z | boto3_exceptions/iot1click-devices.py | siteshen/boto3_exceptions | d6174c2577c9d4b17a09a89cd0e4bd1fe555b26b | [
"MIT"
] | null | null | null | boto3_exceptions/iot1click-devices.py | siteshen/boto3_exceptions | d6174c2577c9d4b17a09a89cd0e4bd1fe555b26b | [
"MIT"
] | null | null | null | import boto3
exceptions = boto3.client('iot1click-devices').exceptions
ForbiddenException = exceptions.ForbiddenException
InternalFailureException = exceptions.InternalFailureException
InvalidRequestException = exceptions.InvalidRequestException
PreconditionFailedException = exceptions.PreconditionFailedException
RangeNotSatisfiableException = exceptions.RangeNotSatisfiableException
ResourceConflictException = exceptions.ResourceConflictException
ResourceNotFoundException = exceptions.ResourceNotFoundException
| 43.166667 | 70 | 0.903475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.03668 |
2a4151beaa3590019a94a7fe55fcae4fc19b8066 | 1,826 | py | Python | IMfunctions.py | KartoffelCheetah/personal-website-001 | c0700687ef90cc9a8a26c13af158fc91ee30a647 | [
"MIT"
] | null | null | null | IMfunctions.py | KartoffelCheetah/personal-website-001 | c0700687ef90cc9a8a26c13af158fc91ee30a647 | [
"MIT"
] | 7 | 2017-10-04T15:22:23.000Z | 2018-02-08T18:31:21.000Z | IMfunctions.py | KartoffelCheetah/personal-website-001 | c0700687ef90cc9a8a26c13af158fc91ee30a647 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.5
#-*- coding:utf-8 -*-
"""Functions handling images."""
import subprocess
import os
# TODO: which ImageMagick formats are supported?
def getDateTimeOriginal(filePath) :
    """If image has DateTimeOriginal then returns it as a string. If it's missing or there isn't EXIF at all then returns empty string."""
    # Delegates to ImageMagick's `identify`; the 3 s timeout guards against hangs.
    return subprocess.check_output(['identify', '-format', '%[EXIF:DateTimeOriginal]', filePath], timeout=3).decode('utf8').strip()
def getDimensions(filePath) :
    """Return the (width, height) of the image at *filePath* as a tuple of ints."""
    # ImageMagick's `identify` prints "<width>x<height>" with this format string.
    raw = subprocess.check_output(
        ['identify', '-format', '%[w]x%[h]', filePath], timeout=3
    )
    width_text, height_text = raw.decode('utf8').strip().split('x')
    return int(width_text), int(height_text)
def createThumbnail(originPath, destinationPath, size='200x200') :
    """Creates a thumbnail image out of originPath in destinationPath of size (which defaults to maxWidth:200, maxHeight:200) and keeps ratio. The function creates directory if not exists already. Returns the subprocess's response."""
    print('Create thumbnail -> %s' % destinationPath)
    destDirPath = os.path.split(destinationPath)[0]
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard; the truthiness check avoids makedirs('')
    # blowing up when destinationPath has no directory component.
    if destDirPath:
        os.makedirs(destDirPath, exist_ok=True)
    return subprocess.run(['convert', originPath, '-auto-orient', '-thumbnail', size, destinationPath], timeout=3)
def createRotatedImage(originPath, destinationPath) :
    """Creates a rotated image out of originPath in destinationPath. EXIF orientation will be adjusted with the rotation. The function creates directory if not exists already. Returns the subprocess's response."""
    print('Create rotatedImage -> %s' % destinationPath)
    destDirPath = os.path.split(destinationPath)[0]
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard; the truthiness check avoids makedirs('')
    # blowing up when destinationPath has no directory component.
    if destDirPath:
        os.makedirs(destDirPath, exist_ok=True)
    return subprocess.run(['convert', originPath, '-auto-orient', destinationPath], timeout=3)
| 58.903226 | 234 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 934 | 0.511501 |
2a4164567c21ed5336f207306a71b8f7e94eae41 | 18,415 | py | Python | plugins/modules/oci_resource_manager_template.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 108 | 2020-05-19T20:46:10.000Z | 2022-03-25T14:10:01.000Z | plugins/modules/oci_resource_manager_template.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 90 | 2020-06-14T22:07:11.000Z | 2022-03-07T05:40:29.000Z | plugins/modules/oci_resource_manager_template.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 42 | 2020-08-30T23:09:12.000Z | 2022-03-25T16:58:01.000Z | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module maturity/support metadata consumed by Ansible tooling.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_resource_manager_template
short_description: Manage a Template resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a Template resource in Oracle Cloud Infrastructure
- For I(state=present), creates a private template in the specified compartment.
- "This resource has the following action operations in the M(oracle.oci.oci_resource_manager_template_actions) module: change_compartment."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing this template.
- Required for create using I(state=present).
type: str
display_name:
description:
- The template's display name. Avoid entering confidential information.
- Required for create using I(state=present).
- Required for update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
description:
description:
- Description of the template. Avoid entering confidential information.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
long_description:
description:
- Detailed description of the template. This description is displayed in the Console page listing templates when the template is expanded. Avoid
entering confidential information.
- This parameter is updatable.
type: str
logo_file_base64_encoded:
description:
- "Base64-encoded logo to use as the template icon.
Template icon file requirements: PNG format, 50 KB maximum, 110 x 110 pixels."
- This parameter is updatable.
type: str
template_config_source:
description:
- ""
- Required for create using I(state=present).
- This parameter is updatable.
type: dict
suboptions:
template_config_source_type:
description:
- Specifies the `configSourceType` for uploading the Terraform configuration.
- This parameter is updatable.
type: str
choices:
- "ZIP_UPLOAD"
required: true
zip_file_base64_encoded:
description:
- ""
- This parameter is updatable.
- Applicable when template_config_source_type is 'ZIP_UPLOAD'
type: str
freeform_tags:
description:
- "Free-form tags associated with the resource. Each tag is a key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
- This parameter is updatable.
type: dict
template_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the template.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
state:
description:
- The state of the Template.
- Use I(state=present) to create or update a Template.
- Use I(state=absent) to delete a Template.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create template
oci_resource_manager_template:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name: display_name_example
description: description_example
template_config_source:
# required
template_config_source_type: ZIP_UPLOAD
zip_file_base64_encoded: zip_file_base64_encoded_example
# optional
long_description: long_description_example
logo_file_base64_encoded: logo_file_base64_encoded_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update template
oci_resource_manager_template:
# required
template_id: "ocid1.template.oc1..xxxxxxEXAMPLExxxxxx"
# optional
display_name: display_name_example
description: description_example
long_description: long_description_example
logo_file_base64_encoded: logo_file_base64_encoded_example
template_config_source:
# required
template_config_source_type: ZIP_UPLOAD
zip_file_base64_encoded: zip_file_base64_encoded_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update template using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_resource_manager_template:
# required
display_name: display_name_example
# optional
description: description_example
long_description: long_description_example
logo_file_base64_encoded: logo_file_base64_encoded_example
template_config_source:
# required
template_config_source_type: ZIP_UPLOAD
zip_file_base64_encoded: zip_file_base64_encoded_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Delete template
oci_resource_manager_template:
# required
template_id: "ocid1.template.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
- name: Delete template using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_resource_manager_template:
# required
display_name: display_name_example
state: absent
"""
RETURN = """
template:
description:
- Details of the Template resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- Unique identifier (L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm)) for the template.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing this template.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
category_id:
description:
- Unique identifier for the category where the template is located.
Possible values are `0` (Quick Starts), `1` (Service), `2` (Architecture), and `3` (Private).
returned: on success
type: str
sample: "ocid1.category.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- Human-readable name of the template.
returned: on success
type: str
sample: display_name_example
description:
description:
- Brief description of the template.
returned: on success
type: str
sample: description_example
long_description:
description:
- Detailed description of the template. This description is displayed in the Console page listing templates when the template is expanded. Avoid
entering confidential information.
returned: on success
type: str
sample: long_description_example
is_free_tier:
description:
- whether the template will work for free tier tenancy.
returned: on success
type: bool
sample: true
time_created:
description:
- "The date and time at which the template was created.
Format is defined by RFC3339.
Example: `2020-11-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
template_config_source:
description:
- ""
returned: on success
type: complex
contains:
template_config_source_type:
description:
- The type of configuration source to use for the template configuration.
returned: on success
type: str
sample: ZIP_UPLOAD
lifecycle_state:
description:
- The current lifecycle state of the template.
returned: on success
type: str
sample: ACTIVE
freeform_tags:
description:
- "Free-form tags associated with the resource. Each tag is a key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"category_id": "ocid1.category.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"description": "description_example",
"long_description": "long_description_example",
"is_free_tier": true,
"time_created": "2013-10-20T19:20:30+01:00",
"template_config_source": {
"template_config_source_type": "ZIP_UPLOAD"
},
"lifecycle_state": "ACTIVE",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
    # The OCI Python SDK is an optional dependency: record its availability
    # here and report a clean failure from main() instead of crashing on import.
    from oci.resource_manager import ResourceManagerClient
    from oci.resource_manager.models import CreateTemplateDetails
    from oci.resource_manager.models import UpdateTemplateDetails

    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class TemplateHelperGen(OCIResourceHelperBase):
    """Supported operations: create, update, get, list and delete"""

    def get_module_resource_id_param(self):
        # Name of the module parameter that carries the resource OCID.
        return "template_id"

    def get_module_resource_id(self):
        return self.module.params.get("template_id")

    def get_get_fn(self):
        return self.client.get_template

    def get_resource(self):
        # Single GET with the collection-wide retry/backoff policy.
        template_id = self.module.params.get("template_id")
        return oci_common_utils.call_with_backoff(
            self.client.get_template, template_id=template_id,
        )

    def get_required_kwargs_for_list(self):
        # list_templates has no mandatory filters.
        return dict()

    def get_optional_kwargs_for_list(self):
        candidates = ["compartment_id", "template_id", "display_name"]

        def _selected(param):
            # Skip unset params; otherwise include the param when matching by
            # name, or when it is not excluded by an explicit key_by list.
            if self.module.params.get(param) is None:
                return False
            if self._use_name_as_identifier():
                return True
            key_by = self.module.params.get("key_by")
            return not key_by or param in key_by

        return {
            param: self.module.params[param]
            for param in candidates
            if _selected(param)
        }

    def list_resources(self):
        kwargs = oci_common_utils.merge_dicts(
            self.get_required_kwargs_for_list(), self.get_optional_kwargs_for_list()
        )
        return oci_common_utils.list_all_resources(self.client.list_templates, **kwargs)

    def get_create_model_class(self):
        return CreateTemplateDetails

    def get_exclude_attributes(self):
        # Write-only attribute (never returned by GET), so it is skipped when
        # comparing desired vs. existing state.
        return ["logo_file_base64_encoded"]

    def create_resource(self):
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.create_template,
            call_fn_args=(),
            call_fn_kwargs=dict(create_template_details=self.get_create_model(),),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.CREATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.CREATE_OPERATION_KEY,
            ),
        )

    def get_update_model_class(self):
        return UpdateTemplateDetails

    def update_resource(self):
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.update_template,
            call_fn_args=(),
            call_fn_kwargs=dict(
                template_id=self.module.params.get("template_id"),
                update_template_details=self.get_update_model(),
            ),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.UPDATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.UPDATE_OPERATION_KEY,
            ),
        )

    def delete_resource(self):
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.delete_template,
            call_fn_args=(),
            call_fn_kwargs=dict(template_id=self.module.params.get("template_id"),),
            waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
            operation=oci_common_utils.DELETE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=self.get_wait_for_states_for_operation(
                oci_common_utils.DELETE_OPERATION_KEY,
            ),
        )
# Look up an optional user-supplied customization class by name; falls back to
# a no-op base when none is registered.
TemplateHelperCustom = get_custom_class("TemplateHelperCustom")


class ResourceHelper(TemplateHelperCustom, TemplateHelperGen):
    # MRO places customizations (if any) ahead of the generated implementation.
    pass
def main():
    # Common argspec (auth options, wait options) shared by all OCI modules.
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=True, supports_wait=True
    )
    # Module-specific parameters for the Template resource.
    module_args.update(
        dict(
            compartment_id=dict(type="str"),
            display_name=dict(aliases=["name"], type="str"),
            description=dict(type="str"),
            long_description=dict(type="str"),
            logo_file_base64_encoded=dict(type="str"),
            template_config_source=dict(
                type="dict",
                options=dict(
                    template_config_source_type=dict(
                        type="str", required=True, choices=["ZIP_UPLOAD"]
                    ),
                    zip_file_base64_encoded=dict(type="str"),
                ),
            ),
            freeform_tags=dict(type="dict"),
            defined_tags=dict(type="dict"),
            template_id=dict(aliases=["id"], type="str"),
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )
    )

    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_helper = ResourceHelper(
        module=module,
        resource_type="template",
        service_client_class=ResourceManagerClient,
        namespace="resource_manager",
    )

    result = dict(changed=False)

    # Exactly one branch runs, chosen from state + which identifier was given
    # (OCID vs. display name).
    if resource_helper.is_delete_using_name():
        result = resource_helper.delete_using_name()
    elif resource_helper.is_delete():
        result = resource_helper.delete()
    elif resource_helper.is_update_using_name():
        result = resource_helper.update_using_name()
    elif resource_helper.is_update():
        result = resource_helper.update()
    elif resource_helper.is_create():
        result = resource_helper.create()

    module.exit_json(**result)


if __name__ == "__main__":
    main()
| 38.686975 | 160 | 0.645072 | 3,767 | 0.204561 | 0 | 0 | 0 | 0 | 0 | 0 | 12,312 | 0.668585 |
2a419057ed32d0d72a2ab5830767804b84e6235c | 2,259 | py | Python | test.py | alexanderAustin/PythonGame | 9a3da340ffa426d3c6d59b5c2eb3f2a68792164f | [
"Apache-2.0"
] | null | null | null | test.py | alexanderAustin/PythonGame | 9a3da340ffa426d3c6d59b5c2eb3f2a68792164f | [
"Apache-2.0"
] | null | null | null | test.py | alexanderAustin/PythonGame | 9a3da340ffa426d3c6d59b5c2eb3f2a68792164f | [
"Apache-2.0"
] | null | null | null | # This was built from the tutorial https://www.raywenderlich.com/24252/beginning-game-programming-for-teens-with-python
# NOTE: Python 2 source — see the bare ``print crframes`` statement below.
import pygame, math, random
from pygame.locals import *
import pyganim

# 2 - Initialize the game
pygame.init()
width, height = 640, 480
screen=pygame.display.set_mode((width, height))
pygame.display.set_caption('PyGame - Testing')

# Asset roots (rootAud is declared but unused in this script).
rootImg = "resources/images/basic_game/"
rootAud = "resources/audio/basic_game/"
player = pygame.image.load(rootImg + "dude.png")
grass = pygame.image.load(rootImg + "grass.png")
castle = pygame.image.load(rootImg + "castle.png").convert_alpha()
# cow = pygame.image.load("resources/images/animals/cow/cow_front.png") #subject to change

# Used https://github.com/asweigart/pyganim/tree/master/examples
# http://www.pygame.org/project-Pyganim+sprite+animation+module-2106-.html
# for the sprite sheets
cows = pyganim.getImagesFromSpriteSheet(
    filename="resources/images/animals/cow/cow_front.png",
    rows=4, cols=2,
    scale=2)
# Pair every frame with a 100 ms display duration.
cframes = list(zip(cows, [100] * len(cows)))
cowObj = pyganim.PygAnimation(cframes)
cowObj.play()

cowsr = pyganim.getImagesFromSpriteSheet(
    filename="resources/images/animals/cow/cow_rear.png",
    rows=3, cols=3,
    scale=2)
crframes = list(zip(cowsr, [100] * len(cowsr)))
# crframes = crframes.pop()#remove blank frame
print crframes
cowrObj = pyganim.PygAnimation(crframes)
cowrObj.play()

# 4 - keep looping through
running = 1
while running:
    # 5 - clear the screen before drawing it again
    screen.fill(0)
    # 6 - draw the screen elements
    for x in range(width/grass.get_width()+1):
        for y in range(height/grass.get_height()+1):
            screen.blit(grass,(x*100,y*100))
    cowObj.blit(screen, (200, 20))
    cowrObj.blit(screen, (50, 200))
    # screen.blit(castle, (100,100))
    # 7 - update the screen
    pygame.display.flip()
    # 8 - loop through the events
    for event in pygame.event.get():
        # check if the event is the X button
        if event.type==pygame.QUIT:
            # if it is quit the game
            pygame.quit()
            exit(0)

# NOTE(review): unreachable — ``running`` is never cleared and the loop above
# only exits via exit(0); kept as in the original.
while 1:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit(0)
    pygame.display.flip()
2a42be1c9cc44039b76455cddf90f7e4297f5a21 | 322 | py | Python | Python diye Programming sekha 1st/List to string.py | mitul3737/My-Python-Programming-journey-from-Beginning-to-Data-Sciene-Machine-Learning-AI-Deep-Learning | ca2c15c597a64e5a7689ba3a44ce36a1c0828194 | [
"MIT"
] | 1 | 2021-05-02T20:30:33.000Z | 2021-05-02T20:30:33.000Z | Python diye Programming sekha 1st/List to string.py | Mit382/My-Python-Programming-Journey-from-Beginning-to-Data-Sciene-Machine-Learning-AI-Deep-Learning | c19d84dfe6dcf496ff4527724f92e228579b6456 | [
"MIT"
] | null | null | null | Python diye Programming sekha 1st/List to string.py | Mit382/My-Python-Programming-Journey-from-Beginning-to-Data-Sciene-Machine-Learning-AI-Deep-Learning | c19d84dfe6dcf496ff4527724f92e228579b6456 | [
"MIT"
] | 1 | 2021-05-02T20:30:29.000Z | 2021-05-02T20:30:29.000Z | li=["a","b","d"]
print(li)
# Fix: the original bound each joined result to the name ``str``, shadowing the
# built-in str() for the rest of the module; use a dedicated variable instead.
joined = "".join(li)  # concatenate the list items with no separator
print(joined)
joined = " ".join(li)  # items separated by a space
print(joined)
joined = ",".join(li)  # items separated by a comma
print(joined)
joined = "&".join(li)  # items separated by an ampersand
print(joined)
2a4394188059b503b596278b8e6a7e6db8a33d8a | 240 | py | Python | src/classes/demo.py | daninarvaezr/SeleniumInmofianza | 51d488043188b87199dbf28b9f7ef6d43f5163d9 | [
"MIT"
] | 1 | 2021-12-17T05:18:47.000Z | 2021-12-17T05:18:47.000Z | src/classes/demo.py | daninarvaezr/SeleniumInmofianza | 51d488043188b87199dbf28b9f7ef6d43f5163d9 | [
"MIT"
] | null | null | null | src/classes/demo.py | daninarvaezr/SeleniumInmofianza | 51d488043188b87199dbf28b9f7ef6d43f5163d9 | [
"MIT"
] | null | null | null | import os
# Fix: use a context manager so the handle is closed even if a write raises,
# and avoid naming it ``file`` (a Python 2 builtin).
# NOTE(review): the path is hard-coded to one user's machine — parameterize if reused.
with open("C:/Users/michael.duran\OneDrive - Thomas Greg/Documents/Audisoft/Thomas/Inmofianza/TeamQA/SeleniumInmofianza/src/classes/datos.txt","w") as out:
    out.write("Primera línea" + os.linesep)  # first line + platform line ending
    out.write("Segunda línea")               # second line, no trailing newline
2a43cb149706d93e473e58a6de1f4b209d11723a | 1,962 | py | Python | src/egyptian_data_generator/finance.py | mahmoudahmedd/Egyptian-Data-Generator | d7bea688a7c7f8604908f302917267d795aa40a2 | [
"MIT"
] | 9 | 2020-11-25T10:45:32.000Z | 2020-11-29T12:42:09.000Z | src/egyptian_data_generator/finance.py | mahmoudahmedd/Egyptian-Data-Generator | d7bea688a7c7f8604908f302917267d795aa40a2 | [
"MIT"
] | null | null | null | src/egyptian_data_generator/finance.py | mahmoudahmedd/Egyptian-Data-Generator | d7bea688a7c7f8604908f302917267d795aa40a2 | [
"MIT"
] | null | null | null | from egyptian_data_generator.helpers import Helpers
class Finance:
def americanexpressCreditCardNumber(self):
return self.generateCreditCardNumber("americanexpress")
def discoverCreditCardNumber(self):
return self.generate("discover")
def mastercardCreditCardNumber(self):
return self.generateCreditCardNumber("mastercard")
def visa16CreditCardNumber(self):
return self.generateCreditCardNumber("visa16")
def visa13CreditCardNumber(self):
return self.generateCreditCardNumber("visa13")
def generateCreditCardNumber(self, _type):
card_types = ["americanexpress", "visa13", "visa16", "mastercard", "discover"]
def prefill(t):
def_length = 16
if t == card_types[0]:
return [3, Helpers.intBetween(4,7)], 13
elif t == card_types[1] or t == card_types[2]:
if t.endswith("16"):
return [4], def_length - 1
else:
return [4], 12
elif t == card_types[3]:
return [5, Helpers.intBetween(1,5)], def_length - 2
elif t == card_types[4]:
return [6, 0, 1, 1], def_length - 4
else:
return [], def_length
def finalize(nums):
check_sum = 0
check_offset = (len(nums) + 1) % 2
for i, n in enumerate(nums):
if (i + check_offset) % 2 == 0:
n_ = n*2
check_sum += n_ -9 if n_ > 9 else n_
else:
check_sum += n
return nums + [10 - (check_sum % 10) ]
initial, rem = prefill(_type.lower())
so_far = initial + [Helpers.intBetween(1,9) for x in range(rem - 1)]
return "".join(map(str,finalize(so_far)))
| 33.827586 | 87 | 0.514271 | 1,899 | 0.96789 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.059123 |
2a471148f06d620927af2a6aa7cde9846676eac8 | 349 | py | Python | modulo_exclusao/limpa_tableWidget_tela_pedido.py | kallif003/Sistema-Delivery | b9c68e3a02ff0d60c7513dd09c08d722e76b138b | [
"MIT"
] | null | null | null | modulo_exclusao/limpa_tableWidget_tela_pedido.py | kallif003/Sistema-Delivery | b9c68e3a02ff0d60c7513dd09c08d722e76b138b | [
"MIT"
] | null | null | null | modulo_exclusao/limpa_tableWidget_tela_pedido.py | kallif003/Sistema-Delivery | b9c68e3a02ff0d60c7513dd09c08d722e76b138b | [
"MIT"
def limpar(*args):
    """Reset the order screen and delegate table cleanup to the SQL helper.

    Positional args (in order): main window, DB cursor, DB connection,
    SQL cleanup helper.
    """
    janela, cursor_db, conexao, limpador = args[0], args[1], args[2], args[3]
    # Reset the visible widgets first, then clear the backing table.
    janela.valorTotal.setText("Valor Total:")
    janela.tableWidget_cadastro.clear()
    janela.desconto.clear()
    janela.acrescimo.clear()
    limpador.limpar(cursor_db, conexao)
2a49bbb320a2f3a0c0ad1fe1ec309514aaf3de0f | 1,736 | py | Python | src/app.py | borjlp/the-real-devops-challenge | 6571c4ea2d08f421e92d319b08e7b15dcdb6393c | [
"Apache-2.0"
] | null | null | null | src/app.py | borjlp/the-real-devops-challenge | 6571c4ea2d08f421e92d319b08e7b15dcdb6393c | [
"Apache-2.0"
] | null | null | null | src/app.py | borjlp/the-real-devops-challenge | 6571c4ea2d08f421e92d319b08e7b15dcdb6393c | [
"Apache-2.0"
] | null | null | null | from os import environ
import logging
from bson.objectid import ObjectId
from bson.errors import InvalidId
from flask import Flask, jsonify, abort
from flask_pymongo import PyMongo
from typing import List, Dict
from pymongo import MongoClient, errors
from mongoflask import MongoJSONEncoder, find_restaurants

# Flask application wired to MongoDB via Flask-PyMongo; the connection string
# comes from the MONGO_URI environment variable.
app = Flask(__name__)
app.config["MONGO_URI"] = environ.get("MONGO_URI")
# Custom JSON encoder from mongoflask — presumably handles ObjectId values; verify there.
app.json_encoder = MongoJSONEncoder
mongo = PyMongo(app)
logging.basicConfig(format='%(process)d %(levelname)s %(message)s')
@app.route("/health")
def readiness():
    # Readiness probe: answering at all means the web process is up.
    return "OK", 200
@app.route("/live")
def liveness():
    """Liveness probe: ping MongoDB and report 500 when it is unreachable."""
    try:
        # Short server-selection timeout so the probe fails fast.
        client = MongoClient(environ.get("MONGO_URI"),
                             serverSelectionTimeoutMS=2000)
        client.admin.command('ping')
    except (errors.ServerSelectionTimeoutError,
            errors.ConnectionFailure,
            errors.ExecutionTimeout) as exc:
        logging.error('Database connection error: %s', exc)
        return "ERROR", 500
    return "OK", 200
@app.route("/api/v1/restaurant")
def restaurants() -> List:
    """Return every restaurant document as a JSON array."""
    return jsonify(find_restaurants(mongo))
@app.route("/api/v1/restaurant/<id>")
def restaurant(id) -> Dict:
    """Return one restaurant by ObjectId: 500 if malformed, 404 if absent."""
    try:
        matches = find_restaurants(mongo, ObjectId(id))
    except (InvalidId, TypeError):
        logging.error("Not a valid ObjectId.")
        abort(500)
    if not matches:
        abort(404)
    return jsonify(matches[0])
@app.errorhandler(404)
def not_found(e) -> Dict:
    # JSON body instead of Flask's default HTML error page.
    return dict(Error="ID not found"), 404


@app.errorhandler(500)
def error(e) -> Dict:
    # NOTE(review): message assumes the 500 came from an invalid ObjectId,
    # but this handler catches any 500 raised by the app.
    return dict(Error="ID not valid"), 500


if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=False, port=8080)
| 25.15942 | 67 | 0.68318 | 0 | 0 | 0 | 0 | 1,128 | 0.64977 | 0 | 0 | 255 | 0.146889 |
2a49c06bdd89022dc1ff091683808f803c67b7b5 | 434 | py | Python | .ycm_extra_conf.py | baileyforrest/calico | 68f859d244e1dc62b1b690349263cb51e1c19bce | [
"MIT"
] | null | null | null | .ycm_extra_conf.py | baileyforrest/calico | 68f859d244e1dc62b1b690349263cb51e1c19bce | [
"MIT"
] | null | null | null | .ycm_extra_conf.py | baileyforrest/calico | 68f859d244e1dc62b1b690349263cb51e1c19bce | [
"MIT"
] | null | null | null | import subprocess
def Settings( **kwargs ):
    """YouCompleteMe per-project config: compile flags for this C++ tree."""
    flags = [
        '-x',
        'c++',
        '-Wall',
        '-Wextra',
        '-Wno-unused-parameter',
        '-std=c++14',
        '-I',
        '.',
        '-I', 'third_party/googletest/googletest/include',
        '-I', 'third_party/abseil-cpp',
        '-I', 'third_party/libbcf',
        '-pthread',
    ]
    # Append the ncurses include/define flags reported by the system tool.
    # NOTE(review): under Python 3 check_output returns bytes, so .split('\n')
    # would raise TypeError — this file appears to target Python 2; confirm
    # before using with a Python 3 YCM.
    flags += subprocess.check_output(['ncursesw5-config', '--cflags']).split('\n')
    return {
        'flags': flags,
    }
| 17.36 | 80 | 0.529954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.495392 |
2a4a2838e188c76f4a2cd11bbdf6a1bfab7e4da2 | 101 | py | Python | python/lookout/__init__.py | meyskens/lookout-sdk | 805d0d61ca97120a257283780790ff2458a6ef29 | [
"Apache-2.0"
] | 5 | 2018-12-17T11:22:31.000Z | 2019-03-14T02:50:43.000Z | python/lookout/__init__.py | meyskens/lookout-sdk | 805d0d61ca97120a257283780790ff2458a6ef29 | [
"Apache-2.0"
] | 64 | 2018-08-31T10:41:36.000Z | 2019-12-14T15:01:13.000Z | python/lookout/__init__.py | meyskens/lookout-sdk | 805d0d61ca97120a257283780790ff2458a6ef29 | [
"Apache-2.0"
] | 16 | 2018-08-30T20:33:55.000Z | 2021-08-09T05:37:06.000Z | """Lookout - Assisted Code Review"""
import pkg_resources
pkg_resources.declare_namespace(__name__)
| 20.2 | 41 | 0.811881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.356436 |
2a4c66a611b82e5e3fffe75e8ee7a205db523d36 | 789 | py | Python | Examples/Example_2.py | jmwerner/iPython_Parser | 82c2c14af7b312a7735c7ae6ae28d8a3f2c8b876 | [
"MIT"
] | 3 | 2015-01-29T04:20:43.000Z | 2015-02-08T05:02:02.000Z | Examples/Example_2.py | jmwerner/iPython_Parser | 82c2c14af7b312a7735c7ae6ae28d8a3f2c8b876 | [
"MIT"
] | 4 | 2015-01-29T18:15:20.000Z | 2015-02-03T23:58:20.000Z | Examples/Example_2.py | jmwerner/txt2pynb | 82c2c14af7b312a7735c7ae6ae28d8a3f2c8b876 | [
"MIT"
] | null | null | null |
# can I have a comment here?
# Test fixture for a txt->notebook parser: the triple-quoted strings below are
# markdown "cells" (they are no-op expression statements when run as Python).
import re
a = 538734
print(re.search('a', "helalo") != None)
print(re.search('a', "hallo") != None)
a = 30 * 25
for i in range(0,5):
    print(i)
'''
This is a sigma $\sigma$ symbol in markdown and a forall $\forall$ symbol too!
Here's some mathematics $ 1 + 2 - 3 * 5 / 15$ and "quoted" information
And an integral for good measure $\int_{5}^{10}f(x)dx$
#Markdown header!
'''
a = 5
b = 10
a + b
'''
# BIG MARKDOWN HEADER!
Markdown line two, here's some math: $\sigma+\alpha=\beta$
'''
a=20
b=34
c=45
'''
# BIG MARKDOWN HEADER!
Markdown line two, here's some math: $\sigma+\alpha=\beta$
Indenting entire blocks is ok too,
'''
a=24
b=35
c=41
for i in range(0,5):
    a = i * 10
    b = a * i
    for k in range(1,2):
        b = a + 2 * k
# NOTE(review): original indentation was lost in this copy; ``d = b`` is
# placed at top level — confirm the intended nesting.
d = b
2a4c8d2566e58f41d640706fecad5ae122f31804 | 2,061 | py | Python | test/test_phones.py | tnurtdinov-st/python_traning | a5204b073101a9c7870b3219ba7ceff321cf6b41 | [
"Apache-2.0"
] | null | null | null | test/test_phones.py | tnurtdinov-st/python_traning | a5204b073101a9c7870b3219ba7ceff321cf6b41 | [
"Apache-2.0"
] | null | null | null | test/test_phones.py | tnurtdinov-st/python_traning | a5204b073101a9c7870b3219ba7ceff321cf6b41 | [
"Apache-2.0"
] | null | null | null | import re
from random import randrange
def test_phones_on_homepage(app):
    """The first contact's merged phone column must match its edit-page fields."""
    home_contact = app.contact.get_contact_list()[0]
    edit_contact = app.contact.get_contact_info_from_edit_page(0)
    expected = merge_phones_like_on_homepage(edit_contact)
    assert home_contact.all_phones_from_homepage == expected
def test_phones_on_view_page(app):
    """Every phone field on the view page must equal its edit-page value."""
    view_contact = app.contact.get_contact_from_viewpage(0)
    edit_contact = app.contact.get_contact_info_from_edit_page(0)
    for field in ("homephone", "mobilephone", "workphone", "phone2"):
        assert getattr(view_contact, field) == getattr(edit_contact, field)
def test_random_person_info(app):
    """A randomly chosen home-page row must agree with its edit page."""
    all_contacts = app.contact.get_contact_list()
    index = randrange(len(all_contacts))
    home_contact = app.contact.get_contact_list()[index]
    edit_contact = app.contact.get_contact_info_from_edit_page(index)
    for field in ("firstname", "lastname", "id", "address1"):
        assert getattr(home_contact, field) == getattr(edit_contact, field)
    assert home_contact.all_phones_from_homepage == merge_phones_like_on_homepage(edit_contact)
    assert home_contact.all_emails == merge_emails_like_on_homepage(edit_contact)
def merge_phones_like_on_homepage(contact):
    """Join the non-empty, cleaned phone fields with newlines (home-page format)."""
    phones = [contact.homephone, contact.mobilephone, contact.workphone, contact.phone2]
    cleaned = [clear(p) for p in phones if p is not None]
    return "\n".join(p for p in cleaned if p != "")


def merge_emails_like_on_homepage(contact):
    """Join the non-empty, cleaned e-mail fields with newlines (home-page format)."""
    emails = [contact.email1, contact.email2, contact.email3]
    cleaned = [clear(e) for e in emails if e is not None]
    return "\n".join(e for e in cleaned if e != "")


def clear(s):
    """Strip the characters the home page drops: parentheses, spaces, dashes."""
    return re.sub("[() -]", "", s)
2a4ee98d7d60b26fe472280ddb00dfeca8c8d165 | 8,352 | py | Python | data_analysis/data_analysis.py | marcoscale98/emojinet | 07f3d87c2b6e1b8dfdcc225691f97cc839117611 | [
"Apache-2.0"
] | null | null | null | data_analysis/data_analysis.py | marcoscale98/emojinet | 07f3d87c2b6e1b8dfdcc225691f97cc839117611 | [
"Apache-2.0"
] | null | null | null | data_analysis/data_analysis.py | marcoscale98/emojinet | 07f3d87c2b6e1b8dfdcc225691f97cc839117611 | [
"Apache-2.0"
] | 1 | 2020-06-21T08:02:28.000Z | 2020-06-21T08:02:28.000Z | import argparse
import logging
import sys
import json
import plotly.offline
import plotly.graph_objs as go
sys.path.append(sys.path[0] + "/..")
from utils.fileprovider import FileProvider
from preprocessing.reader import EvalitaDatasetReader
from nltk.tokenize import TweetTokenizer
logging.getLogger().setLevel(logging.INFO)
def plot_distribution(dictionary, title, x_axis_title, y_axis_title, min_frequency, dtick, color, output_path):
    """Write a horizontal bar chart of ``dictionary``'s counts to an HTML file.

    Only entries with a count strictly above ``min_frequency`` are plotted,
    sorted by ascending count.
    """
    logging.info("Plotting {}".format(title))
    ranked = sorted(dictionary.items(), key=lambda kv: kv[1])
    frequencies = [count for _, count in ranked if count > min_frequency]
    labels = [element for element, count in ranked if count > min_frequency]
    plotly.offline.plot(
        {
            "data": [go.Bar(orientation="h",
                            x=frequencies,
                            y=labels,
                            marker=dict(color=color))],
            "layout": go.Layout(
                title="<b>{}</b>".format(title),
                xaxis=dict(title="<b>{}</b>".format(x_axis_title),
                           titlefont=dict(color=color)),
                yaxis=dict(title="<b>{}</b>".format(y_axis_title), dtick=dtick,
                           titlefont=dict(color=color)),
                margin=go.layout.Margin(l=250, r=250),
            ),
        },
        filename=output_path,
        auto_open=False,
    )
if __name__ == '__main__':
    """##### Parameter parsing"""
    parser = argparse.ArgumentParser(description='Data analysis for the ITAmoji task')
    parser.add_argument('--workdir', required=True, help='Work path')
    args = parser.parse_args()

    files = FileProvider(args.workdir)

    # Static lookup tables shipped next to this script.
    logging.info("Loading txt_2_emoji.json file")
    with open("{}/{}".format("data_analysis", "txt_2_emoji.json"), 'r') as txt_2_emoji_file:
        txt_2_emoji = json.load(txt_2_emoji_file)

    logging.info("Loading idx_2_emoji.json file")
    with open("{}/{}".format("data_analysis", "idx_2_emoji.json"), 'r') as idx_2_emoji_file:
        idx_2_emoji = json.load(idx_2_emoji_file)

    logging.info("Starting data analysis with parameters: {0}".format(vars(args)))

    raw_train = EvalitaDatasetReader(files.evalita)

    # Frequency tables accumulated over the training tweets.
    train_token_dict = dict()
    train_hashtag_dict = dict()
    train_mention_dict = dict()
    train_url_dict = dict()
    train_label_dict = dict()

    tweet_tokenizer = TweetTokenizer()

    logging.info("Computing counts for train set")
    for train_tweet_text, train_tweet_label in zip(raw_train.X, raw_train.Y):
        # tokens
        for token in tweet_tokenizer.tokenize(train_tweet_text.lower()):
            # general token
            train_token_dict[token] = train_token_dict[token] + 1 if train_token_dict.get(token) else 1
            if token.startswith("#"): # hashtag token
                train_hashtag_dict[token] = train_hashtag_dict[token] + 1 if train_hashtag_dict.get(token) else 1
            if token.startswith("@"): # mention token
                train_mention_dict[token] = train_mention_dict[token] + 1 if train_mention_dict.get(token) else 1
            if token.startswith("http"): # url token
                train_url_dict[token] = train_url_dict[token] + 1 if train_url_dict.get(token) else 1
        # labels
        train_label_dict[train_tweet_label] = train_label_dict[train_tweet_label] + 1 if train_label_dict.get(train_tweet_label) else 1

    # Write the headline counts both to the log and to a plain-text report.
    with open("data_analysis/data_analysis.txt", 'w') as data_analysis_output:
        total_number_of_tokens = sum([count for token, count in train_token_dict.items()])
        total_number_of_unique_tokens = len(train_token_dict)
        logging.info("Total number of tokens: {}".format(total_number_of_tokens))
        data_analysis_output.write("Total number of tokens: {}\n".format(total_number_of_tokens))
        logging.info("Total number of unique tokens: {}".format(total_number_of_unique_tokens))
        data_analysis_output.write("Total number of unique tokens: {}\n".format(total_number_of_unique_tokens))

        total_number_of_hashtags = sum([count for token, count in train_hashtag_dict.items()])
        total_number_of_unique_hashtags = len(train_hashtag_dict)
        logging.info("Total number of hashtags: {}".format(total_number_of_hashtags))
        data_analysis_output.write("Total number of hashtags: {}\n".format(total_number_of_hashtags))
        logging.info("Total number of unique hashtags: {}".format(total_number_of_unique_hashtags))
        data_analysis_output.write("Total number of unique hashtags: {}\n".format(total_number_of_unique_hashtags))

        total_number_of_mentions = sum([count for token, count in train_mention_dict.items()])
        total_number_of_unique_mentions = len(train_mention_dict)
        logging.info("Total number of mentions: {}".format(total_number_of_mentions))
        data_analysis_output.write("Total number of mentions: {}\n".format(total_number_of_mentions))
        logging.info("Total number of unique mentions: {}".format(total_number_of_unique_mentions))
        data_analysis_output.write("Total number of unique mentions: {}\n".format(total_number_of_unique_mentions))

        total_number_of_urls = sum([count for token, count in train_url_dict.items()])
        total_number_of_unique_urls = len(train_url_dict)
        logging.info("Total number of URLs: {}".format(total_number_of_urls))
        data_analysis_output.write("Total number of URLs: {}\n".format(total_number_of_urls))
        logging.info("Total number of unique URLs: {}".format(total_number_of_unique_urls))
        data_analysis_output.write("Total number of unique URLs: {}\n".format(total_number_of_unique_urls))

        total_number_of_labels = sum([count for token, count in train_label_dict.items()])
        total_number_of_unique_labels = len(train_label_dict)
        logging.info("Total number of labels: {}".format(total_number_of_labels))
        data_analysis_output.write("Total number of labels: {}\n".format(total_number_of_labels))
        logging.info("Total number of unique labels: {}".format(total_number_of_unique_labels))
        data_analysis_output.write("Total number of unique labels: {}\n".format(total_number_of_unique_labels))

    # One bar chart per frequency table; thresholds keep the plots readable.
    plot_distribution(train_token_dict, "token distribution", "frequency", "token", 2000, 2, "#3498db", "data_analysis/token_distribution.html")
    plot_distribution(train_hashtag_dict, "hashtag distribution", "frequency", "hashtag", 250, 2, "#3498db", "data_analysis/hashtag_distribution.html")
    plot_distribution(train_mention_dict, "mention distribution", "frequency", "mention", 150, 2, "#3498db", "data_analysis/mention_distribution.html")
    plot_distribution(train_url_dict, "URL distribution", "frequency", "URL", 5, 2, "#3498db", "data_analysis/url_distribution.html")

    # Label (emoji) distribution plotted inline so label indices can be mapped
    # to glyphs via idx_2_emoji.
    logging.info("Plotting label distribution")
    min_frequency = 0
    X_label = []
    Y_label = []
    for label, label_count in sorted(train_label_dict.items(), key=lambda kv: kv[1], reverse=True):
        if label_count > min_frequency:
            X_label.append(label_count)
            Y_label.append(idx_2_emoji[str(label)])
    # NOTE(review): here x holds the frequencies, yet xaxis is titled "label"
    # and yaxis "frequency" — the titles look swapped relative to
    # plot_distribution; confirm intent.
    plotly.offline.plot({"data": [go.Bar(orientation="h",
                                         x=X_label,
                                         y=Y_label,
                                         marker=dict(color="#3498db"))],
                         "layout": go.Layout(title="<b>label distribution</b>",
                                             xaxis=dict(title="<b>label</b>",
                                                        titlefont=dict(color="#3498db")),
                                             yaxis=dict(title="<b>frequency</b>", dtick=1,
                                                        titlefont=dict(color="#3498db")),
                                             margin=go.layout.Margin(l=250, r=250)
                                             )
                         },
                        filename="data_analysis/label_distribution.html",
                        auto_open=False)
2a4f5e8b3a87fc69bf24b06e6f4c3bf065e18088 | 6,596 | py | Python | examples/pytorch/rgcn/entity_classify.py | shengwenLeong/dgl | 40ffa29a1dcd1639b1c41fa3b43e7339c1c187e7 | [
"Apache-2.0"
] | 2 | 2020-05-05T13:59:09.000Z | 2020-07-25T10:09:34.000Z | examples/pytorch/rgcn/entity_classify.py | shengwenLeong/dgl | 40ffa29a1dcd1639b1c41fa3b43e7339c1c187e7 | [
"Apache-2.0"
] | null | null | null | examples/pytorch/rgcn/entity_classify.py | shengwenLeong/dgl | 40ffa29a1dcd1639b1c41fa3b43e7339c1c187e7 | [
"Apache-2.0"
] | null | null | null | """
Modeling Relational Data with Graph Convolutional Networks
Paper: https://arxiv.org/abs/1703.06103
Code: https://github.com/tkipf/relational-gcn
Difference compared to tkipf/relation-gcn
* l2norm applied to all weights
* remove nodes that won't be touched
"""
import argparse
import numpy as np
import time
import torch
import torch.nn.functional as F
from dgl import DGLGraph
from dgl.nn.pytorch import RelGraphConv
from dgl.contrib.data import load_data
from functools import partial
from model import BaseRGCN
class EntityClassify(BaseRGCN):
    """RGCN for entity classification; layer wiring is done by BaseRGCN."""

    def create_features(self):
        # Featureless nodes: the "feature" is simply the node index.
        node_ids = torch.arange(self.num_nodes)
        return node_ids.cuda() if self.use_cuda else node_ids

    def _basis_conv(self, in_dim, out_dim, **extra):
        # All layers share the basis-decomposition configuration.
        return RelGraphConv(in_dim, out_dim, self.num_rels, "basis",
                            self.num_bases, self_loop=self.use_self_loop,
                            **extra)

    def build_input_layer(self):
        return self._basis_conv(self.num_nodes, self.h_dim,
                                activation=F.relu, dropout=self.dropout)

    def build_hidden_layer(self, idx):
        return self._basis_conv(self.h_dim, self.h_dim,
                                activation=F.relu, dropout=self.dropout)

    def build_output_layer(self):
        # Softmax over classes; no dropout on the output layer.
        return self._basis_conv(self.h_dim, self.out_dim,
                                activation=partial(F.softmax, dim=1))
def main(args):
    """Train and evaluate an RGCN entity classifier on an RDF dataset.

    Loads the dataset named by ``args.dataset``, builds the graph and model,
    runs ``args.n_epochs`` of full-graph training while timing the forward
    and backward passes, then reports test accuracy and mean timings.
    """
    # load graph data
    data = load_data(args.dataset, bfs_level=args.bfs_level, relabel=args.relabel)
    num_nodes = data.num_nodes
    num_rels = data.num_rels
    num_classes = data.num_classes
    labels = data.labels
    train_idx = data.train_idx
    test_idx = data.test_idx

    # split dataset into train, validate, test
    if args.validation:
        val_idx = train_idx[:len(train_idx) // 5]
        train_idx = train_idx[len(train_idx) // 5:]
    else:
        val_idx = train_idx

    # since the nodes are featureless, the input feature is then the node id.
    feats = torch.arange(num_nodes)

    # edge type and normalization factor
    edge_type = torch.from_numpy(data.edge_type)
    edge_norm = torch.from_numpy(data.edge_norm).unsqueeze(1)

    labels = torch.from_numpy(labels).view(-1)

    # check cuda
    use_cuda = args.gpu >= 0 and torch.cuda.is_available()
    if use_cuda:
        torch.cuda.set_device(args.gpu)
        feats = feats.cuda()
        edge_type = edge_type.cuda()
        edge_norm = edge_norm.cuda()
        labels = labels.cuda()

    # create graph
    g = DGLGraph()
    g.add_nodes(num_nodes)
    g.add_edges(data.edge_src, data.edge_dst)

    # create model
    model = EntityClassify(len(g),
                           args.n_hidden,
                           num_classes,
                           num_rels,
                           num_bases=args.n_bases,
                           num_hidden_layers=args.n_layers - 2,
                           dropout=args.dropout,
                           use_self_loop=args.use_self_loop,
                           use_cuda=use_cuda)

    if use_cuda:
        model.cuda()

    # optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2norm)

    # training loop
    print("start training...")
    forward_time = []
    backward_time = []
    model.train()
    for epoch in range(args.n_epochs):
        optimizer.zero_grad()
        t0 = time.time()
        logits = model(g, feats, edge_type, edge_norm)
        # BUGFIX: torch.cuda.synchronize() raises when CUDA is unavailable,
        # so only synchronize (for honest forward timing) on GPU runs.
        if use_cuda:
            torch.cuda.synchronize()
        t1 = time.time()
        loss = F.cross_entropy(logits[train_idx], labels[train_idx])
        loss.backward()
        optimizer.step()
        t2 = time.time()

        forward_time.append(t1 - t0)
        backward_time.append(t2 - t1)
        print("Epoch {:05d} | Train Forward Time(s) {:.4f} | Backward Time(s) {:.4f}".
              format(epoch, forward_time[-1], backward_time[-1]))
        train_acc = torch.sum(logits[train_idx].argmax(dim=1) == labels[train_idx]).item() / len(train_idx)
        val_loss = F.cross_entropy(logits[val_idx], labels[val_idx])
        val_acc = torch.sum(logits[val_idx].argmax(dim=1) == labels[val_idx]).item() / len(val_idx)
        #print("Train Accuracy: {:.4f} | Train Loss: {:.4f} | Validation Accuracy: {:.4f} | Validation loss: {:.4f}".
        #      format(train_acc, loss.item(), val_acc, val_loss.item()))
    print()

    model.eval()
    logits = model.forward(g, feats, edge_type, edge_norm)
    test_loss = F.cross_entropy(logits[test_idx], labels[test_idx])
    test_acc = torch.sum(logits[test_idx].argmax(dim=1) == labels[test_idx]).item() / len(test_idx)
    print("Test Accuracy: {:.4f} | Test loss: {:.4f}".format(test_acc, test_loss.item()))
    print()

    # First quarter of epochs treated as warm-up and excluded from the means.
    print("Mean forward time: {:4f}".format(np.mean(forward_time[len(forward_time) // 4:])))
    print("Mean backward time: {:4f}".format(np.mean(backward_time[len(backward_time) // 4:])))
if __name__ == '__main__':
    # Command-line interface for the RGCN entity-classification training script.
    parser = argparse.ArgumentParser(description='RGCN')
    parser.add_argument("--dropout", type=float, default=0,
                        help="dropout probability")
    parser.add_argument("--n-hidden", type=int, default=16,
                        help="number of hidden units")
    parser.add_argument("--gpu", type=int, default=-1,
                        help="gpu")
    parser.add_argument("--lr", type=float, default=1e-2,
                        help="learning rate")
    parser.add_argument("--n-bases", type=int, default=-1,
                        help="number of filter weight matrices, default: -1 [use all]")
    parser.add_argument("--n-layers", type=int, default=2,
                        help="number of propagation rounds")
    parser.add_argument("-e", "--n-epochs", type=int, default=50,
                        help="number of training epochs")
    parser.add_argument("-d", "--dataset", type=str, required=True,
                        help="dataset to use")
    parser.add_argument("--l2norm", type=float, default=0,
                        help="l2 norm coef")
    parser.add_argument("--relabel", default=False, action='store_true',
                        help="remove untouched nodes and relabel")
    parser.add_argument("--use-self-loop", default=False, action='store_true',
                        help="include self feature as a special relation")
    # --validation and --testing toggle the same flag; they are mutually exclusive.
    fp = parser.add_mutually_exclusive_group(required=False)
    fp.add_argument('--validation', dest='validation', action='store_true')
    fp.add_argument('--testing', dest='validation', action='store_false')
    parser.set_defaults(validation=True)

    args = parser.parse_args()
    print(args)
    args.bfs_level = args.n_layers + 1 # pruning used nodes for memory
    main(args)
| 38.348837 | 117 | 0.640085 | 896 | 0.13584 | 0 | 0 | 0 | 0 | 0 | 0 | 1,440 | 0.218314 |
2a4f98ac2abb3fb715cc59e4d4ffee24c83452b6 | 2,992 | py | Python | setup.py | DanielIzquierdo/osisoftpy | c86fbf36a5a6dd7bcd51eebeaddd9eeec1a9b276 | [
"Apache-2.0"
] | 10 | 2018-09-10T20:58:47.000Z | 2021-06-24T21:55:55.000Z | setup.py | DanielIzquierdo/osisoftpy | c86fbf36a5a6dd7bcd51eebeaddd9eeec1a9b276 | [
"Apache-2.0"
] | null | null | null | setup.py | DanielIzquierdo/osisoftpy | c86fbf36a5a6dd7bcd51eebeaddd9eeec1a9b276 | [
"Apache-2.0"
] | 7 | 2018-07-01T00:31:58.000Z | 2021-03-10T05:05:40.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import re
from glob import glob
from io import open
from os.path import basename
from os.path import splitext
import setuptools
from os import path
# Single sourcing the version -
def read(*names, **kwargs):
    """Return the text of a file located relative to this script.

    Path components are joined onto this file's directory; an optional
    ``encoding`` keyword (default ``'utf8'``) selects the codec.
    """
    target = path.join(path.dirname(__file__), *names)
    codec = kwargs.get('encoding', 'utf8')
    with open(target, encoding=codec) as handle:
        return handle.read()
def find_version(*file_paths):
    """Extract the ``__version__`` string from the given source file.

    Single-sources the package version: the file is read via read() and
    scanned for a ``__version__ = '...'`` assignment.

    :raises RuntimeError: if no version assignment is found.
    """
    match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]",
        read(*file_paths),
        re.M,
    )
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
setuptools.setup(
    name='osisoftpy',
    description='OSIsoft PI Web WebAPI client',
    # Version is single-sourced from src/osisoftpy/__init__.py.
    version=find_version('src/osisoftpy', '__init__.py'),
    license='Apache Software License',
    author='Andrew Pong',
    author_email='apong@dstcontrols.com',
    url='https://github.com/dstcontrols/osisoftpy',
    packages=setuptools.find_packages('src'),
    package_dir={'': 'src'},
    # Renamed the loop variable: `file` shadowed the (Python 2) builtin.
    py_modules=[splitext(basename(filename))[0] for filename in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        # BUG FIX: a missing trailing comma after the MacOS entry made Python
        # concatenate the two adjacent string literals into the single invalid
        # classifier 'Operating System :: MacOSOperating System :: Unix'.
        'Operating System :: MacOS',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Human Machine Interfaces',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Scientific/Engineering :: Interface Engine/Protocol '
        'Translator',
        'Topic :: Utilities',
    ],
    keywords=[
        # eg: 'keyword1', 'keyword2', 'keyword3',
    ],
    install_requires=[
        'blinker',
        'future',
        'arrow',
        'requests',
        'requests-kerberos',
        'mock'
    ],
    extras_require={
        # eg:
        #   'rst': ['docutils>=0.11'],
        #   ':python_version=="2.6"': ['argparse'],
    },
    entry_points={
    },
    scripts=[],
)
| 31.166667 | 74 | 0.606952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,547 | 0.517045 |
2a4fbd7855bed7d6e0f4a700d61e26a1d38e94eb | 11,080 | py | Python | tests/test.py | JostMigenda/hop-SNalert-app | 55c907bf07b1c8df2d5a9b3f7c648106b03406b9 | [
"BSD-3-Clause"
] | null | null | null | tests/test.py | JostMigenda/hop-SNalert-app | 55c907bf07b1c8df2d5a9b3f7c648106b03406b9 | [
"BSD-3-Clause"
] | null | null | null | tests/test.py | JostMigenda/hop-SNalert-app | 55c907bf07b1c8df2d5a9b3f7c648106b03406b9 | [
"BSD-3-Clause"
] | null | null | null | import subprocess
from hop import Stream
from hop.auth import Auth
from hop import auth
from hop.io import StartPosition
from hop.models import GCNCircular
import argparse
import random
import threading
import time
from functools import wraps
import datetime
import numpy
import uuid
from dotenv import load_dotenv
import os
from unittest.mock import Mock
import unittest
from mongoengine import connect, disconnect
# from hypothesis import given
# from hypothesis.strategies import lists, integers
# from hop.apps.SNalert import model as M
# from hop.apps.SNalert import decider
# from hop.apps.SNalert import db_storage
# from . import demo
# from .. import test_anything
# Locations randomly assigned to generated test messages (see writeMessage).
test_locations = ["Houston", "New York", "Boston", "Not Texas"]

# load environment variables (credentials, topic names, time format string)
load_dotenv(dotenv_path='./../.env')
# for measuring function execution time
# https://stackoverflow.com/questions/3620943/measuring-elapsed-time-with-the-time-module
# Registry of profiling results keyed by function name:
# name -> [call_count, [elapsed_seconds, ...]]
PROF_DATA = {}


def profile(fn):
    """Decorator that records call counts and wall-clock durations in PROF_DATA."""
    @wraps(fn)
    def with_profiling(*args, **kwargs):
        started = time.time()
        result = fn(*args, **kwargs)
        duration = time.time() - started

        # setdefault creates the [count, timings] slot on first call.
        stats = PROF_DATA.setdefault(fn.__name__, [0, []])
        stats[0] += 1
        stats[1].append(duration)

        return result

    return with_profiling


def print_prof_data():
    """Print call count, max and average elapsed time for every profiled function."""
    for fname, data in PROF_DATA.items():
        calls, timings = data
        print("Function %s called %d times. " % (fname, calls))
        print('Execution time max: %.3f, average: %.3f' % (max(timings), sum(timings) / len(timings)))


def clear_prof_data():
    """Reset the profiling registry to an empty state."""
    global PROF_DATA
    PROF_DATA = {}
def exponentialDistribution(mean):
    """Draw one sample from an exponential distribution.

    :param mean: Mean (scale parameter) of the distribution.
    :return: A non-negative float sample.
    """
    return numpy.random.exponential(scale=mean)
class integrationTest(object):
    """End-to-end test driver.

    Starts the SNalert model under test as a subprocess, publishes randomly
    spaced observation messages to the testing topic, and counts messages
    arriving on the observation topic in a background thread.
    """
    # @given(
    #     timeout=integers(min_value=1),
    #     mean=integers(min_value=1),
    #     totalTime=integers(min_value=1)
    # )
    def __init__(self, timeout, mean, totalTime):
        """
        The constructor.

        :param timeout: Time expiration parameter (stored as self.timeOut; not used elsewhere in this class).
        :param mean: Mean (seconds) of the exponential delay between published messages.
        :param totalTime: Total number of seconds to keep publishing.
        """
        self.count = 0
        self.topic = os.getenv("OBSERVATION_TOPIC")
        self.mean = mean
        self.totalTime = totalTime
        # self.minTime = min
        # self.maxTime = max
        self.timeOut = timeout
        self.auth = Auth(os.getenv("USERNAME"), os.getenv("PASSWORD"), method=auth.SASLMethod.PLAIN)

    def run(self):
        """
        Run the model for the integration test.

        :return: none
        """
        # Background consumer that counts everything on the observation topic.
        t1 = threading.Thread(target=self.readNumMsg, args=(self.topic,))
        t1.start()

        # Launch the SNalert model under test as a separate process.
        m = subprocess.Popen(['python3',
                              '../hop/apps/SNalert/model.py',
                              '--f',
                              './../config.env',
                              '--no-auth'
                              ])

        startTime = time.monotonic()

        # randomly publish messages
        while time.monotonic() - startTime < self.totalTime:
            # randomTime = random.randint(self.minTime, self.maxTime)
            randomTime = exponentialDistribution(self.mean)
            start2 = time.monotonic()
            # Busy-wait until the exponentially distributed delay has elapsed.
            while True:
                if time.monotonic() - start2 > randomTime:
                    break

            # write message with current time
            now = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
            # newFileName = self.writeMessage(now)
            stream = Stream(auth=self.auth)
            with stream.open(os.getenv("TESTING_TOPIC"), "w") as s:
                s.write(self.writeMessage(now))
        m.kill()

    def readNumMsg(self, topic):
        """
        Count every message received on the given topic (runs indefinitely).

        :param topic: Topic URL to subscribe to.
        :return: none
        """
        # gcnFormat = "json"
        stream = Stream(persist=True, auth=self.auth)
        # print("===")
        # print(topic)
        with stream.open(topic, "r") as s:
            for msg in s:  # set timeout=0 so it doesn't stop listening to the topic
                print("====")
                # if gcn_dict['header']['subject'] == "TEST":
                #     self.count += 1
                self.count += 1

    def getCount(self):
        # Number of messages observed so far by readNumMsg().
        return self.count

    def writeMessage(self, time):
        # Build an observation-format message dict stamped with the given time.
        # NOTE(review): the `time` parameter (a formatted timestamp string)
        # shadows the time module; the module is not used in this method.
        msg = {}
        msg["header"] = {}
        msg["header"]["MESSAGE ID"] = str(uuid.uuid4())
        msg["header"]["DETECTOR"] = "Test Detector"
        msg["header"]["SUBJECT"] = "Test"
        msg["header"]["MESSAGE SENT TIME"] = time
        msg["header"]["NEUTRINO TIME"] = time
        msg["header"]["LOCATION"] = test_locations[random.randint(0, 3)]
        msg["header"]["P VALUE"] = "0.5"
        msg["header"]["STATUS"] = "On"
        msg["header"]["MESSAGE TYPE"] = "Observation"
        msg["header"]["FROM"] = "Skylar Xu <yx48@rice.edu>"
        msg["body"] = "This is an alert message generated at run time for testing purposes."
        return msg
# def functionalTest():
#
# pass
class latencyTest(object):
    """Measures end-to-end publish-to-receive latency on a hop topic.

    Several detector threads publish continuously while a logger thread
    timestamps every message received; counters and id sets shared across
    threads are guarded by a single lock.
    """
    def __init__(self, topic, numDetector=50, time=3000):
        """
        The constructor.

        :param topic: Topic URL used for both publishing and reading.
        :param numDetector: Number of concurrent publisher ("detector") threads.
        :param time: Number of seconds each detector thread keeps publishing.
        """
        self.numMsgPublished = 0
        self.numMsgReceived = 0
        self.totalLatency = 0
        self.numDetector = numDetector
        self.detectorThreads = {}
        self.countMsg = {}
        self.totalTime = time
        self.topic = topic
        self.auth = Auth(os.getenv("USERNAME"), os.getenv("PASSWORD"), method=auth.SASLMethod.PLAIN)
        self.idsWritten = set()
        self.idsReceived = set()
        # Protects the shared counters and id sets updated from many threads.
        self.lock = threading.Lock()

    def oneDetectorThread(self, uuid):
        # Publish messages for totalTime seconds, opening a fresh stream per message.
        # NOTE(review): the `uuid` parameter (a detector id string) shadows the
        # uuid module; the module is not used in this method.
        # lock = threading.Lock()
        print(uuid)
        # print(timeout)
        startTime = time.monotonic()
        # randomly publish messages
        while time.monotonic() - startTime < self.totalTime:
            # print(time.monotonic() - startTime)
            # print(self.totalTime)
            # msg = self.writeMessage(uuid)
            stream = Stream(auth=self.auth)
            with stream.open(self.topic, "w") as s:
                msg = self.writeMessage(uuid)
                s.write(msg)
                with self.lock:
                    self.numMsgPublished += 1
                    self.idsWritten.add(msg["header"]["MESSAGE ID"])

    # def countWrittenMsgThread(self):

    def runTest(self):
        """
        Run the latency test: start the logger thread, wait ~10 s, then start
        one publisher thread per detector.

        :return:
        """
        # create the topic if doesn't exist
        # NOTE(review): this stream object is unused — the write that would
        # create the topic is commented out below.
        stream = Stream(auth=self.auth)
        # with stream.open(self.topic, "w") as s:
        #     s.write({"TEST": "TEST"})

        # first run the thread that logs every message received
        logThread = threading.Thread(target=self.logMsgs)
        logThread.start()
        # wait a few seconds
        startTime = time.monotonic()
        # Busy-wait ~10 s so the logger thread is listening before publishing starts.
        while time.monotonic() - startTime < 10:
            foo = 1
        for i in range(self.numDetector):
            # print(i)
            id = uuid.uuid4()
            # print(id)
            t = threading.Thread(target=self.oneDetectorThread, args=(str(id),))
            # self.oneDetectorThread(id)
            self.detectorThreads[id] = t
            t.start()
        # # first run the thread that logs every message received
        # logThread = threading.Thread(target=self.logMsgs)
        # logThread.start()

    def countMsgThread(self, msg_dict):
        """
        Process one received message: compute its publish-to-receive latency
        and fold it into the shared totals.

        :param msg_dict: the message content dict (header/body)
        :return:
        """
        # msg_dict = msg.asdict()['content']
        # NOTE(review): `id` is assigned but never used (and shadows the builtin).
        id = msg_dict['header']['DETECTOR']
        msg_id = msg_dict["header"]["MESSAGE ID"]
        receivedTime = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
        sentTime = msg_dict['header']['MESSAGE SENT TIME']
        timeDiff = datetime.datetime.strptime(receivedTime, os.getenv("TIME_STRING_FORMAT")) - datetime.datetime.strptime(sentTime, os.getenv("TIME_STRING_FORMAT"))
        timeDiff_inSeconds = timeDiff.total_seconds()
        # print("HERE")
        with self.lock:
            # print("____")
            self.numMsgReceived += 1
            self.totalLatency += timeDiff_inSeconds
            self.idsReceived.add(msg_id)

    def logMsgs(self):
        # Subscribe to the topic and hand every received message to its own
        # counting thread.
        # stream = Stream(persist=True, auth=self.auth, start_at=StartPosition.EARLIEST)
        stream = Stream(persist=True, auth=self.auth)
        with stream.open(self.topic, "r") as s:
            for msg in s:  # set timeout=0 so it doesn't stop listening to the topic
                t = threading.Thread(target=self.countMsgThread, args=(msg.asdict()['content'],))
                t.start()

    def calculateAvgLatency(self):
        """
        Calculate the mean publish-to-receive latency in seconds.

        :return: total latency divided by the number of messages received
        """
        return self.totalLatency * 1.0 / self.numMsgReceived

    def writeMessage(self, detector_id):
        """
        Return a dictionary of the message in the required format.

        :param detector_id: identifier of the detector thread sending the message
        :return: message dict with header and body
        """
        now = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
        msg = {}
        msg["header"] = {}
        msg["header"]["MESSAGE ID"] = str(uuid.uuid4())
        msg["header"]["DETECTOR"] = detector_id
        msg["header"]["SUBJECT"] = "Test"
        msg["header"]["MESSAGE SENT TIME"] = now
        msg["header"]["NEUTRINO TIME"] = now
        msg["header"]["LOCATION"] = test_locations[random.randint(0, 3)]
        msg["header"]["P VALUE"] = "0.5"
        msg["header"]["STATUS"] = "On"
        msg["header"]["MESSAGE TYPE"] = "Latency Testing"
        msg["header"]["FROM"] = "Skylar Xu <yx48@rice.edu>"
        msg["body"] = "This is an alert message generated at run time for testing message latency."
        return msg

    def check(self):
        # Sanity check: every published message must also have been received.
        assert self.numMsgReceived == self.numMsgPublished
if __name__ == '__main__':
    # Script entry point: run a short latency test and report the results.
    print("Latency Test")
    print("----------------------------------------")
    print("Integration Test #1")
    test = latencyTest("kafka://dev.hop.scimma.org:9092/snews-latencyTest", 5, 50)
    print(test.totalTime)
    test.runTest()
    print("------")
    startTime = time.monotonic()
    # Busy-wait ~100 s to let publisher and logger threads finish their work.
    while time.monotonic() - startTime < 100:
        foo = 1
    # print(time.monotonic() - startTime)
    print(test.calculateAvgLatency())
    print(" %d messages written." % test.numMsgPublished)
    print(" %d messages received and read." % test.numMsgReceived)
    # print(" %d messages written." % len(test.idsWritten))
    # print(" %d messages received and read." % len(test.idsReceived))
    # print(" %d messages read in written." % len(test.idsReceived.intersection(test.idsWritten)))
    assert test.numMsgPublished == test.numMsgReceived
| 32.97619 | 164 | 0.582852 | 8,167 | 0.737094 | 0 | 0 | 369 | 0.033303 | 0 | 0 | 4,262 | 0.384657 |
2a5062017f7cc51174da9372c129642a7f1b6002 | 992 | py | Python | iotkit/led.py | dhrg/iot-edison | d1a4eed659891ecbebe45e4e2fcf55c7828d01e7 | [
"CC-BY-4.0"
] | null | null | null | iotkit/led.py | dhrg/iot-edison | d1a4eed659891ecbebe45e4e2fcf55c7828d01e7 | [
"CC-BY-4.0"
] | null | null | null | iotkit/led.py | dhrg/iot-edison | d1a4eed659891ecbebe45e4e2fcf55c7828d01e7 | [
"CC-BY-4.0"
] | null | null | null | import mraa # For accessing the GPIO
import time # For sleeping between blinks
global led
def init_led(pin):
    """Bind the module-level LED handle to the given GPIO pin and drive it low."""
    global led
    led = mraa.Gpio(pin)  # Get the LED pin object
    led.dir(mraa.DIR_OUT)  # Set the direction as output
    led.write(0)  # Start with the pin low (LED off)
def write_led(signal):
    """Forward `signal` (0/1) to the shared LED pin; requires init_led() first."""
    global led
    led.write(signal)
def main():
    """Blink the LED on pin D5 forever, toggling its state once per second."""
    pin = 5  # we are using D5 pin
    led = mraa.Gpio(pin)  # Get the LED pin object
    led.dir(mraa.DIR_OUT)  # Set the direction as output
    ledState = False  # LED is off to begin with
    led.write(ledState)

    # One infinite loop coming up
    while True:
        if ledState == False:
            # LED is off, turn it on
            led.write(1)
            ledState = True  # LED is on
        else:
            led.write(0)
            ledState = False
        # Python 2 print statement — this module targets Python 2.
        print "LED is: %s" %(ledState)
        # Wait for some time
        time.sleep(1)
if __name__ == '__main__':
    main()
# BUG FIX: the module previously ended with a bare `del led`, which executed at
# import time and raised NameError, because the module-level name `led` is never
# bound (the module-scope `global led` is a no-op and main() only uses a local).
# The statement is removed; callers that used init_led() can drop the handle
# themselves if they need to release the pin.
del led | 24.8 | 61 | 0.556452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 312 | 0.314516 |
2a51385674b04ab4c7bc2dc4e750a01f918e381a | 1,511 | py | Python | Assignments/Exam Preparation/Python Advanced Exam - 27 June 2020/03. List Manipulator.py | KaloyankerR/python-fundamentals-repository | b8e69523ea7e6aa352e8398f0202e283374a0f7c | [
"MIT"
] | null | null | null | Assignments/Exam Preparation/Python Advanced Exam - 27 June 2020/03. List Manipulator.py | KaloyankerR/python-fundamentals-repository | b8e69523ea7e6aa352e8398f0202e283374a0f7c | [
"MIT"
] | null | null | null | Assignments/Exam Preparation/Python Advanced Exam - 27 June 2020/03. List Manipulator.py | KaloyankerR/python-fundamentals-repository | b8e69523ea7e6aa352e8398f0202e283374a0f7c | [
"MIT"
] | null | null | null | from collections import deque
def list_manipulator(numbers, *args):
numbers = deque(numbers)
action = args[0]
direction = args[1]
if action == 'add':
parameters = [int(x) for x in args[2:]]
if direction == 'beginning':
[numbers.appendleft(x) for x in parameters[::-1]]
elif direction == 'end':
[numbers.append(x) for x in parameters]
elif action == 'remove':
parameter = None
if len(args) == 3:
parameter = int(args[2])
if parameter:
if parameter >= len(numbers):
numbers.clear()
else:
if direction == 'beginning':
[numbers.popleft() for x in range(parameter)]
elif direction == 'end':
[numbers.pop() for x in range(parameter)]
else:
if direction == 'beginning':
numbers.popleft()
elif direction == 'end':
numbers.pop()
return [int(x) for x in numbers]
# Demo: exercise each action/direction combination and print the results.
print(list_manipulator([1, 2, 3], "remove", "end"))
print(list_manipulator([1, 2, 3], "remove", "beginning"))
print(list_manipulator([1, 2, 3], "add", "beginning", 20))
print(list_manipulator([1, 2, 3], "add", "end", 30))
print(list_manipulator([1, 2, 3], "remove", "end", 2))
print(list_manipulator([1, 2, 3], "remove", "beginning", 2))
print(list_manipulator([1, 2, 3], "add", "beginning", 20, 30, 40))
print(list_manipulator([1, 2, 3], "add", "end", 30, 40, 50))
| 32.847826 | 66 | 0.544011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.117141 |
2a522cf24383152e1f43899c4fe827f5c6c726b0 | 6,738 | py | Python | mayan/apps/documents/tests/test_links.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | [
"Apache-2.0"
] | 343 | 2015-01-05T14:19:35.000Z | 2018-12-10T19:07:48.000Z | mayan/apps/documents/tests/test_links.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | [
"Apache-2.0"
] | 191 | 2015-01-03T00:48:19.000Z | 2018-11-30T09:10:25.000Z | mayan/apps/documents/tests/test_links.py | bonitobonita24/Mayan-EDMS | 7845fe0e1e83c81f5d227a16116397a3d3883b85 | [
"Apache-2.0"
] | 114 | 2015-01-08T20:21:05.000Z | 2018-12-10T19:07:53.000Z | from django.urls import reverse
from ..links.document_file_links import (
link_document_file_delete, link_document_file_download_quick
)
from ..links.favorite_links import (
link_document_favorites_add, link_document_favorites_remove
)
from ..links.trashed_document_links import link_document_restore
from ..models import TrashedDocument
from ..permissions import (
permission_document_file_delete, permission_document_file_download,
permission_document_view, permission_trashed_document_restore
)
from .base import GenericDocumentViewTestCase
from .mixins.favorite_document_mixins import FavoriteDocumentTestMixin
class FavoriteDocumentLinkTestCase(
    FavoriteDocumentTestMixin, GenericDocumentViewTestCase
):
    """Resolution tests for the favorite add/remove document links."""

    def test_favorite_document_add_link_no_permission(self):
        self._create_test_document_stub()

        self.add_test_view(test_object=self.test_document)
        context = self.get_test_view()
        resolved_link = link_document_favorites_add.resolve(context=context)
        # Without the document view permission the link must not resolve.
        self.assertEqual(resolved_link, None)

    def test_favorite_document_add_link_with_access(self):
        self._create_test_document_stub()

        self.grant_access(
            obj=self.test_document_stub, permission=permission_document_view
        )

        self.add_test_view(test_object=self.test_document)
        context = self.get_test_view()
        resolved_link = link_document_favorites_add.resolve(context=context)
        self.assertNotEqual(resolved_link, None)

    def test_favorite_document_add_link_external_user_with_access(self):
        # The document was favorited by a different (external) user; the add
        # link still resolves for the acting user.
        self._create_test_user()
        self._create_test_document_stub()

        self.grant_access(
            obj=self.test_document_stub, permission=permission_document_view
        )

        self._test_document_favorite_add(user=self.test_user)

        self.add_test_view(test_object=self.test_document)
        context = self.get_test_view()
        resolved_link = link_document_favorites_add.resolve(context=context)
        self.assertNotEqual(resolved_link, None)

    def test_favorite_document_remove_link_no_permission(self):
        self._create_test_document_stub()

        self._test_document_favorite_add()

        self.add_test_view(test_object=self.test_document)
        context = self.get_test_view()
        resolved_link = link_document_favorites_remove.resolve(context=context)
        self.assertEqual(resolved_link, None)

    def test_favorite_document_remove_link_with_access(self):
        self._create_test_document_stub()

        self.grant_access(
            obj=self.test_document_stub, permission=permission_document_view
        )

        self._test_document_favorite_add()

        self.add_test_view(test_object=self.test_document)
        context = self.get_test_view()
        resolved_link = link_document_favorites_remove.resolve(context=context)
        self.assertNotEqual(resolved_link, None)

    def test_favorite_document_remove_link_external_user_with_access(self):
        # Favorite belongs to a different user, so the remove link must not
        # resolve for the acting user even with view access.
        self._create_test_user()
        self._create_test_document_stub()

        self.grant_access(
            obj=self.test_document_stub, permission=permission_document_view
        )

        self._test_document_favorite_add(user=self.test_user)

        self.add_test_view(test_object=self.test_document)
        context = self.get_test_view()
        resolved_link = link_document_favorites_remove.resolve(context=context)
        self.assertEqual(resolved_link, None)
class DocumentsLinksTestCase(GenericDocumentViewTestCase):
    """Resolution tests for the document file delete and quick download links."""

    def test_document_file_delete_link_no_permission(self):
        self._upload_test_document_file()

        # NOTE(review): assertTrue's second argument is the failure message;
        # assertEqual(..., 2) was likely intended here — confirm.
        self.assertTrue(self.test_document.files.count(), 2)

        self.add_test_view(test_object=self.test_document.files.first())
        context = self.get_test_view()
        resolved_link = link_document_file_delete.resolve(context=context)
        self.assertEqual(resolved_link, None)

    def test_document_file_delete_link_with_permission(self):
        self._upload_test_document_file()

        # NOTE(review): same assertTrue/assertEqual mixup as above.
        self.assertTrue(self.test_document.files.count(), 2)

        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_delete
        )

        self.add_test_view(test_object=self.test_document.files.first())
        context = self.get_test_view()
        resolved_link = link_document_file_delete.resolve(context=context)
        self.assertNotEqual(resolved_link, None)
        # The resolved link must point at the delete view of the first file.
        self.assertEqual(
            resolved_link.url,
            reverse(
                viewname=link_document_file_delete.view,
                args=(self.test_document.files.first().pk,)
            )
        )

    def test_document_file_download_link_no_permission(self):
        self.add_test_view(test_object=self.test_document.file_latest)
        context = self.get_test_view()
        resolved_link = link_document_file_download_quick.resolve(context=context)
        self.assertEqual(resolved_link, None)

    def test_document_file_download_link_with_permission(self):
        self.grant_access(
            obj=self.test_document,
            permission=permission_document_file_download
        )
        self.add_test_view(test_object=self.test_document.file_latest)
        context = self.get_test_view()
        resolved_link = link_document_file_download_quick.resolve(context=context)
        self.assertNotEqual(resolved_link, None)
        # The resolved link must target the download view of the latest file.
        self.assertEqual(
            resolved_link.url,
            reverse(
                viewname=link_document_file_download_quick.view,
                args=(self.test_document.file_latest.pk,)
            )
        )
class TrashedDocumentsLinksTestCase(GenericDocumentViewTestCase):
    """Resolution tests for the trashed document restore link."""

    def setUp(self):
        super().setUp()
        # Move the test document to the trash and build the view context
        # around its TrashedDocument proxy.
        self.test_document.delete()
        self.test_trashed_document = TrashedDocument.objects.get(
            pk=self.test_document.pk
        )
        self.add_test_view(test_object=self.test_trashed_document)
        self.context = self.get_test_view()

    def test_trashed_document_restore_link_no_permission(self):
        resolved_link = link_document_restore.resolve(context=self.context)
        self.assertEqual(resolved_link, None)

    def test_trashed_document_restore_link_with_permission(self):
        self.grant_access(
            obj=self.test_document, permission=permission_trashed_document_restore
        )
        resolved_link = link_document_restore.resolve(context=self.context)
        self.assertNotEqual(resolved_link, None)
        # The resolved link must target the restore view of the trashed document.
        self.assertEqual(
            resolved_link.url,
            reverse(
                viewname=link_document_restore.view,
                args=(self.test_trashed_document.pk,)
            )
        )
| 35.650794 | 82 | 0.723212 | 6,096 | 0.90472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2a52a6d200cba48f11a709eb7672ab1b68768cc1 | 218 | py | Python | cogs/anime.py | TheLastNever/discord_bot | b4bb5513a70ebd0f6f794b035f498de3ae6f9a8f | [
"MIT"
] | null | null | null | cogs/anime.py | TheLastNever/discord_bot | b4bb5513a70ebd0f6f794b035f498de3ae6f9a8f | [
"MIT"
] | null | null | null | cogs/anime.py | TheLastNever/discord_bot | b4bb5513a70ebd0f6f794b035f498de3ae6f9a8f | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
class anime(commands.Cog):
    """Discord cog grouping anime-related functionality."""

    def __init__(self, bot):
        # Keep a reference to the bot so cog commands can use shared state.
        self.bot = bot
        # Most recently seen member; presumably for greeting/tracking logic — TODO confirm.
        self._last_member = None
def setup(bot):
    # Extension entry point: called by bot.load_extension() to register the cog.
    bot.add_cog(anime(bot))
2a5311bf4850a13f8b1d521e223d6d0f9a689146 | 1,770 | py | Python | tests/controller/templates_manager/local_temp/bridge.py | threefoldtech/0-templates | 4106bb3d4d1de305557bf4748a7d77ffeb302abb | [
"Apache-2.0"
] | 1 | 2019-01-20T17:50:53.000Z | 2019-01-20T17:50:53.000Z | tests/controller/templates_manager/local_temp/bridge.py | threefoldtech/0-templates | 4106bb3d4d1de305557bf4748a7d77ffeb302abb | [
"Apache-2.0"
] | 192 | 2018-08-01T13:31:16.000Z | 2020-05-29T09:41:06.000Z | tests/controller/templates_manager/local_temp/bridge.py | threefoldtech/0-templates | 4106bb3d4d1de305557bf4748a7d77ffeb302abb | [
"Apache-2.0"
] | 1 | 2018-08-09T12:30:52.000Z | 2018-08-09T12:30:52.000Z | from jumpscale import j
from zerorobot.service_collection import ServiceNotFoundError
from testconfig import config
import random
class BrigeManager:
    def __init__(self, parent, service_name=None):
        """Wrap bridge-template services on the parent's remote robot.

        :param parent: test controller providing logger, remote_robot and helpers.
        :param service_name: if given, attach to this existing bridge service
            instead of waiting for install() to create one.
        """
        # Template UID used when creating bridge services on the remote robot.
        self.bridge_template = 'github.com/threefoldtech/0-templates/bridge/0.0.1'
        self._parent = parent
        self.logger = self._parent.logger
        self.robot = self._parent.remote_robot
        self._bridge_service = None
        if service_name:
            self._bridge_service = self.robot.service.get(name=service_name)
    @property
    def service(self):
        # The underlying bridge service; logs an error (and implicitly returns
        # None) if install() has not been called yet.
        if self._bridge_service == None:
            self.logger.error('bridge_service is None, Install it first.')
        else:
            return self._bridge_service
    def install(self, wait=True, **kwargs):
        """Create a bridge service on the remote robot and run its install action.

        :param wait: passed as ``die`` to the action wait — with True the call
            blocks and raises on action failure.
        :param kwargs: overrides merged into the default bridge data
            (name, hwaddr, mode, nat, settings).
        """
        self.default_data = {
            'name': self._parent.random_string()[:10],
            'hwaddr': None,
            'mode': 'none',
            'nat': False,
            'settings': {}
        }
        if kwargs:
            self.default_data.update(kwargs)
        self.bridge_service_name = self.default_data['name']
        self._bridge_service = self.robot.services.create(self.bridge_template, self.bridge_service_name, self.default_data)
        self._bridge_service.schedule_action('install').wait(die=wait)
    def uninstall(self):
        # Block until the uninstall action completes; raises on failure (die=True).
        return self.service.schedule_action('uninstall').wait(die=True)

    def nic_add(self, nic):
        # Attach a nic to the bridge; `nic` is forwarded to the template action.
        return self.service.schedule_action('nic_add', args={'nic': nic}).wait(die=True)

    def nic_remove(self, nic):
        # Detach a nic from the bridge.
        return self.service.schedule_action('nic_remove', args={'nic': nic}).wait(die=True)
def nic_list(self):
return self.service.schedule_action('nic_list').wait(die=True) | 36.875 | 124 | 0.651977 | 1,639 | 0.925989 | 0 | 0 | 202 | 0.114124 | 0 | 0 | 202 | 0.114124 |