Dataset schema:

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 - 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 - 209 |
| max_stars_repo_name | string | length 5 - 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 - 10 |
| max_stars_count | int64, nullable | 1 - 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 - 209 |
| max_issues_repo_name | string | length 5 - 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 - 10 |
| max_issues_count | int64, nullable | 1 - 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 - 209 |
| max_forks_repo_name | string | length 5 - 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 - 10 |
| max_forks_count | int64, nullable | 1 - 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 4 - 1.02M |
| avg_line_length | float64 | 1.07 - 66.1k |
| max_line_length | int64 | 4 - 266k |
| alphanum_fraction | float64 | 0.01 - 1 |
hexsha: 7f8501089d50cf608e52179d7f00962494fc1dec | size: 7,471 | ext: py | lang: Python
path: custom_components/plex_assistant/__init__.py | repo: nashant/plex_assistant | repo_head: 6768b7c448babc634ee504544874c13153de2484 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
"""
Plex Assistant is a component for Home Assistant to add control of Plex to
Google Assistant with a little help from IFTTT or DialogFlow.
Play to Google Cast devices or Plex Clients using fuzzy searches for media and
cast device names.
https://github.com/maykar/plex_assistant
"""
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
DOMAIN = "plex_assistant"
CONF_URL = "url"
CONF_TOKEN = "token"
CONF_DEFAULT_CAST = "default_cast"
CONF_LANG = "language"
CONF_TTS_ERROR = "tts_errors"
CONF_ALIASES = "aliases"
CONFIG_SCHEMA = vol.Schema({DOMAIN: {
vol.Required(CONF_URL): cv.url,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_DEFAULT_CAST): cv.string,
vol.Optional(CONF_LANG, default='en'): cv.string,
vol.Optional(CONF_TTS_ERROR, default=True): cv.boolean,
vol.Optional(CONF_ALIASES, default={}): vol.Any(dict),
}}, extra=vol.ALLOW_EXTRA)
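# A minimal sketch of configuration data this schema accepts (placeholder
# values, not taken from the source). The same keys appear under the
# "plex_assistant:" block of configuration.yaml:
#
#   CONFIG_SCHEMA({
#       DOMAIN: {
#           "url": "http://192.168.1.10:32400",
#           "token": "YOUR_PLEX_TOKEN",
#           "default_cast": "Living Room TV",
#           "aliases": {"downstairs": "Living Room TV"},
#       }
#   })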
class PA:
""" Hold our libraries, devices, etc. """
plex = None
server = None
lib = {}
devices = {}
device_names = []
clients = {}
client_names = []
client_sensor = []
alias_names = []
client_update = True
async def async_setup(hass, config):
"""Called when Home Assistant is loading our component."""
import os
import logging
from gtts import gTTS
from plexapi.server import PlexServer
from pychromecast import get_chromecasts
from pychromecast.controllers.plex import PlexController
from homeassistant.helpers.network import get_url
from .localize import LOCALIZE
from .process_speech import process_speech
from .helpers import (
cc_callback,
find_media,
fuzzy,
get_libraries,
media_error,
video_selection,
)
conf = config[DOMAIN]
base_url = conf.get(CONF_URL)
token = conf.get(CONF_TOKEN)
default_cast = conf.get(CONF_DEFAULT_CAST)
lang = conf.get(CONF_LANG)
tts_error = conf.get(CONF_TTS_ERROR)
aliases = conf.get(CONF_ALIASES)
_LOGGER = logging.getLogger(__name__)
localize = LOCALIZE[lang] if lang in LOCALIZE.keys() else LOCALIZE["en"]
zc = None
try:
from homeassistant.components.zeroconf import async_get_instance
zc = await async_get_instance(hass)
except:
from zeroconf import Zeroconf
zc = Zeroconf()
directory = hass.config.path() + "/www/plex_assist_tts/"
if tts_error and not os.path.exists(directory):
os.makedirs(directory, mode=0o777)
get_chromecasts(blocking=False, callback=cc_callback, zeroconf_instance=zc)
def sync_io_server(base_url, token):
PA.server = PlexServer(base_url, token)
PA.plex = PA.server.library
PA.lib = get_libraries(PA.plex)
await hass.async_add_executor_job(sync_io_server, base_url, token)
PA.alias_names = list(aliases.keys()) if aliases else []
def handle_input(call):
if not call.data.get("command").strip():
_LOGGER.warning(localize["no_call"])
return
command_string = call.data.get("command").strip().lower()
_LOGGER.debug("Command: %s", command_string)
PA.client_update = True
get_chromecasts(blocking=False, callback=cc_callback,
zeroconf_instance=zc)
if localize["controls"]["update_sensor"] in command_string:
update_sensor(hass)
return
cast = None
alias = ["", 0]
client = False
speech_error = False
command = process_speech(command_string, localize, default_cast, PA)
PA.device_names = list(PA.devices.keys())
if not command["control"]:
_LOGGER.debug({i: command[i] for i in command if i != "library"})
if PA.lib["updated"] < PA.plex.search(sort="addedAt:desc", limit=1)[0].addedAt:
PA.lib = get_libraries(PA.plex)
devices = PA.device_names + PA.client_names + PA.client_ids
device = fuzzy(command["device"] or default_cast, devices)
if aliases:
alias = fuzzy(command["device"] or default_cast, PA.alias_names)
if alias[1] < 60 and device[1] < 60:
_LOGGER.warning(
'{0} {1}: "{2}"'.format(
localize["cast_device"].capitalize(),
localize["not_found"],
command["device"].title(),
)
)
_LOGGER.debug("Device Score: %s", device[1])
_LOGGER.debug("Devices: %s", str(devices))
if aliases:
_LOGGER.debug("Alias Score: %s", alias[1])
_LOGGER.debug("Aliases: %s", str(PA.alias_names))
return
name = aliases[alias[0]] if alias[1] > device[1] else device[0]
cast = PA.devices[name] if name in PA.device_names else name
client = isinstance(cast, str)
if client:
client_device = next(
c for c in PA.clients if c.title == cast or c.machineIdentifier == cast
)
cast = client_device
if command["control"]:
control = command["control"]
if client:
plex_c = cast
else:
plex_c = PlexController()
cast.wait()
cast.register_handler(plex_c)
if control == "play":
plex_c.play()
elif control == "pause":
plex_c.pause()
elif control == "stop":
plex_c.stop()
elif control == "jump_forward":
plex_c.stepForward()
elif control == "jump_back":
plex_c.stepBack()
return
try:
result = find_media(command, command["media"], PA.lib)
media = video_selection(command, result["media"], result["library"])
except Exception:
error = media_error(command, localize)
if tts_error:
tts = gTTS(error, lang=lang)
tts.save(directory + "error.mp3")
speech_error = True
if speech_error and not client:
cast.wait()
med_con = cast.media_controller
mp3 = get_url(hass) + "/local/plex_assist_tts/error.mp3"
med_con.play_media(mp3, "audio/mpeg")
med_con.block_until_active()
return
_LOGGER.debug("Media: %s", str(media))
if client:
_LOGGER.debug("Client: %s", cast)
plex_c = cast
plex_c.playMedia(media)
else:
_LOGGER.debug("Cast: %s", cast.name)
plex_c = PlexController()
cast.register_handler(plex_c)
cast.wait()
plex_c.block_until_playing(media)
update_sensor(hass)
hass.services.async_register(DOMAIN, "command", handle_input)
return True
def update_sensor(hass):
clients = [
{client.title: {"ID": client.machineIdentifier, "type": client.product}}
for client in PA.clients
]
devicelist = list(PA.devices.keys())
state = str(len(devicelist + clients)) + " connected devices."
attributes = {
"Connected Devices": {
"Cast Devices": devicelist or "None",
"Plex Clients": clients or "None",
},
"friendly_name": "Plex Assistant Devices",
}
sensor = "sensor.plex_assistant_devices"
hass.states.async_set(sensor, state, attributes)
avg_line_length: 31.259414 | max_line_length: 87 | alphanum_fraction: 0.600455

hexsha: 5362e185b312c7f503b4d5ee9c1635cfa09672a8 | size: 10,189 | ext: py | lang: Python
path: MetaScreener/external_sw/mgltools/MGLToolsPckgs/AutoDockFR/evaluateEnergy.py | repo: bio-hpc/metascreener | repo_head: 6900497629f601c4b6c0c37da26de58ffa221988 | licenses: ["Apache-2.0"]
max_stars_count: 8 (2021-12-14T21:30:01.000Z to 2022-02-14T11:30:03.000Z) | max_issues_count: null | max_forks_count: null
########################################################################
#
# Date: 2000 Authors: Michel Sanner, Matt Danielson
#
# sanner@scripps.edu
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: Michel Sanner and TSRI 2012
#
#########################################################################
#
# $Header: /opt/cvs/AutoDockFR/evaluateEnergy.py,v 1.5 2012/06/21 22:23:02 mldaniel Exp $
#
# $Id: evaluateEnergy.py,v 1.5 2012/06/21 22:23:02 mldaniel Exp $
#
import numpy, os
class ADEnergyEvaluator:
"""
This class allows calculating the energy of a complex
"""
def __init__(self, receptor, ligand, forceFieldVersion='4', receptorFT=None, scorerOpts={}):
"""
evaluator <- ADEnergyEvaluator(receptor, ligand, forceFieldVersion='4', receptorFT=None, scorerOpts={})
receptor - receptor molecule
ligand - ligand molecule
forceFieldVersion - force field version; only '4' (AutoDock 4.2) is supported
receptorFT - optional receptor flexibility tree (enables flexible-receptor scoring)
scorerOpts - dictionary of optional parameters for the scorer
"""
if forceFieldVersion=='4':
from AutoDockFR.ADCscorer import AD42ScoreC as ScorerClass
# Weights from AD4.1_bound.dat
self.FE_coeff_vdW_42 = 0.1662 # van der waals
self.FE_coeff_hbond_42 = 0.1209 # hydrogen bonding
self.FE_coeff_estat_42 = 0.1406 # electrostatics
self.FE_coeff_desolv_42 = 0.1322 # desolvation
self.FE_coeff_tors_42 = 0.2983 # torsional
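# Clarifying note (added; not in the original source): AutoDock 4.2 reports the
# intermolecular energy as a weighted sum of these terms, roughly
#   E = W_vdW*vdW + W_hbond*hbond + W_estat*estat + W_desolv*desolv
# plus W_tors * TORSDOF as the ligand torsional penalty; the per-atom breakdown
# methods below apply the same weights to the per-atom score arrays.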
else:
raise ValueError("bad forcefield version, expect '4' got %s" % forceFieldVersion)
# ligand atoms
self.ligand = ligand
self.ligAtoms = ligand.allAtoms
self.receptorFT = receptorFT
# passed a Flexibility tree
if receptorFT:
rigidRecAtoms = receptorFT.getRigidAtoms()
flexRecAtoms = receptorFT.getMovingAtoms()
else:
rigidRecAtoms = receptor.allAtoms
flexRecAtoms = []
#print 'building scorer for %d rigid Rec %d ligand and %d flexible Rec atoms' % (
# len(rigidRecAtoms), len(self.ligAtoms), len(flexRecAtoms))
# Create scorer
self.scorer=ScorerClass(rigidRecAtoms, self.ligAtoms, self.ligand.torTree, self.ligand.TORSDOF,
flexRecAtoms=flexRecAtoms, **scorerOpts)
# Method call to create a dictionary that stores all the scoring values
self.data=self.scorer.getAllScoreTerms()
def getProtLigScore(self, receptorFT=None):
"""
None <- getProtLigScore(receptorFT)
receptorFT - receptor flexibility tree. Used as a marker to print one or two scores
Will print the protein-ligand interaction energy to the screen
"""
# If you have receptor flexibility, need 1. RigRec-Ligand score
# & 2. FlexRec-Ligand score
if receptorFT != None:
print self.data['FlexProtLig'][1] + self.data['RigidProtLig'][1]
else:
print self.data['RigidProtLig'][1]
def getProtLigScoreBreakdown(self):
"""
None <- getProtLigScoreBreakdown()
Will print the scoring breakdown to the screen
"""
self.scorer.printAllScoreTerms(self.data)
def getPerAtomBreakdown(self, molSyst):
lines = []
# Get the dictionary of terms
d1 = self.scorer.getTerms(molSyst)
perLigAtom = {}
fmt = "\t%-20s "
shortTermNames = ["Atmname"]
termTotals = []
# Assign each scoring term & associated array to the dictionary
for name in d1['names']:
term, weight = d1['terms'][name]
array = numpy.array(term.get_score_array(), 'f')
perLigAtom[name] = array.sum(axis=0)*weight
fmt += "%-10s "
shortTermNames.append(name[:min(len(name),10)])
termTotals.append(0)
print "** per atom ligand breakdown"
# create header line
line = fmt%tuple(shortTermNames)
lines.append(line)
# Using the shape attribute of numpy to return a tuple of the dimensions of the array
nbAtR, nbAtL = array.shape
# Score for each ligand atom
#ligAtoms = molSyst.get_atoms(molSyst.atomSetIndices['set2'])
ligAtoms = molSyst.get_atoms(1)
for i in range(nbAtL):
# List to store atom name and the scores
values = [ligAtoms[i].get_name()]
for j, name in enumerate(d1['names']):
val = perLigAtom[name][i]
values.append(val)
termTotals[j] += val
# Add the line to list
line = fmt % tuple(values)
lines.append(line)
from math import fabs
# Check to make sure the sum of the individual ligand atom scoring contributions adds up to the total value reported
# We use the fabs because we are dealing with floats and rounding error.
for j, name in enumerate(d1['names']):
term, weight = d1['terms'][name]
if fabs(termTotals[j] - term.get_score() * weight) > 0.01:
print 'WARNING sum of per ligand atm %s interaction (%f) does not match term (%f)'%(name, termTotals[j], term.get_score() * weight)
# Summary of values @ the end of Energy.txt
line = "\t--------------------------------------------------------------------------"
lines.append(line)
line = fmt % (("Sum",)+tuple(termTotals))
lines.append(line)
return lines
def getProtLigScorePerAtom(self):
"""
None <- getProtLigScorePerAtom()
Per atom energy analysis: breakdown of the scoring contribution: vdw, ele, hbond, desolv
"""
# return a per atom list of energetic contributions
self.line_lst = []
print "** per atom ligand breakdown"
line = "\t%-10s %-10s %-10s %-10s %-10s %-10s %-10s" % ("Atmname", "vdW+Hb+Ele", "vdW+Hbond", "Ele", "VDW", "Hbond", "Desolv")
print line
self.line_lst.append(line)
# vdw.get_score_array() returns a list of lists that stores the score for each ligand atom. 1D = each protein atom, 2D = each ligand atom
vdw = self.scorer.vdw # instance of cAutoDock.scorer.VanDerWaals
#Using numpy to create an array not a list of lists
vdwArray = numpy.array(vdw.get_score_array())
estat = self.scorer.estat
estatArray = numpy.array(estat.get_score_array())
hBond = self.scorer.hBond
hBondArray = numpy.array(hBond.get_score_array())
ds = self.scorer.ds
dsArray = numpy.array(ds.get_score_array())
# Using the shape attribute of numpy to return a tuple of the dimensions of the array
nbAtR, nbAtL = vdwArray.shape
# Array that contains the sum of all protein interactions with each ligand atom. Scaled by the 4.2 prefactors
perLigAtVdw = vdwArray.sum(axis=0) * self.FE_coeff_vdW_42
perLigAtElec = estatArray.sum(axis=0) * self.FE_coeff_estat_42
perLigAtds = dsArray.sum(axis=0) * self.FE_coeff_desolv_42
perLigAthb = hBondArray.sum(axis=0) * self.FE_coeff_hbond_42
self.totalVDW = self.totalELE = self.totalHB = self.totalDS = 0.0
# Print out the scores for each ligand atom
for i in range(nbAtL):
# Needed to produce an output file that looks like AD4.2 .dlg file
vdwHbScore = perLigAtVdw[i] + perLigAthb[i]
vdwHbEleScore = vdwHbScore + perLigAtElec[i]
line = "\t%-10s %-10f %-10f %-10f %-10f %-10f %-10f" % \
(self.ligAtoms[i].name, vdwHbEleScore, vdwHbScore, perLigAtElec[i], perLigAtVdw[i], perLigAthb[i], perLigAtds[i])
self.line_lst.append(line)
print line
# Keep track of the total score for Summary of Energy values
self.totalVDW += perLigAtVdw[i]
self.totalELE += perLigAtElec[i]
self.totalHB += perLigAthb[i]
self.totalDS += perLigAtds[i]
from math import fabs
# Check to make sure the sum of the individual ligand atom scoring contributions adds up to the total value reported
# We use the fabs because we are dealing with floats and rounding error.
assert fabs(self.totalVDW - vdw.get_score() * self.FE_coeff_vdW_42) < 0.0001, \
"sum of per ligand VDW interactions (%f) does not match VDW term (%f)"%(self.totalVDW, vdw.get_score() * self.FE_coeff_vdW_42)
assert fabs(self.totalELE - estat.get_score() * self.FE_coeff_estat_42) < 0.0001, \
"sum of per ligand ELE interactions (%f) does not match ELE term (%f)"%(self.totalELE, estat.get_score() * self.FE_coeff_estat_42)
assert fabs(self.totalHB - hBond.get_score() * self.FE_coeff_hbond_42) < 0.0001, \
"sum of per ligand HB interactions (%f) does not match HB term (%f)"%(self.totalHB, hBond.get_score() * self.FE_coeff_hbond_42)
assert fabs(self.totalDS - ds.get_score() * self.FE_coeff_desolv_42) < 0.0001, \
"sum of per ligand DS interactions (%f) does not match DS term (%f)"%(self.totalDS, ds.get_score() * self.FE_coeff_desolv_42)
# Summary of values @ the end of Energy.txt
line = "\t--------------------------------------------------------------------------"
print line
self.line_lst.append(line)
line = "\t%-10s %-10.2f %-10.2f %-10.2f %-10.2f %-10.2f %-10.2f" % \
("Sum", self.totalVDW + self.totalELE + self.totalHB + self.totalDS, \
self.totalVDW+self.totalHB, self.totalELE, self.totalVDW, self.totalHB, self.totalDS)
print line
self.line_lst.append(line)
def getProtLigScorePerAtomToFile(self):
"""
None <- getProtLigScorePerAtomToFile()
saves a file w/per atom energy analysis: breakdown of the scoring contribution: vdw, ele, hbond, desolv
output = cAD_Energy.txt
"""
# Check to see if Energy.txt file already exists. Remove if it does
if os.path.isfile("cAD_Energy.txt"):
os.remove("cAD_Energy.txt")
# Output file
fo = open("cAD_Energy.txt", "w")
fo.write("cAutoDock Intermolecular Energy Analysis\n\n")
for line in self.line_lst:
fo.write(line + "\n")
#MLD: I think we need this for some other post-processing script....
# Summary of values @ the end of Energy.txt
fo.write("\n\n\tVDW Energy: %s\n" % (self.totalVDW))
fo.write("\tELE Energy: %s\n" % (self.totalELE))
fo.write("\tHB Energy: %s\n" % (self.totalHB))
fo.write("\tDS Energy: %s\n" % (self.totalDS))
fo.write("\tTotal Energy: %s\n" % (self.totalVDW + self.totalELE + self.totalHB + self.totalDS))
fo.close()
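# Hypothetical usage sketch (added for illustration; the molecule-loading calls
# and file names are assumptions, not taken from this module):
#
#   from MolKit import Read
#   receptor = Read('receptor.pdbqt')[0]
#   ligand = Read('ligand.pdbqt')[0]          # needs torTree / TORSDOF set
#   evaluator = ADEnergyEvaluator(receptor, ligand, forceFieldVersion='4')
#   evaluator.getProtLigScoreBreakdown()       # per-term breakdown
#   evaluator.getProtLigScorePerAtom()         # per-ligand-atom breakdown
#   evaluator.getProtLigScorePerAtomToFile()   # writes cAD_Energy.txt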
avg_line_length: 40.114173 | max_line_length: 141 | alphanum_fraction: 0.635391

hexsha: 1ab6325dadd6ffad0a8ba462e91d77fd0a504e26 | size: 27,025 | ext: py | lang: Python
path: adaptfx/3D_GUI.py | repo: YoelPH/Adaptive-fractionation-2D | repo_head: 9e08e7a77ad8f20f97e8498328165d505f9cb51d | licenses: ["MIT"]
max_stars_count: 1 (2021-07-15T12:23:25.000Z to 2021-07-15T12:23:25.000Z) | max_issues_count: null | max_forks_count: null
# -*- coding: utf-8 -*-
"""
GUI for 3D adaptive fractionation with minimum and maximum dose
"""
import tkinter as tk
import numpy as np
from scipy.stats import invgamma
from tkinter import filedialog as fd
from tkinter.messagebox import showinfo
import tkinter.ttk as ttk
import pandas as pd
import interpol3D as intp3
import threading
class VerticalScrolledFrame(tk.Frame):
"""A pure Tkinter scrollable frame that actually works!
* Use the 'interior' attribute to place widgets inside the scrollable frame
* Construct and pack/place/grid normally
* This frame only allows vertical scrolling
"""
def __init__(self, parent, *args, **kw):
tk.Frame.__init__(self, parent, *args, **kw)
# create a canvas object and a vertical scrollbar for scrolling it
vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
vscrollbar.pack(fill=tk.Y, side=tk.RIGHT, expand=tk.FALSE)
canvas = tk.Canvas(self, bd=0, highlightthickness=0,
yscrollcommand=vscrollbar.set,height = 1000)
canvas.pack(side=tk.LEFT, fill= tk.BOTH, expand= tk.TRUE)
vscrollbar.config(command=canvas.yview)
# reset the view
canvas.xview_moveto(0)
canvas.yview_moveto(0)
# create a frame inside the canvas which will be scrolled with it
self.interior = interior = tk.Frame(canvas)
interior_id = canvas.create_window(0, 0, window=interior,
anchor=tk.NW)
# track changes to the canvas and frame width and sync them,
# also updating the scrollbar
def _configure_interior(event):
# update the scrollbars to match the size of the inner frame
size = (interior.winfo_reqwidth(), interior.winfo_reqheight())
canvas.config(scrollregion="0 0 %s %s" % size)
if interior.winfo_reqwidth() != canvas.winfo_width():
# update the canvas's width to fit the inner frame
canvas.config(width=interior.winfo_reqwidth())
interior.bind('<Configure>', _configure_interior)
def _configure_canvas(event):
if interior.winfo_reqwidth() != canvas.winfo_width():
# update the inner frame's width to fill the canvas
canvas.itemconfigure(interior_id, width=canvas.winfo_width())
canvas.bind('<Configure>', _configure_canvas)
class Task(threading.Thread):
def __init__(self, master, task):
threading.Thread.__init__(self, target=task)
if not hasattr(master, 'thread_compute') or not master.thread_compute.is_alive():
master.thread_compute = self
self.start()
class GUIextended3D:
def __init__(self, master):
self.master = master
master.title("3D Adaptive fractionation calculator extended")
self.frame = VerticalScrolledFrame(master)
self.frame.pack()
self.frm_probdis = tk.Frame(master = self.frame.interior,relief=tk.SUNKEN, borderwidth=3)
self.frm_probdis.pack()
self.data = []
self.info_funcs = [self.info1,self.info2,self.info3,self.info4,self.info5]
self.info_buttons =["btn_path","btn_mean","btn_std","btn_shae","btn_scale"]
for idx in range(len(self.info_funcs)):
globals()[self.info_buttons[idx]] = tk.Button(master = self.frm_probdis,text = '?',command = self.info_funcs[idx])
globals()[self.info_buttons[idx]].grid(row=idx+1,column=4)
self.var_radio = tk.IntVar()
self.var_radio.set(1)
self.hyper_insert = tk.Radiobutton(master = self.frm_probdis,text = 'hyperparameters',justify = "left",variable = self.var_radio, value = 1, command = self.checkbox1)
self.hyper_insert.grid(row= 0, column = 0)
self.file_insert = tk.Radiobutton(master = self.frm_probdis,text = 'prior data',justify = "left",variable = self.var_radio, value = 2, command = self.checkbox1)
self.file_insert.grid(row= 0, column = 1)
self.fixed_insert = tk.Radiobutton(master = self.frm_probdis, text = 'define normal distribution',justify= "left",variable = self.var_radio,value = 3, command = self.checkbox1)
self.fixed_insert.grid(row= 0, column = 2)
# open button
self.lbl_open = tk.Label(master = self.frm_probdis, text = 'load patient data for prior')
self.lbl_open.grid(row = 1, column = 0)
self.btn_open = tk.Button(
self.frm_probdis,
text='Open a File',
command=self.select_file)
self.ent_file = tk.Entry(master=self.frm_probdis, width=20)
self.btn_open.grid(row = 1, column = 1)
self.lbl_mean = tk.Label(master = self.frm_probdis, text = 'mean of normal distribution:')
self.lbl_mean.grid(row=2,column = 0)
self.ent_mean = tk.Entry(master = self.frm_probdis, width = 30)
self.ent_mean.grid(row = 2, column = 1,columnspan = 2)
self.lbl_std = tk.Label(master = self.frm_probdis, text = 'std of normal distribution:')
self.lbl_std.grid(row=3,column = 0)
self.ent_std = tk.Entry(master = self.frm_probdis, width = 30)
self.ent_std.grid(row = 3, column = 1,columnspan = 2)
self.lbl_alpha = tk.Label(master = self.frm_probdis, text = "shape of inverse-gamma distribution (alpha):")
self.lbl_alpha.grid(row=4,column = 0)
self.ent_alpha = tk.Entry(master = self.frm_probdis, width = 30)
self.ent_alpha.grid(row = 4, column = 1,columnspan = 2)
self.lbl_beta = tk.Label(master = self.frm_probdis, text = "scale of inverse-gamma distribution (beta):")
self.lbl_beta.grid(row=5,column = 0)
self.ent_beta = tk.Entry(master = self.frm_probdis, width = 30)
self.ent_beta.grid(row = 5, column = 1,columnspan = 2)
self.btn_open.configure(state = 'disabled')
self.ent_alpha.configure(state = 'normal')
self.ent_beta.configure(state = 'normal')
self.ent_file.configure(state = 'disabled')
self.ent_mean.configure(state = 'disabled')
self.ent_std.configure(state = 'disabled')
self.ent_alpha.insert(0,"0.6133124926763415")
self.ent_beta.insert(0,"0.0004167968394550765")
#produce master with extra option like number of fractions.
self.frm_extras = tk.Frame(master = self.frame.interior,relief = tk.SUNKEN, borderwidth = 3)
self.frm_extras.pack()
self.lbl_fractions = tk.Label(master = self.frm_extras, text = 'Total number of fractions')
self.lbl_fractions.grid(row=0,column = 0)
self.ent_fractions = tk.Entry(master = self.frm_extras, width = 30)
self.ent_fractions.grid(row = 0, column = 1,columnspan = 2)
self.ent_fractions.insert(0,"5")
self.btn_infofrac = tk.Button(master = self.frm_extras, text = '?', command = self.infofrac)
self.btn_infofrac.grid(row=0,column = 3)
self.lbl_mindose = tk.Label(master = self.frm_extras, text = 'minimum dose')
self.lbl_mindose.grid(row = 1, column = 0)
self.ent_mindose = tk.Entry(master = self.frm_extras, width = 30)
self.ent_mindose.grid(row = 1, column = 1,columnspan = 2)
self.ent_mindose.insert(0,"0")
self.btn_mindose = tk.Button(master = self.frm_extras, text = '?', command = self.infomin)
self.btn_mindose.grid(row=1,column = 3)
self.lbl_maxdose = tk.Label(master = self.frm_extras, text = 'maximum dose')
self.lbl_maxdose.grid(row = 2, column = 0)
self.ent_maxdose = tk.Entry(master = self.frm_extras, width = 30)
self.ent_maxdose.grid(row = 2, column = 1,columnspan = 2)
self.ent_maxdose.insert(0,"22.3")
self.btn_maxdose = tk.Button(master = self.frm_extras, text = '?', command = self.infomin)
self.btn_maxdose.grid(row=2,column = 3)
# Create a new frame `frm_form` to contain the Label
# and Entry widgets for entering variable values
self.frm_form = tk.Frame(master = self.frame.interior, relief=tk.SUNKEN, borderwidth=3)
# Pack the frame into the master
self.frm_form.pack()
self.frm_buttons = tk.Frame()
self.frm_buttons.pack(fill=tk.X, ipadx=5, ipady=5)
self.frm_output = tk.Frame(master = self.frame.interior,relief=tk.SUNKEN, borderwidth = 3)
#add label and entry for filename
self.label = tk.Label(master=self.frm_form, text='file path of prior patients')
self.ent_file = tk.Entry(master=self.frm_form, width=50)
self.label.grid(row=0, column=0, sticky="e")
self.ent_file.grid(row=0, column=1)
self.info_funcs = [self.info10,self.info11,self.info12,self.info13,self.info14,self.info15,self.info16]
self.info_buttons =["self.btn_sf","self.btn_abt","self.btn_abn","self.btn_OARlimit","self.btn_tumorlimit","self.btn_tumorBED","self.btn_OARBED"]
# List of field labels
self.labels = [
"sparing factors separated by spaces:",
"alpha-beta ratio of tumor:",
"alpha-beta ratio of OAR:",
"OAR limit:",
"prescribed tumor dose:",
"accumulated tumor dose:",
"accumulated OAR dose:"
]
self.ent_sf = tk.Entry(master=self.frm_form, width=50)
self.lbl_sf = tk.Label(master = self.frm_form, text = self.labels[0])
self.example_list = ["sparing factors separated by space",10,3,90,72,"only needed if we calculate the dose for a single fraction","only needed if we calculate the dose for a single fraction"]
self.lbl_sf.grid(row=0, column=0, sticky="e")
self.ent_sf.grid(row=0, column=1)
self.ent_sf.insert(0,f"{self.example_list[0]}")
self.btn_sf = tk.Button(master = self.frm_form,text = '?',command = self.info_funcs[0])
self.btn_sf.grid(row=0,column=2)
self.ent_abt = tk.Entry(master=self.frm_form, width=50)
self.lbl_abt = tk.Label(master = self.frm_form, text = self.labels[1])
self.lbl_abt.grid(row=1, column=0, sticky="e")
self.ent_abt.grid(row=1, column=1)
self.ent_abt.insert(0,f"{self.example_list[1]}")
self.btn_abt = tk.Button(master = self.frm_form,text = '?',command = self.info_funcs[1])
self.btn_abt.grid(row=1,column=2)
self.ent_abn = tk.Entry(master=self.frm_form, width=50)
self.lbl_abn = tk.Label(master = self.frm_form, text = self.labels[2])
self.lbl_abn.grid(row=2, column=0, sticky="e")
self.ent_abn.grid(row=2, column=1)
self.ent_abn.insert(0,f"{self.example_list[2]}")
self.btn_abn = tk.Button(master = self.frm_form,text = '?',command = self.info_funcs[2])
self.btn_abn.grid(row=2,column=2)
self.ent_OARlimit = tk.Entry(master=self.frm_form, width=50)
self.lbl_OARlimit = tk.Label(master = self.frm_form, text = self.labels[3])
self.lbl_OARlimit.grid(row=3, column=0, sticky="e")
self.ent_OARlimit.grid(row=3, column=1)
self.ent_OARlimit.insert(0,f"{self.example_list[3]}")
self.btn_OARlimit = tk.Button(master = self.frm_form,text = '?',command = self.info_funcs[3])
self.btn_OARlimit.grid(row=3,column=2)
self.ent_tumorlimit = tk.Entry(master=self.frm_form, width=50)
self.lbl_tumorlimit = tk.Label(master = self.frm_form, text = self.labels[4])
self.lbl_tumorlimit.grid(row=4, column=0, sticky="e")
self.ent_tumorlimit.grid(row=4, column=1)
self.ent_tumorlimit.insert(0,f"{self.example_list[4]}")
self.btn_tumorlimit = tk.Button(master = self.frm_form,text = '?',command = self.info_funcs[4])
self.btn_tumorlimit.grid(row=4,column=2)
self.ent_BED_tumor = tk.Entry(master=self.frm_form, width=50)
self.lbl_BED_tumor = tk.Label(master = self.frm_form, text = self.labels[5])
self.lbl_BED_tumor.grid(row=5, column=0, sticky="e")
self.ent_BED_tumor.grid(row=5, column=1)
self.ent_BED_tumor.insert(0,f"{self.example_list[5]}")
self.btn_BED_tumor = tk.Button(master = self.frm_form,text = '?',command = self.info_funcs[5])
self.btn_BED_tumor.grid(row=5,column=2)
self.ent_BED_OAR = tk.Entry(master=self.frm_form, width=50)
self.lbl_BED_OAR = tk.Label(master = self.frm_form, text = self.labels[6])
self.lbl_BED_OAR.grid(row=6, column=0, sticky="e")
self.ent_BED_OAR.grid(row=6, column=1)
self.ent_BED_OAR.insert(0,f"{self.example_list[6]}")
self.btn_BED_OAR = tk.Button(master = self.frm_form,text = '?',command = self.info_funcs[6])
self.btn_BED_OAR.grid(row=6,column=2)
self.ent_BED_OAR.configure(state = 'disabled')
self.ent_BED_tumor.configure(state = 'disabled')
# Create a new frame `frm_buttons` to contain the compute button
self.frm_buttons = tk.Frame(master = self.frame.interior)
self.frm_buttons.pack(fill=tk.X, ipadx=5, ipady=5)
self.btn_compute = tk.Button(master=self.frm_buttons, text="compute plan",command=lambda :Task(self, self.compute_plan))
self.btn_compute.pack(side=tk.BOTTOM, ipadx=10)
self.var = tk.IntVar()
self.chk_single_fraction = tk.Checkbutton(master = self.frm_buttons,text = "Calculate dose only for actual fraction",variable = self.var,onvalue = 1,offvalue = 0, command=self.checkbox)
self.chk_single_fraction.pack(side = tk.BOTTOM, padx = 10, ipadx = 10)
self.lbl_info = tk.Label(master = self.frm_output, text = "There are several default values set. Only the sparing factors have to be inserted.\nThis program might take some minutes to calculate")
self.lbl_info.pack()
self.frm_output.pack(fill=tk.BOTH, ipadx = 10, ipady = 10)
# progressbar
self.pb = ttk.Progressbar(
master = self.frm_output,
orient='horizontal',
mode='determinate',
length=500
)
# place the progressbar
self.pb.pack(pady = 10)
def select_file(self):
filetypes = (
('csv files', '*.csv'),
('All files', '*.*')
)
filename = fd.askopenfilename(
title='Open a file',
initialdir='/',
filetypes=filetypes)
showinfo(
title='Selected File',
message=filename
)
self.ent_file.insert(0,filename)
self.data = np.array(pd.read_csv(self.ent_file.get(),sep = ';'))
self.variances = self.data.var(axis = 1)
self.alpha,self.loc,self.beta = invgamma.fit(self.variances,floc = 0)
self.ent_alpha.configure(state = 'normal')
self.ent_beta.configure(state = 'normal')
self.ent_alpha.delete(0, 'end')
self.ent_beta.delete(0,'end')
self.ent_alpha.insert(0,self.alpha)
self.ent_beta.insert(0,self.beta)
self.ent_alpha.configure(state = 'disabled')
self.ent_beta.configure(state = 'disabled')
def checkbox1(self):
if self.var_radio.get() == 1:
self.btn_open.configure(state = 'disabled')
self.ent_alpha.configure(state = 'normal')
self.ent_beta.configure(state = 'normal')
self.ent_file.configure(state = 'disabled')
self.ent_mean.configure(state = 'disabled')
self.ent_std.configure(state = 'disabled')
elif self.var_radio.get() == 2:
self.ent_file.configure(state = 'normal')
self.btn_open.configure(state = 'normal')
self.ent_alpha.configure(state = 'disabled')
self.ent_beta.configure(state = 'disabled')
self.ent_mean.configure(state = 'disabled')
self.ent_std.configure(state = 'disabled')
elif self.var_radio.get() == 3:
self.ent_mean.configure(state = 'normal')
self.ent_std.configure(state = 'normal')
self.ent_alpha.configure(state = 'disabled')
self.ent_beta.configure(state = 'disabled')
self.btn_open.configure(state = 'disabled')
self.ent_file.configure(state = 'disabled')
#assign infobutton commands
def info1(self):
self.lbl_info["text"] = 'Insert the path of your prior patient data in here. \nThis is only needed, if the checkbox for prior data is marked. \nIf not, one can directly insert the hyperparameters below. \nThe file with the prior data must be of the shape n x k,\nwhere each new patient n is on a row and each fraction for patient n is in column k'
def info2(self):
self.lbl_info["text"] = 'Insert the mean of the sparing factor distribution. \nwith this option the distribution is not updated'
def info3(self):
self.lbl_info["text"] = 'Insert the standard deviation of the sparing factor distribution. \nwith this option the distribution is not updated'
def info4(self):
self.lbl_info["text"] = 'Insert the shape parameter for the inverse-gamme distribution.'
def info5(self):
self.lbl_info["text"] = 'Insert the scale parameter for the inverse-gamme distribution.'
def infofrac(self):
self.lbl_info["text"] = 'Insert the number of fractions to be delivered to the patient. \n5 fractions is set a standard SBRT treatment.'
def infomin(self):
self.lbl_info["text"] = 'Insert the minimal physical dose that shall be delivered to the PTV95 in one fraction.\nIt is recommended to not put too high minimum dose constraints to allow adaptation'
def infomax(self):
self.lbl_info["text"] = 'Insert the maximal physical dose that shall be delivered to the PTV95 in one fraction.'
#assign infobutton commands
def info10(self):
self.lbl_info["text"] = 'Insert the sparing factors that were observed so far.\n The sparing factor of the planning session must be included!.\nThe sparing factors must be separated by spaces e.g.:\n1.1 0.95 0.88\nFor a whole plan 6 sparing factors are needed.'
def info11(self):
self.lbl_info["text"] = 'Insert the alpha-beta ratio of the tumor tissue.'
def info12(self):
self.lbl_info["text"] = 'Insert the alpha-beta ratio of the dose-limiting Organ at risk.'
def info13(self):
self.lbl_info["text"] = 'Insert the maximum dose delivered to the dose-limiting OAR in BED.'
def info14(self):
self.lbl_info["text"] = 'Insert the prescribed biological effectiv dose to be delivered to the tumor.'
def info15(self):
self.lbl_info["text"] = 'Insert the accumulated tumor BED so far. (If fraction one, it is zero).'
def info16(self):
self.lbl_info["text"] = 'Insert the accumulated OAR dose so far. (If fraction one, it is zero).'
def compute_plan(self):
self.btn_compute.configure(state = 'disabled')
number_of_fractions = int(self.ent_fractions.get())
alpha = float(self.ent_alpha.get())
beta = float(self.ent_beta.get())
min_dose = float(self.ent_mindose.get())
max_dose = float(self.ent_maxdose.get())
if self.var_radio.get() != 3:
fixed_prob = 0
fixed_mean = 0
fixed_std = 0
elif self.var_radio.get() == 3:
fixed_prob = 1
fixed_mean = float(self.ent_mean.get())
fixed_std = float(self.ent_std.get())
try:
global lbl_output
self.lbl_output.destroy()
except:
pass
if self.var.get() == 0:
try:
sparing_factors_str = (self.ent_sf.get()).split()
sparing_factors = [float(i) for i in sparing_factors_str]
abt = float(self.ent_abt.get())
abn = float(self.ent_abn.get())
OAR_limit = float(self.ent_OARlimit.get())
tumor_limit = float(self.ent_tumorlimit.get())
physical_doses = np.zeros(number_of_fractions)
tumor_doses = np.zeros(number_of_fractions)
OAR_doses = np.zeros(number_of_fractions)
accumulated_OAR_dose = 0
accumulated_tumor_dose = 0
self.pb['value'] = 0
for looper in range(0,number_of_fractions):
[actual_policy,accumulated_tumor_dose,accumulated_OAR_dose,tumor_dose,OAR_dose] = intp3.value_eval(looper+1,number_of_fractions,accumulated_OAR_dose,accumulated_tumor_dose,sparing_factors[0:looper+2],abt,abn,OAR_limit,tumor_limit,alpha,beta,min_dose,max_dose,fixed_prob, fixed_mean, fixed_std)
physical_doses[looper] = actual_policy
tumor_doses[looper] = tumor_dose
OAR_doses[looper] = OAR_dose
self.pb['value'] += 100/number_of_fractions
self.lbl_output = tk.Frame(master = self.frame.interior)
self.lbl_output.pack()
frame = tk.Frame(master = self.lbl_output, relief = tk.RAISED, borderwidth = 1)
frame.grid(row = 0,column = 0)
label= tk.Label(master = frame, text = "fraction number")
label.pack()
frame = tk.Frame(master = self.lbl_output, relief = tk.RAISED, borderwidth = 1)
frame.grid(row = 0,column = 1)
label= tk.Label(master = frame, text = "sparing factor")
label.pack()
frame = tk.Frame(master = self.lbl_output, relief = tk.RAISED, borderwidth = 1)
frame.grid(row = 0,column = 2)
label= tk.Label(master = frame, text = "physical dose delivered to PTV95")
label.pack()
frame = tk.Frame(master = self.lbl_output, relief = tk.RAISED, borderwidth = 1)
frame.grid(row = 0,column = 3)
label= tk.Label(master = frame, text = "BED delivered to tumor")
label.pack()
frame = tk.Frame(master = self.lbl_output, relief = tk.RAISED, borderwidth = 1)
frame.grid(row = 0,column = 4)
label= tk.Label(master = frame, text = "BED delivered to OAR")
label.pack()
for i in range(1,number_of_fractions +1):
for j in range(5):
if j == 0:
frame = tk.Frame(master = self.lbl_output)
frame.grid(row = i,column = 0)
label = tk.Label(master= frame, text = f"fraction {i}")
label.pack()
elif j == 1:
frame = tk.Frame(master = self.lbl_output)
frame.grid(row = i,column = 1)
label = tk.Label(master= frame, text = f" {sparing_factors[i]}")
label.pack()
elif j == 2:
frame = tk.Frame(master = self.lbl_output)
frame.grid(row = i,column = 2)
label = tk.Label(master= frame, text = f" {np.round(physical_doses[i-1],2)}")
label.pack()
elif j == 3:
frame = tk.Frame(master = self.lbl_output)
frame.grid(row = i,column = 3)
label = tk.Label(master= frame, text = f" {np.round(tumor_doses[i-1],2)}")
label.pack()
elif j == 4:
frame = tk.Frame(master = self.lbl_output)
frame.grid(row = i,column = 4)
label = tk.Label(master= frame, text = f" {np.round(OAR_doses[i-1],2)}")
label.pack()
frame = tk.Frame(master = self.lbl_output, relief = tk.RAISED, borderwidth = 1)
frame.grid(row = number_of_fractions +1,column = 0)
label= tk.Label(master = frame, text = "accumulated doses")
label.pack()
frame = tk.Frame(master = self.lbl_output, relief = tk.RAISED, borderwidth = 1)
frame.grid(row = number_of_fractions +1 ,column = 3)
label = tk.Label(master= frame, text = f" {np.round(np.sum(tumor_doses),2)}")
label.pack()
frame = tk.Frame(master = self.lbl_output, relief = tk.RAISED, borderwidth = 1)
frame.grid(row = number_of_fractions + 1,column = 4)
label = tk.Label(master= frame, text = f" {np.sum(OAR_doses)}")
label.pack()
except ValueError:
self.lbl_info["text"] = "please enter correct values. Use the ? boxes for further information."
else:
try:
sparing_factors_str = (self.ent_sf.get()).split()
sparing_factors = [float(i) for i in sparing_factors_str]
abt = float(self.ent_abt.get())
abn = float(self.ent_abn.get())
OAR_limit = float(self.ent_OARlimit.get())
tumor_limit = float(self.ent_tumorlimit.get())
BED_tumor = float(self.ent_BED_tumor.get())
BED_OAR = float(self.ent_BED_OAR.get())
[optimal_dose,total_dose_delivered_tumor,total_dose_delivered_OAR,tumor_dose,OAR_dose] = intp3.value_eval(len(sparing_factors)-1,number_of_fractions,BED_OAR,BED_tumor,sparing_factors,abt,abn,OAR_limit,tumor_limit,alpha,beta,min_dose,max_dose,fixed_prob,fixed_mean,fixed_std)
self.lbl_info["text"] = f"The optimal dose for fraction {len(sparing_factors)-1}, = {optimal_dose}\naccumulated dose in tumor = {total_dose_delivered_tumor}\naccumulated dose OAR = {total_dose_delivered_OAR}"
except ValueError:
self.lbl_info["text"] = "please enter correct values. Use the ? boxes for further information."
self.btn_compute.configure(state = 'normal')
def checkbox(self):
if self.var.get() == 0:
self.ent_BED_tumor.configure(state = 'disabled')
self.ent_BED_OAR.configure(state = 'disabled')
else:
self.ent_BED_tumor.configure(state = 'normal')
self.ent_BED_OAR.configure(state = 'normal')
if __name__=='__main__':
root = tk.Tk()
GUI = GUIextended3D(root)
# Start the application
root.mainloop()
avg_line_length: 52.680312 | max_line_length: 355 | alphanum_fraction: 0.609066

hexsha: 8e65339784a75a50d4bb4092d186e6e8adacd847 | size: 1,699 | ext: py | lang: Python
path: app/core/migrations/0001_initial.py | repo: Fasih1994/recipe-app-api | repo_head: deb3e13c13d9093f715d4626a1ee279569d8d20c | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 5 (2021-03-19T12:09:44.000Z to 2021-06-10T20:30:40.000Z) | max_forks_count: null
# Generated by Django 2.2 on 2020-08-10 11:11
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
avg_line_length: 49.970588 | max_line_length: 266 | alphanum_fraction: 0.637434

hexsha: 5ea5cfe6ed50ec9aeb91e2dc35c03a2037b7259f | size: 785 | ext: py | lang: Python
path: calculator.py | repo: BooAA/calculator | repo_head: a35d94cfa04e132cf6d657f6a2e1546491748966 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import scfm
def add(a, b):
return a + b
def sub(a, b):
return a - b
def mul(a, b):
return a * b
def div(a, b):
return a / b
if __name__ == '__main__':
scfm.Token.priority_table['+'] = 0
scfm.Token.priority_table['-'] = 0
scfm.Token.priority_table['*'] = 1
scfm.Token.priority_table['/'] = 1
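# Note (illustrative; based only on the priority table above, not on scfm
# internals): '*' and '/' bind tighter than '+' and '-', so an input such as
# "1 + 2 * 3" should parse to the RPN sequence "1 2 3 * +" and evaluate to 7,
# while "4 / 2 - 1" becomes "4 2 / 1 -" and evaluates to 1.0.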
parser = scfm.Parser()
calculator = scfm.Evaluator(
{
'+': add,
'-': sub,
'*': mul,
'/': div,
True: float
}
)
while True:
expr = input("calc: ")
if expr == 'quit':
break
RPN_form = parser.parse(expr)
result = calculator.eval(RPN_form)
print(f'-> {result}')
print('Leave calculator')
avg_line_length: 16.354167 | max_line_length: 42 | alphanum_fraction: 0.46879

hexsha: 93e9ca755adbab6f5d402753bf8eaf346cd5f8ce | size: 442 | ext: py | lang: Python
path: examples/src/Shapes/AnimationTargetShapes.py | repo: aspose-slides/Aspose.Slides-for-Python-via-.NET | repo_head: c55ad5c71f942598f1e67e22a52cbcd1cb286467 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import aspose.slides as slides
# Path to source presentation
dataDir = "./examples/data/"
outDir = "./examples/out/"
with slides.Presentation(dataDir + "shapes_animation_example.pptx") as pres:
for slide in pres.slides:
for effect in slide.timeline.main_sequence:
print("{0} animation effect is set to shape#{1} on slide#{2}".format(
effect.type, effect.target_shape.unique_id, slide.slide_number))
avg_line_length: 40.181818 | max_line_length: 82 | alphanum_fraction: 0.696833

hexsha: 0d7cdaddd181ffb93bb79b2cd88b80f2a19dd727 | size: 13,452 | ext: py | lang: Python
path: scipy/linalg/matfuncs.py | repo: sargas/scipy | repo_head: 3dcb7b2d5da7fcd9529137aa96b7a771cd47f111 | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
#
# Author: Travis Oliphant, March 2002
#
from __future__ import division, print_function, absolute_import
__all__ = ['expm','expm2','expm3','cosm','sinm','tanm','coshm','sinhm',
'tanhm','logm','funm','signm','sqrtm']
from numpy import asarray, Inf, dot, eye, diag, exp, \
product, logical_not, ravel, transpose, conjugate, \
cast, log, ogrid, imag, real, absolute, amax, sign, \
isfinite, sqrt, single
from numpy import matrix as mat
import numpy as np
# Local imports
from .misc import norm
from .basic import solve, inv
from .special_matrices import triu, all_mat
from .decomp import eig
from .decomp_svd import orth, svd
from .decomp_schur import schur, rsf2csf
import warnings
eps = np.finfo(float).eps
feps = np.finfo(single).eps
def expm(A, q=None):
"""
Compute the matrix exponential using Pade approximation.
Parameters
----------
A : (N, N) array_like
Matrix to be exponentiated
Returns
-------
expm : (N, N) ndarray
Matrix exponential of `A`
References
----------
N. J. Higham,
"The Scaling and Squaring Method for the Matrix Exponential Revisited",
SIAM. J. Matrix Anal. & Appl. 26, 1179 (2005).
"""
if q:
warnings.warn("argument q=... in scipy.linalg.expm is deprecated.")
import scipy.sparse.linalg
return scipy.sparse.linalg.expm(A)
def expm2(A):
"""
Compute the matrix exponential using eigenvalue decomposition.
Parameters
----------
A : (N, N) array_like
Matrix to be exponentiated
Returns
-------
expm2 : (N, N) ndarray
Matrix exponential of `A`
"""
A = asarray(A)
t = A.dtype.char
if t not in ['f','F','d','D']:
A = A.astype('d')
t = 'd'
s,vr = eig(A)
vri = inv(vr)
r = dot(dot(vr,diag(exp(s))),vri)
if t in ['f', 'd']:
return r.real.astype(t)
else:
return r.astype(t)
def expm3(A, q=20):
"""
Compute the matrix exponential using Taylor series.
Parameters
----------
A : (N, N) array_like
Matrix to be exponentiated
q : int
Order of the Taylor series
Returns
-------
expm3 : (N, N) ndarray
Matrix exponential of `A`
"""
A = asarray(A)
t = A.dtype.char
if t not in ['f','F','d','D']:
A = A.astype('d')
t = 'd'
A = mat(A)
eA = eye(*A.shape,**{'dtype':t})
trm = mat(eA, copy=True)
castfunc = cast[t]
for k in range(1,q):
trm *= A / castfunc(k)
eA += trm
return eA
_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
def toreal(arr, tol=None):
"""Return as real array if imaginary part is small.
Parameters
----------
arr : array
tol : float
Absolute tolerance
Returns
-------
arr : double or complex array
"""
if tol is None:
tol = {0:feps*1e3, 1:eps*1e6}[_array_precision[arr.dtype.char]]
if (arr.dtype.char in ['F', 'D','G']) and \
np.allclose(arr.imag, 0.0, atol=tol):
arr = arr.real
return arr
def cosm(A):
"""
Compute the matrix cosine.
This routine uses expm to compute the matrix exponentials.
Parameters
----------
A : (N, N) array_like
Input array
Returns
-------
cosm : (N, N) ndarray
Matrix cosine of A
"""
A = asarray(A)
if A.dtype.char not in ['F','D','G']:
return expm(1j*A).real
else:
return 0.5*(expm(1j*A) + expm(-1j*A))
def sinm(A):
"""
Compute the matrix sine.
This routine uses expm to compute the matrix exponentials.
Parameters
----------
A : (N, N) array_like
Input array.
Returns
-------
sinm : (N, N) ndarray
Matrix sine of `A`
"""
A = asarray(A)
if A.dtype.char not in ['F','D','G']:
return expm(1j*A).imag
else:
return -0.5j*(expm(1j*A) - expm(-1j*A))
def tanm(A):
"""
Compute the matrix tangent.
This routine uses expm to compute the matrix exponentials.
Parameters
----------
A : (N, N) array_like
Input array.
Returns
-------
tanm : (N, N) ndarray
Matrix tangent of `A`
"""
A = asarray(A)
if A.dtype.char not in ['F','D','G']:
return toreal(solve(cosm(A), sinm(A)))
else:
return solve(cosm(A), sinm(A))
def coshm(A):
"""
Compute the hyperbolic matrix cosine.
This routine uses expm to compute the matrix exponentials.
Parameters
----------
A : (N, N) array_like
Input array.
Returns
-------
coshm : (N, N) ndarray
Hyperbolic matrix cosine of `A`
"""
A = asarray(A)
if A.dtype.char not in ['F','D','G']:
return toreal(0.5*(expm(A) + expm(-A)))
else:
return 0.5*(expm(A) + expm(-A))
def sinhm(A):
"""
Compute the hyperbolic matrix sine.
This routine uses expm to compute the matrix exponentials.
Parameters
----------
A : (N, N) array_like
Input array.
Returns
-------
sinhm : (N, N) ndarray
Hyperbolic matrix sine of `A`
"""
A = asarray(A)
if A.dtype.char not in ['F','D']:
return toreal(0.5*(expm(A) - expm(-A)))
else:
return 0.5*(expm(A) - expm(-A))
def tanhm(A):
"""
Compute the hyperbolic matrix tangent.
This routine uses expm to compute the matrix exponentials.
Parameters
----------
A : (N, N) array_like
Input array
Returns
-------
tanhm : (N, N) ndarray
Hyperbolic matrix tangent of `A`
"""
A = asarray(A)
if A.dtype.char not in ['F','D']:
return toreal(solve(coshm(A), sinhm(A)))
else:
return solve(coshm(A), sinhm(A))
def funm(A, func, disp=True):
"""
Evaluate a matrix function specified by a callable.
Returns the value of matrix-valued function ``f`` at `A`. The
function ``f`` is an extension of the scalar-valued function `func`
to matrices.
Parameters
----------
A : (N, N) array_like
Matrix at which to evaluate the function
func : callable
Callable object that evaluates a scalar function f.
Must be vectorized (eg. using vectorize).
disp : bool, optional
Print warning if error in the result is estimated large
instead of returning estimated error. (Default: True)
Returns
-------
funm : (N, N) ndarray
Value of the matrix function specified by func evaluated at `A`
errest : float
(if disp == False)
1-norm of the estimated error, ||err||_1 / ||A||_1
"""
# Perform Shur decomposition (lapack ?gees)
A = asarray(A)
if len(A.shape)!=2:
raise ValueError("Non-matrix input to matrix function.")
if A.dtype.char in ['F', 'D', 'G']:
cmplx_type = 1
else:
cmplx_type = 0
T, Z = schur(A)
T, Z = rsf2csf(T,Z)
n,n = T.shape
F = diag(func(diag(T))) # apply function to diagonal elements
F = F.astype(T.dtype.char) # e.g. when F is real but T is complex
minden = abs(T[0,0])
# implement Algorithm 11.1.1 from Golub and Van Loan
# "matrix Computations."
for p in range(1,n):
for i in range(1,n-p+1):
j = i + p
s = T[i-1,j-1] * (F[j-1,j-1] - F[i-1,i-1])
ksl = slice(i,j-1)
val = dot(T[i-1,ksl],F[ksl,j-1]) - dot(F[i-1,ksl],T[ksl,j-1])
s = s + val
den = T[j-1,j-1] - T[i-1,i-1]
if den != 0.0:
s = s / den
F[i-1,j-1] = s
minden = min(minden,abs(den))
F = dot(dot(Z, F),transpose(conjugate(Z)))
if not cmplx_type:
F = toreal(F)
tol = {0:feps, 1:eps}[_array_precision[F.dtype.char]]
if minden == 0.0:
minden = tol
err = min(1, max(tol,(tol/minden)*norm(triu(T,1),1)))
if product(ravel(logical_not(isfinite(F))),axis=0):
err = Inf
if disp:
if err > 1000*tol:
print("Result may be inaccurate, approximate err =", err)
return F
else:
return F, err
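# Illustrative check (added; not part of the original module): for a diagonal
# matrix the Schur factor is the matrix itself, so funm reduces to applying the
# scalar function to the diagonal:
#
#   import numpy as np
#   F = funm(np.diag([1.0, 2.0]), np.exp)   # ~= np.diag(np.exp([1.0, 2.0]))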
def logm(A, disp=True):
"""
Compute matrix logarithm.
The matrix logarithm is the inverse of
expm: expm(logm(`A`)) == `A`
Parameters
----------
A : (N, N) array_like
Matrix whose logarithm to evaluate
disp : bool, optional
Print warning if error in the result is estimated large
instead of returning estimated error. (Default: True)
Returns
-------
logm : (N, N) ndarray
Matrix logarithm of `A`
errest : float
(if disp == False)
1-norm of the estimated error, ||err||_1 / ||A||_1
"""
# Compute using general funm but then use better error estimator and
# make one step in improving estimate using a rotation matrix.
A = mat(asarray(A))
F, errest = funm(A,log,disp=0)
errtol = 1000*eps
# Only iterate if estimate of error is too large.
if errest >= errtol:
# Use better approximation of error
errest = norm(expm(F)-A,1) / norm(A,1)
if not isfinite(errest) or errest >= errtol:
N,N = A.shape
X,Y = ogrid[1:N+1,1:N+1]
R = mat(orth(eye(N,dtype='d')+X+Y))
F, dontcare = funm(R*A*R.H,log,disp=0)
F = R.H*F*R
if (norm(imag(F),1)<=1000*errtol*norm(F,1)):
F = mat(real(F))
E = mat(expm(F))
temp = mat(solve(E.T,(E-A).T))
F = F - temp.T
errest = norm(expm(F)-A,1) / norm(A,1)
if disp:
if not isfinite(errest) or errest >= errtol:
print("Result may be inaccurate, approximate err =", errest)
return F
else:
return F, errest
def signm(a, disp=True):
"""
Matrix sign function.
Extension of the scalar sign(x) to matrices.
Parameters
----------
A : (N, N) array_like
Matrix at which to evaluate the sign function
disp : bool, optional
Print warning if error in the result is estimated large
instead of returning estimated error. (Default: True)
Returns
-------
signm : (N, N) ndarray
Value of the sign function at `A`
errest : float
(if disp == False)
1-norm of the estimated error, ||err||_1 / ||A||_1
Examples
--------
>>> from scipy.linalg import signm, eigvals
>>> a = [[1,2,3], [1,2,1], [1,1,1]]
>>> eigvals(a)
array([ 4.12488542+0.j, -0.76155718+0.j, 0.63667176+0.j])
>>> eigvals(signm(a))
array([-1.+0.j, 1.+0.j, 1.+0.j])
"""
def rounded_sign(x):
rx = real(x)
if rx.dtype.char=='f':
c = 1e3*feps*amax(x)
else:
c = 1e3*eps*amax(x)
return sign( (absolute(rx) > c) * rx )
result,errest = funm(a, rounded_sign, disp=0)
errtol = {0:1e3*feps, 1:1e3*eps}[_array_precision[result.dtype.char]]
if errest < errtol:
return result
# Handle signm of defective matrices:
# See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,
# 8:237-250,1981" for how to improve the following (currently a
# rather naive) iteration process:
a = asarray(a)
#a = result # sometimes iteration converges faster but where??
# Shifting to avoid zero eigenvalues. How to ensure that shifting does
# not change the spectrum too much?
vals = svd(a,compute_uv=0)
max_sv = np.amax(vals)
#min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1]
#c = 0.5/min_nonzero_sv
c = 0.5/max_sv
S0 = a + c*np.identity(a.shape[0])
prev_errest = errest
for i in range(100):
iS0 = inv(S0)
S0 = 0.5*(S0 + iS0)
Pp=0.5*(dot(S0,S0)+S0)
errest = norm(dot(Pp,Pp)-Pp,1)
if errest < errtol or prev_errest==errest:
break
prev_errest = errest
if disp:
if not isfinite(errest) or errest >= errtol:
print("Result may be inaccurate, approximate err =", errest)
return S0
else:
return S0, errest
def sqrtm(A, disp=True):
"""
Matrix square root.
Parameters
----------
A : (N, N) array_like
Matrix whose square root to evaluate
disp : bool, optional
Print warning if error in the result is estimated large
instead of returning estimated error. (Default: True)
Returns
-------
sqrtm : (N, N) ndarray
Matrix square root of `A`
errest : float
(if disp == False)
Frobenius norm of the estimated error, ||err||_F / ||A||_F
Notes
-----
Uses algorithm by Nicholas J. Higham
"""
A = asarray(A)
if len(A.shape)!=2:
raise ValueError("Non-matrix input to matrix function.")
T, Z = schur(A)
T, Z = rsf2csf(T,Z)
n,n = T.shape
R = np.zeros((n,n),T.dtype.char)
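# The loop below fills the upper-triangular square root R of the Schur
# factor T column by column using the standard recurrence (Higham):
#   R[j,j] = sqrt(T[j,j])
#   R[i,j] = (T[i,j] - sum_k R[i,k]*R[k,j]) / (R[i,i] + R[j,j])   for i < j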
for j in range(n):
R[j,j] = sqrt(T[j,j])
for i in range(j-1,-1,-1):
s = 0
for k in range(i+1,j):
s = s + R[i,k]*R[k,j]
R[i,j] = (T[i,j] - s)/(R[i,i] + R[j,j])
R, Z = all_mat(R,Z)
X = (Z * R * Z.H)
if disp:
nzeig = np.any(diag(T)==0)
if nzeig:
print("Matrix is singular and may not have a square root.")
return X.A
else:
arg2 = norm(X*X - A,'fro')**2 / norm(A,'fro')
return X.A, arg2
avg_line_length: 25.003717 | max_line_length: 75 | alphanum_fraction: 0.5449

hexsha: 4c25ac4c8c19ae5a22c06362235ff1b0f1a587f8 | size: 5,156 | ext: py | lang: Python
path: tests/test_utils.py | repo: klada/deconz | repo_head: 485e915822404d292156ff2a83488954e1ed8286 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
"""Test pydeCONZ utilities.
pytest --cov-report term-missing --cov=pydeconz.utils tests/test_utils.py
"""
import asyncio
from unittest.mock import Mock, patch
from asynctest import CoroutineMock
import pytest
import aiohttp
from pydeconz import errors, utils
API_KEY = "1234567890"
IP = "127.0.0.1"
PORT = "80"
@pytest.mark.asyncio
async def test_get_api_key() -> None:
"""Test a successful call of get_api_key."""
session = Mock()
with patch(
"pydeconz.utils.async_request",
new=CoroutineMock(return_value=[{"success": {"username": API_KEY}}]),
):
response = await utils.async_get_api_key(session, IP, PORT)
assert response == API_KEY
@pytest.mark.asyncio
async def test_get_api_key_with_credentials() -> None:
"""Test a successful call of get_api_key with user crendentials."""
session = Mock()
with patch(
"pydeconz.utils.async_request",
new=CoroutineMock(return_value=[{"success": {"username": API_KEY}}]),
):
response = await utils.async_get_api_key(
session, IP, PORT, username="user", password="pass"
)
assert response == API_KEY
@pytest.mark.asyncio
async def test_delete_api_key() -> None:
"""Test a successful call of delete_api_key."""
session = Mock()
with patch("pydeconz.utils.async_request", new=CoroutineMock(return_value=True)):
await utils.async_delete_api_key(session, IP, PORT, API_KEY)
@pytest.mark.asyncio
async def test_delete_all_keys() -> None:
"""Test a successful call of delete_all_keys.
Delete all keys doesn't care what happens with delete_api_key.
"""
session = Mock()
with patch(
"pydeconz.utils.async_request",
new=CoroutineMock(return_value={"whitelist": {1: "123", 2: "456"}}),
):
await utils.async_delete_all_keys(session, IP, PORT, API_KEY)
@pytest.mark.asyncio
async def test_get_bridge_id() -> None:
"""Test a successful call of get_bridgeid."""
session = Mock()
with patch(
"pydeconz.utils.async_request",
new=CoroutineMock(return_value={"bridgeid": "12345"}),
):
response = await utils.async_get_bridge_id(session, IP, PORT, API_KEY)
assert response == "12345"
@pytest.mark.asyncio
async def test_discovery() -> None:
"""Test a successful call to discovery."""
session = Mock()
with patch(
"pydeconz.utils.async_request",
new=CoroutineMock(
return_value=[
{
"id": "123456FFFFABCDEF",
"internalipaddress": "host1",
"internalport": "port1",
},
{
"id": "234567BCDEFG",
"internalipaddress": "host2",
"internalport": "port2",
},
]
),
):
response = await utils.async_discovery(session)
assert [
{"bridgeid": "123456ABCDEF", "host": "host1", "port": "port1"},
{"bridgeid": "234567BCDEFG", "host": "host2", "port": "port2"},
] == response
@pytest.mark.asyncio
async def test_discovery_response_empty() -> None:
"""Test an empty discovery returns an empty list."""
session = Mock()
with patch("pydeconz.utils.async_request", new=CoroutineMock(return_value={})):
response = await utils.async_discovery(session)
assert not response
@pytest.mark.asyncio
async def test_request() -> None:
"""Test a successful call of request."""
response = Mock()
response.content_type = "application/json"
response.json = CoroutineMock(return_value={"json": "response"})
session = CoroutineMock(return_value=response)
result = await utils.async_request(session, "url")
assert result == {"json": "response"}
@pytest.mark.asyncio
async def test_request_fails_client_error() -> None:
"""Test a successful call of request."""
session = CoroutineMock(side_effect=aiohttp.ClientError)
with pytest.raises(errors.RequestError) as e_info:
await utils.async_request(session, "url")
assert str(e_info.value) == "Error requesting data from url: "
@pytest.mark.asyncio
async def test_request_fails_invalid_content() -> None:
"""Test a successful call of request."""
response = Mock()
response.content_type = "application/binary"
session = CoroutineMock(return_value=response)
with pytest.raises(errors.ResponseError) as e_info:
await utils.async_request(session, "url")
assert str(e_info.value) == "Invalid content type: application/binary"
@pytest.mark.asyncio
async def test_request_fails_raise_error() -> None:
"""Test a successful call of request."""
response = Mock()
response.content_type = "application/json"
response.json = CoroutineMock(
return_value=[
{"error": {"type": 1, "address": "address", "description": "description"}}
]
)
session = CoroutineMock(return_value=response)
with pytest.raises(errors.Unauthorized) as e_info:
await utils.async_request(session, "url")
assert str(e_info.value) == "address description"
| 28.32967
| 86
| 0.647789
|
31f38b77998a2de8628ce462ca5ce2b9c5632f43
| 7,254
|
py
|
Python
|
training/Data/genLMDB.py
|
Guanghan/GNet-pose
|
c70e0fc65b290e68a16ca3040a70300f9c2bee44
|
[
"Apache-2.0"
] | 86
|
2017-05-09T01:30:19.000Z
|
2020-11-24T17:30:02.000Z
|
training/Data/genLMDB.py
|
Guanghan/GNet-pose
|
c70e0fc65b290e68a16ca3040a70300f9c2bee44
|
[
"Apache-2.0"
] | 10
|
2017-07-10T11:31:32.000Z
|
2019-02-13T05:05:20.000Z
|
training/Data/genLMDB.py
|
Guanghan/GNet-pose
|
c70e0fc65b290e68a16ca3040a70300f9c2bee44
|
[
"Apache-2.0"
] | 31
|
2017-05-10T09:53:49.000Z
|
2022-02-05T21:03:10.000Z
|
# Script Comes From Convolutional Pose Machine
import scipy.io as sio
import numpy as np
import json
import cv2
import lmdb
import caffe
import os.path
import struct
def writeLMDB(datasets, lmdb_path, validation):
env = lmdb.open(lmdb_path, map_size=int(1e12))
txn = env.begin(write=True)
data = []
for d in range(len(datasets)):
if(datasets[d] == "MPI"):
print datasets[d]
with open('../Annotations/json/MPI_annotations.json') as data_file:
data_this = json.load(data_file)
data_this = data_this['root']
data = data + data_this
numSample = len(data)
print numSample
elif(datasets[d] == "LEEDS"):
print datasets[d]
with open('../Annotations/json/LEEDS_annotations.json') as data_file:
data_this = json.load(data_file)
data_this = data_this['root']
data = data + data_this
numSample = len(data)
print numSample
elif(datasets[d] == "FLIC"):
            print datasets[d]
with open('../Annotations/json/FLIC_annotations.json') as data_file:
data_this = json.load(data_file)
data_this = data_this['root']
data = data + data_this
numSample = len(data)
print numSample
elif(datasets[d] == "MPI_validation"):
print datasets[d]
with open('../Annotations/json/MPI_annotations_validation.json') as data_file:
data_this = json.load(data_file)
data_this = data_this['root']
data = data + data_this
numSample = len(data)
print numSample
random_order = np.random.permutation(numSample).tolist()
isValidationArray = [data[i]['isValidation'] for i in range(numSample)];
if(validation == 1):
totalWriteCount = isValidationArray.count(0.0); # Use data except validation data (Use training data only)
else:
totalWriteCount = len(data) # Use all data, including train + validation
print 'going to write %d images..' % totalWriteCount;
writeCount = 0
for count in range(numSample):
idx = random_order[count]
if (data[idx]['isValidation'] != 0 and validation == 1):
print '%d/%d skipped' % (count,idx)
continue
#print idx
if "MPI" in data[idx]['dataset']:
path_header = '../../dataset/MPI/images/'
elif "LEEDS" in data[idx]['dataset']:
path_header = '../../dataset/LEEDS/'
elif "FLIC" in data[idx]['dataset']:
path_header = '../../dataset/FLIC/'
print(path_header)
print(data[idx]['img_paths'])
img_path = os.path.join(path_header, data[idx]['img_paths'])
print(img_path)
img = cv2.imread(img_path)
height = img.shape[0]
width = img.shape[1]
if(width < 64):
img = cv2.copyMakeBorder(img,0,0,0,64-width,cv2.BORDER_CONSTANT,value=(128,128,128))
print 'saving padded image!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
cv2.imwrite('padded_img.jpg', img)
width = 64
# no modify on width, because we want to keep information
meta_data = np.zeros(shape=(height,width,1), dtype=np.uint8)
print type(img), img.shape
print type(meta_data), meta_data.shape
clidx = 0 # current line index
# dataset name (string)
for i in range(len(data[idx]['dataset'])):
meta_data[clidx][i] = ord(data[idx]['dataset'][i])
clidx = clidx + 1
# image height, image width
height_binary = float2bytes(data[idx]['img_height'])
for i in range(len(height_binary)):
meta_data[clidx][i] = ord(height_binary[i])
width_binary = float2bytes(data[idx]['img_width'])
for i in range(len(width_binary)):
meta_data[clidx][4+i] = ord(width_binary[i])
clidx = clidx + 1
# (a) isValidation(uint8), numOtherPeople (uint8), people_index (uint8), annolist_index (float), writeCount(float), totalWriteCount(float)
meta_data[clidx][0] = data[idx]['isValidation']
meta_data[clidx][1] = data[idx]['numOtherPeople']
meta_data[clidx][2] = data[idx]['people_index']
annolist_index_binary = float2bytes(data[idx]['annolist_index'])
for i in range(len(annolist_index_binary)): # 3,4,5,6
meta_data[clidx][3+i] = ord(annolist_index_binary[i])
count_binary = float2bytes(float(writeCount)) # note it's writecount instead of count!
for i in range(len(count_binary)):
meta_data[clidx][7+i] = ord(count_binary[i])
totalWriteCount_binary = float2bytes(float(totalWriteCount))
for i in range(len(totalWriteCount_binary)):
meta_data[clidx][11+i] = ord(totalWriteCount_binary[i])
nop = int(data[idx]['numOtherPeople'])
clidx = clidx + 1
# (b) objpos_x (float), objpos_y (float)
objpos_binary = float2bytes(data[idx]['objpos'])
for i in range(len(objpos_binary)):
meta_data[clidx][i] = ord(objpos_binary[i])
clidx = clidx + 1
# (c) scale_provided (float)
scale_provided_binary = float2bytes(data[idx]['scale_provided'])
for i in range(len(scale_provided_binary)):
meta_data[clidx][i] = ord(scale_provided_binary[i])
clidx = clidx + 1
# (d) joint_self (3*16) or (3*22) (float) (3 line)
joints = np.asarray(data[idx]['joint_self']).T.tolist() # transpose to 3*16
for i in range(len(joints)):
row_binary = float2bytes(joints[i])
for j in range(len(row_binary)):
meta_data[clidx][j] = ord(row_binary[j])
clidx = clidx + 1
# (e) check nop, prepare arrays
if(nop!=0):
if(nop==1):
joint_other = [data[idx]['joint_others']]
objpos_other = [data[idx]['objpos_other']]
scale_provided_other = [data[idx]['scale_provided_other']]
else:
joint_other = data[idx]['joint_others']
objpos_other = data[idx]['objpos_other']
scale_provided_other = data[idx]['scale_provided_other']
# (f) objpos_other_x (float), objpos_other_y (float) (nop lines)
for i in range(nop):
objpos_binary = float2bytes(objpos_other[i])
for j in range(len(objpos_binary)):
meta_data[clidx][j] = ord(objpos_binary[j])
clidx = clidx + 1
# (g) scale_provided_other (nop floats in 1 line)
scale_provided_other_binary = float2bytes(scale_provided_other)
for j in range(len(scale_provided_other_binary)):
meta_data[clidx][j] = ord(scale_provided_other_binary[j])
clidx = clidx + 1
# (h) joint_others (3*16) (float) (nop*3 lines)
for n in range(nop):
joints = np.asarray(joint_other[n]).T.tolist() # transpose to 3*16
for i in range(len(joints)):
row_binary = float2bytes(joints[i])
for j in range(len(row_binary)):
meta_data[clidx][j] = ord(row_binary[j])
clidx = clidx + 1
# print meta_data[0:12,0:48]
#print(len(meta_data,1))
#print(len(meta_data,2))
# total 7+4*nop lines
img4ch = np.concatenate((img, meta_data), axis=2)
img4ch = np.transpose(img4ch, (2, 0, 1))
#print img4ch.shape
datum = caffe.io.array_to_datum(img4ch, label=0)
key = '%07d' % writeCount
txn.put(key, datum.SerializeToString())
if(writeCount % 1000 == 0):
txn.commit()
txn = env.begin(write=True)
print 'count: %d/ write count: %d/ randomized: %d/ all: %d' % (count,writeCount,idx,totalWriteCount)
writeCount = writeCount + 1
txn.commit()
env.close()
def float2bytes(floats):
if type(floats) is float:
floats = [floats]
return struct.pack('%sf' % len(floats), *floats)
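# Illustrative sketch of the packing convention used in writeLMDB above (the
# numbers are made up; each float is stored as 4 bytes in native byte order,
# typically little-endian, as produced by struct.pack):
#
#     packed = float2bytes([368.0, 640.0])   # e.g. img_height, img_width
#     len(packed) == 8                       # two float32 values
#     [ord(b) for b in packed]               # byte values written into meta_data
#
# Each metadata "line" is one row of the extra uint8 channel appended to the
# image, with floats stored 4 bytes apiece at the offsets documented in the loop.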
if __name__ == "__main__":
''' 1 means no validation data, 0 means training data plus validation data'''
writeLMDB(['MPI', 'LEEDS'], 'lmdb/MPI_LEEDS_alltrain', 0) # joint dataset (MPII + LEEDS), use train data + validation data
| 36.452261
| 140
| 0.6748
|
895ad0654d4336492305c4574d4b65a6c0821908
| 5,115
|
py
|
Python
|
src/Listing_test.py
|
ioah86/2016_Sortable_Challenge
|
3ce621b7065469c94515ed368579098aaec9f13e
|
[
"MIT"
] | null | null | null |
src/Listing_test.py
|
ioah86/2016_Sortable_Challenge
|
3ce621b7065469c94515ed368579098aaec9f13e
|
[
"MIT"
] | null | null | null |
src/Listing_test.py
|
ioah86/2016_Sortable_Challenge
|
3ce621b7065469c94515ed368579098aaec9f13e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
.. moduleauthor:: Albert Heinle<albert.heinle@gmail.com>
"""
import unittest
from Listing import Listing
import json
class TestProduct(unittest.TestCase):
"""
    Testing the class Listing for correct functionality
"""
def test_Product_From_Sortable_Challenge(self):
"""
Tests the first Listing entry from the sortable challenge, namely:
"title":"Fujifilm FinePix REAL 3D W3 10 MP Digital Camera\
with Dual 3x Optical Zoom Lenses (Black)",
"manufacturer":"Fujifilm Canada",
"currency":"CAD",
"price":"499.99"
        We test that the Listing is correctly initialized, that all the
        getters work properly, and that the string and JSON
        representations are right.
"""
title = "Fujifilm FinePix REAL 3D W3 10 MP Digital Camera\
with Dual 3x Optical Zoom Lenses (Black)"
manufacturer = "Fujifilm Canada"
currency = "CAD"
price = "499.99"
stringRep = """Title: Fujifilm FinePix REAL 3D W3 10 MP Digital Camera\
with Dual 3x Optical Zoom Lenses (Black)
Manufacturer: Fujifilm Canada
Currency: CAD
Price: 499.99"""
jsonRep = """{"title":"Fujifilm FinePix REAL 3D W3 10 MP Digital Camera with Dual 3x Optical Zoom Lenses (Black)","manufacturer":"Fujifilm Canada","currency":"CAD","price":"499.99"}"""
try:
testListing = Listing(title,manufacturer,currency,price)
except:
self.fail("Could not instanciate valid Listing")
self.assertEqual(title,testListing.getTitle(),
"The title was not stored properly")
self.assertEqual(manufacturer,testListing.getManufacturer(),
"The manufacturer was not stored properly")
self.assertEqual(currency,testListing.getCurrency(),
"The Currency was not stored properly")
self.assertEqual(price,testListing.getPrice(),
"The price was not stored properly")
self.assertEqual(stringRep, str(testListing),
"The string representation was not correct")
self.assertEqual(json.loads(jsonRep),json.loads(testListing.toJSON()),
"The JSON representation was not correct")
def test_Invalid_Types(self):
"""
        Tests that Listing rejects invalid types for each constructor
        parameter.
"""
title = "Fujifilm FinePix REAL 3D W3 10 MP Digital Camera\
with Dual 3x Optical Zoom Lenses (Black)"
manufacturer = "Fujifilm Canada"
currency = "CAD"
price = "499.99"
testPassed = 1
try:
testListing = Listing(1,manufacturer,currency,price)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide a non-string type to\
title")
try:
testListing = Listing(title,1,currency,price)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide a non-string type to\
manufacturer")
try:
testListing = Listing(title,manufacturer,1,price)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide a non-string type to\
currency")
try:
testListing = Listing(title,manufacturer,currency,1)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide a non-string type to\
price")
def test_EmptyStrings(self):
"""
        Tests that Listing rejects empty strings for each constructor
        parameter. No entry is optional.
"""
title = "Fujifilm FinePix REAL 3D W3 10 MP Digital Camera\
with Dual 3x Optical Zoom Lenses (Black)"
manufacturer = "Fujifilm Canada"
currency = "CAD"
price = "499.99"
testPassed = 1
try:
testListing = Listing("",manufacturer,currency,price)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide an empty string to\
title")
# try:
# testListing = Listing(title,"",currency,price)
# testPassed = 0
# except:
# pass
# if not testPassed:
# self.fail("It was possible to provide an empty string to\
# manufacturer")
try:
testListing = Listing(title,manufacturer,"",price)
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide an empty string to\
currency")
try:
testListing = Listing(title,manufacturer,currency,"")
testPassed = 0
except:
pass
if not testPassed:
self.fail("It was possible to provide an empty string to\
price")
| 35.520833
| 192
| 0.595112
|
2e617b4f09ac91a1a5e0f5cb2e5c99fb3d0c44dd
| 430
|
py
|
Python
|
wagtail/palermo/settings/dev.py
|
cesosag/palermo
|
a1a7e78096d808cb01c73eb0e2d6cde2ce2cc175
|
[
"MIT"
] | null | null | null |
wagtail/palermo/settings/dev.py
|
cesosag/palermo
|
a1a7e78096d808cb01c73eb0e2d6cde2ce2cc175
|
[
"MIT"
] | null | null | null |
wagtail/palermo/settings/dev.py
|
cesosag/palermo
|
a1a7e78096d808cb01c73eb0e2d6cde2ce2cc175
|
[
"MIT"
] | null | null | null |
import os
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['DJ_SECRET_KEY']
# SECURITY WARNING: define the correct hosts in production!
ALLOWED_HOSTS = ['*']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
try:
from .local import *
except ImportError:
pass
| 21.5
| 66
| 0.746512
|
431488cd5495b68f81f1e6a1a1eb29a4764fa331
| 3,874
|
py
|
Python
|
emmet-cli/emmet/cli/entry_point.py
|
nwinner/emmet
|
6bd779ba785a84f57b61954c88d1ed0dfa95b8cb
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
emmet-cli/emmet/cli/entry_point.py
|
nwinner/emmet
|
6bd779ba785a84f57b61954c88d1ed0dfa95b8cb
|
[
"BSD-3-Clause-LBNL"
] | 78
|
2020-11-16T06:46:43.000Z
|
2022-03-28T03:02:51.000Z
|
emmet-cli/emmet/cli/entry_point.py
|
utf/emmet
|
27a51a7ad4c300e280de5ba9b59a311dd77cffdd
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import os
import logging
import click
from multiprocessing_logging import install_mp_handler
from log4mongo.handlers import BufferedMongoHandler
from github3 import authorize, login
from io import StringIO
from emmet.cli.admin import admin
from emmet.cli.tasks import tasks
from emmet.cli.calc import calc
from emmet.cli.utils import calcdb_from_mgrant, ensure_indexes
from emmet.cli.utils import EmmetCliError
logger = logging.getLogger("")
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
def opt_prompt():
return click.prompt("GitHub OPT", hide_input=True)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option(
"--spec-or-dbfile",
metavar="HOST/DB",
help="MongoGrant spec or path to db.json for DB to use.",
)
@click.option("--run", is_flag=True, help="Run DB/filesystem write operations.")
@click.option("--issue", type=int, help="Production tracker issue (required if --run).")
@click.option("--sbatch", is_flag=True, help="Switch to SBatch mode.")
@click.option("--bb", is_flag=True, help="Use burst buffer.")
@click.option("--yes", is_flag=True, help="Automatic yes to all prompts.")
@click.option("--no-dupe-check", is_flag=True, help="Skip duplicate check(s).")
@click.option("--verbose", is_flag=True, help="Show debug messages.")
@click.version_option()
def emmet(spec_or_dbfile, run, issue, sbatch, bb, yes, no_dupe_check, verbose):
"""Command line interface for emmet"""
logger.setLevel(logging.DEBUG if verbose else logging.INFO)
ctx = click.get_current_context()
ctx.ensure_object(dict)
if not sbatch and bb:
raise EmmetCliError("Burst buffer only available in SBatch mode (--sbatch).")
if spec_or_dbfile:
client = calcdb_from_mgrant(spec_or_dbfile)
ctx.obj["CLIENT"] = client
# ctx.obj["MONGO_HANDLER"] = BufferedMongoHandler(
# host=client.host,
# port=client.port,
# database_name=client.db_name,
# username=client.user,
# password=client.password,
# level=logging.WARNING,
# authentication_db=client.db_name,
# collection="emmet_logs",
# buffer_periodical_flush_timing=False, # flush manually
# )
# logger.addHandler(ctx.obj["MONGO_HANDLER"])
# coll = ctx.obj["MONGO_HANDLER"].collection
# ensure_indexes(SETTINGS.log_fields, [coll])
if run:
if not issue:
raise EmmetCliError(f"Need issue number via --issue!")
ctx.obj["LOG_STREAM"] = StringIO()
memory_handler = logging.StreamHandler(ctx.obj["LOG_STREAM"])
formatter = logging.Formatter(
"%(asctime)s %(name)-12s %(levelname)-8s %(message)s"
)
memory_handler.setFormatter(formatter)
logger.addHandler(memory_handler)
CREDENTIALS = os.path.join(os.path.expanduser("~"), ".emmet_credentials")
if not os.path.exists(CREDENTIALS):
user = click.prompt("GitHub Username")
password = click.prompt("GitHub Password", hide_input=True)
auth = authorize(
user,
password,
["user", "repo", "gist"],
"emmet CLI",
two_factor_callback=opt_prompt,
)
with open(CREDENTIALS, "w") as fd:
fd.write(auth.token)
with open(CREDENTIALS, "r") as fd:
token = fd.readline().strip()
ctx.obj["GH"] = login(token=token)
else:
click.secho("DRY RUN! Add --run flag to execute changes.", fg="green")
install_mp_handler(logger=logger)
def safe_entry_point():
try:
emmet()
except EmmetCliError as e:
click.secho(str(e), fg="red")
except Exception as e:
logger.info(e, exc_info=True)
emmet.add_command(admin)
emmet.add_command(calc)
emmet.add_command(tasks)
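# Hypothetical invocation sketch (assumes the CLI is installed as an `emmet`
# executable; the spec and issue number below are placeholders, while the flags
# and the `tasks` sub-command are defined above):
#
#     emmet --spec-or-dbfile my_host/my_db --run --issue 123 tasks ...
#
# Without --run the CLI stays in dry-run mode and only reports intended changes.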
| 34.283186
| 88
| 0.649974
|
f85cfd3438f90f935b940cf75e6a122d5d7ce5b4
| 98
|
py
|
Python
|
snapshot_queries/__init__.py
|
cedar-team/snapshot-queries
|
e033d7f8cc221617e1922e913cae2f32b409faf6
|
[
"MIT"
] | null | null | null |
snapshot_queries/__init__.py
|
cedar-team/snapshot-queries
|
e033d7f8cc221617e1922e913cae2f32b409faf6
|
[
"MIT"
] | 6
|
2022-03-30T16:19:01.000Z
|
2022-03-30T16:32:08.000Z
|
snapshot_queries/__init__.py
|
cedar-team/snapshot-queries
|
e033d7f8cc221617e1922e913cae2f32b409faf6
|
[
"MIT"
] | 1
|
2022-03-31T19:22:22.000Z
|
2022-03-31T19:22:22.000Z
|
from .snapshot_queries import snapshot_queries
VERSION = "0.5.3"
__all__ = ["snapshot_queries"]
| 16.333333
| 46
| 0.765306
|
de761f86b6b79c271178033981c2585667072fda
| 17,510
|
py
|
Python
|
pfb/workers/post/binterp.py
|
ratt-ru/pfb-clean
|
b53540cf2ccd96c4ee9c9f76b200ec6a2cf5d7f3
|
[
"MIT"
] | 4
|
2020-07-26T20:05:12.000Z
|
2022-02-11T13:08:33.000Z
|
pfb/workers/post/binterp.py
|
ratt-ru/pfb-clean
|
b53540cf2ccd96c4ee9c9f76b200ec6a2cf5d7f3
|
[
"MIT"
] | 25
|
2020-04-17T12:37:28.000Z
|
2022-03-03T13:43:52.000Z
|
pfb/workers/post/binterp.py
|
ratt-ru/pfb-clean
|
b53540cf2ccd96c4ee9c9f76b200ec6a2cf5d7f3
|
[
"MIT"
] | 3
|
2020-04-28T16:09:16.000Z
|
2021-05-12T10:15:16.000Z
|
# flake8: noqa
import os
from contextlib import ExitStack
import click
from omegaconf import OmegaConf
from pfb.workers.main import cli
import pyscilog
pyscilog.init('pfb')
log = pyscilog.get_logger('BINTERP')
@cli.command()
@click.option('-image', '--image', required=True,
help="Path to model or restored image cube.")
@click.option('-o', '--output-dir',
help='Output directory. Placed next to -image if not provided')
@click.option('-postfix', '--postfix', default='beam.fits',
help="Postfix to append to -image for writing out beams.")
@click.option('-ms', '--ms',
help="Will interpolate beam onto TIME if provided.")
@click.option('-bm', '--beam-model', default=None, required=True,
help="Fits beam model to use. \n"
"It is assumed that the pattern is path_to_beam/"
"name_corr_re/im.fits. \n"
"Provide only the path up to name "
"e.g. /home/user/beams/meerkat_lband. \n"
"Patterns mathing corr are determined "
"automatically. \n"
"Only real and imaginary beam models currently "
"supported.")
@click.option('-band', "--band", type=str, default='l',
help="Band to use with JimBeam. L or UHF")
@click.option('-st', '--sparsify-time', type=int, default=10,
help="Average beam interpolation over this many unique time "
"stamps. Only has an effect if MS is provided.")
@click.option('-otype', '--out-dtype', default='f4', type=str,
help="Data type of output. Default is single precision")
@click.option('-ha', '--host-address',
help='Address where the distributed client lives. '
'Will use a local cluster if no address is provided')
@click.option('-nw', '--nworkers', type=int, default=1,
help='Number of workers for the client.')
@click.option('-ntpw', '--nthreads-per-worker', type=int,
help='Number of dask threads per worker.')
@click.option('-nvt', '--nvthreads', type=int,
help="Total number of threads to use for vertical scaling (eg. gridder, fft's etc.)")
@click.option('-mem', '--mem-limit', type=int,
help="Memory limit in GB. Default uses all available memory")
@click.option('-nthreads', '--nthreads', type=int,
help="Total available threads. Default uses all available threads")
def binterp(**kw):
"""
Beam interpolator
Interpolate beams and stack cubes one MS and one spectral window at a time.
"""
args = OmegaConf.create(kw)
from glob import glob
image = sorted(glob(args.image))
try:
assert len(image) > 0
args.image = image
except:
raise ValueError(f"No image at {args.image}")
if args.output_dir is None:
args.output_dir = os.path.dirname(args.image[0])
pyscilog.log_to_file(args.output_dir +
args.postfix.strip('fits') + 'log')
if args.ms is not None:
ms = glob(args.ms)
try:
assert len(ms) == 1
args.ms = ms[0]
except:
raise ValueError(f"There must be exactly one MS matching {args.ms} if provided")
if not isinstance(args.beam_model, str):
raise ValueError("Only string beam patterns allowed")
else:
# we are either using JimBeam or globbing for beam patterns
if args.beam_model.lower() == 'jimbeam':
args.beam_model = args.beam_model.lower()
band = args.band.lower()
if band != 'l' and band != 'uhf':
raise ValueError("Only l or uhf band supported with "
"JimBeam")
else:
print("Using %s band beam model"%args.band,
file=log)
elif args.beam_model.lower().endswith('.fits'):
beam_model = glob(args.beam_model)
try:
assert len(beam_model) > 0
except:
raise ValueError(f"No beam model at {args.beam_model}")
else:
raise ValueError("Unknown beam model provided. "
"Either use JimBeam or pass in the fits beam "
"patterns")
OmegaConf.set_struct(args, True)
with ExitStack() as stack:
from pfb import set_client
args = set_client(args, stack, log)
# TODO - prettier config printing
print('Input Options:', file=log)
for key in args.keys():
print(' %25s = %s' % (key, args[key]), file=log)
return _binterp(**args)
def _binterp(**kw):
args = OmegaConf.create(kw)
OmegaConf.set_struct(args, True)
from pfb.utils.fits import save_fits
import dask
import dask.array as da
import numpy as np
from numba import jit
from astropy.io import fits
import warnings
from africanus.rime import parallactic_angles
from pfb.utils.fits import load_fits, save_fits, data_from_header
from daskms import xds_from_ms, xds_from_table
if args.ms is None:
if args.beam_model.lower() == 'jimbeam':
for image in args.image:
mhdr = fits.getheader(image)
l_coord, ref_l = data_from_header(mhdr, axis=1)
l_coord -= ref_l
m_coord, ref_m = data_from_header(mhdr, axis=2)
m_coord -= ref_m
if mhdr["CTYPE4"].lower() == 'freq':
freq_axis = 4
stokes_axis = 3
elif mhdr["CTYPE3"].lower() == 'freq':
freq_axis = 3
stokes_axis = 4
else:
raise ValueError("Freq axis must be 3rd or 4th")
freq, ref_freq = data_from_header(mhdr, axis=freq_axis)
from katbeam import JimBeam
if args.band.lower() == 'l':
beam = JimBeam('MKAT-AA-L-JIM-2020')
elif args.band.lower() == 'uhf':
beam = JimBeam('MKAT-AA-UHF-JIM-2020')
else:
raise ValueError("Unkown band %s"%args.band[i])
xx, yy = np.meshgrid(l_coord, m_coord, indexing='ij')
beam_image = np.zeros((freq.size, l_coord.size, m_coord.size),
dtype=args.out_dtype)
for v in range(freq.size):
# freq must be in MHz
beam_image[v] = beam.I(xx, yy, freq[v]/1e6).astype(args.out_dtype)
if args.output_dir in image:
idx = len(args.output_dir)
iname = image[idx::]
outname = iname + '.' + args.postfix
else:
outname = image + '.' + args.postfix
beam_image = np.expand_dims(beam_image, axis=3 - stokes_axis + 1)
save_fits(args.output_dir + outname, beam_image, mhdr, dtype=args.out_dtype)
else:
raise NotImplementedError("Not there yet, sorry")
print("All done here.", file=log)
# @jit(nopython=True, nogil=True, cache=True)
# def _unflagged_counts(flags, time_idx, out):
# for i in range(time_idx.size):
# ilow = time_idx[i]
# ihigh = time_idx[i+1]
# out[i] = np.sum(~flags[ilow:ihigh])
# return out
# def extract_dde_info(args, freqs):
# """
# Computes paralactic angles, antenna scaling and pointing information
# required for beam interpolation.
# """
# # get ms info required to compute paralactic angles and weighted sum
# nband = freqs.size
# if args.ms is not None:
# utimes = []
# unflag_counts = []
# ant_pos = None
# phase_dir = None
# for ms_name in args.ms:
# # get antenna positions
# ant = xds_from_table(ms_name + '::ANTENNA')[0].compute()
# if ant_pos is None:
# ant_pos = ant['POSITION'].data
# else: # check all are the same
# tmp = ant['POSITION']
# if not np.array_equal(ant_pos, tmp):
# raise ValueError(
# "Antenna positions not the same across measurement sets")
# # get phase center for field
# field = xds_from_table(ms_name + '::FIELD')[0].compute()
# if phase_dir is None:
# phase_dir = field['PHASE_DIR'][args.field].data.squeeze()
# else:
# tmp = field['PHASE_DIR'][args.field].data.squeeze()
# if not np.array_equal(phase_dir, tmp):
# raise ValueError(
# 'Phase direction not the same across measurement sets')
# # get unique times and count flags
# xds = xds_from_ms(ms_name, columns=["TIME", "FLAG_ROW"], group_cols=[
# "FIELD_ID"])[args.field]
# utime, time_idx = np.unique(
# xds.TIME.data.compute(), return_index=True)
# ntime = utime.size
# # extract subset of times
# if args.sparsify_time > 1:
# I = np.arange(0, ntime, args.sparsify_time)
# utime = utime[I]
# time_idx = time_idx[I]
# ntime = utime.size
# utimes.append(utime)
# flags = xds.FLAG_ROW.data.compute()
# unflag_count = _unflagged_counts(flags.astype(
# np.int32), time_idx, np.zeros(ntime, dtype=np.int32))
# unflag_counts.append(unflag_count)
# utimes = np.concatenate(utimes)
# unflag_counts = np.concatenate(unflag_counts)
# ntimes = utimes.size
# # compute paralactic angles
# parangles = parallactic_angles(utimes, ant_pos, phase_dir)
# # mean over antanna nant -> 1
# parangles = np.mean(parangles, axis=1, keepdims=True)
# nant = 1
# # beam_cube_dde requirements
# ant_scale = np.ones((nant, nband, 2), dtype=np.float64)
# point_errs = np.zeros((ntimes, nant, nband, 2), dtype=np.float64)
# return (parangles,
# da.from_array(ant_scale, chunks=ant_scale.shape),
# point_errs,
# unflag_counts,
# True)
# else:
# ntimes = 1
# nant = 1
# parangles = np.zeros((ntimes, nant,), dtype=np.float64)
# ant_scale = np.ones((nant, nband, 2), dtype=np.float64)
# point_errs = np.zeros((ntimes, nant, nband, 2), dtype=np.float64)
# unflag_counts = np.array([1])
# return (parangles, ant_scale, point_errs, unflag_counts, False)
# def make_power_beam(args, lm_source, freqs, use_dask):
# print("Loading fits beam patterns from %s" % args.beam_model)
# from glob import glob
# paths = glob(args.beam_model + '**_**.fits')
# beam_hdr = None
# if args.corr_type == 'linear':
# corr1 = 'XX'
# corr2 = 'YY'
# elif args.corr_type == 'circular':
# corr1 = 'LL'
# corr2 = 'RR'
# else:
# raise KeyError(
# "Unknown corr_type supplied. Only 'linear' or 'circular' supported")
# for path in paths:
# if corr1.lower() in path[-10::]:
# if 're' in path[-7::]:
# corr1_re = load_fits(path)
# if beam_hdr is None:
# beam_hdr = fits.getheader(path)
# elif 'im' in path[-7::]:
# corr1_im = load_fits(path)
# else:
# raise NotImplementedError("Only re/im patterns supported")
# elif corr2.lower() in path[-10::]:
# if 're' in path[-7::]:
# corr2_re = load_fits(path)
# elif 'im' in path[-7::]:
# corr2_im = load_fits(path)
# else:
# raise NotImplementedError("Only re/im patterns supported")
# # get power beam
# beam_amp = (corr1_re**2 + corr1_im**2 + corr2_re**2 + corr2_im**2)/2.0
# # get cube in correct shape for interpolation code
# beam_amp = np.ascontiguousarray(np.transpose(beam_amp, (1, 2, 0))
# [:, :, :, None, None])
# # get cube info
# if beam_hdr['CUNIT1'].lower() != "deg":
# raise ValueError("Beam image units must be in degrees")
# npix_l = beam_hdr['NAXIS1']
# refpix_l = beam_hdr['CRPIX1']
# delta_l = beam_hdr['CDELT1']
# l_min = (1 - refpix_l)*delta_l
# l_max = (1 + npix_l - refpix_l)*delta_l
# if beam_hdr['CUNIT2'].lower() != "deg":
# raise ValueError("Beam image units must be in degrees")
# npix_m = beam_hdr['NAXIS2']
# refpix_m = beam_hdr['CRPIX2']
# delta_m = beam_hdr['CDELT2']
# m_min = (1 - refpix_m)*delta_m
# m_max = (1 + npix_m - refpix_m)*delta_m
# if (l_min > lm_source[:, 0].min() or m_min > lm_source[:, 1].min() or
# l_max < lm_source[:, 0].max() or m_max < lm_source[:, 1].max()):
# raise ValueError("The supplied beam is not large enough")
# beam_extents = np.array([[l_min, l_max], [m_min, m_max]])
# # get frequencies
# if beam_hdr["CTYPE3"].lower() != 'freq':
# raise ValueError(
# "Cubes are assumed to be in format [nchan, nx, ny]")
# nchan = beam_hdr['NAXIS3']
# refpix = beam_hdr['CRPIX3']
# delta = beam_hdr['CDELT3'] # assumes units are Hz
# freq0 = beam_hdr['CRVAL3']
# bfreqs = freq0 + np.arange(1 - refpix, 1 + nchan - refpix) * delta
# if bfreqs[0] > freqs[0] or bfreqs[-1] < freqs[-1]:
# warnings.warn("The supplied beam does not have sufficient "
# "bandwidth. Beam frequencies:")
# with np.printoptions(precision=2):
# print(bfreqs)
# if use_dask:
# return (da.from_array(beam_amp, chunks=beam_amp.shape),
# da.from_array(beam_extents, chunks=beam_extents.shape),
# da.from_array(bfreqs, bfreqs.shape))
# else:
# return beam_amp, beam_extents, bfreqs
# def interpolate_beam(ll, mm, freqs, args):
# """
# Interpolate beam to image coordinates and optionally compute average
# over time if MS is provoded
# """
# nband = freqs.size
# print("Interpolating beam")
# parangles, ant_scale, point_errs, unflag_counts, use_dask = extract_dde_info(
# args, freqs)
# lm_source = np.vstack((ll.ravel(), mm.ravel())).T
# beam_amp, beam_extents, bfreqs = make_power_beam(
# args, lm_source, freqs, use_dask)
# # interpolate beam
# if use_dask:
# from africanus.rime.dask import beam_cube_dde
# lm_source = da.from_array(lm_source, chunks=lm_source.shape)
# freqs = da.from_array(freqs, chunks=freqs.shape)
# # compute ncpu images at a time to avoid memory errors
# ntimes = parangles.shape[0]
# I = np.arange(0, ntimes, args.ncpu)
# nchunks = I.size
# I = np.append(I, ntimes)
# beam_image = np.zeros((ll.size, 1, nband), dtype=beam_amp.dtype)
# for i in range(nchunks):
# ilow = I[i]
# ihigh = I[i+1]
# part_parangles = da.from_array(
# parangles[ilow:ihigh], chunks=(1, 1))
# part_point_errs = da.from_array(
# point_errs[ilow:ihigh], chunks=(1, 1, freqs.size, 2))
# # interpolate and remove redundant axes
# part_beam_image = beam_cube_dde(beam_amp, beam_extents, bfreqs,
# lm_source, part_parangles, part_point_errs,
# ant_scale, freqs).compute()[:, :, 0, :, 0, 0]
# # weighted sum over time
# beam_image += np.sum(part_beam_image *
# unflag_counts[None, ilow:ihigh, None], axis=1, keepdims=True)
# # normalise by sum of weights
# beam_image /= np.sum(unflag_counts)
# # remove time axis
# beam_image = beam_image[:, 0, :]
# else:
# from africanus.rime.fast_beam_cubes import beam_cube_dde
# beam_image = beam_cube_dde(beam_amp, beam_extents, bfreqs,
# lm_source, parangles, point_errs,
# ant_scale, freqs).squeeze()
# # swap source and freq axes and reshape to image shape
# beam_source = np.transpose(beam_image, axes=(1, 0))
# return beam_source.squeeze().reshape((freqs.size, *ll.shape))
# def main(args):
# # get coord info
# hdr = fits.getheader(args.image)
# l_coord, ref_l = data_from_header(hdr, axis=1)
# l_coord -= ref_l
# m_coord, ref_m = data_from_header(hdr, axis=2)
# m_coord -= ref_m
# if hdr["CTYPE4"].lower() == 'freq':
# freq_axis = 4
# elif hdr["CTYPE3"].lower() == 'freq':
# freq_axis = 3
# else:
# raise ValueError("Freq axis must be 3rd or 4th")
# freqs, ref_freq = data_from_header(hdr, axis=freq_axis)
# xx, yy = np.meshgrid(l_coord, m_coord, indexing='ij')
# # interpolate primary beam to fits header and optionally average over time
# beam_image = interpolate_beam(xx, yy, freqs, args)
# # save power beam
# save_fits(args.output_filename, beam_image, hdr)
# print("Wrote interpolated beam cube to %s \n" % args.output_filename)
# return
| 39.795455
| 99
| 0.557339
|
f5bf086ff503bd7b97e8f5ddfbe5724ebdc11772
| 93
|
py
|
Python
|
app/main/__init__.py
|
Ahmed-moringa/Pitch_me
|
05c444cd5100248ce23709beb6f5b89fde59eda7
|
[
"MIT"
] | null | null | null |
app/main/__init__.py
|
Ahmed-moringa/Pitch_me
|
05c444cd5100248ce23709beb6f5b89fde59eda7
|
[
"MIT"
] | null | null | null |
app/main/__init__.py
|
Ahmed-moringa/Pitch_me
|
05c444cd5100248ce23709beb6f5b89fde59eda7
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
main = Blueprint('main',__name__)
from . import views,forms
| 18.6
| 34
| 0.741935
|
9cc06b22ff605be033ab78e9327666ca4505f1e9
| 452
|
py
|
Python
|
movies/models.py
|
vikram0207/django-rest
|
eafec575999dce6859dc7b99177cff339b2bcbdd
|
[
"MIT"
] | 5
|
2021-04-01T01:09:52.000Z
|
2022-02-21T16:54:54.000Z
|
movies/models.py
|
vikram0207/django-rest
|
eafec575999dce6859dc7b99177cff339b2bcbdd
|
[
"MIT"
] | 5
|
2021-04-08T21:58:18.000Z
|
2022-02-10T09:48:19.000Z
|
movies/models.py
|
vikram0207/django-rest
|
eafec575999dce6859dc7b99177cff339b2bcbdd
|
[
"MIT"
] | 1
|
2021-03-29T12:04:16.000Z
|
2021-03-29T12:04:16.000Z
|
from django.db import models
# Create Movie Model
class Movie(models.Model):
title = models.CharField(max_length=100)
genre = models.CharField(max_length=100)
year = models.IntegerField()
    created_at = models.DateTimeField(auto_now_add=True)  # When it was created
    updated_at = models.DateTimeField(auto_now=True)  # When it was updated
creator = models.ForeignKey('auth.User', related_name='movies', on_delete=models.CASCADE)
| 28.25
| 93
| 0.74115
|
28416af39880acfa6d59f202f5b42fc0882512ee
| 24,348
|
py
|
Python
|
cev/POSPatentTagger.py
|
michellekoeth/cev
|
1b44abe0faaf551d54ffc9d4231eee4ec3779a4a
|
[
"MIT"
] | 1
|
2018-01-12T21:14:35.000Z
|
2018-01-12T21:14:35.000Z
|
cev/POSPatentTagger.py
|
michellekoeth/cev
|
1b44abe0faaf551d54ffc9d4231eee4ec3779a4a
|
[
"MIT"
] | 1
|
2017-12-20T00:27:26.000Z
|
2017-12-20T00:27:26.000Z
|
cev/POSPatentTagger.py
|
michellekoeth/cev
|
1b44abe0faaf551d54ffc9d4231eee4ec3779a4a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#import operator
import re
from mechanize import Browser
import spacy
import csv
import time
############################################################################################
# Removes weird spacing from PatFT/AppFT claim sets, makes a new tag called CLMN for claim numbers
##############################################################################################
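# Illustrative before/after sketch (the tagged tuples are made up):
#
#     [('1.', 'CD'), ('A', 'DT'), ('method.', 'NN')]
#         -> [('1', 'CLMN'), ('A', 'DT'), ('method', 'NN')]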
def CleanTagged(taggedtuplelist):
fixedtaglist = []
for tagtuple in taggedtuplelist:
if re.match('\d+\.', tagtuple[0]):
clmno = re.match('\d+\.', tagtuple[0]).group(0)
# remove the period after the claim number
clmno = clmno[:-1]
# Fix the tag to be a custom "CLMN" tag
fixedtag = (str(clmno), "CLMN")
elif tagtuple[0].endswith("."):
fixedtag = (str(tagtuple[0][:-1]), str(tagtuple[1]))
else:
fixedtag = (str(tagtuple[0]),str(tagtuple[1]))
print str(fixedtag)
fixedtaglist.append(fixedtag)
return fixedtaglist
####################################################################################
# Given a docid (patent or app pub number) and a section type ("claims",
# "description", "priorpub", "appnumber" or "pgpub_and_claims"), returns the
# requested section. "description" returns one large string, "priorpub" returns
# the prior publication number, and "appnumber" returns the US application
# (serial) number. "claims" and "pgpub_and_claims" return a dictionary whose key
# "claimslist" is the list of claim strings; "pgpub_and_claims" also fills key
# "pgpubno" with the prior publication number (0 if there is none).
# NOTE: If run too often, this function chokes PatFT or AppFT. So always run bulk jobs in a try/except clause
###################################################################################
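# Illustrative usage sketch (the document numbers below are placeholders):
#
#     result = getPatDocSection("7654321", "pgpub_and_claims")
#     result['pgpubno'], result['claimslist']
#     desc = getPatDocSection("20120123456", "description")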
def getPatDocSection(docid,sectiontypestr):
# First get the PATFT or APPFT URL and set a flag to remember if it is a patent or app
URL = ""
doctype = ""
if re.search("\\d{11}",docid):
URL = "http://appft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PG01&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.html&r=1&f=G&l=50&s1=%22" + docid + "%22.PGNR.&OS=DN/" + docid + "&RS=DN/" + docid
doctype = "app"
#print "processing app no:" + docid
elif re.search("\\d{7}",docid):
URL = "http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=" + docid + ".PN.&OS=PN/" + docid + "&RS=PN/" + docid
doctype = "patent"
#print "processing patent no: " + docid
# now call up the webpage using mechanize.
mech = Browser()
mech.set_handle_robots(False)
mech.set_handle_refresh(False) # can sometimes hang without this
mech.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
page = mech.open(URL)
html = page.read()
claimlistfull = []
claimno =''
pgpubno = ""
if ((sectiontypestr == "claims") or (sectiontypestr =="pgpub_and_claims")): # Return a claimset from the requested docid or both claims & pgpub no
if doctype == "app":
appsplithtml = html.split("<CENTER><B><I>Claims</B></I></CENTER>")
split = appsplithtml[1].split("<CENTER><B><I>Description</B></I></CENTER>")
elif doctype == "patent":
patsplithtml = html.split("<CENTER><b><i>Claims</b></i></CENTER>")
split = patsplithtml[1].split("<CENTER><b><i>Description</b></i></CENTER>")
if sectiontypestr == "pgpub_and_claims":
if "Prior Publication Data" in html:
section = html.split("<CENTER><b>Prior Publication Data</b></CENTER>")
section = section[1].split("</TR> </TABLE>")
pub = re.findall("US\s{1}\d{11}\s{1}[A-Z][0-9]", section[0])
pgpubno = pub[0]
else:
pgpubno = 0
claimbody = split[0]
claimslist = claimbody.split("<BR><BR>")[1:]
if doctype == "patent":
claimslist.pop(0)
for claimstr in claimslist:
# Get rid of \n from end of claim, after the period
claimstr = re.sub('\.\\n','.',claimstr)
# Now get rid of all other \n's
claimsliststr = re.sub('\\n+',' ', claimstr)
# Now get rid of the space before the claim number - first get the substring with the claim number
if re.match('\s{1}\d+\.\s{2}', claimsliststr):
claimno = re.match('\s{1}\d+\.\s{2}', claimsliststr).group(0)
fixed = claimno[1:-1]
claimsliststr = claimsliststr.replace(claimno, fixed)
elif re.match('\s{1}\d+\.\s{1}', claimsliststr):
claimno = re.match('\s{1}\d+\.\s{1}', claimsliststr).group(0)
fixed = claimno[1:]
claimsliststr = claimsliststr.replace(claimno, fixed)
# Now get rid of triple spaces
claimsliststr = claimsliststr.replace(" "," ")
claimlistfull.append(claimsliststr)
# Get rid of the _<HR>\n at the end of the last claim:
if doctype == "patent":
claimlistfull[-1] = claimlistfull[-1][:-6]
return {'pgpubno':pgpubno, 'claimslist': claimlistfull}
if doctype == "app":
claimlistfull[-1] = claimlistfull[-1][:-10]
return {'claimslist': claimlistfull}
elif sectiontypestr == "description":
if doctype == "app":
appsplithtml = html.split("<CENTER><B><I>Description</B></I></CENTER>")
split = appsplithtml[1].split("<BR><BR><CENTER><B>* * * * *</B></CENTER>")
elif doctype == "patent":
patsplithtml = html.split("<CENTER><b><i>Description</b></i></CENTER>")
split = patsplithtml[1].split("<BR><BR><CENTER><b>* * * * *</b></CENTER>")
descbody = split[0].replace("<BR><BR>","")
descbody = descbody.replace("<HR>","")
return descbody
elif (doctype == "patent" and sectiontypestr == "priorpub"):
if "Prior Publication Data" in html:
section = html.split("<CENTER><b>Prior Publication Data</b></CENTER>")
section = section[1].split("</TR> </TABLE>")
pub = re.findall("US\s{1}\d{11}\s{1}[A-Z][0-9]", section[0])
return pub[0]
else:
return 0
elif (doctype == "patent" and sectiontypestr == "appnumber"):
# Get beginning of desired section
section = html.split("Appl. No.:")
# Get ending of desired section
section = section[1].split("</b></TD></TR>")
# now section[0] is whats inbetween, use regex to get the actual text we want (app num)
pub = re.findall("\d{2}\/\d{3},\d{3}", section[0])
# Clean up the number to remove all slashes and commas
messyappno = pub[0]
appnotoreturn = messyappno[0:2] + messyappno[3:6] + messyappno[7:]
return appnotoreturn
elif (doctype == "app" and sectiontypestr == "appnumber"):
        # The differences between the app text and the patent text are small --
        # basically, some of the HTML tags are capitalized (like <B>) and, for some reason,
        # the app number does not include commas to set off digit triples
# Get beginning of desired section
section = html.split("Appl. No.:")
# Get ending of desired section
section = section[1].split("</B></TD></TR>")
# now section[0] is whats inbetween, use regex to get the actual text we want (app num)
pub = re.findall("\d{2}\/\d{6}", section[0])
# Clean up the number to remove all slashes and commas
messyappno = pub[0]
appnotoreturn = messyappno[0:2] + messyappno[3:]
return appnotoreturn
else:
print "Unrecognized requested section type. Returning"
        return None
##########################################################################
# Gets claims for a given docid and POSTags them using spaCy python tagger
##########################################################################
def POSPatentTagger(docid):
claimtagged = []
taggedclaimset = []
claimslist = getPatDocSection(docid,"claims")
for claimstr in claimslist:
# POSTag with SpaCy
nlp = spacy.load('en')
doc = nlp(unicode(claimstr,'utf_8'))
for word in doc:
# this writes the tagged tuple for the word into a list. Three tuple items:
# The text of the word, the lemma of the word and the tag of the word
claimtagged.append((word.text, word.lemma_,word.tag_))
#print(word.text, word.lemma, word.lemma_, word.tag, word.tag_, word.pos, word.pos_)
# aggregate tagged claim into the set
taggedclaimset.append(claimtagged)
# Fresh and clean, number one tagged claimslist when it steps out on the scene!!
return taggedclaimset
############################################################################################
# TODO: Function that returns the first independent method/apparatus/CRM (beuregard) or system
# claim given an input claimset and the claim type to find.
############################################################################################
#########################################################################################
# Gets pgpub id (just the number part) given a patent number if a pgpub exists
#########################################################################################
def getAppNo(patno):
# given a patent number, get the corresponding PgPub docid if one exists
num = getPatDocSection(patno,"priorpub")
if num <> 0:
return num[3:14]
else:
return num
##########################################################################
# Gets claims for a given patent doc, finds the corresponding claims from
# the PgPub (if one exists) and POSTags both claim sets using spaCy python tagger
##########################################################################
def PatPubClaimEvTag(patno):
claimtagged = []
pattaggedclaimset = []
apptaggedclaimset = []
returnset = getPatDocSection(patno,"pgpub_and_claims")
patclaimslist = returnset['claimslist']
if returnset['pgpubno'] <> 0:
pgpubno = returnset['pgpubno'][3:14]
else:
pgpubno = returnset['pgpubno']
appclaimslist = getPatDocSection(pgpubno,'claims')['claimslist']
nlp = spacy.load('en')
for claimstr in patclaimslist:
# POSTag with SpaCy
doc = nlp(unicode(claimstr,'utf_8'))
for word in doc:
# this writes the tagged tuple for the word into a list. Three tuple items:
# The text of the word, the lemma of the word and the tag of the word
claimtagged.append((word.text, word.lemma_,word.tag_))
#print(word.text, word.lemma, word.lemma_, word.tag, word.tag_, word.pos, word.pos_)
# aggregate tagged claim into the set
pattaggedclaimset.append(claimtagged)
claimtagged = []
claimtagged = []
for claimstr in appclaimslist:
# POSTag with SpaCy
doc = nlp(unicode(claimstr,'utf_8'))
for word in doc:
# this writes the tagged tuple for the word into a list. Three tuple items:
# The text of the word, the lemma of the word and the tag of the word
claimtagged.append((word.text, word.lemma_,word.tag_))
#print(word.text, word.lemma, word.lemma_, word.tag, word.tag_, word.pos, word.pos_)
# aggregate tagged claim into the set
apptaggedclaimset.append(claimtagged)
claimtagged = []
# Fresh and clean, number one tagged claimslist when it steps out on the scene!!
return {'PatClaims':pattaggedclaimset,"AppClaims":apptaggedclaimset,"PatClaimsUntagged":patclaimslist,"AppClaimsUntagged":appclaimslist}
##########################################################################
# PatPubClaimEv - only returns the text claim lists (not tagged) for a given patent num and
# its corresponding PgPub if there is one
##########################################################################
def PatPubClaimEv(patno):
claimtagged = []
pattaggedclaimset = []
apptaggedclaimset = []
returnset = getPatDocSection(patno,"pgpub_and_claims")
patclaimslist = returnset['claimslist']
if returnset['pgpubno'] <> 0:
pgpubno = returnset['pgpubno'][3:14]
else:
pgpubno = returnset['pgpubno']
appclaimslist = getPatDocSection(pgpubno,'claims')['claimslist']
return {'PatClaims':patclaimslist,"AppClaims":appclaimslist}
####################################################################################
# Given a patent number, finds the claims for that patent and for the corresponding
# pgpub (if one exists) and returns them in a dictionary; the commented-out driver
# at the bottom of this file writes the results out to a CSV file
# NOTE: If run too often, this function chokes PatFT or AppFT. So always run bulk jobs in a try/except clause
###################################################################################
def getOnePatAppClaimset(patid):
# First get the PATFT URL
URL = "http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=" + patid + ".PN.&OS=PN/" + patid + "&RS=PN/" + patid
# now call up the webpage using mechanize.
mech = Browser()
mech.set_handle_robots(False)
mech.set_handle_refresh(False) # can sometimes hang without this
mech.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
page = mech.open(URL)
html = page.read()
claimlistfull = []
appclaimslist = []
patclaimslist = []
concatclaim = ""
claimno =''
pgpubno = ""
try:
patsplithtml = html.split("<CENTER><b><i>Claims</b></i></CENTER>")
split = patsplithtml[1].split("<CENTER><b><i>Description</b></i></CENTER>")
except:
print "PatFT is *mad* - Waiting 15 sec before next query!"
time.sleep(15)
try:
page = mech.open(URL)
html = page.read()
patsplithtml = html.split("<CENTER><b><i>Claims</b></i></CENTER>")
split = patsplithtml[1].split("<CENTER><b><i>Description</b></i></CENTER>")
except:
print "PatFT is REALLY *mad* - Waiting 60 sec before next query!"
time.sleep(60)
page = mech.open(URL)
html = page.read()
patsplithtml = html.split("<CENTER><b><i>Claims</b></i></CENTER>")
split = patsplithtml[1].split("<CENTER><b><i>Description</b></i></CENTER>")
if "Prior Publication Data" in html:
section = html.split("<CENTER><b>Prior Publication Data</b></CENTER>")
section = section[1].split("</TR> </TABLE>")
pub = re.findall("US\s{1}\d{11}\s{1}[A-Z][0-9]", section[0])
pgpubno = pub[0]
else:
# Return a zero to indicate that the input patent number does not have a corresponding PgPub
pgpubno = 0
return 0
claimbody = split[0]
    # Dividing the claim list by double BR will not always work (it will fail on really long claims that PatFT chunks into portions using <BR><BR>)
    # An example patent where this will fail is USPN 8,654,878. A better way to split the claimbody string is to regexp on claim numbers, or a combination of both
# so for each BR section, LOOK for an intro number, else concatenate with the portion before..
claimslist = claimbody.split("<BR><BR>")[1:]
claimslist.pop(0)
for idx, claimstr in enumerate(claimslist):
# Get rid of \n from end of claim, after the period
claimstr = re.sub('\.\\n','.',claimstr)
# Now get rid of all other \n's
claimsliststr = re.sub('\\n+',' ', claimstr)
# Now get rid of the space before the claim number - first get the substring with the claim number
if re.match('\s{1}\d+\.\s{2}', claimsliststr):
claimno = re.match('\s{1}\d+\.\s{2}', claimsliststr).group(0)
fixed = claimno[1:-1]
claimsliststr = claimsliststr.replace(claimno, fixed)
            concatclaim = claimsliststr # Keep track of claim so far in case it is a long claim appearing across multiple <BR><BR> blocks
if idx <> (len(claimslist)-1): # first, check that there is a next chunk
if not (re.match('\s{1}\d+\.\s{1,}', claimslist[idx+1][0:10])): # if the next chunk does not have a number, continue
continue
elif re.match('\s{1}\d+\.\s{1}', claimsliststr):
claimno = re.match('\s{1}\d+\.\s{1}', claimsliststr).group(0)
fixed = claimno[1:]
claimsliststr = claimsliststr.replace(claimno, fixed)
            concatclaim = claimsliststr # Keep track of claim so far in case it is a long claim appearing across multiple <BR><BR> blocks
if idx <> (len(claimslist)-1): # first, check that there is a next chunk
if not (re.match('\s{1}\d+\.\s{1,}', claimslist[idx+1][0:10])): # if the next chunk does not have a number, continue
continue
else: # There is not a number in this section! It is an intermediate claim chunk! concatenate with previous claim chunk and check if its the last chunk
concatclaim += claimsliststr
#print "Non number chunk found. Concatenated so far is: " + concatclaim
# First check if we are at the end of the claimslist array - if so, assume it's the last text chunk of this claim and process the claim.
if idx <> (len(claimslist)-1): # Then not last claim - look to see if next chunk has a number if so, process the claim. If not, continue.
if not (re.match('\s{1}\d+\.\s{1,}', claimslist[idx+1][0:10])):
continue
else: # Then multi chunk claim has been processed. reset the claimsliststr to the concatenated claim chunk string
claimsliststr = concatclaim
else: # Then because it is the last element of the claimlist, we are done processing this multi-chunk claim. reset the claimsliststr to the concatenated claim chunk string
claimsliststr = concatclaim
# Now get rid of triple spaces
claimsliststr = claimsliststr.replace(" "," ")
claimlistfull.append(claimsliststr)
# Get rid of the _<HR>\n at the end of the last claim:
claimlistfull[-1] = claimlistfull[-1][:-6]
patclaimslist = claimlistfull
# Now get Application claims
docidapp = pgpubno[3:14]
#print docidapp
URL = "http://appft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PG01&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.html&r=1&f=G&l=50&s1=%22" + docidapp + "%22.PGNR.&OS=DN/" + docidapp + "&RS=DN/" + docidapp
page = mech.open(URL)
html = page.read()
claimlistfull = []
claimno =''
concatclaim = ""
claimsliststr = ""
appsplithtml = html.split("<CENTER><B><I>Claims</B></I></CENTER>")
try:
split = appsplithtml[1].split("<CENTER><B><I>Description</B></I></CENTER>")
except IndexError:
print "Index Error occured for: " + str(docidapp)
print "AppFT is *mad* - Waiting 30 sec before next query!"
time.sleep(30)
page = mech.open(URL)
html = page.read()
appsplithtml = html.split("<CENTER><B><I>Claims</B></I></CENTER>")
split = appsplithtml[1].split("<CENTER><B><I>Description</B></I></CENTER>")
claimbody = split[0]
claimslist = claimbody.split("<BR><BR>")[1:]
for idx,claimstr in enumerate(claimslist):
# Get rid of \n from end of claim, after the period
claimstr = re.sub('\.\\n','.',claimstr)
# Now get rid of all other \n's
claimsliststr = re.sub('\\n+',' ', claimstr)
# Now get rid of the space before the claim number - first get the substring with the claim number
if re.match('\s{1}\d+\.\s{2}', claimsliststr):
claimno = re.match('\s{1}\d+\.\s{2}', claimsliststr).group(0)
fixed = claimno[1:-1]
claimsliststr = claimsliststr.replace(claimno, fixed)
            concatclaim = claimsliststr # Keep track of claim so far in case it is a long claim appearing across multiple <BR><BR> blocks
if idx <> (len(claimslist)-1): # first, check that there is a next chunk
if not (re.match('\s{1}\d+\.\s{1,}', claimslist[idx+1][0:10])): # if the next chunk does not have a number, continue
continue
elif re.match('\s{1}\d+\.\s{1}', claimsliststr):
claimno = re.match('\s{1}\d+\.\s{1}', claimsliststr).group(0)
fixed = claimno[1:]
claimsliststr = claimsliststr.replace(claimno, fixed)
            concatclaim = claimsliststr # Keep track of claim so far in case it is a long claim appearing across multiple <BR><BR> blocks
if idx <> (len(claimslist)-1): # first, check that there is a next chunk
if not (re.match('\s{1}\d+\.\s{1,}', claimslist[idx+1][0:10])): # if the next chunk does not have a number, continue
continue
else: # There is not a number in this section! It is an intermediate claim chunk! concatenate with previous claim chunk and check if its the last chunk
concatclaim += claimsliststr
#print "Non number chunk found. Concatenated so far is: " + concatclaim
# First check if we are at the end of the claimslist array - if so, assume it's the last text chunk of this claim and process the claim.
if idx <> (len(claimslist)-1): # Then not last claim - look to see if next chunk has a number if so, process the claim. If not, continue.
if not (re.match('\s{1}\d+\.\s{1,}', claimslist[idx+1][0:10])):
continue
else: # Then multi chunk claim has been processed. reset the claimsliststr to the concatenated claim chunk string
claimsliststr = concatclaim
else: # Then because it is the last element of the claimlist, we are done processing this multi-chunk claim. reset the claimsliststr to the concatenated claim chunk string
claimsliststr = concatclaim
# Now get rid of triple spaces
claimsliststr = claimsliststr.replace(" "," ")
claimlistfull.append(claimsliststr)
# Get rid of the _<HR>\n at the end of the last claim:
claimlistfull[-1] = claimlistfull[-1][:-10]
appclaimslist = claimlistfull
return {'patno': str(patid), 'appno':str(docidapp), 'patclaims':patclaimslist, 'appclaims':appclaimslist}
########## TEST FUNCTION
#retdict = getOnePatAppClaimset("8982981")
#print retdict['patclaims']
#file2writeto = "tricky_patappclaimlist.csv"
# First write the headers to our output file
#with open(file2writeto, 'w') as csvfile:
# fieldnames = ['pat_no', 'app_no','pat_claim','app_claim']
# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
# writer.writeheader()
# Read in each patent from the input file and process for a claimset
#with open("tricky_pats.csv", 'rb') as csvfile:
# reader = csv.reader(csvfile, delimiter=',')
# reader.next() # skip header row
# for row in reader:
# returnhash = getOnePatAppClaimset(row[0])
# if returnhash != 0:
# # so patent had a pgpub, and both corresponding claimsets were returned
# # Write out all of the claims row by row into the CSV file
# patno = returnhash['patno']
# appno = returnhash['appno']
# patclaims = returnhash['patclaims']
# appclaims = returnhash['appclaims']
# with open(file2writeto, 'a') as csvfile:
# fieldnames = ['pat_no', 'app_no','pat_claim','app_claim']
# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
# for claim in patclaims:
# writer.writerow({'pat_no': patno, 'app_no':appno, 'pat_claim':claim, 'app_claim':""})
# for claim in appclaims:
# writer.writerow({'pat_no': patno, 'app_no':appno, 'pat_claim':"", 'app_claim':claim})
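# --- Illustrative sketch (added for clarity; not part of the original scraper) ---
# The claim-number handling above amounts to: flatten newlines, then strip the
# stray leading space and the extra space after the claim number. A minimal,
# hedged example on a made-up claim chunk:
#
# import re
# sample = ' 1.  A widget comprising:\na housing; and\na lid.'
# sample = re.sub('\.\\n', '.', sample)   # drop a newline that follows a period
# sample = re.sub('\\n+', ' ', sample)    # flatten the remaining newlines
# m = re.match('\s{1}\d+\.\s{2}', sample)
# if m:
#     sample = sample.replace(m.group(0), m.group(0)[1:-1])
# # sample -> '1. A widget comprising: a housing; and a lid.'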
| 55.210884
| 209
| 0.58473
|
167f32e44ef29fc83080e4415d9aab0fb4c24833
| 11,955
|
py
|
Python
|
tests/grammar/python/base.py
|
philip-h-dye/docpt-parser
|
abb5ea6566dac4cb77851d0cff69ed4aca3b804b
|
[
"MIT"
] | 2
|
2021-05-01T02:34:05.000Z
|
2022-02-13T06:41:36.000Z
|
tests/grammar/python/base.py
|
philip-h-dye/docpt-parser
|
abb5ea6566dac4cb77851d0cff69ed4aca3b804b
|
[
"MIT"
] | null | null | null |
tests/grammar/python/base.py
|
philip-h-dye/docpt-parser
|
abb5ea6566dac4cb77851d0cff69ed4aca3b804b
|
[
"MIT"
] | null | null | null |
parse_debug = False
record = False
analyzing = False
#------------------------------------------------------------------------------
from dataclasses import dataclass
import unittest
from prettyprinter import cpprint as pp
from arpeggio import ParsingExpression, EOF, Sequence, OrderedChoice, OneOrMore
from arpeggio import ParserPython, flatten, ParseTreeNode, NonTerminal, Terminal
from docopt_parser.parsetreenodes import nodes_equal
#------------------------------------------------------------------------------
from grammar.python.common import t_eof, p_linefeed, p_space
from util import tprint, write_scratch
from p import pp_str
#------------------------------------------------------------------------------
# Prevent explicit prints. If exception, simply re-raise. Assertions and
# exception handling otherwise unchanged. Must not be used to alter
# control-flow or functionality.
#
@dataclass
class Quiet(object):
grammar : bool = False # nothing applicable yet
parse : bool = False # prevents print on NoMatch
verify : bool = False # nothing applicable yet
analyze : bool = False # prevents all explicit prints
QUIET_NONE = Quiet()
#------------------------------------------------------------------------------
class Test_Base ( unittest.TestCase ) :
MULTIPLE_SEP_LINEFEED = p_linefeed
def setUp(self):
if not hasattr(self, 'parse_debug'):
self.parse_debug = False
if not hasattr(self, 'record'):
self.record = False
if not hasattr(self, 'analyzing'):
self.analyzing = False
if not hasattr(self, 'debug'):
self.debug = False
if not hasattr(self, 'show'):
self.show = False
tprint._on = self.show or self.debug is not False
if self.record :
write_scratch( _clean=True )
#--------------------------------------------------------------------------
def single ( self, rule, text, expect, skipws=True, lead=None,
quiet=QUIET_NONE ):
"""
<lead> : ( text, expect ) for leading value to prefix the text
and expect. Required when rule has
lookbehind. For '(?<=\s)foo', a simple
lead would be ( ' ', t_space ), as
the lead includes whitespace, skipws=False
is also necessary.
"""
# The model unfortunately will show 'rule' as a function
# rather than its expression. It is tempting to then instantiate
# it as rule(); the parse model would then show it nicely.
#
# Unfortunately, the resulting parse tree drops the node for rule.
#
body = [ rule, EOF ]
if lead is not None :
( text_, expect_ ) = lead
text = text_ + text
body.insert(0, expect_ )
def grammar():
return Sequence( ( *body, ), rule_name='grammar', skipws=skipws )
# print(f"\n: single : text = '{text}'")
self.verify_grammar ( grammar, text, [ expect ], skipws=skipws,
quiet=quiet )
#--------------------------------------------------------------------------
def multiple ( self, rule, text, expect, n=1, sep=p_linefeed, lead=None,
skipws=True, quiet=QUIET_NONE ):
"""
<sep> : ParseSpec separator between each element. DEFAULT p_linefeed
from grammar.python.common.p_linefeed,
i.e. ParseSpec( LINEFEED, linefeed_m, t_linefeed )
<lead> : Optional leading value to prefix each text element. This
is necessary when the rule has lookbehind. For example,
'(?<=\s)foo', a sufficient lead would be p_space, i.e.
ParseSpec( ' ', space, t_space ). Being whitespace,
skipws=False is also necessary.
"""
text_list = [ text ]
rule_list = [ rule ]
expect_list = [ expect ]
if lead is not None :
text = lead.text + text
rule_list.insert( 0, lead.rule )
expect_list.insert( 0, lead.expect )
if n > 1 :
text_list += [ sep.text ]
rule_list += [ sep.rule ]
expect_list += [ sep.expect ]
text_list *= n
# NOT with OrderedChoice # rule_list *= n
expect_list *= n
del text_list[-1]
# NOT with OrderedChoice # del rule_list[-1]
del expect_list[-1]
# rule not tripled since using OrderedChoice. Loosening allows
# invalid texts to successfully parse but then be trapped
# in verify()
text = ''.join(text_list)
def grammar():
return Sequence (
( OneOrMore( OrderedChoice( [ *rule_list ] , ) ) , EOF ) ,
rule_name='grammar', skipws=False )
# print(f"\n: multiple : text = '{text}'")
self.verify_grammar ( grammar, text, expect_list,
quiet=quiet )
#--------------------------------------------------------------------------
def verify_grammar ( self, grammar, text, expect_list, skipws=True,
quiet=QUIET_NONE ):
self.grammar = grammar()
self.skipws = skipws
self.text = text
self.expect = NonTerminal( self.grammar, [ *expect_list, t_eof ] )
self.parser = ParserPython ( grammar, ws=" \t\r", skipws=self.skipws,
debug=self.parse_debug, reduce_tree=False )
self.parse_and_verify( self.text, self.expect, quiet=quiet )
#--------------------------------------------------------------------------
def parse_and_verify ( self, text, expect, quiet=QUIET_NONE ):
parsed = self.parse ( text, expect, quiet=quiet )
self.verify ( text, expect, parsed, quiet=quiet )
#--------------------------------------------------------------------------
def parse ( self, text, expect, quiet=QUIET_NONE ):
if not hasattr(self, 'text') or self.text != text :
self.text = text
if not hasattr(self, 'expect') or not nodes_equal(expect, self.expect) :
self.expect = expect
# tprint(f"\nOptions :\n{text}")
# written here and in verify since they may be called independently
if self.record :
write_scratch( grammar=self.grammar, text=self.text,
expect=self.expect, expect_f=flatten(self.expect),
model=self.parser.parser_model, )
try :
# print(f"\n: text = '{text}'")
self.parsed = self.parser.parse(text)
# tprint("[parsed]") ; pp(self.parsed)
if self.record :
write_scratch( parsed=self.parsed )
except Exception as e :
if not quiet.parse :
print("\n"
f"[expect]\n{pp_str(expect)}\n\n"
f"text = '{text}' :\n\n"
f"Parse FAILED :\n"
f"{str(e)}" )
raise
return self.parsed
#--------------------------------------------------------------------------
def verify ( self, text, expect, parsed, quiet=QUIET_NONE ):
if not hasattr(self, 'text') or self.text != text :
self.text = text
if not hasattr(self, 'expect') or not nodes_equal(expect, self.expect) :
self.expect = expect
if not hasattr(self, 'parsed') or not nodes_equal(parsed, self.parsed) :
self.parsed = parsed
if self.record :
write_scratch( grammar=self.grammar, text=self.text,
expect=self.expect, expect_f=flatten(self.expect),
model=self.parser.parser_model, )
if self.analyzing :
self.analyze(quiet=quiet)
assert nodes_equal(parsed, expect), \
( f"text = '{text}' :\n"
f"[expect]\n{pp_str(expect)}\n"
f"[parsed]\n{pp_str(parsed)}" )
#--------------------------------------------------------------------------
def analyze ( self, quiet=QUIET_NONE ):
# lose 'self.' for legibility
text = self.text
expect = self.expect
parsed = self.parsed
# 'nth_option_line' is specific to docopt/test_line; within the outer
# enclosing context it would obviously be deeper and certainly not a
# descendant of the first outer node. Left as a starting point for when
# focused, detailed analysis is needed.
nth_option_line = 0
expect = expect[0][0] # [0][ nth_option_line ] # [0] [0]
parsed = parsed[0][0] # [0][ nth_option_line ] # [0] [0]
expect_terminal = isinstance(expect, Terminal)
parsed_terminal = isinstance(parsed, Terminal)
if not quiet.analyze :
if expect_terminal :
print(f"\n[expect] rule '{expect.rule_name}', Terminal = {pp_str(expect)}")
else :
print(f"\n[expect] rule '{expect.rule_name}' with {len(expect)} children")
if parsed_terminal :
print(f"\n[parsed] rule '{parsed.rule_name}', Terminal = {pp_str(parsed)}")
else :
print(f"\n[parsed] rule '{parsed.rule_name}' with {len(parsed)} children")
if expect_terminal or parsed_terminal :
assert nodes_equal(parsed, expect), \
( f"Detail nodes are not equal.\n"
f"[text] '{text}'\n"
f"[expect] rule '{expect.rule_name}'\n{pp_str(expect)}\n"
f"[parsed] rule '{parsed.rule_name}'\n{pp_str(parsed)}\n" )
return
if not quiet.analyze :
print(f"[expect] rule '{expect.rule_name}' with {len(expect)} children")
print(f"[parsed] rule '{parsed.rule_name}' with {len(parsed)} children")
assert parsed.rule_name == expect.rule_name, \
( f"Detail node rule names not equal.\n"
f"[text] '{text}'\n"
f"[expect] rule '{expect.rule_name}' with {len(expect)} children\n"
f"[parsed] rule '{parsed.rule_name}' with {len(parsed)} children\n"
f"[expect]\n{pp_str(expect)}\n"
f"[parsed]\n{pp_str(parsed)}" )
for i in range(min( len(expect), len(parsed) )):
assert nodes_equal( parsed[i], expect[i]), \
( f"Detail node child {i} is not equal.\n"
f"[text] '{text}'\n"
f"[expect] rule '{expect.rule_name}' with {len(expect)} children\n"
f"[parsed] rule '{parsed.rule_name}' with {len(parsed)} children\n"
f"[expect] [{i}]\n{pp_str(expect[i])}\n"
f"[parsed] [{i}]\n{pp_str(parsed[i])}" )
assert not ( len(expect) > len(parsed) ), \
( f"Expect has more children than parsed, earlier children equal.\n"
f"[text] '{text}'\n"
f"[expect] rule '{expect.rule_name}' with {len(expect)} children\n"
f"[parsed] rule '{parsed.rule_name}' with {len(parsed)} children\n"
f"[expect] [{i}]\n{pp_str(expect[len(parsed):])}" )
assert not ( len(expect) < len(parsed) ), '\n' + \
( f"Parsed has more children than expect, earlier children equal.\n"
f"[text] '{text}'\n"
f"[expect] rule '{expect.rule_name}' with {len(expect)} children\n"
f"[parsed] rule '{parsed.rule_name}' with {len(parsed)} children\n"
f"[parsed] [{i}]\n{pp_str(parsed[len(expect):])}" )
#------------------------------------------------------------------------------
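# --- Illustrative usage sketch (added; not part of the original test base) ---
# A subclass would exercise single()/multiple() roughly as below. The names
# `space` and `t_space` are assumed to exist in grammar.python.common (they are
# referenced in the docstrings above) and are hypothetical here.
#
# from grammar.python.common import space, t_space
#
# class Test_Space ( Test_Base ) :
#     def test_space__single(self):
#         self.single( space, ' ', t_space, skipws=False )
#     def test_space__multiple(self):
#         self.multiple( space, ' ', t_space, n=3, sep=p_linefeed, skipws=False )
#
#------------------------------------------------------------------------------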
| 40.802048
| 91
| 0.502217
|
8e02c5d82523732f6ca6cfd3a9fe6bd95a8fecb5
| 19,394
|
py
|
Python
|
tests/test_models.py
|
sergray/marshmallow-objects
|
cc687de0b8689a86a0f525d9b24cf0bf64ec1383
|
[
"MIT"
] | 32
|
2018-03-12T19:59:15.000Z
|
2019-12-24T10:47:08.000Z
|
tests/test_models.py
|
sergray/marshmallow-objects
|
cc687de0b8689a86a0f525d9b24cf0bf64ec1383
|
[
"MIT"
] | 29
|
2018-02-06T23:55:47.000Z
|
2020-02-10T21:16:30.000Z
|
tests/test_models.py
|
sv-tools/marshmallow-objects
|
b645a77b8e00f5b72fc9e2073f64a947a80203d6
|
[
"MIT"
] | 3
|
2018-07-23T19:11:01.000Z
|
2019-08-20T17:21:41.000Z
|
import collections
import copy
import json
import unittest
try:
import yaml
skip_yaml = False
except ImportError:
skip_yaml = True
import marshmallow_objects as marshmallow
class A(marshmallow.Model):
test_field = marshmallow.fields.Str(missing="test_value", allow_none=False)
tag_field = marshmallow.fields.Str(load_only=True)
@marshmallow.post_load
def set_tag_field(self, data, **kwargs):
data.tag_field = data.test_field
return data
class Meta:
ordered = True
def on_bind_field(self, field_name, field_obj):
pass
def handle_error(self, error, data, many, partial):
pass
class AMethod(marshmallow.Model):
yes_no = marshmallow.fields.Method(serialize="serialize_yes_no", deserialize="deserialize_yes_no",)
def serialize_yes_no(self, obj):
return "Yes" if obj else "No"
def deserialize_yes_no(self, obj):
return obj.lower() in ["y", "yes"]
class B(marshmallow.Model):
test_field = marshmallow.fields.Str(allow_none=True)
a = marshmallow.NestedModel(A, allow_none=False, required=True)
class C(marshmallow.Model):
a = marshmallow.NestedModel(A, many=True)
class MultiInheritance(A, B, C):
pass
class CustomSchema(marshmallow.Schema):
def custom_method(self):
pass
class D(marshmallow.Model):
__schema_class__ = CustomSchema
def serialize_context_field(obj, context=None):
return obj.test_field == context["value"]
def deserialize_context_field(obj, context=None):
return obj == context["value"]
class AContext(marshmallow.Model):
test_field = marshmallow.fields.Str()
test_context_field = marshmallow.fields.Function(
serialize=serialize_context_field, deserialize=deserialize_context_field,
)
class BContext(marshmallow.Model):
test_field = marshmallow.fields.Str()
a = marshmallow.NestedModel(AContext)
class TestModelMeta(unittest.TestCase):
def test_schema_name(self):
self.assertEqual("ASchema", A.__schema_class__.__name__)
def test_schema_class(self):
assert issubclass(A.__schema_class__, marshmallow.Schema)
assert issubclass(MultiInheritance.__schema_class__, marshmallow.Schema)
def test_model_class(self):
assert issubclass(A.__schema_class__.__model_class__, marshmallow.Model)
assert issubclass(MultiInheritance.__schema_class__.__model_class__, marshmallow.Model,)
def test_tag_processor(self):
assert hasattr(A.__schema_class__, "set_tag_field")
assert hasattr(MultiInheritance.__schema_class__, "set_tag_field")
def test_meta(self):
assert hasattr(A.__schema_class__, "Meta")
self.assertEqual(id(A.Meta), id(A.__schema_class__.Meta))
assert not hasattr(B, "Meta")
assert hasattr(B.__schema_class__, "Meta")
self.assertEqual(
id(MultiInheritance.Meta), id(MultiInheritance.__schema_class__.Meta),
)
assert hasattr(MultiInheritance.__schema_class__, "Meta")
def test_on_bind_field(self):
self.assertEqual(id(A.on_bind_field), id(A.__schema_class__.on_bind_field))
self.assertEqual(
id(MultiInheritance.on_bind_field), id(MultiInheritance.__schema_class__.on_bind_field),
)
def test_handle_error(self):
self.assertEqual(id(A.handle_error), id(A.__schema_class__.handle_error))
self.assertEqual(
id(MultiInheritance.handle_error), id(MultiInheritance.__schema_class__.handle_error),
)
def test_schema_class_override(self):
self.assertTrue(issubclass(D.__schema_class__, CustomSchema), D.__schema_class__.__bases__)
class TestModel(unittest.TestCase):
def test_tag_field(self):
a = A(test_field="tag_value", tag_field="fake")
self.assertEqual("tag_value", a.tag_field)
def test_default_value(self):
a = A()
self.assertEqual("test_value", a.test_field)
def test_value(self):
a = A(test_field="foo")
self.assertEqual("foo", a.test_field)
def test_prohibited_none_value(self):
self.assertRaises(marshmallow.ValidationError, B)
def test_nested_object(self):
b = B(a=A(test_field="123"))
self.assertEqual("123", b.a.test_field)
def test_nested_dict(self):
b = B(a=dict(test_field="123"))
self.assertIsInstance(b.a, A)
self.assertEqual("123", b.a.test_field)
def test_nested_dict_many(self):
c = C(a=[dict(test_field="1"), dict(test_field="2")])
self.assertEqual(2, len(c.a))
def test_nested_model_many(self):
c = C(a=[A(test_field="1"), A(test_field="2")])
self.assertEqual(2, len(c.a))
def test_load_model_many(self):
a_list = A.load([dict(test_field="1"), dict(test_field="2")], many=True)
self.assertEqual(2, len(a_list))
self.assertEqual("1", a_list[0].test_field)
self.assertEqual("2", a_list[1].test_field)
def test_partial(self):
self.assertRaises(marshmallow.ValidationError, B)
b = B(partial=True)
self.assertIsNone(b.a)
def test_validate(self):
b = B.validate({})
self.assertIn("a", b)
def test_validate_partial(self):
class APartial(marshmallow.Model):
test_field = marshmallow.fields.Str(required=True)
email = marshmallow.fields.Email()
a = APartial.validate(dict(email="foo"), partial=True)
self.assertNotIn("test_field", a)
self.assertIn("email", a)
def test_eq(self):
a1 = A(test_field="1")
a2 = A(test_field="1")
self.assertNotEqual(id(a1), id(a2))
self.assertEqual(a1, a2)
def test_not_eq(self):
a1 = A(test_field="1")
a2 = A(test_field="2")
self.assertNotEqual(a1, a2)
def test_not_eq_classes(self):
class A1(marshmallow.Model):
pass
class A2(marshmallow.Model):
pass
a1 = A1()
a2 = A2()
self.assertNotEqual(a1, a2)
def test_copy(self):
a1 = A(test_field="1")
a2 = copy.copy(a1)
a3 = copy.deepcopy(a2)
self.assertIs(a1.test_field, a2.test_field, a3.test_field)
self.assertNotEqual(id(a1), id(a2), id(a3))
self.assertEqual(a1, a2, a3)
def test_repr(self):
a = A()
self.assertIn("test_value", repr(a))
def test_str(self):
a = A()
self.assertIn("test_value", str(a))
def test_yes_no(self):
a = AMethod(yes_no="Y")
self.assertTrue(a.yes_no)
self.assertEqual({"yes_no": "Yes"}, a.dump())
a = AMethod(yes_no="NOOOO")
self.assertFalse(a.yes_no)
def test_dump_mode_on(self):
a = A()
self.assertFalse(a.__dump_mode__)
with a.__dump_mode_on__():
self.assertTrue(a.__dump_mode__)
with a.__dump_mode_on__():
self.assertTrue(a.__dump_mode__)
self.assertTrue(a.__dump_mode__)
self.assertFalse(a.__dump_mode__)
class TestModelLoadDump(unittest.TestCase):
def setUp(self):
self.data = dict(test_field="foo")
def test_load_dict(self):
a = A.load(self.data)
self.assertEqual("foo", a.test_field)
def test_load_dict_partial(self):
self.assertRaises(marshmallow.ValidationError, B)
b = B.load({}, partial=True)
self.assertIsNone(b.a)
def test_load_dict_nested(self):
ddata = dict(test_field="foo", a=dict(test_field="bar"))
b = B.load(ddata)
self.assertEqual("foo", b.test_field)
self.assertEqual("bar", b.a.test_field)
def test_dump_dict(self):
a = A(test_field="foo")
self.assertEqual(self.data, a.dump())
def test_load_json(self):
jdata = json.dumps(self.data)
a = A.load_json(jdata)
self.assertEqual("foo", a.test_field)
def test_load_json_partial(self):
self.assertRaises(marshmallow.ValidationError, B)
b = B.load_json("{}", partial=True)
self.assertIsNone(b.a)
def test_dump_json(self):
a = A(test_field="foo")
jdata = json.loads(a.dump_json())
self.assertEqual(self.data, jdata)
@unittest.skipIf(skip_yaml, "PyYaml is not installed")
def test_load_yaml(self):
ydata = yaml.dump(self.data)
a = A.load_yaml(ydata)
self.assertEqual("foo", a.test_field)
@unittest.skipIf(skip_yaml, "PyYaml is not installed")
def test_load_yaml_partial(self):
self.assertRaises(marshmallow.ValidationError, B)
b = B.load_yaml("{}", partial=True)
self.assertIsNone(b.a)
@unittest.skipIf(skip_yaml, "PyYaml is not installed")
def test_dump_yaml(self):
a = A(test_field="foo")
ydata = yaml.load(a.dump_yaml(), Loader=yaml.UnsafeLoader)
self.assertEqual(self.data, ydata)
def test_dump_ordered(self):
a = A(test_field="foo").dump()
b = B(test_field="foo", a=dict(test_field="bar")).dump()
self.assertIsInstance(a, collections.OrderedDict)
self.assertIsInstance(b, dict)
def test_load_unknown(self):
data = dict(test_field="foo", unknown_b="B", a=dict(test_field="bar", unknown_b="B"),)
with self.assertRaises(marshmallow.ValidationError):
B.load(data)
b = B.load(data, unknown=marshmallow.EXCLUDE)
self.assertEqual(b.test_field, "foo")
self.assertEqual(b.a.test_field, "bar")
class TestContext(unittest.TestCase):
def setUp(self):
self.context = {"value": "foo"}
self.data = dict(test_field="foo")
self.nested_data = dict(a=self.data)
def test_load_context(self):
a = AContext.load(self.data, self.context)
ddata = a.dump()
self.assertTrue(ddata["test_context_field"])
def test_context(self):
a = AContext(context=self.context, **self.data)
ddata = a.dump()
self.assertTrue(ddata["test_context_field"])
def test_no_context(self):
a = AContext(**self.data)
self.assertRaises(KeyError, a.dump)
def test_nested_context(self):
b = BContext(context=self.context, **self.nested_data)
self.assertEqual(b.context, b.a.context)
ddata = b.dump()
self.assertTrue(ddata["a"]["test_context_field"])
def test_update_context(self):
b = BContext(context=self.context, **self.nested_data)
b.context["value"] = "bar"
self.assertEqual(b.context, b.a.context)
ddata = b.dump()
self.assertFalse(ddata["a"]["test_context_field"])
def test_override_context(self):
b = BContext(context=self.context, **self.nested_data)
b.context = {"value": "bar"}
self.assertEqual(b.context, b.a.context)
ddata = b.dump()
self.assertFalse(ddata["a"]["test_context_field"])
def test_validate_partial(self):
class APartial(marshmallow.Model):
test_field = marshmallow.fields.Str(required=True)
email = marshmallow.fields.Email()
aa = APartial.validate([dict(email="foo"), dict(email="bar")], many=True, partial=True)
self.assertEqual(2, len(aa))
for a in aa.values():
self.assertNotIn("test_field", a)
self.assertIn("email", a)
class TestMany(unittest.TestCase):
def setUp(self):
self.data = [
dict(test_field="foo", a=dict(test_field="bar")),
dict(test_field="foo", a=dict(test_field="bar")),
]
def assert_objects(self, bb):
self.assertEqual(2, len(bb))
ids = set()
for b in bb:
self.assertEqual("foo", b.test_field)
self.assertEqual("bar", b.a.test_field)
b_id = id(b)
a_id = id(b.a)
self.assertNotIn(b_id, ids)
self.assertNotIn(a_id, ids)
ids.add(b_id)
ids.add(a_id)
def test_load_many(self):
bb = B.load(self.data, many=True)
self.assert_objects(bb)
def test_load_many_as_one(self):
self.assertRaises(marshmallow.ValidationError, B.load, self.data)
def test_load_many_partial(self):
self.assertRaises(
marshmallow.ValidationError, B.load, data=[{}, {}], many=True, partial=False,
)
bb = B.load([{}, {}], many=True, partial=True)
self.assertEqual(2, len(bb))
for b in bb:
self.assertIsNone(b.test_field)
self.assertIsNone(b.a)
def test_load_json(self):
jdata = json.dumps(self.data)
bb = B.load_json(jdata, many=True)
self.assert_objects(bb)
@unittest.skipIf(skip_yaml, "PyYaml is not installed")
def test_load_yaml(self):
ydata = yaml.dump(self.data, default_flow_style=False)
bb = B.load_yaml(ydata, many=True)
self.assert_objects(bb)
def test_dump_same_classes(self):
bb = B.load(self.data, many=True)
ddata = marshmallow.dump_many(bb)
self.assertEqual(self.data, ddata)
def test_dump_different_classes(self):
adata = dict(test_field="foo")
odata = [B.load(self.data, many=True), A(**adata)]
ddata = marshmallow.dump_many(odata)
self.assertEqual([self.data, adata], ddata)
def test_dump_fake(self):
self.assertRaises(marshmallow.ValidationError, marshmallow.dump_many, data="fake")
def test_dump_context(self):
context = {"value": "bar"}
bb = BContext.load(self.data, context=context, many=True)
ddata = marshmallow.dump_many(bb, context={"value": "foo"})
context_id = id(context)
for b in bb:
self.assertEqual(context_id, id(b.context))
self.assertEqual(context_id, id(b.a.context))
for b in ddata:
self.assertFalse(b["a"]["test_context_field"])
def test_dump_json(self):
bb = B.load(self.data, many=True)
jdata = marshmallow.dump_many_json(bb)
ddata = json.loads(jdata)
self.assertEqual(self.data, ddata)
@unittest.skipIf(skip_yaml, "PyYaml is not installed")
def test_dump_yaml(self):
bb = B.load(self.data, many=True)
ydata = marshmallow.dump_many_yaml(bb)
ddata = yaml.load(ydata, Loader=yaml.UnsafeLoader)
self.assertEqual(self.data, ddata)
class TestIni(unittest.TestCase):
def setUp(self):
self.data = """
[DEFAULT]
test_field = foo
[a]
test_field = bar
""".strip()
def test_load(self):
b = B.load_ini(self.data)
self.assertEqual("foo", b.test_field)
self.assertEqual("bar", b.a.test_field)
def test_dump(self):
b = B(test_field="foo", a=dict(test_field="bar"))
self.assertEqual(self.data, b.dump_ini())
class InitModel(marshmallow.Model):
count = 0
def __init__(self):
super(InitModel, self).__init__()
self.count = self.count + 1
class TestInit(unittest.TestCase):
def test_init(self):
obj = InitModel()
self.assertEqual(1, obj.count)
class OptionalModel(marshmallow.Model):
str_field = marshmallow.fields.Str(missing="foo")
int_field = marshmallow.fields.Int(default=-1)
class TestOptionalModel(unittest.TestCase):
def test_partial_model(self):
model = OptionalModel(partial=True)
self.assertIsNone(model.str_field)
self.assertIsNone(model.int_field)
def test_model_default_and_missing_fields(self):
model = OptionalModel()
self.assertEqual("foo", model.str_field)
self.assertIsNone(model.int_field)
def test_model_present_fields(self):
model = OptionalModel(str_field="bar", int_field=1)
self.assertEqual("bar", model.str_field)
self.assertEqual(1, model.int_field)
def test_dump(self):
ddata = OptionalModel().dump()
self.assertEqual({"int_field": -1, "str_field": "foo"}, ddata)
def test_dump_partial(self):
ddata = OptionalModel(partial=True).dump()
self.assertEqual({"int_field": -1}, ddata)
def test_dump_changed_missing_field(self):
obj = OptionalModel(partial=True)
obj.int_field = 1
ddata = obj.dump()
self.assertEqual({"int_field": 1}, ddata)
class TestValidatePartial(unittest.TestCase):
def setUp(self):
class TestModel(marshmallow.Model):
expected_partial = marshmallow.fields.Boolean(allow_none=True)
@marshmallow.validates_schema
def schema_validator(schema, data, **kwargs):
self.assertEqual(data.get("expected_partial"), schema.partial)
self.test_model_class = TestModel
def test_partial_true(self):
self.test_model_class.validate(dict(expected_partial=True), partial=True)
def test_partial_false(self):
self.test_model_class.validate(dict(expected_partial=False), partial=False)
def test_partial_omitted(self):
self.test_model_class.validate(dict(expected_partial=None))
self.test_model_class.validate(dict())
class MissingPerson(marshmallow.Model):
name = marshmallow.fields.String()
age = marshmallow.fields.Integer()
class MissingCompany(marshmallow.Model):
name = marshmallow.fields.String()
owner = marshmallow.NestedModel(MissingPerson)
hr = marshmallow.NestedModel(MissingPerson, allow_none=True)
workers = marshmallow.NestedModel(MissingPerson, many=True, allow_none=True)
assets = marshmallow.fields.List(marshmallow.NestedModel(MissingPerson))
class TestMissingFields(unittest.TestCase):
def test_field(self):
self.assertEqual({"name": "John Doe"}, MissingPerson(name="John Doe").dump())
def test_nested_field(self):
self.assertEqual({"owner": {"name": "John Doe"}}, MissingCompany(owner={"name": "John Doe"}).dump())
def test_nested_none(self):
obj = MissingCompany(owner={"name": "John Doe"})
self.assertIsNone(obj.hr)
self.assertEqual({"owner": {"name": "John Doe"}}, obj.dump())
def test_nested_list(self):
obj = MissingCompany(owner={"name": "John Doe"}, workers=[{"name": "Bob"}])
self.assertEqual(1, len(obj.workers))
self.assertEqual({"owner": {"name": "John Doe"}, "workers": [{"name": "Bob"}]}, obj.dump())
def test_list_field_nested(self):
obj = MissingCompany.load({"owner": {"name": "John Doe"}, "assets": [{"name": "MissingAsset"}]})
self.assertEqual(1, len(obj.assets))
self.assertEqual({"owner": {"name": "John Doe"}, "assets": [{"name": "MissingAsset"}]}, obj.dump())
class SelfNested(marshmallow.Model):
name = marshmallow.fields.String()
friend = marshmallow.NestedModel("SelfNested")
class WrongNested(marshmallow.Model):
name = marshmallow.fields.String()
friend = marshmallow.NestedModel("UknownNested")
class TestSelfNested(unittest.TestCase):
def test_self_nested(self):
obj = SelfNested.load({"name": "John Doe", "friend": {"name": "Jane Doe"}})
self.assertEqual("John Doe", obj.name)
self.assertEqual("Jane Doe", obj.friend.name)
def test_wrong_nested(self):
with self.assertRaises(marshmallow.ValidationError) as exp:
WrongNested.load({"name": "John Doe", "friend": {"name": "Jane Doe"}})
self.assertEqual("{'friend': [\"The class 'UknownNested' not found\"]}", str(exp))
| 32.269551
| 108
| 0.645767
|
462aa5c3346a5f2bd03cfb570251e723c51a3093
| 55
|
py
|
Python
|
dependencies/extrae/src/others/pyextrae/profile/__init__.py
|
TANGO-Project/compss-tango
|
d9e007b6fe4f8337d4f267f95f383d8962602ab8
|
[
"Apache-2.0"
] | 3
|
2018-03-05T14:52:22.000Z
|
2019-02-08T09:58:24.000Z
|
dependencies/extrae/src/others/pyextrae/profile/__init__.py
|
TANGO-Project/compss-tango
|
d9e007b6fe4f8337d4f267f95f383d8962602ab8
|
[
"Apache-2.0"
] | 1
|
2019-11-13T14:30:21.000Z
|
2019-11-13T14:30:21.000Z
|
dependencies/extrae/src/others/pyextrae/profile/__init__.py
|
TANGO-Project/compss-tango
|
d9e007b6fe4f8337d4f267f95f383d8962602ab8
|
[
"Apache-2.0"
] | null | null | null |
from pyextrae.common.extrae import *
startProfiling()
| 13.75
| 36
| 0.8
|
654f4b6a1e6876880ac7979160937f5c88dd66e5
| 765
|
py
|
Python
|
mchess/chess_link_pyblue.py
|
sshivaji/python-mchess
|
4e6579166a723dc82e19afa996c29c2339f4bde1
|
[
"MIT"
] | null | null | null |
mchess/chess_link_pyblue.py
|
sshivaji/python-mchess
|
4e6579166a723dc82e19afa996c29c2339f4bde1
|
[
"MIT"
] | null | null | null |
mchess/chess_link_pyblue.py
|
sshivaji/python-mchess
|
4e6579166a723dc82e19afa996c29c2339f4bde1
|
[
"MIT"
] | null | null | null |
import logging
import chess_link_protocol as clp
# TODO: expand empty framework with actual functionality!
class Transport():
def __init__(self, que):
self.log = logging.getLogger("ChessLinkPyBlue")
self.que = que # asyncio.Queue()
self.init = True
self.is_open = False
self.log.debug("init ok")
def search_board(self):
self.log.debug("searching for boards")
return None
def test_board(self, address):
return None
def open_mt(self, address):
self.log.debug("open_mt {}".format(address))
return False
def write_mt(self, msg):
return False
def get_name(self):
return "chess_link_pyblue"
def is_init(self):
return self.init
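# --- Illustrative usage sketch (added; not part of the original stub) --------
# All calls below exist on the Transport class above; the queue type follows
# the constructor comment (an asyncio.Queue, or any queue-like object).
#
# import asyncio
# if __name__ == "__main__":
#     logging.basicConfig(level=logging.DEBUG)
#     trans = Transport(asyncio.Queue())
#     print(trans.get_name())   # -> "chess_link_pyblue"
#     print(trans.is_init())    # -> True
#     trans.search_board()      # placeholder, currently returns None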
| 21.25
| 57
| 0.627451
|
ea608bc6278c44b1b785460399b524e1a5887339
| 2,484
|
py
|
Python
|
infrastructure/support_orchestrator/source/support_orchestrator/clients/flink.py
|
ptutak/data_management_system
|
01076a9fbfa3d8b02d01f455f8383a7dbdef5ecb
|
[
"Apache-2.0"
] | null | null | null |
infrastructure/support_orchestrator/source/support_orchestrator/clients/flink.py
|
ptutak/data_management_system
|
01076a9fbfa3d8b02d01f455f8383a7dbdef5ecb
|
[
"Apache-2.0"
] | null | null | null |
infrastructure/support_orchestrator/source/support_orchestrator/clients/flink.py
|
ptutak/data_management_system
|
01076a9fbfa3d8b02d01f455f8383a7dbdef5ecb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Piotr Tutak
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Any, Dict
import requests
from requests.models import Response
from support_orchestrator.exceptions.response import FlinkHTTPException
from support_orchestrator.utils.args import format_args
LOGGER = logging.getLogger(__name__)
class FlinkClient:
CONFIG_URL = "{url}/config"
JARS_URL = "{url}/jars"
def __init__(self, host: str, port: int) -> None:
self._host = host
self._port = port
self._url = f"http://{host}:{port}"
@property
def url(self) -> str:
return self._url
def get_jars(self) -> Dict:
response = requests.get(self.JARS_URL.format(url=self._url))
return self.process_response(response)
def upload_jar(self, jar_path: Path) -> Dict:
with open(str(jar_path), "rb") as content:
jar_content = content.read()
response = requests.post(
f"{self.JARS_URL}/upload".format(url=self._url),
files={"file": (str(jar_path), jar_content, "application/x-java-archive")},
)
return self.process_response(response)
def run_jar(self, jar_id: str, *args, **kwargs) -> Dict:
LOGGER.info(f"Run Jar: {jar_id}, {format_args(*args, **kwargs)}")
response = requests.post(
f"{self.JARS_URL}/{jar_id}/run".format(url=self._url),
json={"programArgsList": format_args(*args, **kwargs)},
)
return self.process_response(response)
def get_config(self) -> Dict:
response = requests.get(self.CONFIG_URL.format(url=self._url))
return self.process_response(response)
def process_response(self, response: Response) -> Dict[str, Any]:
if response.status_code == 200:
return response.json()
raise FlinkHTTPException(str(response.text))
if __name__ == "__main__":
client = FlinkClient("192.168.2.121", 8081)
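# Illustrative continuation (added; not in the original file). The calls below
# use only methods defined on FlinkClient above; the jar path and program
# arguments are hypothetical placeholders.
# print(client.get_config())
# print(client.get_jars())
# uploaded = client.upload_jar(Path("/tmp/example-job.jar"))
# jar_id = uploaded["filename"].rsplit("/", 1)[-1]   # response shape assumed
# client.run_jar(jar_id, "--input", "hdfs:///data/in")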
| 33.567568
| 87
| 0.676329
|
d9e45b245122ace6834f5e3b86eeb2b26043b364
| 90
|
py
|
Python
|
code/setup.py
|
zxdclyz/std-project1
|
95340e5a9443556936f47acc605010926fe314ac
|
[
"MIT"
] | 1
|
2021-09-12T06:01:49.000Z
|
2021-09-12T06:01:49.000Z
|
setup.py
|
BobAnkh/std-project3
|
b27589b2c43412943b4e46302881a57df3990637
|
[
"MIT"
] | 1
|
2021-08-25T04:44:26.000Z
|
2021-08-25T04:44:26.000Z
|
code/setup.py
|
duskmoon314/std-project2
|
d029e0a488f20280e1a84ffc928e94a0bf87d9c0
|
[
"MIT"
] | null | null | null |
import os
os.system('pip install -r requirements.txt')
print('Requirements Installed!')
| 15
| 44
| 0.755556
|
49cbcfcf0bede24e58b46b1d8dbd442c5ca39e80
| 1,656
|
py
|
Python
|
auth-api/src/auth_api/schemas/user.py
|
thorwolpert/sbc-auth
|
5da50cde2e5625d1b0ceea090c3656ee374c9b71
|
[
"Apache-2.0"
] | 11
|
2019-09-26T06:58:25.000Z
|
2022-01-26T06:19:39.000Z
|
auth-api/src/auth_api/schemas/user.py
|
thorwolpert/sbc-auth
|
5da50cde2e5625d1b0ceea090c3656ee374c9b71
|
[
"Apache-2.0"
] | 1,622
|
2019-05-07T21:08:38.000Z
|
2022-03-28T17:07:15.000Z
|
auth-api/src/auth_api/schemas/user.py
|
thorwolpert/sbc-auth
|
5da50cde2e5625d1b0ceea090c3656ee374c9b71
|
[
"Apache-2.0"
] | 98
|
2019-03-01T21:36:15.000Z
|
2021-12-01T22:11:25.000Z
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manager for user schema and export."""
from marshmallow import fields
from auth_api.models import User as UserModel
from .base_schema import BaseSchema
class UserSchema(BaseSchema): # pylint: disable=too-many-ancestors, too-few-public-methods
"""This is the schema for the User model."""
class Meta(BaseSchema.Meta): # pylint: disable=too-few-public-methods
"""Maps all of the User fields to a default schema."""
model = UserModel
exclude = (
'orgs',
'is_terms_of_use_accepted',
'terms_of_use_accepted_version',
'terms_of_use_version'
)
user_terms = fields.Method('get_user_terms_object')
contacts = fields.Pluck('ContactLinkSchema', 'contact', many=True)
def get_user_terms_object(self, obj): # pylint: disable=no-self-use
"""Map terms properties into nested object."""
return {
'isTermsOfUseAccepted': obj.is_terms_of_use_accepted,
'termsOfUseAcceptedVersion': obj.terms_of_use_accepted_version
}
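# Illustrative note (added; not part of the original module). Dumping a loaded
# UserModel instance `user` (hypothetical) through this schema surfaces the
# excluded terms columns as a nested object built by get_user_terms_object:
#
# data = UserSchema().dump(user)
# # data['user_terms'] (key casing follows BaseSchema conventions) looks like:
# # {'isTermsOfUseAccepted': ..., 'termsOfUseAcceptedVersion': ...}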
| 36
| 91
| 0.703502
|
30b708097afe700a108c762c581e071dea618dac
| 20,217
|
py
|
Python
|
website/canvas/migrations/0142_is_staff_is_over_9000.py
|
bopopescu/drawquest-web
|
8d8f9149b6efeb65202809a5f8916386f58a1b3b
|
[
"BSD-3-Clause"
] | 61
|
2015-11-10T17:13:46.000Z
|
2021-08-06T17:58:30.000Z
|
website/canvas/migrations/0142_is_staff_is_over_9000.py
|
bopopescu/drawquest-web
|
8d8f9149b6efeb65202809a5f8916386f58a1b3b
|
[
"BSD-3-Clause"
] | 13
|
2015-11-11T07:49:41.000Z
|
2021-06-09T03:45:31.000Z
|
website/canvas/migrations/0142_is_staff_is_over_9000.py
|
bopopescu/drawquest-web
|
8d8f9149b6efeb65202809a5f8916386f58a1b3b
|
[
"BSD-3-Clause"
] | 18
|
2015-11-11T04:50:04.000Z
|
2021-08-20T00:57:11.000Z
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for user_info in orm.UserInfo.objects.filter(power_level=9001):
user = user_info.user
user.is_staff = True
user.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'canvas.apiauthtoken': {
'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.bestof': {
'Meta': {'object_name': 'BestOf'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {})
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'allow_textonlyop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'disable_remix': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
},
'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.emailunsubscribe': {
'Meta': {'object_name': 'EmailUnsubscribe'},
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.externalcontent': {
'Meta': {'object_name': 'ExternalContent'},
'_data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'external_content'", 'to': "orm['canvas.Comment']"}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'null': 'True', 'blank': 'True'})
},
'canvas.facebookinvite': {
'Meta': {'object_name': 'FacebookInvite'},
'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'canvas.followcategory': {
'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
},
'canvas.invitecode': {
'Meta': {'object_name': 'InviteCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.remixplugin': {
'Meta': {'object_name': 'RemixPlugin'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'power_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
},
'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.welcomeemailrecipient': {
'Meta': {'object_name': 'WelcomeEmailRecipient'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas_auth.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
| 76.870722
| 197
| 0.553148
|
165e30d89443b7e8244ed965c34a5d7219e7d1f3
| 30,839
|
py
|
Python
|
kms/scripts/db_setup.py
|
asmotrich/ranger
|
e5abaff2da14a8accccc8747f7f33c92e79f51eb
|
[
"Apache-2.0"
] | null | null | null |
kms/scripts/db_setup.py
|
asmotrich/ranger
|
e5abaff2da14a8accccc8747f7f33c92e79f51eb
|
[
"Apache-2.0"
] | null | null | null |
kms/scripts/db_setup.py
|
asmotrich/ranger
|
e5abaff2da14a8accccc8747f7f33c92e79f51eb
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. See accompanying LICENSE file.
#
import os
import re
import sys
import errno
import shlex
import platform
import logging
import subprocess
import fileinput
from os.path import basename
from subprocess import Popen,PIPE
from datetime import date
import datetime
from time import gmtime, strftime
globalDict = {}
os_name = platform.system()
os_name = os_name.upper()
is_unix = os_name == "LINUX" or os_name == "DARWIN"
jisql_debug=True
RANGER_KMS_HOME = os.getenv("RANGER_KMS_HOME")
if RANGER_KMS_HOME is None:
RANGER_KMS_HOME = os.getcwd()
def check_output(query):
if is_unix:
p = subprocess.Popen(shlex.split(query), stdout=subprocess.PIPE)
elif os_name == "WINDOWS":
p = subprocess.Popen(query, stdout=subprocess.PIPE, shell=True)
output = p.communicate ()[0]
return output.decode()
def log(msg,type):
if type == 'info':
logging.info(" %s",msg)
if type == 'debug':
logging.debug(" %s",msg)
if type == 'warning':
logging.warning(" %s",msg)
if type == 'exception':
logging.exception(" %s",msg)
if type == 'error':
logging.error(" %s",msg)
def populate_global_dict():
global globalDict
if is_unix:
read_config_file = open(os.path.join(RANGER_KMS_HOME,'install.properties'))
elif os_name == "WINDOWS":
read_config_file = open(os.path.join(RANGER_KMS_HOME,'bin','install_config.properties'))
library_path = os.path.join(RANGER_KMS_HOME,"cred","lib","*")
for each_line in read_config_file.read().split('\n') :
each_line = each_line.strip()
if len(each_line) == 0:
continue
elif each_line[0] == "#":
continue
if re.search('=', each_line):
key , value = each_line.split("=",1)
key = key.strip()
if 'PASSWORD' in key:
value = ''
value = value.strip()
globalDict[key] = value
def jisql_log(query, db_password):
if jisql_debug == True:
if os_name == "WINDOWS":
query = query.replace(' -p "'+db_password+'"' , ' -p "********"')
log("[JISQL] "+query, "info")
else:
query = query.replace(" -p '"+db_password+"'" , " -p '********'")
log("[JISQL] "+query, "info")
class BaseDB(object):
def check_connection(self, db_name, db_user, db_password):
log("[I] ---------- Verifying DB connection ----------", "info")
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
log("[I] ---------- Verifying table ----------", "info")
def import_db_file(self, db_name, db_user, db_password, file_name):
log("[I] ---------- Importing db schema ----------", "info")
class MysqlConf(BaseDB):
# Constructor
def __init__(self, host,SQL_CONNECTOR_JAR,JAVA_BIN,db_ssl_enabled,db_ssl_required,db_ssl_verifyServerCertificate,javax_net_ssl_keyStore,javax_net_ssl_keyStorePassword,javax_net_ssl_trustStore,javax_net_ssl_trustStorePassword,db_ssl_auth_type):
self.host = host
self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
self.JAVA_BIN = JAVA_BIN
self.db_ssl_enabled=db_ssl_enabled.lower()
self.db_ssl_required=db_ssl_required.lower()
self.db_ssl_verifyServerCertificate=db_ssl_verifyServerCertificate.lower()
self.db_ssl_auth_type=db_ssl_auth_type.lower()
self.javax_net_ssl_keyStore=javax_net_ssl_keyStore
self.javax_net_ssl_keyStorePassword=javax_net_ssl_keyStorePassword
self.javax_net_ssl_trustStore=javax_net_ssl_trustStore
self.javax_net_ssl_trustStorePassword=javax_net_ssl_trustStorePassword
def get_jisql_cmd(self, user, password ,db_name):
path = RANGER_KMS_HOME
db_ssl_param=''
db_ssl_cert_param=''
if self.db_ssl_enabled == 'true':
db_ssl_param="?useSSL=%s&requireSSL=%s&verifyServerCertificate=%s" %(self.db_ssl_enabled,self.db_ssl_required,self.db_ssl_verifyServerCertificate)
if self.db_ssl_verifyServerCertificate == 'true':
if self.db_ssl_auth_type == '1-way':
db_ssl_cert_param=" -Djavax.net.ssl.trustStore=%s -Djavax.net.ssl.trustStorePassword=%s " %(self.javax_net_ssl_trustStore,self.javax_net_ssl_trustStorePassword)
else:
db_ssl_cert_param=" -Djavax.net.ssl.keyStore=%s -Djavax.net.ssl.keyStorePassword=%s -Djavax.net.ssl.trustStore=%s -Djavax.net.ssl.trustStorePassword=%s " %(self.javax_net_ssl_keyStore,self.javax_net_ssl_keyStorePassword,self.javax_net_ssl_trustStore,self.javax_net_ssl_trustStorePassword)
self.JAVA_BIN = self.JAVA_BIN.strip("'")
if is_unix:
jisql_cmd = "%s %s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -driver mysqlconj -cstring jdbc:mysql://%s/%s%s -u '%s' -p '%s' -noheader -trim -c \;" %(self.JAVA_BIN,db_ssl_cert_param,self.SQL_CONNECTOR_JAR,path,self.host,db_name,db_ssl_param,user,password)
elif os_name == "WINDOWS":
jisql_cmd = "%s %s -cp %s;%s\jisql\\lib\\* org.apache.util.sql.Jisql -driver mysqlconj -cstring jdbc:mysql://%s/%s%s -u \"%s\" -p \"%s\" -noheader -trim" %(self.JAVA_BIN,db_ssl_cert_param,self.SQL_CONNECTOR_JAR, path, self.host, db_name,db_ssl_param, user, password)
return jisql_cmd
def check_connection(self, db_name, db_user, db_password):
log("[I] Checking connection..", "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"SELECT version();\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT version();\" -c ;"
jisql_log(query, db_password)
output = check_output(query)
if output.strip('Production |'):
log("[I] Checking connection passed.", "info")
return True
else:
log("[E] Can't establish connection!! Exiting.." ,"error")
log("[I] Please run DB setup first or contact Administrator.." ,"info")
sys.exit(1)
def import_db_file(self, db_name, db_user, db_password, file_name):
name = basename(file_name)
if os.path.isfile(file_name):
log("[I] Importing db schema to database " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " DB schema imported successfully","info")
else:
log("[E] "+name + " DB schema import failed!","error")
sys.exit(1)
else:
log("[E] DB schema file " + name+ " not found","error")
sys.exit(1)
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"show tables like '%s';\"" %(TABLE_NAME)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"show tables like '%s';\" -c ;" %(TABLE_NAME)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(TABLE_NAME + " |"):
log("[I] Table " + TABLE_NAME +" already exists in database '" + db_name + "'","info")
return True
else:
log("[I] Table " + TABLE_NAME +" does not exist in database " + db_name + "","info")
return False
class OracleConf(BaseDB):
# Constructor
def __init__(self, host, SQL_CONNECTOR_JAR, JAVA_BIN):
self.host = host
self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
self.JAVA_BIN = JAVA_BIN
def get_jisql_cmd(self, user, password):
path = RANGER_KMS_HOME
self.JAVA_BIN = self.JAVA_BIN.strip("'")
if not re.search('-Djava.security.egd=file:///dev/urandom', self.JAVA_BIN):
self.JAVA_BIN = self.JAVA_BIN + " -Djava.security.egd=file:///dev/urandom "
#if self.host.count(":") == 2:
if self.host.count(":") == 2 or self.host.count(":") == 0:
#jdbc:oracle:thin:@[HOST][:PORT]:SID or #jdbc:oracle:thin:@GL
cstring="jdbc:oracle:thin:@%s" %(self.host)
else:
#jdbc:oracle:thin:@//[HOST][:PORT]/SERVICE
cstring="jdbc:oracle:thin:@//%s" %(self.host)
if is_unix:
jisql_cmd = "%s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -driver oraclethin -cstring %s -u '%s' -p '%s' -noheader -trim" %(self.JAVA_BIN, self.SQL_CONNECTOR_JAR,path, cstring, user, password)
elif os_name == "WINDOWS":
jisql_cmd = "%s -cp %s;%s\jisql\\lib\\* org.apache.util.sql.Jisql -driver oraclethin -cstring %s -u \"%s\" -p \"%s\" -noheader -trim" %(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, cstring, user, password)
return jisql_cmd
def check_connection(self, db_name, db_user, db_password):
log("[I] Checking connection", "info")
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query \"select * from v$version;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select * from v$version;\" -c ;"
jisql_log(query, db_password)
output = check_output(query)
if output.strip('Production |'):
log("[I] Connection success", "info")
return True
else:
log("[E] Can't establish connection!", "error")
sys.exit(1)
def import_db_file(self, db_name, db_user, db_password, file_name):
name = basename(file_name)
if os.path.isfile(file_name):
log("[I] Importing script " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -input %s -c \;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " imported successfully","info")
else:
log("[E] "+name + " import failed!","error")
sys.exit(1)
else:
log("[E] Import " +name + " sql file not found","error")
sys.exit(1)
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
get_cmd = self.get_jisql_cmd(db_user ,db_password)
if is_unix:
query = get_cmd + " -c \; -query 'select default_tablespace from user_users;'"
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select default_tablespace from user_users;\" -c ;"
jisql_log(query, db_password)
output = check_output(query).strip()
output = output.strip(' |')
db_name = db_name.upper()
if ((output == db_name) or (db_name =='' and output is not None and output != '')):
log("[I] User name " + db_user + " and tablespace " + output + " already exists.","info")
log("[I] Verifying table " + TABLE_NAME +" in tablespace " + output, "info")
get_cmd = self.get_jisql_cmd(db_user, db_password)
if is_unix:
query = get_cmd + " -c \; -query \"select UPPER(table_name) from all_tables where UPPER(tablespace_name)=UPPER('%s') and UPPER(table_name)=UPPER('%s');\"" %(output ,TABLE_NAME)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select UPPER(table_name) from all_tables where UPPER(tablespace_name)=UPPER('%s') and UPPER(table_name)=UPPER('%s');\" -c ;" %(output ,TABLE_NAME)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(TABLE_NAME.upper() + ' |'):
log("[I] Table " + TABLE_NAME +" already exists in tablespace " + output + "","info")
return True
else:
log("[I] Table " + TABLE_NAME +" does not exist in tablespace " + output + "","info")
return False
else:
log("[E] "+db_user + " user already assigned to some other tablespace , provide different DB name.","error")
sys.exit(1)
class PostgresConf(BaseDB):
# Constructor
def __init__(self, host,SQL_CONNECTOR_JAR,JAVA_BIN,db_ssl_enabled,db_ssl_required,db_ssl_verifyServerCertificate,javax_net_ssl_keyStore,javax_net_ssl_keyStorePassword,javax_net_ssl_trustStore,javax_net_ssl_trustStorePassword,db_ssl_auth_type,db_ssl_certificate_file,javax_net_ssl_trustStore_type,javax_net_ssl_keyStore_type):
self.host = host
self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
self.JAVA_BIN = JAVA_BIN
self.db_ssl_enabled=db_ssl_enabled.lower()
self.db_ssl_required=db_ssl_required.lower()
self.db_ssl_verifyServerCertificate=db_ssl_verifyServerCertificate.lower()
self.db_ssl_auth_type=db_ssl_auth_type.lower()
self.db_ssl_certificate_file=db_ssl_certificate_file
self.javax_net_ssl_keyStore=javax_net_ssl_keyStore
self.javax_net_ssl_keyStorePassword=javax_net_ssl_keyStorePassword
self.javax_net_ssl_keyStore_type=javax_net_ssl_keyStore_type.lower()
self.javax_net_ssl_trustStore=javax_net_ssl_trustStore
self.javax_net_ssl_trustStorePassword=javax_net_ssl_trustStorePassword
self.javax_net_ssl_trustStore_type=javax_net_ssl_trustStore_type.lower()
def get_jisql_cmd(self, user, password, db_name):
#TODO: User array for forming command
path = RANGER_KMS_HOME
self.JAVA_BIN = self.JAVA_BIN.strip("'")
db_ssl_param=''
db_ssl_cert_param=''
if self.db_ssl_enabled == 'true':
if self.db_ssl_certificate_file != "":
db_ssl_param="?ssl=%s&sslmode=verify-full&sslrootcert=%s" %(self.db_ssl_enabled,self.db_ssl_certificate_file)
elif self.db_ssl_verifyServerCertificate == 'true' or self.db_ssl_required == 'true':
db_ssl_param="?ssl=%s&sslmode=verify-full&sslfactory=org.postgresql.ssl.DefaultJavaSSLFactory" %(self.db_ssl_enabled)
if self.db_ssl_auth_type == '1-way':
db_ssl_cert_param=" -Djavax.net.ssl.trustStore=%s -Djavax.net.ssl.trustStorePassword=%s -Djavax.net.ssl.trustStoreType=%s" %(self.javax_net_ssl_trustStore,self.javax_net_ssl_trustStorePassword,self.javax_net_ssl_trustStore_type)
else:
db_ssl_cert_param=" -Djavax.net.ssl.keyStore=%s -Djavax.net.ssl.keyStorePassword=%s -Djavax.net.ssl.trustStore=%s -Djavax.net.ssl.trustStorePassword=%s -Djavax.net.ssl.trustStoreType=%s -Djavax.net.ssl.keyStoreType=%s" %(self.javax_net_ssl_keyStore,self.javax_net_ssl_keyStorePassword,self.javax_net_ssl_trustStore,self.javax_net_ssl_trustStorePassword,self.javax_net_ssl_trustStore_type,self.javax_net_ssl_keyStore_type)
else:
db_ssl_param="?ssl=%s" %(self.db_ssl_enabled)
if is_unix:
jisql_cmd = "%s %s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -driver postgresql -cstring jdbc:postgresql://%s/%s%s -u %s -p '%s' -noheader -trim -c \;" %(self.JAVA_BIN, db_ssl_cert_param,self.SQL_CONNECTOR_JAR,path, self.host, db_name, db_ssl_param,user, password)
elif os_name == "WINDOWS":
jisql_cmd = "%s %s -cp %s;%s\jisql\\lib\\* org.apache.util.sql.Jisql -driver postgresql -cstring jdbc:postgresql://%s/%s%s -u %s -p \"%s\" -noheader -trim" %(self.JAVA_BIN, db_ssl_cert_param,self.SQL_CONNECTOR_JAR, path, self.host, db_name, db_ssl_param,user, password)
return jisql_cmd
def check_connection(self, db_name, db_user, db_password):
log("[I] Checking connection", "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"SELECT 1;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT 1;\" -c ;"
jisql_log(query, db_password)
output = check_output(query)
if output.strip('1 |'):
log("[I] connection success", "info")
return True
else:
log("[E] Can't establish connection", "error")
sys.exit(1)
def import_db_file(self, db_name, db_user, db_password, file_name):
name = basename(file_name)
if os.path.isfile(file_name):
log("[I] Importing db schema to database " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s -c ;" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " DB schema imported successfully","info")
else:
log("[E] "+name + " DB schema import failed!","error")
sys.exit(1)
else:
log("[E] DB schema file " + name+ " not found","error")
sys.exit(1)
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
log("[I] Verifying table " + TABLE_NAME +" in database " + db_name, "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -query \"select * from (select table_name from information_schema.tables where table_catalog='%s' and table_name = '%s') as temp;\"" %(db_name , TABLE_NAME)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"select * from (select table_name from information_schema.tables where table_catalog='%s' and table_name = '%s') as temp;\" -c ;" %(db_name , TABLE_NAME)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(TABLE_NAME +" |"):
log("[I] Table " + TABLE_NAME +" already exists in database " + db_name, "info")
return True
else:
log("[I] Table " + TABLE_NAME +" does not exist in database " + db_name, "info")
return False
class SqlServerConf(BaseDB):
# Constructor
def __init__(self, host, SQL_CONNECTOR_JAR, JAVA_BIN):
self.host = host
self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
self.JAVA_BIN = JAVA_BIN
def get_jisql_cmd(self, user, password, db_name):
#TODO: User array for forming command
path = RANGER_KMS_HOME
self.JAVA_BIN = self.JAVA_BIN.strip("'")
if is_unix:
jisql_cmd = "%s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -user %s -p '%s' -driver mssql -cstring jdbc:sqlserver://%s\\;databaseName=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR,path, user, password, self.host,db_name)
elif os_name == "WINDOWS":
jisql_cmd = "%s -cp %s;%s\\jisql\\lib\\* org.apache.util.sql.Jisql -user %s -p \"%s\" -driver mssql -cstring jdbc:sqlserver://%s;databaseName=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, user, password, self.host,db_name)
return jisql_cmd
def check_connection(self, db_name, db_user, db_password):
log("[I] Checking connection", "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"SELECT 1;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT 1;\" -c ;"
jisql_log(query, db_password)
output = check_output(query)
if output.strip('1 |'):
log("[I] Connection success", "info")
return True
else:
log("[E] Can't establish connection", "error")
sys.exit(1)
def import_db_file(self, db_name, db_user, db_password, file_name):
name = basename(file_name)
if os.path.isfile(file_name):
log("[I] Importing db schema to database " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " DB schema imported successfully","info")
else:
log("[E] "+name + " DB Schema import failed!","error")
sys.exit(1)
else:
log("[I] DB Schema file " + name+ " not found","error")
sys.exit(1)
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"SELECT TABLE_NAME FROM information_schema.tables where table_name = '%s';\"" %(TABLE_NAME)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT TABLE_NAME FROM information_schema.tables where table_name = '%s';\" -c ;" %(TABLE_NAME)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(TABLE_NAME + " |"):
log("[I] Table '" + TABLE_NAME + "' already exists in database '" + db_name + "'","info")
return True
else:
log("[I] Table '" + TABLE_NAME + "' does not exist in database '" + db_name + "'","info")
return False
class SqlAnywhereConf(BaseDB):
# Constructor
def __init__(self, host, SQL_CONNECTOR_JAR, JAVA_BIN):
self.host = host
self.SQL_CONNECTOR_JAR = SQL_CONNECTOR_JAR
self.JAVA_BIN = JAVA_BIN
def get_jisql_cmd(self, user, password, db_name):
path = RANGER_KMS_HOME
self.JAVA_BIN = self.JAVA_BIN.strip("'")
if is_unix:
jisql_cmd = "%s -cp %s:%s/jisql/lib/* org.apache.util.sql.Jisql -user %s -p '%s' -driver sapsajdbc4 -cstring jdbc:sqlanywhere:database=%s;host=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path,user, password,db_name,self.host)
elif os_name == "WINDOWS":
jisql_cmd = "%s -cp %s;%s\\jisql\\lib\\* org.apache.util.sql.Jisql -user %s -p \"%s\" -driver sapsajdbc4 -cstring jdbc:sqlanywhere:database=%s;host=%s -noheader -trim"%(self.JAVA_BIN, self.SQL_CONNECTOR_JAR, path, user, password,db_name,self.host)
return jisql_cmd
def check_connection(self, db_name, db_user, db_password):
log("[I] Checking connection", "info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"SELECT 1;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT 1;\" -c ;"
jisql_log(query, db_password)
output = check_output(query)
if output.strip('1 |'):
log("[I] Connection success", "info")
return True
else:
log("[E] Can't establish connection", "error")
sys.exit(1)
def import_db_file(self, db_name, db_user, db_password, file_name):
name = basename(file_name)
if os.path.isfile(file_name):
log("[I] Importing db schema to database " + db_name + " from file: " + name,"info")
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
elif os_name == "WINDOWS":
query = get_cmd + " -input %s" %file_name
jisql_log(query, db_password)
ret = subprocess.call(query)
if ret == 0:
log("[I] "+name + " DB schema imported successfully","info")
else:
log("[E] "+name + " DB Schema import failed!","error")
sys.exit(1)
else:
log("[I] DB Schema file " + name+ " not found","error")
sys.exit(1)
def check_table(self, db_name, db_user, db_password, TABLE_NAME):
self.set_options(db_name, db_user, db_password, TABLE_NAME)
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"SELECT name FROM sysobjects where name = '%s' and type='U';\"" %(TABLE_NAME)
elif os_name == "WINDOWS":
query = get_cmd + " -query \"SELECT name FROM sysobjects where name = '%s' and type='U';\" -c ;" %(TABLE_NAME)
jisql_log(query, db_password)
output = check_output(query)
if output.strip(TABLE_NAME + " |"):
log("[I] Table '" + TABLE_NAME + "' already exists in database '" + db_name + "'","info")
return True
else:
log("[I] Table '" + TABLE_NAME + "' does not exist in database '" + db_name + "'","info")
return False
def set_options(self, db_name, db_user, db_password, TABLE_NAME):
get_cmd = self.get_jisql_cmd(db_user, db_password, db_name)
if is_unix:
query = get_cmd + " -c \; -query \"set option public.reserved_keywords='LIMIT';\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"set option public.reserved_keywords='LIMIT';\" -c ;"
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
if is_unix:
query = get_cmd + " -c \; -query \"set option public.max_statement_count=0;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"set option public.max_statement_count=0;\" -c;"
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
if is_unix:
query = get_cmd + " -c \; -query \"set option public.max_cursor_count=0;\""
elif os_name == "WINDOWS":
query = get_cmd + " -query \"set option public.max_cursor_count=0;\" -c;"
jisql_log(query, db_password)
ret = subprocess.call(shlex.split(query))
def main(argv):
populate_global_dict()
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
	if os.environ.get('JAVA_HOME', '') == "":
log("[E] ---------- JAVA_HOME environment property not defined, aborting installation. ----------", "error")
sys.exit(1)
else:
JAVA_BIN=os.path.join(os.environ['JAVA_HOME'],'bin','java')
if os_name == "WINDOWS" :
JAVA_BIN = JAVA_BIN+'.exe'
if os.path.isfile(JAVA_BIN):
pass
else:
JAVA_BIN=globalDict['JAVA_BIN']
if os.path.isfile(JAVA_BIN):
pass
else:
log("[E] ---------- JAVA Not Found, aborting installation. ----------", "error")
sys.exit(1)
XA_DB_FLAVOR = globalDict['DB_FLAVOR']
XA_DB_FLAVOR = XA_DB_FLAVOR.upper()
log("[I] DB FLAVOR :" + XA_DB_FLAVOR ,"info")
xa_db_host = globalDict['db_host']
mysql_core_file = globalDict['mysql_core_file']
mysql_patches = os.path.join('db','mysql','patches')
oracle_core_file = globalDict['oracle_core_file']
oracle_patches = os.path.join('db','oracle','patches')
postgres_core_file = globalDict['postgres_core_file']
postgres_patches = os.path.join('db','postgres','patches')
sqlserver_core_file = globalDict['sqlserver_core_file']
sqlserver_patches = os.path.join('db','sqlserver','patches')
sqlanywhere_core_file = globalDict['sqlanywhere_core_file']
sqlanywhere_patches = os.path.join('db','sqlanywhere','patches')
db_name = globalDict['db_name']
db_user = globalDict['db_user']
db_password = globalDict['db_password']
x_db_version = 'x_db_version_h'
x_user = 'ranger_masterkey'
db_ssl_enabled='false'
db_ssl_required='false'
db_ssl_verifyServerCertificate='false'
db_ssl_auth_type='2-way'
javax_net_ssl_keyStore=''
javax_net_ssl_keyStorePassword=''
javax_net_ssl_trustStore=''
javax_net_ssl_trustStorePassword=''
db_ssl_certificate_file=''
javax_net_ssl_trustStore_type='bcfks'
javax_net_ssl_keyStore_type='bcfks'
if XA_DB_FLAVOR == "MYSQL" or XA_DB_FLAVOR == "POSTGRES":
if 'db_ssl_enabled' in globalDict:
db_ssl_enabled=globalDict['db_ssl_enabled'].lower()
if db_ssl_enabled == 'true':
if 'db_ssl_required' in globalDict:
db_ssl_required=globalDict['db_ssl_required'].lower()
if 'db_ssl_verifyServerCertificate' in globalDict:
db_ssl_verifyServerCertificate=globalDict['db_ssl_verifyServerCertificate'].lower()
if 'db_ssl_auth_type' in globalDict:
db_ssl_auth_type=globalDict['db_ssl_auth_type'].lower()
if 'db_ssl_certificate_file' in globalDict:
db_ssl_certificate_file=globalDict['db_ssl_certificate_file']
if 'javax_net_ssl_trustStore' in globalDict:
javax_net_ssl_trustStore=globalDict['javax_net_ssl_trustStore']
if 'javax_net_ssl_trustStorePassword' in globalDict:
javax_net_ssl_trustStorePassword=globalDict['javax_net_ssl_trustStorePassword']
if 'javax_net_ssl_trustStore_type' in globalDict:
javax_net_ssl_trustStore_type=globalDict['javax_net_ssl_trustStore_type']
if db_ssl_verifyServerCertificate == 'true':
if db_ssl_certificate_file != "":
if not os.path.exists(db_ssl_certificate_file):
log("[E] Invalid file Name! Unable to find certificate file:"+db_ssl_certificate_file,"error")
sys.exit(1)
elif db_ssl_auth_type == '1-way' and db_ssl_certificate_file == "" :
if not os.path.exists(javax_net_ssl_trustStore):
log("[E] Invalid file Name! Unable to find truststore file:"+javax_net_ssl_trustStore,"error")
sys.exit(1)
if javax_net_ssl_trustStorePassword is None or javax_net_ssl_trustStorePassword =="":
log("[E] Invalid ssl truststore password!","error")
sys.exit(1)
if db_ssl_auth_type == '2-way':
if 'javax_net_ssl_keyStore' in globalDict:
javax_net_ssl_keyStore=globalDict['javax_net_ssl_keyStore']
if 'javax_net_ssl_keyStorePassword' in globalDict:
javax_net_ssl_keyStorePassword=globalDict['javax_net_ssl_keyStorePassword']
if 'javax_net_ssl_keyStore_type' in globalDict:
javax_net_ssl_keyStore_type=globalDict['javax_net_ssl_keyStore_type']
if not os.path.exists(javax_net_ssl_keyStore):
log("[E] Invalid file Name! Unable to find keystore file:"+javax_net_ssl_keyStore,"error")
sys.exit(1)
if javax_net_ssl_keyStorePassword is None or javax_net_ssl_keyStorePassword =="":
log("[E] Invalid ssl keystore password!","error")
sys.exit(1)
if XA_DB_FLAVOR == "MYSQL":
MYSQL_CONNECTOR_JAR=globalDict['SQL_CONNECTOR_JAR']
xa_sqlObj = MysqlConf(xa_db_host, MYSQL_CONNECTOR_JAR, JAVA_BIN,db_ssl_enabled,db_ssl_required,db_ssl_verifyServerCertificate,javax_net_ssl_keyStore,javax_net_ssl_keyStorePassword,javax_net_ssl_trustStore,javax_net_ssl_trustStorePassword,db_ssl_auth_type)
xa_db_core_file = os.path.join(RANGER_KMS_HOME , mysql_core_file)
elif XA_DB_FLAVOR == "ORACLE":
ORACLE_CONNECTOR_JAR=globalDict['SQL_CONNECTOR_JAR']
xa_sqlObj = OracleConf(xa_db_host, ORACLE_CONNECTOR_JAR, JAVA_BIN)
xa_db_core_file = os.path.join(RANGER_KMS_HOME ,oracle_core_file)
elif XA_DB_FLAVOR == "POSTGRES":
POSTGRES_CONNECTOR_JAR = globalDict['SQL_CONNECTOR_JAR']
xa_sqlObj = PostgresConf(xa_db_host, POSTGRES_CONNECTOR_JAR, JAVA_BIN,db_ssl_enabled,db_ssl_required,db_ssl_verifyServerCertificate,javax_net_ssl_keyStore,javax_net_ssl_keyStorePassword,javax_net_ssl_trustStore,javax_net_ssl_trustStorePassword,db_ssl_auth_type,db_ssl_certificate_file,javax_net_ssl_trustStore_type,javax_net_ssl_keyStore_type)
xa_db_core_file = os.path.join(RANGER_KMS_HOME , postgres_core_file)
elif XA_DB_FLAVOR == "MSSQL":
SQLSERVER_CONNECTOR_JAR = globalDict['SQL_CONNECTOR_JAR']
xa_sqlObj = SqlServerConf(xa_db_host, SQLSERVER_CONNECTOR_JAR, JAVA_BIN)
xa_db_core_file = os.path.join(RANGER_KMS_HOME , sqlserver_core_file)
elif XA_DB_FLAVOR == "SQLA":
if not os_name == "WINDOWS" :
			if os.environ.get('LD_LIBRARY_PATH', '') == "":
log("[E] ---------- LD_LIBRARY_PATH environment property not defined, aborting installation. ----------", "error")
sys.exit(1)
SQLANYWHERE_CONNECTOR_JAR = globalDict['SQL_CONNECTOR_JAR']
xa_sqlObj = SqlAnywhereConf(xa_db_host, SQLANYWHERE_CONNECTOR_JAR, JAVA_BIN)
xa_db_core_file = os.path.join(RANGER_KMS_HOME , sqlanywhere_core_file)
else:
log("[E] --------- NO SUCH SUPPORTED DB FLAVOUR!! ---------", "error")
sys.exit(1)
log("[I] --------- Verifying Ranger DB connection ---------","info")
xa_sqlObj.check_connection(db_name, db_user, db_password)
if len(argv)==1:
log("[I] --------- Verifying Ranger DB tables ---------","info")
if xa_sqlObj.check_table(db_name, db_user, db_password, x_user):
pass
else:
log("[I] --------- Importing Ranger Core DB Schema ---------","info")
xa_sqlObj.import_db_file(db_name, db_user, db_password, xa_db_core_file)
main(sys.argv)
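# Hedged usage note (not in the original file; paths are placeholders): this
# module is normally driven by the Ranger KMS setup scripts, roughly
#   export RANGER_KMS_HOME=/usr/local/ranger-kms
#   export JAVA_HOME=/usr/lib/jvm/java
#   python db_setup.py
# With no extra arguments main() verifies the 'ranger_masterkey' table and
# imports the core schema when it is missing; any extra argument limits the
# run to the DB connection check.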
| 43.805398
| 426
| 0.710529
|
f4651404394db4148bf9dcde863b52f7beaa8950
| 6,370
|
py
|
Python
|
utools/maya/align_rounded.py
|
theiviaxx/utools
|
6501e52ad77edb2beab8f33028460f32b3231b80
|
[
"BSD-3-Clause"
] | 1
|
2019-09-25T22:47:02.000Z
|
2019-09-25T22:47:02.000Z
|
utools/maya/align_rounded.py
|
theiviaxx/utools
|
6501e52ad77edb2beab8f33028460f32b3231b80
|
[
"BSD-3-Clause"
] | 4
|
2015-12-16T22:14:59.000Z
|
2015-12-16T22:18:04.000Z
|
utools/maya/align_rounded.py
|
theiviaxx/utools
|
6501e52ad77edb2beab8f33028460f32b3231b80
|
[
"BSD-3-Clause"
] | null | null | null |
##################################################################################################
# Copyright (c) 2014 Brett Dixon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################
from maya import OpenMaya as om
from maya import OpenMayaMPx as omx
class AlignRoundedCommand(omx.MPxCommand):
"""AlignRounded takes the selected edges and aligns the normals to the added face vectors.
This is typically useful for rounded surfaces and yields a nicer normals layout.
"""
def __init__(self):
super(AlignRoundedCommand, self).__init__()
self._verts = {}
self._faceverts = {}
self._currentnormals = []
self._currentfacenormals = []
self._currentlocked = [] # (vtx, locked)
self._mesh = None
def isUndoable(self):
return True
def doIt(self, args):
self._verts = {}
self._faceverts = {}
border = []
facelist = []
selection = om.MSelectionList()
om.MGlobal.getActiveSelectionList(selection)
seliter = om.MItSelectionList(selection, om.MFn.kMeshEdgeComponent)
dag = om.MDagPath()
comp = om.MObject()
while not seliter.isDone():
seliter.getDagPath(dag, comp)
mesh = om.MFnMesh(dag)
nmlcount = om.MIntArray()
nmlids = om.MIntArray()
mesh.getNormalIds(nmlcount, nmlids)
self._currentlocked = [(n, mesh.isNormalLocked(n)) for n in nmlids]
## -- Get all of our border edges:
eiter = om.MItMeshEdge(dag)
while not eiter.isDone():
if eiter.onBoundary():
border.append(eiter.index())
eiter.next()
## -- Find normals of soft edges in selection
eiter = om.MItMeshEdge(dag, comp)
while not eiter.isDone():
faces = om.MIntArray()
eiter.getConnectedFaces(faces)
facelist += faces
veca = om.MVector()
mesh.getPolygonNormal(faces[0], veca)
if eiter.isSmooth():
vecb = om.MVector()
mesh.getPolygonNormal(faces[1], vecb)
vec = veca + vecb
else:
vec = veca
self._verts[eiter.index(0)] = vec
self._verts[eiter.index(1)] = vec
eiter.next()
## -- Find all vertices that are not connected to a hard edge
viter = om.MItMeshVertex(dag)
keys = self._verts.keys()
while not viter.isDone():
edges = om.MIntArray(4)
viter.getConnectedEdges(edges)
index = viter.index()
if index in keys:
for n in edges:
if n in border:
continue
if not mesh.isEdgeSmooth(n) or n in border:
## -- Connected to hard edge so remove it
f = om.MIntArray()
viter.getConnectedFaces(f)
f = set(f) & set(facelist)
self._faceverts[index] = [self._verts[index], list(f)]
del self._verts[index]
break
viter.next()
seliter.next()
self._mesh = mesh
for idx, vec in self._verts.iteritems():
normal = om.MVector()
self._mesh.getVertexNormal(idx, normal)
self._currentnormals.append((idx, normal))
for idx, data in self._faceverts.iteritems():
flist = data[1]
for f in flist:
normal = om.MVector()
self._mesh.getFaceVertexNormal(f, idx, normal)
self._currentfacenormals.append((f, idx, normal))
self.redoIt()
def undoIt(self):
for idx, normal in self._currentnormals:
self._mesh.setVertexNormal(normal, idx)
for f, idx, normal in self._currentfacenormals:
self._mesh.setFaceVertexNormal(normal, f, idx)
locked = [i for i, n in self._currentlocked if n]
util = om.MScriptUtil()
arr = om.MIntArray()
util.createIntArrayFromList(locked, arr)
self._mesh.lockVertexNormals(arr)
unlocked = [i for i, n in self._currentlocked if not n]
util = om.MScriptUtil()
arr = om.MIntArray()
util.createIntArrayFromList(unlocked, arr)
self._mesh.unlockVertexNormals(arr)
def redoIt(self):
for idx, vec in self._verts.iteritems():
self._mesh.setVertexNormal(vec, idx)
for idx, data in self._faceverts.iteritems():
normal = data[0]
flist = data[1]
for f in flist:
self._mesh.setFaceVertexNormal(normal, f, idx)
@staticmethod
def creator():
return omx.asMPxPtr(AlignRoundedCommand())
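# Hypothetical registration sketch -- the command name and plug-in boilerplate
# below are assumptions, not taken from this file. An MPxCommand such as
# AlignRoundedCommand is normally exposed to Maya through initializePlugin /
# uninitializePlugin:
#
#   def initializePlugin(mobject):
#       plugin = omx.MFnPlugin(mobject)
#       plugin.registerCommand("alignRounded", AlignRoundedCommand.creator)
#
#   def uninitializePlugin(mobject):
#       plugin = omx.MFnPlugin(mobject)
#       plugin.deregisterCommand("alignRounded")
#
# After loading the plug-in, selecting a run of soft edges and invoking the
# registered command would apply the normal alignment described in the class
# docstring.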
| 38.143713
| 98
| 0.535793
|
1a4cbb5095002c7f476feb6b4a194691be08ec65
| 2,285
|
py
|
Python
|
app/recipe/tests/test_recipe.py
|
ineph/Receitas-api
|
33c3f591037f41d0765b1b42c5b8083b96827677
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe.py
|
ineph/Receitas-api
|
33c3f591037f41d0765b1b42c5b8083b96827677
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe.py
|
ineph/Receitas-api
|
33c3f591037f41d0765b1b42c5b8083b96827677
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe
from recipe.serializers import RecipeSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def sample_recipe(user, **params):
"""create and return a sample recipe"""
defaults = {
'title': 'Sample Recipe',
'time_minutes': 10,
'price': 5.00
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
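# Illustrative note (not part of the original tests): sample_recipe() merges
# keyword overrides into the defaults above, so for example
#   sample_recipe(user=self.user, title='Feijoada', time_minutes=90)
# creates a recipe that differs from the defaults only in the fields passed in.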
class PublicRecipeApiTests(TestCase):
"""test authenticated recipe API"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""test that authentication is required"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTest(TestCase):
"""test unauthenticated recipe API access"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@something.com',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""test retrieving a list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""test retrieving recipes for user"""
user2 = get_user_model().objects.create_user(
'other@something.com',
'pass123123'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
| 28.5625
| 71
| 0.670897
|
85f53bafa119bf996ac9e80c23eb606ae1ec4dd0
| 10,411
|
py
|
Python
|
project/classification_tools.py
|
boredStats/eeg-machine-learning
|
bbf72ef55644f3941120f8f9007d839c1a4731fd
|
[
"MIT"
] | 1
|
2020-07-31T11:38:53.000Z
|
2020-07-31T11:38:53.000Z
|
project/classification_tools.py
|
boredStats/eeg-machine-learning
|
bbf72ef55644f3941120f8f9007d839c1a4731fd
|
[
"MIT"
] | null | null | null |
project/classification_tools.py
|
boredStats/eeg-machine-learning
|
bbf72ef55644f3941120f8f9007d839c1a4731fd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Classifier tools for this project."""
import os
import pandas as pd
import numpy as np
import imblearn
from sklearn import model_selection, ensemble, svm, neighbors
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectFromModel, VarianceThreshold
from sklearn.metrics import balanced_accuracy_score, f1_score, confusion_matrix
class EEG_Classifier:
def __init__(
self,
resample_type=None,
classifier_type='ExtraTrees',
kfold_type='stratified',
n_splits=10, seed=None):
self.resample_type = resample_type
self.classifier_type = classifier_type
self.kfold_type = kfold_type
self.n_splits = n_splits
self.seed = seed
@staticmethod
def _splitter(type='stratified', n_splits=10, random_state=None):
if type == 'stratified':
splitter = model_selection.StratifiedKFold(
n_splits=n_splits, random_state=random_state)
elif type == 'random':
splitter = model_selection.KFold(
n_splits=n_splits, random_state=random_state)
else:
raise ValueError(
"type={type} not recognized. Options are 'stratified'"
" or 'random'".format(type=repr(type)))
return splitter
@staticmethod
def _calc_scores(y_test, predicted):
balanced = balanced_accuracy_score(y_test, predicted)
chance = balanced_accuracy_score(y_test, predicted, adjusted=True)
f1 = f1_score(y_test, predicted, average=None)
return balanced, chance, f1
def feature_selector(
self, x_train, x_test, y_train,
continuous_indices=None,
categorical_indices=None,
thresh="2*mean"):
if continuous_indices is None:
preproc = StandardScaler().fit(x_train)
x_train_data = preproc.transform(x_train)
x_test_data = preproc.transform(x_test)
else:
x_train_cont = x_train[:, continuous_indices]
x_test_cont = x_test[:, continuous_indices]
x_train_cat = x_train[:, categorical_indices]
x_test_cat = x_test[:, categorical_indices]
# Standardization for continuous data
preproc = StandardScaler().fit(x_train_cont)
x_train_z = preproc.transform(x_train_cont)
x_test_z = preproc.transform(x_test_cont)
# Variance threshold for categorical data
varthresh = VarianceThreshold(threshold=0).fit(x_train_cat)
x_train_v = varthresh.transform(x_train_cat)
x_test_v = varthresh.transform(x_test_cat)
x_train_data = np.hstack((x_train_z, x_train_v))
x_test_data = np.hstack((x_test_z, x_test_v))
clf = ensemble.ExtraTreesClassifier(random_state=self.seed)
fs_model = SelectFromModel(clf, threshold=thresh)
x_train_fs = fs_model.fit_transform(x_train_data, y_train)
x_test_fs = fs_model.transform(x_test_data)
feature_indices = fs_model.get_support(indices=True)
return x_train_fs, x_test_fs, feature_indices
def classify(self, eeg_data, target_data, gridsearch=None):
feature_names, cont_indices, cat_indices = check_eeg_data(eeg_data)
kfolder = self._splitter(
type=self.kfold_type,
n_splits=self.n_splits,
random_state=self.seed)
splitter = kfolder.split(
X=eeg_data.values, y=target_data.values)
train_indices, test_indices = [], []
for i in splitter:
train_indices.append(i[0])
test_indices.append(i[1])
resampler = _create_resampler(
type=self.resample_type,
random_state=self.seed)
clf = _create_classifier(
type=self.classifier_type,
random_state=self.seed)
        if gridsearch is not None:
            # GridSearchCV has to be fit before best_params_/cv_results_ are
            # available; a classification metric is used since clf is a classifier.
            searcher = model_selection.GridSearchCV(
                clf, gridsearch, scoring='balanced_accuracy', cv=self.n_splits)
            searcher.fit(eeg_data.values, np.ravel(target_data.values))
            kwargs = searcher.best_params_
            clf.set_params(**kwargs)
            grid_df = searcher.cv_results_
else:
grid_df = None
features_by_fold, confusion_matrices, = {}, {}
balanced_acc, chance_acc, f1_scores = [], [], []
for t in range(len(train_indices)):
train_idx = train_indices[t]
test_idx = test_indices[t]
x_train, x_test = eeg_data.iloc[train_idx], eeg_data.iloc[test_idx].values
y_train, y_test = target_data.iloc[train_idx], target_data.iloc[test_idx]
x_train_rs, y_train_rs = resampler.fit_resample(x_train, np.ravel(y_train.values))
x_train_fs, x_test_fs, feature_indices = self.feature_selector(
x_train_rs, x_test, np.ravel(y_train_rs),
continuous_indices=cont_indices,
categorical_indices=cat_indices)
cleaned_features = [feature_names[i] for i in feature_indices]
clf.fit(x_train_fs, np.ravel(y_train_rs))
predicted = clf.predict(x_test_fs)
try:
importances = np.ndarray.flatten(clf.feature_importances_)
feature_df = pd.DataFrame(columns=cleaned_features)
feature_df.loc['Feature Importances'] = importances
except AttributeError:
feature_df = None
pass
try:
classes = [str(c) for c in clf.classes_]
if len(classes) == 2:
idx_label = ['Coefficients']
else:
idx_label = ['%s coefficients' % c for c in classes]
coef = np.ndarray.flatten(clf.coef_)
feature_df = pd.DataFrame(
coef, index=idx_label, columns=cleaned_features)
except AttributeError:
pass
if feature_df is not None:
features_by_fold['Fold %03d' % (t+1)] = feature_df
balanced, chance, f1 = self._calc_scores(y_test, predicted)
balanced_acc.append(balanced)
chance_acc.append(chance)
f1_scores.append(f1)
# Calculating fold confusion matrix
cm = confusion_matrix(y_test, predicted)
confusion_matrices['Fold %03d' % (t+1)] = pd.DataFrame(
cm, index=clf.classes_, columns=clf.classes_)
f1_df, score_df = _save_scores(
f1_scores=f1_scores,
balanced_scores=balanced_acc,
chance_scores=chance_acc,
class_labels=clf.classes_)
scores_dict = {
'accuracy scores': score_df,
'f1 scores': f1_df}
return scores_dict, confusion_matrices, feature_df, grid_df
def check_eeg_data(eeg_df):
if type(eeg_df) != pd.DataFrame:
pass
feature_names = list(eeg_df)
if "categorical_sex_male" in feature_names:
cont_feats = [f for f in feature_names if 'categorical' not in f]
cont_indices = [eeg_df.columns.get_loc(f) for f in cont_feats]
cat_feats = [f for f in feature_names if 'categorical' in f]
cat_indices = [eeg_df.columns.get_loc(f) for f in cat_feats]
else:
cont_indices, cat_indices = None, None
return feature_names, cont_indices, cat_indices
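# Hedged illustration of the column convention check_eeg_data() relies on
# (feature names below are invented): continuous EEG features keep plain names
# while one-hot covariates carry a 'categorical_' prefix, e.g.
#   ['alpha_power_O1', 'theta_power_Fz', 'categorical_sex_male']
# Only when 'categorical_sex_male' is present are the two groups standardized
# and variance-thresholded separately in feature_selector().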
def _create_resampler(type=None, random_state=None):
if type is None:
class NoResample:
@staticmethod
def fit_resample(a, b):
return a.values, np.asarray(b)
resampler = NoResample()
elif type == 'under':
resampler = imblearn.under_sampling.RandomUnderSampler(
sampling_strategy='not minority',
random_state=random_state)
elif type == 'over':
resampler = imblearn.over_sampling.RandomOverSampler(
sampling_strategy='not majority',
random_state=random_state)
elif type == 'smote':
resampler = imblearn.over_sampling.SMOTE(
sampling_strategy='not majority',
random_state=random_state)
return resampler
def _create_classifier(type='ExtraTrees', kwargs=None, random_state=None):
if type == 'ExtraTrees':
clf = ensemble.ExtraTreesClassifier(
n_estimators=100,
random_state=random_state)
elif type == 'SVM':
clf = svm.SVC()
elif type == 'KNN':
clf = neighbors.KNeighborsClassifier()
if kwargs is not None:
clf.set_params(**kwargs)
return clf
def _save_scores(f1_scores, balanced_scores, chance_scores, class_labels):
# Calculate average performance, save to nice dataframes
n_folds = len(balanced_scores)
f1_array = np.asarray(f1_scores)
if n_folds != f1_array.shape[0]:
raise ValueError("Number of folds does not match")
rownames = ['Fold %02d' % (n+1) for n in range(n_folds)]
rownames.append('Average')
f1_class_averages = np.mean(f1_array, axis=0)
f1_data = np.vstack((f1_array, f1_class_averages))
f1_df = pd.DataFrame(f1_data, index=rownames, columns=class_labels)
balanced_scores.append(np.mean(balanced_scores))
chance_scores.append(np.mean(chance_scores))
accuracy_data = np.asarray([balanced_scores, chance_scores]).T
score_df = pd.DataFrame(
data=accuracy_data,
index=rownames,
columns=['Balanced accuracy', 'Chance accuracy'])
return f1_df, score_df
def _performance_testing():
import proj_utils as pu
print('%s: Loading data' % pu.ctime())
behavior_data, conn_data = pu.load_data_full_subjects()
ml_data_without_covariates = conn_data.astype(float)
side_data = behavior_data['tinnitus_side'].values.astype(float)
side_target = pu.convert_tin_to_str(side_data, 'tinnitus_side')
target_df = pd.DataFrame(side_target, index=ml_data_without_covariates.index,)
print('%s: Testing performance' % pu.ctime())
EC = EEG_Classifier(n_splits=10, seed=13, resample_type='smote')
scores_dict, confusion_matrices, feature_df, grid_df = EC.classify(
eeg_data=ml_data_without_covariates,
target_data=target_df)
pu.save_xls(scores_dict, 'scores_performance_testing.xlsx')
print('%s: Finished performance testing' % pu.ctime())
if __name__ == "__main__":
_performance_testing()
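# Hedged example (not in the original module): classify() also accepts a
# scikit-learn parameter grid for the chosen classifier, e.g. with the default
# ExtraTrees model something like
#   grid = {'n_estimators': [100, 250], 'max_depth': [None, 8]}
#   EC.classify(eeg_data=ml_data_without_covariates, target_data=target_df,
#               gridsearch=grid)
# which routes through the GridSearchCV branch before the per-fold runs.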
| 36.402098
| 94
| 0.638363
|
50fcb55b19e5502a8b3dbda337045451557fc72e
| 4,274
|
py
|
Python
|
bstbuy.py
|
JayJPatel/Stock-Notifier
|
b076fe26137877946e18847bce5d77c0c2462d64
|
[
"Apache-2.0"
] | null | null | null |
bstbuy.py
|
JayJPatel/Stock-Notifier
|
b076fe26137877946e18847bce5d77c0c2462d64
|
[
"Apache-2.0"
] | null | null | null |
bstbuy.py
|
JayJPatel/Stock-Notifier
|
b076fe26137877946e18847bce5d77c0c2462d64
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from bot import BOT
from dotenv import load_dotenv
import undetected_chromedriver as uc
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
load_dotenv()
# ---------------------------------------------------------------------------
# Constants
URL = os.getenv("BSTBUY_PRODUCT_URL")
API_KEY = os.getenv("API_KEY")
TO_EMAIL = os.getenv("TO_EMAIL")
FROM_EMAIL = os.getenv("FROM_EMAIL")
REFRESH_TIMER = int(os.getenv("page_refresh_timer"))
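# Hedged sketch of the .env file this script loads (values are placeholders;
# only the variable names are taken from the os.getenv() calls above):
#
#   BSTBUY_PRODUCT_URL=https://www.bestbuy.com/site/some-product/1234567.p
#   API_KEY=<SendGrid API key>
#   TO_EMAIL=you@example.com
#   FROM_EMAIL=alerts@example.com
#   page_refresh_timer=30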
# Webdriver Setup
WD_PATH = os.getcwd() + "\\chromedriver.exe"
opt = Options()
opt.add_argument("--log-level=3") # Webdriver: Only log critical errors
opt.add_argument("--headless")
opt.add_argument("--disable-gpu")
opt.add_argument("--no-sandbox")
# ---------------------------------------------------------------------------
# CSS Selectors used in BSTBUY_BOT
SKU = "data-sku-id"
ADD_TO_CART = "add-to-cart-button"
ATC_STATE = "data-button-state"
PRODUCT_TITLE = "v-fw-regular"
# ---------------------------------------------------------------------------
# CLASS - BSTBUY_BOT
class BSTBUY_BOT(BOT):
# Constructor
def __init__(self):
self.driver = uc.Chrome(executable_path=WD_PATH, options=opt)
# Open browser & grab SKU
def startup(self):
self.driver.get(URL)
print("Opening Best Buy browser...")
time.sleep(1)
try:
self.sku = self.driver.find_element(By.CLASS_NAME, ADD_TO_CART).get_attribute(SKU)
except NoSuchElementException:
print("\nError: Add to cart button not found.\nPlease ensure your Best Buy link is valid\n")
exit()
return
# Checks if product is in stock
def check_in_stock(self):
while(True):
self.driver.refresh()
# Wait until elem is located, else timeout & loop
try:
elem_exists = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, ADD_TO_CART)))
except TimeoutException:
print("Timeout Error: ATC Button not found.\nRefreshing page...")
continue
if elem_exists:
text = self.driver.find_element(
By.CLASS_NAME, ADD_TO_CART).get_attribute(ATC_STATE)
if (text == "SOLD_OUT"):
print("Product not in stock")
else:
print("Product in stock!!! Sending email...")
return
time.sleep(REFRESH_TIMER)
# Generates Instant ATC Link
def makeIATCLink(self):
IATC_Link = ("https://api.bestbuy.com/click/-/" + self.sku + "/cart/")
return IATC_Link
# Grabs product name
def grabProductName(self):
PRODUCT_NAME = self.driver.find_element(
By.CLASS_NAME, PRODUCT_TITLE).text
return PRODUCT_NAME
# Generates email that the product is in stock
def generate_email_contents(self):
# Body of email
plainText = ('Stock_Notifier has detected that the product:\n' + self.grabProductName() +
'\nis now in stock!\n\nInstant Add To Cart Link: ' + self.makeIATCLink() +
'\n\nProduct Link: ' + URL + '\n\n')
# Email content
message = Mail(from_email=FROM_EMAIL,
to_emails=TO_EMAIL,
subject='STOCK NOTIFIER: ITEM IN STOCK',
plain_text_content=plainText)
return message
# Sends email that product is in stock
def send_email(self):
message = self.generate_email_contents()
try:
sndgrid = SendGridAPIClient(API_KEY)
response = sndgrid.send(message)
# print(response.status_code)
# print(response.body)
# print(response.headers)
except Exception as err:
print(err)
# Exit webdriver session
def close(self):
self.driver.quit()
return
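# Hedged usage sketch (the original repository drives this class from a
# separate entry point not shown here; only the method names are taken from
# the class above):
#
#   if __name__ == "__main__":
#       bot = BSTBUY_BOT()
#       bot.startup()          # open the product page and read the SKU
#       bot.check_in_stock()   # poll until the add-to-cart button goes live
#       bot.send_email()       # SendGrid email with an instant add-to-cart link
#       bot.close()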
| 35.032787
| 128
| 0.600608
|
b4d4a20959223209a628ff19dcebb853a2ab8da0
| 15,129
|
py
|
Python
|
libraries/botframework-connector/tests/test_conversations.py
|
baruchiro/botbuilder-python
|
736d1d1d16c69304fbaaa3d7499a6bca57706726
|
[
"MIT"
] | 1
|
2020-12-29T22:47:46.000Z
|
2020-12-29T22:47:46.000Z
|
libraries/botframework-connector/tests/test_conversations.py
|
baruchiro/botbuilder-python
|
736d1d1d16c69304fbaaa3d7499a6bca57706726
|
[
"MIT"
] | 1
|
2021-05-09T12:08:10.000Z
|
2021-05-09T12:08:10.000Z
|
libraries/botframework-connector/tests/test_conversations.py
|
baruchiro/botbuilder-python
|
736d1d1d16c69304fbaaa3d7499a6bca57706726
|
[
"MIT"
] | 2
|
2021-12-07T10:42:10.000Z
|
2022-03-28T11:44:08.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import asyncio
import pytest
from azure_devtools.scenario_tests import ReplayableTest
from botbuilder.schema import (
Activity,
ActivityTypes,
Attachment,
AttachmentLayoutTypes,
CardImage,
ChannelAccount,
ConversationParameters,
ErrorResponseException,
HeroCard,
)
from botframework.connector import ConnectorClient
from botframework.connector.auth import MicrosoftAppCredentials
from authentication_stub import MicrosoftTokenAuthenticationStub
SERVICE_URL = "https://slack.botframework.com"
CHANNEL_ID = "slack"
BOT_NAME = "botbuilder-pc-bot"
BOT_ID = "B21UTEF8S:T03CWQ0QB"
RECIPIENT_ID = "U19KH8EHJ:T03CWQ0QB"
CONVERSATION_ID = "B21UTEF8S:T03CWQ0QB:D2369CT7C"
async def get_auth_token():
try:
from .app_creds_real import MICROSOFT_APP_ID, MICROSOFT_APP_PASSWORD
# Define a "app_creds_real.py" file with your bot credentials as follows:
# MICROSOFT_APP_ID = '...'
# MICROSOFT_APP_PASSWORD = '...'
return await MicrosoftAppCredentials(
MICROSOFT_APP_ID, MICROSOFT_APP_PASSWORD
).get_access_token()
except ImportError:
return "STUB_ACCESS_TOKEN"
LOOP = asyncio.get_event_loop()
AUTH_TOKEN = LOOP.run_until_complete(get_auth_token())
class ConversationTest(ReplayableTest):
def __init__(self, method_name): # pylint: disable=useless-super-delegation
super(ConversationTest, self).__init__(method_name)
@property
def credentials(self):
return MicrosoftTokenAuthenticationStub(AUTH_TOKEN)
def test_conversations_create_conversation(self):
test_object = ChannelAccount(id=RECIPIENT_ID)
create_conversation = ConversationParameters(
bot=ChannelAccount(id=BOT_ID),
members=[test_object],
activity=Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
from_property=ChannelAccount(id=BOT_ID),
recipient=test_object,
text="Hi there!",
),
)
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
conversation = connector.conversations.create_conversation(create_conversation)
assert conversation.id is not None
def test_conversations_create_conversation_with_invalid_bot_id_fails(self):
test_object = ChannelAccount(id=RECIPIENT_ID)
create_conversation = ConversationParameters(
bot=ChannelAccount(id="INVALID"),
members=[test_object],
activity=Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
from_property=ChannelAccount(id="INVALID"),
recipient=test_object,
text="Hi there!",
),
)
with pytest.raises(ErrorResponseException) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
connector.conversations.create_conversation(create_conversation)
assert excinfo.value.error.error.code == "ServiceError"
assert "Invalid userId" in str(excinfo.value.error.error.message)
def test_conversations_create_conversation_without_members_fails(self):
create_conversation = ConversationParameters(
bot=ChannelAccount(id=BOT_ID),
activity=Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
from_property=ChannelAccount(id=BOT_ID),
text="Hi there!",
),
members=[],
)
with pytest.raises(ErrorResponseException) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
connector.conversations.create_conversation(create_conversation)
assert excinfo.value.error.error.code == "BadArgument"
assert "Conversations" in str(excinfo.value.error.error.message)
def test_conversations_create_conversation_with_bot_as_only_member_fails(self):
test_object = ChannelAccount(id=BOT_ID)
sender = ChannelAccount(id=BOT_ID)
create_conversation = ConversationParameters(
bot=sender,
members=[test_object],
activity=Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
from_property=sender,
recipient=test_object,
text="Hi there!",
),
)
with pytest.raises(ErrorResponseException) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
connector.conversations.create_conversation(create_conversation)
assert excinfo.value.error.error.code == "BadArgument"
assert "Bots cannot IM other bots" in str(excinfo.value.error.error.message)
def test_conversations_send_to_conversation(self):
activity = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Hello again!",
)
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
response = connector.conversations.send_to_conversation(
CONVERSATION_ID, activity
)
assert response is not None
def test_conversations_send_to_conversation_with_attachment(self):
card1 = HeroCard(
title="A static image",
text="JPEG image",
images=[
CardImage(
url="https://docs.com/en-us/bot-framework/media/designing-bots/core/dialogs-screens.png"
)
],
)
card2 = HeroCard(
title="An animation",
subtitle="GIF image",
images=[CardImage(url="http://i.giphy.com/Ki55RUbOV5njy.gif")],
)
activity = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
attachment_layout=AttachmentLayoutTypes.list,
attachments=[
Attachment(content_type="application/vnd.card.hero", content=card1),
Attachment(content_type="application/vnd.card.hero", content=card2),
],
)
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
response = connector.conversations.send_to_conversation(
CONVERSATION_ID, activity
)
assert response is not None
def test_conversations_send_to_conversation_with_invalid_conversation_id_fails(
self
):
activity = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Error!",
)
with pytest.raises(ErrorResponseException) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
connector.conversations.send_to_conversation("123", activity)
assert excinfo.value.error.error.code == "ServiceError"
assert "cannot send messages to this id" in str(
excinfo.value.error.error.message
) or "Invalid ConversationId" in str(excinfo.value.error.error.message)
def test_conversations_get_conversation_members(self):
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
members = connector.conversations.get_conversation_members(CONVERSATION_ID)
assert len(members) == 2
assert members[0].name == BOT_NAME
assert members[0].id == BOT_ID
def test_conversations_get_conversation_members_invalid_id_fails(self):
with pytest.raises(ErrorResponseException) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
connector.conversations.get_conversation_members("INVALID_ID")
assert excinfo.value.error.error.code == "ServiceError"
assert "cannot send messages to this id" in str(
excinfo.value.error.error.message
) or "Invalid ConversationId" in str(excinfo.value.error.error.message)
def test_conversations_update_activity(self):
activity = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Updating activity...",
)
activity_update = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Activity updated.",
)
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
response = connector.conversations.send_to_conversation(
CONVERSATION_ID, activity
)
activity_id = response.id
response = connector.conversations.update_activity(
CONVERSATION_ID, activity_id, activity_update
)
assert response is not None
assert response.id == activity_id
def test_conversations_update_activity_invalid_conversation_id_fails(self):
activity = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Updating activity...",
)
activity_update = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Activity updated.",
)
with pytest.raises(ErrorResponseException) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
response = connector.conversations.send_to_conversation(
CONVERSATION_ID, activity
)
activity_id = response.id
connector.conversations.update_activity(
"INVALID_ID", activity_id, activity_update
)
assert excinfo.value.error.error.code == "ServiceError"
assert "Invalid ConversationId" in str(excinfo.value.error.error.message)
def test_conversations_reply_to_activity(self):
activity = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Thread activity",
)
child_activity = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Child activity.",
)
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
response = connector.conversations.send_to_conversation(
CONVERSATION_ID, activity
)
activity_id = response.id
response = connector.conversations.reply_to_activity(
CONVERSATION_ID, activity_id, child_activity
)
assert response is not None
assert response.id != activity_id
def test_conversations_reply_to_activity_with_invalid_conversation_id_fails(self):
child_activity = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Child activity.",
)
with pytest.raises(ErrorResponseException) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
connector.conversations.reply_to_activity(
"INVALID_ID", "INVALID_ID", child_activity
)
assert excinfo.value.error.error.code == "ServiceError"
assert "Invalid ConversationId" in str(excinfo.value.error.error.message)
def test_conversations_delete_activity(self):
activity = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Activity to be deleted..",
)
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
response = connector.conversations.send_to_conversation(
CONVERSATION_ID, activity
)
activity_id = response.id
response = connector.conversations.delete_activity(CONVERSATION_ID, activity_id)
assert response is None
def test_conversations_delete_activity_with_invalid_conversation_id_fails(self):
with pytest.raises(ErrorResponseException) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
connector.conversations.delete_activity("INVALID_ID", "INVALID_ID")
assert excinfo.value.error.error.code == "ServiceError"
assert "Invalid ConversationId" in str(excinfo.value.error.error.message)
def test_conversations_get_activity_members(self):
activity = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Test Activity",
)
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
response = connector.conversations.send_to_conversation(
CONVERSATION_ID, activity
)
members = connector.conversations.get_activity_members(
CONVERSATION_ID, response.id
)
assert len(members) == 2
assert members[0].name == BOT_NAME
assert members[0].id == BOT_ID
def test_conversations_get_activity_members_invalid_conversation_id_fails(self):
activity = Activity(
type=ActivityTypes.message,
channel_id=CHANNEL_ID,
recipient=ChannelAccount(id=RECIPIENT_ID),
from_property=ChannelAccount(id=BOT_ID),
text="Test Activity",
)
with pytest.raises(ErrorResponseException) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
response = connector.conversations.send_to_conversation(
CONVERSATION_ID, activity
)
connector.conversations.get_activity_members("INVALID_ID", response.id)
assert excinfo.value.error.error.code == "ServiceError"
assert "Invalid ConversationId" in str(excinfo.value.error.error.message)
| 37.8225
| 108
| 0.660189
|
8dc362a507091ce03cc2b8b278781f5730550eff
| 466
|
py
|
Python
|
tests/system/test_base.py
|
wklken/tcpbeat
|
1145b85c313fdb5bf3fea0bb72a5fa4376fa2534
|
[
"Apache-2.0"
] | null | null | null |
tests/system/test_base.py
|
wklken/tcpbeat
|
1145b85c313fdb5bf3fea0bb72a5fa4376fa2534
|
[
"Apache-2.0"
] | null | null | null |
tests/system/test_base.py
|
wklken/tcpbeat
|
1145b85c313fdb5bf3fea0bb72a5fa4376fa2534
|
[
"Apache-2.0"
] | null | null | null |
from tcpbeat import BaseTest
import os
class Test(BaseTest):
def test_base(self):
"""
Basic test with exiting Tcpbeat normally
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
tcpbeat_proc = self.start_beat()
self.wait_until(lambda: self.log_contains("tcpbeat is running"))
exit_code = tcpbeat_proc.kill_and_wait()
assert exit_code == 0
| 23.3
| 72
| 0.622318
|
c05c13479b2d6b9d835a7a53435b9ceedb72b663
| 146
|
py
|
Python
|
userbot/function/__init__.py
|
saskeuday/sasoke
|
8ed75e7011172547ba1abc8fb75aa72581b1c0c6
|
[
"MIT"
] | null | null | null |
userbot/function/__init__.py
|
saskeuday/sasoke
|
8ed75e7011172547ba1abc8fb75aa72581b1c0c6
|
[
"MIT"
] | null | null | null |
userbot/function/__init__.py
|
saskeuday/sasoke
|
8ed75e7011172547ba1abc8fb75aa72581b1c0c6
|
[
"MIT"
] | 1
|
2020-09-03T13:39:52.000Z
|
2020-09-03T13:39:52.000Z
|
# Copyright (C) Is Distributed Between @StarkGang And @ZeltraxRockz
# Please Ask At @FridayOT Before Copying Any Module
# FridayUserbot (2020-21)
| 36.5
| 67
| 0.780822
|
a0812e7b61780b932174589253889eae13db6f1e
| 840
|
py
|
Python
|
tests/test_record_history_yet_more.py
|
Twangist/log_calls
|
7671a781a9e756844ef2e0adee76412d3d465648
|
[
"MIT"
] | 16
|
2015-02-21T00:00:20.000Z
|
2021-09-26T05:24:24.000Z
|
tests/test_record_history_yet_more.py
|
Twangist/log_calls
|
7671a781a9e756844ef2e0adee76412d3d465648
|
[
"MIT"
] | 3
|
2018-04-25T04:02:41.000Z
|
2018-04-25T04:11:54.000Z
|
tests/test_record_history_yet_more.py
|
Twangist/log_calls
|
7671a781a9e756844ef2e0adee76412d3d465648
|
[
"MIT"
] | 3
|
2015-02-21T13:15:20.000Z
|
2022-01-18T15:02:46.000Z
|
__author__ = "Brian O'Neill"
__version__ = '0.3.0'
from unittest import TestCase
class TestDF(TestCase):
def test__history_as_DataFrame(self):
from log_calls import record_history
@record_history()
def f(a, b, x):
return a*x + b
for i in range(1000): f(3, 5, i)
df = f.stats.history_as_DataFrame
try:
import pandas as pd
except ImportError:
self.assertEqual(df, None)
else:
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual(len(df.retval), 1000)
if __name__ == "__main__":
from log_calls import record_history
@record_history()
def f(a, b, x):
return a * x + b
for i in range(1000):
f(3, 5, i)
df = f.stats.history_as_DataFrame
count = len(df.retval)
| 20
| 51
| 0.584524
|
ceaab883818548570e077a954bb33cebfae3c329
| 8,343
|
py
|
Python
|
bpython/curtsiesfrontend/coderunner.py
|
timgates42/bpython
|
b772e1b0e608d5b17faebc7c2ec94a9958a90779
|
[
"PSF-2.0"
] | null | null | null |
bpython/curtsiesfrontend/coderunner.py
|
timgates42/bpython
|
b772e1b0e608d5b17faebc7c2ec94a9958a90779
|
[
"PSF-2.0"
] | null | null | null |
bpython/curtsiesfrontend/coderunner.py
|
timgates42/bpython
|
b772e1b0e608d5b17faebc7c2ec94a9958a90779
|
[
"PSF-2.0"
] | null | null | null |
# encoding: utf-8
"""For running Python code that could interrupt itself at any time in order to,
for example, ask for a read on stdin, or a write on stdout
The CodeRunner spawns a greenlet to run code in, and that code can suspend its
own execution to ask the main greenlet to refresh the display or get
information.
Greenlets are basically threads that can explicitly switch control to each
other. You can replace the word "greenlet" with "thread" in these docs if that
makes more sense to you.
"""
import code
import threading
import signal
import greenlet
import logging
from bpython._py3compat import py3, is_main_thread
from bpython.config import getpreferredencoding
logger = logging.getLogger(__name__)
class SigintHappened(object):
"""If this class is returned, a SIGINT happened while the main greenlet"""
class SystemExitFromCodeRunner(SystemExit):
"""If this class is returned, a SystemExit happened while in the code
greenlet"""
class RequestFromCodeRunner(object):
"""Message from the code runner"""
class Wait(RequestFromCodeRunner):
"""Running code would like the main loop to run for a bit"""
class Refresh(RequestFromCodeRunner):
"""Running code would like the main loop to refresh the display"""
class Done(RequestFromCodeRunner):
"""Running code is done running"""
class Unfinished(RequestFromCodeRunner):
"""Source code wasn't executed because it wasn't fully formed"""
class SystemExitRequest(RequestFromCodeRunner):
"""Running code raised a SystemExit"""
def __init__(self, args):
self.args = args
class CodeRunner(object):
"""Runs user code in an interpreter.
Running code requests a refresh by calling
request_from_main_context(force_refresh=True), which
suspends execution of the code and switches back to the main greenlet
After load_code() is called with the source code to be run,
the run_code() method should be called to start running the code.
The running code may request screen refreshes and user input
by calling request_from_main_context.
    When this is called, the running source code cedes
control, and the current run_code() method call returns.
The return value of run_code() determines whether the method ought
to be called again to complete execution of the source code.
Once the screen refresh has occurred or the requested user input
has been gathered, run_code() should be called again, passing in any
requested user input. This continues until run_code returns Done.
The code greenlet is responsible for telling the main greenlet
what it wants returned in the next run_code call - CodeRunner
just passes whatever is passed in to run_code(for_code) to the
code greenlet
"""
def __init__(self, interp=None, request_refresh=lambda: None):
"""
interp is an interpreter object to use. By default a new one is
created.
request_refresh is a function that will be called each time the running
code asks for a refresh - to, for example, update the screen.
"""
self.interp = interp or code.InteractiveInterpreter()
self.source = None
self.main_context = greenlet.getcurrent()
self.code_context = None
self.request_refresh = request_refresh
# waiting for response from main thread
self.code_is_waiting = False
# sigint happened while in main thread
self.sigint_happened_in_main_context = False
self.orig_sigint_handler = None
@property
def running(self):
"""Returns greenlet if code has been loaded greenlet has been
started"""
return self.source and self.code_context
def load_code(self, source):
"""Prep code to be run"""
assert self.source is None, (
"you shouldn't load code when some is " "already running"
)
self.source = source
self.code_context = None
def _unload_code(self):
"""Called when done running code"""
self.source = None
self.code_context = None
self.code_is_waiting = False
def run_code(self, for_code=None):
"""Returns Truthy values if code finishes, False otherwise
if for_code is provided, send that value to the code greenlet
if source code is complete, returns "done"
if source code is incomplete, returns "unfinished"
"""
if self.code_context is None:
assert self.source is not None
self.code_context = greenlet.greenlet(self._blocking_run_code)
if is_main_thread():
self.orig_sigint_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, self.sigint_handler)
request = self.code_context.switch()
else:
assert self.code_is_waiting
self.code_is_waiting = False
if is_main_thread():
signal.signal(signal.SIGINT, self.sigint_handler)
if self.sigint_happened_in_main_context:
self.sigint_happened_in_main_context = False
request = self.code_context.switch(SigintHappened)
else:
request = self.code_context.switch(for_code)
logger.debug("request received from code was %r", request)
if not isinstance(request, RequestFromCodeRunner):
raise ValueError(
"Not a valid value from code greenlet: %r" % request
)
if isinstance(request, (Wait, Refresh)):
self.code_is_waiting = True
if isinstance(request, Refresh):
self.request_refresh()
return False
elif isinstance(request, (Done, Unfinished)):
self._unload_code()
if is_main_thread():
signal.signal(signal.SIGINT, self.orig_sigint_handler)
self.orig_sigint_handler = None
return request
elif isinstance(request, SystemExitRequest):
self._unload_code()
raise SystemExitFromCodeRunner(request.args)
def sigint_handler(self, *args):
"""SIGINT handler to use while code is running or request being
fulfilled"""
if greenlet.getcurrent() is self.code_context:
logger.debug("sigint while running user code!")
raise KeyboardInterrupt()
else:
logger.debug(
"sigint while fulfilling code request sigint handler "
"running!"
)
self.sigint_happened_in_main_context = True
def _blocking_run_code(self):
try:
unfinished = self.interp.runsource(self.source)
except SystemExit as e:
return SystemExitRequest(*e.args)
return Unfinished() if unfinished else Done()
def request_from_main_context(self, force_refresh=False):
"""Return the argument passed in to .run_code(for_code)
Nothing means calls to run_code must be... ???
"""
if force_refresh:
value = self.main_context.switch(Refresh())
else:
value = self.main_context.switch(Wait())
if value is SigintHappened:
raise KeyboardInterrupt()
return value
class FakeOutput(object):
def __init__(self, coderunner, on_write, fileno=1):
"""Fakes sys.stdout or sys.stderr
on_write should always take unicode
fileno should be the fileno that on_write will
output to (e.g. 1 for standard output).
"""
self.coderunner = coderunner
self.on_write = on_write
self.real_fileno = fileno
def write(self, s, *args, **kwargs):
if not py3 and isinstance(s, str):
s = s.decode(getpreferredencoding(), "ignore")
self.on_write(s, *args, **kwargs)
return self.coderunner.request_from_main_context(force_refresh=True)
# Some applications which use curses require that sys.stdout
# have a method called fileno. One example is pwntools. This
# is not a widespread issue, but is annoying.
def fileno(self):
return self.real_fileno
def writelines(self, l):
for s in l:
self.write(s)
def flush(self):
pass
def isatty(self):
return True
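# --- Hedged sketch (not part of bpython): a minimal greenlet ping-pong that
# mirrors the request/response protocol described in the CodeRunner docstring.
# The function name and the literal request string are illustrative only.
def _greenlet_protocol_demo():
    def user_code():
        # "running code" asks the main greenlet for a value, then resumes
        answer = greenlet.getcurrent().parent.switch("need-input")
        return "got: %s" % answer
    code = greenlet.greenlet(user_code)
    request = code.switch()          # run user_code until it switches back
    assert request == "need-input"   # the main loop fulfils the request ...
    result = code.switch("42")       # ... and resumes the code greenlet
    assert result == "got: 42"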
| 34.475207
| 79
| 0.662831
|
9afec6b5d1be86ba04497da91c1ed2182bcc366d
| 5,464
|
py
|
Python
|
tests/tests/views.py
|
aptivate/django-organizations
|
3ac867493508612370066c00ca7bd8d55632e116
|
[
"BSD-2-Clause"
] | 1
|
2015-06-22T20:45:23.000Z
|
2015-06-22T20:45:23.000Z
|
tests/tests/views.py
|
philippeowagner/django-organizations
|
0c2dd98b5c5af0e3de7cbd4a23567213c5222ac6
|
[
"BSD-2-Clause"
] | null | null | null |
tests/tests/views.py
|
philippeowagner/django-organizations
|
0c2dd98b5c5af0e3de7cbd4a23567213c5222ac6
|
[
"BSD-2-Clause"
] | null | null | null |
from django.test import TestCase
from django.core.urlresolvers import reverse
from testing_utils import AccountUserTestingMixin
class AccountUnauthViewTest(TestCase, AccountUserTestingMixin):
"""
For unauthenticated users.
Verify GET requests on all views. This does not cover updates via POST
requests.
"""
def setUp(self):
self.get_fixtures()
def test_account_list(self):
response = self.client.get(reverse("account_list"))
self.assertEqual(response.status_code, 302)
def test_account_add(self):
response = self.client.get(reverse("account_add"))
self.assertEqual(response.status_code, 302)
def test_account_detail(self):
response = self.client.get(reverse("account_detail",
kwargs={"account_pk": self.account1.pk}))
self.assertEqual(response.status_code, 302)
def test_account_update(self):
response = self.client.get(reverse("account_edit",
kwargs={"account_pk": self.account1.pk}))
self.assertEqual(response.status_code, 302)
def test_account_delete(self):
response = self.client.get(reverse("account_delete",
kwargs={"account_pk": self.account1.pk}))
self.assertEqual(response.status_code, 302)
def test_accountuser_list(self):
response = self.client.get(reverse("accountuser_list",
kwargs={"account_pk": self.account1.pk}))
self.assertEqual(response.status_code, 302)
def test_accountuser_add(self):
response = self.client.get(reverse("accountuser_add",
kwargs={"account_pk": self.account1.pk}))
self.assertEqual(response.status_code, 302)
def test_accountuser_detail(self):
response = self.client.get(reverse("accountuser_detail", kwargs={
"account_pk": self.account1.pk,
"accountuser_pk": self.accountuser_1.pk}))
self.assertEqual(response.status_code, 302)
def test_accountuser_update(self):
response = self.client.get(reverse("accountuser_edit", kwargs={
"account_pk": self.account1.pk,
"accountuser_pk": self.accountuser_1.pk}))
self.assertEqual(response.status_code, 302)
def test_accountuser_delete(self):
response = self.client.get(reverse("accountuser_delete", kwargs={
"account_pk": self.account1.pk,
"accountuser_pk": self.accountuser_1.pk}))
self.assertEqual(response.status_code, 302)
class AccountViewTest(TestCase, AccountUserTestingMixin):
"""
For the account owner
Verify GET requests on all views. This does not cover updates via POST
requests.
"""
def setUp(self):
self.get_fixtures()
self.client.login(username="user", password="pass")
def test_account_list(self):
response = self.client.get(reverse("account_list"))
self.assertEqual(response.status_code, 200)
def test_account_add(self):
response = self.client.get(reverse("account_add"))
self.assertEqual(response.status_code, 200)
def test_account_detail(self):
response = self.client.get(reverse("account_detail",
kwargs={"account_pk": self.account1.pk}))
self.assertEqual(response.status_code, 200)
def test_account_update(self):
response = self.client.get(reverse("account_edit",
kwargs={"account_pk": self.account1.pk}))
self.assertEqual(response.status_code, 200)
def test_account_delete(self):
response = self.client.get(reverse("account_delete",
kwargs={"account_pk": self.account1.pk}))
self.assertEqual(response.status_code, 200)
def test_accountuser_list(self):
response = self.client.get(reverse("accountuser_list",
kwargs={"account_pk": self.account1.pk}))
self.assertEqual(response.status_code, 200)
def test_accountuser_add(self):
response = self.client.get(reverse("accountuser_add",
kwargs={"account_pk": self.account1.pk}))
self.assertEqual(response.status_code, 200)
def test_accountuser_detail(self):
response = self.client.get(reverse("accountuser_detail", kwargs={
"account_pk": self.account1.pk,
"accountuser_pk": self.accountuser_1.pk}))
self.assertEqual(response.status_code, 200)
def test_accountuser_update(self):
response = self.client.get(reverse("accountuser_edit", kwargs={
"account_pk": self.account1.pk,
"accountuser_pk": self.accountuser_1.pk}))
self.assertEqual(response.status_code, 200)
def test_accountuser_delete(self):
response = self.client.get(reverse("accountuser_delete", kwargs={
"account_pk": self.account1.pk,
"accountuser_pk": self.accountuser_1.pk}))
self.assertEqual(response.status_code, 200)
class AccountCreationViewsTest(TestCase, AccountUserTestingMixin):
"""
Test the views for creating accounts and account users
"""
def setUp(self):
self.get_fixtures()
class AccountUpdateViewsTest(TestCase, AccountUserTestingMixin):
"""
Test the views for updating accounts and account users
"""
def setUp(self):
self.get_fixtures()
class AccountDeletionViewsTest(TestCase, AccountUserTestingMixin):
"""
Test the views for deleting accounts and account users
"""
def setUp(self):
self.get_fixtures()
| 35.251613
| 74
| 0.676611
|
5e4b89ca67e620cf6932ecbc34141bf7263d4285
| 122
|
py
|
Python
|
Practice/evenodds.py
|
ashishjayamohan/competitive-programming
|
05c5c560c2c2eb36121c52693b8c7d084f435f9e
|
[
"MIT"
] | null | null | null |
Practice/evenodds.py
|
ashishjayamohan/competitive-programming
|
05c5c560c2c2eb36121c52693b8c7d084f435f9e
|
[
"MIT"
] | null | null | null |
Practice/evenodds.py
|
ashishjayamohan/competitive-programming
|
05c5c560c2c2eb36121c52693b8c7d084f435f9e
|
[
"MIT"
] | null | null | null |
# Numbers 1..x are reordered so all odd numbers come first, then all even
# numbers; print the y-th number of that sequence.
x, y = map(int, input().split())
# m = how many odd numbers there are among 1..x
if x % 2 == 0:
    m = x // 2
else:
    m = x // 2 + 1
if y <= m:
    print((y - 1) * 2 + 1)  # y-th odd number
else:
    print((y - m) * 2)      # (y - m)-th even number
| 12.2
| 34
| 0.516393
|
1e9620bd3381f22d1d7c4791a6ec74526cd186f5
| 13,809
|
py
|
Python
|
tests/test_suite.py
|
inverseproblem/pyMag2Dpoly
|
d4210faa691c1ca96677e4a416ae6571d53970ac
|
[
"MIT"
] | 4
|
2021-01-31T00:41:43.000Z
|
2021-09-08T05:01:13.000Z
|
tests/test_suite.py
|
inverseproblem/pyMag2Dpoly
|
d4210faa691c1ca96677e4a416ae6571d53970ac
|
[
"MIT"
] | null | null | null |
tests/test_suite.py
|
inverseproblem/pyMag2Dpoly
|
d4210faa691c1ca96677e4a416ae6571d53970ac
|
[
"MIT"
] | 1
|
2021-03-28T07:19:26.000Z
|
2021-03-28T07:19:26.000Z
|
import sys
import numpy as np
sys.path.append("./")
import mag2dpoly as mag
########################################################
def test1():
Jind = mag.MagnetizVector(mod=4.9,Ideg=90.0,Ddeg=45.0)
Jrem = mag.MagnetizVector(mod=3.1,Ideg=45.0,Ddeg=0.0)
northxax = 90.0
Nobs = 101
xzobs = np.transpose(np.vstack(( np.linspace(0.0,100.0,Nobs), -1.0*np.ones(Nobs))))
vertices = np.array([ [35.0, 50.0],
[65.0, 50.0],
[80.0, 35.0],
[65.0, 20.0],
[35.0, 20.0],
[20.0, 35.0] ])
## array of arrays
nbod = 1
    bodyindices = np.empty(shape=(nbod,), dtype=object)  # np.object is removed in recent NumPy
inds = range(6)
bodyindices[0] = np.array(inds)
pbody = mag.MagPolyBodies2D(bodyindices,vertices)
## compute total field (superposition of tmag of bodies)
forwardtype = ["talwani","talwani_red","krav","wonbev"]
nfwd = len(forwardtype)
tmag = np.zeros((xzobs.shape[0],nfwd))
nbody = pbody.bo.size
Jindv = np.array([Jind])
Jremv = np.array([Jrem])
for (f,fwdt) in enumerate(forwardtype):
tmag[:,f] = mag.tmagpolybodies2Dgen(xzobs,Jindv,Jremv,northxax,pbody,forwardtype[f])
##=========================================
tmag_ref = np.array([ -116.16336912423309,
-107.60475622349567,
-98.17723975959588,
-87.82662842366796,
-76.49775256995866,
-64.13484643609536,
-50.682001858897294,
-36.08369908036598,
-20.285419949994587,
-3.2343484367466506,
15.119837140202215,
34.82407638693401,
55.92094064483901,
78.44754640867708,
102.43436783270268,
127.90394975470824,
154.8695254168912,
183.3335469464661,
213.28614208527696,
244.70351807578447,
277.5463434505119,
311.758151040835,
347.2638208530303,
383.9682191188879,
421.7550886836475,
460.4863039069605,
500.0016173506611,
540.119031663933,
580.6359235938513,
621.331023429952,
661.9673092115332,
702.2958101588549,
742.0602315222982,
781.0022216070385,
818.8670137044077,
855.4091061413785,
890.3976071295564,
923.62087771655,
954.890158626987,
984.0419590104054,
1010.9391031766961,
1035.4704568268496,
1057.5494684492162,
1077.1117497867892,
1094.111973515768,
1108.5203855942902,
1120.3192191143874,
1129.499264418154,
1136.0568061804884,
1139.991090454424,
1141.302439274996,
1139.9910904544242,
1136.0568061804884,
1129.499264418154,
1120.3192191143867,
1108.52038559429,
1094.111973515767,
1077.1117497867892,
1057.5494684492162,
1035.4704568268494,
1010.9391031766954,
984.0419590104062,
954.8901586269874,
923.6208777165492,
890.3976071295564,
855.4091061413781,
818.8670137044072,
781.0022216070377,
742.0602315222977,
702.295810158854,
661.9673092115327,
621.3310234299516,
580.6359235938512,
540.1190316639322,
500.0016173506604,
460.48630390696064,
421.75508868364693,
383.9682191188882,
347.2638208530297,
311.7581510408341,
277.5463434505112,
244.70351807578382,
213.28614208527665,
183.33354694646584,
154.86952541689124,
127.90394975470798,
102.43436783270306,
78.4475464086772,
55.92094064483902,
34.82407638693425,
15.119837140202181,
-3.234348436746581,
-20.285419949994342,
-36.083699080365705,
-50.68200185889743,
-64.13484643609462,
-76.49775256995939,
-87.82662842366803,
-98.17723975959623,
-107.60475622349514,
-116.16336912423309])
##=========================================
same = [np.allclose(tmag[:,i],tmag_ref) for i in range(tmag.shape[1])]
print("all(same): ",all(same))
if all(same) :
return True
else :
for i in range(nfwd):
if same[i]==False :
print("Failed for ",forwardtype[i])
return False
################################################################
################################################################
def test2():
Jind = mag.MagnetizVector(mod=4.9,Ideg=90.0,Ddeg=45.0)
Jrem = mag.MagnetizVector(mod=3.1,Ideg=45.0,Ddeg=0.0)
northxax = 90.0
Nobs = 101
xzobs = np.transpose(np.vstack(( np.linspace(0.0,100.0,Nobs), -1.0*np.ones(Nobs))))
vertices = np.array([[39.9767752377201, 13.278991954030298],
[43.21168888652307, 13.402291449170173],
[43.317652312473136, 14.739290679698811],
[43.546649812203974, 13.203638738574346],
[43.91560484766585, 13.172384962659983],
[44.14344907863333, 13.366977100309308],
[44.40792260208563, 14.433477629153117],
[44.69197601158024, 13.41736322165884],
[43.051984435135324, 12.28972077523347],
[41.64196146829342, 11.82578388310931],
[40.0, 10.0],
[51.593762919577486, 12.985398228772771],
[52.893379913843816, 11.074145035188602],
[53.03238448156978, 10.749216912218833],
[53.17394928686575, 10.785849813929147],
[53.21679397938986, 10.936364994289892],
[53.31684510201022, 10.901007653562868],
[52.164999382907524, 9.566098324208415],
[51.69760101144281, 10.0],
[60.559336241924306, 25.07742274155419],
[65.61740252955528, 25.405022500973224],
[66.6522482157798, 24.330905878795054],
[66.99370462700534, 22.430809598897174],
[67.95964200423342, 20.871100638551827],
[68.41821282760033, 20.46729028637734],
[67.67999306568265, 20.187645182621033],
[67.42746927845577, 19.369846341974842],
[65.71882888409377, 17.507867891105946],
[65.63379439530597, 16.85561419292242],
[65.52497931177504, 14.899927178489119],
[64.20104498482468, 12.937540781866952],
[62.56031603648637, 11.598110231234838],
[60.78820427692065, 10.0]])
## array of arrays
nbod = 3
    bodyindices = np.empty(shape=(nbod,), dtype=object)  # np.object is removed in recent NumPy
ind1 = range(11)
ind2 = range(11,19)
ind3 = range(19,33)
bodyindices[0] = np.array(ind1)
bodyindices[1] = np.array(ind2)
bodyindices[2] = np.array(ind3)
pbody = mag.MagPolyBodies2D(bodyindices,vertices)
## compute total field (superposition of tmag of bodies)
## compute total field (superposition of tmag of bodies)
forwardtype = ["talwani","talwani_red","krav","wonbev"]
nfwd = len(forwardtype)
tmag = np.zeros((xzobs.shape[0],nfwd))
nbody = pbody.bo.size
for (f,fwdt) in enumerate(forwardtype):
for i in range(nbody):
tmag[:,f] += mag.tmagpoly2Dgen(xzobs,Jind,Jrem,northxax,pbody.bo[i],forwardtype[f])
###########################################################################
tmag_ref = [-25.18101203031008,
-25.814111679386286,
-26.46591188468871,
-27.136513821069393,
-27.82589071680914,
-28.533854477346786,
-29.260014742480834,
-30.003728645158226,
-30.764039135123586,
-31.53959923300231,
-32.32857897143186,
-33.12855103994676,
-33.93635025992559,
-34.74790095972702,
-35.558005092427436,
-36.360082555206354,
-37.14585368518767,
-37.90495244621838,
-38.624457626684254,
-39.28832887474771,
-39.87673534453047,
-40.36526832574021,
-40.724037390920856,
-40.91666522359578,
-40.89922361225488,
-40.61919790412695,
-40.01463677488,
-39.013746213150505,
-37.53532637130842,
-35.49062356397606,
-32.78735204013638,
-29.336765353626436,
-25.06459486138753,
-19.926213598986948,
-13.925267157081024,
-7.133055532339174,
0.2966799431282361,
8.125321786345921,
16.047468856320766,
23.73409183413679,
30.896898575646777,
37.35947087475763,
43.11364135233236,
48.34131017064244,
53.39281394546982,
58.7274836881353,
64.83295011470159,
72.14348149663033,
80.97541229042082,
91.49257447124981,
103.70790485858134,
117.51814892303605,
132.75663244957155,
149.23869197241677,
166.77366986321647,
185.1314145984579,
203.9747839694488,
222.7894273624335,
240.84727102364297,
257.2295606565289,
270.9152383168951,
280.91830730494206,
286.440275178761,
286.99698347216184,
282.4869194996864,
273.1878553816839,
259.6912319962327,
242.79908349000044,
223.41189102240213,
202.4297310575936,
180.67896175091312,
158.86732337391078,
137.5640994379945,
117.19911113444167,
98.07391651937287,
80.37955931028789,
64.21668987444241,
49.615331309029074,
36.552740278605995,
24.96865481639862,
14.7777648040935,
5.879555320167113,
-1.834170357150894,
-8.473724523855406,
-14.147563163141468,
-18.959577242278417,
-23.00736030015876,
-26.381212717810055,
-29.163688024210938,
-31.429526675334152,
-33.245857032715975,
-34.67257173728606,
-35.76281076807948,
-36.5635008447628,
-37.11591518972078,
-37.45622870164328,
-37.616051936809455,
-37.6229334959912,
-37.50082493720261,
-37.27050556150594,
-36.949966659975665]
##=================================================
same = [np.allclose(tmag[:,i],tmag_ref) for i in range(tmag.shape[1])]
print("all(same): ",all(same))
if all(same) :
return True
else :
for i in range(nfwd):
if same[i]==False :
print("Failed for ",forwardtype[i])
return False
#############################################################
if __name__=="__main__" :
test1()
test2()
| 37.832877
| 95
| 0.436961
|
e7a77921b3535d540d69236f621e28233e3156e4
| 1,022
|
py
|
Python
|
epistasis/models/tests/test_utils.py
|
lperezmo/epistasis
|
4f751d9e2d9ca632a7b688cf32bd950ad7c2a754
|
[
"Unlicense"
] | 21
|
2016-08-31T15:14:55.000Z
|
2021-11-27T14:42:35.000Z
|
epistasis/models/tests/test_utils.py
|
lperezmo/epistasis
|
4f751d9e2d9ca632a7b688cf32bd950ad7c2a754
|
[
"Unlicense"
] | 14
|
2016-11-30T18:39:00.000Z
|
2020-04-07T23:48:49.000Z
|
epistasis/models/tests/test_utils.py
|
lperezmo/epistasis
|
4f751d9e2d9ca632a7b688cf32bd950ad7c2a754
|
[
"Unlicense"
] | 8
|
2016-08-30T00:30:14.000Z
|
2020-04-02T01:03:19.000Z
|
# #from gpmap.simulate import GenotypePhenotypeSimulation
# from ..utils import *
#
# from ..base import BaseModel
#
# class MockModel(BaseModel):
#
# def __init__(self):
# self.gpm = GenotypePhenotypeSimulation.from_length(2)
# self.model_type = "local"
# self.order = 2
# self.Xbuilt = {}
#
# @X_fitter
# def fit(self, X='obs', y='obs'):
# self.coef_ = [0,0,0,0]
# return None
#
# @X_predictor
# def predict(self, X='complete', y='complete'):
# return None
#
# def test_X_fitter():
# model = MockModel()
# model.fit()
# # Test an Xfit matrix was made
# assert "obs" in model.Xbuilt
# assert "fit" in model.Xbuilt
# assert model.Xbuilt["fit"].shape == (4,4)
#
# def test_X_predictor():
# model = MockModel()
# model.fit()
# model.predict()
# # Test an Xfit matrix was made
# assert "complete" in model.Xbuilt
# assert "predict" in model.Xbuilt
# assert model.Xbuilt["predict"].shape == (4,4)
| 26.205128
| 63
| 0.591977
|
b9815118b9ae9e0cd49d67ab035a288051e11c8a
| 543
|
py
|
Python
|
main.py
|
CodeWithAgam/Odd-or-Even
|
32067d1bfeb4d022c559089f1d3984223a9564ae
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
CodeWithAgam/Odd-or-Even
|
32067d1bfeb4d022c559089f1d3984223a9564ae
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
CodeWithAgam/Odd-or-Even
|
32067d1bfeb4d022c559089f1d3984223a9564ae
|
[
"Apache-2.0"
] | null | null | null |
# The script below checks whether a number is odd or even
# Created by Agamdeep Singh / CodeWithAgam
# Youtube: CodeWithAgam
# Github: CodeWithAgam
# Instagram: @agamdeep_21, @coderagam001
# Twitter: @CoderAgam001
# LinkedIn: Agamdeep Singh
# Print a Welcome message
print("Welcome to Odd or Even!")
# Get the number as an input
number = int(input("Which number do you want to check? "))
# Check for the conditions and print the final answer
if number % 2 == 1:
print("This is an odd number.")
else:
print("This is an even number.")
| 28.578947
| 58
| 0.72744
|
7f87fe9302df20e123586e581f4cf037a9c5e80b
| 571
|
py
|
Python
|
tests/cryptography/test_key_pad_decode.py
|
JASTYN/pythonmaster
|
46638ab09d28b65ce5431cd0759fe6df272fb85d
|
[
"Apache-2.0",
"MIT"
] | 3
|
2017-05-02T10:28:13.000Z
|
2019-02-06T09:10:11.000Z
|
tests/cryptography/test_key_pad_decode.py
|
JASTYN/pythonmaster
|
46638ab09d28b65ce5431cd0759fe6df272fb85d
|
[
"Apache-2.0",
"MIT"
] | 2
|
2017-06-21T20:39:14.000Z
|
2020-02-25T10:28:57.000Z
|
tests/cryptography/test_key_pad_decode.py
|
JASTYN/pythonmaster
|
46638ab09d28b65ce5431cd0759fe6df272fb85d
|
[
"Apache-2.0",
"MIT"
] | 2
|
2016-07-29T04:35:22.000Z
|
2017-01-18T17:05:36.000Z
|
import unittest
from cryptography.key_pad_decode import decode
class MyTestCase(unittest.TestCase):
def test_1(self):
self.assertEqual(decode("4103432323"), "6957678787")
def test_2(self):
self.assertEqual(decode("4103438970"), "6957672135")
def test_3(self):
self.assertEqual(decode("4104305768"), "6956750342")
def test_4(self):
self.assertEqual(decode("4102204351"), "6958856709")
def test_5(self):
self.assertEqual(decode("4107056043"), "6953504567")
if __name__ == '__main__':
unittest.main()
| 22.84
| 60
| 0.677758
|
34e36b515ff4a006c19e5f98190cd03fa64e089a
| 7,090
|
py
|
Python
|
mc/sim.py
|
swirkert/ipcai2016
|
7193b1b1a001511e7efadc2a40b4ab544be76607
|
[
"BSD-3-Clause"
] | 2
|
2019-12-17T08:45:23.000Z
|
2022-02-03T15:10:11.000Z
|
mc/sim.py
|
swirkert/ipcai2016
|
7193b1b1a001511e7efadc2a40b4ab544be76607
|
[
"BSD-3-Clause"
] | null | null | null |
mc/sim.py
|
swirkert/ipcai2016
|
7193b1b1a001511e7efadc2a40b4ab544be76607
|
[
"BSD-3-Clause"
] | null | null | null |
"""
ipcai2016
Copyright (c) German Cancer Research Center,
Computer Assisted Interventions.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE for details
"""
'''
Created on Sep 8, 2015
This file contains methods which wrap the mcml simulation so it can be
conveniently called from python. One example for a mcml simulation would be
gpumcml:
https://code.google.com/p/gpumcml/
@author: wirkert
Modified on August 8, 2016: Anant Vemuri
'''
import os
import contextlib
import logging
import subprocess32
""" helper method to change to the correct path and back again """
@contextlib.contextmanager
def cd(newPath):
    savedPath = os.getcwd()
    os.chdir(newPath)
    try:
        yield
    finally:
        # always change back, even if the wrapped block raises
        os.chdir(savedPath)
class MciWrapper(object):
'''
this class provides a wrapper to the mcml monte carlo file.
Its purpose is to create a .mci file which the mcml simulation can use to
create the simulation
'''
def set_mci_filename(self, mci_filename):
self.mci_filename = mci_filename
def set_base_mco_filename(self, base_filename):
self.base_mco_filename = base_filename
def get_base_mco_filename(self):
return self.base_mco_filename
def set_nr_photons(self, nr_photons):
self.nr_photons = nr_photons
def add_layer(self, n=None, ua=None, us=None, g=None, d=None):
"""adds a layer below the currently existing ones.
Arguments:
n: Refraction index of medium
ua: absorption coefficient [1/m]
us: scattering coefficient [1/m]
g: anisotropy factor
d: thickness of layer [m]
"""
if n is None:
n = 1.
if ua is None:
ua = 0.
if us is None:
us = 0.
if g is None:
g = 1.
if d is None:
d = 500.*10 ** -6
self.layers.append([n, ua, us, g, d])
def set_layer(self, layer_nr, n, ua, us, g, d):
"""set a layer with a specific layer_nr (stariting with layer_nr 0).
Note that the layer must already exist, otherwise an error will occure
"""
self.layers[layer_nr] = [n, ua, us, g, d]
def set_file_version(self, file_version):
self.file_version = file_version
def set_nr_runs(self, nr_runs):
self.nr_runs = nr_runs
def set_dz_dr(self, dz, dr):
self.dz = dz
self.dr = dr
def set_nr_dz_dr_da(self, nr_dz, nr_dr, nr_da):
self.nr_dz = nr_dz
self.nr_dr = nr_dr
self.nr_da = nr_da
def set_n_medium_above(self, n_above):
self.n_above = n_above
def set_n_medium_below(self, n_below):
self.n_below = n_below
def create_mci_file(self):
# write header
f = open(self.mci_filename, 'w')
f.write(str(self.file_version) + " # file version\n")
f.write(str(self.nr_runs) + " # number of runs\n\n")
f.close()
def update_mci_file(self, wavelength):
"""this method creates the mci file at the location self.mci_filename"""
open(self.mci_filename, 'a').close()
f = open(self.mci_filename, 'a')
# Generate a new mco fileName
local_mco_filename = self.base_mco_filename + str(wavelength) + '.mco'
# write the data for run
f.write(local_mco_filename + " A # output filename, ASCII/Binary\n")
f.write(str(self.nr_photons) + " # No. of photons\n")
f.write(repr(self.dz) + " " + repr(self.dr) + " # dz, dr\n")
f.write(repr(self.nr_dz) + " " +
repr(self.nr_dr) + " " +
repr(self.nr_da) + " # No. of dz, dr & da.\n\n")
# write layer information
f.write(str(len(self.layers)) + " # No. of layers\n")
f.write("# n mua mus g d # One line for each layer\n")
f.write(repr(self.n_above) + " # n for medium above.\n")
for layer in self.layers:
# factors (/100.; *100.) to convert to mcml expected units:
f.write("%.3f" % layer[0] + " " + # n
"%.5f" % (layer[1] / 100.) + " " + # ua
"%.5f" % (layer[2] / 100.) + " " + # us
"%.3f" % layer[3] + " " + # g
"%.3f" % (layer[4] * 100.) + "\n") # d
f.write(repr(self.n_below) + " # n for medium below.\n\n")
f.close()
if not os.path.isfile(self.mci_filename):
raise IOError("input file for monte carlo simulation not " +
"succesfully created")
def __init__(self):
# set standard parameters
self.file_version = 1.0
self.nr_photons = 10**6
self.nr_runs = 1
self.dz = 0.002
self.dr = 2
self.nr_dz = 500
self.nr_dr = 1
self.nr_da = 1
self.n_above = 1.0
self.n_below = 1.0
# initialize to 0 layers
self.layers = []
class SimWrapper(object):
def set_mci_filename(self, mci_filename):
"""the full path to the input file. E.g. ./data/my.mci
"""
self.mci_filename = mci_filename
def set_mcml_executable(self, mcml_executable):
""" the full path of the excutable. E.g. ./mcml/mcml.exe"""
self.mcml_executable = mcml_executable
def run_simulation(self):
"""this method runs a monte carlo simulation"""
mcml_path, mcml_file = os.path.split(self.mcml_executable)
abs_mci_filename = os.path.abspath(self.mci_filename)
# note: the -A option makes gpumcml much faster, but is not available
# in original mcml. Maybe a switch should be introduced here
args = ("./" + mcml_file, "-A", abs_mci_filename)
# switch to folder where mcml resides in and execute it.
with cd(mcml_path):
try:
mcml_exec = subprocess32.Popen(args)
mcml_exec.wait(timeout=None)
except:
logging.error("couldn't run simulation")
# popen.kill()
def __init__(self):
pass
def get_diffuse_reflectance(mco_filename):
"""
extract reflectance from mco file.
Attention: mco_filename specifies full path.
Returns: the reflectance
"""
with open(mco_filename) as myFile:
for line in myFile:
if "Diffuse reflectance" in line:
return float(line.split(' ', 1)[0])
def get_specular_reflectance(mco_filename):
"""
extract reflectance from mco file.
Attention: mco_filename specifies full path.
Returns: the reflectance
"""
with open(mco_filename) as myFile:
for line in myFile:
if "Specular reflectance" in line:
return float(line.split(' ', 1)[0])
def get_total_reflectance(mco_filename):
"""
extract reflectance from mco file.
Attention: mco_filename specifies full path.
Returns: the reflectance
"""
return get_diffuse_reflectance(mco_filename) + \
get_specular_reflectance(mco_filename)
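# --- Hedged usage sketch (not part of the module): how the wrappers and helpers
# above are meant to be combined. The file paths and executable location are
# illustrative only; a (gpu)mcml binary must exist for run_simulation() to
# actually produce an .mco file.
if __name__ == "__main__":
    mci = MciWrapper()
    mci.set_mci_filename("./data/example.mci")
    mci.set_base_mco_filename("./data/example_")
    mci.set_nr_photons(10 ** 5)
    # one layer; ua and us in [1/m], d in [m] as documented in add_layer
    mci.add_layer(n=1.33, ua=100., us=2000., g=0.9, d=500. * 10 ** -6)
    mci.create_mci_file()
    mci.update_mci_file(wavelength=500)
    sim = SimWrapper()
    sim.set_mci_filename("./data/example.mci")
    sim.set_mcml_executable("./mcml/gpumcml")
    sim.run_simulation()
    print(get_total_reflectance(mci.get_base_mco_filename() + "500.mco"))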
| 30.170213
| 80
| 0.6
|
44803b97af3a23febdd7af22c342428d81bd92a8
| 1,010
|
py
|
Python
|
cplusplus/level1_single_api/4_op_dev/2_verify_op/acl_execute_lstm/run/out/test_data/data/compare_result.py
|
Ascend/samples
|
5e060ddf8c502cf0e248ecbe1c8986e95351cbbd
|
[
"Apache-2.0"
] | 25
|
2020-11-20T09:01:35.000Z
|
2022-03-29T10:35:38.000Z
|
cplusplus/level1_single_api/4_op_dev/2_verify_op/acl_execute_lstm/run/out/test_data/data/compare_result.py
|
tanmana5/samples
|
3ea657c42d6fc8bfd1d664d52da2e9024c2a94ea
|
[
"Apache-2.0"
] | 5
|
2021-02-28T20:49:37.000Z
|
2022-03-04T21:50:27.000Z
|
cplusplus/level1_single_api/4_op_dev/2_verify_op/acl_execute_lstm/run/out/test_data/data/compare_result.py
|
Ascend/samples
|
5e060ddf8c502cf0e248ecbe1c8986e95351cbbd
|
[
"Apache-2.0"
] | 16
|
2020-12-06T07:26:13.000Z
|
2022-03-01T07:51:55.000Z
|
"""
* @file compare_result.py
*
* Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
import numpy as np
def compare(golden, result):
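    # passes when fewer than 1% of the elements deviate from the golden result by more than 0.01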
return np.sum(abs(golden-result) > 0.01) < 0.01*result.size
if __name__ == '__main__':
aicore_result_h = np.fromfile("../../result_files/output_0.bin",
dtype="float16")
aicore_result_c = np.fromfile("../../result_files/output_1.bin",
dtype="float16")
numpy_result_h = np.fromfile("output_golden_0.bin", dtype="float16")
numpy_result_c = np.fromfile("output_golden_1.bin", dtype="float16")
if compare(numpy_result_h, aicore_result_h) and\
compare(numpy_result_c, aicore_result_c):
print("compare success")
else:
print("compare failed")
| 32.580645
| 72
| 0.658416
|
3be3608b6546f5e18120fd5f86355ed77072a711
| 6,726
|
py
|
Python
|
ExcelExport/dt-excel.py
|
dlopes7/dynatrace-api
|
5ed061b5542eea9cee8cfd5dab81d91fc891f2f5
|
[
"BSD-3-Clause"
] | 80
|
2016-09-19T21:06:50.000Z
|
2022-03-31T06:34:29.000Z
|
ExcelExport/dt-excel.py
|
dlopes7/dynatrace-api
|
5ed061b5542eea9cee8cfd5dab81d91fc891f2f5
|
[
"BSD-3-Clause"
] | 36
|
2018-01-29T06:33:10.000Z
|
2022-03-07T08:05:56.000Z
|
ExcelExport/dt-excel.py
|
dlopes7/dynatrace-api
|
5ed061b5542eea9cee8cfd5dab81d91fc891f2f5
|
[
"BSD-3-Clause"
] | 70
|
2017-01-30T09:42:18.000Z
|
2022-03-24T18:57:35.000Z
|
import pycurl
import json
import csv
import certifi
import io
from openpyxl import Workbook
from openpyxl.styles import Alignment,Font
### Setup Variables ###
URL='https://{id}.live.dynatrace.com/api/v1/'
APITOKEN='XXXXXXXXXXXXXXXXXXXXX'
DEST_FILENAME='dt-export.xlsx'
### function to go get the data
def dtApiQuery(endpoint):
buffer=io.BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, URL + endpoint)
c.setopt(pycurl.CAINFO, certifi.where())
c.setopt(c.HTTPHEADER, ['Authorization: Api-Token ' + APITOKEN] )
c.setopt(pycurl.WRITEFUNCTION, buffer.write)
c.perform()
print('Status: %d' % c.getinfo(c.RESPONSE_CODE))
c.close()
return(buffer.getvalue().decode('UTF-8'))
### Setup workbook
wb = Workbook()
wsHosts = wb.create_sheet("hosts")
wsHostHost = wb.create_sheet("host-host")
wsProcess = wb.create_sheet("processes")
wsProcessProcess = wb.create_sheet("process-process")
wsProcessHost = wb.create_sheet("process-host")
wb.remove(wb.active)
### Get & Process hosts data
hostsIO=dtApiQuery('entity/infrastructure/hosts')
hosts=json.loads(hostsIO)
wsHosts.append( ['hostId','displayName','osType','osVersion','hypervisorType','ipAddress1','ipAddress2','ipAddress3'] )
for host in hosts:
wsHosts.append( [ host['entityId'],
host['displayName'],
host['osType'],
host['osVersion'],
host['hypervisorType'] if 'hypervisorType' in host else '',
host['ipAddresses'][0] if 'ipAddresses' in host else '',
host['ipAddresses'][1] if 'ipAddresses' in host and len(host['ipAddresses']) >1 else '',
host['ipAddresses'][2] if 'ipAddresses' in host and len(host['ipAddresses']) >2 else ''
] )
wsHostHost.append( ['fromHostId','toHostId'] )
for fromHost in hosts:
if 'toRelationships' in fromHost and 'isNetworkClientOfHost' in fromHost['toRelationships']:
for toHost in fromHost['toRelationships']['isNetworkClientOfHost']:
wsHostHost.append( [ fromHost['entityId'],
toHost,
] )
### Get & Process processes data
processesIO=dtApiQuery('entity/infrastructure/processes')
processes=json.loads(processesIO)
wsProcess.append( ['processId','displayName','softwareType','softwareVersion','port1','port2','port3','port4','port5'] )
for process in processes:
wsProcess.append( [ process['entityId'],
process['displayName'],
process['softwareTechnologies'][0]['type'] if 'softwareTechnologies' in process else '',
process['softwareTechnologies'][0]['version'] if 'softwareTechnologies' in process else '',
process['listenPorts'][0] if 'listenPorts' in process else '',
process['listenPorts'][1] if 'listenPorts' in process and len(process['listenPorts'])>1 else '',
process['listenPorts'][2] if 'listenPorts' in process and len(process['listenPorts'])>2 else '',
process['listenPorts'][3] if 'listenPorts' in process and len(process['listenPorts'])>3 else '',
process['listenPorts'][4] if 'listenPorts' in process and len(process['listenPorts'])>4 else ''
] )
wsProcessProcess.append( ['fromProcessId','toProcessId'] )
for fromProcess in processes:
if 'toRelationships' in fromProcess and 'isNetworkClientOf' in fromProcess['toRelationships']:
for toProcess in fromProcess['toRelationships']['isNetworkClientOf']:
wsProcessProcess.append( [ fromProcess['entityId'],
toProcess,
] )
wsProcessHost.append( ['processId','hostId'] )
for process in processes:
if 'fromRelationships' in process and 'isProcessOf' in process['fromRelationships']:
for host in process['fromRelationships']['isProcessOf']:
wsProcessHost.append( [ process['entityId'],
host,
] )
### set column widths
for ws in wb.worksheets:
for column_cells in ws.columns:
length = max(len(str(cell.value)) for cell in column_cells)
ws.column_dimensions[column_cells[0].column].width = length+1
### Set header format
for ws in wb.worksheets:
for cell in ws["1:1"]:
cell.style='Headline 3'
### Generate FW Rule Sheet
wsFWRules = wb.create_sheet("FWRules",0)
wsFWRules.append([ 'Linking Pointers','','','','',
'Firewall Rule','','','','',
'Source Extended Info','','','',
'Destination Extended Info','','','',
'Filtering'
])
wsFWRules.merge_cells('A1:E1')
wsFWRules.merge_cells('F1:J1')
wsFWRules.merge_cells('K1:N1')
wsFWRules.merge_cells('O1:R1')
wsFWRules.append([ 'fromProcessId','toProcessId','fromHostId','toHostId','',
'srcIP','dstIP','proto','port','',
'srcHostName','srcProcessName','srcProcessType','',
'dstHostname','dstProcessName','dstProcessType','',
'isIntraHost?'
])
for col in ['A','B','C','D','E']:
wsFWRules.column_dimensions[col].hidden=True
wsFWRules["A1"].style="Accent3"
wsFWRules["F1"].style="Accent1"
wsFWRules["K1"].style="Accent4"
wsFWRules["O1"].style="Accent5"
wsFWRules["S1"].style="Accent2"
for cell in wsFWRules["1:1"]:
cell.font=Font(bold=True,color='FFFFFF')
cell.alignment=Alignment(horizontal='center')
for cell in wsFWRules["2:2"]:
cell.style='Headline 3'
wsFWRules.sheet_properties.tabColor = '0066FF'
i=3
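# build one FWRules row per process-process relationship; each Excel formula
# looks up host/process details from the other sheets by entity id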
for row in wsProcessProcess.rows:
wsFWRules.append([
"='process-process'!A%i" % i,
"='process-process'!B%i" % i,
"=VLOOKUP(A%i,'process-host'!$A:$B,2,FALSE)" % i,
"=VLOOKUP(B%i,'process-host'!$A:$B,2,FALSE)" % i,
"",
"=VLOOKUP(C%i,'hosts'!$A:$F,6,FALSE)" % i,
"=VLOOKUP(D%i,'hosts'!$A:$F,6,FALSE)" % i,
"TCP",
"=IF(LEN(VLOOKUP(B%i,'processes'!$A:$E,5,FALSE))=0,\"\",VLOOKUP(B%i,'processes'!$A:$E,5,FALSE))" % (i,i),
"",
"=VLOOKUP(C%i,'hosts'!$A:$B,2,FALSE)" % i,
"=VLOOKUP(A%i,'processes'!$A:$B,2,FALSE)" % i,
"=VLOOKUP(A%i,'processes'!$A:$C,3,FALSE)" % i,
"",
"=VLOOKUP(D%i,'hosts'!$A:$B,2,FALSE)" % i,
"=VLOOKUP(B%i,'processes'!$A:$B,2,FALSE)" % i,
"=VLOOKUP(B%i,'processes'!$A:$C,3,FALSE)" % i,
"",
"=IF(C%i=D%i,TRUE,FALSE)" % (i,i)
])
i += 1
wsFWRules.column_dimensions['F'].width = wsHosts.column_dimensions['F'].width
wsFWRules.column_dimensions['G'].width = wsHosts.column_dimensions['F'].width
wsFWRules.column_dimensions['H'].width = 8
wsFWRules.column_dimensions['I'].width = wsProcess.column_dimensions['E'].width
wsFWRules.column_dimensions['J'].width = 5
wsFWRules.column_dimensions['K'].width = wsHosts.column_dimensions['B'].width
wsFWRules.column_dimensions['L'].width = wsProcess.column_dimensions['B'].width
wsFWRules.column_dimensions['M'].width = wsProcess.column_dimensions['C'].width
wsFWRules.column_dimensions['N'].width = 5
wsFWRules.column_dimensions['O'].width = wsHosts.column_dimensions['B'].width
wsFWRules.column_dimensions['P'].width = wsProcess.column_dimensions['B'].width
wsFWRules.column_dimensions['Q'].width = wsProcess.column_dimensions['C'].width
wsFWRules.column_dimensions['R'].width = 5
wsFWRules.column_dimensions['S'].width = 8
wsFWRules.auto_filter.ref="A2:S2"
### Output file
wb.save(filename=DEST_FILENAME)
| 34.670103
| 120
| 0.704133
|
f645714edd0d3366dc5c01844e944a0c158e7787
| 2,912
|
py
|
Python
|
fedjax/core/client_samplers_test.py
|
alshedivat/fedjax
|
ff46ba9955f167160353d7be72f6f5e1febee32c
|
[
"Apache-2.0"
] | null | null | null |
fedjax/core/client_samplers_test.py
|
alshedivat/fedjax
|
ff46ba9955f167160353d7be72f6f5e1febee32c
|
[
"Apache-2.0"
] | null | null | null |
fedjax/core/client_samplers_test.py
|
alshedivat/fedjax
|
ff46ba9955f167160353d7be72f6f5e1febee32c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fedjax.core.client_samplers."""
from absl.testing import absltest
from fedjax.core import client_datasets
from fedjax.core import client_samplers
import jax
import numpy as np
import numpy.testing as npt
class ClientSamplersTest(absltest.TestCase):
def assert_clients_equal(self, actual_clients, expected_clients):
with self.subTest('client_ids'):
self.assertEqual([cid for cid, _, _ in actual_clients],
[cid for cid, _, _ in expected_clients])
with self.subTest('client_datasets'):
self.assertEqual([len(cds) for _, cds, _ in actual_clients],
[len(cds) for _, cds, _ in expected_clients])
with self.subTest('client_rngs'):
npt.assert_array_equal([crng for _, _, crng in actual_clients],
[crng for _, _, crng in expected_clients])
def test_uniform_shuffled_client_sampler(self):
def shuffled_clients():
i = 0
while True:
yield i, client_datasets.ClientDataset({'x': np.arange(i)})
i += 1
client_sampler1 = client_samplers.UniformShuffledClientSampler(
shuffled_clients(), num_clients=2)
for _ in range(4):
clients1 = client_sampler1.sample()
client_sampler2 = client_samplers.UniformShuffledClientSampler(
shuffled_clients(), num_clients=2, start_round_num=3)
self.assert_clients_equal(client_sampler2.sample(), clients1)
def test_uniform_get_client_sampler(self):
class MockFederatedData:
def client_ids(self):
yield from range(100)
def get_clients(self, client_ids):
for cid in client_ids:
yield cid, client_datasets.ClientDataset({'x': np.arange(int(cid))})
num_clients = 2
round_num = 3
client_sampler = client_samplers.UniformGetClientSampler(
MockFederatedData(), num_clients, seed=0, start_round_num=round_num)
client_rngs = jax.random.split(jax.random.PRNGKey(round_num), num_clients)
self.assert_clients_equal(
client_sampler.sample(),
[(78, client_datasets.ClientDataset({'x': np.arange(78)
}), client_rngs[0]),
(56, client_datasets.ClientDataset({'x': np.arange(56)
}), client_rngs[1])])
if __name__ == '__main__':
absltest.main()
| 35.950617
| 78
| 0.682349
|
1e4bd3839eab345fc45832ae908f28c4ea53ff3c
| 1,052
|
py
|
Python
|
edmunds/profiler/drivers/basedriver.py
|
LowieHuyghe/edmunds-python
|
236d087746cb8802a8854b2706b8d3ff009e9209
|
[
"Apache-2.0"
] | 4
|
2017-09-07T13:39:50.000Z
|
2018-05-31T16:14:50.000Z
|
edmunds/profiler/drivers/basedriver.py
|
LowieHuyghe/edmunds-python
|
236d087746cb8802a8854b2706b8d3ff009e9209
|
[
"Apache-2.0"
] | 103
|
2017-03-19T15:58:21.000Z
|
2018-07-11T20:36:17.000Z
|
edmunds/profiler/drivers/basedriver.py
|
LowieHuyghe/edmunds-python
|
236d087746cb8802a8854b2706b8d3ff009e9209
|
[
"Apache-2.0"
] | 2
|
2017-10-14T15:20:11.000Z
|
2018-04-20T09:55:44.000Z
|
from edmunds.globals import abc, ABC
class BaseDriver(ABC):
"""
The base driver for profiler-drivers
"""
def __init__(self, app):
"""
Initiate the instance
:param app: The application
:type app: Edmunds.Application
"""
self._app = app
@abc.abstractmethod
def process(self, profiler, start, end, environment, suggestive_file_name):
"""
Process the results
:param profiler: The profiler
:type profiler: cProfile.Profile
:param start: Start of profiling
:type start: int
:param end: End of profiling
:type end: int
:param environment: The environment
:type environment: Environment
:param suggestive_file_name: A suggestive file name
:type suggestive_file_name: str
"""
pass
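# --- Hedged sketch (not part of Edmunds): a minimal concrete driver showing how
# BaseDriver is meant to be subclassed; the use of pstats and printing to stdout
# are illustrative choices only.
import pstats
class PrintDriver(BaseDriver):
    """
    Example driver that prints the ten most expensive calls to stdout
    """
    def process(self, profiler, start, end, environment, suggestive_file_name):
        stats = pstats.Stats(profiler)
        stats.sort_stats('cumulative').print_stats(10)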
| 30.057143
| 79
| 0.495247
|
ec1a33b577225ba7381b15132b32be2f4261ad0f
| 3,366
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/web/v20180201/list_web_application_settings.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/web/v20180201/list_web_application_settings.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/web/v20180201/list_web_application_settings.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListWebApplicationSettingsResult',
'AwaitableListWebApplicationSettingsResult',
'list_web_application_settings',
]
@pulumi.output_type
class ListWebApplicationSettingsResult:
"""
String dictionary resource.
"""
def __init__(__self__, kind=None, name=None, properties=None, type=None):
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Mapping[str, str]:
"""
Settings.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableListWebApplicationSettingsResult(ListWebApplicationSettingsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListWebApplicationSettingsResult(
kind=self.kind,
name=self.name,
properties=self.properties,
type=self.type)
def list_web_application_settings(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebApplicationSettingsResult:
"""
Use this data source to access information about an existing resource.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:web/v20180201:listWebApplicationSettings', __args__, opts=opts, typ=ListWebApplicationSettingsResult).value
return AwaitableListWebApplicationSettingsResult(
kind=__ret__.kind,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
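# --- Hedged usage sketch (not part of the generated SDK): the app and resource
# group names below are illustrative only and must exist in the target
# subscription for the lookup to succeed.
#
#     settings = list_web_application_settings(name="my-app",
#                                               resource_group_name="my-rg")
#     pulumi.export("appSettings", settings.properties)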
| 32.365385
| 158
| 0.642602
|
454b3b99f5ff00c343f69a7a26e4551a75efc922
| 1,833
|
py
|
Python
|
dataspace/clean/dates.py
|
Sam-prog-sudo/dataspace
|
2bab85c4dfa713deb835a46e9214c43a3a674082
|
[
"MIT"
] | 3
|
2021-06-28T09:45:51.000Z
|
2022-01-10T15:38:07.000Z
|
dataspace/clean/dates.py
|
Sam-prog-sudo/dataspace
|
2bab85c4dfa713deb835a46e9214c43a3a674082
|
[
"MIT"
] | null | null | null |
dataspace/clean/dates.py
|
Sam-prog-sudo/dataspace
|
2bab85c4dfa713deb835a46e9214c43a3a674082
|
[
"MIT"
] | 1
|
2021-07-01T08:50:32.000Z
|
2021-07-01T08:50:32.000Z
|
import numpy as np
import pandas as pd
def _to_date(df: pd.DataFrame, *cols: str, **kwargs):
try:
for col in cols:
df[col] = pd.to_datetime(df[col], **kwargs)
except Exception as e:
raise Exception("Can not convert to date", e)
def _fdate(df: pd.DataFrame, *cols, precision: str = "S", format: str = None):
def formatdate(row):
return row.strftime(format)
def convert(row):
encoded = "%Y-%m-%d %H:%M:%S"
if precision == "Min":
encoded = "%Y-%m-%d %H:%M"
elif precision == "H":
encoded = "%Y-%m-%d %H"
elif precision == "D":
encoded = "%Y-%m-%d"
elif precision == "M":
encoded = "%Y-%m"
elif precision == "Y":
encoded = "%Y"
return row.strftime(encoded)
try:
for f in cols:
try:
if format is None:
df[f] = pd.to_datetime(df[f]).apply(convert)
else:
df[f] = pd.to_datetime(df[f]).apply(formatdate)
except ValueError as e:
raise Exception("Can not convert date", e)
except KeyError:
raise Exception("Can not find colums " + " ".join(cols))
except Exception as e:
raise Exception(e, "Can not process date col")
def _timestamps(df: pd.DataFrame, col: str, **kwargs):
try:
name = "timestamp"
if "name" in kwargs:
name = kwargs["name"]
if "errors" not in kwargs:
kwargs["errors"] = "coerce"
if "unit" in kwargs:
kwargs["unit"] = "ms"
df[name] = pd.to_datetime(df[col], **kwargs)
df[name] = df["timestamp"].values.astype(np.int64) // 10 ** 9
except Exception as e:
raise Exception("Can not convert to timestamps", e)
| 31.067797
| 78
| 0.519913
|
e641181e8aa95deb94828a4029425f1c77a37880
| 1,953
|
py
|
Python
|
xlsxwriter/test/comparison/test_hyperlink18.py
|
yxwlr995/-Python-Pandas-XlsxWriter
|
cd28c1b968795b67f3013c49a0e02ffda5898163
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_hyperlink18.py
|
yxwlr995/-Python-Pandas-XlsxWriter
|
cd28c1b968795b67f3013c49a0e02ffda5898163
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_hyperlink18.py
|
yxwlr995/-Python-Pandas-XlsxWriter
|
cd28c1b968795b67f3013c49a0e02ffda5898163
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2020-04-12T16:44:58.000Z
|
2020-04-12T16:44:58.000Z
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'hyperlink18.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks.This example doesn't have any link formatting and tests the relationshiplinkage code."""
filename = self.got_filename
####################################################
workbook = Workbook(filename)
worksheet = workbook.add_worksheet()
worksheet.write_url('A1', 'http://google.com/00000000001111111111222222222233333333334444444444555555555566666666666777777777778888888888999999999990000000000111111111122222222223333333333444444444455555555556666666666677777777777888888888899999999999000000000011111111112222222222x')
workbook.close()
####################################################
got, exp = _compare_xlsx_files(self.got_filename,
self.exp_filename,
self.ignore_files,
self.ignore_elements)
self.assertEqual(got, exp)
def tearDown(self):
# Cleanup.
if os.path.exists(self.got_filename):
os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
| 31
| 292
| 0.599078
|
b874b7af9e450830332c99d8b2ca941323d40896
| 25,979
|
py
|
Python
|
src/pretix/plugins/stripe/views.py
|
NorDULaN/pretix
|
e2b9fe8e71f3852721a42c594047d88f5181fd29
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-04-25T00:11:00.000Z
|
2020-04-25T00:11:00.000Z
|
src/pretix/plugins/stripe/views.py
|
NorDULaN/pretix
|
e2b9fe8e71f3852721a42c594047d88f5181fd29
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/pretix/plugins/stripe/views.py
|
NorDULaN/pretix
|
e2b9fe8e71f3852721a42c594047d88f5181fd29
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import hashlib
import json
import logging
import requests
import stripe
from django.contrib import messages
from django.core import signing
from django.db import transaction
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from django.views import View
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic import FormView
from django_scopes import scopes_disabled
from pretix.base.models import Event, Order, OrderPayment, Organizer, Quota
from pretix.base.payment import PaymentException
from pretix.base.services.locking import LockTimeoutException
from pretix.base.settings import GlobalSettingsObject
from pretix.control.permissions import (
AdministratorPermissionRequiredMixin, event_permission_required,
)
from pretix.control.views.event import DecoupleMixin
from pretix.control.views.organizer import OrganizerDetailViewMixin
from pretix.multidomain.urlreverse import eventreverse
from pretix.plugins.stripe.forms import OrganizerStripeSettingsForm
from pretix.plugins.stripe.models import ReferencedStripeObject
from pretix.plugins.stripe.payment import StripeCC, StripeSettingsHolder
from pretix.plugins.stripe.tasks import (
get_domain_for_event, stripe_verify_domain,
)
logger = logging.getLogger('pretix.plugins.stripe')
@xframe_options_exempt
def redirect_view(request, *args, **kwargs):
signer = signing.Signer(salt='safe-redirect')
try:
url = signer.unsign(request.GET.get('url', ''))
except signing.BadSignature:
return HttpResponseBadRequest('Invalid parameter')
r = render(request, 'pretixplugins/stripe/redirect.html', {
'url': url,
})
r._csp_ignore = True
return r
@scopes_disabled()
def oauth_return(request, *args, **kwargs):
if 'payment_stripe_oauth_event' not in request.session:
messages.error(request, _('An error occurred during connecting with Stripe, please try again.'))
return redirect(reverse('control:index'))
event = get_object_or_404(Event, pk=request.session['payment_stripe_oauth_event'])
if request.GET.get('state') != request.session['payment_stripe_oauth_token']:
messages.error(request, _('An error occurred during connecting with Stripe, please try again.'))
return redirect(reverse('control:event.settings.payment.provider', kwargs={
'organizer': event.organizer.slug,
'event': event.slug,
'provider': 'stripe_settings'
}))
gs = GlobalSettingsObject()
testdata = {}
try:
resp = requests.post('https://connect.stripe.com/oauth/token', data={
'grant_type': 'authorization_code',
'client_secret': (
gs.settings.payment_stripe_connect_secret_key or gs.settings.payment_stripe_connect_test_secret_key
),
'code': request.GET.get('code')
})
data = resp.json()
if 'error' not in data:
account = stripe.Account.retrieve(
data['stripe_user_id'],
api_key=gs.settings.payment_stripe_connect_secret_key or gs.settings.payment_stripe_connect_test_secret_key
)
except:
logger.exception('Failed to obtain OAuth token')
messages.error(request, _('An error occurred during connecting with Stripe, please try again.'))
else:
if 'error' not in data and data['livemode']:
try:
testresp = requests.post('https://connect.stripe.com/oauth/token', data={
'grant_type': 'refresh_token',
'client_secret': gs.settings.payment_stripe_connect_test_secret_key,
'refresh_token': data['refresh_token']
})
testdata = testresp.json()
except:
logger.exception('Failed to obtain OAuth token')
messages.error(request, _('An error occurred during connecting with Stripe, please try again.'))
return redirect(reverse('control:event.settings.payment.provider', kwargs={
'organizer': event.organizer.slug,
'event': event.slug,
'provider': 'stripe_settings'
}))
if 'error' in data:
messages.error(request, _('Stripe returned an error: {}').format(data['error_description']))
elif data['livemode'] and 'error' in testdata:
messages.error(request, _('Stripe returned an error: {}').format(testdata['error_description']))
else:
messages.success(request,
_('Your Stripe account is now connected to pretix. You can change the settings in '
'detail below.'))
event.settings.payment_stripe_publishable_key = data['stripe_publishable_key']
# event.settings.payment_stripe_connect_access_token = data['access_token'] we don't need it, right?
event.settings.payment_stripe_connect_refresh_token = data['refresh_token']
event.settings.payment_stripe_connect_user_id = data['stripe_user_id']
event.settings.payment_stripe_merchant_country = account.get('country')
if account.get('business_name') or account.get('display_name') or account.get('email'):
event.settings.payment_stripe_connect_user_name = (
account.get('business_name') or account.get('display_name') or account.get('email')
)
if data['livemode']:
event.settings.payment_stripe_publishable_test_key = testdata['stripe_publishable_key']
else:
event.settings.payment_stripe_publishable_test_key = event.settings.payment_stripe_publishable_key
if request.session.get('payment_stripe_oauth_enable', False):
event.settings.payment_stripe__enabled = True
del request.session['payment_stripe_oauth_enable']
stripe_verify_domain.apply_async(args=(event.pk, get_domain_for_event(event)))
return redirect(reverse('control:event.settings.payment.provider', kwargs={
'organizer': event.organizer.slug,
'event': event.slug,
'provider': 'stripe_settings'
}))
@csrf_exempt
@require_POST
@scopes_disabled()
def webhook(request, *args, **kwargs):
event_json = json.loads(request.body.decode('utf-8'))
    # We do not check for the event type as we are not interested in the event itself,
# we just use it as a trigger to look the charge up to be absolutely sure.
# Another reason for this is that stripe events are not authenticated, so they could
# come from anywhere.
if event_json['data']['object']['object'] == "charge":
func = charge_webhook
objid = event_json['data']['object']['id']
elif event_json['data']['object']['object'] == "dispute":
func = charge_webhook
objid = event_json['data']['object']['charge']
elif event_json['data']['object']['object'] == "source":
func = source_webhook
objid = event_json['data']['object']['id']
elif event_json['data']['object']['object'] == "payment_intent":
func = paymentintent_webhook
objid = event_json['data']['object']['id']
else:
return HttpResponse("Not interested in this data type", status=200)
try:
rso = ReferencedStripeObject.objects.select_related('order', 'order__event').get(reference=objid)
return func(rso.order.event, event_json, objid, rso)
except ReferencedStripeObject.DoesNotExist:
if event_json['data']['object']['object'] == "charge" and 'payment_intent' in event_json['data']['object']:
# If we receive a charge webhook *before* the payment intent webhook, we don't know the charge ID yet
# and can't match it -- but we know the payment intent ID!
try:
rso = ReferencedStripeObject.objects.select_related('order', 'order__event').get(
reference=event_json['data']['object']['payment_intent']
)
return func(rso.order.event, event_json, objid, rso)
except ReferencedStripeObject.DoesNotExist:
return HttpResponse("Unable to detect event", status=200)
elif hasattr(request, 'event') and func != paymentintent_webhook:
            # This is a legacy integration from back when we didn't have ReferencedStripeObject. This can't happen for
# payment intents or charges connected with payment intents since they didn't exist back then. Our best
# hope is to go for request.event and see if we can find the order ID.
return func(request.event, event_json, objid, None)
else:
# Okay, this is probably not an event that concerns us, maybe other applications talk to the same stripe
# account
return HttpResponse("Unable to detect event", status=200)
SOURCE_TYPES = {
'sofort': 'stripe_sofort',
'three_d_secure': 'stripe',
'card': 'stripe',
'giropay': 'stripe_giropay',
'ideal': 'stripe_ideal',
'alipay': 'stripe_alipay',
'bancontact': 'stripe_bancontact',
}
def charge_webhook(event, event_json, charge_id, rso):
prov = StripeCC(event)
prov._init_api()
try:
charge = stripe.Charge.retrieve(charge_id, expand=['dispute'], **prov.api_kwargs)
except stripe.error.StripeError:
logger.exception('Stripe error on webhook. Event data: %s' % str(event_json))
return HttpResponse('Charge not found', status=500)
metadata = charge['metadata']
if 'event' not in metadata:
return HttpResponse('Event not given in charge metadata', status=200)
if int(metadata['event']) != event.pk:
return HttpResponse('Not interested in this event', status=200)
if rso and rso.payment:
order = rso.payment.order
payment = rso.payment
elif rso:
order = rso.order
payment = None
else:
try:
order = event.orders.get(id=metadata['order'])
except Order.DoesNotExist:
return HttpResponse('Order not found', status=200)
payment = None
if not payment:
payment = order.payments.filter(
info__icontains=charge['id'],
provider__startswith='stripe',
amount=prov._amount_to_decimal(charge['amount']),
).last()
if not payment:
payment = order.payments.create(
state=OrderPayment.PAYMENT_STATE_CREATED,
provider=SOURCE_TYPES.get(charge['source'].get('type', charge['source'].get('object', 'card')), 'stripe'),
amount=prov._amount_to_decimal(charge['amount']),
info=str(charge),
)
if payment.provider != prov.identifier:
prov = payment.payment_provider
prov._init_api()
order.log_action('pretix.plugins.stripe.event', data=event_json)
is_refund = charge['refunds']['total_count'] or charge['dispute']
if is_refund:
known_refunds = [r.info_data.get('id') for r in payment.refunds.all()]
migrated_refund_amounts = [r.amount for r in payment.refunds.all() if not r.info_data.get('id')]
for r in charge['refunds']['data']:
a = prov._amount_to_decimal(r['amount'])
if r['status'] in ('failed', 'canceled'):
continue
if a in migrated_refund_amounts:
migrated_refund_amounts.remove(a)
continue
if r['id'] not in known_refunds:
payment.create_external_refund(
amount=a,
info=str(r)
)
if charge['dispute']:
if charge['dispute']['status'] != 'won' and charge['dispute']['id'] not in known_refunds:
a = prov._amount_to_decimal(charge['dispute']['amount'])
if a in migrated_refund_amounts:
migrated_refund_amounts.remove(a)
else:
payment.create_external_refund(
amount=a,
info=str(charge['dispute'])
)
elif charge['status'] == 'succeeded' and payment.state in (OrderPayment.PAYMENT_STATE_PENDING,
OrderPayment.PAYMENT_STATE_CREATED,
OrderPayment.PAYMENT_STATE_CANCELED,
OrderPayment.PAYMENT_STATE_FAILED):
try:
payment.confirm()
except LockTimeoutException:
return HttpResponse("Lock timeout, please try again.", status=503)
except Quota.QuotaExceededException:
pass
elif charge['status'] == 'failed' and payment.state in (OrderPayment.PAYMENT_STATE_PENDING, OrderPayment.PAYMENT_STATE_CREATED):
payment.fail(info=str(charge))
return HttpResponse(status=200)
def source_webhook(event, event_json, source_id, rso):
prov = StripeCC(event)
prov._init_api()
try:
src = stripe.Source.retrieve(source_id, **prov.api_kwargs)
except stripe.error.StripeError:
logger.exception('Stripe error on webhook. Event data: %s' % str(event_json))
return HttpResponse('Charge not found', status=500)
metadata = src['metadata']
if 'event' not in metadata:
return HttpResponse('Event not given in charge metadata', status=200)
if int(metadata['event']) != event.pk:
return HttpResponse('Not interested in this event', status=200)
with transaction.atomic():
if rso and rso.payment:
order = rso.payment.order
payment = rso.payment
elif rso:
order = rso.order
payment = None
else:
try:
order = event.orders.get(id=metadata['order'])
except Order.DoesNotExist:
return HttpResponse('Order not found', status=200)
payment = None
if not payment:
payment = order.payments.filter(
info__icontains=src['id'],
provider__startswith='stripe',
amount=prov._amount_to_decimal(src['amount']) if src['amount'] is not None else order.total,
).last()
if not payment:
payment = order.payments.create(
state=OrderPayment.PAYMENT_STATE_CREATED,
provider=SOURCE_TYPES.get(src['type'], 'stripe'),
amount=prov._amount_to_decimal(src['amount']) if src['amount'] is not None else order.total,
info=str(src),
)
if payment.provider != prov.identifier:
prov = payment.payment_provider
prov._init_api()
order.log_action('pretix.plugins.stripe.event', data=event_json)
go = (event_json['type'] == 'source.chargeable' and
payment.state in (OrderPayment.PAYMENT_STATE_PENDING, OrderPayment.PAYMENT_STATE_CREATED) and
src.status == 'chargeable')
if go:
try:
prov._charge_source(None, source_id, payment)
except PaymentException:
logger.exception('Webhook error')
elif src.status == 'failed':
payment.fail(info=str(src))
elif src.status == 'canceled' and payment.state in (OrderPayment.PAYMENT_STATE_PENDING, OrderPayment.PAYMENT_STATE_CREATED):
payment.info = str(src)
payment.state = OrderPayment.PAYMENT_STATE_CANCELED
payment.save()
return HttpResponse(status=200)
def paymentintent_webhook(event, event_json, paymentintent_id, rso):
prov = StripeCC(event)
prov._init_api()
try:
paymentintent = stripe.PaymentIntent.retrieve(paymentintent_id, **prov.api_kwargs)
except stripe.error.StripeError:
logger.exception('Stripe error on webhook. Event data: %s' % str(event_json))
return HttpResponse('Charge not found', status=500)
for charge in paymentintent.charges.data:
ReferencedStripeObject.objects.get_or_create(
reference=charge.id,
defaults={'order': rso.payment.order, 'payment': rso.payment}
)
return HttpResponse(status=200)
@event_permission_required('can_change_event_settings')
@require_POST
def oauth_disconnect(request, **kwargs):
del request.event.settings.payment_stripe_publishable_key
del request.event.settings.payment_stripe_publishable_test_key
del request.event.settings.payment_stripe_connect_access_token
del request.event.settings.payment_stripe_connect_refresh_token
del request.event.settings.payment_stripe_connect_user_id
del request.event.settings.payment_stripe_connect_user_name
request.event.settings.payment_stripe__enabled = False
messages.success(request, _('Your Stripe account has been disconnected.'))
return redirect(reverse('control:event.settings.payment.provider', kwargs={
'organizer': request.event.organizer.slug,
'event': request.event.slug,
'provider': 'stripe_settings'
}))
@xframe_options_exempt
def applepay_association(request, *args, **kwargs):
r = render(request, 'pretixplugins/stripe/apple-developer-merchantid-domain-association')
r._csp_ignore = True
return r
class StripeOrderView:
def dispatch(self, request, *args, **kwargs):
try:
self.order = request.event.orders.get(code=kwargs['order'])
if hashlib.sha1(self.order.secret.lower().encode()).hexdigest() != kwargs['hash'].lower():
raise Http404('')
except Order.DoesNotExist:
# Do a hash comparison as well to harden timing attacks
if 'abcdefghijklmnopq'.lower() == hashlib.sha1('abcdefghijklmnopq'.encode()).hexdigest():
raise Http404('')
else:
raise Http404('')
return super().dispatch(request, *args, **kwargs)
@cached_property
def payment(self):
return get_object_or_404(self.order.payments,
pk=self.kwargs['payment'],
provider__startswith='stripe')
@cached_property
def pprov(self):
return self.request.event.get_payment_providers()[self.payment.provider]
def _redirect_to_order(self):
if self.request.session.get('payment_stripe_order_secret') != self.order.secret and self.payment.provider != 'stripe_ideal':
messages.error(self.request, _('Sorry, there was an error in the payment process. Please check the link '
'in your emails to continue.'))
return redirect(eventreverse(self.request.event, 'presale:event.index'))
return redirect(eventreverse(self.request.event, 'presale:event.order', kwargs={
'order': self.order.code,
'secret': self.order.secret
}) + ('?paid=yes' if self.order.status == Order.STATUS_PAID else ''))
@method_decorator(xframe_options_exempt, 'dispatch')
class ReturnView(StripeOrderView, View):
def get(self, request, *args, **kwargs):
prov = self.pprov
prov._init_api()
try:
src = stripe.Source.retrieve(request.GET.get('source'), **prov.api_kwargs)
except stripe.error.InvalidRequestError:
logger.exception('Could not retrieve source')
messages.error(self.request, _('Sorry, there was an error in the payment process. Please check the link '
'in your emails to continue.'))
return redirect(eventreverse(self.request.event, 'presale:event.index'))
if src.client_secret != request.GET.get('client_secret'):
messages.error(self.request, _('Sorry, there was an error in the payment process. Please check the link '
'in your emails to continue.'))
return redirect(eventreverse(self.request.event, 'presale:event.index'))
with transaction.atomic():
self.order.refresh_from_db()
self.payment.refresh_from_db()
if self.payment.state == OrderPayment.PAYMENT_STATE_CONFIRMED:
if 'payment_stripe_token' in request.session:
del request.session['payment_stripe_token']
return self._redirect_to_order()
if src.status == 'chargeable':
try:
prov._charge_source(request, src.id, self.payment)
except PaymentException as e:
messages.error(request, str(e))
return self._redirect_to_order()
finally:
if 'payment_stripe_token' in request.session:
del request.session['payment_stripe_token']
elif src.status == 'consumed':
# Webhook was faster, wow! ;)
if 'payment_stripe_token' in request.session:
del request.session['payment_stripe_token']
return self._redirect_to_order()
elif src.status == 'pending':
self.payment.state = OrderPayment.PAYMENT_STATE_PENDING
self.payment.info = str(src)
self.payment.save()
else: # failed or canceled
self.payment.fail(info=str(src))
messages.error(self.request, _('We had trouble authorizing your card payment. Please try again and '
'get in touch with us if this problem persists.'))
return self._redirect_to_order()
@method_decorator(xframe_options_exempt, 'dispatch')
class ScaView(StripeOrderView, View):
def get(self, request, *args, **kwargs):
prov = self.pprov
prov._init_api()
if self.payment.state in (OrderPayment.PAYMENT_STATE_CONFIRMED,
OrderPayment.PAYMENT_STATE_CANCELED,
OrderPayment.PAYMENT_STATE_FAILED):
return self._redirect_to_order()
payment_info = json.loads(self.payment.info)
if 'id' in payment_info:
try:
intent = stripe.PaymentIntent.retrieve(
payment_info['id'],
**prov.api_kwargs
)
except stripe.error.InvalidRequestError:
logger.exception('Could not retrieve payment intent')
messages.error(self.request, _('Sorry, there was an error in the payment process.'))
return self._redirect_to_order()
else:
messages.error(self.request, _('Sorry, there was an error in the payment process.'))
return self._redirect_to_order()
if intent.status == 'requires_action' and intent.next_action.type in ['use_stripe_sdk', 'redirect_to_url']:
ctx = {
'order': self.order,
'stripe_settings': StripeSettingsHolder(self.order.event).settings,
}
if intent.next_action.type == 'use_stripe_sdk':
ctx['payment_intent_client_secret'] = intent.client_secret
elif intent.next_action.type == 'redirect_to_url':
ctx['payment_intent_next_action_redirect_url'] = intent.next_action.redirect_to_url['url']
r = render(request, 'pretixplugins/stripe/sca.html', ctx)
r._csp_ignore = True
return r
else:
try:
prov._handle_payment_intent(request, self.payment, intent)
except PaymentException as e:
messages.error(request, str(e))
return self._redirect_to_order()
@method_decorator(xframe_options_exempt, 'dispatch')
class ScaReturnView(StripeOrderView, View):
def get(self, request, *args, **kwargs):
prov = self.pprov
try:
prov._handle_payment_intent(request, self.payment)
except PaymentException as e:
messages.error(request, str(e))
self.order.refresh_from_db()
return render(request, 'pretixplugins/stripe/sca_return.html', {'order': self.order})
class OrganizerSettingsFormView(DecoupleMixin, OrganizerDetailViewMixin, AdministratorPermissionRequiredMixin, FormView):
model = Organizer
permission = 'can_change_organizer_settings'
form_class = OrganizerStripeSettingsForm
template_name = 'pretixplugins/stripe/organizer_stripe.html'
def get_success_url(self):
return reverse('plugins:stripe:settings.connect', kwargs={
'organizer': self.request.organizer.slug,
})
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['obj'] = self.request.organizer
return kwargs
@transaction.atomic
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
form.save()
if form.has_changed():
self.request.organizer.log_action(
'pretix.organizer.settings', user=self.request.user, data={
k: form.cleaned_data.get(k) for k in form.changed_data
}
)
messages.success(self.request, _('Your changes have been saved.'))
return redirect(self.get_success_url())
else:
messages.error(self.request, _('We could not save your changes. See below for details.'))
return self.get(request)
| 43.154485
| 132
| 0.633589
|
6c05dc37ce9ecd88d04de2c7b1fb1af36407f407
| 2,916
|
py
|
Python
|
setup.py
|
rosscdh/pinax-comments
|
266632ef22b7fd922a8d3ffd6eabdac71e2385d9
|
[
"MIT"
] | 59
|
2015-12-15T22:53:40.000Z
|
2021-07-06T03:28:49.000Z
|
setup.py
|
rosscdh/pinax-comments
|
266632ef22b7fd922a8d3ffd6eabdac71e2385d9
|
[
"MIT"
] | 22
|
2015-10-01T14:34:50.000Z
|
2021-07-27T09:49:33.000Z
|
setup.py
|
rosscdh/pinax-comments
|
266632ef22b7fd922a8d3ffd6eabdac71e2385d9
|
[
"MIT"
] | 19
|
2016-01-27T17:41:35.000Z
|
2021-09-22T09:30:50.000Z
|
from setuptools import find_packages, setup
VERSION = "2.0.0"
LONG_DESCRIPTION = """
.. image:: http://pinaxproject.com/pinax-design/patches/pinax-comments.svg
:target: https://pypi.python.org/pypi/pinax-comments/
==============
Pinax Comments
==============
.. image:: https://img.shields.io/pypi/v/pinax-comments.svg
:target: https://pypi.python.org/pypi/pinax-comments/
\
.. image:: https://img.shields.io/circleci/project/github/pinax/pinax-comments.svg
:target: https://circleci.com/gh/pinax/pinax-comments
.. image:: https://img.shields.io/codecov/c/github/pinax/pinax-comments.svg
:target: https://codecov.io/gh/pinax/pinax-comments
.. image:: https://img.shields.io/github/contributors/pinax/pinax-comments.svg
:target: https://github.com/pinax/pinax-comments/graphs/contributors
.. image:: https://img.shields.io/github/issues-pr/pinax/pinax-comments.svg
:target: https://github.com/pinax/pinax-comments/pulls
.. image:: https://img.shields.io/github/issues-pr-closed/pinax/pinax-comments.svg
:target: https://github.com/pinax/pinax-comments/pulls?q=is%3Apr+is%3Aclosed
\
.. image:: http://slack.pinaxproject.com/badge.svg
:target: http://slack.pinaxproject.com/
.. image:: https://img.shields.io/badge/license-MIT-blue.svg
:target: https://opensource.org/licenses/MIT/
\
``pinax-comments`` is a comments app for Django.
Supported Django and Python Versions
------------------------------------
+-----------------+-----+-----+-----+
| Django / Python | 3.6 | 3.7 | 3.8 |
+=================+=====+=====+=====+
| 2.2 | * | * | * |
+-----------------+-----+-----+-----+
| 3.0 | * | * | * |
+-----------------+-----+-----+-----+
"""
setup(
author="Pinax Team",
author_email="team@pinaxproject.com",
description="a comments app for Django",
name="pinax-comments",
long_description=LONG_DESCRIPTION,
version=VERSION,
url="http://github.com/pinax/pinax-comments/",
license="MIT",
packages=find_packages(),
package_data={
"comments": []
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries :: Python Modules",
],
install_requires=[
"django>=2.2",
"django-appconf>=1.0.1",
],
tests_require=[
"django-test-plus>=1.0.22",
],
test_suite="runtests.runtests",
zip_safe=False
)
| 32.764045
| 82
| 0.594307
|
54f89e215c9268dd792586821b21d45ed5d4fce0
| 5,491
|
py
|
Python
|
rsHRF/rsHRF_GUI/gui_windows/inputWindow.py
|
Remi-Gau/bids-app_rsHRF
|
a07715b764df69fffbc7f1a43718e958662ade9b
|
[
"MIT"
] | 1
|
2021-04-11T14:08:53.000Z
|
2021-04-11T14:08:53.000Z
|
rsHRF/rsHRF_GUI/gui_windows/inputWindow.py
|
Remi-Gau/rsHRF-1
|
a07715b764df69fffbc7f1a43718e958662ade9b
|
[
"MIT"
] | 5
|
2021-04-15T12:49:46.000Z
|
2021-06-21T09:14:40.000Z
|
rsHRF/rsHRF_GUI/gui_windows/inputWindow.py
|
Remi-Gau/rsHRF-1
|
a07715b764df69fffbc7f1a43718e958662ade9b
|
[
"MIT"
] | null | null | null |
import os
from tkinter import Toplevel, Checkbutton, IntVar, Button, filedialog, NORMAL, DISABLED, OptionMenu, StringVar, Label
class InputWindow():
def __init__(self):
# input window
window = Toplevel()
window.title("Input Window")
# get screen width and height
screen_width = window.winfo_screenwidth()
screen_height = window.winfo_screenheight()
# placing the toplevel
window.geometry("350x220+%d+%d" % ((280/1900)*screen_width, ((((1040.0-220)/1000)*screen_height)-390)-280))
# variables which shall get sent to the front end
self.input_file = ()
self.mask_file = ()
self.file_type = ()
self.output_dir = ()
        # other class variables
# 1 corresponds to BIDS input
self.inputFormatVar = IntVar()
self.inputFormatVar.set(0)
# 1 corresponds to mask file being present in the BIDS directory
self.maskFormatVar = IntVar()
self.maskFormatVar.set(0)
# selecting the estimation rule
self.estimationOption = StringVar()
self.estimationOption.set('canon2dd')
def getInputFile():
if self.inputFormatVar.get(): # input takes a directory
self.input_file = filedialog.askdirectory(initialdir=os.getcwd())
maskFormat.configure(state=NORMAL)
else:
self.input_file = filedialog.askopenfilename(initialdir=os.getcwd(), title="Input File Path", filetypes=(("nifti files", "*.nii"), ("nifti files", "*.nii.gz"), ("gifti files", "*.gii"), ("gifti files", "*.gii.gz")))
maskFormat.configure(state=DISABLED)
try:
self.file_type = os.path.splitext(self.input_file)[1]
except:
self.file_type = ()
try:
inputFileLabel.configure(text=self.input_file.split('/')[-1])
except:
inputFileLabel.configure(text="")
def maskFormatButtonState():
if self.maskFormatVar.get():
maskFormatButton.configure(state=DISABLED)
else:
maskFormatButton.configure(state=NORMAL)
def inputFormatButtonState():
if self.inputFormatVar.get():
maskFormat.configure(state=NORMAL)
else:
maskFormat.configure(state=DISABLED)
maskFormatButtonState()
def getMaskFile():
self.mask_file = filedialog.askopenfilename(initialdir=os.getcwd(), title="Input File Path", filetypes=(("nifti files", "*.nii"), ("nifti files", "*.nii.gz"), ("gifti files", "*.gii"), ("gifti files", "*.gii.gz")))
try:
maskFileLabel.configure(text=self.mask_file.split("/")[-1])
except:
maskFileLabel.configure(text="")
def getOutputDir():
self.output_dir = filedialog.askdirectory(initialdir=os.getcwd())
try:
outputPathLabel.configure(text="Output path: " + self.output_dir.split("/")[-1])
except:
outputPathLabel.configure(text="")
# defining widgets
inputFormat = Checkbutton(window, text="BIDS Format", variable=self.inputFormatVar, command=inputFormatButtonState)
maskFormat = Checkbutton(window, text="Mask File in BIDS", variable=self.maskFormatVar, state=DISABLED, command=maskFormatButtonState)
inputFormatButton = Button (window, text="Select Input", command=getInputFile, height=1, width=20)
maskFormatButton = Button (window, text="Select Mask File", state=NORMAL, command=getMaskFile, height=1, width=20)
outputPathButton = Button (window, text="Select Output Directory", command=getOutputDir, height=1, width=20)
estimationLabel = Label (window, text="Estimation Rule: ")
inputFileLabel = Label (window, text="")
maskFileLabel = Label (window, text="")
outputPathLabel = Label (window, text="")
estimationDropDown = OptionMenu (window, self.estimationOption, "canon2dd", "sFIR", "FIR", "gamma", "fourier", "fourier w/ hanning")
# placing widgets
inputFormat.grid (row=0,column=0,padx=(5,5),pady=(5,5))
inputFormatButton.grid (row=0,column=1,padx=(5,5),pady=(5,5))
inputFileLabel.grid (row=1,column=0,padx=(5,5),pady=(5,5),columnspan=2)
maskFormat.grid (row=2,column=0,padx=(5,5),pady=(5,5))
maskFormatButton.grid (row=2,column=1,padx=(5,5),pady=(5,5))
maskFileLabel.grid (row=3,column=0,padx=(5,5),pady=(5,5),columnspan=2)
outputPathButton.grid (row=4,column=1,padx=(5,5),pady=(5,5))
outputPathLabel.grid (row=4,column=0,padx=(5,5),pady=(5,5))
estimationLabel.grid (row=5,column=0,padx=(5,5),pady=(5,5))
estimationDropDown.grid(row=5,column=1,padx=(5,5),pady=(5,5))
def getInput(self):
if self.inputFormatVar.get() * self.maskFormatVar.get():
mode = "bids"
elif self.inputFormatVar.get():
mode = "bids w/ atlas"
else:
mode = "file"
return (self.input_file, self.mask_file, self.file_type, mode, self.estimationOption.get(), self.output_dir)
| 52.798077
| 231
| 0.584411
|
d73ed7931f34a0ece5d2a21a574eb7ff7ea782a5
| 167
|
py
|
Python
|
tests/connection/test_commit.py
|
coverwallet/pysoni
|
49d3a8acb101436ad0724749572be2ad9d86f3ae
|
[
"MIT"
] | 5
|
2019-07-08T15:38:06.000Z
|
2022-03-24T20:36:19.000Z
|
tests/connection/test_commit.py
|
coverwallet/pysoni
|
49d3a8acb101436ad0724749572be2ad9d86f3ae
|
[
"MIT"
] | 2
|
2019-07-07T23:26:32.000Z
|
2020-06-04T07:43:24.000Z
|
tests/connection/test_commit.py
|
coverwallet/pysoni
|
49d3a8acb101436ad0724749572be2ad9d86f3ae
|
[
"MIT"
] | 1
|
2019-05-31T09:11:22.000Z
|
2019-05-31T09:11:22.000Z
|
def test_commit_triggers_commit_in_the_connection(open_connection):
open_connection.commit()
open_connection._connection_handler.commit.assert_called_once()
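# --- Illustrative fixture sketch (not part of the original test module) ---
# The test above depends on an "open_connection" pytest fixture defined
# elsewhere in the suite; the class below is a hypothetical stand-in that
# mimics the behaviour the assertion relies on.
import pytest
from unittest import mock


@pytest.fixture
def open_connection():
    class _FakeConnection:
        def __init__(self):
            self._connection_handler = mock.MagicMock()

        def commit(self):
            # delegate to the low-level handler, as the real wrapper is assumed to do
            self._connection_handler.commit()

    return _FakeConnection()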
| 27.833333
| 67
| 0.850299
|
967ca2fb53793ba1dd8fbe1e8c014925c8a2e346
| 1,625
|
py
|
Python
|
shub/compat.py
|
PyExplorer/shub
|
dc38191e6593f3c012cb89ed1551f8b0dd2981d8
|
[
"BSD-3-Clause"
] | 111
|
2015-02-05T15:24:15.000Z
|
2022-03-31T03:31:22.000Z
|
shub/compat.py
|
PyExplorer/shub
|
dc38191e6593f3c012cb89ed1551f8b0dd2981d8
|
[
"BSD-3-Clause"
] | 355
|
2015-01-01T16:18:46.000Z
|
2022-03-18T15:41:10.000Z
|
shub/compat.py
|
PyExplorer/shub
|
dc38191e6593f3c012cb89ed1551f8b0dd2981d8
|
[
"BSD-3-Clause"
] | 79
|
2015-02-23T17:07:32.000Z
|
2022-01-03T09:15:39.000Z
|
import six
def to_unicode(text, encoding=None, errors='strict'):
"""Return the unicode representation of `text`.
If `text` is already a ``unicode`` object, return it as-is.
If `text` is a ``bytes`` object, decode it using `encoding`.
Otherwise, raise an error.
"""
if isinstance(text, six.text_type):
return text
if not isinstance(text, (six.binary_type, bytearray)):
raise TypeError('to_unicode must receive a bytes, str or unicode '
'object, got %s' % type(text).__name__)
if encoding is None:
encoding = 'utf-8'
return text.decode(encoding, errors)
def to_bytes(text, encoding=None, errors='strict'):
"""Return the binary representation of `text`.
If `text` is already a ``bytes`` object, return it as-is.
If `text` is a ``unicode`` object, encode it using `encoding`.
Otherwise, raise an error."""
if isinstance(text, six.binary_type):
return text
if isinstance(text, bytearray):
return bytes(text)
if not isinstance(text, six.text_type):
raise TypeError('to_bytes must receive a unicode, str or bytes '
'object, got %s' % type(text).__name__)
if encoding is None:
encoding = 'utf-8'
return text.encode(encoding, errors)
def to_native_str(text, encoding=None, errors='strict'):
"""Return ``str`` representation of `text`.
``str`` representation means ``bytes`` in PY2 and ``unicode`` in PY3.
"""
if six.PY2:
return to_bytes(text, encoding, errors)
else:
return to_unicode(text, encoding, errors)
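# --- Illustrative usage sketch (not part of the original module) ---
# A small, hedged demonstration of the conversion helpers above; the
# sample values are invented for illustration.
if __name__ == '__main__':
    assert to_unicode(b'sc\xc3\xa9nario') == u'sc\xe9nario'
    assert to_bytes(u'sc\xe9nario') == b'sc\xc3\xa9nario'
    # bytes on Python 2, unicode on Python 3
    print(type(to_native_str('shub')))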
| 31.25
| 74
| 0.632
|
0d018b10d18266792bd72e75eff65202e1a21bd3
| 2,654
|
py
|
Python
|
ros/src/tl_detector/light_classification/tl_classifier.py
|
AnkS4/CarND-Capstone
|
da7dfd7b8dd0e6e16f6a14d59accdeb6e2d6e4bb
|
[
"MIT"
] | 2
|
2019-07-22T13:43:25.000Z
|
2019-09-10T22:02:16.000Z
|
ros/src/tl_detector/light_classification/tl_classifier.py
|
AnkS4/CarND-Capstone
|
da7dfd7b8dd0e6e16f6a14d59accdeb6e2d6e4bb
|
[
"MIT"
] | 10
|
2019-10-30T12:05:59.000Z
|
2022-03-11T23:53:13.000Z
|
ros/src/tl_detector/light_classification/tl_classifier.py
|
AnkS4/CarND-Capstone
|
da7dfd7b8dd0e6e16f6a14d59accdeb6e2d6e4bb
|
[
"MIT"
] | null | null | null |
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import datetime
USE_CLASSIFIER = True
class TLClassifier(object):
def __init__(self):
if (USE_CLASSIFIER == True):
#load classifier
graph_path = r'light_classification/model/frozen_inference_graph.pb'
self.graph = tf.Graph()
self.threshold = 0.5
            print(graph_path)
with self.graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_path, 'rb') as fid:
od_graph_def.ParseFromString(fid.read())
tf.import_graph_def(od_graph_def, name='')
self.image_tensor = self.graph.get_tensor_by_name('image_tensor:0')
self.boxes = self.graph.get_tensor_by_name('detection_boxes:0')
self.scores = self.graph.get_tensor_by_name('detection_scores:0')
self.classes = self.graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.graph.get_tensor_by_name('num_detections:0')
self.sess = tf.Session(graph=self.graph)
else:
pass
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
if (USE_CLASSIFIER == True):
with self.graph.as_default():
img_expand = np.expand_dims(image, axis=0)
start = datetime.datetime.now()
(boxes, scores, classes, num_detections) = self.sess.run([self.boxes, self.scores, self.classes, self.num_detections],
feed_dict={self.image_tensor: img_expand})
end = datetime.datetime.now()
c = end-start
print(c.total_seconds())
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
print('Scores: ', scores[0])
print('Classes: ', classes[0])
if scores[0] > self.threshold:
if classes[0] == 1:
print('GREEN')
return TrafficLight.GREEN
elif classes[0] == 2:
print('RED')
return TrafficLight.RED
elif classes[0] == 3:
print('YELLOW')
return TrafficLight.YELLOW
return TrafficLight.UNKNOWN
| 35.386667
| 135
| 0.563301
|
ccb074cd42abd1a3cfc928cdb62ed723ecc53d00
| 3,757
|
py
|
Python
|
youtube_dl/extractor/stv.py
|
incognitoRepo/youtube-dl
|
e9dbbbd87e200d37f15531973fe653c3573cfd0b
|
[
"Unlicense"
] | 16
|
2020-12-01T15:26:58.000Z
|
2022-02-24T23:12:14.000Z
|
youtube_dl/extractor/stv.py
|
incognitoRepo/youtube-dl
|
e9dbbbd87e200d37f15531973fe653c3573cfd0b
|
[
"Unlicense"
] | 5
|
2021-02-20T10:30:00.000Z
|
2021-06-01T21:12:31.000Z
|
youtube_dl/extractor/stv.py
|
incognitoRepo/youtube-dl
|
e9dbbbd87e200d37f15531973fe653c3573cfd0b
|
[
"Unlicense"
] | 7
|
2020-12-01T15:27:04.000Z
|
2022-01-09T23:21:53.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse
)
from ..utils import (
extract_attributes,
float_or_none,
int_or_none,
str_or_none,
)
class STVPlayerIE(InfoExtractor):
IE_NAME = 'stv:player'
_VALID_URL = r'https?://player\.stv\.tv/(?P<type>episode|video)/(?P<id>[a-z0-9]{4})'
_TEST = {
'url': 'https://player.stv.tv/video/7srz/victoria/interview-with-the-cast-ahead-of-new-victoria/',
'md5': '2ad867d4afd641fa14187596e0fbc91b',
'info_dict': {
'id': '6016487034001',
'ext': 'mp4',
'upload_date': '20190321',
'title': 'Interview with the cast ahead of new Victoria',
'description': 'Nell Hudson and Lily Travers tell us what to expect in the new season of Victoria.',
'timestamp': 1553179628,
'uploader_id': '1486976045',
},
'skip': 'this resource is unavailable outside of the UK',
}
_PUBLISHER_ID = '1486976045'
_PTYPE_MAP = {
'episode': 'episodes',
'video': 'shortform',
}
def _real_extract(self, url):
ptype, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, video_id)
qs = compat_parse_qs(compat_urllib_parse_urlparse(self._search_regex(
r'itemprop="embedURL"[^>]+href="([^"]+)',
webpage, 'embed URL', default=None)).query)
publisher_id = qs.get('publisherID', [None])[0] or self._PUBLISHER_ID
player_attr = extract_attributes(self._search_regex(
r'(<[^>]+class="bcplayer"[^>]+>)', webpage, 'player', default=None)) or {}
info = {}
duration = ref_id = series = video_id = None
api_ref_id = player_attr.get('data-player-api-refid')
if api_ref_id:
resp = self._download_json(
'https://player.api.stv.tv/v1/%s/%s' % (self._PTYPE_MAP[ptype], api_ref_id),
api_ref_id, fatal=False)
if resp:
result = resp.get('results') or {}
video = result.get('video') or {}
video_id = str_or_none(video.get('id'))
ref_id = video.get('guid')
duration = video.get('length')
programme = result.get('programme') or {}
series = programme.get('name') or programme.get('shortName')
subtitles = {}
_subtitles = result.get('_subtitles') or {}
for ext, sub_url in _subtitles.items():
subtitles.setdefault('en', []).append({
'ext': 'vtt' if ext == 'webvtt' else ext,
'url': sub_url,
})
info.update({
'description': result.get('summary'),
'subtitles': subtitles,
'view_count': int_or_none(result.get('views')),
})
if not video_id:
video_id = qs.get('videoId', [None])[0] or self._search_regex(
r'<link\s+itemprop="url"\s+href="(\d+)"',
webpage, 'video id', default=None) or 'ref:' + (ref_id or player_attr['data-refid'])
info.update({
'_type': 'url_transparent',
'duration': float_or_none(duration or player_attr.get('data-duration'), 1000),
'id': video_id,
'ie_key': 'BrightcoveNew',
'series': series or player_attr.get('data-programme-name'),
'url': 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s' % (publisher_id, video_id),
})
return info
| 39.547368
| 119
| 0.554166
|
c8c8c260bc7a6a63ac6e60b99a1950acfcd071a3
| 390
|
py
|
Python
|
migrations/0005_auto_20160128_1748.py
|
contraslash/blogs_engine-django
|
b353da29a6302c2a7d8b68d12ae30a18c9c1c405
|
[
"MIT"
] | null | null | null |
migrations/0005_auto_20160128_1748.py
|
contraslash/blogs_engine-django
|
b353da29a6302c2a7d8b68d12ae30a18c9c1c405
|
[
"MIT"
] | null | null | null |
migrations/0005_auto_20160128_1748.py
|
contraslash/blogs_engine-django
|
b353da29a6302c2a7d8b68d12ae30a18c9c1c405
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog_engine', '0004_post_slug'),
]
operations = [
migrations.AlterField(
model_name='post',
name='slug',
field=models.SlugField(blank=True),
),
]
| 19.5
| 47
| 0.592308
|
4dbd6eb4d4bd980243b6230934c5b9cd668ac989
| 2,476
|
py
|
Python
|
demos/paired_mrus_prostate/demo_data.py
|
mathpluscode/DeepReg
|
80854094feafec998fa6237199066556c73f31f9
|
[
"Apache-2.0"
] | null | null | null |
demos/paired_mrus_prostate/demo_data.py
|
mathpluscode/DeepReg
|
80854094feafec998fa6237199066556c73f31f9
|
[
"Apache-2.0"
] | null | null | null |
demos/paired_mrus_prostate/demo_data.py
|
mathpluscode/DeepReg
|
80854094feafec998fa6237199066556c73f31f9
|
[
"Apache-2.0"
] | null | null | null |
"""
Download the demo data
"""
import os
import shutil
import zipfile
from tensorflow.keras.utils import get_file
PROJECT_DIR = "demos/paired_mrus_prostate"
os.chdir(PROJECT_DIR)
DATA_PATH = "dataset"
ZIP_PATH = "example-data-mrus"
ORIGIN = "https://github.com/yipenghu/example-data/archive/mrus.zip"
zip_file = ZIP_PATH + ".zip"
get_file(os.path.abspath(zip_file), ORIGIN)
with zipfile.ZipFile(zip_file, "r") as zf:
zf.extractall()
if os.path.exists(DATA_PATH):
shutil.rmtree(DATA_PATH)
os.rename(ZIP_PATH, DATA_PATH)
os.remove(zip_file)
print("\nMR and ultrasound data downloaded: %s." % os.path.abspath(DATA_PATH))
## now split the data in to num_part partitions
num_part = 11
data_types = ["moving_images", "moving_labels", "fixed_images", "fixed_labels"]
filenames = [sorted(os.listdir(os.path.join(DATA_PATH, fn))) for fn in data_types]
num_data = set([len(fn) for fn in filenames])
if len(num_data) != 1:
    # raising a bare string would itself be a TypeError; raise a real exception
    raise ValueError(
        "Number of files is not the same between moving/fixed/images/labels. "
        "Please run this download script again."
    )
else:
num_data = num_data.pop()
for idx in range(num_part): # create partition folders
os.makedirs(os.path.join(DATA_PATH, "part%02d" % idx))
for fn in data_types:
os.makedirs(os.path.join(DATA_PATH, "part%02d" % idx, fn))
for idx in range(num_data): # copy all files to part folders
for ifn in range(len(data_types)):
os.rename(
os.path.join(DATA_PATH, data_types[ifn], filenames[ifn][idx]),
os.path.join(
DATA_PATH,
"part%02d" % (idx % num_part),
data_types[ifn],
filenames[ifn][idx],
),
)
for fn in data_types: # remove the old type folders
shutil.rmtree(os.path.join(DATA_PATH, fn))
print("All data are partitioned into %d folders." % num_part)
## now download the pre-trained model
MODEL_PATH = os.path.join(DATA_PATH, "pretrained")
if os.path.exists(MODEL_PATH):
shutil.rmtree(MODEL_PATH)
os.mkdir(MODEL_PATH)
ZIP_PATH = "checkpoint"
ORIGIN = "https://github.com/DeepRegNet/deepreg-model-zoo/raw/master/demo/paired_mrus_prostate/20210110.zip"
zip_file = os.path.join(MODEL_PATH, ZIP_PATH + ".zip")
get_file(os.path.abspath(zip_file), ORIGIN)
with zipfile.ZipFile(zip_file, "r") as zf:
zf.extractall(path=MODEL_PATH)
os.remove(zip_file)
print(
"Pre-trained model is downloaded and unzipped in %s." % os.path.abspath(MODEL_PATH)
)
| 29.831325
| 108
| 0.691034
|
05c4c3bbb20ec9e83455d6f69915139017d00194
| 1,853
|
py
|
Python
|
plaso/cli/helpers/text_prepend.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 1,253
|
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
plaso/cli/helpers/text_prepend.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 3,388
|
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
plaso/cli/helpers/text_prepend.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 376
|
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
# -*- coding: utf-8 -*-
"""The text prepend CLI arguments helper."""
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
class TextPrependArgumentsHelper(interface.ArgumentsHelper):
"""Text prepend CLI arguments helper."""
NAME = 'text_prepend'
DESCRIPTION = 'Text prepend command line arguments.'
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'-t', '--text', dest='text_prepend', action='store', type=str,
default='', metavar='TEXT', help=(
'Define a free form text string that is prepended to each path '
'to make it easier to distinguish one record from another in a '
'timeline (like c:\\, or host_w_c:\\)'))
@classmethod
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
text_prepend = cls._ParseStringOption(options, 'text_prepend')
setattr(configuration_object, '_text_prepend', text_prepend)
manager.ArgumentHelperManager.RegisterHelper(TextPrependArgumentsHelper)
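# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of wiring the helper into a plain ArgumentParser; a real
# plaso tool would pass a CLITool instance to ParseOptions instead of
# reading the parsed namespace directly as done here.
if __name__ == '__main__':
    import argparse
    _parser = argparse.ArgumentParser()
    TextPrependArgumentsHelper.AddArguments(_parser)
    _options = _parser.parse_args(['--text', 'host_w_c:\\'])
    print(_options.text_prepend)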
| 33.089286
| 79
| 0.7102
|
d6db3f521703415c83c55fdcef316e7af68cd7ab
| 161
|
py
|
Python
|
django_jwt/mixins/__init__.py
|
ah450/django-jwt
|
7a68574d762f644a02e5f43a8804f81d5ed0f12c
|
[
"MIT"
] | null | null | null |
django_jwt/mixins/__init__.py
|
ah450/django-jwt
|
7a68574d762f644a02e5f43a8804f81d5ed0f12c
|
[
"MIT"
] | 4
|
2018-11-14T15:08:24.000Z
|
2021-06-10T18:18:54.000Z
|
django_jwt/mixins/__init__.py
|
ah450/django-jwt
|
7a68574d762f644a02e5f43a8804f81d5ed0f12c
|
[
"MIT"
] | null | null | null |
from .authentication_mixin import JWTAuthenticationMixin
from .password_reset_mixin import JWTPasswordResetMixin
from .activation_mixin import JWTActivationMixin
| 53.666667
| 56
| 0.913043
|
bdac4da92cbf011b58e2f2e58b677ff4d34d0765
| 1,334
|
py
|
Python
|
setup.py
|
JakeVestal/financier
|
0ace41f3b39bb2e4252cb90eeecf316767679e34
|
[
"MIT"
] | null | null | null |
setup.py
|
JakeVestal/financier
|
0ace41f3b39bb2e4252cb90eeecf316767679e34
|
[
"MIT"
] | null | null | null |
setup.py
|
JakeVestal/financier
|
0ace41f3b39bb2e4252cb90eeecf316767679e34
|
[
"MIT"
] | 1
|
2021-11-21T20:52:41.000Z
|
2021-11-21T20:52:41.000Z
|
# Initial setup.py template taken from:
# https://towardsdatascience.com/deep-dive-create-and-publish-your-first-python-library-f7f618719e14
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# The directory containing this file
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# This call to setup() does all the work
setup(
name="financier",
version="0.1.53",
description="Financial Analysis Toolkit",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://financier.readthedocs.io/",
author="Jake Vestal, Sophie Rubin",
author_email="jmvestal@gmail.com, sophierubin1224@gmail.com",
license="MIT",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Operating System :: OS Independent"
],
packages=["financier"],
include_package_data=True,
install_requires=["numpy", "pandas"]
)
| 33.35
| 102
| 0.702399
|
ed0bf514f81ff67155f70ac117286fd5140fe06e
| 484
|
py
|
Python
|
dynadb/migrations/0102_auto_20200115_1112.py
|
GPCRmd/GPCRmd
|
7dc75359ace4a00c1597bdb7a86ebee17d51f09c
|
[
"Apache-2.0"
] | 3
|
2019-03-06T13:35:38.000Z
|
2020-08-05T15:31:29.000Z
|
dynadb/migrations/0102_auto_20200115_1112.py
|
GPCRmd/GPCRmd
|
7dc75359ace4a00c1597bdb7a86ebee17d51f09c
|
[
"Apache-2.0"
] | null | null | null |
dynadb/migrations/0102_auto_20200115_1112.py
|
GPCRmd/GPCRmd
|
7dc75359ace4a00c1597bdb7a86ebee17d51f09c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2020-01-15 10:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dynadb', '0101_auto_20190307_1830'),
]
operations = [
migrations.AlterField(
model_name='dyndbdynamics',
name='description',
field=models.CharField(blank=True, max_length=1000, null=True),
),
]
| 23.047619
| 75
| 0.632231
|
0c6139c86e3f03a01837e6f2c9b00b0d25ffab51
| 2,772
|
py
|
Python
|
code/models/ngrams.py
|
ayushbaid/RedditCommunityOpinion
|
850904141de2c74abcab616fabc794176b9770f6
|
[
"MIT"
] | null | null | null |
code/models/ngrams.py
|
ayushbaid/RedditCommunityOpinion
|
850904141de2c74abcab616fabc794176b9770f6
|
[
"MIT"
] | null | null | null |
code/models/ngrams.py
|
ayushbaid/RedditCommunityOpinion
|
850904141de2c74abcab616fabc794176b9770f6
|
[
"MIT"
] | null | null | null |
from typing import List
from nltk.lm.preprocessing import padded_everygram_pipeline
from nltk.lm.models import MLE
from nltk.tokenize import ToktokTokenizer
from nltk.tokenize import wordpunct_tokenize
from models.base_model import BaseModel
class NGrams(BaseModel):
'''
NGram language model based on sentence completion
'''
def __init__(self, training_set: List[str], n_param: int = 3, max_predict=4):
super().__init__()
'''
Initialize the completions for the test phrase
'''
# convert sentence to list[words] using tokenizer
# self.tokenizer = ToktokTokenizer()
training_ngrams, padded_sentences = padded_everygram_pipeline(
n_param,
#list(map(self.tokenizer.tokenize, training_set)),
list(map(wordpunct_tokenize, training_set)),
)
# print(len(training_ngrams))
# temp = list(training_ngrams)
# for i in range(10):
# print(list(temp[i]))
self.model_obj = MLE(order=n_param)
self.model_obj.fit(training_ngrams, padded_sentences)
print('Vocab length: {}'.format(len(self.model_obj.vocab)))
print('Counts: ', self.model_obj.counts)
self.max_predict = max_predict
def train(self, train_sentence: List[str]):
raise NotImplementedError(
'Train is not implemented for the NGrams model. Pass the training set in evaluate')
def complete(self, phrase: str):
# input_tokens = self.tokenizer.tokenize(phrase.lower())
input_tokens = wordpunct_tokenize(phrase.lower())
results = []
for res_len in range(self.max_predict):
temp = self.model_obj.generate(
res_len+1, text_seed=input_tokens
)
if type(temp) == str:
temp = [temp]
# filter out the stop words
temp = list(filter(lambda x: x != '</s>', temp))
if len(temp) == 1:
results.append(phrase + ' ' + temp[0])
elif len(temp) > 1:
results.append(phrase + ' ' + ' '.join(temp))
return results
if __name__ == '__main__':
from data.comments_loader import CommentsDataset
obj = CommentsDataset('../dataset/small',
'the_donald', remove_stopwords=True)
obj = NGrams(obj.get_training_set(), n_param=3, max_predict=10)
# load the sample phrases
with open('../config/questions.txt') as f:
phrases = [x.strip() for x in f.readlines()]
phrases = list(filter(None, phrases))
results = dict([(x, obj.complete(x)) for x in phrases])
for key, vals in results.items():
print('Input Phrase: {}'.format(key))
print('Results: ', vals)
| 29.806452
| 95
| 0.612554
|
85bfbd0ab2620c93c9743034c79087148a303033
| 2,024
|
py
|
Python
|
pyfeatures/app/main.py
|
ome/pydoop-features
|
189ee7a8cd28e92be9b7e5f51b61f39449564d2a
|
[
"Apache-2.0"
] | 2
|
2017-03-21T14:05:19.000Z
|
2017-05-20T17:33:55.000Z
|
pyfeatures/app/main.py
|
ome/pydoop-features
|
189ee7a8cd28e92be9b7e5f51b61f39449564d2a
|
[
"Apache-2.0"
] | 15
|
2017-04-12T11:09:58.000Z
|
2017-12-20T07:51:08.000Z
|
pyfeatures/app/main.py
|
IDR/pydoop-features
|
fcb21c69287910fbb5707b4f246ea526dc2b75a7
|
[
"Apache-2.0"
] | 4
|
2017-03-13T16:00:58.000Z
|
2017-11-30T15:33:29.000Z
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2014-2017 Open Microscopy Environment:
# - University of Dundee
# - CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""\
Pyfeatures command line tool.
"""
import sys
import argparse
import importlib
from .common import get_log_level, get_logger
VERSION = "NOT_TAGGED_YET"
SUBMOD_NAMES = [
"calc",
"deserialize",
"dump",
"plot",
"serialize",
"summarize",
"tiles",
]
def log_level(s):
try:
return get_log_level(s)
except ValueError as e:
raise argparse.ArgumentTypeError(e.message)
def make_parser():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('-V', '--version', action='version', version=VERSION,
help='print version tag and exit')
parser.add_argument('--log-level', metavar="LEVEL", type=log_level,
default="INFO", help="logging level")
subparsers = parser.add_subparsers(help="sub-commands", dest="command")
for n in SUBMOD_NAMES:
mod = importlib.import_module("%s.%s" % (__package__, n))
mod.add_parser(subparsers)
return parser
def main(argv=None):
parser = make_parser()
args, extra_argv = parser.parse_known_args(argv)
logger = get_logger(args.command, level=args.log_level, f=sys.stdout)
logger.info("START")
args.func(logger, args, extra_argv=extra_argv)
logger.info("STOP")
| 27.351351
| 77
| 0.688735
|
0c0519f1c0536a68699d50844bf02a2d09330c73
| 535
|
py
|
Python
|
src/eurocodepy/ec5/sls/vibration.py
|
pcachim/eurocodepy
|
8b68e733e5ccaa41b16135d3a3f8e9d2544fc112
|
[
"MIT"
] | null | null | null |
src/eurocodepy/ec5/sls/vibration.py
|
pcachim/eurocodepy
|
8b68e733e5ccaa41b16135d3a3f8e9d2544fc112
|
[
"MIT"
] | null | null | null |
src/eurocodepy/ec5/sls/vibration.py
|
pcachim/eurocodepy
|
8b68e733e5ccaa41b16135d3a3f8e9d2544fc112
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
# useful functions
aval = [0.5, 1, 2, 4]
bval = [150, 120, 80, 50]
def floor_freq(l, EI, m):
f1 = (math.pi/2/l**2)*math.sqrt(EI/m)
return f1
def vel(f1, b, l, m, EIl, EIt):
n40 = math.pow(((40/f1)**2-1.0)*((b/l)**4)*(EIl/EIt), 0.25)
print(f"n40 = {n40}")
v = 4*(0.4+0.6*n40)/(m*b*l+200)
return v
def b_from_a(a):
return np.interp(a, aval, bval)
def a_from_b(b):
return np.interp(b, np.flip(bval), aval)
def vlim(f1, b, damp):
return math.pow(b, f1*damp-1.0)
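# --- Illustrative usage sketch (not part of the original module) ---
# Hedged worked example with invented, assumed-SI-consistent input values,
# purely to show how the helpers above fit together.
if __name__ == "__main__":
    l_span, width, mass = 4.0, 5.0, 60.0  # span, width, mass per area (assumed)
    EIl, EIt = 5.0e6, 0.5e6               # longitudinal/transverse stiffness (assumed)
    f1 = floor_freq(l_span, EIl, mass)
    v = vel(f1, width, l_span, mass, EIl, EIt)
    print("f1 =", f1)
    print("v =", v)
    print("b(a=1) =", b_from_a(1.0))
    print("vlim =", vlim(f1, b_from_a(1.0), 0.01))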
| 17.833333
| 63
| 0.564486
|
7624a25f7fae6424207c8b0774903c115bf2fa41
| 2,186
|
py
|
Python
|
Xrbfetch/checks.py
|
FranckLejzerowicz/Xrbfetch
|
0f09eae47a346fa4c5cee86dc4e2eee1ccc85c92
|
[
"BSD-3-Clause"
] | null | null | null |
Xrbfetch/checks.py
|
FranckLejzerowicz/Xrbfetch
|
0f09eae47a346fa4c5cee86dc4e2eee1ccc85c92
|
[
"BSD-3-Clause"
] | null | null | null |
Xrbfetch/checks.py
|
FranckLejzerowicz/Xrbfetch
|
0f09eae47a346fa4c5cee86dc4e2eee1ccc85c92
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2020, Franck Lejzerowicz.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import sys
import biom
from Xrbfetch.io import write_summary, delete_files
def check_fetched_samples(metadata_sams: list, biom_tab: biom.Table) -> None:
"""
Parameters
----------
metadata_sams : list
Samples of the metadata table.
biom_tab : biom.Table
Feature table.
"""
# get the sample names without the prep info number
sams = set(['.'.join(biom_sam.split('.')[:-1]) for biom_sam in biom_tab.ids(axis='sample')])
# get sample only present in metadata, i.e., not fetched
only_meta = set(metadata_sams).difference(sams)
if len(only_meta):
print(' [Warning] %s samples not fetched in any prep:' % len(only_meta))
print(', '.join(only_meta))
def check_replicates_amount(biom_tab: biom.Table) -> None:
"""
Parameters
----------
biom_tab : biom.Table
Biom table.
"""
dups = {}
for sam in biom_tab.ids(axis='sample'):
dups.setdefault('.'.join(sam.split('.')[:-1]), []).append(sam)
dups_to_print = {}
for sam, sams in dups.items():
if len(sams) > 1:
dups_to_print.setdefault(len(sams), []).append(sam)
if dups_to_print:
print(' * Sample ID duplication (without prep number):')
for n, sams in sorted(dups_to_print.items(), key=lambda x: len(x[1])):
print(' --> %s samples have %s replicates' % (len(sams), n))
def potential_stop(
biom_tab: biom.Table,
o_summary_file: str,
summary: list,
redbiom_output: str,
redbiom_samples: str,
do_stop: bool = False):
if not biom_tab.shape[0] or do_stop:
# write the metadata and the biom table outputs
write_summary(o_summary_file, summary)
# delete intermediate files
delete_files(redbiom_output, redbiom_samples)
sys.exit(0)
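if __name__ == '__main__':
    # Minimal sketch (hypothetical IDs; assumes the biom.Table constructor takes
    # data, observation ids, then sample ids, and that Xrbfetch.io is importable):
    import numpy as np
    table = biom.Table(np.arange(6).reshape(2, 3),
                       ['O1', 'O2'], ['S1.1', 'S1.2', 'S2.1'])
    check_fetched_samples(['S1', 'S2', 'S3'], table)  # warns that S3 was not fetched
    check_replicates_amount(table)  # reports that one sample has 2 replicates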
| 31.228571
| 96
| 0.580512
|
94b9963046486092aea92ff8a9f04d8fb0e50b65
| 247
|
py
|
Python
|
Fenier/apps/users/tests.py
|
hubertzk/Fenier
|
b3fc4a506a045b79f681d8522f476fee6bdd9668
|
[
"MIT"
] | 2
|
2021-04-15T19:11:43.000Z
|
2021-04-20T05:05:32.000Z
|
Fenier/apps/users/tests.py
|
alsmonert/Fenier
|
b3fc4a506a045b79f681d8522f476fee6bdd9668
|
[
"MIT"
] | 4
|
2021-02-26T18:19:23.000Z
|
2021-04-22T23:52:03.000Z
|
Fenier/apps/users/tests.py
|
alsmonert/Fenier
|
b3fc4a506a045b79f681d8522f476fee6bdd9668
|
[
"MIT"
] | 1
|
2022-01-07T23:18:47.000Z
|
2022-01-07T23:18:47.000Z
|
from django import test
from django.test import TestCase
from .serializers import UsersSerializer
# Create your tests here.
class UsersSerializerTestCase(TestCase):
def setUp(self):
pass
    def test_serializer_update(self):
        pass
| 22.454545
| 40
| 0.744939
|
588f87060783b0e8e379b0b060abb8a2f0f8b519
| 251
|
py
|
Python
|
demo/superuser.py
|
ixc/django-userena
|
5a8c61dcf5133e8b745c25d3b54e5578043222d8
|
[
"BSD-3-Clause"
] | 2
|
2019-02-14T00:44:29.000Z
|
2020-04-28T17:04:05.000Z
|
demo/superuser.py
|
ixc/django-userena
|
5a8c61dcf5133e8b745c25d3b54e5578043222d8
|
[
"BSD-3-Clause"
] | null | null | null |
demo/superuser.py
|
ixc/django-userena
|
5a8c61dcf5133e8b745c25d3b54e5578043222d8
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from wsgi import *
from django.contrib.auth.models import User
try:
wunki = User.objects.get(username='wunki')
except User.DoesNotExist:
pass
else:
wunki.is_staff = True
wunki.is_superuser = True
wunki.save()
| 20.916667
| 46
| 0.709163
|
a675c7e98b1ca4ff941b805da1663f7bcae3ddd7
| 2,815
|
py
|
Python
|
sparv/modules/misc/ids.py
|
heatherleaf/sparv-pipeline
|
0fe5f27d0d82548ecc6cb21a69289668aac54cf1
|
[
"MIT"
] | 17
|
2018-09-21T07:01:45.000Z
|
2022-02-24T23:26:49.000Z
|
sparv/modules/misc/ids.py
|
heatherleaf/sparv-pipeline
|
0fe5f27d0d82548ecc6cb21a69289668aac54cf1
|
[
"MIT"
] | 146
|
2018-11-13T19:13:25.000Z
|
2022-03-31T09:57:56.000Z
|
sparv/modules/misc/ids.py
|
heatherleaf/sparv-pipeline
|
0fe5f27d0d82548ecc6cb21a69289668aac54cf1
|
[
"MIT"
] | 5
|
2019-02-14T00:50:38.000Z
|
2021-03-29T15:37:41.000Z
|
"""Generate unique IDs for corpus files."""
import math
import random
from binascii import hexlify
from typing import Optional
from sparv import AllDocuments, Annotation, AnnotationData, Document, Output, Wildcard, OutputDataAllDocs, annotator
_ID_LENGTH = 10
@annotator("Give every document a unique ID")
def doc_id(out: OutputDataAllDocs = OutputDataAllDocs("misc.docid", cls="docid"),
docs: Optional[AllDocuments] = AllDocuments(),
doclist: Optional[str] = None,
prefix: str = "",
add: bool = False):
"""Create unique IDs for every document in a list, using the document names as seed.
The resulting IDs are written to the annotation specified by 'out'.
If 'add' is True, existing IDs will not be overwritten.
"""
assert docs or doclist, "docs or doclist must be specified"
if doclist:
with open(doclist) as f:
docs = f.read().strip().splitlines()
docs.sort()
numdocs = len(docs) * 2
used_ids = set()
docs_with_ids = set()
if add:
for doc in docs:
if out.exists(doc):
used_ids.add(out.read(doc))
docs_with_ids.add(doc)
for doc in docs:
if add and doc in docs_with_ids:
continue
_reset_id(doc, numdocs)
new_id = _make_id(prefix, used_ids)
used_ids.add(new_id)
out.write(new_id, doc)
@annotator("Unique IDs for {annotation}", wildcards=[Wildcard("annotation", Wildcard.ANNOTATION)])
def ids(doc: Document = Document(),
annotation: Annotation = Annotation("{annotation}"),
out: Output = Output("{annotation}:misc.id", description="Unique ID for {annotation}"),
docid: AnnotationData = AnnotationData("<docid>"),
prefix: str = ""):
"""Create unique IDs for every span of an existing annotation."""
docid = docid.read()
prefix = prefix + docid
ann = list(annotation.read())
out_annotation = []
# Use doc name and annotation name as seed for the IDs
_reset_id("{}/{}".format(doc, annotation), len(ann))
for _ in ann:
new_id = _make_id(prefix, out_annotation)
out_annotation.append(new_id)
out.write(out_annotation)
def _reset_id(seed, max_ids=None):
"""Reset the random seed for identifiers."""
if max_ids:
global _ID_LENGTH
_ID_LENGTH = int(math.log(max_ids, 16) + 1.5)
seed = int(hexlify(seed.encode()), 16) # For random.seed to work consistently regardless of platform
random.seed(seed)
def _make_id(prefix, existing_ids=()):
"""Create a unique identifier with a given prefix."""
while True:
n = random.getrandbits(_ID_LENGTH * 4)
ident = prefix + hex(n)[2:].zfill(_ID_LENGTH)
if ident not in existing_ids:
return ident
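if __name__ == "__main__":
    # Minimal sketch of the low-level helpers (hypothetical seed and prefix);
    # it only runs where the sparv package imported above is installed.
    _reset_id("example-document", max_ids=100)
    seen = set()
    for _ in range(3):
        new_id = _make_id("doc-", seen)
        seen.add(new_id)
    print(sorted(seen))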
| 32.356322
| 116
| 0.642274
|
051f91a80aca055f25f70cc7c09b1e934c737af3
| 97
|
py
|
Python
|
modules/python-codes/modules/tips-and-tricks/cli-with-typer/modules/rodrigo-portal-gun/tests/test_rodrigo_portal_gun.py
|
drigols/Studies
|
9c293156935b491ded24be6b511daac67fd43538
|
[
"MIT"
] | null | null | null |
modules/python-codes/modules/tips-and-tricks/cli-with-typer/modules/rodrigo-portal-gun/tests/test_rodrigo_portal_gun.py
|
drigols/Studies
|
9c293156935b491ded24be6b511daac67fd43538
|
[
"MIT"
] | null | null | null |
modules/python-codes/modules/tips-and-tricks/cli-with-typer/modules/rodrigo-portal-gun/tests/test_rodrigo_portal_gun.py
|
drigols/Studies
|
9c293156935b491ded24be6b511daac67fd43538
|
[
"MIT"
] | null | null | null |
from rodrigo_portal_gun import __version__
def test_version():
assert __version__ == '0.1.0'
| 16.166667
| 42
| 0.762887
|
2683678fb96bfe8db403432f42980951e89813f9
| 205
|
py
|
Python
|
blender/nodes/converter/__init__.py
|
LewisOrton/taichi_elements_houdini
|
50ef3232f080030213bcb7578a48d03647a9445b
|
[
"MIT"
] | 1
|
2021-05-13T11:34:03.000Z
|
2021-05-13T11:34:03.000Z
|
blender/nodes/converter/__init__.py
|
LewisOrton/taichi_elements_houdini
|
50ef3232f080030213bcb7578a48d03647a9445b
|
[
"MIT"
] | null | null | null |
blender/nodes/converter/__init__.py
|
LewisOrton/taichi_elements_houdini
|
50ef3232f080030213bcb7578a48d03647a9445b
|
[
"MIT"
] | null | null | null |
from .int_to_float import *
from .color_to_vector import *
from .hex_color_to_rgb import *
from .float_math import *
from .vector_math import *
from .combine_vector import *
from .seratate_vector import *
| 25.625
| 31
| 0.795122
|
2d9c8de0747ac7627a05987f52807c13e27e74b1
| 102
|
py
|
Python
|
zhaquirks/bitron/__init__.py
|
WolfRevo/zha-device-handlers
|
0fa4ca1c03c611be0cf2c38c4fec2a197e3dd1d3
|
[
"Apache-2.0"
] | 213
|
2020-04-16T10:48:31.000Z
|
2022-03-30T20:48:07.000Z
|
zhaquirks/bitron/__init__.py
|
WolfRevo/zha-device-handlers
|
0fa4ca1c03c611be0cf2c38c4fec2a197e3dd1d3
|
[
"Apache-2.0"
] | 1,088
|
2020-04-03T13:23:29.000Z
|
2022-03-31T23:55:03.000Z
|
zhaquirks/bitron/__init__.py
|
WolfRevo/zha-device-handlers
|
0fa4ca1c03c611be0cf2c38c4fec2a197e3dd1d3
|
[
"Apache-2.0"
] | 280
|
2020-04-24T08:44:27.000Z
|
2022-03-31T12:58:04.000Z
|
"""Bitron/SMaBiT module."""
BITRON = "Bitron Home"
DIAGNOSTICS_CLUSTER_ID = 0x0B05 # decimal = 2821
| 20.4
| 49
| 0.715686
|
2da4b93e3aac5a260d329dfaedf8a4edc3264868
| 29,896
|
py
|
Python
|
imagecodecs/imagecodecs.py
|
hmaarrfk/imagecodecs
|
a598eda814c01dc1deafb41297b21b016717970b
|
[
"BSD-3-Clause"
] | null | null | null |
imagecodecs/imagecodecs.py
|
hmaarrfk/imagecodecs
|
a598eda814c01dc1deafb41297b21b016717970b
|
[
"BSD-3-Clause"
] | null | null | null |
imagecodecs/imagecodecs.py
|
hmaarrfk/imagecodecs
|
a598eda814c01dc1deafb41297b21b016717970b
|
[
"BSD-3-Clause"
] | null | null | null |
# imagecodecs.py
# Copyright (c) 2008-2021, Christoph Gohlke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Image transformation, compression, and decompression codecs.
Imagecodecs is a Python library that provides block-oriented, in-memory buffer
transformation, compression, and decompression functions for use in the
tifffile, czifile, and other scientific image input/output modules.
Decode and/or encode functions are implemented for Zlib (DEFLATE), GZIP,
ZStandard (ZSTD), Blosc, Brotli, Snappy, LZMA, BZ2, LZ4, LZ4F, LZ4HC,
LZW, LZF, ZFP, AEC, LERC, NPY, PNG, GIF, TIFF, WebP, JPEG 8-bit, JPEG 12-bit,
JPEG SOF3 (LJPEG), JPEG 2000, JPEG LS, JPEG XR, JPEG XL, AVIF,
PackBits, Packed Integers, Delta, XOR Delta, Floating Point Predictor,
Bitorder reversal, Bitshuffle, and Float24 (24-bit floating point).
:Author:
`Christoph Gohlke <https://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics. University of California, Irvine
:License: BSD 3-Clause
:Version: 2021.1.11
:Status: Alpha
Requirements
------------
This release has been tested with the following requirements and dependencies
(other versions may work):
* `CPython 3.7.9, 3.8.7, 3.9.1 64-bit <https://www.python.org>`_
* `Numpy 1.19.5 <https://pypi.org/project/numpy/>`_
* `Cython 0.29.21 <https://cython.org>`_
* `zlib 1.2.11 <https://github.com/madler/zlib>`_
* `lz4 1.9.3 <https://github.com/lz4/lz4>`_
* `zstd 1.4.8 <https://github.com/facebook/zstd>`_
* `blosc 1.21.0 <https://github.com/Blosc/c-blosc>`_
* `bzip2 1.0.8 <https://sourceware.org/bzip2>`_
* `liblzma 5.2.5 <https://github.com/xz-mirror/xz>`_
* `liblzf 3.6 <http://oldhome.schmorp.de/marc/liblzf.html>`_
* `libpng 1.6.37 <https://github.com/glennrp/libpng>`_
* `libwebp 1.1.0 <https://github.com/webmproject/libwebp>`_
* `libtiff 4.2.0 <https://gitlab.com/libtiff/libtiff>`_
* `libjpeg-turbo 2.0.6 <https://github.com/libjpeg-turbo/libjpeg-turbo>`_
(8 and 12-bit)
* `libjpeg 9d <http://libjpeg.sourceforge.net/>`_
* `charls 2.2.0 <https://github.com/team-charls/charls>`_
* `openjpeg 2.4.0 <https://github.com/uclouvain/openjpeg>`_
* `jxrlib 1.1 <https://packages.debian.org/source/sid/jxrlib>`_
* `zfp 0.5.5 <https://github.com/LLNL/zfp>`_
* `bitshuffle 0.3.5 <https://github.com/kiyo-masui/bitshuffle>`_
* `libaec 1.0.4 <https://gitlab.dkrz.de/k202009/libaec>`_
* `snappy 1.1.8 <https://github.com/google/snappy>`_
* `zopfli-1.0.3 <https://github.com/google/zopfli>`_
* `brotli 1.0.9 <https://github.com/google/brotli>`_
* `brunsli 0.1 <https://github.com/google/brunsli>`_
* `giflib 5.2.1 <http://giflib.sourceforge.net/>`_
* `lerc 2.2.1 <https://github.com/Esri/lerc>`_
* `libdeflate 1.7 <https://github.com/ebiggers/libdeflate>`_
* `libavif 0.8.4 <https://github.com/AOMediaCodec/libavif>`_
* `dav1d 0.8.1 <https://github.com/videolan/dav1d>`_
* `rav1e 0.3.5 <https://github.com/xiph/rav1e>`_
* `aom 2.0.1 <https://aomedia.googlesource.com/aom>`_
* `lcms 2.11 <https://github.com/mm2/Little-CMS>`_
Required Python packages for testing (other versions may work):
* `tifffile 2021.1.11 <https://pypi.org/project/tifffile/>`_
* `czifile 2019.7.2 <https://pypi.org/project/czifile/>`_
* `python-blosc 1.10.1 <https://github.com/Blosc/python-blosc>`_
* `python-lz4 3.1.1 <https://github.com/python-lz4/python-lz4>`_
* `python-zstd 1.4.8.1 <https://github.com/sergey-dryabzhinsky/python-zstd>`_
* `python-lzf 0.2.4 <https://github.com/teepark/python-lzf>`_
* `python-brotli 1.0.9 <https://github.com/google/brotli/tree/master/python>`_
* `python-snappy 0.6.0 <https://github.com/andrix/python-snappy>`_
* `zopflipy 1.4 <https://github.com/hattya/zopflipy>`_
* `bitshuffle 0.3.5 <https://github.com/kiyo-masui/bitshuffle>`_
Notes
-----
The API is not stable yet and might change between revisions.
Works on little-endian platforms only.
Python 32-bit versions are deprecated. Python <= 3.6 is no longer supported.
Some codecs are currently decode-only: ``tiff``, ``lzw``, ``packints``, and
``jpegsof3``.
The latest `Microsoft Visual C++ Redistributable for Visual Studio 2015, 2017
and 2019 <https://support.microsoft.com/en-us/help/2977003/
the-latest-supported-visual-c-downloads>`_ is required on Windows.
Refer to the imagecodecs/licenses folder for 3rd-party library licenses.
This software is based in part on the work of the Independent JPEG Group.
This software includes modified versions of `dcm2niix's jpg_0XC3.cpp
<https://github.com/rordenlab/dcm2niix/blob/master/console/jpg_0XC3.cpp>`.
Build instructions and wheels for manylinux and macOS courtesy of
`Grzegorz Bokota <https://github.com/Czaki/imagecodecs_build>`_.
Update pip and setuptools to the latest version before installing imagecodecs:
``python -m pip install --upgrade pip setuptools``
Install imagecodecs using precompiled wheels:
``python -m pip install --upgrade imagecodecs``
Install the requirements for building imagecodecs from source code on
latest Ubuntu Linux distributions:
``sudo apt-get install build-essential python3-dev cython3
python3-setuptools python3-pip python3-wheel python3-numpy
python3-pytest python3-blosc python3-brotli python3-snappy python3-lz4
libz-dev libblosc-dev liblzma-dev liblz4-dev libzstd-dev libpng-dev
libwebp-dev libbz2-dev libopenjp2-7-dev libjpeg-dev libjxr-dev
liblcms2-dev libcharls-dev libaec-dev libbrotli-dev libsnappy-dev
libzopfli-dev libgif-dev libtiff-dev libdeflate-dev libavif-dev``
Use the ``--lite`` build option to only build extensions without 3rd-party
dependencies. Use the ``--skip-extension`` build options to skip building
specific extensions, e.g.:
``python -m pip install imagecodecs --global-option="build_ext"
--global-option="--skip-bitshuffle"``
The ``jpeg12``, ``jpegls``, ``jpegxl``, ``zfp``, ``avif``, ``lz4f``, and
``lerc`` extensions are disabled by default when building from source.
To modify other build settings such as library names and compiler arguments,
provide a ``imagecodecs_distributor_setup.customize_build`` function, which
will be imported and executed during setup. See ``setup.py`` for examples.
Other Python packages and C libraries providing imaging or compression codecs:
* `numcodecs <https://github.com/zarr-developers/numcodecs>`_
* `Python zlib <https://docs.python.org/3/library/zlib.html>`_
* `Python bz2 <https://docs.python.org/3/library/bz2.html>`_
* `Python lzma <https://docs.python.org/3/library/lzma.html>`_
* `backports.lzma <https://github.com/peterjc/backports.lzma>`_
* `python-lzo <https://bitbucket.org/james_taylor/python-lzo-static>`_
* `python-lzw <https://github.com/joeatwork/python-lzw>`_
* `python-lerc <https://pypi.org/project/lerc/>`_
* `packbits <https://github.com/psd-tools/packbits>`_
* `fpzip <https://github.com/seung-lab/fpzip>`_
* `libmng <https://sourceforge.net/projects/libmng/>`_
* `APNG patch for libpng <https://sourceforge.net/projects/libpng-apng/>`_
* `OpenEXR <https://github.com/AcademySoftwareFoundation/openexr>`_
* `tinyexr <https://github.com/syoyo/tinyexr>`_
* `pytinyexr <https://github.com/syoyo/pytinyexr>`_
* `jpeg-xl <https://gitlab.com/wg1/jpeg-xl>`_
* `libjpeg <https://github.com/thorfdbg/libjpeg>`_ (GPL)
* `pylibjpeg <https://github.com/pydicom/pylibjpeg>`_
* `pylibjpeg-libjpeg <https://github.com/pydicom/pylibjpeg-libjpeg>`_ (GPL)
* `pylibjpeg-openjpeg <https://github.com/pydicom/pylibjpeg-openjpeg>`_
* `glymur <https://github.com/quintusdias/glymur>`_
* `pyheif <https://github.com/carsales/pyheif>`_
* `libheif <https://github.com/strukturag/libheif>`_ (LGPL)
Revisions
---------
2021.1.11
Pass 4852 tests.
Fix build issues (#7, #8).
Return bytearray instead of bytes on PyPy.
Raise TypeError if output provided is bytes (breaking).
2021.1.8
Add float24 codec.
Update copyrights.
2020.12.24
Update dependencies and build scripts.
2020.12.22
Add AVIF codec via libavif (WIP).
Add DEFLATE/Zlib and GZIP codecs via libdeflate.
Add LZ4F codec.
Add high compression mode option to lz4_encode.
Convert JPEG XR 16 and 32-bit fixed point pixel types to float32.
Fix JPEG 2000 lossy encoding.
Fix GIF disposal handling.
Remove support for Python 3.6 (NEP 29).
2020.5.30
Add LERC codec via ESRI's lerc library.
Enable building JPEG extensions with libjpeg >= 8.
Enable distributors to modify build settings.
2020.2.18
Fix segfault when decoding corrupted LZW segments.
Work around Cython raises AttributeError when using incompatible numpy.
Raise ValueError if in-place decoding is not possible (except floatpred).
2020.1.31
Add GIF codec via giflib.
Add TIFF decoder via libtiff (WIP).
Add codec_check functions (WIP).
Fix formatting libjpeg error messages.
Use xfail in tests.
Load extensions on demand on Python >= 3.7.
Add build options to skip building specific extensions.
Split imagecodecs extension into individual extensions.
Move shared code into shared extension.
Rename imagecodecs_lite extension and imagecodecs C library to 'imcd'.
Remove support for Python 2.7 and 3.5.
2019.12.31
Fix decoding of indexed PNG with transparency.
Last version to support Python 2.7 and 3.5.
2019.12.16
Add Zopfli codec.
Add Snappy codec.
Rename j2k codec to jpeg2k.
Rename jxr codec to jpegxr.
Use Debian's jxrlib.
Support pathlib and binary streams in imread and imwrite.
Move external C declarations to pxd files.
Move shared code to pxi file.
Update copyright notices.
2019.12.10
Add version functions.
Add Brotli codec (WIP).
Add optional JPEG XL codec via Brunsli repacker (WIP).
2019.12.3
Sync with imagecodecs-lite.
2019.11.28
Add AEC codec via libaec (WIP).
Do not require scikit-image for testing.
Require CharLS 2.1.
2019.11.18
Add bitshuffle codec.
Fix formatting of unknown error numbers.
Fix test failures with official python-lzf.
2019.11.5
Rebuild with updated dependencies.
2019.5.22
Add optional YCbCr chroma subsampling to JPEG encoder.
Add default reversible mode to ZFP encoder.
Add imread and imwrite helper functions.
2019.4.20
Fix setup requirements.
2019.2.22
Move codecs without 3rd-party C library dependencies to imagecodecs_lite.
2019.2.20
Rebuild with updated dependencies.
2019.1.20
Add more pixel formats to JPEG XR codec.
Add JPEG XR encoder.
2019.1.14
Add optional ZFP codec via zfp library (WIP).
Add numpy NPY and NPZ codecs.
Fix some static codechecker errors.
2019.1.1
...
Refer to the CHANGES file for older revisions.
"""
__version__ = '2021.1.11'
import os
import sys
import io
import importlib
import numpy
# names of public attributes by module
# will be updated with standard attributes
_API = {
None: [
'version',
'imread',
'imwrite',
'imagefileext',
'DelayedImportError',
('none', 'numpy', 'jpeg'),
],
'imcd': [
'imcd_version',
'numpy_abi_version',
'cython_version',
(
'bitorder',
'delta',
'float24',
'floatpred',
'lzw',
'packbits',
'packints',
'xor',
),
],
'aec': [],
'avif': [],
# 'exr': [],
'bitshuffle': [],
'blosc': [],
'brotli': [],
'bz2': [],
'deflate': ['deflate_crc32', 'deflate_adler32', ('deflate', 'gzip')],
'gif': [],
'jpeg2k': [],
'jpeg8': [],
'jpeg12': [],
'jpegls': [],
'jpegsof3': [],
'jpegxl': [],
'jpegxr': [],
'lerc': [],
'lz4': [],
'lz4f': [],
'lzf': [],
'lzma': [],
'png': [],
'snappy': [],
# 'szip': [],
'tiff': [],
'webp': [],
'zfp': [],
'zlib': ['zlib_crc32'],
'zopfli': [],
'zstd': [],
# 'module': ['attribute1', 'attribute2', ('codec1', 'code2')]
}
# map extra to existing attributes
# e.g. keep deprecated names for older versions of tifffile and czifile
_COMPATIBILITY = {
'JPEG': 'JPEG8',
'jpeg_check': 'jpeg8_check',
'jpeg_version': 'jpeg8_version',
'zopfli_check': 'zlib_check',
'zopfli_decode': 'zlib_decode',
'j2k_encode': 'jpeg2k_encode',
'j2k_decode': 'jpeg2k_decode',
'jxr_encode': 'jpegxr_encode',
'jxr_decode': 'jpegxr_decode',
}
# map attribute names to module names
_ATTRIBUTES = {}
# map of codec names to module names
_CODECS = {}
def _add_codec(module, codec=None, attributes=None):
"""Register codec in global _API, _ATTRIBUTES, and _CODECS."""
if codec is None:
codec = module
if attributes is None:
attributes = (
f'{codec}_encode',
f'{codec}_decode',
f'{codec}_check',
f'{codec}_version',
f'{codec.capitalize()}Error',
f'{codec.upper()}',
)
if module in _API:
_API[module].extend(attributes)
else:
_API[module] = attributes
_ATTRIBUTES.update({attr: module for attr in _API[module]})
_CODECS[codec] = module
def _register_codecs():
"""Parse _API and register all codecs."""
for module, attributes in _API.items():
for attr in attributes.copy():
if isinstance(attr, tuple):
attributes.remove(attr)
for codec in attr:
_add_codec(module, codec)
break
else:
_add_codec(module)
def _load_all():
"""Add all registered attributes to package namespace."""
for name in __dir__():
__getattr__(name)
def __dir__():
"""Module __dir__."""
return sorted(list(_ATTRIBUTES) + list(_COMPATIBILITY))
def __getattr__(name):
"""Load attribute's extension and add its attributes to package namespace.
"""
name_ = name
name = _COMPATIBILITY.get(name, name)
if name not in _ATTRIBUTES:
raise AttributeError(f"module 'imagecodecs' has no attribute {name!r}")
module_ = _ATTRIBUTES[name]
if module_ is None:
return None
try:
module = importlib.import_module('._' + module_, 'imagecodecs')
except ImportError:
module = None
except AttributeError:
# AttributeError: type object 'imagecodecs._module.array' has no
# attribute '__reduce_cython__'
# work around Cython raises AttributeError e.g. when the _shared
# module failed to import due to an incompatible numpy version
from . import _shared # noqa
module = None
for n in _API[module_]:
if n in _COMPATIBILITY:
continue
attr = getattr(module, n, None)
if attr is None:
attr = _stub(n, module)
setattr(imagecodecs, n, attr)
attr = getattr(imagecodecs, name)
if name != name_:
setattr(imagecodecs, name_, attr)
return attr
class DelayedImportError(ImportError):
def __init__(self, name):
msg = f"could not import name {name!r} from 'imagecodecs'"
super().__init__(msg)
def _stub(name, module):
"""Return stub function or class."""
if name.endswith('_version'):
if module is None:
def stub_version():
f"""Stub for imagecodecs.{name}."""
return f"{name[:-8]} n/a"
else:
def stub_version():
f"""Stub for imagecodecs.{name}."""
return f"{name[:-8]} unknow"
return stub_version
if name.endswith('_check'):
if module is None:
def stub_check(arg):
f"""Stub for imagecodecs.{name}."""
return False
else:
def stub_check(arg):
f"""Stub for imagecodecs.{name}."""
return None
return stub_check
if name.endswith('_decode'):
def stub_decode(*args, **kwargs):
f"""Stub for imagecodecs.{name}."""
raise DelayedImportError(name)
return stub_decode
if name.endswith('_encode'):
def stub_encode(*args, **kwargs):
f"""Stub for imagecodecs.{name}."""
raise DelayedImportError(name)
return stub_encode
if name.islower():
def stub_function(*args, **kwargs):
f"""Stub for imagecodecs.{name}."""
raise DelayedImportError(name)
return stub_function
if name.endswith('Error'):
class StubError(RuntimeError):
f"""Stub for imagecodecs.{name}."""
def __init__(self, *args, **kwargs):
raise DelayedImportError(name)
return StubError
class StubType(type):
def __getattr__(cls, arg):
raise DelayedImportError(name)
if module is None:
def __bool__(cls):
return False
if name.isupper():
class STUB(metaclass=StubType):
f"""Stub for imagecodecs.{name}."""
return STUB
class Stub(metaclass=StubType):
f"""Stub for imagecodecs.{name}."""
return Stub
def _extensions():
"""Return sorted list of extension names."""
return sorted(e for e in _API if e is not None)
def version(astype=None, _versions_=[]):
"""Return version information about all codecs and dependencies."""
if not _versions_:
_versions_.extend(
(
f'imagecodecs {__version__}',
imagecodecs.cython_version(),
imagecodecs.numpy_version(),
imagecodecs.numpy_abi_version(),
imagecodecs.imcd_version(),
)
)
_versions_.extend(
sorted(
set(
getattr(imagecodecs, v)()
for v in _ATTRIBUTES
if v.endswith('_version')
and v
not in (
'imcd_version',
'numpy_abi_version',
'numpy_version',
'cython_version',
'none_version',
)
)
)
)
if astype is None or astype is str:
return ', '.join(ver.replace(' ', '-') for ver in _versions_)
if astype is dict:
return dict(ver.split(' ') for ver in _versions_)
return tuple(_versions_)
def imread(fileobj, codec=None, memmap=True, return_codec=False, **kwargs):
"""Return image data from file as numpy array."""
import mmap
codecs = []
if codec is None:
# find codec based on file extension
if isinstance(fileobj, (str, os.PathLike)):
ext = os.path.splitext(os.fspath(fileobj))[-1][1:].lower()
else:
ext = None
if ext in _imcodecs():
codec = _imcodecs()[ext]
if codec == 'jpeg':
codecs.extend(('jpeg8', 'jpeg12', 'jpegls', 'jpegsof3'))
else:
codecs.append(codec)
# try other imaging codecs
codecs.extend(
c
for c in (
'tiff',
'png',
'gif',
'webp',
'jpeg8',
'jpeg12',
'jpegsof3',
'jpeg2k',
'jpegls',
'jpegxr',
'jpegxl',
'avif',
# 'exr',
'zfp',
'lerc',
'numpy',
)
if c not in codecs
)
else:
# use provided codecs
if not isinstance(codec, (list, tuple)):
codec = [codec]
for c in codec:
if isinstance(c, str):
c = c.lower()
c = _imcodecs().get(c, c)
codecs.append(c)
offset = None
close = False
if isinstance(fileobj, mmap.mmap):
data = fileobj
offset = data.tell()
elif hasattr(fileobj, 'read'):
# binary stream: open file, BytesIO
data = fileobj.read()
elif isinstance(fileobj, (str, os.PathLike)):
# TODO: support urllib.request.urlopen ?
# file name
with open(os.fspath(fileobj), 'rb') as fh:
if memmap:
offset = 0
close = True
data = mmap.mmap(fh.fileno(), 0, access=mmap.ACCESS_READ)
else:
data = fh.read()
else:
# binary data
data = fileobj
exceptions = []
image = None
for codec in codecs:
if callable(codec):
func = codec
else:
try:
func = getattr(imagecodecs, codec + '_decode')
except Exception as exc:
exceptions.append(f'{repr(codec).upper()}: {exc}')
continue
try:
image = func(data, **kwargs)
if image.dtype == 'object':
image = None
raise ValueError('failed')
break
except DelayedImportError:
pass
except Exception as exc:
# raise
exceptions.append(f'{func.__name__.upper()}: {exc}')
if offset is not None:
data.seek(offset)
if close:
data.close()
if image is None:
raise ValueError('\n'.join(exceptions))
if return_codec:
return image, func
return image
def imwrite(fileobj, data, codec=None, **kwargs):
"""Write numpy array to image file."""
if codec is None:
# find codec based on file extension
if isinstance(fileobj, (str, os.PathLike)):
ext = os.path.splitext(os.fspath(fileobj))[-1].lower()[1:]
else:
raise ValueError('no codec specified')
codec = _imcodecs().get(ext, ext)
try:
codec = getattr(imagecodecs, codec + '_encode')
except AttributeError as exc:
raise ValueError(f'invalid codec {codec!r}') from exc
elif isinstance(codec, str):
codec = codec.lower()
codec = _imcodecs().get(codec, codec)
try:
codec = getattr(imagecodecs, codec + '_encode')
except AttributeError as exc:
raise ValueError(f'invalid codec {codec!r}') from exc
elif not callable(codec):
raise ValueError(f'invalid codec {codec!r}')
data = codec(data, **kwargs)
if hasattr(fileobj, 'write'):
# binary stream: open file, BytesIO
fileobj.write(data)
else:
# file name
with open(str(fileobj), 'wb') as fh:
fh.write(data)
def _imcodecs(_codecs_={}):
"""Return map of image file extensions to codec names."""
if not _codecs_:
codecs = {
'avif': ('avif', 'avifs'),
# 'exr': ('exr',),
'gif': ('gif',),
'jpeg': ('jpg', 'jpeg', 'jpe', 'jfif', 'jif', 'ljpeg'),
'jpeg2k': ('j2k', 'jp2', 'j2c', 'jpc', 'jpx', 'jpf'), # jpm, mj2
'jpegls': ('jls',),
'jpegxl': ('jxl', 'brn'),
'jpegxr': ('jxr', 'hdp', 'wdp'),
'lerc': ('lerc1', 'lerc2'),
'numpy': ('npy', 'npz'),
'png': ('png',),
'tiff': ('tif', 'tiff', 'tf8', 'tf2', 'btf'),
'webp': ('webp',),
'zfp': ('zfp',),
}
_codecs_.update(
(ext, codec) for codec, exts in codecs.items() for ext in exts
)
return _codecs_
def imagefileext():
"""Return list of image file extensions handled by imread and imwrite."""
return list(_imcodecs().keys())
NONE = True
NoneError = RuntimeError
def none_version():
"""Return empty version string."""
return ''
def none_check(data):
"""Return True if data likely contains Template data."""
def none_decode(data, *args, **kwargs):
"""Decode NOP."""
return data
def none_encode(data, *args, **kwargs):
"""Encode NOP."""
return data
NUMPY = True
NumpyError = RuntimeError
def numpy_version():
"""Return numpy version string."""
return f'numpy {numpy.__version__}'
def numpy_check(data):
"""Return True if data likely contains NPY or NPZ data."""
with io.BytesIO(data) as fh:
data = fh.read(64)
magic = b'\x93NUMPY'
return data.startswith(magic) or (data.startswith(b'PK') and magic in data)
def numpy_decode(data, index=0, out=None, **kwargs):
"""Decode NPY and NPZ."""
with io.BytesIO(data) as fh:
try:
out = numpy.load(fh, **kwargs)
except ValueError as exc:
raise ValueError('not a numpy array') from exc
if hasattr(out, 'files'):
try:
index = out.files[index]
except Exception:
pass
out = out[index]
return out
def numpy_encode(data, level=None, out=None):
"""Encode NPY and NPZ."""
with io.BytesIO() as fh:
if level:
numpy.savez_compressed(fh, data)
else:
numpy.save(fh, data)
fh.seek(0)
out = fh.read()
return out
JpegError = RuntimeError
def jpeg_decode(
data,
bitspersample=None,
tables=None,
colorspace=None,
outcolorspace=None,
shape=None,
out=None,
):
"""Decode JPEG 8-bit, 12-bit, SOF3, LS, or XL.
"""
if bitspersample is None:
try:
return imagecodecs.jpeg8_decode(
data,
tables=tables,
colorspace=colorspace,
outcolorspace=outcolorspace,
shape=shape,
out=out,
)
except Exception as exc:
msg = str(exc)
if 'Empty JPEG image' in msg:
# TODO: handle Hamamatsu NDPI slides with dimensions > 65500
raise exc
if 'Unsupported JPEG data precision' in msg:
return imagecodecs.jpeg12_decode(
data,
tables=tables,
colorspace=colorspace,
outcolorspace=outcolorspace,
shape=shape,
out=out,
)
if 'SOF type' in msg:
return imagecodecs.jpegsof3_decode(data, out=out)
# Unsupported marker type
try:
return imagecodecs.jpegls_decode(data, out=out)
except Exception:
try:
return imagecodecs.jpegxl_decode(data, out=out)
except Exception:
raise exc
try:
if bitspersample == 8:
return imagecodecs.jpeg8_decode(
data,
tables=tables,
colorspace=colorspace,
outcolorspace=outcolorspace,
shape=shape,
out=out,
)
if bitspersample == 12:
return imagecodecs.jpeg12_decode(
data,
tables=tables,
colorspace=colorspace,
outcolorspace=outcolorspace,
shape=shape,
out=out,
)
try:
return imagecodecs.jpegsof3_decode(data, out=out)
except Exception:
return imagecodecs.jpegls_decode(data, out=out)
except Exception as exc:
msg = str(exc)
if 'Empty JPEG image' in msg:
raise exc
if 'SOF type' in msg:
return imagecodecs.jpegsof3_decode(data, out=out)
try:
return imagecodecs.jpegls_decode(data, out=out)
except Exception:
try:
return imagecodecs.jpegxl_decode(data, out=out)
except Exception:
raise exc
def jpeg_encode(
data,
level=None,
colorspace=None,
outcolorspace=None,
subsampling=None,
optimize=None,
smoothing=None,
out=None,
):
"""Encode JPEG 8-bit or 12-bit.
"""
if data.dtype == numpy.uint8:
func = imagecodecs.jpeg8_encode
elif data.dtype == numpy.uint16:
func = imagecodecs.jpeg12_encode
else:
raise ValueError(f'invalid data type {data.dtype}')
return func(
data,
level=level,
colorspace=colorspace,
outcolorspace=outcolorspace,
subsampling=subsampling,
optimize=optimize,
smoothing=smoothing,
out=out,
)
# initialize package
imagecodecs = sys.modules['imagecodecs']
_register_codecs()
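# Usage sketch (hypothetical file names; assumes the package is installed so
# that this module is importable as 'imagecodecs' with the needed extensions):
#
#   import imagecodecs
#   image = imagecodecs.imread('example.png')    # codec chosen from the extension
#   imagecodecs.imwrite('example.webp', image)
#   print(imagecodecs.version())                 # versions of codecs and libraries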
| 31.012448
| 79
| 0.608409
|
7b252486d8f50287050f179a30a9c787e42ae507
| 225
|
py
|
Python
|
idunn/utils/index_names.py
|
bbecquet/idunn
|
16a6225809180b5a92eca9cb63705f156542d4d9
|
[
"Apache-2.0"
] | 26
|
2018-11-30T09:17:17.000Z
|
2020-11-07T01:53:07.000Z
|
idunn/utils/index_names.py
|
bbecquet/idunn
|
16a6225809180b5a92eca9cb63705f156542d4d9
|
[
"Apache-2.0"
] | 38
|
2018-06-08T09:41:04.000Z
|
2020-12-07T17:39:12.000Z
|
idunn/utils/index_names.py
|
Qwant/idunn
|
65582dfed732093778bf7c2998db1e2cd78255b8
|
[
"Apache-2.0"
] | 9
|
2018-05-18T13:07:00.000Z
|
2020-08-01T16:42:40.000Z
|
from .settings import settings
INDICES = {
"admin": settings["PLACE_ADMIN_INDEX"],
"street": settings["PLACE_STREET_INDEX"],
"address": settings["PLACE_ADDRESS_INDEX"],
"poi": settings["PLACE_POI_INDEX"],
}
| 22.5
| 47
| 0.688889
|
e0b86d53f1d6ecbceaa057b3d960d9a39f06ed13
| 2,060
|
py
|
Python
|
lambdarank.py
|
imaginal/RankNet
|
098c487d705b32b43b3f3f740af607eab4525f7e
|
[
"MIT"
] | 1
|
2018-10-04T04:39:31.000Z
|
2018-10-04T04:39:31.000Z
|
lambdarank.py
|
Sean0719/RankNet-1
|
098c487d705b32b43b3f3f740af607eab4525f7e
|
[
"MIT"
] | null | null | null |
lambdarank.py
|
Sean0719/RankNet-1
|
098c487d705b32b43b3f3f740af607eab4525f7e
|
[
"MIT"
] | null | null | null |
# Michael A. Alcorn (malcorn@redhat.com)
# A (slightly modified) implementation of LambdaRank as described in [1].
# [1] https://papers.nips.cc/paper/2971-learning-to-rank-with-nonsmooth-cost-functions.pdf
# [2] https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf
import numpy as np
import torch
import torch.nn as nn
def idcg(n_rel):
# Assuming binary relevance.
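    # Ideal DCG = sum over ranks r = 1..n_rel of 1 / log2(r + 1), matching the
    # nums / denoms computation below.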
nums = np.ones(n_rel)
denoms = np.log2(np.arange(n_rel) + 1 + 1)
return (nums / denoms).sum()
# Data.
input_dim = 50
n_docs = 20
n_rel = 5
n_irr = n_docs - n_rel
doc_features = np.random.randn(n_docs, input_dim)
# Model.
model = torch.nn.Sequential(
nn.Linear(input_dim, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, 1))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# Document scores.
docs = torch.from_numpy(np.array(doc_features, dtype = "float32"))
docs = docs.to(device)
doc_scores = model(docs)
# Document ranks.
(sorted_scores, sorted_idxs) = doc_scores.sort(dim = 0, descending = True)
doc_ranks = torch.zeros(n_docs).to(device)
doc_ranks[sorted_idxs] = 1 + torch.arange(n_docs, dtype=torch.float).view((n_docs, 1)).to(device)
doc_ranks = doc_ranks.view((n_docs, 1))
# Compute lambdas.
diffs = doc_scores[:n_rel] - doc_scores[n_rel:].view(n_irr)
exped = diffs.exp()
# See equation (6) in [2].
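# With sigma = 1 this yields lambda_ij = -|Delta NDCG_ij| / (1 + exp(s_i - s_j)),
# where i indexes the relevant documents and j the irrelevant ones.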
N = 1 / idcg(n_rel)
ndcg_diffs = (1 / (1 + doc_ranks[:n_rel])).log2() - (1 / (1 + doc_ranks[n_rel:])).log2().view(n_irr)
lamb_updates = -1 / (1 + exped) * N * ndcg_diffs.abs()
# See section 6.1 in [1], but lambdas have opposite signs from [2].
lambs = torch.zeros((n_docs, 1)).to(device)
lambs[:n_rel] -= lamb_updates.sum(dim = 1, keepdim = True)
lambs[n_rel:] += lamb_updates.sum(dim = 0, keepdim = True).t()
# Accumulate lambda scaled gradients.
model.zero_grad()
doc_scores.backward(lambs)
# Update model weights.
lr = 0.00001
with torch.no_grad():
for param in model.parameters():
param += lr * param.grad
| 29.014085
| 100
| 0.682039
|
9e6df9b2fbf19ee174b69a47b62ffe0da8c614ae
| 932
|
py
|
Python
|
src/visions/backends/pandas/types/ip_address.py
|
dah33/visions
|
381b9c1aa700bf0b352a014a50af4d8159c2d0e7
|
[
"BSD-4-Clause"
] | null | null | null |
src/visions/backends/pandas/types/ip_address.py
|
dah33/visions
|
381b9c1aa700bf0b352a014a50af4d8159c2d0e7
|
[
"BSD-4-Clause"
] | null | null | null |
src/visions/backends/pandas/types/ip_address.py
|
dah33/visions
|
381b9c1aa700bf0b352a014a50af4d8159c2d0e7
|
[
"BSD-4-Clause"
] | null | null | null |
from ipaddress import _BaseAddress, ip_address
import pandas as pd
from visions.backends.pandas import test_utils
from visions.backends.pandas.parallelization_engines import pandas_apply
from visions.backends.pandas.series_utils import series_handle_nulls, series_not_empty
from visions.types.ip_address import IPAddress
from visions.types.string import String
@IPAddress.register_relationship(String, pd.Series)
def string_is_ip_address(series: pd.Series, state: dict) -> bool:
return test_utils.coercion_test(lambda s: pandas_apply(s, ip_address))(series)
@IPAddress.register_transformer(String, pd.Series)
def string_to_ip_address(series: pd.Series, state: dict) -> pd.Series:
return pandas_apply(series, ip_address)
@IPAddress.contains_op.register
@series_not_empty
@series_handle_nulls
def ip_address_contains(series: pd.Series, state: dict) -> bool:
return all(isinstance(x, _BaseAddress) for x in series)
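# Minimal usage sketch (hypothetical data; assumes the registration decorators
# above hand back the decorated functions, so they remain directly callable):
#
#   s = pd.Series(["127.0.0.1", "2001:db8::1"])
#   string_is_ip_address(s, {})         # -> True
#   ips = string_to_ip_address(s, {})   # Series of ipaddress objects
#   ip_address_contains(ips, {})        # -> True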
| 34.518519
| 86
| 0.81867
|
4e9b8de051fdae70a91af9d3e126f878f5bc865c
| 1,917
|
py
|
Python
|
test.py
|
z33bs/deep-rl-agent-solves-unity-reacher
|
47fa7d4ee174b8a8144ee16c5edab498fa31d3c2
|
[
"MIT"
] | null | null | null |
test.py
|
z33bs/deep-rl-agent-solves-unity-reacher
|
47fa7d4ee174b8a8144ee16c5edab498fa31d3c2
|
[
"MIT"
] | null | null | null |
test.py
|
z33bs/deep-rl-agent-solves-unity-reacher
|
47fa7d4ee174b8a8144ee16c5edab498fa31d3c2
|
[
"MIT"
] | null | null | null |
from unityagents import UnityEnvironment
import numpy as np
env = UnityEnvironment(file_name="Reacher.app")
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
env_info = env.reset(train_mode=False)[brain_name] # reset the environment
states = env_info.vector_observations # get the current state (for each agent)
scores = np.zeros(num_agents) # initialize the score (for each agent)
while True:
actions = np.random.randn(num_agents, action_size) # select an action (for each agent)
actions = np.clip(actions, -1, 1) # all actions between -1 and 1
    env_info = env.step(actions)[brain_name]           # send all actions to the environment
next_states = env_info.vector_observations # get next state (for each agent)
rewards = env_info.rewards # get reward (for each agent)
dones = env_info.local_done # see if episode finished
scores += env_info.rewards # update the score (for each agent)
states = next_states # roll over states to next time step
if np.any(dones): # exit loop if episode finished
break
print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))
| 46.756098
| 103
| 0.663015
|
f90b4035a9b99dd48426f4fe2bea2bc1f1ca6198
| 4,940
|
py
|
Python
|
setup.py
|
Congee/jq.py
|
df6c93352fb21d5536dd6d718b14796cf137975c
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
Congee/jq.py
|
df6c93352fb21d5536dd6d718b14796cf137975c
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
Congee/jq.py
|
df6c93352fb21d5536dd6d718b14796cf137975c
|
[
"BSD-2-Clause"
] | 1
|
2021-12-03T01:18:05.000Z
|
2021-12-03T01:18:05.000Z
|
#!/usr/bin/env python
import os
import subprocess
import tarfile
import shutil
import sysconfig
import platform
import requests
from setuptools import setup
from setuptools.command.build_ext import build_ext
from setuptools.extension import Extension
if platform.system() == 'Windows':
from setup_windows import win_setup
raise SystemExit(win_setup())
def urlretrieve(source_url, destination_path):
response = requests.get(source_url, stream=True)
if response.status_code != 200:
raise Exception("status code was: {}".format(response.status_code))
with open(destination_path, "wb") as fileobj:
for chunk in response.iter_content(chunk_size=128):
fileobj.write(chunk)
def path_in_dir(relative_path):
return os.path.abspath(os.path.join(os.path.dirname(__file__), relative_path))
def dependency_path(relative_path):
return os.path.join(path_in_dir("_deps"), relative_path)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
jq_lib_tarball_path = dependency_path("jq-lib-1.6.tar.gz")
jq_lib_dir = dependency_path("jq-1.6")
oniguruma_version = "6.9.4"
oniguruma_lib_tarball_path = dependency_path("onig-{}.tar.gz".format(oniguruma_version))
oniguruma_lib_build_dir = dependency_path("onig-{}".format(oniguruma_version))
oniguruma_lib_install_dir = dependency_path("onig-install-{}".format(oniguruma_version))
class jq_build_ext(build_ext):
def run(self):
if not os.path.exists(dependency_path(".")):
os.makedirs(dependency_path("."))
self._build_oniguruma()
self._build_libjq()
build_ext.run(self)
def _build_oniguruma(self):
self._build_lib(
source_url="https://github.com/kkos/oniguruma/releases/download/v{0}/onig-{0}.tar.gz".format(oniguruma_version),
tarball_path=oniguruma_lib_tarball_path,
lib_dir=oniguruma_lib_build_dir,
commands=[
["./configure", "CFLAGS=-fPIC", "--prefix=" + oniguruma_lib_install_dir],
["make"],
["make", "install"],
])
def _build_libjq(self):
self._build_lib(
source_url="https://github.com/stedolan/jq/releases/download/jq-1.6/jq-1.6.tar.gz",
tarball_path=jq_lib_tarball_path,
lib_dir=jq_lib_dir,
commands=[
["autoreconf", "-i"],
["./configure", "CFLAGS=-fPIC", "--disable-maintainer-mode", "--with-oniguruma=" + oniguruma_lib_install_dir],
["make"],
])
def _build_lib(self, source_url, tarball_path, lib_dir, commands):
self._download_tarball(
source_url=source_url,
tarball_path=tarball_path,
lib_dir=lib_dir,
)
macosx_deployment_target = sysconfig.get_config_var("MACOSX_DEPLOYMENT_TARGET")
if macosx_deployment_target:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = macosx_deployment_target
def run_command(args):
print("Executing: %s" % ' '.join(args))
subprocess.check_call(args, cwd=lib_dir)
for command in commands:
run_command(command)
def _download_tarball(self, source_url, tarball_path, lib_dir):
if os.path.exists(tarball_path):
os.unlink(tarball_path)
print("Downloading {}".format(source_url))
urlretrieve(source_url, tarball_path)
print("Downloaded {}".format(source_url))
if os.path.exists(lib_dir):
shutil.rmtree(lib_dir)
tarfile.open(tarball_path, "r:gz").extractall(dependency_path("."))
jq_extension = Extension(
"jq",
sources=["jq.c"],
include_dirs=[os.path.join(jq_lib_dir, "src")],
extra_link_args=["-lm"],
extra_objects=[
os.path.join(jq_lib_dir, ".libs/libjq.a"),
os.path.join(oniguruma_lib_install_dir, "lib/libonig.a"),
],
)
setup(
name='jq',
version='1.1.1',
description='jq is a lightweight and flexible JSON processor.',
long_description=read("README.rst"),
author='Michael Williamson',
url='http://github.com/mwilliamson/jq.py',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
license='BSD 2-Clause',
ext_modules = [jq_extension],
cmdclass={"build_ext": jq_build_ext},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
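# Build sketch (assumes a C toolchain plus autoreconf and make are on PATH, and
# that the 'requests' module imported above for downloads is installed):
#
#   pip install requests
#   python setup.py bdist_wheel
#
# The custom build_ext step downloads and compiles oniguruma and libjq, then
# links the 'jq' extension against the resulting static libraries.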
| 33.605442
| 126
| 0.64413
|
c66ff23c452c3458962931e799368ebb87d8171b
| 882
|
py
|
Python
|
props/scripts/MonsterPop.py
|
camsdu59/Zelda_BlenderGame
|
0f5d5d15bfa79e9f8ea15f0ebcb76bce92f77a21
|
[
"FSFAP"
] | 27
|
2016-01-13T14:16:13.000Z
|
2022-01-03T05:38:44.000Z
|
props/scripts/MonsterPop.py
|
camsdu59/Zelda_BlenderGame
|
0f5d5d15bfa79e9f8ea15f0ebcb76bce92f77a21
|
[
"FSFAP"
] | 1
|
2017-04-29T00:51:26.000Z
|
2017-04-29T00:54:43.000Z
|
props/scripts/MonsterPop.py
|
camsdu59/Zelda_BlenderGame
|
0f5d5d15bfa79e9f8ea15f0ebcb76bce92f77a21
|
[
"FSFAP"
] | 14
|
2016-01-20T21:02:37.000Z
|
2020-07-19T05:47:20.000Z
|
from bge import logic
import mathutils
scene = logic.getCurrentScene()
def pop(cont):
own = cont.owner
# add obj
for i in range(0, 12):
# Get position of origin object
pos = mathutils.Vector(own.worldPosition)
pos[0] = (own.worldPosition[0]) + own['x']
pos[1] = (own.worldPosition[1]) + own['y']
# Rotate vector
pos.rotate(own.orientation)
# Add object
dust = scene.addObject("dust1", own, 80)
# Set new transformation
dust.worldPosition = pos
# Create range
nxRange = range(3, 6)
nxRange2 = range(7, 9)
nyRange = range(7, 12)
space = 0.1
# Increment etc.
if (i == 6):
own['x'] = -space
own['y'] = (own['startY'] * -1) + space
else:
# increment x
if (i in nxRange or i in nxRange2):
own['x'] -= space
else:
own['x'] += space
# increment y
if (i in nyRange):
own['y'] -= space
else:
own['y'] += space
| 21.512195
| 44
| 0.603175
|
56cdfd60c54a4bc7e37013e033fc1289e04c83b9
| 11,976
|
py
|
Python
|
core/domain/collection_jobs_one_off_test.py
|
jlau323/oppia
|
37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691
|
[
"Apache-2.0"
] | 2
|
2020-10-13T12:59:08.000Z
|
2020-10-13T17:10:26.000Z
|
core/domain/collection_jobs_one_off_test.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 1
|
2020-05-27T06:08:17.000Z
|
2020-05-27T06:08:17.000Z
|
core/domain/collection_jobs_one_off_test.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 1
|
2018-03-20T14:12:31.000Z
|
2018-03-20T14:12:31.000Z
|
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Collection-related one-off jobs."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
from core.domain import collection_domain
from core.domain import collection_jobs_one_off
from core.domain import collection_services
from core.domain import rights_manager
from core.platform import models
from core.tests import test_utils
import feconf
(job_models, collection_models,) = models.Registry.import_models([
models.NAMES.job, models.NAMES.collection])
class CollectionMigrationOneOffJobTests(test_utils.GenericTestBase):
ALBERT_EMAIL = 'albert@example.com'
ALBERT_NAME = 'albert'
COLLECTION_ID = 'collection_id'
EXP_ID = 'exp_id'
def setUp(self):
super(CollectionMigrationOneOffJobTests, self).setUp()
# Setup user who will own the test collections.
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.process_and_flush_pending_mapreduce_tasks()
def test_migration_job_does_not_convert_up_to_date_collection(self):
"""Tests that the collection migration job does not convert an
collection that is already the latest collection content schema version.
"""
# Create a new collection that should not be affected by the
# job.
collection = collection_domain.Collection.create_default_collection(
self.COLLECTION_ID, title='A title',
category='A Category', objective='An Objective')
collection_services.save_new_collection(self.albert_id, collection)
self.assertEqual(
collection.schema_version,
feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
yaml_before_migration = collection.to_yaml()
# Start migration job.
job_id = (
collection_jobs_one_off.CollectionMigrationOneOffJob.create_new())
collection_jobs_one_off.CollectionMigrationOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
# Verify the collection is exactly the same after migration.
updated_collection = (
collection_services.get_collection_by_id(self.COLLECTION_ID))
self.assertEqual(
updated_collection.schema_version,
feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
after_converted_yaml = updated_collection.to_yaml()
self.assertEqual(after_converted_yaml, yaml_before_migration)
output = (
collection_jobs_one_off.CollectionMigrationOneOffJob.get_output(
job_id))
expected = [[u'collection_migrated',
[u'1 collections successfully migrated.']]]
self.assertEqual(expected, [ast.literal_eval(x) for x in output])
def test_migration_job_skips_deleted_collection(self):
"""Tests that the collection migration job skips deleted collection
and does not attempt to migrate.
"""
collection = collection_domain.Collection.create_default_collection(
self.COLLECTION_ID, title='A title',
category='A Category', objective='An Objective')
collection_services.save_new_collection(self.albert_id, collection)
# Note: This creates a summary based on the upgraded model (which is
# fine). A summary is needed to delete the collection.
collection_services.regenerate_collection_summary(
self.COLLECTION_ID, None)
        # Delete the collection before migration occurs.
collection_services.delete_collection(
self.albert_id, self.COLLECTION_ID)
        # Ensure the collection is deleted.
with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
collection_services.get_collection_by_id(self.COLLECTION_ID)
# Start migration job on sample collection.
job_id = (
collection_jobs_one_off.CollectionMigrationOneOffJob.create_new())
collection_jobs_one_off.CollectionMigrationOneOffJob.enqueue(job_id)
# This running without errors indicates the deleted collection is
# being ignored.
self.process_and_flush_pending_mapreduce_tasks()
        # Ensure the collection is still deleted.
with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
collection_services.get_collection_by_id(self.COLLECTION_ID)
output = (
collection_jobs_one_off.CollectionMigrationOneOffJob.get_output(
job_id))
expected = [[u'collection_deleted',
[u'Encountered 1 deleted collections.']]]
self.assertEqual(expected, [ast.literal_eval(x) for x in output])
def test_migrate_collections_failing_strict_validation(self):
"""Tests that the collection migration job migrates collections which
do not pass strict validation.
"""
# Save a collection without an objective or explorations in version 1.
collection_title = 'A title'
collection_category = 'A category'
rights_manager.create_new_collection_rights(
self.COLLECTION_ID, self.albert_id)
model = collection_models.CollectionModel(
id=self.COLLECTION_ID,
category=collection_title,
title=collection_category,
objective='',
tags=[],
schema_version=2,
)
model.commit(self.albert_id, 'Made a new collection!', [{
'cmd': collection_services.CMD_CREATE_NEW,
'title': collection_title,
'category': collection_category,
}])
# Save a collection summary object for indexing. The explicit commit
# does not create a summary object, which is needed for the
# job to update the index after updating the collection.
collection_summary = collection_services.compute_summary_of_collection(
model, self.albert_id)
collection_services.save_collection_summary(collection_summary)
# Start migration job on sample collection.
job_id = (
collection_jobs_one_off.CollectionMigrationOneOffJob.create_new())
collection_jobs_one_off.CollectionMigrationOneOffJob.enqueue(job_id)
# This running without errors indicates the collection is migrated.
self.process_and_flush_pending_mapreduce_tasks()
# Check the version number of the new model.
new_model = collection_models.CollectionModel.get(self.COLLECTION_ID)
self.assertEqual(
new_model.schema_version, feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
def test_migration_job_skips_collection_failing_validation(self):
"""Tests that the collection migration job skips the collection
failing validation and does not attempt to migrate.
"""
# Create a collection directly using the model and with an
# invalid language code.
collection_title = 'A title'
collection_category = 'A category'
collection_language_code = 'abc'
collection_schema_version = 2
rights_manager.create_new_collection_rights(
self.COLLECTION_ID, self.albert_id)
model = collection_models.CollectionModel(
id=self.COLLECTION_ID,
category=collection_title,
title=collection_category,
language_code=collection_language_code,
objective='An objective',
tags=[],
schema_version=collection_schema_version
)
model.commit(self.albert_id, 'Made a new collection!', [{
'cmd': collection_services.CMD_CREATE_NEW,
'title': collection_title,
'category': collection_category,
}])
# Start migration job on sample collection.
job_id = (
collection_jobs_one_off.CollectionMigrationOneOffJob.create_new())
collection_jobs_one_off.CollectionMigrationOneOffJob.enqueue(job_id)
# This running without errors indicates the collection failing
# validation is being ignored.
self.process_and_flush_pending_mapreduce_tasks()
# Check that the version number of the new model is same as old model.
new_model = collection_models.CollectionModel.get(self.COLLECTION_ID)
self.assertEqual(new_model.schema_version, collection_schema_version)
output = (
collection_jobs_one_off.CollectionMigrationOneOffJob.get_output(
job_id))
expected = [[u'validation_error',
[u'Collection %s failed validation: Invalid '
u'language code: %s'
% (self.COLLECTION_ID, collection_language_code)]]]
self.assertEqual(expected, [ast.literal_eval(x) for x in output])
def test_migration_job_migrates_collection_nodes(self):
"""Tests that the collection migration job migrates content from
nodes to collection_contents if collection_contents is empty.
"""
# Create an exploration to put in the collection.
self.save_new_default_exploration(self.EXP_ID, self.albert_id)
node = collection_domain.CollectionNode.create_default_node(self.EXP_ID)
# Create a collection directly using the model, so that the collection
# nodes are stored in the 'nodes' property rather than the
# 'collection_contents' property.
collection_title = 'A title'
collection_category = 'A category'
rights_manager.create_new_collection_rights(
self.COLLECTION_ID, self.albert_id)
model = collection_models.CollectionModel(
id=self.COLLECTION_ID,
category=collection_category,
title=collection_title,
objective='An objective',
tags=[],
schema_version=2,
nodes=[{
'exploration_id': self.EXP_ID,
'prerequisite_skills': [],
'acquired_skills': []
}],
)
model.commit(self.albert_id, 'Made a new collection!', [{
'cmd': collection_services.CMD_CREATE_NEW,
'title': collection_title,
'category': collection_category,
}])
# Save a collection summary object for indexing. The explicit commit
# does not create a summary object, which is needed for the
# job to update the index after updating the collection.
collection_summary = collection_services.compute_summary_of_collection(
model, self.albert_id)
collection_services.save_collection_summary(collection_summary)
# Check that collection_contents is empty.
self.assertEqual(model.collection_contents, {})
# Run the job. This should populate collection_contents.
job_id = (
collection_jobs_one_off.CollectionMigrationOneOffJob.create_new())
collection_jobs_one_off.CollectionMigrationOneOffJob.enqueue(job_id)
self.process_and_flush_pending_mapreduce_tasks()
new_model = collection_models.CollectionModel.get(self.COLLECTION_ID)
self.assertEqual(
new_model.collection_contents, {
'nodes': [node.to_dict()]
})
| 43.079137
| 80
| 0.686957
|
3ac3f1fbb18349ae4090c45ce39366326b05806a
| 23,502
|
py
|
Python
|
Mohr_Circle/MC_figure_sources.py
|
ChairOfStructuralMechanicsTUM/Mechanics_Apps
|
b064a42d4df3fa9bde62a5cff9cb27ca61b0127c
|
[
"MIT"
] | 11
|
2017-05-06T17:05:29.000Z
|
2020-11-12T09:26:47.000Z
|
Mohr_Circle/MC_figure_sources.py
|
ChairOfStructuralMechanicsTUM/Mechanics_Apps
|
b064a42d4df3fa9bde62a5cff9cb27ca61b0127c
|
[
"MIT"
] | 49
|
2017-04-20T11:26:11.000Z
|
2020-05-29T13:18:06.000Z
|
Mohr_Circle/MC_figure_sources.py
|
ChairOfStructuralMechanicsTUM/Mechanics_Apps
|
b064a42d4df3fa9bde62a5cff9cb27ca61b0127c
|
[
"MIT"
] | 4
|
2017-02-14T12:55:34.000Z
|
2022-01-12T15:07:07.000Z
|
"""
Mohr Circle - definition of figure specific functions and sources
"""
# general imports
from math import pi, sin, cos, atan
# bokeh imports
from bokeh.models import ColumnDataSource
# internal imports
from MC_helper_functions import calculate_radius_and_center, clear_rect_source, clear_arrow_source
# ----------------------------------------------------------------- #
# define the sources for each figure here to avoid bloating the main code
class fig1():
# initialize ColumnDataSources
def __init__(self):
# Arrows
self.NxP_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.NzP_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.NxN_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.NzN_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.Nxz1_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.Nxz2_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.Nxz3_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.Nxz4_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
# Rectangles
self.NxP_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.NzP_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.NxN_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.NzN_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.Nxz1_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.Nxz2_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.Nxz3_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.Nxz4_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
### Labels
self.Perm_Label_source = ColumnDataSource(data=dict(x=[22,1], y=[-5, -27], names=['x', 'z']))
def plot_normal_forces_x(self, MohrNx):
MohrNx = MohrNx*0.75
if(MohrNx<0):
self.NxP_arrow_source.stream(dict(xS=[12.5-MohrNx], xE=[12.5], yS=[0], yE=[0], lW = [2]),rollover=1)
self.NxN_arrow_source.stream(dict(xS=[-12.5+MohrNx], xE=[-12.5], yS=[0], yE=[0], lW = [2]),rollover=1)
self.NxP_rect_source.data = dict(x=[(25-MohrNx)/2], y=[0], w=[MohrNx-1.5], h = [13], angle=[0])
self.NxN_rect_source.data = dict(x=[(-25+MohrNx)/2], y=[0], w=[MohrNx-1.5], h = [13], angle=[0])
elif(MohrNx==0):
clear_arrow_source( [self.NxP_arrow_source, self.NxN_arrow_source] )
clear_rect_source ( [self.NxP_rect_source, self.NxN_rect_source ] )
else:
self.NxP_arrow_source.stream(dict(xS=[12.5], xE=[12.5+MohrNx], yS=[0], yE=[0], lW = [2]),rollover=1)
self.NxN_arrow_source.stream(dict(xS=[-12.5], xE=[-12.5-MohrNx], yS=[0], yE=[0], lW = [2]),rollover=1)
self.NxP_rect_source.data = dict(x=[(25+MohrNx)/2], y=[0], w=[MohrNx+1.5], h = [13], angle=[0])
self.NxN_rect_source.data = dict(x=[(-25-MohrNx)/2], y=[0], w=[MohrNx+1.5], h = [13], angle=[0])
def plot_normal_forces_z(self, MohrNz):
MohrNz = MohrNz*0.75
if(MohrNz<0):
self.NzP_arrow_source.stream(dict(xS=[0], xE=[0], yS=[12.5-MohrNz], yE=[12.5], lW = [2]),rollover=1)
self.NzN_arrow_source.stream(dict(xS=[0], xE=[0], yS=[-12.5+MohrNz], yE=[-12.5], lW = [2]),rollover=1)
self.NzP_rect_source.data = dict(x=[0], y=[(25-MohrNz)/2], w=[13], h = [MohrNz-1.5], angle=[0])
self.NzN_rect_source.data = dict(x=[0], y=[(-25+MohrNz)/2], w=[13], h = [MohrNz-1.5], angle=[0])
elif (MohrNz==0):
clear_arrow_source( [self.NzP_arrow_source, self.NzN_arrow_source] )
clear_rect_source ( [self.NzP_rect_source, self.NzN_rect_source ] )
else:
self.NzP_arrow_source.stream(dict(xS=[0], xE=[0], yS=[12.5], yE=[12.5+MohrNz], lW = [2]),rollover=1)
self.NzN_arrow_source.stream(dict(xS=[0], xE=[0], yS=[-12.5], yE=[-12.5-MohrNz], lW = [2]),rollover=1)
self.NzP_rect_source.data = dict(x=[0], y=[(25+MohrNz)/2], w=[13], h = [MohrNz+1.5], angle=[0])
self.NzN_rect_source.data = dict(x=[0], y=[(-25-MohrNz)/2], w=[13], h = [MohrNz+1.5], angle=[0])
def plot_shear_forces(self, MohrNxz):
MohrNxz = MohrNxz*0.75
if(MohrNxz==0):
clear_arrow_source( [self.Nxz1_arrow_source, self.Nxz2_arrow_source, self.Nxz3_arrow_source, self.Nxz4_arrow_source] )
clear_rect_source( [self.Nxz1_rect_source, self.Nxz2_rect_source, self.Nxz3_rect_source, self.Nxz4_rect_source] )
else:
self.Nxz1_arrow_source.stream(dict(xS=[9], xE=[9], yS=[0+(MohrNxz/2)], yE=[0-(MohrNxz/2)], lW = [2]),rollover=1)
self.Nxz2_arrow_source.stream(dict(xS=[-9], xE=[-9], yS=[0-(MohrNxz/2)], yE=[0+(MohrNxz/2)], lW = [2]),rollover=1)
self.Nxz3_arrow_source.stream(dict(xS=[MohrNxz/2], xE=[-MohrNxz/2], yS=[9], yE=[9], lW = [2]),rollover=1)
self.Nxz4_arrow_source.stream(dict(xS=[-(MohrNxz/2)], xE=[(MohrNxz/2)], yS=[-9], yE=[-9], lW = [2]),rollover=1)
self.Nxz1_rect_source.data = dict(x=[9], y=[0], w=[0.3*MohrNxz+0.5], h=[13], angle=[0])
self.Nxz2_rect_source.data = dict(x=[-9], y=[0], w=[0.3*MohrNxz+0.5], h=[13], angle=[0])
self.Nxz3_rect_source.data = dict(x=[0], y=[9], w=[13], h=[0.3*MohrNxz+0.5], angle=[0])
self.Nxz4_rect_source.data = dict(x=[0], y=[-9], w=[13], h=[0.3*MohrNxz+0.5], angle=[0])
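# (Added note, not part of the original app) The stream()/rollover=1 pattern used
# throughout this class keeps only the most recent row in each ColumnDataSource, so
# every slider update replaces the previous arrow glyph instead of accumulating new
# ones. A minimal sketch, assuming only bokeh is available (names are illustrative):
# from bokeh.models import ColumnDataSource
# demo_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW=[]))
# demo_source.stream(dict(xS=[0], xE=[1], yS=[0], yE=[0], lW=[2]), rollover=1)
# demo_source.stream(dict(xS=[2], xE=[3], yS=[0], yE=[0], lW=[2]), rollover=1)
# # only the second arrow remains in demo_source.data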
class fig2():
# initialize ColumnDataSources
def __init__(self):
self.Mohr_Circle_source = ColumnDataSource(data=dict(x=[], y=[], radius=[]))
self.Wedge_source = ColumnDataSource(data=dict(x=[], y=[],radius=[], sA=[], eA=[]))
self.Newplane_line_source = ColumnDataSource(data=dict(x=[],y=[]))
self.OriginalPlane_line_source = ColumnDataSource(data=dict(x=[],y=[]))
### Labels
self.Perm_Label_source = ColumnDataSource(data=dict(x=[23.5,1.5], y=[-2.5, 23], names=["\\sigma", "\\tau"]))
self.Moving_Label_source = ColumnDataSource(data=dict(x=[], y=[], names=[]))
self.Show_Label_source = ColumnDataSource(data=dict(x=[], y=[], names=[]))
def ChangeMohrCircle(self,input_vars):
MohrNx = input_vars["MohrNx"]
MohrNz = input_vars["MohrNz"]
MohrNxz = input_vars["MohrNxz"]
MohrP_Angle = input_vars["MohrP_Angle"]
[radius, centreX, rleft_x] = calculate_radius_and_center(input_vars)
rleft_z = 0
self.Mohr_Circle_source.data = dict(x=[centreX], y=[0], radius=[radius])
self.OriginalPlane_line_source.data = dict(x=[rleft_x,MohrNz,MohrNz], y=[rleft_z,MohrNxz,0])
## Calculate forces in rotated element
Nzeta = float(((MohrNx+MohrNz)/2)+(((MohrNx-MohrNz)/2)*cos(2*MohrP_Angle))+MohrNxz*sin(2*MohrP_Angle))
Neta = float(((MohrNx+MohrNz)/2)-(((MohrNx-MohrNz)/2)*cos(2*MohrP_Angle))-MohrNxz*sin(2*MohrP_Angle))
Nzetaeta = float((-(((MohrNx-MohrNz)/2)*sin(2*MohrP_Angle)))+MohrNxz*cos(2*MohrP_Angle))
if MohrP_Angle == 0:
Nzeta = MohrNx
Neta = MohrNz
Nzetaeta = MohrNxz
if MohrP_Angle == (pi/2):
Nzeta = MohrNz
Neta = MohrNx
Nzetaeta = -MohrNxz
self.Newplane_line_source.data = dict(x=[rleft_x,Neta], y=[rleft_z,Nzetaeta])
self.Moving_Label_source.data = dict(x=[MohrNx,MohrNz,0.0, 0.0, Neta,Nzeta,MohrNz,Neta],
y=[0.0,0.0,MohrNxz, Nzetaeta,0.0,0.0,MohrNxz,Nzetaeta],
names=['\\sigma_x','\\sigma_z','\\tau_{xz}','\\tau_{\\overline{xz}}','\\sigma_{\\overline{z}}','\\sigma_{\\overline{x}}',"A","B"])
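# (Added note) Worked check of the transformation in ChangeMohrCircle, assuming the
# sample plane-stress state sigma_x = 50, sigma_z = -30, tau_xz = 20 and alpha = 45 deg
# (so 2*alpha = 90 deg, mean = (50 - 30)/2 = 10, half-difference = (50 + 30)/2 = 40):
# Nzeta    = 10 + 40*cos(90 deg) + 20*sin(90 deg) = 30
# Neta     = 10 - 40*cos(90 deg) - 20*sin(90 deg) = -10
# Nzetaeta = -40*sin(90 deg) + 20*cos(90 deg)     = -40
# The first invariant is preserved: Nzeta + Neta = 20 = sigma_x + sigma_z.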
def reset_circle(self, centreX, radius, angle_label):
self.Mohr_Circle_source.data = dict(x=[centreX], y=[0], radius=[radius])
self.Newplane_line_source.data = dict(x=[], y=[])
self.OriginalPlane_line_source.data = dict(x=[], y=[])
self.Moving_Label_source.data = dict(x=[], y=[], names =[])
self.Show_Label_source.data = dict(x=[], y=[], names =[])
self.Wedge_source.data = dict(x=[], y=[], radius=[], sA=[], eA=[])
angle_label.text = ''
class fig3():
# initialize ColumnDataSources
def __init__(self):
## Rotating plane:
self.Rotating_Plane_source = ColumnDataSource(data=dict(x=[], y=[],angle = [],size =[]))
self.Rotating_Plane_red_source = ColumnDataSource(data=dict(x=[], y=[],angle = [],size =[]))
### Rotating Coordinate-System:
self.Rotating_Axis_X_source = ColumnDataSource(data=dict(xS=[], yS=[], xE=[], yE=[]))
self.Rotating_Axis_Y_source = ColumnDataSource(data=dict(xS=[], yS=[], xE=[], yE=[]))
## Arrows:
self.NzetaP_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.NzetaN_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.NetaP_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.NetaN_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.Nzetaeta1_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.Nzetaeta2_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.Nzetaeta3_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
self.Nzetaeta4_arrow_source = ColumnDataSource(data=dict(xS=[], xE=[], yS=[], yE=[], lW = []))
## Rectangles:
self.NzetaP_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.NzetaN_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.NetaP_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.NetaN_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.Nzetaeta1_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.Nzetaeta2_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.Nzetaeta3_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
self.Nzetaeta4_rect_source = ColumnDataSource(data=dict(x=[], y=[], w=[], h=[], angle=[]))
### Labels
self.Perm_Label_source = ColumnDataSource(data=dict(x=[22,1], y=[-5, -27], names=['x', 'z']))
self.Moving_Label_source = ColumnDataSource(data=dict(x=[], y=[], names =[]))
def reset_rotating_plane(self):
self.Rotating_Axis_X_source.stream(dict(xS=[], yS=[], xE=[], yE=[]), rollover=-1) # arrow glyph
self.Rotating_Axis_Y_source.stream(dict(xS=[], yS=[], xE=[], yE=[]), rollover=-1) # arrow glyph
self.Moving_Label_source.data = dict(x=[], y=[], names =[])
self.Rotating_Plane_source.data = dict(x=[], y=[], angle =[], size = [])
self.Rotating_Plane_red_source.data = dict(x=[], y=[], angle =[], size = [])
def move_labels(self, MohrP_Angle):
self.Moving_Label_source.data = dict(x=[(25+2.5)*cos(-MohrP_Angle)-1,(-25-2.5)*sin(MohrP_Angle)-1],y=[(25+2.5)*sin(-MohrP_Angle)-1,(-25-2.5)*cos(MohrP_Angle)-1],
names = ['\\overline{x}', '\\overline{z}'])
def ChangeRotatingPlane_Forces(self, input_vars):
MohrNx = input_vars["MohrNx"]
MohrNz = input_vars["MohrNz"]
MohrNxz = input_vars["MohrNxz"]
MohrP_Angle = input_vars["MohrP_Angle"]
Nzeta = float(float((MohrNx+MohrNz)/2)+(float((MohrNx-MohrNz)/2)*cos(2*MohrP_Angle))+float(MohrNxz*sin(2*MohrP_Angle)))
Neta = float(float((MohrNx+MohrNz)/2)-(float((MohrNx-MohrNz)/2)*cos(2*MohrP_Angle))-float(MohrNxz*sin(2*MohrP_Angle)))
Nzetaeta = float((-(((MohrNx-MohrNz)/2)*sin(2*MohrP_Angle)))+MohrNxz*cos(2*MohrP_Angle))
MohrP_Angle = -MohrP_Angle
## Set Nzetaeta=0 if angle-slider is set to principal direction
[radius, centreX, rleft_x] = calculate_radius_and_center(input_vars)
alpha_0 = 180*atan(MohrNxz/(MohrNz+(-rleft_x+0.00001)))/(pi)
alpha_0 = int(alpha_0+0.5)
alpharepetitions = [-90, -180, 0, 90, 180]
for n in alpharepetitions:
if input_vars["alpha"] == alpha_0+n:
Nzetaeta = 0
break
## Set Nzeta = 0 if alpha equals value in list MohrNzeta_zero_angles
for m in input_vars["MohrNzeta_zero_angles"]:
if input_vars["alpha"] == m:
Nzeta = 0
break
## Set Neta = 0 if alpha equals value in list MohrNeta_zero_angles
for m in input_vars["MohrNeta_zero_angles"]:
if input_vars["alpha"] == m:
Neta = 0
break
Nzeta = 0.75*Nzeta
if Nzeta>0:
self.NzetaP_arrow_source.stream(dict(xS=[12.5*cos(MohrP_Angle)], xE=[(12.5+Nzeta)*cos(MohrP_Angle)], yS=[(12.5*sin(MohrP_Angle))], yE=[(((12.5+Nzeta)*sin(MohrP_Angle)))], lW = [2]),rollover=1)
self.NzetaN_arrow_source.stream(dict(xS=[-12.5*cos(MohrP_Angle)], xE=[(-12.5-Nzeta)*cos(MohrP_Angle)], yS=[0-(12.5*sin(MohrP_Angle))], yE=[(0-((12.5+Nzeta)*sin(MohrP_Angle)))], lW = [2]),rollover=1)
self.NzetaP_rect_source.data = dict(x=[(12.5*cos(MohrP_Angle)+(12.5+Nzeta)*cos(MohrP_Angle))/2], y=[((12.5*sin(MohrP_Angle))+(((12.5+Nzeta)*sin(MohrP_Angle))))/2], w=[Nzeta+1.5], h = [13], angle=[MohrP_Angle])
self.NzetaN_rect_source.data = dict(x=[(-12.5*cos(MohrP_Angle)+(-12.5-Nzeta)*cos(MohrP_Angle))/2], y=[((-12.5*sin(MohrP_Angle))+(-((12.5+Nzeta)*sin(MohrP_Angle))))/2], w=[Nzeta+1.5], h = [13], angle=[MohrP_Angle])
elif Nzeta==0:
clear_arrow_source( [self.NzetaP_arrow_source, self.NzetaN_arrow_source] )
clear_rect_source( [self.NzetaP_rect_source, self.NzetaN_rect_source] )
else:
self.NzetaP_arrow_source.stream(dict(xS=[(12.5-Nzeta)*cos(MohrP_Angle)], xE=[12.5*cos(MohrP_Angle)], yS=[0+((12.5-Nzeta)*sin(MohrP_Angle))], yE=[0+(12.5*sin(MohrP_Angle))], lW = [2]),rollover=1)
self.NzetaN_arrow_source.stream(dict(xS=[(-12.5+Nzeta)*cos(MohrP_Angle)], xE=[-12.5 *cos(MohrP_Angle)], yS=[(0-((12.5-Nzeta)*sin(MohrP_Angle)))], yE=[0-(12.5*sin(MohrP_Angle))], lW = [2]),rollover=1)
self.NzetaP_rect_source.data = dict(x=[(12.5*cos(MohrP_Angle)+(12.5-Nzeta)*cos(MohrP_Angle))/2], y=[((12.5*sin(MohrP_Angle))+(((12.5-Nzeta)*sin(MohrP_Angle))))/2], w=[Nzeta-1.5], h = [13], angle=[MohrP_Angle])
self.NzetaN_rect_source.data = dict(x=[(-12.5*cos(MohrP_Angle)+(-12.5+Nzeta)*cos(MohrP_Angle))/2], y=[((-12.5*sin(MohrP_Angle))+(-((12.5-Nzeta)*sin(MohrP_Angle))))/2], w=[Nzeta-1.5], h = [13], angle=[MohrP_Angle])
Neta = 0.75*Neta
if Neta>0:
self.NetaP_arrow_source.stream(dict(xS=[12.5*cos((pi/2)+MohrP_Angle)], xE=[(12.5+Neta)*cos((pi/2)+MohrP_Angle)], yS=[(12.5*sin((pi/2)+MohrP_Angle))], yE=[((12.5+Neta)*sin((pi/2)+MohrP_Angle))], lW = [2]),rollover=1)
self.NetaN_arrow_source.stream(dict(xS=[12.5*sin(MohrP_Angle)], xE=[(12.5+Neta)*sin(MohrP_Angle)], yS=[-(12.5*cos(MohrP_Angle))], yE=[-((12.5+Neta)*cos(MohrP_Angle))], lW = [2]),rollover=1)
self.NetaP_rect_source.data = dict(x=[(12.5*cos((pi/2)+MohrP_Angle)+(12.5+Neta)*cos((pi/2)+MohrP_Angle))/2], y=[((12.5*sin((pi/2)+MohrP_Angle))+((12.5+Neta)*sin((pi/2)+MohrP_Angle)))/2], h=[Neta+1.5], w = [13], angle=[MohrP_Angle])
self.NetaN_rect_source.data = dict(x=[(12.5*sin(MohrP_Angle)+(12.5+Neta)*sin(MohrP_Angle))/2], y=[(-(12.5*cos(MohrP_Angle))+-((12.5+Neta)*cos(MohrP_Angle)))/2], h=[Neta+1.5], w = [13], angle=[MohrP_Angle])
elif Neta==0:
clear_arrow_source( [self.NetaP_arrow_source, self.NetaN_arrow_source] )
clear_rect_source( [self.NetaP_rect_source, self.NetaN_rect_source] )
else:
self.NetaP_arrow_source.stream(dict(xS=[(12.5-Neta)*cos((pi/2)+MohrP_Angle)],xE=[12.5*cos((pi/2)+MohrP_Angle)], yS=[((12.5-Neta)*sin((pi/2)+MohrP_Angle))], yE=[0+(12.5*sin((pi/2)+MohrP_Angle))], lW = [2]),rollover=1)
self.NetaN_arrow_source.stream(dict(xS=[(12.5-Neta)*sin(MohrP_Angle)],xE=[12.5*sin(MohrP_Angle)], yS=[-(12.5-Neta)*cos(MohrP_Angle)], yE=[-12.5*cos(MohrP_Angle)], lW = [2]),rollover=1)
self.NetaP_rect_source.data = dict(x=[((12.5-Neta)*cos((pi/2)+MohrP_Angle)+12.5*cos((pi/2)+MohrP_Angle))/2], y=[(((12.5-Neta)*sin((pi/2)+MohrP_Angle))+0+(12.5*sin((pi/2)+MohrP_Angle)))/2], h=[Neta-1.5], w = [13], angle=[MohrP_Angle])
self.NetaN_rect_source.data = dict(x=[((12.5-Neta)*sin(MohrP_Angle)+12.5*sin(MohrP_Angle))/2], y=[(-(12.5-Neta)*cos(MohrP_Angle)+-12.5*cos(MohrP_Angle))/2], h=[Neta-1.5], w = [13], angle=[MohrP_Angle])
Nzetaeta=0.75*Nzetaeta
if Nzetaeta>0:
self.Nzetaeta1_arrow_source.stream(dict(xS=[9*cos(MohrP_Angle)-((Nzetaeta/2)*sin(MohrP_Angle))], xE=[9*cos(MohrP_Angle)+((Nzetaeta/2)*sin(MohrP_Angle))], yS=[(0+9*sin(MohrP_Angle))+((Nzetaeta/2)*cos(MohrP_Angle))], yE=[(0+9*sin(MohrP_Angle))-((Nzetaeta/2)*cos(MohrP_Angle))], lW = [2]),rollover=1)
self.Nzetaeta2_arrow_source.stream(dict(xS=[-9*sin(MohrP_Angle)+((Nzetaeta/2)*cos(MohrP_Angle))], xE=[-9*sin(MohrP_Angle)-((Nzetaeta/2)*cos(MohrP_Angle))], yS=[(0+9*cos(MohrP_Angle))+((Nzetaeta/2)*sin(MohrP_Angle))], yE=[(0+9*cos(MohrP_Angle))-((Nzetaeta/2)*sin(MohrP_Angle))], lW = [2]),rollover=1)
self.Nzetaeta3_arrow_source.stream(dict(xS=[-9*cos(MohrP_Angle)+((Nzetaeta/2)*sin(MohrP_Angle))], xE=[-9*cos(MohrP_Angle)-((Nzetaeta/2)*sin(MohrP_Angle))], yS=[(0-9*sin(MohrP_Angle))-((Nzetaeta/2)*cos(MohrP_Angle))], yE=[(0-9*sin(MohrP_Angle))+((Nzetaeta/2)*cos(MohrP_Angle))], lW = [2]),rollover=1)
self.Nzetaeta4_arrow_source.stream(dict(xS=[9*sin(MohrP_Angle)-((Nzetaeta/2)*cos(MohrP_Angle))], xE=[9*sin(MohrP_Angle)+((Nzetaeta/2)*cos(MohrP_Angle))], yS=[(0-9*cos(MohrP_Angle))-((Nzetaeta/2)*sin(MohrP_Angle))], yE=[(0-9*cos(MohrP_Angle))+((Nzetaeta/2)*sin(MohrP_Angle))], lW = [2]),rollover=1)
self.Nzetaeta1_rect_source.data = dict(x=[(9*cos(MohrP_Angle)+((Nzetaeta/2)*sin(MohrP_Angle))+9*cos(MohrP_Angle)-((Nzetaeta/2)*sin(MohrP_Angle)))/2], y=[((0+9*sin(MohrP_Angle))-((Nzetaeta/2)*cos(MohrP_Angle))+(0+9*sin(MohrP_Angle))+((Nzetaeta/2)*cos(MohrP_Angle)))/2], w=[0.3*Nzetaeta+.5], h = [13], angle=[MohrP_Angle])
self.Nzetaeta2_rect_source.data = dict(x=[(-9*sin(MohrP_Angle)-((Nzetaeta/2)*cos(MohrP_Angle))+-9*sin(MohrP_Angle)+((Nzetaeta/2)*cos(MohrP_Angle)))/2], y=[((0+9*cos(MohrP_Angle))-((Nzetaeta/2)*sin(MohrP_Angle))+(0+9*cos(MohrP_Angle))+((Nzetaeta/2)*sin(MohrP_Angle)))/2], h=[0.3*Nzetaeta+.5], w = [13], angle=[MohrP_Angle])
self.Nzetaeta3_rect_source.data = dict(x=[(-9*cos(MohrP_Angle)-((Nzetaeta/2)*sin(MohrP_Angle))-9*cos(MohrP_Angle)+((Nzetaeta/2)*sin(MohrP_Angle)))/2], y=[((0-9*sin(MohrP_Angle))+((Nzetaeta/2)*cos(MohrP_Angle))+(0-9*sin(MohrP_Angle))-((Nzetaeta/2)*cos(MohrP_Angle)))/2], w=[0.3*Nzetaeta+.5], h = [13], angle=[MohrP_Angle])
self.Nzetaeta4_rect_source.data = dict(x=[(9*sin(MohrP_Angle)+((Nzetaeta/2)*cos(MohrP_Angle))+9*sin(MohrP_Angle)-((Nzetaeta/2)*cos(MohrP_Angle)))/2], y=[((0-9*cos(MohrP_Angle))+((Nzetaeta/2)*sin(MohrP_Angle))+(0-9*cos(MohrP_Angle))-((Nzetaeta/2)*sin(MohrP_Angle)))/2], h=[0.3*Nzetaeta+.5], w = [13], angle=[MohrP_Angle])
elif Nzetaeta==0:
clear_arrow_source( [self.Nzetaeta1_arrow_source, self.Nzetaeta2_arrow_source, self.Nzetaeta3_arrow_source, self.Nzetaeta4_arrow_source] )
clear_rect_source( [self.Nzetaeta1_rect_source, self.Nzetaeta2_rect_source, self.Nzetaeta3_rect_source, self.Nzetaeta4_rect_source] )
else:
self.Nzetaeta1_arrow_source.stream(dict(xS=[9*cos(MohrP_Angle)-((Nzetaeta/2)*sin(MohrP_Angle))], xE=[9*cos(MohrP_Angle)+((Nzetaeta/2)*sin(MohrP_Angle))], yS=[(0+9*sin(MohrP_Angle))+((Nzetaeta/2)*cos(MohrP_Angle))], yE=[(0+9*sin(MohrP_Angle))-((Nzetaeta/2)*cos(MohrP_Angle))], lW = [2]),rollover=1)
self.Nzetaeta2_arrow_source.stream(dict(xS=[-9*sin(MohrP_Angle)+((Nzetaeta/2)*cos(MohrP_Angle))], xE=[-9*sin(MohrP_Angle)-((Nzetaeta/2)*cos(MohrP_Angle))], yS=[(0+9*cos(MohrP_Angle))+((Nzetaeta/2)*sin(MohrP_Angle))], yE=[(0+9*cos(MohrP_Angle))-((Nzetaeta/2)*sin(MohrP_Angle))], lW = [2]),rollover=1)
self.Nzetaeta3_arrow_source.stream(dict(xS=[-9*cos(MohrP_Angle)+((Nzetaeta/2)*sin(MohrP_Angle))], xE=[-9*cos(MohrP_Angle)-((Nzetaeta/2)*sin(MohrP_Angle))], yS=[(0-9*sin(MohrP_Angle))-((Nzetaeta/2)*cos(MohrP_Angle))], yE=[(0-9*sin(MohrP_Angle))+((Nzetaeta/2)*cos(MohrP_Angle))], lW = [2]),rollover=1)
self.Nzetaeta4_arrow_source.stream(dict(xS=[9*sin(MohrP_Angle)-((Nzetaeta/2)*cos(MohrP_Angle))], xE=[9*sin(MohrP_Angle)+((Nzetaeta/2)*cos(MohrP_Angle))], yS=[(0-9*cos(MohrP_Angle))-((Nzetaeta/2)*sin(MohrP_Angle))], yE=[(0-9*cos(MohrP_Angle))+((Nzetaeta/2)*sin(MohrP_Angle))], lW = [2]),rollover=1)
self.Nzetaeta1_rect_source.data = dict(x=[(9*cos(MohrP_Angle)+((Nzetaeta/2)*sin(MohrP_Angle))+9*cos(MohrP_Angle)-((Nzetaeta/2)*sin(MohrP_Angle)))/2], y=[((0+9*sin(MohrP_Angle))-((Nzetaeta/2)*cos(MohrP_Angle))+(0+9*sin(MohrP_Angle))+((Nzetaeta/2)*cos(MohrP_Angle)))/2], w=[0.3*Nzetaeta-.5], h = [13], angle=[MohrP_Angle])
self.Nzetaeta2_rect_source.data = dict(x=[(-9*sin(MohrP_Angle)-((Nzetaeta/2)*cos(MohrP_Angle))+-9*sin(MohrP_Angle)+((Nzetaeta/2)*cos(MohrP_Angle)))/2], y=[((0+9*cos(MohrP_Angle))-((Nzetaeta/2)*sin(MohrP_Angle))+(0+9*cos(MohrP_Angle))+((Nzetaeta/2)*sin(MohrP_Angle)))/2], h=[0.3*Nzetaeta-.5], w = [13], angle=[MohrP_Angle])
self.Nzetaeta3_rect_source.data = dict(x=[(-9*cos(MohrP_Angle)-((Nzetaeta/2)*sin(MohrP_Angle))-9*cos(MohrP_Angle)+((Nzetaeta/2)*sin(MohrP_Angle)))/2], y=[((0-9*sin(MohrP_Angle))+((Nzetaeta/2)*cos(MohrP_Angle))+(0-9*sin(MohrP_Angle))-((Nzetaeta/2)*cos(MohrP_Angle)))/2], w=[0.3*Nzetaeta-.5], h = [13], angle=[MohrP_Angle])
self.Nzetaeta4_rect_source.data = dict(x=[(9*sin(MohrP_Angle)+((Nzetaeta/2)*cos(MohrP_Angle))+9*sin(MohrP_Angle)-((Nzetaeta/2)*cos(MohrP_Angle)))/2], y=[((0-9*cos(MohrP_Angle))+((Nzetaeta/2)*sin(MohrP_Angle))+(0-9*cos(MohrP_Angle))-((Nzetaeta/2)*sin(MohrP_Angle)))/2], h=[0.3*Nzetaeta-.5], w = [13], angle=[MohrP_Angle])
input_vars["MohrP_Angle"] = -MohrP_Angle # /output
avg_line_length: 79.131313 | max_line_length: 335 | alphanum_fraction: 0.596758

hexsha: c0694cdbababbd7332768dcd7b23dbe867864eaa | size: 3,116 | ext: py | lang: Python
max_stars_repo_path: ga_methods.py | max_stars_repo_name: ra6had/GA-Cluster | max_stars_repo_head_hexsha: ed86262475d1f89b31444d97cc841a0ec0982710 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: ga_methods.py | max_issues_repo_name: ra6had/GA-Cluster | max_issues_repo_head_hexsha: ed86262475d1f89b31444d97cc841a0ec0982710 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ga_methods.py | max_forks_repo_name: ra6had/GA-Cluster | max_forks_repo_head_hexsha: ed86262475d1f89b31444d97cc841a0ec0982710 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import galuster
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans as km
import matplotlib.pyplot as plt
from datetime import datetime
import copy
#Define Variables
gens = 5
gen_size = 1000
n_clusters = 19
cluster_list = []
sum_of_dist = []
comp_duration = []
methods = ['random', 'spatial', 'mixed_spatial', 'hybrid']
#Import and prepare OA data for clustering
X = []
with open('LOAC_Input_Data.csv') as file:
df = pd.read_csv(file).set_index('OA')
X = df.values
#Instantiate initial generation
init_pop = galuster.Generation(gen_size, n_clusters=n_clusters, n_variables=60, env=X)
Fittest = []
Top_scores = []
#Survivors = []
Distances = []
for method in methods:
fittest = []
top_scores = []
survivors = []
print('Computing using: ' + method + ' breed method')
start = datetime.now()
GENERATION = copy.deepcopy(init_pop)
for i in range(gens):
print('Generation no: ' + str(i +1))
GENERATION.select()
survivors.append(GENERATION.population)
top_scores.append((min(GENERATION.score())))
fittest.append(GENERATION.population[GENERATION.sorted_scores[0]])
GENERATION.mutate(0.001)
GENERATION.breed(method=method)
GENERATION.population = fittest
GENERATION.sorted_scores = np.argsort(GENERATION.score())
fit_rank = GENERATION.sorted_scores
alpha = fittest[fit_rank[0]]
ga_means = km(n_clusters, alpha, 1).fit(X)
cluster_list.append(ga_means)
end = datetime.now()
comp_duration.append(end - start)
Fittest.append(alpha)
#Survivors.append(survivors)
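# (Added note) km(n_clusters, alpha, 1) above relies on scikit-learn's positional
# arguments (n_clusters, init, n_init): the fittest GA individual is passed as the
# initial centroid array and only a single k-means initialisation is run. Assuming
# alpha is an (n_clusters, n_features) array, the equivalent explicit call would be:
# ga_means = km(n_clusters=n_clusters, init=alpha, n_init=1).fit(X)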
for i in range(len(cluster_list)):
Distances.append(galuster.sum_distances(cluster_list[i], X))
plt.figure(i)
galuster.lolipop_plot(cluster_list[i], X)
#Iterate GA operations over number of generations
#ga_start = datetime.now()
#
#for i in range(gens):
# print('Generation no: ' + str(i +1))
# GENERATION.select()
# survivors.append(GENERATION.population)
# top_scores.append((min(GENERATION.score())))
# fittest.append(GENERATION.population[GENERATION.sorted_scores[0]])
# GENERATION.mutate(0.001)
# GENERATION.breed(method='mixed_spatial')
#
#
##Cluster the data using the fittest seed
#GENERATION.population = fittest
#GENERATION.sorted_scores = np.argsort(GENERATION.score())
#fit_rank = GENERATION.sorted_scores
#ga_means = km(n_clusters, fittest[fit_rank[0]],1).fit(X)
#
#ga_end = datetime.now()
#comp_duration.append(ga_end - ga_start)
#
#cluster_list.append(ga_means)
#sum_of_dist.append(galuster.sum_distances(ga_means, X))
#
##Plot GA cluster membership
#plt.figure(0)
#galuster.lolipop_plot(ga_means, X)
#for i in range(no_kmeans):
# print('Starting kmeans algorithm no: ' + str(i + 1))
# start = datetime.now() #Record starting time
# kmeans = km(n_clusters, n_init=n_seed).fit(X) #compute kmeans
# end = datetime.now() #Record ending time
# cluster_list.append(kmeans) #Append to cluster list
# sum_of_dist.append(galuster.sum_distances(kmeans, X))
# comp_duration.append(end - start)
#
# #Plot cluster membership
# plt.figure(i+1)
# galuster.lolipop_plot(kmeans, X)
avg_line_length: 27.575221 | max_line_length: 87 | alphanum_fraction: 0.716945

hexsha: 96b4711eacb58f560b09b8dce803a7872c1ae8c3 | size: 997 | ext: py | lang: Python
max_stars_repo_path: bindings/python/v1/lte/trigger/uerep/ltetuerep.py | max_stars_repo_name: Nibamot/empower-enb-proto | max_stars_repo_head_hexsha: e0ddad51f7b4a8bf6f2973929408a027747d8c42 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: bindings/python/v1/lte/trigger/uerep/ltetuerep.py | max_issues_repo_name: Nibamot/empower-enb-proto | max_issues_repo_head_hexsha: e0ddad51f7b4a8bf6f2973929408a027747d8c42 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: bindings/python/v1/lte/trigger/uerep/ltetuerep.py | max_forks_repo_name: Nibamot/empower-enb-proto | max_forks_repo_head_hexsha: e0ddad51f7b4a8bf6f2973929408a027747d8c42 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
#!/usr/bin/env python3
"""Base class for a generic LTE Empower Message"""
#
# Copyright (c) 2018 FBK-CREATENET
# AUTHOR- Abin Ninan Thomas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ctypes as ct
from v1.lte.trigger.ltetriggermsg import LTETriggerMsg
class LTETUeRep(LTETriggerMsg):
"""Class that deals with a trigger UE Report message"""
def __init__(self):
super().__init__()
self.proto = ct.CDLL("libemproto.so")
self._action = 4
self._id = 1
avg_line_length: 34.37931 | max_line_length: 66 | alphanum_fraction: 0.725176

hexsha: 08bd22a9ba93bf2fa4049205926dbe805dcf24b1 | size: 733 | ext: py | lang: Python
max_stars_repo_path: src/cclib/method/__init__.py | max_stars_repo_name: maxscheurer/cclib | max_stars_repo_head_hexsha: 722a8b534686465d4e3ae57b8dd285a56f197e4a | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2018-05-30T18:14:35.000Z | max_stars_repo_stars_event_max_datetime: 2018-11-06T21:22:07.000Z
max_issues_repo_path: src/cclib/method/__init__.py | max_issues_repo_name: maxscheurer/cclib | max_issues_repo_head_hexsha: 722a8b534686465d4e3ae57b8dd285a56f197e4a | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/cclib/method/__init__.py | max_forks_repo_name: maxscheurer/cclib | max_forks_repo_head_hexsha: 722a8b534686465d4e3ae57b8dd285a56f197e4a | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Example analyses and calculations based on data parsed by cclib."""
from cclib.method.cda import CDA
from cclib.method.cspa import CSPA
from cclib.method.density import Density
from cclib.method.electrons import Electrons
from cclib.method.fragments import FragmentAnalysis
from cclib.method.lpa import LPA
from cclib.method.mbo import MBO
from cclib.method.mpa import MPA
from cclib.method.nuclear import Nuclear
from cclib.method.opa import OPA
from cclib.method.orbitals import Orbitals
# from cclib.method.volume import Volume
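# (Added) Minimal usage sketch for these analysis methods; "calculation.log" is a
# placeholder for any output file cclib can parse, and the flow below is just the
# usual parse-then-calculate pattern rather than anything specific to this version:
# from cclib.io import ccread
# from cclib.method import MPA
# data = ccread("calculation.log")
# mpa = MPA(data)
# mpa.calculate()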
avg_line_length: 33.318182 | max_line_length: 78 | alphanum_fraction: 0.789905

hexsha: f3372a77385fb5037b4afa6c68bd9e1d4967fe83 | size: 20,907 | ext: py | lang: Python
max_stars_repo_path: src/gripit/core/ynk_utils.py | max_stars_repo_name: yor1001/GripIt | max_stars_repo_head_hexsha: a06b300df56473f692cbb9154d60525d35137ee3 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/gripit/core/ynk_utils.py | max_issues_repo_name: yor1001/GripIt | max_issues_repo_head_hexsha: a06b300df56473f692cbb9154d60525d35137ee3 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/gripit/core/ynk_utils.py | max_forks_repo_name: yor1001/GripIt | max_forks_repo_head_hexsha: a06b300df56473f692cbb9154d60525d35137ee3 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import open
from builtins import range
from builtins import dict
from builtins import int
from future import standard_library
standard_library.install_aliases()
from builtins import object
from gripit.core.edge_model import EdgeAttributes, EdgeModel, EdgePair
from gripit.core.point_cloud_model import PointCloudModel
from gripit.core.point_cloud_render_element import SceneElement
import pyqtgraph.opengl as gl
from PyQt5.QtCore import QPointF as Point2D
from PyQt5.QtCore import QLineF
from PyQt5 import QtGui
import math
import cv2 as cv2
import numpy as np
import random as rand
from skimage import morphology
import copy
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import copy
import logging as log
import gripit.edgelib.util
import scipy
from skimage.measure import LineModel, ransac
from gripit.edgelib.plane_model_nd import PlaneModelND
from scipy.spatial import distance
import os
import gripit.deps.pypcd.pypcd as pcd
def processFace(imageModel, edgePair):
depthImage = imageModel.getCroppedDepthImage()
pointCloud = imageModel.getPointCloudFromCrop().getPointCloudData()
pointsOfInterest = []
for m in range(2):
edge = edgePair[m]
edgePoints = edge.getEdgePointCloudIndexes()
for point in edgePoints:
index = point[1] * depthImage.shape[1] + point[0] % depthImage.shape[1]
pointCloudPoint = pointCloud[index]
pointsOfInterest.append(pointCloudPoint)
if len(pointsOfInterest) < 3:
log.info("Not enough points to process face.")
return None  # bail out: the RANSAC call below needs at least min_samples=3 points
model_robustP, inliersP = ransac(np.asarray(pointsOfInterest).squeeze(), PlaneModelND, min_samples=3, residual_threshold=1, max_trials=20)
return (model_robustP.params[0], model_robustP.params[1], model_robustP.eigenvectors)
def shiftLineOnObject(imageModel, edgeModel):
"""
Shift edge onto object.
Imagemodel: Working Image
EdgeModel: Current Edge that is being shift
"""
if edgeModel.getAttribute("edge_clasification") != EdgeAttributes.EDGE_TYPE_DD1:
log.info("Edge Object {} has an edge classification of {} and will not be shifted.".format(edgeModel.getID(), edgeModel.getAttribute("edge_clasification")))
return -1
if edgeModel.getAttribute("object_direction") == EdgeAttributes.OBJECT_CENTERED:
log.info("Edge Object {} is centered: will not be shifted.".format(edgeModel.getID()))
return 0
if edgeModel.getAttribute("points_shifted") == True:
log.info("Edge Object {} is already shifted.".format(edgeModel.getID()))
return 0
gradientBorder = 1
depthImage = imageModel.getCroppedDepthImage()
# Get Line Orientation
startPos = (edgeModel.x1(), edgeModel.y1())
endPos = (edgeModel.x2(), edgeModel.y2())
edgeLength = edgeModel.length()
pointList = edgeModel.getEdgePointCloudIndexes()
pointCloud = imageModel.getPointCloudFromCrop()
perpendicularSampleLength = 25
perpendicularVector = edgeModel.normalVector().unitVector()
if edgeModel.getAttribute("object_direction") == 2:
perpendicularVector.setAngle(perpendicularVector.angle() + 180)
else:
perpendicularVector.setAngle(perpendicularVector.angle() + 180)
# perpendicularVector.setP2(Point2D(-1*perpendicularVector.dx(), -1*perpendicularVector.dy()))
perpendicularVector1 = QLineF(0, 0, perpendicularVector.dx() * perpendicularSampleLength,
perpendicularVector.dy() * perpendicularSampleLength)
gradientData = []
renderGradient = []
# append start and end Positons to be shifted also
# pointList.append(startPos)
# pointList.append(endPos)
# Obtain a sample values of all pixels on the line and add the array to gradientdata
initialValues = 5
for i in range(len(pointList)):
point = pointList[i]
sampleLine = QLineF(perpendicularVector1)
sampleLine.translate(point[0] - perpendicularVector.dx()*initialValues, point[1] - perpendicularVector.dy()*initialValues)
sampleData = sampleLineFromImage(depthImage, sampleLine, perpendicularSampleLength)
sampleData = fixLineSample(sampleData)
gradientData.append(sampleData)
## Shift values
newPointList = []
for index in range(len(gradientData)):
x, y = pointList[index]
lineSample = gradientData[index]
grad = np.diff(lineSample)
# print("Sample data\n")
# print(gradientData[index])
# print("Gradient\n")
# print(grad)
# print("\n")
absoluteGrad = abs(grad)
absoluteGrad = np.gradient(absoluteGrad)
renderGradient.append(np.diff(absoluteGrad)/np.amax(np.diff(absoluteGrad)))
deltaIndex = np.argmin(absoluteGrad) - initialValues + 3
# maxDelta = 0
# deltaIndex = 5
# thresHold = 200
# for i in range(1, len(gradientData[index])):
# delta = abs(gradientData[index][i] - gradientData[index][i - 1])
# if delta > thresHold and delta > maxDelta:
# maxDelta = delta
# deltaIndex = i
if deltaIndex > 0:
x = int(x + perpendicularVector.dx() * deltaIndex)
y = int(y + perpendicularVector.dy() * deltaIndex)
finalIndex = (x, y)
newPointList.append(finalIndex)
# edgeModel.setAttribute("old_edgePointList", pointList)
# pointListSize = len(newPointList) - 2
edgeModel.setAttribute("perpendicularVector", perpendicularVector)
edgeModel.setAttribute("perpendicularVectorOffset", initialValues)
edgeModel.setAttribute("line_shift_gradient", renderGradient)
edgeModel.setAttribute("oldPointList", pointList)
edgeModel.setEdgePoint(newPointList)
edgeModel.setAttribute("points_shifted", True)
def fixLineSample(sampleData):
minimumDistance = 100
if sampleData[0] < minimumDistance:
for j in range(1, len(sampleData)):
if sampleData[j] > minimumDistance:
sampleData[0] = sampleData[j]
break
for i in range(len(sampleData)-1):
if sampleData[i] <= minimumDistance:
sampleData[i] = sampleData[i - 1]
return sampleData
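# (Added note) Example of the forward-fill above with minimumDistance = 100:
# fixLineSample([0, 0, 250, 240]) -> the leading 0 is replaced by the first value
# above the threshold (250), then invalid samples are filled from the left, giving
# [250, 250, 250, 240].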
def fitLinetoEdgeModel(imageModel, edgeModel):
pointList = np.array(edgeModel.getPointCloudPoints())
# print("{} - PointList Length {}".format(edgeModel.getID(), len(pointList)))
sampleNumber = int(len(pointList) * .20)
if sampleNumber > 20:
sampleNumber = 20
direction = None
try:
model_robust, inliers = ransac(pointList, LineModel, min_samples=2,
residual_threshold=1, max_trials=1000)
origin, direction = model_robust.params
startPos = origin
except ValueError as err:
log.warn("Exception Thrown for model robust again!!!")
direction = None
pointList = edgeModel.getEdgePointCloudIndexes()
startPoint = getKNearestPoint(pointList[0:sampleNumber], imageModel.getPointCloudFromCrop())
endPoint = getKNearestPoint(pointList[-1*sampleNumber:-1], imageModel.getPointCloudFromCrop())
startPoint = imageModel.getPointCloudFromCrop().getPointfromXYCoordinate(startPoint[0], startPoint[1])
endPoint = imageModel.getPointCloudFromCrop().getPointfromXYCoordinate(endPoint[0], endPoint[1])
dst = distance.euclidean(startPoint,endPoint)
if direction is not None:
if np.dot(endPoint - startPoint, direction) < 0:
direction = direction * -1
endPos = startPoint + (direction * dst)
else:
endPos = endPoint
edgeModel.setAttribute("startPos3D", startPoint)
edgeModel.setAttribute("endPos3D", endPos)
# edgeModel.setP1(Point2D(startPos[0], startPos[1]))
# edgeModel.setP2(Point2D(endPos[0], endPos[1]))
def getKNearestPoint(sampleGroup, pointCloud):
thresHold = 4
# throwaway
data = np.empty((0,3))
for sample in sampleGroup:
data = np.vstack((data, np.array(pointCloud.getPointfromXYCoordinate(sample[0], sample[1]))))
mean = np.mean(data, axis=0)
std = np.std(data, axis=0)
t_remove = abs(data - mean) < (1.8 * std)
t_remove_2 = []
for i in range(len(t_remove)):
row = t_remove[i]
if False in row:
t_remove_2.append(i)
data = np.delete(data,t_remove_2, axis=0)
newSampleGroup = np.delete(sampleGroup, t_remove_2, axis=0)
mean = np.mean(data, axis=0)
index = 0
if len(newSampleGroup) > 2:
index = scipy.spatial.KDTree(data).query(mean)[1]
return newSampleGroup[index]
return sampleGroup[0]
def sampleLineFromImage(cvImage, sampleLine, interpolationSize):
x = np.linspace(sampleLine.x1(), sampleLine.x2(), interpolationSize)
y = np.linspace(sampleLine.y1(), sampleLine.y2(), interpolationSize)
# Extract values along line
lineValue = scipy.ndimage.map_coordinates(cvImage, np.vstack((y,x)))
return np.array(lineValue, dtype=float)
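# (Added) Minimal usage sketch for sampleLineFromImage; the synthetic image and line
# below are illustrative only:
# import numpy as np
# from PyQt5.QtCore import QLineF
# img = np.arange(25, dtype=float).reshape(5, 5)
# profile = sampleLineFromImage(img, QLineF(0, 2, 4, 2), 5)
# # samples row y = 2 at x = 0..4 -> [10., 11., 12., 13., 14.]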
def processPointCloud(context, imageModel):
depthImg = imageModel.getCroppedDepthImage()
pointCloudData = np.zeros((len(depthImg)*len(depthImg[0]), 3))
for y_coord in range(len(depthImg)):
for x_coord in range(len(depthImg[0])):
x, y, z, = depthToPointCloud(context, x_coord, y_coord, imageModel)
pointCloudData[(y_coord*len(depthImg[1]))+x_coord%len(depthImg[1])] = (x, y, z)
return PointCloudModel(pointCloudData=pointCloudData, imageModel=imageModel, context=context)
def depthToPointCloud(context, x_cord, y_cord, imageModel):
depthImg = imageModel.getCroppedDepthImage()
scaleFactor = 20
cx = len(depthImg[0])/2
cy = len(depthImg)/2
f = 550 #context.focal_length
z = depthImg[y_cord][x_cord]
x = ((x_cord - cx) * (z / (f)))/scaleFactor
y = ((y_cord - cy) * (z / (f)))/scaleFactor
z = ((-1 * z)/scaleFactor) + 50
point = QtGui.QVector3D(x, y, z)
tr = QtGui.QMatrix4x4()
tr.translate( 0.0, 0.0, 0.0)
tr.rotate(50, 1, 0, 0)
# center = params['center']
# tr.translate(-center.x(), -center.y(), -center.z())
inv, result = tr.inverted()
point = inv * (point)
return -point.x(), point.y(), point.z()
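# (Added note) Worked example of the back-projection in depthToPointCloud, ignoring
# the final 50-degree rotation: for a 640x480 crop (cx = 320, cy = 240), f = 550 and
# a depth value z = 1100 at pixel (x_cord, y_cord) = (420, 240):
# x = (420 - 320) * (1100 / 550) / 20 = 10
# y = (240 - 240) * (1100 / 550) / 20 = 0
# z = -1100 / 20 + 50 = -5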
def ddepthtTo3DPoint(context, x_cord, y_cord, imageModel):
depthImg = imageModel.getCroppedDepthImage()
params = SceneElement.getSceneParameters(imageModel)
coords = "xyz"
cameraPos = []
# center = Point3D(0, 0, 0)
for i in range(len(coords)):
value = imageModel.getAttribute("camera_location_{}".format(coords[i]))
cameraPos.append(value)
cx = len(depthImg[0])/2
cy = len(depthImg)/2
focalLength = params["focal_length"]
scaleFactor = focalLength*9
z = depthImg[y_cord][x_cord]/25
x = (x_cord-cx)*(z/scaleFactor)
y = -1*(y_cord-cy)*(z/scaleFactor)
z = -z
# x = math.sin((fov/2)*math.pi/180) * z_cord * 20 #nearClip/z_cord * proj_x
# y = math.sin((fov/2)*math.pi/180) * z_cord * 20 #nearClip/z_cord * proj_y
# z = -z_cord
point = QtGui.QVector3D(x, y, z)
tr = QtGui.QMatrix4x4()
tr.translate( 0.0, 0.0, -params['distance'])
tr.rotate(params['elevation']-90, 1, 0, 0)
tr.rotate(params['azimuth']+90, 0, 0, -1)
# center = params['center']
# tr.translate(-center.x(), -center.y(), -center.z())
inv, result = tr.inverted()
point = inv * (point)
return point.x(), point.y(), point.z()
def depthtTo3DPoint(context, x_cord, y_cord, imageModel):
depthImg = imageModel.getCroppedDepthImage()
params = SceneElement.getSceneParameters(imageModel)
nMax = np.amax(depthImg)
coords = "xyz"
cameraPos = []
# center = Point3D(0, 0, 0)
for i in range(len(coords)):
value = imageModel.getAttribute("camera_location_{}".format(coords[i]))
cameraPos.append(value)
cx = len(depthImg[0])/2
cy = len(depthImg)/2
focalLength = params["focal_length"]
scaleFactor = focalLength
z = depthImg[y_cord][x_cord]
z = (z / nMax) * params['distance']
x = (x_cord-cx)*(z/scaleFactor)
y = -1*(y_cord-cy)*(z/scaleFactor)
z = -z
# x = math.sin((fov/2)*math.pi/180) * z_cord * 20 #nearClip/z_cord * proj_x
# y = math.sin((fov/2)*math.pi/180) * z_cord * 20 #nearClip/z_cord * proj_y
# z = -z_cord
point = QtGui.QVector3D(x, y, z)
tr = QtGui.QMatrix4x4()
tr.translate( 0.0, 0.0, -params['distance'])
tr.rotate(params['elevation']-90, 1, 0, 0)
tr.rotate(params['azimuth']+90, 0, 0, -1)
# center = params['center']
# tr.translate(-center.x(), -center.y(), -center.z())
inv, result = tr.inverted()
point = inv * (point)
return point.x(), point.y(), point.z()
def pointToImg(context, point, imageModel):
scaleFactor = 40
depthImg = imageModel.getCroppedDepthImage()
params = SceneElement.getSceneParameters(imageModel)
# NOTE: x_cord and y_cord below are undefined in this function; the body mirrors
# depthtTo3DPoint and does not yet project `point` back to image coordinates.
pos = QtGui.QVector3D(point[0], point[1], point[2])
coords = "xyz"
cameraPos = []
# center = Point3D(0, 0, 0)
for i in range(len(coords)):
value = imageModel.getAttribute("camera_location_{}".format(coords[i]))
cameraPos.append(value)
cx = len(depthImg[0])/2
cy = len(depthImg)/2
focalLength = params["focal_length"]
z = depthImg[y_cord][x_cord]
x = (x_cord-cx)#*(z/focalLength) #((x_cord - cx) * (z / (focalLength)))
y = -1*(y_cord-cx)#*(z/focalLength) #((y_cord - cy) * (z / (focalLength)))
z = -1*((z))/(scaleFactor/3)
# x = math.sin((fov/2)*math.pi/180) * z_cord * 20 #nearClip/z_cord * proj_x
# y = math.sin((fov/2)*math.pi/180) * z_cord * 20 #nearClip/z_cord * proj_y
# z = -z_cord
point = QtGui.QVector3D(x, y, z)
tr = QtGui.QMatrix4x4()
tr.translate( 0.0, 0.0, -params['distance'])
tr.rotate(params['elevation']-90, 1, 0, 0)
tr.rotate(params['azimuth']+90, 0, 0, -1)
# center = params['center']
# tr.translate(-center.x(), -center.y(), -center.z())
inv, result = tr.inverted()
point = inv * (point)
return point.x(), point.y(), point.z()
## Code taken directly from the main function in main.py (unclear why it was defined there)
def roipoly(src, poly):
mask = np.zeros_like(src, dtype=np.uint8)
win = util.swap_indices(poly)
cv2.fillConvexPoly(mask, win, 255) # Create the ROI
res = src * mask
# cv2.imshow("roi", res)
# cv2.waitKey(0)
return res
def getOrientation(line, window_size):
dy = abs(line[0] - line[2])
dx = abs(line[1] - line[3])
# Vertical or horizontal line test
if dy > dx or dy == dx:
pt1 = [line[0], line[1] - window_size]
pt2 = [line[0], line[1] + window_size]
pt3 = [line[2], line[3] - window_size]
pt4 = [line[2], line[3] + window_size]
return pt1, pt2, pt3, pt4
else:
pt1 = [line[0] - window_size, line[1]]
pt2 = [line[0] + window_size, line[1]]
pt3 = [line[2] - window_size, line[3]]
pt4 = [line[2] + window_size, line[3]]
return pt1, pt2, pt3, pt4
def getOrdering(pt1, pt2, pt3, pt4):
temp1 = np.linalg.norm(np.subtract((np.add(pt1, pt3) / 2.0), (np.add(pt2, pt4) / 2.0)))
temp2 = np.linalg.norm(np.subtract((np.add(pt1, pt4) / 2.0), (np.add(pt2, pt3) / 2.0)))
res = np.array([pt1, pt3, pt4, pt2]) if temp1 > temp2 else np.array([pt1, pt4, pt3, pt2])
return [[int(i) for i in pt] for pt in res]
def gradDir(img):
# compute x and y derivatives
# OpenCV's Sobel operator gives better results than numpy gradient
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=-1)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=-1)
# calculate gradient direction angles
# phase needs 64-bit input
angle = cv2.phase(sobelx, sobely)
# truncates number
gradir = np.fix(180 + angle)
return gradir
class ObjParser(object):
"""This defines a generalized parse dispatcher; all parse functions
reside in subclasses."""
def parseFile(self, file_path):
self.vertexList = []
self.faceIndecies = []
self.materialDefinition = {}
self.selectedMaterialName = None
self.colors = []
for line in open(file_path, 'r'):
self.parseLine(line, dir=os.path.dirname(file_path))
return {
'vertexList': self.vertexList,
'faceIndecies': self.faceIndecies,
'colors':self.colors
}
def parseLine(self, line, dir):
"""Determine what type of line we are and dispatch
appropriately."""
if line.startswith('#'):
return
values = line.split()
if len(values) < 2:
return
line_type = values[0]
args = values[1:]
i = 0
for arg in args:
if dir != '' and ('mtllib' in line or 'map_Kd' in line):
args[i] = dir + '/' + arg
else:
args[i] = arg
i += 1
if hasattr(self, "parse_{}".format(line_type)):
parse_function = getattr(self, 'parse_%s' % line_type)
parse_function(args)
else:
return
def parse_mtllib(self, args):
materialParser = MaterialParser()
self.materialDefinition = dict(self.materialDefinition, **materialParser.parseFile(args[0]))
def parse_usemtl(self, args):
materialName = args[0]
if materialName in self.materialDefinition.keys():
self.selectedMaterialName = materialName
else:
raise RuntimeError("Material {} not defined".format(materialName))
def parse_v(self, args):
vector = []
for arg in args:
vectorElement = float(arg)
vector.append(vectorElement)
self.vertexList.append(vector)
def parse_f(self, args):
face = []
for arg in args:
attributes = arg.split('/')
face.append(int(attributes[0]) - 1)
self.faceIndecies.append(face)
self.colors.append(self.materialDefinition[self.selectedMaterialName]["Kd"])
def getMeshData(self):
# print("Mesh Data ")
# print(self.vertexList)
# print(self.faceIndecies)
# print(self.colors)
mesh_data = gl.MeshData(vertexes=np.array(self.vertexList)*1, faces=np.array(self.faceIndecies), faceColors=np.array(self.colors))
return mesh_data
class MaterialParser(object):
"""This defines a generalized parse dispatcher; all parse functions
reside in subclasses."""
def parseFile(self, file_path):
self.materialDefinitions = {}
self.currentMaterialName = None
for line in open(file_path, 'r'):
self.parseLine(line, dir=os.path.dirname(file_path))
return self.materialDefinitions
def parseLine(self, line, dir):
"""Determine what type of line we are and dispatch
appropriately."""
if line.startswith('#'):
return
values = line.split()
if len(values) < 2:
return
line_type = values[0]
args = values[1:]
i = 0
if hasattr(self, "parse_{}".format(line_type)):
parse_function = getattr(self, 'parse_%s' % line_type)
parse_function(args)
elif line_type in ("Ka", "Kd", "Ks", "Ke", "Ni", "d", "illum"):
values = []
for arg in args:
val = float(arg)
values.append(val)
self.materialDefinitions[self.currentMaterialName][line_type] = values
else:
return
def parse_newmtl(self, args):
self.currentMaterialName = args[0]
self.materialDefinitions[self.currentMaterialName] = {}
def parse_Kd(self, args):
values = []
for arg in args:
val = float(arg)
values.append(val)
values.append(1.0)
self.materialDefinitions[self.currentMaterialName]["Kd"] = values
def getOBJParser():
return ObjParser()
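# (Added) Minimal usage sketch for the parser classes above; "model.obj" is a
# hypothetical path and must reference its materials via a .mtl file (mtllib/usemtl)
# for the per-face colors to resolve:
# parser = getOBJParser()
# result = parser.parseFile("model.obj")
# mesh_data = parser.getMeshData()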
def loadPCD(fileName):
pc = pcd.PointCloud.from_path(fileName)
width = pc.width
height = pc.height
imgd = np.reshape(pc.pc_data["rgb"], (height, width, 1))
img = imgd.copy()
img = img.view(np.uint8)
img = np.delete(img, 3, 2)
x = pc.pc_data['x'].copy()
y = pc.pc_data['y'].copy()
z = pc.pc_data['z'].copy()
depth = np.asarray((z/ np.nanmax(z)) * 2000, np.uint16)
depth = np.reshape(depth, (height, width))
# pdb.set_trace()
# depth = util.
# depth = cv2.
return img, depth, (x, y, z)
def rotateImage(image, angle):
(h, w) = image.shape[:2]
center = (w / 2, h / 2)
M = cv2.getRotationMatrix2D(center,angle,1.0)
rotated_image = cv2.warpAffine(image, M, (w,h))
return rotated_image
avg_line_length: 33.291401 | max_line_length: 159 | alphanum_fraction: 0.638303

hexsha: 1da1adf3452ff5130b1467a4f361e89ea5754a5d | size: 25,001 | ext: py | lang: Python
max_stars_repo_path: scripts/python/catalyst/updatedSCR/init_450C.py | max_stars_repo_name: aladshaw3/cats | max_stars_repo_head_hexsha: a24b7c141f3d87146905e422682e125422b98b67 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2022-02-17T16:37:12.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-17T16:37:12.000Z
max_issues_repo_path: scripts/python/catalyst/updatedSCR/init_450C.py | max_issues_repo_name: aladshaw3/cats | max_issues_repo_head_hexsha: a24b7c141f3d87146905e422682e125422b98b67 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 5 | max_issues_repo_issues_event_min_datetime: 2021-10-30T00:06:31.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-09T13:34:07.000Z
max_forks_repo_path: scripts/python/catalyst/updatedSCR/init_450C.py | max_forks_repo_name: aladshaw3/cats | max_forks_repo_head_hexsha: a24b7c141f3d87146905e422682e125422b98b67 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 3 | max_forks_repo_forks_event_min_datetime: 2021-05-08T02:26:08.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-01T22:27:40.000Z
import sys
sys.path.append('../..')
from catalyst.isothermal_monolith_catalysis import *
#Importing all reaction dictionaries
from rxns_v5 import *
# Create dict to iterate through
rxn_list = {"r5f": r5f,"r5r": r5r,"r6f": r6f,"r6r": r6r,"r7": r7,"r8": r8,"r9": r9,
"r10": r10,"r11": r11,"r12": r12,"r13": r13,"r14": r14,
"r15": r15,"r16": r16,"r17": r17,"r18": r18,"r19": r19,"r20": r20,
"r21": r21,"r22": r22,"r23": r23,"r24": r24,"r25": r25,
"r26": r26,"r27": r27,"r28": r28,"r29": r29,"r30": r30,"r31": r31,
"r32": r32,"r33": r33,"r34": r34,"r35": r35,"r36": r36,
"r37": r37,"r38": r38,"r39": r39}
T = 450 + 273.15
Tstr = "450C"
O2 = 0.001685618
H2O = 0.000840133
# Apply transformation to kinetics for this temperature set
for rxn in rxn_list:
k = arrhenius_rate_const(rxn_list[rxn]["parameters"]["A"], 0, rxn_list[rxn]["parameters"]["E"], T)
rxn_list[rxn]["parameters"]["E"] = 0
rxn_list[rxn]["parameters"]["A"] = k
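# (Added note) arrhenius_rate_const presumably evaluates k = A * T**beta * exp(-E / (R*T))
# with beta = 0 here; storing k back into "A" and zeroing "E" fixes every rate constant
# at this single isothermal temperature, so the simulation below only sees pre-evaluated
# constants rather than temperature-dependent kinetics.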
# ========= create bulk of script ===========
# Read in the data (data is now a dictionary containing the data we want)
data = naively_read_data_file("inputfiles/SCR_all-ages_"+Tstr+".txt",factor=1)
time_list = time_point_selector(data["time"], data)
# Testing
sim = Isothermal_Monolith_Simulator()
sim.add_axial_dim(0,5)
sim.add_axial_dataset(5) # Location of observations (in cm)
sim.add_temporal_dim(point_list=time_list)
sim.add_temporal_dataset(data["time"]) #Temporal observations (in min)
sim.add_age_set(["Unaged","2hr","4hr","8hr","16hr"])
sim.add_data_age_set(["Unaged","2hr","4hr","8hr","16hr"]) # Data observations can be a sub-set
sim.add_temperature_set([Tstr])
sim.add_data_temperature_set([Tstr]) # Data observations can be a sub-set
sim.add_gas_species(["NH3","H2O","O2","NO","NO2","N2O","N2"])
sim.add_data_gas_species(["NH3","NO","NO2","N2O"]) # Data observations can be a sub-set
sim.set_data_values_for("NH3","Unaged",Tstr,5,data["time"],data["NH3_Unaged"])
sim.set_data_values_for("NO","Unaged",Tstr,5,data["time"],data["NO_Unaged"])
sim.set_data_values_for("NO2","Unaged",Tstr,5,data["time"],data["NO2_Unaged"])
sim.set_data_values_for("N2O","Unaged",Tstr,5,data["time"],data["N2O_Unaged"])
sim.set_data_values_for("NH3","2hr",Tstr,5,data["time"],data["NH3_2hr"])
sim.set_data_values_for("NO","2hr",Tstr,5,data["time"],data["NO_2hr"])
sim.set_data_values_for("NO2","2hr",Tstr,5,data["time"],data["NO2_2hr"])
sim.set_data_values_for("N2O","2hr",Tstr,5,data["time"],data["N2O_2hr"])
sim.set_data_values_for("NH3","4hr",Tstr,5,data["time"],data["NH3_4hr"])
sim.set_data_values_for("NO","4hr",Tstr,5,data["time"],data["NO_4hr"])
sim.set_data_values_for("NO2","4hr",Tstr,5,data["time"],data["NO2_4hr"])
sim.set_data_values_for("N2O","4hr",Tstr,5,data["time"],data["N2O_4hr"])
sim.set_data_values_for("NH3","8hr",Tstr,5,data["time"],data["NH3_8hr"])
sim.set_data_values_for("NO","8hr",Tstr,5,data["time"],data["NO_8hr"])
sim.set_data_values_for("NO2","8hr",Tstr,5,data["time"],data["NO2_8hr"])
sim.set_data_values_for("N2O","8hr",Tstr,5,data["time"],data["N2O_8hr"])
sim.set_data_values_for("NH3","16hr",Tstr,5,data["time"],data["NH3_16hr"])
sim.set_data_values_for("NO","16hr",Tstr,5,data["time"],data["NO_16hr"])
sim.set_data_values_for("NO2","16hr",Tstr,5,data["time"],data["NO2_16hr"])
sim.set_data_values_for("N2O","16hr",Tstr,5,data["time"],data["N2O_16hr"])
#Clear the dictionary to free memory once it is no longer needed
data.clear()
sim.add_surface_species(["Z1CuOH-NH3",
"Z2Cu-NH3",
"Z2Cu-(NH3)2",
"ZNH4",
"Z1CuOH-H2O",
"Z2Cu-H2O",
"Z1CuOH-NH4NO3",
"Z2Cu-NH4NO3",
"ZH-NH4NO3"])
sim.add_surface_sites(["Z1CuOH","Z2Cu","ZH","CuO"])
sim.add_reactions({"r1": ReactionType.EquilibriumArrhenius,
"r2a": ReactionType.EquilibriumArrhenius,
"r2b": ReactionType.EquilibriumArrhenius,
"r3": ReactionType.EquilibriumArrhenius,
"r4a": ReactionType.EquilibriumArrhenius,
"r4b": ReactionType.EquilibriumArrhenius,
# NO Oxidation
"r5f": ReactionType.Arrhenius,
"r5r": ReactionType.Arrhenius,
"r6f": ReactionType.Arrhenius,
"r6r": ReactionType.Arrhenius,
#NH3 Oxidation to N2
"r7": ReactionType.Arrhenius,
"r8": ReactionType.Arrhenius,
"r9": ReactionType.Arrhenius,
#NH3 Oxidation to NO
"r10": ReactionType.Arrhenius,
"r11": ReactionType.Arrhenius,
"r12": ReactionType.Arrhenius,
#NO SCR
"r13": ReactionType.Arrhenius,
"r14": ReactionType.Arrhenius,
"r15": ReactionType.Arrhenius,
"r16": ReactionType.Arrhenius,
"r17": ReactionType.Arrhenius,
#N2O Formation from NO SCR
"r18": ReactionType.Arrhenius,
"r19": ReactionType.Arrhenius,
"r20": ReactionType.Arrhenius,
#NH4NO3 Formation
"r21": ReactionType.Arrhenius,
"r22": ReactionType.Arrhenius,
"r23": ReactionType.Arrhenius,
"r24": ReactionType.Arrhenius,
#NH4NO3 Fast SCR
"r25": ReactionType.Arrhenius,
"r26": ReactionType.Arrhenius,
"r27": ReactionType.Arrhenius,
#NH4NO3 NO2 SCR
"r28": ReactionType.Arrhenius,
"r29": ReactionType.Arrhenius,
"r30": ReactionType.Arrhenius,
#NH4NO3 N2O Formation
"r31": ReactionType.Arrhenius,
"r32": ReactionType.Arrhenius,
"r33": ReactionType.Arrhenius,
#CuO NH3 Oxidation @ High Temp
"r34": ReactionType.Arrhenius,
"r35": ReactionType.Arrhenius,
"r36": ReactionType.Arrhenius,
#N2O formation from NH3 oxidation
"r37": ReactionType.Arrhenius,
"r38": ReactionType.Arrhenius,
"r39": ReactionType.Arrhenius
})
sim.set_bulk_porosity(0.3309)
sim.set_washcoat_porosity(0.4)
sim.set_reactor_radius(1)
sim.set_space_velocity_all_runs(1000) #volumes/min
sim.set_cell_density(62) # 62 cells per cm^2 (~400 cpsi)
# Setting up site balances using dicts
s1_data = {"mol_occupancy": {"Z1CuOH-NH3": 1, "Z1CuOH-H2O": 1, "Z1CuOH-NH4NO3": 1}}
s2_data = {"mol_occupancy": {"Z2Cu-NH3": 1, "Z2Cu-(NH3)2": 1, "Z2Cu-H2O": 1, "Z2Cu-NH4NO3": 1}}
s3_data = {"mol_occupancy": {"ZNH4": 1, "ZH-NH4NO3": 1}}
CuO_data = {"mol_occupancy": {}}
sim.set_site_balance("Z1CuOH",s1_data)
sim.set_site_balance("Z2Cu",s2_data)
sim.set_site_balance("ZH",s3_data)
sim.set_site_balance("CuO",CuO_data)
sim.set_reaction_info("r1", r1_equ)
sim.set_reaction_info("r2a", r2a_equ)
sim.set_reaction_info("r2b", r2b_equ)
sim.set_reaction_info("r3", r3_equ)
sim.set_reaction_info("r4a", r4a_equ)
sim.set_reaction_info("r4b", r4b_equ)
sim.set_reaction_info("r5f", r5f)
sim.set_reaction_info("r5r", r5r)
sim.set_reaction_info("r6f", r6f)
sim.set_reaction_info("r6r", r6r)
sim.set_reaction_info("r7", r7)
sim.set_reaction_info("r8", r8)
sim.set_reaction_info("r9", r9)
sim.set_reaction_info("r10", r10)
sim.set_reaction_info("r11", r11)
sim.set_reaction_info("r12", r12)
sim.set_reaction_info("r13", r13)
sim.set_reaction_info("r14", r14)
sim.set_reaction_info("r15", r15)
sim.set_reaction_info("r16", r16)
sim.set_reaction_info("r17", r17)
sim.set_reaction_info("r18", r18)
sim.set_reaction_info("r19", r19)
sim.set_reaction_info("r20", r20)
sim.set_reaction_info("r21", r21)
sim.set_reaction_info("r22", r22)
sim.set_reaction_info("r23", r23)
sim.set_reaction_info("r24", r24)
sim.set_reaction_info("r25", r25)
sim.set_reaction_info("r26", r26)
sim.set_reaction_info("r27", r27)
sim.set_reaction_info("r28", r28)
sim.set_reaction_info("r29", r29)
sim.set_reaction_info("r30", r30)
sim.set_reaction_info("r31", r31)
sim.set_reaction_info("r32", r32)
sim.set_reaction_info("r33", r33)
sim.set_reaction_info("r34", r34)
sim.set_reaction_info("r35", r35)
sim.set_reaction_info("r36", r36)
sim.set_reaction_info("r37", r37)
sim.set_reaction_info("r38", r38)
sim.set_reaction_info("r39", r39)
# ----------------- Unaged Site Densities -----------
sim.set_site_density("Z1CuOH","Unaged",0.052619016)
sim.set_site_density("Z2Cu","Unaged",0.023125746)
sim.set_site_density("ZH","Unaged",0.01632+0.003233+0.006699)
sim.set_site_density("CuO","Unaged",0.001147378)
# ----------------- 2hr Site Densities -----------
sim.set_site_density("Z1CuOH","2hr",0.051274815)
sim.set_site_density("Z2Cu","2hr",0.025820144)
sim.set_site_density("ZH","2hr",0.009147918+0.000423397+0.008572669)
sim.set_site_density("CuO","2hr",2.0144E-05)
# ----------------- 4hr Site Densities -----------
sim.set_site_density("Z1CuOH","4hr",0.049679956)
sim.set_site_density("Z2Cu","4hr",0.02692473)
sim.set_site_density("ZH","4hr",0.005127864+5.54458E-05+0.009298203)
sim.set_site_density("CuO","4hr",7.85352E-07)
# ----------------- 8hr Site Densities -----------
sim.set_site_density("Z1CuOH","8hr",0.04838926)
sim.set_site_density("Z2Cu","8hr",0.026648589)
sim.set_site_density("ZH","8hr",0.001611258+9.50848E-07+0.009687883)
sim.set_site_density("CuO","8hr",4.54455E-09)
# ----------------- 16hr Site Densities -----------
sim.set_site_density("Z1CuOH","16hr",0.050359742)
sim.set_site_density("Z2Cu","16hr",0.025179871)
sim.set_site_density("ZH","16hr",0.000159082+2.79637E-10+0.009755058)
sim.set_site_density("CuO","16hr",2.33028E-12)
# Setup all temperatures
sim.set_isothermal_temp("Unaged",Tstr,T)
sim.set_isothermal_temp("2hr",Tstr,T)
sim.set_isothermal_temp("4hr",Tstr,T)
sim.set_isothermal_temp("8hr",Tstr,T)
sim.set_isothermal_temp("16hr",Tstr,T)
# Build the constraints then discretize
sim.build_constraints()
sim.discretize_model(method=DiscretizationMethod.OrthogonalCollocation,
tstep=90,elems=5,colpoints=2)
# Initial conditions and Boundary Conditions should be set AFTER discretization
# ---------------- Unaged ICs ------------------
sim.set_const_IC("O2","Unaged",Tstr,O2)
sim.set_const_IC("H2O","Unaged",Tstr,H2O)
sim.set_const_IC("NH3","Unaged",Tstr,0)
sim.set_const_IC("NO","Unaged",Tstr,0)
sim.set_const_IC("NO2","Unaged",Tstr,0)
sim.set_const_IC("N2O","Unaged",Tstr,0)
sim.set_const_IC("N2","Unaged",Tstr,0.0184)
sim.set_const_IC("Z1CuOH-NH3","Unaged",Tstr,0)
sim.set_const_IC("Z2Cu-NH3","Unaged",Tstr,0)
sim.set_const_IC("Z2Cu-(NH3)2","Unaged",Tstr,0)
sim.set_const_IC("ZNH4","Unaged",Tstr,0)
sim.set_const_IC("Z1CuOH-H2O","Unaged",Tstr,0)
sim.set_const_IC("Z2Cu-H2O","Unaged",Tstr,0)
sim.set_const_IC("Z1CuOH-NH4NO3","Unaged",Tstr,0)
sim.set_const_IC("Z2Cu-NH4NO3","Unaged",Tstr,0)
sim.set_const_IC("ZH-NH4NO3","Unaged",Tstr,0)
# ---------------- 2hr ICs ------------------
sim.set_const_IC("O2","2hr",Tstr,O2)
sim.set_const_IC("H2O","2hr",Tstr,H2O)
sim.set_const_IC("NH3","2hr",Tstr,0)
sim.set_const_IC("NO","2hr",Tstr,0)
sim.set_const_IC("NO2","2hr",Tstr,0)
sim.set_const_IC("N2O","2hr",Tstr,0)
sim.set_const_IC("N2","2hr",Tstr,0.0184)
sim.set_const_IC("Z1CuOH-NH3","2hr",Tstr,0)
sim.set_const_IC("Z2Cu-NH3","2hr",Tstr,0)
sim.set_const_IC("Z2Cu-(NH3)2","2hr",Tstr,0)
sim.set_const_IC("ZNH4","2hr",Tstr,0)
sim.set_const_IC("Z1CuOH-H2O","2hr",Tstr,0)
sim.set_const_IC("Z2Cu-H2O","2hr",Tstr,0)
sim.set_const_IC("Z1CuOH-NH4NO3","2hr",Tstr,0)
sim.set_const_IC("Z2Cu-NH4NO3","2hr",Tstr,0)
sim.set_const_IC("ZH-NH4NO3","2hr",Tstr,0)
# ---------------- 4hr ICs ------------------
sim.set_const_IC("O2","4hr",Tstr,O2)
sim.set_const_IC("H2O","4hr",Tstr,H2O)
sim.set_const_IC("NH3","4hr",Tstr,0)
sim.set_const_IC("NO","4hr",Tstr,0)
sim.set_const_IC("NO2","4hr",Tstr,0)
sim.set_const_IC("N2O","4hr",Tstr,0)
sim.set_const_IC("N2","4hr",Tstr,0.0184)
sim.set_const_IC("Z1CuOH-NH3","4hr",Tstr,0)
sim.set_const_IC("Z2Cu-NH3","4hr",Tstr,0)
sim.set_const_IC("Z2Cu-(NH3)2","4hr",Tstr,0)
sim.set_const_IC("ZNH4","4hr",Tstr,0)
sim.set_const_IC("Z1CuOH-H2O","4hr",Tstr,0)
sim.set_const_IC("Z2Cu-H2O","4hr",Tstr,0)
sim.set_const_IC("Z1CuOH-NH4NO3","4hr",Tstr,0)
sim.set_const_IC("Z2Cu-NH4NO3","4hr",Tstr,0)
sim.set_const_IC("ZH-NH4NO3","4hr",Tstr,0)
# ---------------- 8hr ICs ------------------
sim.set_const_IC("O2","8hr",Tstr,O2)
sim.set_const_IC("H2O","8hr",Tstr,H2O)
sim.set_const_IC("NH3","8hr",Tstr,0)
sim.set_const_IC("NO","8hr",Tstr,0)
sim.set_const_IC("NO2","8hr",Tstr,0)
sim.set_const_IC("N2O","8hr",Tstr,0)
sim.set_const_IC("N2","8hr",Tstr,0.0184)
sim.set_const_IC("Z1CuOH-NH3","8hr",Tstr,0)
sim.set_const_IC("Z2Cu-NH3","8hr",Tstr,0)
sim.set_const_IC("Z2Cu-(NH3)2","8hr",Tstr,0)
sim.set_const_IC("ZNH4","8hr",Tstr,0)
sim.set_const_IC("Z1CuOH-H2O","8hr",Tstr,0)
sim.set_const_IC("Z2Cu-H2O","8hr",Tstr,0)
sim.set_const_IC("Z1CuOH-NH4NO3","8hr",Tstr,0)
sim.set_const_IC("Z2Cu-NH4NO3","8hr",Tstr,0)
sim.set_const_IC("ZH-NH4NO3","8hr",Tstr,0)
# ---------------- 16hr ICs ------------------
sim.set_const_IC("O2","16hr",Tstr,O2)
sim.set_const_IC("H2O","16hr",Tstr,H2O)
sim.set_const_IC("NH3","16hr",Tstr,0)
sim.set_const_IC("NO","16hr",Tstr,0)
sim.set_const_IC("NO2","16hr",Tstr,0)
sim.set_const_IC("N2O","16hr",Tstr,0)
sim.set_const_IC("N2","16hr",Tstr,0.0184)
sim.set_const_IC("Z1CuOH-NH3","16hr",Tstr,0)
sim.set_const_IC("Z2Cu-NH3","16hr",Tstr,0)
sim.set_const_IC("Z2Cu-(NH3)2","16hr",Tstr,0)
sim.set_const_IC("ZNH4","16hr",Tstr,0)
sim.set_const_IC("Z1CuOH-H2O","16hr",Tstr,0)
sim.set_const_IC("Z2Cu-H2O","16hr",Tstr,0)
sim.set_const_IC("Z1CuOH-NH4NO3","16hr",Tstr,0)
sim.set_const_IC("Z2Cu-NH4NO3","16hr",Tstr,0)
sim.set_const_IC("ZH-NH4NO3","16hr",Tstr,0)
#Read in data tuples to use as BCs
data_tup = naively_read_data_file("inputfiles/protocol_SCR_all-ages_"+Tstr+".txt",
factor=1,dict_of_tuples=True)
# ---------------- Unaged BCs ------------------
sim.set_time_dependent_BC("O2","Unaged",Tstr,
time_value_pairs=data_tup["O2_Unaged"],
initial_value=O2)
sim.set_time_dependent_BC("H2O","Unaged",Tstr,
time_value_pairs=data_tup["H2O_Unaged"],
initial_value=H2O)
sim.set_time_dependent_BC("NH3","Unaged",Tstr,
time_value_pairs=data_tup["NH3_Unaged"],
initial_value=0)
sim.set_time_dependent_BC("NO","Unaged",Tstr,
time_value_pairs=data_tup["NO_Unaged"],
initial_value=0)
sim.set_time_dependent_BC("NO2","Unaged",Tstr,
time_value_pairs=data_tup["NO2_Unaged"],
initial_value=0)
sim.set_const_BC("N2O","Unaged",Tstr,0)
sim.set_const_BC("N2","Unaged",Tstr,0.0184)
# ---------------- 2hr BCs ------------------
sim.set_time_dependent_BC("O2","2hr",Tstr,
time_value_pairs=data_tup["O2_2hr"],
initial_value=O2)
sim.set_time_dependent_BC("H2O","2hr",Tstr,
time_value_pairs=data_tup["H2O_2hr"],
initial_value=H2O)
sim.set_time_dependent_BC("NH3","2hr",Tstr,
time_value_pairs=data_tup["NH3_2hr"],
initial_value=0)
sim.set_time_dependent_BC("NO","2hr",Tstr,
time_value_pairs=data_tup["NO_2hr"],
initial_value=0)
sim.set_time_dependent_BC("NO2","2hr",Tstr,
time_value_pairs=data_tup["NO2_2hr"],
initial_value=0)
sim.set_const_BC("N2O","2hr",Tstr,0)
sim.set_const_BC("N2","2hr",Tstr,0.0184)
# ---------------- 4hr BCs ------------------
sim.set_time_dependent_BC("O2","4hr",Tstr,
time_value_pairs=data_tup["O2_4hr"],
initial_value=O2)
sim.set_time_dependent_BC("H2O","4hr",Tstr,
time_value_pairs=data_tup["H2O_4hr"],
initial_value=H2O)
sim.set_time_dependent_BC("NH3","4hr",Tstr,
time_value_pairs=data_tup["NH3_4hr"],
initial_value=0)
sim.set_time_dependent_BC("NO","4hr",Tstr,
time_value_pairs=data_tup["NO_4hr"],
initial_value=0)
sim.set_time_dependent_BC("NO2","4hr",Tstr,
time_value_pairs=data_tup["NO2_4hr"],
initial_value=0)
sim.set_const_BC("N2O","4hr",Tstr,0)
sim.set_const_BC("N2","4hr",Tstr,0.0184)
# ---------------- 8hr BCs ------------------
sim.set_time_dependent_BC("O2","8hr",Tstr,
time_value_pairs=data_tup["O2_8hr"],
initial_value=O2)
sim.set_time_dependent_BC("H2O","8hr",Tstr,
time_value_pairs=data_tup["H2O_8hr"],
initial_value=H2O)
sim.set_time_dependent_BC("NH3","8hr",Tstr,
time_value_pairs=data_tup["NH3_8hr"],
initial_value=0)
sim.set_time_dependent_BC("NO","8hr",Tstr,
time_value_pairs=data_tup["NO_8hr"],
initial_value=0)
sim.set_time_dependent_BC("NO2","8hr",Tstr,
time_value_pairs=data_tup["NO2_8hr"],
initial_value=0)
sim.set_const_BC("N2O","8hr",Tstr,0)
sim.set_const_BC("N2","8hr",Tstr,0.0184)
# ---------------- 16hr BCs ------------------
sim.set_time_dependent_BC("O2","16hr",Tstr,
time_value_pairs=data_tup["O2_16hr"],
initial_value=O2)
sim.set_time_dependent_BC("H2O","16hr",Tstr,
time_value_pairs=data_tup["H2O_16hr"],
initial_value=H2O)
sim.set_time_dependent_BC("NH3","16hr",Tstr,
time_value_pairs=data_tup["NH3_16hr"],
initial_value=0)
sim.set_time_dependent_BC("NO","16hr",Tstr,
time_value_pairs=data_tup["NO_16hr"],
initial_value=0)
sim.set_time_dependent_BC("NO2","16hr",Tstr,
time_value_pairs=data_tup["NO2_16hr"],
initial_value=0)
sim.set_const_BC("N2O","16hr",Tstr,0)
sim.set_const_BC("N2","16hr",Tstr,0.0184)
data_tup.clear()
# Fix all reactions for simulation mode only
sim.fix_all_reactions()
#Customize the weight factors
sim.auto_select_all_weight_factors()
#Select specific weight factor windows based on observed data
sim.ignore_weight_factor("NH3","Unaged",Tstr,time_window=(93,120))
sim.ignore_weight_factor("NO","Unaged",Tstr,time_window=(93,120))
sim.ignore_weight_factor("NO2","Unaged",Tstr,time_window=(93,120))
sim.ignore_weight_factor("N2O","Unaged",Tstr,time_window=(93,120))
sim.ignore_weight_factor("NH3","2hr",Tstr,time_window=(101,120))
sim.ignore_weight_factor("NO","2hr",Tstr,time_window=(101,120))
sim.ignore_weight_factor("NO2","2hr",Tstr,time_window=(101,120))
sim.ignore_weight_factor("N2O","2hr",Tstr,time_window=(101,120))
sim.ignore_weight_factor("NH3","4hr",Tstr,time_window=(48,57))
sim.ignore_weight_factor("NO","4hr",Tstr,time_window=(48,57))
sim.ignore_weight_factor("NO2","4hr",Tstr,time_window=(48,57))
sim.ignore_weight_factor("N2O","4hr",Tstr,time_window=(48,57))
sim.ignore_weight_factor("NH3","8hr",Tstr,time_window=(51,58))
sim.ignore_weight_factor("NO","8hr",Tstr,time_window=(51,58))
sim.ignore_weight_factor("NO2","8hr",Tstr,time_window=(51,58))
sim.ignore_weight_factor("N2O","8hr",Tstr,time_window=(51,58))
sim.ignore_weight_factor("NH3","16hr",Tstr,time_window=(48,57))
sim.ignore_weight_factor("NO","16hr",Tstr,time_window=(48,57))
sim.ignore_weight_factor("NO2","16hr",Tstr,time_window=(48,57))
sim.ignore_weight_factor("N2O","16hr",Tstr,time_window=(48,57))
sim.initialize_auto_scaling()
sim.initialize_simulator()
sim.finalize_auto_scaling()
sim.run_solver()
sim.print_results_of_breakthrough(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"Unaged", Tstr, file_name="Unaged_SCR_"+Tstr+"_breakthrough.txt")
sim.print_results_of_location(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"Unaged", Tstr, 0, file_name="Unaged_SCR_"+Tstr+"_bypass.txt")
sim.print_results_of_integral_average(["Z1CuOH-NH3","Z2Cu-NH3","Z2Cu-(NH3)2","ZNH4",
"Z1CuOH-NH4NO3", "Z2Cu-NH4NO3", "ZH-NH4NO3"],
"Unaged", Tstr, file_name="Unaged_SCR_"+Tstr+"_average_ads.txt")
sim.print_results_of_breakthrough(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"2hr", Tstr, file_name="2hr_SCR_"+Tstr+"_breakthrough.txt")
sim.print_results_of_location(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"2hr", Tstr, 0, file_name="2hr_SCR_"+Tstr+"_bypass.txt")
sim.print_results_of_integral_average(["Z1CuOH-NH3","Z2Cu-NH3","Z2Cu-(NH3)2","ZNH4",
"Z1CuOH-NH4NO3", "Z2Cu-NH4NO3", "ZH-NH4NO3"],
"2hr", Tstr, file_name="2hr_SCR_"+Tstr+"_average_ads.txt")
sim.print_results_of_breakthrough(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"4hr", Tstr, file_name="4hr_SCR_"+Tstr+"_breakthrough.txt")
sim.print_results_of_location(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"4hr", Tstr, 0, file_name="4hr_SCR_"+Tstr+"_bypass.txt")
sim.print_results_of_integral_average(["Z1CuOH-NH3","Z2Cu-NH3","Z2Cu-(NH3)2","ZNH4",
"Z1CuOH-NH4NO3", "Z2Cu-NH4NO3", "ZH-NH4NO3"],
"4hr", Tstr, file_name="4hr_SCR_"+Tstr+"_average_ads.txt")
sim.print_results_of_breakthrough(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"8hr", Tstr, file_name="8hr_SCR_"+Tstr+"_breakthrough.txt")
sim.print_results_of_location(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"8hr", Tstr, 0, file_name="8hr_SCR_"+Tstr+"_bypass.txt")
sim.print_results_of_integral_average(["Z1CuOH-NH3","Z2Cu-NH3","Z2Cu-(NH3)2","ZNH4",
"Z1CuOH-NH4NO3", "Z2Cu-NH4NO3", "ZH-NH4NO3"],
"8hr", Tstr, file_name="8hr_SCR_"+Tstr+"_average_ads.txt")
sim.print_results_of_breakthrough(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"16hr", Tstr, file_name="16hr_SCR_"+Tstr+"_breakthrough.txt")
sim.print_results_of_location(["NH3","NO","NO2","N2O","O2","N2","H2O"],
"16hr", Tstr, 0, file_name="16hr_SCR_"+Tstr+"_bypass.txt")
sim.print_results_of_integral_average(["Z1CuOH-NH3","Z2Cu-NH3","Z2Cu-(NH3)2","ZNH4",
"Z1CuOH-NH4NO3", "Z2Cu-NH4NO3", "ZH-NH4NO3"],
"16hr", Tstr, file_name="16hr_SCR_"+Tstr+"_average_ads.txt")
sim.print_kinetic_parameter_info(file_name=Tstr+"_opt_params.txt")
sim.save_model_state(file_name=Tstr+"_model.json")
sim.plot_vs_data("NH3", "Unaged", Tstr, 5, display_live=False)
sim.plot_vs_data("NH3", "2hr", Tstr, 5, display_live=False)
sim.plot_vs_data("NH3", "4hr", Tstr, 5, display_live=False)
sim.plot_vs_data("NH3", "8hr", Tstr, 5, display_live=False)
sim.plot_vs_data("NH3", "16hr", Tstr, 5, display_live=False)
sim.plot_vs_data("NO", "Unaged", Tstr, 5, display_live=False)
sim.plot_vs_data("NO", "2hr", Tstr, 5, display_live=False)
sim.plot_vs_data("NO", "4hr", Tstr, 5, display_live=False)
sim.plot_vs_data("NO", "8hr", Tstr, 5, display_live=False)
sim.plot_vs_data("NO", "16hr", Tstr, 5, display_live=False)
sim.plot_vs_data("NO2", "Unaged", Tstr, 5, display_live=False)
sim.plot_vs_data("NO2", "2hr", Tstr, 5, display_live=False)
sim.plot_vs_data("NO2", "4hr", Tstr, 5, display_live=False)
sim.plot_vs_data("NO2", "8hr", Tstr, 5, display_live=False)
sim.plot_vs_data("NO2", "16hr", Tstr, 5, display_live=False)
sim.plot_vs_data("N2O", "Unaged", Tstr, 5, display_live=False)
sim.plot_vs_data("N2O", "2hr", Tstr, 5, display_live=False)
sim.plot_vs_data("N2O", "4hr", Tstr, 5, display_live=False)
sim.plot_vs_data("N2O", "8hr", Tstr, 5, display_live=False)
sim.plot_vs_data("N2O", "16hr", Tstr, 5, display_live=False)
| 40.784666
| 105
| 0.614895
|
d075d819e3fff20b434ff003783aae953b28ea70
| 927
|
py
|
Python
|
djangocms_link/migrations/0004_auto_20150708_1133.py
|
yakky/djangocms-link
|
31ed11a74c8abc93d851ca54d426de27ac398838
|
[
"BSD-3-Clause"
] | null | null | null |
djangocms_link/migrations/0004_auto_20150708_1133.py
|
yakky/djangocms-link
|
31ed11a74c8abc93d851ca54d426de27ac398838
|
[
"BSD-3-Clause"
] | null | null | null |
djangocms_link/migrations/0004_auto_20150708_1133.py
|
yakky/djangocms-link
|
31ed11a74c8abc93d851ca54d426de27ac398838
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import djangocms_link.validators
class Migration(migrations.Migration):
dependencies = [
('djangocms_link', '0003_auto_20150212_1310'),
]
operations = [
migrations.AlterField(
model_name='link',
name='anchor',
field=models.CharField(help_text='This applies only to page and text links. Do <em>not</em> include a preceding "#" symbol.', max_length=128, verbose_name='anchor', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='link',
name='url',
field=models.CharField(blank=True, max_length=2048, null=True, verbose_name='link', validators=[djangocms_link.validators.IntranetURLValidator(intranet_host_re=None)]),
preserve_default=True,
),
]
| 33.107143
| 189
| 0.649407
|
4dba2b70fa9ae61569cc7bd52c17176cec9d67f6
| 10,424
|
py
|
Python
|
contentcuration/contentcuration/tests/viewsets/test_clipboard.py
|
kollivier/studio
|
9089780858ae9870421056b4e6e5659ae854db57
|
[
"MIT"
] | 1
|
2019-03-30T18:14:25.000Z
|
2019-03-30T18:14:25.000Z
|
contentcuration/contentcuration/tests/viewsets/test_clipboard.py
|
kollivier/studio
|
9089780858ae9870421056b4e6e5659ae854db57
|
[
"MIT"
] | 2
|
2019-04-06T07:06:08.000Z
|
2019-04-08T23:33:53.000Z
|
contentcuration/contentcuration/tests/viewsets/test_clipboard.py
|
MisRob/studio
|
92a5c780c8952f7d37db38952483ab7a28d3cb9d
|
[
"MIT"
] | 1
|
2020-10-20T05:21:56.000Z
|
2020-10-20T05:21:56.000Z
|
from __future__ import absolute_import
import uuid
from django.core.management import call_command
from django.core.urlresolvers import reverse
from le_utils.constants import content_kinds
from contentcuration import models
from contentcuration.tests import testdata
from contentcuration.tests.base import StudioAPITestCase
from contentcuration.viewsets.sync.constants import CLIPBOARD
from contentcuration.viewsets.sync.utils import generate_create_event
from contentcuration.viewsets.sync.utils import generate_delete_event
from contentcuration.viewsets.sync.utils import generate_update_event
class SyncTestCase(StudioAPITestCase):
@classmethod
def setUpClass(cls):
cls.create_bucket()
super(SyncTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(SyncTestCase, cls).tearDownClass()
cls.create_bucket()
@classmethod
def setUpTestData(cls):
call_command("loadconstants")
cls.user = testdata.user()
cls.channel = testdata.channel()
cls.channel.viewers.add(cls.user)
@property
def sync_url(self):
return reverse("sync")
@property
def clipboard_metadata(self):
return {
"id": uuid.uuid4().hex,
"kind": content_kinds.VIDEO,
"parent": self.user.clipboard_tree_id,
"source_node_id": self.channel.main_tree.get_descendants()
.filter(kind_id=content_kinds.VIDEO)
.first()
.node_id,
"source_channel_id": self.channel.id,
}
@property
def clipboard_db_metadata(self):
return {
"id": uuid.uuid4().hex,
"kind_id": content_kinds.VIDEO,
"parent_id": self.user.clipboard_tree_id,
"source_node_id": self.channel.main_tree.get_descendants()
.filter(kind_id=content_kinds.VIDEO)
.first()
.node_id,
"source_channel_id": self.channel.id,
}
def test_create_clipboard(self):
self.client.force_authenticate(user=self.user)
clipboard = self.clipboard_metadata
response = self.client.post(
self.sync_url,
[generate_create_event(clipboard["id"], CLIPBOARD, clipboard)],
format="json",
)
self.assertEqual(response.status_code, 200, response.content)
try:
models.ContentNode.objects.get(id=clipboard["id"])
except models.ContentNode.DoesNotExist:
self.fail("ContentNode was not created")
def test_create_clipboard_with_null_extra_fields(self):
self.client.force_authenticate(user=self.user)
clipboard = self.clipboard_metadata
clipboard["extra_fields"] = None
response = self.client.post(
self.sync_url,
[generate_create_event(clipboard["id"], CLIPBOARD, clipboard)],
format="json",
)
self.assertEqual(response.status_code, 200, response.content)
try:
models.ContentNode.objects.get(id=clipboard["id"])
except models.ContentNode.DoesNotExist:
self.fail("ContentNode was not created")
def test_create_clipboard_with_parent(self):
channel = testdata.channel()
channel.editors.add(self.user)
self.client.force_authenticate(user=self.user)
clipboard = self.clipboard_metadata
clipboard["parent"] = channel.main_tree_id
response = self.client.post(
self.sync_url,
[generate_create_event(clipboard["id"], CLIPBOARD, clipboard)],
format="json",
)
self.assertEqual(response.status_code, 200, response.content)
try:
new_node = models.ContentNode.objects.get(id=clipboard["id"])
except models.ContentNode.DoesNotExist:
self.fail("ContentNode was not created")
self.assertEqual(new_node.parent_id, channel.main_tree_id)
def test_create_clipboards(self):
self.client.force_authenticate(user=self.user)
clipboard1 = self.clipboard_metadata
clipboard2 = self.clipboard_metadata
response = self.client.post(
self.sync_url,
[
generate_create_event(clipboard1["id"], CLIPBOARD, clipboard1),
generate_create_event(clipboard2["id"], CLIPBOARD, clipboard2),
],
format="json",
)
self.assertEqual(response.status_code, 200, response.content)
try:
models.ContentNode.objects.get(id=clipboard1["id"])
except models.ContentNode.DoesNotExist:
self.fail("ContentNode 1 was not created")
try:
models.ContentNode.objects.get(id=clipboard2["id"])
except models.ContentNode.DoesNotExist:
self.fail("ContentNode 2 was not created")
def test_update_clipboard_extra_fields(self):
clipboard = models.ContentNode.objects.create(**self.clipboard_db_metadata)
node_id1 = uuid.uuid4().hex
self.client.force_authenticate(user=self.user)
response = self.client.post(
self.sync_url,
[
generate_update_event(
clipboard.id,
CLIPBOARD,
{"extra_fields.excluded_descendants.{}".format(node_id1): True},
)
],
format="json",
)
self.assertEqual(response.status_code, 200, response.content)
self.assertTrue(
models.ContentNode.objects.get(id=clipboard.id).extra_fields[
"excluded_descendants"
][node_id1]
)
node_id2 = uuid.uuid4().hex
response = self.client.post(
self.sync_url,
[
generate_update_event(
clipboard.id,
CLIPBOARD,
{"extra_fields.excluded_descendants.{}".format(node_id2): True},
)
],
format="json",
)
self.assertEqual(response.status_code, 200, response.content)
self.assertTrue(
models.ContentNode.objects.get(id=clipboard.id).extra_fields[
"excluded_descendants"
][node_id1]
)
self.assertTrue(
models.ContentNode.objects.get(id=clipboard.id).extra_fields[
"excluded_descendants"
][node_id2]
)
def test_delete_clipboard(self):
clipboard = models.ContentNode.objects.create(**self.clipboard_db_metadata)
self.client.force_authenticate(user=self.user)
response = self.client.post(
self.sync_url,
[generate_delete_event(clipboard.id, CLIPBOARD)],
format="json",
)
self.assertEqual(response.status_code, 200, response.content)
try:
models.ContentNode.objects.get(id=clipboard.id)
self.fail("ContentNode was not deleted")
except models.ContentNode.DoesNotExist:
pass
def test_delete_clipboards(self):
clipboard1 = models.ContentNode.objects.create(**self.clipboard_db_metadata)
clipboard2 = models.ContentNode.objects.create(**self.clipboard_db_metadata)
self.client.force_authenticate(user=self.user)
response = self.client.post(
self.sync_url,
[
generate_delete_event(clipboard1.id, CLIPBOARD),
generate_delete_event(clipboard2.id, CLIPBOARD),
],
format="json",
)
self.assertEqual(response.status_code, 200, response.content)
try:
models.ContentNode.objects.get(id=clipboard1.id)
self.fail("ContentNode 1 was not deleted")
except models.ContentNode.DoesNotExist:
pass
try:
models.ContentNode.objects.get(id=clipboard2.id)
self.fail("ContentNode 2 was not deleted")
except models.ContentNode.DoesNotExist:
pass
class CRUDTestCase(StudioAPITestCase):
@classmethod
def setUpClass(cls):
cls.create_bucket()
super(CRUDTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(CRUDTestCase, cls).tearDownClass()
cls.create_bucket()
@classmethod
def setUpTestData(cls):
call_command("loadconstants")
cls.user = testdata.user()
cls.channel = testdata.channel()
cls.channel.viewers.add(cls.user)
@property
def clipboard_metadata(self):
return {
"id": uuid.uuid4().hex,
"kind": content_kinds.VIDEO,
"parent": self.user.clipboard_tree_id,
"source_node_id": self.channel.main_tree.get_descendants()
.filter(kind_id=content_kinds.VIDEO)
.first()
.node_id,
"source_channel_id": self.channel.id,
}
@property
def clipboard_db_metadata(self):
return {
"id": uuid.uuid4().hex,
"kind_id": content_kinds.VIDEO,
"parent_id": self.user.clipboard_tree_id,
"source_node_id": self.channel.main_tree.get_descendants()
.filter(kind_id=content_kinds.VIDEO)
.first()
.node_id,
"source_channel_id": self.channel.id,
}
def test_create_clipboard(self):
self.client.force_authenticate(user=self.user)
clipboard = self.clipboard_metadata
response = self.client.post(
reverse("clipboard-list"), clipboard, format="json",
)
self.assertEqual(response.status_code, 201, response.content)
try:
models.ContentNode.objects.get(id=clipboard["id"])
except models.ContentNode.DoesNotExist:
self.fail("ContentNode was not created")
def test_delete_clipboard(self):
clipboard = models.ContentNode.objects.create(**self.clipboard_db_metadata)
self.client.force_authenticate(user=self.user)
response = self.client.delete(
reverse("clipboard-detail", kwargs={"pk": clipboard.id})
)
self.assertEqual(response.status_code, 204, response.content)
try:
models.ContentNode.objects.get(id=clipboard.id)
self.fail("ContentNode was not deleted")
except models.ContentNode.DoesNotExist:
pass
| 34.979866
| 84
| 0.620299
|
cf79cb7d8c5161f88581e48b3f1d393a3676d375
| 160
|
py
|
Python
|
sentry_auth_adfs_oauth2/__init__.py
|
ccpgames/sentry-auth-adfs-oauth2
|
14a29ac7232978b670df6b7fe579a9beea3cc8c9
|
[
"MIT"
] | null | null | null |
sentry_auth_adfs_oauth2/__init__.py
|
ccpgames/sentry-auth-adfs-oauth2
|
14a29ac7232978b670df6b7fe579a9beea3cc8c9
|
[
"MIT"
] | null | null | null |
sentry_auth_adfs_oauth2/__init__.py
|
ccpgames/sentry-auth-adfs-oauth2
|
14a29ac7232978b670df6b7fe579a9beea3cc8c9
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from sentry.auth import register
from .provider import AdfsOAuth2Provider
register('afds_oauth2', AdfsOAuth2Provider)
| 20
| 43
| 0.85
|
656c8acdb6d9e3b718ee2cb335308aa48d3ed365
| 181
|
py
|
Python
|
src/compendium/filetypes/__init__.py
|
kuwv/python-compendium
|
881c280ac09391594154ce85916a03dbb655b166
|
[
"Apache-2.0"
] | null | null | null |
src/compendium/filetypes/__init__.py
|
kuwv/python-compendium
|
881c280ac09391594154ce85916a03dbb655b166
|
[
"Apache-2.0"
] | null | null | null |
src/compendium/filetypes/__init__.py
|
kuwv/python-compendium
|
881c280ac09391594154ce85916a03dbb655b166
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# copyright: (c) 2020 by Jesse Johnson.
# license: Apache 2.0, see LICENSE for more details.
"""Provide modules to support disparate configuration types."""
| 36.2
| 63
| 0.701657
|
8ff11fa6f0c813ee01535bff22861560a5c343ea
| 2,089
|
py
|
Python
|
dbaas/notification/migrations/0007_auto__del_field_taskhistory_db_id.py
|
didindinn/database-as-a-service
|
747de31ff8546f7874ddd654af860e130afd17a0
|
[
"BSD-3-Clause"
] | 303
|
2015-01-08T10:35:54.000Z
|
2022-02-28T08:54:06.000Z
|
dbaas/notification/migrations/0007_auto__del_field_taskhistory_db_id.py
|
nouraellm/database-as-a-service
|
5e655c9347bea991b7218a01549f5e44f161d7be
|
[
"BSD-3-Clause"
] | 124
|
2015-01-14T12:56:15.000Z
|
2022-03-22T20:45:11.000Z
|
dbaas/notification/migrations/0007_auto__del_field_taskhistory_db_id.py
|
nouraellm/database-as-a-service
|
5e655c9347bea991b7218a01549f5e44f161d7be
|
[
"BSD-3-Clause"
] | 110
|
2015-01-02T11:59:48.000Z
|
2022-02-28T08:54:06.000Z
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'TaskHistory.db_id'
db.delete_column(u'notification_taskhistory', 'db_id_id')
def backwards(self, orm):
# Adding field 'TaskHistory.db_id'
db.add_column(u'notification_taskhistory', 'db_id',
self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'database', null=True, to=orm['logical.Database'], on_delete=models.SET_NULL, blank=True),
keep_default=False)
models = {
u'notification.taskhistory': {
'Meta': {'object_name': 'TaskHistory'},
'arguments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_status': ('django.db.models.fields.CharField', [], {'default': "u'PENDING'", 'max_length': '100', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['notification']
| 53.564103
| 180
| 0.592628
|
f328a31a99555f1313557b0f3ba0081728c81b3a
| 3,830
|
py
|
Python
|
stubs.min/Rhino/Geometry/__init___parts/Matrix.py
|
ricardyn/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | 1
|
2021-02-02T13:39:16.000Z
|
2021-02-02T13:39:16.000Z
|
stubs.min/Rhino/Geometry/__init___parts/Matrix.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
stubs.min/Rhino/Geometry/__init___parts/Matrix.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
class Matrix(object,IDisposable):
"""
Matrix(rowCount: int,columnCount: int)
Matrix(xform: Transform)
"""
def BackSolve(self,zeroTolerance,b):
""" BackSolve(self: Matrix,zeroTolerance: float,b: Array[float]) -> Array[float] """
pass
def BackSolvePoints(self,zeroTolerance,b):
""" BackSolvePoints(self: Matrix,zeroTolerance: float,b: Array[Point3d]) -> Array[Point3d] """
pass
def Dispose(self):
""" Dispose(self: Matrix) """
pass
def Duplicate(self):
""" Duplicate(self: Matrix) -> Matrix """
pass
def GetHashCode(self):
""" GetHashCode(self: Matrix) -> int """
pass
def Invert(self,zeroTolerance):
""" Invert(self: Matrix,zeroTolerance: float) -> bool """
pass
def RowReduce(self,zeroTolerance,*__args):
"""
RowReduce(self: Matrix,zeroTolerance: float,b: Array[Point3d]) -> (int,float)
RowReduce(self: Matrix,zeroTolerance: float,b: Array[float]) -> (int,float)
RowReduce(self: Matrix,zeroTolerance: float) -> (int,float,float)
"""
pass
def Scale(self,s):
""" Scale(self: Matrix,s: float) """
pass
def SetDiagonal(self,d):
""" SetDiagonal(self: Matrix,d: float) """
pass
def SwapColumns(self,columnA,columnB):
""" SwapColumns(self: Matrix,columnA: int,columnB: int) -> bool """
pass
def SwapRows(self,rowA,rowB):
""" SwapRows(self: Matrix,rowA: int,rowB: int) -> bool """
pass
def Transpose(self):
""" Transpose(self: Matrix) -> bool """
pass
def Zero(self):
""" Zero(self: Matrix) """
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __mul__(self,*args):
""" x.__mul__(y) <==> x*y """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,rowCount: int,columnCount: int)
__new__(cls: type,xform: Transform)
"""
pass
def __radd__(self,*args):
""" __radd__(a: Matrix,b: Matrix) -> Matrix """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
def __rmul__(self,*args):
""" __rmul__(a: Matrix,b: Matrix) -> Matrix """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
ColumnCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ColumnCount(self: Matrix) -> int
"""
IsColumnOrthogonal=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsColumnOrthogonal(self: Matrix) -> bool
"""
IsColumnOrthoNormal=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsColumnOrthoNormal(self: Matrix) -> bool
"""
IsRowOrthogonal=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsRowOrthogonal(self: Matrix) -> bool
"""
IsRowOrthoNormal=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsRowOrthoNormal(self: Matrix) -> bool
"""
IsSquare=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsSquare(self: Matrix) -> bool
"""
IsValid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsValid(self: Matrix) -> bool
"""
RowCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: RowCount(self: Matrix) -> int
"""
| 30.15748
| 215
| 0.643603
|
9df50268fb2f9cc50105fece77343c8d5e7ef878
| 3,062
|
py
|
Python
|
080 Search in Rotated Sorted Array II.py
|
scorpionpd/LeetCode-all
|
0d65494f37d093d650b83b93409e874c041f3abe
|
[
"MIT"
] | null | null | null |
080 Search in Rotated Sorted Array II.py
|
scorpionpd/LeetCode-all
|
0d65494f37d093d650b83b93409e874c041f3abe
|
[
"MIT"
] | null | null | null |
080 Search in Rotated Sorted Array II.py
|
scorpionpd/LeetCode-all
|
0d65494f37d093d650b83b93409e874c041f3abe
|
[
"MIT"
] | null | null | null |
"""
Follow up for "Search in Rotated Sorted Array":
What if duplicates are allowed?
Would this affect the run-time complexity? How and why?
Write a function to determine if a given target is in the array.
"""
__author__ = 'Danyang'
class Solution:
def search_set(self, A, target):
"""
Follow up 033 Search in Rotated Sorted Array
Duplicate allowed
rotated cases:
target is 3
start mid end
case 1:
1, 1, 1, 1, 1, 3, 1
case 2:
1, 3, 1, 1, 1, 1, 1
Algorithm: eliminate duplicates
:param A: a list of integers
:param target: an integer
:return: a boolean
"""
A = list(set(A)) # short-cut eliminate duplicates # but O(n)
length = len(A)
start = 0
end = length-1
while start<=end:
            mid = (start+end)//2
# found
if A[mid]==target:
return True
# case 1
if A[start]<A[mid]<A[end]:
if target>A[mid]:
start = mid+1
else:
end = mid-1
# case 2
elif A[start]>A[mid] and A[mid]<A[end]:
if target>A[mid] and target<=A[end]:
start = mid+1
else:
end = mid-1
# case 3
else:
if target<A[mid] and target>=A[start]:
end = mid-1
else:
start = mid+1
return False
def search(self, A, target):
"""
Follow up 033 Search in Rotated Sorted Array
Duplicate allowed
rotated cases:
target is 3
start mid end
case 1:
1, 1, 1, 1, 1, 3, 1
case 2:
1, 3, 1, 1, 1, 1, 1
Algorithm: advance pointer if undetermined
:param A: a list of integers
:param target: an integer
:return: a boolean
"""
length = len(A)
start = 0
end = length-1
while start<=end:
            mid = (start+end)//2
# found
if A[mid]==target:
return True
# undetermined # the only significant difference.
if A[start]==A[mid]:
start += 1
# case 1
elif A[start]<A[mid]<=A[end]:
if target>A[mid]:
start = mid+1
else:
end = mid-1
# case 2
elif A[start]>A[mid] and A[mid]<=A[end]: # slight difference compared to A[mid]<A[end]
if target>A[mid] and target<=A[end]:
start = mid+1
else:
end = mid-1
# case 3
else:
if target<A[mid] and target>=A[start]:
end = mid-1
else:
start = mid+1
return False
if __name__=="__main__":
assert Solution().search([1,1,3,1], 3)==True
| 27.585586
| 99
| 0.441868
|
9b9ebade1d0c1ab34f8c8dab507bf67f49f54292
| 3,672
|
py
|
Python
|
nmosauth/auth_server/models.py
|
bbc/nmos-auth-server
|
0f7a57f77798325e0b36b6a253570d111dd89603
|
[
"Apache-2.0"
] | 9
|
2019-05-17T07:43:15.000Z
|
2020-12-06T00:40:55.000Z
|
nmosauth/auth_server/models.py
|
bbc/nmos-auth-server
|
0f7a57f77798325e0b36b6a253570d111dd89603
|
[
"Apache-2.0"
] | 3
|
2019-06-14T14:50:48.000Z
|
2020-05-07T13:40:54.000Z
|
nmosauth/auth_server/models.py
|
bbc/nmos-auth-server
|
0f7a57f77798325e0b36b6a253570d111dd89603
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import check_password_hash
from authlib.integrations.sqla_oauth2 import (
OAuth2ClientMixin,
OAuth2AuthorizationCodeMixin,
OAuth2TokenMixin,
)
__all__ = ['db', 'AdminUser', 'OAuth2Client',
'OAuth2AuthorizationCode', 'OAuth2Token', 'ResourceOwner']
db = SQLAlchemy()
class AdminUser(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(40), unique=True)
password = db.Column(db.String(20))
def __str__(self):
output = {}
for c in self.__table__.columns:
output[c.name] = getattr(self, c.name)
return str(output)
def get_user_id(self):
return self.id
def check_password(self, password):
return check_password_hash(self.password, password)
class OAuth2Client(db.Model, OAuth2ClientMixin):
__tablename__ = 'oauth2_client'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(
db.Integer, db.ForeignKey('admin_user.id', ondelete='CASCADE'))
admin_user = db.relationship('AdminUser')
class OAuth2AuthorizationCode(db.Model, OAuth2AuthorizationCodeMixin):
__tablename__ = 'oauth2_code'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(
db.Integer, db.ForeignKey('admin_user.id', ondelete='CASCADE'))
admin_user = db.relationship('AdminUser')
code_challenge = db.Column(db.String(48))
code_challenge_method = db.Column(db.String(5))
class OAuth2Token(db.Model, OAuth2TokenMixin):
__tablename__ = 'oauth2_token'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(
db.Integer, db.ForeignKey('admin_user.id', ondelete='CASCADE'))
admin_user = db.relationship('AdminUser')
access_token = db.Column(db.String(1000), nullable=False)
def is_refresh_token_expired(self):
expires_at = self.issued_at + self.expires_in * 1440
return expires_at < time.time()
class ResourceOwner(db.Model):
__tablename__ = 'resource_owner'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(
db.Integer, db.ForeignKey('admin_user.id', ondelete='CASCADE'))
admin_user = db.relationship('AdminUser')
username = db.Column(db.String(40), unique=True, nullable=False)
password = db.Column(db.String(20))
# Permissions of User for each NMOS API
registration_access = db.Column(db.String(25))
query_access = db.Column(db.String(25))
node_access = db.Column(db.String(25))
connection_access = db.Column(db.String(25))
netctrl_access = db.Column(db.String(25))
events_access = db.Column(db.String(25))
channelmapping_access = db.Column(db.String(25))
def get_user_id(self):
return self.id
def check_password(self, password):
return check_password_hash(self.password, password)
def __str__(self):
output = {}
for c in self.__table__.columns:
output[c.name] = getattr(self, c.name)
return str(output)
| 32.495575
| 74
| 0.702887
|
1a41760f8814ef69a10811ce858f434a8831fd73
| 9,863
|
py
|
Python
|
youtube_dl/extractor/dplay.py
|
siddht4/youtube_dl_custom
|
648d01c1cdc7b3e11b4f812520fa39474719cdfc
|
[
"Unlicense"
] | 12
|
2017-11-01T12:35:47.000Z
|
2020-02-26T19:41:30.000Z
|
youtube_dl/extractor/dplay.py
|
siddht4/youtube_dl_custom
|
648d01c1cdc7b3e11b4f812520fa39474719cdfc
|
[
"Unlicense"
] | 8
|
2017-12-05T23:45:54.000Z
|
2022-02-09T23:28:51.000Z
|
youtube_dl/extractor/dplay.py
|
siddht4/youtube_dl_custom
|
648d01c1cdc7b3e11b4f812520fa39474719cdfc
|
[
"Unlicense"
] | 6
|
2017-07-15T07:17:29.000Z
|
2018-03-13T07:31:18.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import json
import re
import time
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
compat_HTTPError,
)
from ..utils import (
USER_AGENTS,
ExtractorError,
int_or_none,
unified_strdate,
remove_end,
update_url_query,
)
class DPlayIE(InfoExtractor):
_VALID_URL = r'https?://(?P<domain>www\.dplay\.(?:dk|se|no))/[^/]+/(?P<id>[^/?#]+)'
_TESTS = [{
# non geo restricted, via secure api, unsigned download hls URL
'url': 'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/season-1-svensken-lar-sig-njuta-av-livet/',
'info_dict': {
'id': '3172',
'display_id': 'season-1-svensken-lar-sig-njuta-av-livet',
'ext': 'mp4',
'title': 'Svensken lär sig njuta av livet',
'description': 'md5:d3819c9bccffd0fe458ca42451dd50d8',
'duration': 2650,
'timestamp': 1365454320,
'upload_date': '20130408',
'creator': 'Kanal 5 (Home)',
'series': 'Nugammalt - 77 händelser som format Sverige',
'season_number': 1,
'episode_number': 1,
'age_limit': 0,
},
}, {
# geo restricted, via secure api, unsigned download hls URL
'url': 'http://www.dplay.dk/mig-og-min-mor/season-6-episode-12/',
'info_dict': {
'id': '70816',
'display_id': 'season-6-episode-12',
'ext': 'mp4',
'title': 'Episode 12',
'description': 'md5:9c86e51a93f8a4401fc9641ef9894c90',
'duration': 2563,
'timestamp': 1429696800,
'upload_date': '20150422',
'creator': 'Kanal 4 (Home)',
'series': 'Mig og min mor',
'season_number': 6,
'episode_number': 12,
'age_limit': 0,
},
}, {
# geo restricted, via direct unsigned hls URL
'url': 'http://www.dplay.no/pga-tour/season-1-hoydepunkter-18-21-februar/',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
domain = mobj.group('domain')
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
r'data-video-id=["\'](\d+)', webpage, 'video id')
info = self._download_json(
'http://%s/api/v2/ajax/videos?video_id=%s' % (domain, video_id),
video_id)['data'][0]
title = info['title']
PROTOCOLS = ('hls', 'hds')
formats = []
def extract_formats(protocol, manifest_url):
if protocol == 'hls':
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, ext='mp4',
entry_protocol='m3u8_native', m3u8_id=protocol, fatal=False)
# Sometimes final URLs inside m3u8 are unsigned, let's fix this
# ourselves. Also fragments' URLs are only served signed for
# Safari user agent.
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(manifest_url).query)
for m3u8_format in m3u8_formats:
m3u8_format.update({
'url': update_url_query(m3u8_format['url'], query),
'http_headers': {
'User-Agent': USER_AGENTS['Safari'],
},
})
formats.extend(m3u8_formats)
elif protocol == 'hds':
formats.extend(self._extract_f4m_formats(
manifest_url + '&hdcore=3.8.0&plugin=flowplayer-3.8.0.0',
video_id, f4m_id=protocol, fatal=False))
domain_tld = domain.split('.')[-1]
if domain_tld in ('se', 'dk', 'no'):
for protocol in PROTOCOLS:
# Providing dsc-geo allows to bypass geo restriction in some cases
self._set_cookie(
'secure.dplay.%s' % domain_tld, 'dsc-geo',
json.dumps({
'countryCode': domain_tld.upper(),
'expiry': (time.time() + 20 * 60) * 1000,
}))
stream = self._download_json(
'https://secure.dplay.%s/secure/api/v2/user/authorization/stream/%s?stream_type=%s'
% (domain_tld, video_id, protocol), video_id,
'Downloading %s stream JSON' % protocol, fatal=False)
if stream and stream.get(protocol):
extract_formats(protocol, stream[protocol])
# The last resort is to try direct unsigned hls/hds URLs from info dictionary.
# Sometimes this does work even when secure API with dsc-geo has failed (e.g.
# http://www.dplay.no/pga-tour/season-1-hoydepunkter-18-21-februar/).
if not formats:
for protocol in PROTOCOLS:
if info.get(protocol):
extract_formats(protocol, info[protocol])
self._sort_formats(formats)
subtitles = {}
for lang in ('se', 'sv', 'da', 'nl', 'no'):
for format_id in ('web_vtt', 'vtt', 'srt'):
subtitle_url = info.get('subtitles_%s_%s' % (lang, format_id))
if subtitle_url:
subtitles.setdefault(lang, []).append({'url': subtitle_url})
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': info.get('video_metadata_longDescription'),
'duration': int_or_none(info.get('video_metadata_length'), scale=1000),
'timestamp': int_or_none(info.get('video_publish_date')),
'creator': info.get('video_metadata_homeChannel'),
'series': info.get('video_metadata_show'),
'season_number': int_or_none(info.get('season')),
'episode_number': int_or_none(info.get('episode')),
'age_limit': int_or_none(info.get('minimum_age')),
'formats': formats,
'subtitles': subtitles,
}
class DPlayItIE(InfoExtractor):
_VALID_URL = r'https?://it\.dplay\.com/[^/]+/[^/]+/(?P<id>[^/?#]+)'
_GEO_COUNTRIES = ['IT']
_TEST = {
'url': 'http://it.dplay.com/nove/biografie-imbarazzanti/luigi-di-maio-la-psicosi-di-stanislawskij/',
'md5': '2b808ffb00fc47b884a172ca5d13053c',
'info_dict': {
'id': '6918',
'display_id': 'luigi-di-maio-la-psicosi-di-stanislawskij',
'ext': 'mp4',
'title': 'Biografie imbarazzanti: Luigi Di Maio: la psicosi di Stanislawskij',
'description': 'md5:3c7a4303aef85868f867a26f5cc14813',
'thumbnail': r're:^https?://.*\.jpe?g',
'upload_date': '20160524',
'series': 'Biografie imbarazzanti',
'season_number': 1,
'episode': 'Luigi Di Maio: la psicosi di Stanislawskij',
'episode_number': 1,
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
info_url = self._search_regex(
r'url\s*[:=]\s*["\']((?:https?:)?//[^/]+/playback/videoPlaybackInfo/\d+)',
webpage, 'video id')
title = remove_end(self._og_search_title(webpage), ' | Dplay')
try:
info = self._download_json(
info_url, display_id, headers={
'Authorization': 'Bearer %s' % self._get_cookies(url).get(
'dplayit_token').value,
'Referer': url,
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 403):
info = self._parse_json(e.cause.read().decode('utf-8'), display_id)
error = info['errors'][0]
if error.get('code') == 'access.denied.geoblocked':
self.raise_geo_restricted(
msg=error.get('detail'), countries=self._GEO_COUNTRIES)
raise ExtractorError(info['errors'][0]['detail'], expected=True)
raise
hls_url = info['data']['attributes']['streaming']['hls']['url']
formats = self._extract_m3u8_formats(
hls_url, display_id, ext='mp4', entry_protocol='m3u8_native',
m3u8_id='hls')
series = self._html_search_regex(
r'(?s)<h1[^>]+class=["\'].*?\bshow_title\b.*?["\'][^>]*>(.+?)</h1>',
webpage, 'series', fatal=False)
episode = self._search_regex(
r'<p[^>]+class=["\'].*?\bdesc_ep\b.*?["\'][^>]*>\s*<br/>\s*<b>([^<]+)',
webpage, 'episode', fatal=False)
mobj = re.search(
r'(?s)<span[^>]+class=["\']dates["\'][^>]*>.+?\bS\.(?P<season_number>\d+)\s+E\.(?P<episode_number>\d+)\s*-\s*(?P<upload_date>\d{2}/\d{2}/\d{4})',
webpage)
if mobj:
season_number = int(mobj.group('season_number'))
episode_number = int(mobj.group('episode_number'))
upload_date = unified_strdate(mobj.group('upload_date'))
else:
season_number = episode_number = upload_date = None
return {
'id': info_url.rpartition('/')[-1],
'display_id': display_id,
'title': title,
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'series': series,
'season_number': season_number,
'episode': episode,
'episode_number': episode_number,
'upload_date': upload_date,
'formats': formats,
}
| 40.257143
| 157
| 0.538477
|
e1218b6a489e3550fabc3ebb7b67fe0845b3f9b7
| 971
|
py
|
Python
|
lambda_code/elasticbeanstalk/Tags/index.py
|
ikben/custom-resources
|
6857c4bc86dd118e44221efa43e8298f748bf116
|
[
"Apache-2.0"
] | 2
|
2019-05-02T13:06:40.000Z
|
2019-12-20T15:48:31.000Z
|
lambda_code/elasticbeanstalk/Tags/index.py
|
ikben/custom-resources
|
6857c4bc86dd118e44221efa43e8298f748bf116
|
[
"Apache-2.0"
] | 2
|
2019-09-26T14:18:49.000Z
|
2021-09-01T10:52:08.000Z
|
lambda_code/elasticbeanstalk/Tags/index.py
|
ikben/custom-resources
|
6857c4bc86dd118e44221efa43e8298f748bf116
|
[
"Apache-2.0"
] | 1
|
2019-08-02T15:26:19.000Z
|
2019-08-02T15:26:19.000Z
|
from cfn_custom_resource import CloudFormationCustomResource
class Tags(CloudFormationCustomResource):
def validate(self):
self.environmentArn = self.resource_properties['EnvironmentArn']
self.tags = self.resource_properties['Tags']
@staticmethod
def tags_to_update(tags):
return list(map(lambda tag: {'Key': tag[0], 'Value': tag[1]}, tags.items()))
def update_tags(self):
client = self.get_boto3_session().client('elasticbeanstalk')
client.update_tags_for_resource(
ResourceArn=self.environmentArn,
TagsToAdd=self.tags_to_update(self.tags)
)
return {'TagsToUpdate': self.tags_to_update(self.tags)}
def create(self):
return self.update_tags()
def update(self):
return self.update_tags()
def delete(self):
# Deleting not supported for now. Tags will disappear when environment is deleted.
pass
handler = Tags.get_handler()
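# Illustration only (not part of the original module): tags_to_update converts a
# CloudFormation-style mapping into the boto3 tag-list shape, e.g.
#   Tags.tags_to_update({'Team': 'core', 'Env': 'prod'})
#   -> [{'Key': 'Team', 'Value': 'core'}, {'Key': 'Env', 'Value': 'prod'}]
# (on Python 3.7+ the list order follows the dict's insertion order).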
| 29.424242
| 90
| 0.677652
|
b51a21dd5106a09d22346c0f242ada6da8519618
| 2,367
|
py
|
Python
|
assignments/04_cat/cat.py
|
reidloeffler/be434-fall-2021
|
ee2337fd8ba7d3100ffd32ecc4513f95128f5589
|
[
"MIT"
] | null | null | null |
assignments/04_cat/cat.py
|
reidloeffler/be434-fall-2021
|
ee2337fd8ba7d3100ffd32ecc4513f95128f5589
|
[
"MIT"
] | null | null | null |
assignments/04_cat/cat.py
|
reidloeffler/be434-fall-2021
|
ee2337fd8ba7d3100ffd32ecc4513f95128f5589
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Author : reidloeffler <reidloeffler@localhost>
Date : 2021-09-28
Purpose: Rock the Casbah
"""
import argparse
from io import FileIO
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Python cat',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file_input',
metavar='FILE',
type=argparse.FileType('rt'),
nargs='+',
help='Input file(s)')
# Positional argument: obtains file handles
parser.add_argument('-n',
'--number',
help='Number the lines',
action='store_true',
default=False)
# Optional boolean argument: allows the user to view
# file contents with numbered lines
return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
files = args.file_input
optional_num = args.number
for num_files, file_info in enumerate(files):
        # For statement: Iterates over list containing input files
num_line = 1
text = files[num_files].read()
file_name = FileIO(file_info.name)
# Pulls name attribute from file_info and converts it to FileIO type
for num_characters, characters in enumerate(text):
            # For statement: Iterates over list containing characters
# for a given file
if (optional_num is True and
(text[num_characters - 1] == '\n' or num_characters == 0)):
# If statement: Determines if line numbers will be printed
# based on user input
                alt_beginning = ' ' + str(num_line) + '\t' + characters
                # Builds alternative beginning string for numbered lines
                print(alt_beginning, end='')
num_line += 1
else:
print(characters, end='')
# Prints contents from file(s)
file_name.close()
# Closes files that were opened
# --------------------------------------------------
if __name__ == '__main__':
main()
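# Hypothetical invocation (file names are placeholders, not part of the assignment):
#   $ python3 cat.py -n inputs/fox.txt inputs/spiders.txt
# Each file is printed with numbered lines (number, tab, text), and the count
# restarts at 1 for every file, similar to running `cat -n` per file.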
| 28.518072
| 79
| 0.528517
|
a0b8e42394284548b3ae49c9f926aaceed22ff3b
| 5,910
|
py
|
Python
|
scanner.py
|
JustinDPerkins/cloudconformity
|
920d0742f2d216d06bcabe433925d4dbb9ae19d6
|
[
"MIT"
] | 3
|
2020-04-16T04:08:20.000Z
|
2021-03-17T01:28:43.000Z
|
scanner.py
|
JustinDPerkins/cloudconformity
|
920d0742f2d216d06bcabe433925d4dbb9ae19d6
|
[
"MIT"
] | null | null | null |
scanner.py
|
JustinDPerkins/cloudconformity
|
920d0742f2d216d06bcabe433925d4dbb9ae19d6
|
[
"MIT"
] | 1
|
2020-04-30T16:43:57.000Z
|
2020-04-30T16:43:57.000Z
|
import sys
import os
import requests
import json
import argparse
def optionA(access,access2,access3, grade):
for data in access:
rules = data['relationships']
ruledata = rules['rule']
rulelist = ruledata['data']
aruleid = rulelist['id']
attributes = data['attributes']
rulename = attributes["rule-title"]
category = attributes['status']
message = attributes['message']
risk = attributes['risk-level']
info = data["id"]
if category == grade:
for data in access2:
relationship = data['relationships']
rules = relationship['rules']
identifier = rules['data']
attributes = data['attributes']
provider = attributes['provider']
awsservice = attributes['name']
for data in access3:
ruleid = data['id']
html = data['knowledge-base-html']
for data in identifier:
rulenum = data['id']
if aruleid == rulenum:
if aruleid == ruleid:
if ruleid == rulenum:
test = ( "https://www.cloudconformity.com/knowledge-base/" + provider + "/" + awsservice + "/" + html + ".html")
#print(ruleid)
print("\n", aruleid, "\n", rulename,"\n", category,"\n", message,"\n", risk,"\n", info, "\n", test)
def optionG(access,access2,access3, desired_risk):
for data in access:
rules = data['relationships']
ruledata = rules['rule']
rulelist = ruledata['data']
aruleid = rulelist['id']
attributes = data['attributes']
rulename = attributes["rule-title"]
category = attributes['status']
message = attributes['message']
risk = attributes['risk-level']
info = data["id"]
if category == "FAILURE":
if risk == desired_risk:
for data in access2:
relationship = data['relationships']
rules = relationship['rules']
identifier = rules['data']
attributes = data['attributes']
provider = attributes['provider']
awsservice = attributes['name']
for data in access3:
ruleid = data['id']
html = data['knowledge-base-html']
for data in identifier:
rulenum = data['id']
if aruleid == rulenum:
if aruleid == ruleid:
if ruleid == rulenum:
test = ( "https://www.cloudconformity.com/knowledge-base/" + provider + "/" + awsservice + "/" + html + ".html")
#print(ruleid)
print("\n", aruleid, "\n", rulename,"\n", category,"\n", message,"\n", risk,"\n", info, "\n", test)
def main():
# create cli argument for filepath
parser = argparse.ArgumentParser(description='Scan a CFT Template')
parser.add_argument("--scan",
choices=["all", "fail", "extreme", "veryhigh", "high", "medium", "low", "default"],
required=True, type=str, help="Filter your Scan by Severity")
parser.add_argument(dest="cloudformationtemp", help="specify file path")
args = parser.parse_args()
cloudformationtemp = args.cloudformationtemp
scan = args.scan
    # Read the Cloud Conformity API key from the environment.
api= os.environ.get('apiKey')
#API connection for CC
endpoint = 'https://us-west-2-api.cloudconformity.com'
url = endpoint+'/v1/template-scanner/scan'
url2 = endpoint+'/v1/services'
headers = {
'Content-Type': 'application/vnd.api+json',
'Authorization': api
}
    # Open the CloudFormation template and read its contents.
try:
contents = open(cloudformationtemp, 'r').read()
    except (OSError, ValueError):
print("Template Scanner could not process your template...")
sys.exit()
payload = {
'data': {
'attributes': {
'type': 'cloudformation-template',
'contents': contents
}
}
}
# post method
resp = requests.post(url, headers=headers, data=json.dumps(payload))
TurnResponsetoString = json.dumps(resp.json(), indent=2, sort_keys=True)
formResponse = json.loads(TurnResponsetoString)
# get method
response = requests.get(url2,headers=headers)
formatResponse = json.dumps(response.json(), indent=3, sort_keys=False)
results = json.loads(formatResponse)
# key for post call
access = formResponse['data']
# keys for get call
access2 = results['data']
access3 = results['included']
if scan == "all":
optionA(access,access2,access3, "SUCCESS")
optionA(access,access2,access3, "FAILURE")
elif scan == "fail":
optionA(access,access2,access3, "FAILURE")
elif scan == "extreme":
optionG(access,access2,access3, "EXTREME")
elif scan == "veryhigh":
optionG(access,access2,access3, "VERY_HIGH")
elif scan == "high":
optionG(access,access2,access3, "HIGH")
elif scan == "medium":
optionG(access,access2,access3, "MEDIUM")
elif scan == "low":
optionG(access,access2,access3, "LOW")
elif scan == "default":
print(TurnResponsetoString)
if __name__ =="__main__":
main()
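# Hypothetical usage (values are placeholders; apiKey must hold whatever
# Authorization header string the Cloud Conformity API expects):
#   $ export apiKey="ApiKey <your-key>"
#   $ python scanner.py --scan fail my-template.yaml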
| 39.932432
| 157
| 0.511506
|
aac6d03aecf1f99064c1c3d0b50c304ec8df5c40
| 3,850
|
py
|
Python
|
tests/unit/drivers/test_encodingevaluation_driver.py
|
facebbook/jina
|
e8079af3d58f1de0f51f8aef6cdf1eb3d87a9873
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/drivers/test_encodingevaluation_driver.py
|
facebbook/jina
|
e8079af3d58f1de0f51f8aef6cdf1eb3d87a9873
|
[
"Apache-2.0"
] | 2
|
2021-02-15T01:40:38.000Z
|
2021-02-15T02:00:21.000Z
|
tests/unit/drivers/test_encodingevaluation_driver.py
|
facebbook/jina
|
e8079af3d58f1de0f51f8aef6cdf1eb3d87a9873
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pytest
from jina import Document, Request
from jina.drivers.evaluate import NDArrayEvaluateDriver
from jina.executors.evaluators.embedding import BaseEmbeddingEvaluator
from jina.proto import jina_pb2
from jina.types.document.helper import DocGroundtruthPair
class MockDiffEvaluator(BaseEmbeddingEvaluator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def evaluate(
self, actual: 'np.array', desired: 'np.array', *args, **kwargs
) -> float:
""" "
:param actual: the embedding of the document (resulting from an Encoder)
:param desired: the expected embedding of the document
:return the evaluation metric value for the request document
"""
return abs(sum(actual - desired) / len(actual))
@pytest.fixture
def mock_diff_evaluator():
return MockDiffEvaluator()
class SimpleEvaluateDriver(NDArrayEvaluateDriver):
@property
def exec_fn(self):
return self._exec_fn
@pytest.fixture
def simple_evaluate_driver():
return SimpleEvaluateDriver()
@pytest.fixture
def ground_truth_pairs():
num_docs = 10
pairs = []
for idx in range(num_docs):
doc = Document(embedding=np.array([1, 1]))
gt = Document(embedding=np.array([2, 2]))
pairs.append(DocGroundtruthPair(doc=doc, groundtruth=gt))
return pairs
def test_encoding_evaluate_driver(
mock_diff_evaluator, simple_evaluate_driver, ground_truth_pairs
):
simple_evaluate_driver.attach(executor=mock_diff_evaluator, runtime=None)
simple_evaluate_driver._apply_all(ground_truth_pairs)
for pair in ground_truth_pairs:
doc = pair.doc
assert len(doc.evaluations) == 1
assert doc.evaluations[0].op_name == 'MockDiffEvaluator'
assert doc.evaluations[0].value == 1.0
class SimpleChunkEvaluateDriver(NDArrayEvaluateDriver):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.eval_request = None
self._traversal_paths = ('c',)
@property
def exec_fn(self):
return self._exec_fn
@property
def req(self) -> 'jina_pb2.RequestProto':
"""Get the current (typed) request, shortcut to ``self.pea.request``"""
return self.eval_request
@property
def expect_parts(self) -> int:
return 1
@pytest.fixture
def simple_chunk_evaluate_driver():
return SimpleChunkEvaluateDriver()
@pytest.fixture
def eval_request():
num_docs = 10
req = jina_pb2.RequestProto()
for idx in range(num_docs):
doc = Document(req.index.docs.add())
gt = Document(req.index.groundtruths.add())
chunk_doc = doc.chunks.new()
chunk_gt = gt.chunks.new()
chunk_doc.embedding = np.array([1, 1])
chunk_gt.embedding = np.array([2, 2])
return Request(req).as_typed_request('index')
def test_encoding_evaluate_driver_embedding_in_chunks(
simple_chunk_evaluate_driver, mock_diff_evaluator, eval_request
):
# this test proves that we can evaluate matches at chunk level,
# proving that the driver can traverse in a parallel way docs and groundtruth
simple_chunk_evaluate_driver.attach(executor=mock_diff_evaluator, runtime=None)
simple_chunk_evaluate_driver.eval_request = eval_request
simple_chunk_evaluate_driver()
ed = list(eval_request.docs)
eg = list(eval_request.groundtruths)
assert len(ed) == len(eg)
assert len(ed) == 10
for doc in ed:
assert len(doc.evaluations) == 0 # evaluation done at chunk level
dc = list(doc.chunks)
assert len(dc) == 1
chunk = dc[0]
assert len(chunk.evaluations) == 1 # evaluation done at chunk level
assert chunk.evaluations[0].op_name == 'MockDiffEvaluator'
assert chunk.evaluations[0].value == 1.0
| 30.8
| 83
| 0.694286
|
1141c2f9897bb9599873e0a11f00ef92ea16b1f7
| 3,920
|
py
|
Python
|
saber/tests/test_metrics.py
|
anarkia7115/saber
|
0994d355c01035092679b8fc49a9fefac7a91d1d
|
[
"MIT"
] | 93
|
2018-09-25T12:10:02.000Z
|
2022-02-15T07:20:10.000Z
|
saber/tests/test_metrics.py
|
anarkia7115/saber
|
0994d355c01035092679b8fc49a9fefac7a91d1d
|
[
"MIT"
] | 155
|
2018-08-25T00:57:43.000Z
|
2020-11-25T15:05:31.000Z
|
saber/tests/test_metrics.py
|
anarkia7115/saber
|
0994d355c01035092679b8fc49a9fefac7a91d1d
|
[
"MIT"
] | 17
|
2018-10-10T14:07:30.000Z
|
2021-09-09T02:09:40.000Z
|
"""Any and all unit tests for the Metrics class (saber/metrics.py).
"""
import pytest
from .. import constants
from ..config import Config
from ..dataset import Dataset
from ..metrics import Metrics
from ..utils import model_utils
from .resources.dummy_constants import *
PATH_TO_METRICS_OUTPUT = 'totally/arbitrary'
######################################### PYTEST FIXTURES #########################################
@pytest.fixture
def dummy_config():
"""Returns an instance of a Config object."""
dummy_config = Config(PATH_TO_DUMMY_CONFIG)
return dummy_config
@pytest.fixture
def dummy_dataset():
"""Returns a single dummy Dataset instance after calling Dataset.load().
"""
# Don't replace rare tokens for the sake of testing
dataset = Dataset(directory=PATH_TO_DUMMY_DATASET_1, replace_rare_tokens=False)
dataset.load()
return dataset
@pytest.fixture
def dummy_output_dir(tmpdir, dummy_config):
"""Returns list of output directories."""
# make sure top-level directory is the pytest tmpdir
dummy_config.output_folder = tmpdir.strpath
output_dirs = model_utils.prepare_output_directory(dummy_config)
return output_dirs
@pytest.fixture
def dummy_training_data(dummy_dataset):
"""Returns training data from `dummy_dataset`.
"""
training_data = {'x_train': [dummy_dataset.idx_seq['train']['word'],
dummy_dataset.idx_seq['train']['char']],
'x_valid': None,
'x_test': None,
'y_train': dummy_dataset.idx_seq['train']['tag'],
'y_valid': None,
'y_test': None,
}
return training_data
@pytest.fixture
def dummy_metrics(dummy_config, dummy_dataset, dummy_training_data, dummy_output_dir):
"""Returns an instance of Metrics.
"""
metrics = Metrics(config=dummy_config,
training_data=dummy_training_data,
index_map=dummy_dataset.idx_to_tag,
output_dir=dummy_output_dir,
# to test passing of arbitrary keyword args to constructor
totally_arbitrary='arbitrary')
return metrics
############################################ UNIT TESTS ############################################
def test_attributes_after_initialization(dummy_config,
                                         dummy_dataset,
                                         dummy_output_dir,
                                         dummy_training_data,
                                         dummy_metrics):
"""Asserts instance attributes are initialized correctly when Metrics object is initialized."""
# attributes that are passed to __init__
assert dummy_metrics.config is dummy_config
assert dummy_metrics.training_data is dummy_training_data
assert dummy_metrics.index_map is dummy_dataset.idx_to_tag
assert dummy_metrics.output_dir == dummy_output_dir
# other instance attributes
assert dummy_metrics.current_epoch == 0
assert dummy_metrics.performance_metrics == {p: [] for p in constants.PARTITIONS}
# test that we can pass arbitrary keyword arguments
assert dummy_metrics.totally_arbitrary == 'arbitrary'
def test_precision_recall_f1_support_value_error():
"""Asserts that call to `Metrics.get_precision_recall_f1_support` raises a `ValueError` error
when an invalid value for parameter `criteria` is passed."""
# these are totally arbitrary
y_true = [('test', 0, 3), ('test', 4, 7), ('test', 8, 11)]
y_pred = [('test', 0, 3), ('test', 4, 7), ('test', 8, 11)]
# anything but 'exact', 'left', or 'right' should throw an error
invalid_args = ['right ', 'LEFT', 'eXact', 0, []]
for arg in invalid_args:
with pytest.raises(ValueError):
Metrics.get_precision_recall_f1_support(y_true, y_pred, criteria=arg)
| 39.2
| 100
| 0.629337
|