hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d25b3d6bc31f3ca7960ee1d2b2edc46e92e9ff1d | 6,142 | py | Python | skills/eliza/test_eliza.py | oserikov/dream | 109ba2df799025dcdada1fddbb7380e1c03100eb | [
"Apache-2.0"
] | 34 | 2021-08-18T14:51:44.000Z | 2022-03-10T14:14:48.000Z | skills/eliza/test_eliza.py | oserikov/dream | 109ba2df799025dcdada1fddbb7380e1c03100eb | [
"Apache-2.0"
] | 27 | 2021-08-30T14:42:09.000Z | 2022-03-17T22:11:45.000Z | skills/eliza/test_eliza.py | oserikov/dream | 109ba2df799025dcdada1fddbb7380e1c03100eb | [
"Apache-2.0"
] | 40 | 2021-08-22T07:13:32.000Z | 2022-03-29T11:45:32.000Z | import unittest
import eliza
if __name__ == "__main__":
unittest.main()
| 45.496296 | 120 | 0.545262 |
d25b9fd6524f688abaf7d222a5e27a028065bdf6 | 5,387 | py | Python | examples/tasks/dtw-energy-plus-models-data/code.py | dburian/ivis-core | 7789821c3750ccab68396aa2bfdd405fd4d21520 | [
"MIT"
] | 2 | 2021-05-17T13:20:56.000Z | 2021-11-04T16:36:29.000Z | examples/tasks/dtw-energy-plus-models-data/code.py | dburian/ivis-core | 7789821c3750ccab68396aa2bfdd405fd4d21520 | [
"MIT"
] | 37 | 2019-05-08T04:53:58.000Z | 2022-03-02T03:50:42.000Z | examples/tasks/dtw-energy-plus-models-data/code.py | dburian/ivis-core | 7789821c3750ccab68396aa2bfdd405fd4d21520 | [
"MIT"
] | 12 | 2019-04-06T15:00:32.000Z | 2021-11-06T08:56:07.000Z | import sys
import os
import json
from elasticsearch import Elasticsearch, helpers
from datetime import datetime, timezone
import numpy as np
from dtw import dtw
# Get parameters and set up elasticsearch
data = json.loads(sys.stdin.readline())
es = Elasticsearch([{'host': data['es']['host'], 'port': int(data['es']['port'])}])
state = data.get('state')
params= data['params']
entities= data['entities']
# Get ES index and fields
sensor_set = entities['signalSets'][params['sensors']]
sensor_ts = entities['signals'][params['sensors']][params['ts']]
sensor_co2 = entities['signals'][params['sensors']][params['co2']]
limit_val = float(params['limitValue'])
limit = limit_val
if state is None or state.get('index') is None:
ns = sensor_set['namespace']
msg = {}
msg['type'] = 'sets'
# Request new signal set creation
msg['sigSet'] = {
"cid" : "e_plus_mod",
"name" : "E+ comparison" ,
"namespace": ns,
"description" : "Comparison of Energy+ models" ,
"aggs" : "0"
}
signals= []
signals.append({
"cid": "ts",
"name": "ts",
"description": "timestamp",
"namespace": ns,
"type": "date",
"indexed": False,
"settings": {}
})
signals.append({
"cid": "mod",
"name": "mod",
"description": "mod",
"namespace": ns,
"type": "keyword",
"indexed": False,
"settings": {}
})
signals.append({
"cid": "model",
"name": "model",
"description": "Closest model's cid",
"namespace": ns,
"type": "keyword",
"indexed": False,
"settings": {}
})
msg['sigSet']['signals'] = signals
ret = os.write(3,(json.dumps(msg) + '\n').encode())
state = json.loads(sys.stdin.readline())
error = state.get('error')
if error:
sys.stderr.write(error+"\n")
sys.exit(1)
else:
store_msg = {}
store_msg["type"] = "store"
store_msg["state"] = state
ret = os.write(3,(json.dumps(store_msg) + '\n').encode())
sensor_data = get_co2_values(sensor_set['index'], sensor_ts['field'], sensor_co2['field'])
if not sensor_data:
print('No sensor data to measure on')
exit()
sensor_np = np.array(sensor_data, dtype=float).reshape(-1, 1)
euclidean_norm = lambda x, y: np.abs(x - y)
min_model={}
min_distance=float("inf")
for model in params['models']:
ts =entities['signals'][model['sigSet']][model['ts']]['field']
co2 =entities['signals'][model['sigSet']][model['co2']]['field']
sig_set = entities['signalSets'][model['sigSet']]['index']
model_data = get_co2_values(sig_set, ts,co2)
if not model_data:
print(f'No data for signal set {sig_set}')
continue
# Calculate for all models
model_np = np.array(model_data, dtype=float).reshape(-1, 1)
# Calculate for all models
d, cost_matrix, acc_cost_matrix, path = dtw(sensor_np, model_np, dist=euclidean_norm)
if d<min_distance:
min_distance = d
min_model['name'] = entities["signalSets"][model["sigSet"]]["name"]
min_model['cid'] = model["sigSet"]
min_model['ts'] = ts
min_model['co2'] = co2
min_model['index'] = sig_set
# Do something with closest model
if not min_model:
print('No model found')
exit()
print(f'Closest model is: {min_model["name"]}')
# Query prediction
query = {
'_source': [min_model['co2'], min_model['ts']],
'sort': [{min_model['ts']: 'asc'}],
"aggs" : {
"max_co2" : { "max" : { "field" : min_model['co2'] } }
},
'query': {
"range" : {
min_model['ts'] : {
"gt" : "now/m",
"lt" : "now+60m/m"
}
}
}
}
results = es.search(index=min_model['index'], body=query)
max_co2 = results['aggregations']['max_co2']['value']
# Get current mode
# TODO this will probably change later on to take data from the actual system
query = {
'size': 1,
'_source': [state['fields']['mod']],
'sort': [{state['fields']['ts']: 'desc'}],
'query': {
"match_all": {}
}
}
results = es.search(index=state['index'], body=query)
mod = results['hits']['hits'][0]['_source'][state['fields']['mod']] if results['hits']['total'] > 0 else 'mod1'
# If currently over limit or going to be according to models data, open more
if sensor_data[-1] > limit or max_co2 > limit:
mod = 'mod2'
elif sensor_data[-1] < limit - 200:
mod = 'mod1'
print(f'Chosen: {mod}')
ts = datetime.now(timezone.utc).astimezone()
doc = {
state['fields']['ts']: ts,
state['fields']['model']: min_model['cid'],
state['fields']['mod']: mod
}
res = es.index(index=state['index'], doc_type='_doc', id=ts, body=doc)
#prediction_data = []
#for item in results['hits']['hits']:
# val = item["_source"][min_model['co2']]
# if val is not None:
# prediction_data.append(val)
# else:
# continue
#print (prediction_data)
| 25.530806 | 111 | 0.593837 |
d25d20d8eebe1fa8e5a33aad978f268f206cda23 | 602 | py | Python | xmrswap/interface_part.py | tecnovert/xmrswap | ad2983a4df03184453ff680c17602497acc75a87 | [
"MIT"
] | 2 | 2020-09-21T17:33:23.000Z | 2020-10-03T08:54:01.000Z | xmrswap/interface_part.py | tecnovert/xmrswap | ad2983a4df03184453ff680c17602497acc75a87 | [
"MIT"
] | 2 | 2020-10-03T09:18:48.000Z | 2020-10-13T19:58:34.000Z | xmrswap/interface_part.py | tecnovert/xmrswap | ad2983a4df03184453ff680c17602497acc75a87 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.
from .contrib.test_framework.messages import (
CTxOutPart,
)
from .interface_btc import BTCInterface
| 22.296296 | 73 | 0.704319 |
d25df58bed9f8be63b8c4a15d08e86c300ade0fd | 2,511 | py | Python | pelican_resume/resume.py | cmenguy/pelican-resume | 57105e72c24ef04ad96857f51e5e9060e6aff1f6 | [
"MIT"
] | 12 | 2016-02-07T05:16:44.000Z | 2019-11-20T08:46:10.000Z | pelican_resume/resume.py | cmenguy/pelican-resume | 57105e72c24ef04ad96857f51e5e9060e6aff1f6 | [
"MIT"
] | 1 | 2019-01-20T20:57:35.000Z | 2019-01-20T20:59:59.000Z | pelican_resume/resume.py | cmenguy/pelican-resume | 57105e72c24ef04ad96857f51e5e9060e6aff1f6 | [
"MIT"
] | 5 | 2016-06-07T23:34:36.000Z | 2020-07-13T18:01:23.000Z | '''
resume
==============================================================================
This plugin generates a PDF resume from a Markdown file using customizable CSS
'''
import os
import logging
import tempfile
from subprocess import Popen
from pelican import signals
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
CSS_DIR = os.path.join(CURRENT_DIR, "static", "css")
logger = logging.getLogger(__name__) | 38.630769 | 87 | 0.682597 |
d25e34eee54e20d2dc920f68d0031efffaa533b3 | 331 | py | Python | app/machine_learning.py | ludthor/CovidVisualizer | 721015e8f9f0b1c0fb2e5ba985884341d22046e2 | [
"MIT"
] | null | null | null | app/machine_learning.py | ludthor/CovidVisualizer | 721015e8f9f0b1c0fb2e5ba985884341d22046e2 | [
"MIT"
] | null | null | null | app/machine_learning.py | ludthor/CovidVisualizer | 721015e8f9f0b1c0fb2e5ba985884341d22046e2 | [
"MIT"
] | null | null | null | from sklearn.linear_model import Ridge
| 15.761905 | 38 | 0.570997 |
d25f4bca7ddaa3e56f525ab91b8973856914e4df | 6,246 | py | Python | connection/connection_handler.py | valsoares/td | 4856604c71ff7d996f4e2580e0cdd9b904805225 | [
"MIT"
] | null | null | null | connection/connection_handler.py | valsoares/td | 4856604c71ff7d996f4e2580e0cdd9b904805225 | [
"MIT"
] | null | null | null | connection/connection_handler.py | valsoares/td | 4856604c71ff7d996f4e2580e0cdd9b904805225 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: Marcos F. Caetano (mfcaetano@unb.br) 11/03/2020
@description: PyDash Project
The ConnectionHandler is a Singleton class implementation
The class responsible to retrieve segments in the web server.
Also it implements a traffic shaping approach.
"""
from base.simple_module import SimpleModule
from base.message import Message, MessageKind, SSMessage
from base.configuration_parser import ConfigurationParser
from player.parser import *
import http.client
import time
from scipy.stats import expon
from base.timer import Timer
import seaborn as sns
import matplotlib.pyplot as plt
| 34.131148 | 168 | 0.646013 |
d260f409a5bee0d6f1b71a1546aadce5730647ca | 37,634 | py | Python | latexpp/fixes/pkg/phfqit.py | psg-mit/latexpp | 0b7b523c9ce147c2d34cc430b1abd39972e33fa9 | [
"MIT"
] | 4 | 2020-08-28T18:41:48.000Z | 2021-11-11T11:23:58.000Z | latexpp/fixes/pkg/phfqit.py | psg-mit/latexpp | 0b7b523c9ce147c2d34cc430b1abd39972e33fa9 | [
"MIT"
] | 4 | 2020-07-31T07:34:38.000Z | 2021-11-23T19:05:00.000Z | latexpp/fixes/pkg/phfqit.py | psg-mit/latexpp | 0b7b523c9ce147c2d34cc430b1abd39972e33fa9 | [
"MIT"
] | 1 | 2020-07-22T02:44:48.000Z | 2020-07-22T02:44:48.000Z | import re
import yaml
import logging
logger = logging.getLogger(__name__)
from pylatexenc.macrospec import MacroSpec, ParsedMacroArgs, MacroStandardArgsParser
from pylatexenc import latexwalker
from latexpp.macro_subst_helper import MacroSubstHelper
from latexpp.fix import BaseFix
# parse entropy macros etc.
_qitobjdefs = yaml.safe_load(r"""
stdset:
HH:
type: Hbase
Hzero:
type: Hbase
sub: '\mathrm{max},0'
Hmin:
type: Hbase
sub: '\mathrm{min}'
Hmaxf:
type: Hbase
sub: '\mathrm{max}'
Hfn:
type: Hfnbase
Dmax:
type: Dbase
sub: '\mathrm{max}'
Dminz:
type: Dbase
sub: '0'
Dminf:
type: Dbase
sub: '\mathrm{min}'
Dr:
type: Dbase
sub: '\mathrm{Rob}'
DHyp:
type: Dbase
sub: '\mathrm{H}'
Dhyp:
type: Dbase
sub: '\mathrm{h}'
DCoh:
type: DCohbase
DCohx:
type: DCohbase
DD:
type: DD
""")
baseqitobjs = yaml.safe_load("""
IdentProc:
type: IdentProc
ee:
type: ee
""")
_fixed_repl = {
'DSym': lambda self: self.DSym,
'HSym': lambda self: self.HSym,
}
mathtools_delims_macros = {
'abs': (r'\lvert', r'\rvert'),
'norm': (r'\lVert', r'\rVert'),
'avg': (r'\langle', r'\rangle'),
'ket': (r'\lvert', r'{%(1)s}', r'\rangle'),
'bra': (r'\langle', r'{%(1)s}', r'\rvert'),
'braket': (r'\langle', r'{%(1)s}%(phfqitKetsBarSpace)s%(delimsize)s\vert\phfqitKetsBarSpace{%(2)s}',
r'\rangle'),
'ketbra': (r'\lvert', r'{%(1)s}%(delimsize)s\rangle %(phfqitKetsRLAngleSpace)s%(delimsize)s\langle{%(2)s}',
r'\rvert'),
'proj': (r'\lvert', r'{%(1)s}%(delimsize)s\rangle %(phfqitKetsRLAngleSpace)s%(delimsize)s\langle{%(1)s}',
r'\rvert'),
'matrixel': (r'\langle',
r'{%(1)s}%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(2)s}'
+r'%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(3)s}',
r'\rangle'),
'dmatrixel': (r'\langle',
r'{%(1)s}%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(2)s}'
+r'%(phfqitKetsBarSpace)s%(delimsize)s\vert %(phfqitKetsBarSpace)s{%(1)s}',
r'\rangle'),
'innerprod': (r'\langle',
r'{%(1)s}%(phfqitBeforeCommaSpace)s,%(phfqitAfterCommaSpace)s{%(2)s}',
r'\rangle'),
'oket': (r'\lvert', r'{%(1)s}', r'\rrangle'),
'obra': (r'\llangle', r'{%(1)s}', r'\rvert'),
'obraket': (r'\llangle', r'{%(1)s}%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(2)s}',
r'\rrangle'),
'oketbra': (r'\lvert', r'{%(1)s}%(delimsize)s\rrangle %(phfqitOKetsRLAngleSpace)s%(delimsize)s\llangle{%(2)s}',
r'\rvert'),
'oproj': (r'\lvert', r'{%(1)s}%(delimsize)s\rrangle %(phfqitOKetsRLAngleSpace)s%(delimsize)s\llangle{%(1)s}',
r'\rvert'),
'omatrixel': (r'\llangle',
r'{%(1)s}%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(2)s}'
+r'%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(3)s}',
r'\rrangle'),
'odmatrixel': (r'\llangle',
r'{%(1)s}%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(2)s}'
+r'%(phfqitOKetsBarSpace)s%(delimsize)s\vert %(phfqitOKetsBarSpace)s{%(1)s}',
r'\rrangle'),
'intervalc': (r'[', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r']'),
'intervalo': (r']', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r'['),
'intervalco': (r'[', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r'['),
'intervaloc': (r']', r'{%(1)s\mathclose{},\mathopen{}%(2)s}', r']'),
}
simple_substitution_macros = {
r'Hs': r'\mathscr{H}',
r'Ident': r'\mathds{1}',
# bits and gates
r'bit': {'qitargspec': '{', 'repl': r'\texttt{%(1)s}'},
r'bitstring': {'qitargspec': '{', 'repl': r'\ensuremath{\underline{\overline{\texttt{%(1)s}}}}'},
r'gate': {'qitargspec': '{',
'repl': gate("%(1)s") },
r'AND': gate('And'),
r'XOR': gate('Xor'),
r'CNOT': gate('C-Not'),
r'NOT': gate('Not'),
r'NOOP': gate('No-Op'),
# math groups
'uu': dict(qitargspec='(', repl=r'\mathrm{u}({%(1)s})'),
'UU': dict(qitargspec='(', repl=r'\mathrm{U}({%(1)s})'),
'su': dict(qitargspec='(', repl=r'\mathrm{su}({%(1)s})'),
'SU': dict(qitargspec='(', repl=r'\mathrm{SU}({%(1)s})'),
'so': dict(qitargspec='(', repl=r'\mathrm{so}({%(1)s})'),
'SO': dict(qitargspec='(', repl=r'\mathrm{SO}({%(1)s})'),
#'sl': dict(qitargspec='(', repl=r'\mathrm{sl}({%(1)s})'), # not in phfqit -- why? should add it there
#'SL': dict(qitargspec='(', repl=r'\mathrm{SL}({%(1)s})'),
'GL': dict(qitargspec='(', repl=r'\mathrm{GL}({%(1)s})'),
'SN': dict(qitargspec='(', repl=r'\mathrm{S}_{%(1)s}'),
}
math_operators = {
'tr': 'tr',
'supp': 'supp',
'rank': 'rank',
'linspan': 'span',
'spec': 'spec',
'diag': 'diag',
'Re': 'Re',
'Im': 'Im',
'poly': 'poly',
}
rx_hspace = re.compile(r'\\hspace\*?\{[^}]+\}')
# qitargspec: extension of argspec with:
# *, [, { -- as in latexwalker
# ` -- optional size arg
# ( -- mandatory arg in (...)
# _ -- optional arg (subscript) that is marked by '_', e.g. \DD_{min}{...}{...}
# ^ -- optional arg (superscript) that is marked by '^', e.g. \DD^{\epsilon}{...}{...}
def qitargspec_to_argspec(qitargspec):
return "".join( x if x in ('*', '[', '{') else '[' for x in qitargspec )
| 37.041339 | 115 | 0.533693 |
d26193d8f95b87350b91fd8517bcdb1ccfde7d7b | 3,936 | py | Python | Ch_5/linear_alg5.py | Skyblueballykid/linalg | 515eea984856ad39c823314178929876b21f8014 | [
"MIT"
] | null | null | null | Ch_5/linear_alg5.py | Skyblueballykid/linalg | 515eea984856ad39c823314178929876b21f8014 | [
"MIT"
] | null | null | null | Ch_5/linear_alg5.py | Skyblueballykid/linalg | 515eea984856ad39c823314178929876b21f8014 | [
"MIT"
] | null | null | null | import numpy as np
import scipy
import sympy
from numpy import linalg as lg
from numpy.linalg import solve
from numpy.linalg import eig
from scipy.integrate import quad
# Question 1
'''
A. Determinant = -21
B. Determinant = -21
'''
m1 = np.array([[3, 0, 3], [2, 3, 3], [0, 4, -1]])
print(m1)
det1 = np.linalg.det(m1)
print(det1) # correct
# Question 2
# Det = -159
# Question 3
'''
A.
Replace row 3 with k times row 3.
B.
The determinant is multiplied by k.
'''
# Question 4
m2 = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
det2 = np.linalg.det(m2)
print(det2) # correct
# Question 5
'''
A.
False, because the determinant of A can be computed by cofactor expansion across any row or down any column. Since the determinant of A is well defined, both of these cofactor expansions will be equal.
B.
False, because the determinant of a triangular matrix is the product of the entries along the main diagonal.
'''
# Question 6
'''
If two rows of A are interchanged to produce B, then det Upper B equals negative det A.
'''
# Question 7
'''
If a multiple of one row of A is added to another row to produce matrix B, then det Upper B equals det Upper A.
'''
# Question 8
m3 = sympy.Matrix([[1, 5, -6], [-1, -4, -5], [1, 4, 7]])
print(m3)
rref1 = m3.rref()
print(rref1)
m4 = np.array([[1, 5, -6], [-1, -4, -5], [1, 4, 7]])
det3 = np.linalg.det(m4)
print(det3) # correct, det = 2
# Question 9
# Switch the rows, det of original matrix = -10, det of changed matrix = 10
# Question 10
m5 = np.array([[-25, -4, -2], [-5, 12, -4], [0, -20, 6]])
det4 = np.linalg.det(m5)
print(det4)
# The matrix is invertible because the determinant of the matrix is not zero.
# Question 11
# formula
# Question 12
mat = np.array([[1,1,0], [3, 0, 5], [0, 1, -5]])
print(mat)
det8 = np.linalg.det(mat)
print(det8)
#Cramer's Rule
# Find A1b by replacing the first column with column b
mat2 = np.array([[2,1,0], [0, 0, 5], [3, 1, -5]])
print(mat2)
det9 = np.linalg.det(mat2)
print(det9)
print(det9/det8)
#Find A2b by replacing the second column with b
mat3 = np.array([[1, 2, 0], [3, 0, 5], [0, 3, -5]])
print(mat3)
det10 = np.linalg.det(mat3)
print(det10)
print(det10/det8)
#Find A3b by replacing the third column with b
mat4 = np.array([[1, 1, 2], [3, 0, 0], [0, 1, 3]])
print(mat4)
det11 = np.linalg.det(mat4)
print(det11)
print(det11/det8)
# Answers above are correct, but try again because I misread the print output
matr = np.array([[1,1,0], [5, 0, 4], [0, 1, -4]])
print(matr)
deter = np.linalg.det(matr)
print(deter)
# Find A1b by replacing first column with b
matr1 = np.array([[5, 1, 0], [0, 0, 4], [6, 1, -4]])
print(matr1)
deter1 = np.linalg.det(matr1)
print(deter1/deter)
# Find A2b by replacing second column with b
matr2 = np.array([[1, 5, 0], [5, 0, 4], [0, 6, -4]])
print(matr2)
deter2 = np.linalg.det(matr2)
print(deter2/deter)
# Find A3b by replacing third column with b
matr3 = np.array([[1, 1, 5], [5, 0, 0], [0, 1, 6]])
print(matr3)
deter3 = np.linalg.det(matr3)
print(deter3/deter)
# Question 13
# Compute the adjugate of the given matrix
matri = np.matrix([[2, 5, 4], [1, 0, 1], [3, 2, 2]])
print(matri)
# Hermitian transpose (not correct)
print(matri.getH())
# Det of matrix
determ = np.linalg.det(matri)
print(determ)
adj_matr = np.array([[-2, -2, 5], [1, -8, 2], [2, 11, -5]])
print(adj_matr * 1/determ) # Correct
# Question 14
m6 = np.array([[3, 7], [6, 2]])
print(m6)
det5 = np.linalg.det(m6)
print(det5) # correct
# The area of the parellelogram is the absolute value of the det. In this case = 36
# Question 15
# First find the area of the parellelogram
m7 = np.array([[-5, -5], [5, 10]])
det6 = np.linalg.det(m7)
print(det6) # -25
# next find the det of matrix A
m8 = np.array([[7, -8], [-2, 8]])
print(m8)
det7 = np.linalg.det(m8)
print(det7) # 40
# Finally, multiply the absolute value of the det of the first matrix (area of the parellelogram) by the det of the second matrix
# Answer = 1000
| 23.152941 | 202 | 0.653201 |
d26272e6f74f04e9cc5cdc1f6a997a3ad8bdee52 | 2,620 | py | Python | builder/tasks_bullet/standimitation_task_bullet.py | FrankTianTT/laikago_robot | a5d54f10ea6a5620762c2210893ae8abe2f9ac05 | [
"MIT"
] | 6 | 2020-12-02T07:49:36.000Z | 2021-12-24T01:36:07.000Z | builder/tasks_bullet/standimitation_task_bullet.py | FrankTianTT/laikago_robot | a5d54f10ea6a5620762c2210893ae8abe2f9ac05 | [
"MIT"
] | null | null | null | builder/tasks_bullet/standimitation_task_bullet.py | FrankTianTT/laikago_robot | a5d54f10ea6a5620762c2210893ae8abe2f9ac05 | [
"MIT"
] | 3 | 2021-01-12T14:09:40.000Z | 2021-12-24T01:36:17.000Z | from builder.laikago_task_bullet import LaikagoTaskBullet
from builder.laikago_task import InitPose
import math
import numpy as np
ABDUCTION_P_GAIN = 220.0
ABDUCTION_D_GAIN = 0.3
HIP_P_GAIN = 220.0
HIP_D_GAIN = 2.0
KNEE_P_GAIN = 220.0
KNEE_D_GAIN = 2.0 | 42.95082 | 92 | 0.596183 |
d26509e1b720c708ef4c28d0e261a51f29110955 | 425 | py | Python | build.py | chrahunt/conan-protobuf | c49350d1c69d2e5b40305803f3184561f433554c | [
"MIT"
] | null | null | null | build.py | chrahunt/conan-protobuf | c49350d1c69d2e5b40305803f3184561f433554c | [
"MIT"
] | null | null | null | build.py | chrahunt/conan-protobuf | c49350d1c69d2e5b40305803f3184561f433554c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bincrafters import build_template_default
if __name__ == "__main__":
builder = build_template_default.get_builder()
# Todo: re-enable shared builds when issue resolved
# github issue: https://github.com/google/protobuf/issues/2502
builder.items = filter(lambda build: build.options["protobuf:shared"] == False, builder.items)
builder.run()
| 26.5625 | 98 | 0.701176 |
d2657376a64e94e969ed1edf71ca0efd3af9b3de | 2,046 | py | Python | pytrek/settings/LimitsSettings.py | hasii2011/PyArcadeStarTrek | 370edbb62f15f69322aa7f109d6d36ebf20cbe4a | [
"MIT"
] | 1 | 2021-06-13T00:56:24.000Z | 2021-06-13T00:56:24.000Z | pytrek/settings/LimitsSettings.py | hasii2011/PyArcadeStarTrek | 370edbb62f15f69322aa7f109d6d36ebf20cbe4a | [
"MIT"
] | 94 | 2021-04-16T20:34:10.000Z | 2022-01-13T19:58:20.000Z | pytrek/settings/LimitsSettings.py | hasii2011/PyArcadeStarTrek | 370edbb62f15f69322aa7f109d6d36ebf20cbe4a | [
"MIT"
] | null | null | null |
from logging import Logger
from logging import getLogger
from pytrek.settings.BaseSubSetting import BaseSubSetting
from pytrek.settings.SettingsCommon import SettingsCommon
from pytrek.settings.SettingsCommon import SettingsNameValues
| 33.540984 | 133 | 0.729717 |
d26592ea9cca4872fa15f4c5aedeb743d022345c | 2,366 | py | Python | tests/integration_tests/testYieldCurve.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | [
"MIT"
] | 1 | 2021-10-04T03:15:50.000Z | 2021-10-04T03:15:50.000Z | tests/integration_tests/testYieldCurve.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | [
"MIT"
] | null | null | null | tests/integration_tests/testYieldCurve.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | [
"MIT"
] | null | null | null | import unittest
from datetime import date
from irLib.marketConvention.dayCount import ACT_ACT
from irLib.marketConvention.compounding import annually_k_Spot
from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve
import numpy as np
alias_disC = 'disC'
alias_forC = 'forC'
referenceDate = date(2020, 6, 26)
dayCount = ACT_ACT()
compounding = annually_k_Spot()
allowExtrapolation = False
# set synthetic data
timeIndex = [1, 2, 3, 4, 5]
flatR = 0.03
dF = ((flatR + 1) ** -np.arange(1, 6)).tolist()
forwardRates = (flatR * np.ones(5)).tolist()
spots = (flatR * np.ones(5)).tolist()
yearFrac = np.arange(1, 6).tolist()
par = (flatR * np.ones(5)).tolist()
t = date(2021, 6, 30) # try date(2021, 6, 26) will trigger extrapolation warning msg
t1 = date(2022, 6, 26)
t2 = date(2023, 6, 26)
| 37.555556 | 111 | 0.674979 |
d26666e751893b180e8e39534e0d885f31d24a15 | 1,876 | py | Python | src/python/setup.py | blaine141/NVISII | 1675bb9bb74a1fe441bbb10ca98ea5cc4b0e4e24 | [
"Apache-2.0"
] | 149 | 2021-02-09T11:35:23.000Z | 2022-03-29T10:06:22.000Z | src/python/setup.py | blaine141/NVISII | 1675bb9bb74a1fe441bbb10ca98ea5cc4b0e4e24 | [
"Apache-2.0"
] | 66 | 2020-05-28T18:53:21.000Z | 2021-02-07T05:34:14.000Z | src/python/setup.py | blaine141/NVISII | 1675bb9bb74a1fe441bbb10ca98ea5cc4b0e4e24 | [
"Apache-2.0"
] | 14 | 2021-02-09T08:51:44.000Z | 2022-03-11T00:39:21.000Z | # Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
from setuptools import setup, dist
import wheel
import os
# required to geneerate a platlib folder required by audittools
from setuptools.command.install import install
# for generating a wheel version from git tag
from setuptools_scm import get_version
# force setuptools to recognize that this is
# actually a binary distribution
# This gets the version from the most recent git tag, potentially concatinating
# a commit hash at the end.
current_version = get_version(
root = "..",
relative_to = __file__,
fallback_version='0.0.0-dev0'
)
optix_version = os.environ.get("OPTIX_VERSION", None)
if optix_version:
current_version = current_version + "." + optix_version
print(current_version)
setup(
# This package is called nvisii
name='nvisii',
install_requires = ['numpy>=1.19.5'],
packages = ['nvisii', "nvisii.importers"], # include the package "nvisii"
# make sure the shared library is included
package_data = {'': ("*.dll", "*.pyd", "*.so")},
include_package_data=True,
description='',
# See class BinaryDistribution that was defined earlier
distclass=BinaryDistribution,
version = current_version,
author='Nate Morrical',
author_email='',
maintainer='',
maintainer_email='',
python_requires = ">=3.6",
cmdclass={'install': InstallPlatlib},
)
| 28 | 83 | 0.710554 |
d26a2cfd9b0c9f91c37793b0017bd2b85c25f09b | 1,060 | py | Python | portal_gun/configuration/schemas/compute_aws.py | Coderik/portal-gun | 081020a46b16b649497bceb6c2435b1ba135b487 | [
"MIT"
] | 69 | 2018-05-03T18:25:43.000Z | 2021-02-10T11:37:28.000Z | portal_gun/configuration/schemas/compute_aws.py | Coderik/portal-gun | 081020a46b16b649497bceb6c2435b1ba135b487 | [
"MIT"
] | 7 | 2018-09-19T06:39:11.000Z | 2022-03-29T21:55:08.000Z | portal_gun/configuration/schemas/compute_aws.py | Coderik/portal-gun | 081020a46b16b649497bceb6c2435b1ba135b487 | [
"MIT"
] | 11 | 2018-07-30T18:09:12.000Z | 2019-10-03T15:36:13.000Z | from marshmallow import fields, Schema
from .provision import ProvisionActionSchema
| 24.090909 | 68 | 0.779245 |
d26a6bee5f324041d60e07e49f5e1f8b0a925d37 | 1,099 | py | Python | extras/unbundle.py | mstriemer/amo-validator | 35b502204183d783634207e7c2e7766ea1070ce8 | [
"BSD-3-Clause"
] | 1 | 2015-07-15T20:06:09.000Z | 2015-07-15T20:06:09.000Z | extras/unbundle.py | mstriemer/amo-validator | 35b502204183d783634207e7c2e7766ea1070ce8 | [
"BSD-3-Clause"
] | null | null | null | extras/unbundle.py | mstriemer/amo-validator | 35b502204183d783634207e7c2e7766ea1070ce8 | [
"BSD-3-Clause"
] | null | null | null | import sys
import os
import zipfile
from zipfile import ZipFile
from StringIO import StringIO
source = sys.argv[1]
target = sys.argv[2]
if not target.endswith("/"):
target = "%s/" % target
if not os.path.exists(target):
os.mkdir(target)
_unbundle(source, target)
| 22.895833 | 51 | 0.526843 |
d26afa5cb9899f00bda32076f95a8a1292054119 | 81,920 | py | Python | linuxOperation/app/domain/forms.py | zhouli121018/core | f9700204349ecb22d45e700e9e27e79412829199 | [
"MIT"
] | null | null | null | linuxOperation/app/domain/forms.py | zhouli121018/core | f9700204349ecb22d45e700e9e27e79412829199 | [
"MIT"
] | 1 | 2021-06-10T20:45:55.000Z | 2021-06-10T20:45:55.000Z | linuxOperation/app/domain/forms.py | zhouli121018/core | f9700204349ecb22d45e700e9e27e79412829199 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
import time
import os
import math
import json
from lib.forms import BaseFied, BaseFieldFormatExt, DotDict, BaseCfilterActionFied, BaseCfilterOptionFied
from app.core.models import Mailbox, MailboxUserAttr, Domain, CoCompany, CoreAlias, DomainAttr, \
Department, CoreConfig, CoreMonitor, CoreWhitelist
from app.domain.models import Signature, SecretMail, WmCustomerInfo, WmCustomerCate, WmTemplate
from app.utils.MailboxLimitChecker import MailboxLimitChecker
from django import forms
from django.db.models import Sum,Count
from lib import validators
from lib.formats import dict_compatibility
from lib.tools import clear_redis_cache, download_excel, GenerateRsaKeys, generate_rsa, get_unicode, get_string,\
get_system_user_id, get_system_group_id, recursion_make_dir, get_random_string, \
phpLoads, phpDumps, get_client_request
from lib.validators import check_domain, check_email_ordomain
from django_redis import get_redis_connection
from django.utils.translation import ugettext as _
import base64
import time
import copy
import constants
import chardet
from auditlog.api import api_create_admin_log
from app.core.constants import MAILBOX_SEND_PERMIT, MAILBOX_RECV_PERMIT
#
#
def is_valid(self):
if not self.domain_id.value:
self.valid = False
self.domain_id.set_error(_(u""))
return self.valid
self.check()
return self.valid
def check(self):
return self.valid
def checkSave(self):
if self.is_valid():
self.save()
#
#
#
#
#
#
#/
#
#
#webmail
#webmail---
#logo
#
#
#
#
| 39.083969 | 154 | 0.585278 |
d26b10ff6669fa3fb71b08771c9e2a65a51f7bb3 | 9,074 | py | Python | deep_coach.py | jendelel/rhl-algs | d5b8779d7e271265d4f0bfcb3602bc56958e3eb3 | [
"Apache-2.0"
] | 2 | 2019-03-30T23:29:10.000Z | 2019-04-05T21:54:21.000Z | deep_coach.py | jendelel/rhl-algs | d5b8779d7e271265d4f0bfcb3602bc56958e3eb3 | [
"Apache-2.0"
] | 3 | 2019-03-29T11:23:17.000Z | 2020-12-28T02:00:17.000Z | deep_coach.py | jendelel/rhl-algs | d5b8779d7e271265d4f0bfcb3602bc56958e3eb3 | [
"Apache-2.0"
] | null | null | null | from PyQt5 import QtGui, QtCore, QtWidgets
from collections import namedtuple
import time
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import utils
HumanFeedback = namedtuple('HumanFeedback', ['feedback_value'])
SavedAction = namedtuple('SavedAction', ['state', 'action', 'logprob'])
SavedActionsWithFeedback = namedtuple('SavedActionsWithFeedback', ['saved_actions', 'final_feedback'])
| 45.144279 | 119 | 0.622548 |
d26dbcccea877eec0764524f32244d3a230c796d | 434 | py | Python | model/DB Automation/add_db.py | chrisdcao/Covid_Map_Hanoi | 07d18cad8c1b4988795d9ec2aca5ae1fefdff892 | [
"MIT"
] | 1 | 2021-09-09T07:55:00.000Z | 2021-09-09T07:55:00.000Z | model/DB Automation/add_db.py | chrisdcao/Covid_Map_Hanoi | 07d18cad8c1b4988795d9ec2aca5ae1fefdff892 | [
"MIT"
] | null | null | null | model/DB Automation/add_db.py | chrisdcao/Covid_Map_Hanoi | 07d18cad8c1b4988795d9ec2aca5ae1fefdff892 | [
"MIT"
] | null | null | null | import pyodbc
import mysql.connector
conn = mysql.connector.connect(user='root', password='', port='3307', host='localhost', database='coviddb')
cursor = conn.cursor(buffered=True)
cursor.execute('SELECT * FROM coviddb.markers')
cursor.execute('''
INSERT INTO coviddb.markers(id, name, address, subject, lat, lng, type)
VALUES
('0','0','0','0','0','0','None')
''')
conn.commit()
| 22.842105 | 107 | 0.615207 |
d26eb1d2c5907c778452828f508b0d406c3d409a | 605 | py | Python | conduit_tests/fixtu.py | ArtuZi2/conduit | 804fc2b69dda7e244fc91025eb30ad1847b81f6a | [
"MIT"
] | null | null | null | conduit_tests/fixtu.py | ArtuZi2/conduit | 804fc2b69dda7e244fc91025eb30ad1847b81f6a | [
"MIT"
] | null | null | null | conduit_tests/fixtu.py | ArtuZi2/conduit | 804fc2b69dda7e244fc91025eb30ad1847b81f6a | [
"MIT"
] | null | null | null | import time
import pytest
# preparing selenium and chrome web driver manager
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
# importing os for environmental variable, and docker-compose up
import os
| 28.809524 | 79 | 0.77686 |
d26eb2453ea9164766469c382d7b579b2c3779e5 | 22,390 | py | Python | scope/utils.py | jasonmsetiadi/scope | e718998d0a7ac64e5f86554383030341dbe940f9 | [
"MIT"
] | 3 | 2021-03-05T01:32:34.000Z | 2022-01-19T03:13:44.000Z | scope/utils.py | jasonmsetiadi/scope | e718998d0a7ac64e5f86554383030341dbe940f9 | [
"MIT"
] | 57 | 2021-01-14T19:49:44.000Z | 2022-03-25T22:32:03.000Z | scope/utils.py | jasonmsetiadi/scope | e718998d0a7ac64e5f86554383030341dbe940f9 | [
"MIT"
] | 10 | 2021-01-08T19:59:24.000Z | 2022-02-16T10:54:44.000Z | __all__ = [
"Dataset",
"forgiving_true",
"load_config",
"log",
"make_tdtax_taxonomy",
"plot_gaia_density",
"plot_gaia_hr",
"plot_light_curve_data",
"plot_periods",
]
from astropy.io import fits
import datetime
import json
import healpy as hp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pathlib
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tqdm.auto import tqdm
from typing import Mapping, Optional, Union
import yaml
def load_config(config_path: Union[str, pathlib.Path]):
"""
Load config and secrets
"""
with open(config_path) as config_yaml:
config = yaml.load(config_yaml, Loader=yaml.FullLoader)
return config
def time_stamp():
"""
:return: UTC time as a formatted string
"""
return datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S")
def make_tdtax_taxonomy(taxonomy: Mapping):
"""Recursively convert taxonomy definition from config["taxonomy"]
into tdtax-parsable dictionary
:param taxonomy: config["taxonomy"] section
:return:
"""
tdtax_taxonomy = dict()
if taxonomy["class"] not in ("tds", "phenomenological", "ontological"):
tdtax_taxonomy["name"] = f"{taxonomy['class']}: {taxonomy['name']}"
else:
tdtax_taxonomy["name"] = taxonomy["name"]
if "subclasses" in taxonomy:
tdtax_taxonomy["children"] = []
for cls in taxonomy["subclasses"]:
tdtax_taxonomy["children"].append(make_tdtax_taxonomy(cls))
return tdtax_taxonomy
def plot_light_curve_data(
light_curve_data: pd.DataFrame,
period: Optional[float] = None,
title: Optional[str] = None,
save: Optional[str] = None,
):
"""Plot and save to file light curve data
:param light_curve_data:
:param period: float [days] if set, a phase-folded light curve will be displayed
:param title: plot title
:param save: path to save the plot
:return:
"""
plt.close("all")
# Official start of ZTF MSIP survey, March 17, 2018
jd_start = 2458194.5
colors = {
1: "#28a745",
2: "#dc3545",
3: "#00415a",
"default": "#f3dc11",
}
mask_good_data = light_curve_data["catflags"] == 0
df = light_curve_data.loc[mask_good_data]
if period is not None:
fig = plt.figure(figsize=(16, 9), dpi=200)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
else:
fig = plt.figure(figsize=(16, 5), dpi=200)
ax1 = fig.add_subplot(111)
if title is not None:
fig.suptitle(title, fontsize=24)
# plot different ZTF bands/filters
for band in df["filter"].unique():
mask_filter = df["filter"] == band
ax1.errorbar(
df.loc[mask_filter, "hjd"] - jd_start,
df.loc[mask_filter, "mag"],
df.loc[mask_filter, "magerr"],
marker=".",
color=colors[band],
lw=0,
)
if period is not None:
for n in [0, -1]:
ax2.errorbar(
(df.loc[mask_filter, "hjd"] - jd_start) / period % 1 + n,
df.loc[mask_filter, "mag"],
df.loc[mask_filter, "magerr"],
marker=".",
color=colors[band],
lw=0,
)
# invert y axes since we are displaying magnitudes
ax1.invert_yaxis()
if period is not None:
ax2.invert_yaxis()
ax1.set_xlabel("Time")
ax1.grid(lw=0.3)
if period is not None:
ax2.set_xlabel(f"phase [period={period:4.4g} days]")
ax2.set_xlim(-1, 1)
ax2.grid(lw=0.3)
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_periods(
features: pd.DataFrame,
limits: Optional[list] = None,
loglimits: Optional[bool] = False,
number_of_bins: Optional[int] = 20,
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot a histogram of periods for the sample"""
# plot the H-R diagram for 1 M stars within 200 pc from the Sun
plt.rc("text", usetex=True)
# make figure
fig, ax = plt.subplots(figsize=(6, 6))
if title is not None:
fig.suptitle(title, fontsize=24)
if limits is not None:
if loglimits:
edges = np.logspace(
np.log10(limits[0]), np.log10(limits[1]), number_of_bins
)
else:
edges = np.linspace(limits[0], limits[1], number_of_bins)
else:
if loglimits:
edges = np.linspace(
np.log10(0.9 * np.min(features["period"])),
np.log10(1.1 * np.max(features["period"])),
number_of_bins,
)
else:
edges = np.linspace(
0.9 * np.min(features["period"]),
1.1 * np.max(features["period"]),
number_of_bins,
)
hist, bin_edges = np.histogram(features["period"], bins=edges)
hist = hist / np.sum(hist)
bins = (bin_edges[1:] + bin_edges[:-1]) / 2.0
ax.plot(bins, hist, linestyle="-", drawstyle="steps")
ax.set_xlabel("Period [day]")
ax.set_ylabel("Probability Density Function")
# display grid behind all other elements on the plot
ax.set_axisbelow(True)
ax.grid(lw=0.3)
if loglimits:
ax.set_xscale("log")
ax.set_xlim([0.9 * bins[0], 1.1 * bins[-1]])
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_gaia_hr(
gaia_data: pd.DataFrame,
path_gaia_hr_histogram: Union[str, pathlib.Path],
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot the Gaia HR diagram with a sample of objects over-plotted
source: https://vlas.dev/post/gaia-dr2-hrd/
"""
# plot the H-R diagram for 1 M stars within 200 pc from the Sun
plt.rc("text", usetex=True)
# load background histogram
histogram = np.loadtxt(path_gaia_hr_histogram)
# make figure
fig, ax = plt.subplots(figsize=(6, 6), dpi=200)
if title is not None:
fig.suptitle(title, fontsize=24)
x_edges = np.arange(-0.681896, 5.04454978, 0.02848978)
y_edges = np.arange(-2.90934, 16.5665952, 0.0968952)
ax.pcolormesh(x_edges, y_edges, histogram.T, antialiased=False)
ax.set_xlim(x_edges[0], x_edges[-1])
ax.set_ylim(y_edges[0], y_edges[-1])
ax.invert_yaxis()
ax.set_xlabel(r"$G_{BP} - G_{RP}$")
ax.set_ylabel(r"$M_G$")
# plot sample data
ax.errorbar(
gaia_data["BP-RP"],
gaia_data["M"],
gaia_data["M"] - gaia_data["Ml"],
marker=".",
color="#e68a00",
alpha=0.75,
ls="",
lw=0.5,
)
# display grid behind all other elements on the plot
ax.set_axisbelow(True)
ax.grid(lw=0.3)
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_gaia_density(
positions: pd.DataFrame,
path_gaia_density: Union[str, pathlib.Path],
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot the RA/DEC Gaia density plot with a sample of objects over-plotted
source: https://vlas.dev/post/gaia-dr2-hrd/
"""
# plot the H-R diagram for 1 M stars within 200 pc from the Sun
plt.rc("text", usetex=True)
# load the data
hdulist = fits.open(path_gaia_density)
hist = hdulist[1].data["srcdens"][np.argsort(hdulist[1].data["hpx8"])]
# make figure
fig, ax = plt.subplots(figsize=(6, 6), dpi=200)
if title is not None:
fig.suptitle(title, fontsize=24)
# background setup
coordsys = ["C", "C"]
nest = True
# colormap
cm = plt.cm.get_cmap("viridis") # colorscale
cm.set_under("w")
cm.set_bad("w")
# plot the data in healpy
norm = "log"
hp.mollview(
hist,
norm=norm,
unit="Stars per sq. arcmin.",
cbar=False,
nest=nest,
title="",
coord=coordsys,
notext=True,
cmap=cm,
flip="astro",
nlocs=4,
min=0.1,
max=300,
)
ax = plt.gca()
image = ax.get_images()[0]
cbar = fig.colorbar(
image,
ax=ax,
ticks=[0.1, 1, 10, 100],
fraction=0.15,
pad=0.05,
location="bottom",
)
cbar.set_label("Stars per sq. arcmin.", size=12)
cbar.ax.tick_params(labelsize=12)
ax.tick_params(axis="both", which="major", labelsize=24)
# borders
lw = 3
pi = np.pi
dtor = pi / 180.0
theta = np.arange(0, 181) * dtor
hp.projplot(theta, theta * 0 - pi, "-k", lw=lw, direct=True)
hp.projplot(theta, theta * 0 + 0.9999 * pi, "-k", lw=lw, direct=True)
phi = np.arange(-180, 180) * dtor
hp.projplot(phi * 0 + 1.0e-10, phi, "-k", lw=lw, direct=True)
hp.projplot(phi * 0 + pi - 1.0e-10, phi, "-k", lw=lw, direct=True)
# ZTF
theta = np.arange(0.0, 360, 0.036)
phi = -30.0 * np.ones_like(theta)
hp.projplot(theta, phi, "k--", coord=["C"], lonlat=True, lw=2)
hp.projtext(170.0, -24.0, r"ZTF Limit", lonlat=True)
theta = np.arange(0.0, 360, 0.036)
# galaxy
for gallat in [15, 0, -15]:
phi = gallat * np.ones_like(theta)
hp.projplot(theta, phi, "w-", coord=["G"], lonlat=True, lw=2)
# ecliptic
for ecllat in [0, -30, 30]:
phi = ecllat * np.ones_like(theta)
hp.projplot(theta, phi, "w-", coord=["E"], lonlat=True, lw=2, ls=":")
# graticule
hp.graticule(ls="-", alpha=0.1, lw=0.5)
# labels
for lat in [60, 30, 0, -30, -60]:
hp.projtext(360.0, lat, str(lat), lonlat=True)
for lon in [0, 60, 120, 240, 300]:
hp.projtext(lon, 0.0, str(lon), lonlat=True)
# NWES
plt.text(0.0, 0.5, r"E", ha="right", transform=ax.transAxes, weight="bold")
plt.text(1.0, 0.5, r"W", ha="left", transform=ax.transAxes, weight="bold")
plt.text(
0.5,
0.992,
r"N",
va="bottom",
ha="center",
transform=ax.transAxes,
weight="bold",
)
plt.text(
0.5, 0.0, r"S", va="top", ha="center", transform=ax.transAxes, weight="bold"
)
color = "k"
lw = 10
alpha = 0.75
for pos in positions:
hp.projplot(
pos[0],
pos[1],
color=color,
markersize=5,
marker="o",
coord=coordsys,
lonlat=True,
lw=lw,
alpha=alpha,
zorder=10,
)
if save is not None:
fig.tight_layout()
plt.savefig(save)
""" Datasets """
| 31.01108 | 109 | 0.552121 |
d26ec52370aaf5a63c525e628d70b23d3bdd5697 | 1,787 | py | Python | spacy/lang/pt/lex_attrs.py | keshan/spaCy | 45c165af448783359f99673ab6b91492033bc66b | [
"MIT"
] | 1 | 2018-12-13T18:12:18.000Z | 2018-12-13T18:12:18.000Z | spacy/lang/pt/lex_attrs.py | keshan/spaCy | 45c165af448783359f99673ab6b91492033bc66b | [
"MIT"
] | null | null | null | spacy/lang/pt/lex_attrs.py | keshan/spaCy | 45c165af448783359f99673ab6b91492033bc66b | [
"MIT"
] | null | null | null | # coding: utf8
from __future__ import unicode_literals
from ...attrs import LIKE_NUM
_num_words = ['zero', 'um', 'dois', 'trs', 'tres', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez',
'onze', 'doze', 'dzia', 'dzias', 'duzia', 'duzias', 'treze', 'catorze', 'quinze', 'dezasseis',
'dezassete', 'dezoito', 'dezanove', 'vinte', 'trinta', 'quarenta', 'cinquenta', 'sessenta',
'setenta', 'oitenta', 'noventa', 'cem', 'cento', 'duzentos', 'trezentos', 'quatrocentos',
'quinhentos', 'seicentos', 'setecentos', 'oitocentos', 'novecentos', 'mil', 'milho', 'milhao',
'milhes', 'milhoes', 'bilho', 'bilhao', 'bilhes', 'bilhoes', 'trilho', 'trilhao', 'trilhes',
'trilhoes', 'quadrilho', 'quadrilhao', 'quadrilhes', 'quadrilhoes']
_ordinal_words = ['primeiro', 'segundo', 'terceiro', 'quarto', 'quinto', 'sexto',
'stimo', 'oitavo', 'nono', 'dcimo', 'vigsimo', 'trigsimo',
'quadragsimo', 'quinquagsimo', 'sexagsimo', 'septuagsimo',
'octogsimo', 'nonagsimo', 'centsimo', 'ducentsimo',
'trecentsimo', 'quadringentsimo', 'quingentsimo', 'sexcentsimo',
'septingentsimo', 'octingentsimo', 'nongentsimo', 'milsimo',
'milionsimo', 'bilionsimo']
LEX_ATTRS = {
LIKE_NUM: like_num
}
| 41.55814 | 111 | 0.567431 |
d26f1afb5207b56be2e3191794a04329185695ac | 1,818 | py | Python | factor calculation scripts/15.smoothearningstopriceratio.py | cagdemir/equity-index-predictors | 2546e72328de848222cb6a1c744ababab2058477 | [
"MIT"
] | null | null | null | factor calculation scripts/15.smoothearningstopriceratio.py | cagdemir/equity-index-predictors | 2546e72328de848222cb6a1c744ababab2058477 | [
"MIT"
] | null | null | null | factor calculation scripts/15.smoothearningstopriceratio.py | cagdemir/equity-index-predictors | 2546e72328de848222cb6a1c744ababab2058477 | [
"MIT"
] | 1 | 2021-07-21T12:24:51.000Z | 2021-07-21T12:24:51.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 29 18:00:53 2019
@author: Administrator
"""
import pdblp
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
con = pdblp.BCon(debug=False, port=8194, timeout=5000)
con.start()
index_tickers = ['NYA Index', 'SPX Index', 'CCMP Index','NDX Index','CDAX Index' ,'DAX Index',
'ASX Index','UKX Index', 'TPX Index','NKY Index', 'SHCOMP Index' ,
'SZCOMP Index','XUTUM Index','XU100 Index', 'MEXBOL Index',
'IBOV Index', 'IMOEX Index' , 'JALSH Index']
from datetime import date
start = '20040101'
firstday = '19990101'
today = date.today().strftime('%Y%m%d')
pe_ratio = con.bdh(index_tickers, 'PE RATIO', firstday, today)
pe_ratio_int = pe_ratio.interpolate(method='linear')
pe_ratio_int_w = pe_ratio_int.groupby(pd.Grouper(freq='W')).last()
#pe_ratio_last = pe_ratio_int_w[pe_ratio_int_w.index>=start]
#
#pe_ratio_last.columns = [i[0] for i in pe_ratio_last.columns]
#pe_ratio_last= pe_ratio_last[index_tickers]
pe_ratio_smoothed = pe_ratio_int_w.rolling(500, min_periods=100).mean()
var_no='15'
pe_ratio_smoothed_last = pe_ratio_smoothed[pe_ratio_smoothed.index>=start]
pe_ratio_smoothed_last.columns = [i[0] for i in pe_ratio_smoothed_last.columns]
pe_ratio_smoothed_last = pe_ratio_smoothed_last[index_tickers]
pe_ratio_smoothed_last.columns = [var_no+'_'+i for i in pe_ratio_smoothed_last.columns]
# pe_ratio_smoothed_last = pe_ratio_smoothed_last[index_tickers]
#pe_ratio_smoothed_last.columns = ['15_US_NY','15_US_SPX','15_US_CCMP', '15_DE','15_UK','15_JP','15_CH_SH','15_CH_SZ', '15_TR','15_MX','15_BR','15_RU','15_SA']
pe_ratio_smoothed_last.to_excel('C:/Users/sb0538/Desktop/15022020/excels/15_peratiosmoothed.xlsx')
| 33.054545 | 160 | 0.718372 |
d26f2516f232179df9832fdd43a7f139a4b6d7ba | 357 | py | Python | ms/commands/autopep8.py | edcilo/flask_ms_boilerplate | 6507b7b7e61ab227df40b4701faab9ec9866e732 | [
"MIT"
] | null | null | null | ms/commands/autopep8.py | edcilo/flask_ms_boilerplate | 6507b7b7e61ab227df40b4701faab9ec9866e732 | [
"MIT"
] | null | null | null | ms/commands/autopep8.py | edcilo/flask_ms_boilerplate | 6507b7b7e61ab227df40b4701faab9ec9866e732 | [
"MIT"
] | null | null | null | import click
import os
from flask.cli import with_appcontext
| 27.461538 | 93 | 0.661064 |
d26f95f1c9db6cafe8a214de467a08368f6b0271 | 2,378 | py | Python | py2ts/generate_service_registry.py | conanfanli/py2ts | 8543ad03f19f094b0771c3b0cfc26a89eefd95ed | [
"MIT"
] | 3 | 2020-04-10T22:09:44.000Z | 2020-11-29T07:19:28.000Z | py2ts/generate_service_registry.py | conanfanli/py2ts | 8543ad03f19f094b0771c3b0cfc26a89eefd95ed | [
"MIT"
] | 1 | 2020-04-11T14:25:50.000Z | 2020-04-11T14:25:50.000Z | py2ts/generate_service_registry.py | conanfanli/py2ts | 8543ad03f19f094b0771c3b0cfc26a89eefd95ed | [
"MIT"
] | 1 | 2021-05-15T09:22:41.000Z | 2021-05-15T09:22:41.000Z | #!/usr/bin/env python
import logging
import re
import subprocess
import sys
from typing import Dict
logger = logging.getLogger("py2ts.generate_service_registry")
logging.basicConfig(level=logging.INFO)
def get_service_registry_code(class_module_map: Dict[str, str]) -> str:
"""Return generated code for service registry."""
imports = []
services = []
for service_name, path in class_module_map.items():
imports.append(f"from {path} import {service_name}")
services.append(
f"{camel_to_snake(service_name)}: {service_name} = {service_name}()"
)
imports_code = "\n".join(imports)
services_code = "\n ".join(sorted(services))
return f"""
# Generated code. DO NOT EDIT!
from dataclasses import dataclass
{imports_code}
@dataclass
class ServiceRegistry:
{services_code}
service_registry = ServiceRegistry()
"""
if __name__ == "__main__":
try:
code = get_service_registry_code(get_class_module_map())
print(code)
except RipgrepError as e:
logger.error(e)
sys.exit(1)
| 27.976471 | 117 | 0.638352 |
d273a6d20b4812002b8ea1fa328f2d59bdbbb865 | 5,311 | py | Python | tasks/birds.py | CatherineWong/l3 | 53ed9dc99d9b247cb209333ae9b528974e5e7e96 | [
"Apache-2.0"
] | 46 | 2017-11-03T16:54:36.000Z | 2021-12-07T23:07:58.000Z | tasks/birds.py | CatherineWong/l3 | 53ed9dc99d9b247cb209333ae9b528974e5e7e96 | [
"Apache-2.0"
] | 7 | 2018-08-03T18:27:53.000Z | 2020-12-17T17:08:52.000Z | tasks/birds.py | CatherineWong/l3 | 53ed9dc99d9b247cb209333ae9b528974e5e7e96 | [
"Apache-2.0"
] | 6 | 2018-02-24T19:00:00.000Z | 2021-03-28T19:50:53.000Z | from misc import util
from collections import namedtuple
import csv
import numpy as np
import os
import pickle
import sys
N_EX = 4
Datum = namedtuple("Datum", ["hint", "ex_inputs", "input", "label"])
START = "<s>"
STOP = "</s>"
random = util.next_random()
birds_path = os.path.join(sys.path[0], "data/birds")
| 35.644295 | 96 | 0.559217 |
d273a8f81f9807f4dd16cdd363ca8063f3987151 | 2,261 | py | Python | tests/test_utils.py | caiosba/covid-19 | 2a0f43f5004e7e39bd982eaa36185859cd9db88f | [
"MIT"
] | 9 | 2020-03-23T19:04:04.000Z | 2020-03-28T02:11:14.000Z | tests/test_utils.py | caiosba/covid-19 | 2a0f43f5004e7e39bd982eaa36185859cd9db88f | [
"MIT"
] | 6 | 2020-03-22T14:10:08.000Z | 2020-04-05T01:53:29.000Z | tests/test_utils.py | caiosba/covid-19 | 2a0f43f5004e7e39bd982eaa36185859cd9db88f | [
"MIT"
] | 6 | 2020-03-23T18:15:26.000Z | 2020-04-05T01:49:40.000Z | import locale
import pytest
from covid.utils import fmt
| 33.25 | 58 | 0.530739 |
d27463b7bc3e1731eab5ba3103ed835b119f201f | 10,717 | py | Python | catkin_ws/src/10-lane-control/line_detector/src/line_detector_node.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src/10-lane-control/line_detector/src/line_detector_node.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src/10-lane-control/line_detector/src/line_detector_node.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | null | null | null | #!/usr/bin/env python
from anti_instagram.AntiInstagram import AntiInstagram
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import (AntiInstagramTransform, BoolStamped, Segment,
SegmentList, Vector2D, FSMState)
from duckietown_utils.instantiate_utils import instantiate
from duckietown_utils.jpg import image_cv_from_jpg
from geometry_msgs.msg import Point
from sensor_msgs.msg import CompressedImage, Image
from visualization_msgs.msg import Marker
from line_detector.timekeeper import TimeKeeper
import cv2
import rospy
import threading
import time
from line_detector.line_detector_plot import color_segment, drawLines
import numpy as np
if __name__ == '__main__':
rospy.init_node('line_detector',anonymous=False)
line_detector_node = LineDetectorNode()
rospy.on_shutdown(line_detector_node.onShutdown)
rospy.spin()
| 33.701258 | 122 | 0.624149 |
d274bf60d6abc1273072877c9d1d6cd1119e3863 | 776 | py | Python | django_qiniu/utils.py | 9nix00/django-qiniu | 08a403dc156b4971eef5af359048a6d2ce485245 | [
"MIT"
] | 1 | 2018-06-21T03:14:20.000Z | 2018-06-21T03:14:20.000Z | django_qiniu/utils.py | 9nix00/django-qiniu | 08a403dc156b4971eef5af359048a6d2ce485245 | [
"MIT"
] | null | null | null | django_qiniu/utils.py | 9nix00/django-qiniu | 08a403dc156b4971eef5af359048a6d2ce485245 | [
"MIT"
] | 1 | 2018-06-21T03:14:21.000Z | 2018-06-21T03:14:21.000Z | # -*- coding: utf-8 -*-
from account_helper.middleware import get_current_user_id
from django.utils import timezone
from django.conf import settings
from hashlib import sha1
import os
| 38.8 | 101 | 0.590206 |
d27508d001c149eb0a10e44188a30d1458aaa3a0 | 1,339 | py | Python | AxePy3Lib/01/re/re_test_patterns_groups.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2019-01-04T05:47:50.000Z | 2019-01-04T05:47:50.000Z | AxePy3Lib/01/re/re_test_patterns_groups.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | null | null | null | AxePy3Lib/01/re/re_test_patterns_groups.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Show the groups within the matches for a pattern.
"""
# end_pymotw_header
import re
def test_patterns(text, patterns):
"""Given source text and a list of patterns, look for
matches for each pattern within the text and print
them to stdout.
"""
# Look for each pattern in the text and print the results
for pattern, desc in patterns:
print('{!r} ({})\n'.format(pattern, desc))
print(' {!r}'.format(text))
for match in re.finditer(pattern, text):
s = match.start()
e = match.end()
prefix = ' ' * (s)
print(
' {}{!r}{} '.format(prefix,
text[s:e],
' ' * (len(text) - e)),
end=' ',
)
print(match.groups())
if match.groupdict():
print('{}{}'.format(
' ' * (len(text) - s),
match.groupdict()),
)
print()
return
if __name__ == '__main__':
patterns = [(r'a((a*)(b*))', 'a followed by 0-n a and 0-n b'),
(r'(?P<first>a+)(?P<second>c+)', 'pattern 2'), ]
test_patterns('accaaccca', patterns)
| 29.108696 | 66 | 0.477969 |
d275a759f35f51d02a7503058e5ce4b1b8c106f5 | 332 | py | Python | src/xr_embeds/urls.py | xr-web-de/xr-web | 63269e26a8752564b63e84bfc0ce180198577d35 | [
"MIT"
] | 4 | 2019-03-28T20:49:59.000Z | 2019-08-11T19:31:35.000Z | src/xr_embeds/urls.py | xr-web-de/xr-web | 63269e26a8752564b63e84bfc0ce180198577d35 | [
"MIT"
] | 4 | 2019-05-08T18:07:45.000Z | 2021-05-08T17:29:46.000Z | src/xr_embeds/urls.py | xr-web-de/xr-web | 63269e26a8752564b63e84bfc0ce180198577d35 | [
"MIT"
] | 5 | 2019-03-28T20:50:15.000Z | 2020-01-17T21:16:57.000Z | from django.urls import re_path
from xr_embeds.views import geojson_view, embed_html_view
app_name = "embeds"
urlpatterns = [
re_path(r"^(\d+)/html/$", embed_html_view, name="embed_html"),
re_path(
r"^geojson/(?P<model_slug>\w+)/(?P<query_slug>\w+)/$",
geojson_view,
name="geojson_view",
),
]
| 22.133333 | 66 | 0.638554 |
d276c726608d0b175c4c5a4e294d5b6baeab2166 | 377 | py | Python | templates/django/__APPNAME__/apps/utils/models.py | ba1dr/tplgenerator | f05b6f9a32cf825d326dd2faf551d1e156d2df37 | [
"MIT"
] | null | null | null | templates/django/__APPNAME__/apps/utils/models.py | ba1dr/tplgenerator | f05b6f9a32cf825d326dd2faf551d1e156d2df37 | [
"MIT"
] | null | null | null | templates/django/__APPNAME__/apps/utils/models.py | ba1dr/tplgenerator | f05b6f9a32cf825d326dd2faf551d1e156d2df37 | [
"MIT"
] | null | null | null |
from django.db import models
from django.core import signing
| 23.5625 | 82 | 0.732095 |
d2777dd29fcc8c927860ffb94a848ef3650dcd17 | 7,858 | py | Python | publications/admin.py | lukacu/django-publications | 663ace605925f53835f441c7761a6f4b0d2d4143 | [
"BSD-3-Clause"
] | null | null | null | publications/admin.py | lukacu/django-publications | 663ace605925f53835f441c7761a6f4b0d2d4143 | [
"BSD-3-Clause"
] | 3 | 2020-02-12T03:15:47.000Z | 2021-06-10T22:05:24.000Z | publications/admin.py | lukacu/django-publications | 663ace605925f53835f441c7761a6f4b0d2d4143 | [
"BSD-3-Clause"
] | 1 | 2018-07-23T11:46:37.000Z | 2018-07-23T11:46:37.000Z | # -*- Mode: python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from publications import list_import_formats, get_publications_importer
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
from django.contrib import admin
from django import forms
import publications.models
from publications.models import Publication, PublicationType, Group, Authorship, Person, Metadata, Import
from publications.fields import PeopleField
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
admin.site.register(Publication, PublicationAdmin)
admin.site.register(Group, GroupAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(PublicationType, PublicationTypeAdmin)
| 38.331707 | 241 | 0.673199 |
d2786b44c868c09be621a0323723a3881eb90dc7 | 7,379 | py | Python | tests/common/helpers/dut_utils.py | Rancho333/sonic-mgmt | c73836900f83c1a66b2121563511604a7b81807a | [
"Apache-2.0"
] | 2 | 2020-10-15T05:54:32.000Z | 2020-12-14T07:21:41.000Z | tests/common/helpers/dut_utils.py | Rancho333/sonic-mgmt | c73836900f83c1a66b2121563511604a7b81807a | [
"Apache-2.0"
] | 14 | 2021-08-04T05:50:21.000Z | 2021-12-14T10:06:38.000Z | tests/common/helpers/dut_utils.py | Rancho333/sonic-mgmt | c73836900f83c1a66b2121563511604a7b81807a | [
"Apache-2.0"
] | 7 | 2021-07-28T03:24:41.000Z | 2022-03-07T01:44:20.000Z | import logging
from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import get_host_visible_vars
from tests.common.utilities import wait_until
CONTAINER_CHECK_INTERVAL_SECS = 1
CONTAINER_RESTART_THRESHOLD_SECS = 180
logger = logging.getLogger(__name__)
def is_supervisor_node(inv_files, hostname):
"""Check if the current node is a supervisor node in case of multi-DUT.
@param inv_files: List of inventory file paths, In tests,
you can be get it from get_inventory_files in tests.common.utilities
@param hostname: hostname as defined in the inventory
Returns:
Currently, we are using 'card_type' in the inventory to make the decision. If 'card_type' for the node is defined in
the inventory, and it is 'supervisor', then return True, else return False. In future, we can change this
logic if possible to derive it from the DUT.
"""
dut_vars = get_host_visible_vars(inv_files, hostname)
if 'card_type' in dut_vars and dut_vars['card_type'] == 'supervisor':
return True
return False
def is_frontend_node(inv_files, hostname):
"""Check if the current node is a frontend node in case of multi-DUT.
@param inv_files: List of inventory file paths, In tests,
you can be get it from get_inventory_files in tests.common.utilities
@param hostname: hostname as defined in the inventory
Returns:
True if it is not any other type of node. Currently, the only other type of node supported is 'supervisor'
node. If we add more types of nodes, then we need to exclude them from this method as well.
"""
return not is_supervisor_node(inv_files, hostname)
def is_container_running(duthost, container_name):
"""Decides whether the container is running or not
@param duthost: Host DUT.
@param container_name: Name of a container.
Returns:
Boolean value. True represents the container is running
"""
running_containers = duthost.shell(r"docker ps -f 'status=running' --format \{\{.Names\}\}")['stdout_lines']
return container_name in running_containers
def check_container_state(duthost, container_name, should_be_running):
"""Determines whether a container is in the expected state (running/not running)
@param duthost: Host DUT.
@param container_name: Name of container.
@param should_be_running: Boolean value.
Returns:
This function will return True if the container was in the expected state.
Otherwise, it will return False.
"""
is_running = is_container_running(duthost, container_name)
return is_running == should_be_running
def is_hitting_start_limit(duthost, container_name):
"""Checks whether the container can not be restarted is due to start-limit-hit.
@param duthost: Host DUT.
@param ontainer_name: name of a container.
Returns:
If start limitation was hit, then this function will return True. Otherwise
it returns False.
"""
service_status = duthost.shell("sudo systemctl status {}.service | grep 'Active'".format(container_name))
for line in service_status["stdout_lines"]:
if "start-limit-hit" in line:
return True
return False
def clear_failed_flag_and_restart(duthost, container_name):
"""Clears the failed flag of a container and restart it.
@param duthost: Host DUT.
@param container_name: name of a container.
Returns:
None
"""
logger.info("{} hits start limit and clear reset-failed flag".format(container_name))
duthost.shell("sudo systemctl reset-failed {}.service".format(container_name))
duthost.shell("sudo systemctl start {}.service".format(container_name))
restarted = wait_until(CONTAINER_RESTART_THRESHOLD_SECS,
CONTAINER_CHECK_INTERVAL_SECS,
check_container_state, duthost, container_name, True)
pytest_assert(restarted, "Failed to restart container '{}' after reset-failed was cleared".format(container_name))
def get_group_program_info(duthost, container_name, group_name):
"""Gets program names, running status and their pids by analyzing the command
output of "docker exec <container_name> supervisorctl status". Program name
at here represents a program which is part of group <group_name>
Args:
duthost: Hostname of DUT.
container_name: A string shows container name.
program_name: A string shows process name.
Returns:
A dictionary where keys are the program names and values are their running
status and pids.
"""
group_program_info = defaultdict(list)
program_name = None
program_status = None
program_pid = None
program_list = duthost.shell("docker exec {} supervisorctl status".format(container_name), module_ignore_errors=True)
for program_info in program_list["stdout_lines"]:
if program_info.find(group_name) != -1:
program_name = program_info.split()[0].split(':')[1].strip()
program_status = program_info.split()[1].strip()
if program_status in ["EXITED", "STOPPED", "STARTING"]:
program_pid = -1
else:
program_pid = int(program_info.split()[3].strip(','))
group_program_info[program_name].append(program_status)
group_program_info[program_name].append(program_pid)
if program_pid != -1:
logger.info("Found program '{}' in the '{}' state with pid {}"
.format(program_name, program_status, program_pid))
return group_program_info
def get_program_info(duthost, container_name, program_name):
"""Gets program running status and its pid by analyzing the command
output of "docker exec <container_name> supervisorctl status"
Args:
duthost: Hostname of DUT.
container_name: A string shows container name.
program_name: A string shows process name.
Return:
Program running status and its pid.
"""
program_status = None
program_pid = -1
program_list = duthost.shell("docker exec {} supervisorctl status".format(container_name), module_ignore_errors=True)
for program_info in program_list["stdout_lines"]:
if program_info.find(program_name) != -1:
program_status = program_info.split()[1].strip()
if program_status == "RUNNING":
program_pid = int(program_info.split()[3].strip(','))
break
if program_pid != -1:
logger.info("Found program '{}' in the '{}' state with pid {}"
.format(program_name, program_status, program_pid))
return program_status, program_pid
def get_disabled_container_list(duthost):
"""Gets the container/service names which are disabled.
Args:
duthost: Host DUT.
Return:
A list includes the names of disabled containers/services
"""
disabled_containers = []
container_status, succeeded = duthost.get_feature_status()
pytest_assert(succeeded, "Failed to get status ('enabled'|'disabled') of containers. Exiting...")
for container_name, status in container_status.items():
if "disabled" in status:
disabled_containers.append(container_name)
return disabled_containers
| 40.322404 | 126 | 0.69359 |
d279c195499d050dd18e9f8e03c43e9d1fc1fd2d | 7,716 | py | Python | pyy1/.pycharm_helpers/python_stubs/-1550516950/_ctypes.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | null | null | null | pyy1/.pycharm_helpers/python_stubs/-1550516950/_ctypes.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | null | null | null | pyy1/.pycharm_helpers/python_stubs/-1550516950/_ctypes.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
# module _ctypes
# from /usr/lib/python3.5/lib-dynload/_ctypes.cpython-35m-x86_64-linux-gnu.so
# by generator 1.145
""" Create and manipulate C compatible data types in Python. """
# no imports
# Variables with simple values
FUNCFLAG_CDECL = 1
FUNCFLAG_PYTHONAPI = 4
FUNCFLAG_USE_ERRNO = 8
FUNCFLAG_USE_LASTERROR = 16
RTLD_GLOBAL = 256
RTLD_LOCAL = 0
_cast_addr = 140388692655680
_memmove_addr = 140388724844976
_memset_addr = 140388724996464
_string_at_addr = 140388692647104
_wstring_at_addr = 140388692653280
__version__ = '1.1.0'
# functions
def addressof(C_instance):
    """addressof(C instance) -> integer

    Generated stub for the C implementation, which reports the address of
    a ctypes instance's internal buffer. The stub always yields 0.
    """
    return 0
def alignment(C_type):
    """alignment(C type) -> integer
    alignment(C instance) -> integer

    Generated stub for the C implementation, which reports the alignment
    requirements of a ctypes type or instance. The stub always yields 0.
    """
    return 0
def buffer_info(*args, **kwargs):
    """Generated stub: return buffer interface information (no-op, None)."""
    return None
def byref(C_instance, offset=0):
    """byref(C instance[, offset=0]) -> byref-object

    Generated stub for the C implementation, which returns a pointer
    lookalike usable only as a function argument. The stub is a no-op.
    """
    return None
def dlclose(*args, **kwargs):
    """Generated stub: dlclose a library (no-op)."""
    return None
def dlopen(name, flag, *args, **kwargs):
    """dlopen(name, flag={RTLD_GLOBAL|RTLD_LOCAL}) open a shared library

    Generated stub: performs no work and returns None.
    """
    return None
def dlsym(*args, **kwargs):
    """Generated stub: find a symbol in a shared library (no-op)."""
    return None
def resize(*args, **kwargs):
    """Generated stub: resize the memory buffer of a ctypes instance (no-op)."""
    return None
def sizeof(C_type):
    """sizeof(C type) -> integer
    sizeof(C instance) -> integer

    Generated stub for the C implementation, which reports the size in
    bytes of a ctypes type or instance. The stub always yields 0.
    """
    return 0
# classes
# variables with complex values
# Auto-generated stub data: the real cache maps ctypes types to their
# POINTER() classes, but the stub generator could not serialize the keys
# or values, so every entry collapses to None. At runtime the duplicate
# dict keys fold into a single {None: None} entry.
_pointer_type_cache = {
    None: # (!) real value is ''
    None # (!) real value is ''
    ,
    None: # (!) real value is ''
    None # (!) real value is ''
    ,
    None: None, # (!) real value is ''
}

# Module dunders the generator could not serialize either.
__loader__ = None # (!) real value is ''

__spec__ = None # (!) real value is ''
| 27.459075 | 106 | 0.636729 |
d279e09431c6846b49df5d7a332c49cc36e64bc9 | 1,059 | py | Python | senseis/models/rc_action_model1.py | armandli/ReconChessRL | 3f3f018fd347ee17452ef6ad725d82f2f11678c6 | [
"MIT"
] | 4 | 2021-08-19T14:06:01.000Z | 2021-12-24T06:34:23.000Z | senseis/models/rc_action_model1.py | captainzhu123/ReconChessRL | 6d0de7acd7aeba0ad767e29c807ee0e6f30d95fb | [
"MIT"
] | 2 | 2021-09-18T08:34:01.000Z | 2022-03-23T07:06:05.000Z | senseis/models/rc_action_model1.py | captainzhu123/ReconChessRL | 6d0de7acd7aeba0ad767e29c807ee0e6f30d95fb | [
"MIT"
] | 1 | 2021-09-18T08:30:23.000Z | 2021-09-18T08:30:23.000Z | import torch
from torch import nn
from senseis.torch_modules.activation import relu_activation
from senseis.torch_modules.residual_layer import ResidualLayer1DV5, ResidualLayer2DV3
# Dueling Q Model
| 31.147059 | 85 | 0.674221 |
d27a9b4239643f2c105ea4c3f170a4d1c43a0714 | 5,506 | py | Python | win/msgbox.py | Zxynine/fusion360-thomasa88lib | c6570c9adffd06ec7b762032326805d13a99982e | [
"MIT"
] | 4 | 2021-11-19T17:24:44.000Z | 2022-03-18T13:17:21.000Z | win/msgbox.py | Zxynine/fusion360-thomasa88lib | c6570c9adffd06ec7b762032326805d13a99982e | [
"MIT"
] | 2 | 2021-04-15T05:47:55.000Z | 2021-12-07T17:36:53.000Z | win/msgbox.py | Zxynine/fusion360-thomasa88lib | c6570c9adffd06ec7b762032326805d13a99982e | [
"MIT"
] | 1 | 2021-12-04T23:07:53.000Z | 2021-12-04T23:07:53.000Z | # Message box functions
#
# This file is part of thomasa88lib, a library of useful Fusion 360
# add-in/script functions.
#
# Copyright (c) 2020 Thomas Axelsson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import adsk
import ctypes
# Must explicitly include wintypes for code to work at Fusion start-up
import ctypes.wintypes
# Handle to user32.dll; use_last_error=True lets ctypes capture
# GetLastError() safely via ctypes.get_last_error().
user32 = ctypes.WinDLL('user32', use_last_error=True)

# Callback prototype for Windows hook procedures: the first WINFUNCTYPE
# argument (LPARAM) is the return type, followed by (int code, WPARAM, LPARAM).
_hook_factory = ctypes.WINFUNCTYPE(ctypes.wintypes.LPARAM,
                                   ctypes.c_int,
                                   ctypes.wintypes.WPARAM, ctypes.wintypes.LPARAM)

# https://stackoverflow.com/a/31396340/106019
# NOTE(review): CWPRETSTRUCT (the ctypes.Structure describing the
# WH_CALLWNDPROCRET payload) must be defined earlier in this module --
# it is not visible in this chunk; confirm its declaration.
LPCWPRETSTRUCT = ctypes.POINTER(CWPRETSTRUCT)
# Icons
MB_ICONERROR = 0x00000010
MB_ICONQUESTION = 0x00000020
MB_ICONWARNING = 0x00000030
MB_ICONINFORMATION = 0x00000040
# Button configurations
MB_ABORTRETRYIGNORE = 0x00000002
MB_CANCELTRYCONTINUE = 0x00000006
MB_HELP = 0x00004000
MB_OK = 0x00000000
MB_OKCANCEL = 0x00000001
MB_RETRYCANCEL = 0x00000005
MB_YESNO = 0x00000004
MB_YESNOCANCEL = 0x00000003
# Default button
MB_DEFBUTTON1 = 0x00000000
MB_DEFBUTTON2 = 0x00000100
MB_DEFBUTTON3 = 0x00000200
MB_DEFBUTTON4 = 0x00000300
# Button IDs
IDOK = 1
IDCANCEL = 2
IDABORT = 3
IDRETRY = 4
IDIGNORE = 5
IDYES = 6
IDNO = 7
IDTRYAGAIN = 10
IDCONTINUE = 11
WM_INITDIALOG = 0x0110
WH_CALLWNDPROCRET = 12
user32.CallNextHookEx.restype = ctypes.wintypes.LPARAM
user32.CallNextHookEx.argtypes = (ctypes.wintypes.HHOOK,
ctypes.c_int,
ctypes.wintypes.WPARAM,
ctypes.wintypes.LPARAM)
user32.UnhookWindowsHookEx.argtypes = (ctypes.wintypes.HHOOK,)
user32.UnhookWindowsHookEx.restype = ctypes.wintypes.BOOL
user32.SetWindowsHookExW.restype = ctypes.wintypes.HHOOK
user32.SetWindowsHookExW.argtypes = (ctypes.c_int,
_hook_factory,
ctypes.wintypes.HINSTANCE,
ctypes.wintypes.DWORD)
user32.GetDlgItem.argtypes = (ctypes.wintypes.HWND, ctypes.c_int)
user32.GetDlgItem.restype = ctypes.wintypes.HWND
user32.GetActiveWindow.restype = ctypes.wintypes.HWND
def custom_msgbox(text, caption, dlg_type, label_map=None):
    '''Wrapper for MessageBox that allows setting button labels (Windows-only)

    Installs a WH_CALLWNDPROCRET hook on the current thread for the
    lifetime of the dialog so the standard button captions can be
    replaced, then shows the dialog and removes the hook.

    https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-messageboxw

    Parameters:
        text: Message body shown in the dialog.
        caption: Title bar text.
        dlg_type: Combination of MB_* flags (icon | buttons | default button).
        label_map: Optional mapping passed to _create_hook(); presumably it
            maps button IDs to replacement labels -- confirm against
            _create_hook(). Defaults to an empty mapping. (Previously this
            was a mutable default argument ``{}`` shared between calls; it
            is now created fresh per call.)

    Returns:
        The ID* constant of the button the user pressed.
    '''
    if label_map is None:
        label_map = {}
    win_thread_id = ctypes.windll.kernel32.GetCurrentThreadId()
    # This must not go out of scope as long as the hook is active
    c_hook = _hook_factory(_create_hook(label_map))
    hook_handle = user32.SetWindowsHookExW(WH_CALLWNDPROCRET, c_hook,
                                           ctypes.wintypes.HINSTANCE(0),
                                           win_thread_id)
    #error = ctypes.get_last_error()
    main_window = user32.GetActiveWindow()
    ret = user32.MessageBoxW(main_window, text, caption, dlg_type)
    if hook_handle:
        # Always unhook so we stop intercepting this thread's messages.
        user32.UnhookWindowsHookEx(hook_handle)
    return ret
| 36.463576 | 85 | 0.680349 |
d27af471667bb42fad40784d1d32e99b8d50d1f8 | 22,496 | py | Python | eispac/core/eiscube.py | MJWeberg/eispac | 8de2b282fc08da9ac66d48c396060aab6e17be70 | [
"MIT"
] | 11 | 2021-02-18T00:24:22.000Z | 2022-01-30T06:48:06.000Z | eispac/core/eiscube.py | MJWeberg/eispac | 8de2b282fc08da9ac66d48c396060aab6e17be70 | [
"MIT"
] | 23 | 2021-04-09T16:34:26.000Z | 2021-11-09T16:55:29.000Z | eispac/core/eiscube.py | MJWeberg/eispac | 8de2b282fc08da9ac66d48c396060aab6e17be70 | [
"MIT"
] | 5 | 2021-04-09T16:47:27.000Z | 2021-11-04T15:45:29.000Z | __all__ = ['EISCube']
import sys
import copy
import numpy as np
import astropy.units as u
from astropy.convolution import convolve, CustomKernel
from astropy.coordinates import SkyCoord
from ndcube import __version__ as ndcube_ver
from ndcube import NDCube
def crop_by_coords(self, *args, **kwargs):
    """Deprecated stub: this method no longer exists in NDCube 2.0.

    Prints an error on stderr pointing users at the replacement methods
    and returns None without performing any cropping.
    """
    msg = ('Error: crop_by_coords() was removed in NDCube 2.0. Please use'
           ' the .crop() or .crop_by_values() methods instead. See the'
           ' NDCube documentation for more information.')
    print(msg, file=sys.stderr)
    return None
def apply_radcal(self, input_radcal=None):
    """Apply a radiometric calibration curve (user-inputted or preflight)

    Parameters
    ----------
    input_radcal : array_like, optional
        User-inputted radiometric calibration curve. If set to None, will
        use the preflight radcal curve from the .meta dict. Default is None

    Returns
    -------
    output_cube : EISCube class instance
        A new EISCube class instance containing the calibrated data
    """
    if input_radcal is None:
        # Preflight radcal from HDF5 header file
        new_radcal = self.meta['radcal']
    else:
        # User-inputted radcal curve
        # NOTE: must match the length of the spectral (last) data axis
        new_radcal = np.array(input_radcal)
        if len(new_radcal) != self.data.shape[-1]:
            print('Error: input_radcal must have the same number of elements'
                  +' as the last dimension in the data array.')
            return self

    output_radcal = new_radcal
    if self.unit != u.photon:
        # Data is already calibrated: check whether we can swap curves
        if str(self.radcal) == 'unknown':
            print('Error: Data currently has an unknown radcal applied.'
                  +' Unable to apply new calibration.')
            return self
        elif np.all(self.radcal == new_radcal):
            print('Error: input_radcal is identical to current radcal.'
                  +' No calculation is required.')
            return self
        else:
            print('Warning: Data currently has a different radcal applied.'
                  +' Old calibration curve will be removed.')
            # Ratio of new to old curve re-calibrates data that is
            # already in intensity units in a single multiplication
            new_radcal = new_radcal/self.radcal

    new_data = self.data.copy()*new_radcal
    new_errs = self.uncertainty.array.copy()*new_radcal
    new_meta = copy.deepcopy(self.meta)
    new_meta['mod_index']['bunit'] = 'erg / (cm2 s sr)'
    new_meta['notes'].append('Applied radcal to convert photon counts to intensity')

    # wcs_mask = (np.array(tuple(reversed(self.wcs.array_shape))) <= 1).tolist()
    # Return a new cube; the original instance is left untouched
    output_cube = EISCube(new_data, wcs=self.wcs, uncertainty=new_errs,
                          wavelength=self.wavelength, radcal=output_radcal,
                          meta=new_meta, unit='erg / (cm2 s sr)',
                          # mask=self.mask, missing_axes=wcs_mask)
                          mask=self.mask)
    return output_cube
def remove_radcal(self):
    """Remove the applied radiometric calibration and convert data to counts

    Returns
    -------
    output_cube : EISCube class instance
        A new EISCube class instance containing the photon count data
    """
    if self.unit == u.photon:
        print('Error: Data is already in units of photon counts.'
              +' No calculation required.')
        return self
    elif str(self.radcal) == 'unknown':
        # Cannot invert a calibration we do not know
        print('Error: Data currently has an unknown radcal applied.'
              +' Unable to remove calibration.')
        return self

    # Dividing out the stored curve undoes the intensity conversion
    new_data = self.data.copy()/self.radcal
    new_errs = self.uncertainty.array.copy()/self.radcal
    new_meta = copy.deepcopy(self.meta)
    new_meta['mod_index']['bunit'] = 'photon'
    new_meta['notes'].append('Removed radcal to convert intensity to photon counts')

    # wcs_mask = (np.array(tuple(reversed(self.wcs.array_shape))) <= 1).tolist()
    # Return a new cube with radcal=None; the original is left untouched
    output_cube = EISCube(new_data, wcs=self.wcs, uncertainty=new_errs,
                          wavelength=self.wavelength, radcal=None,
                          meta=new_meta, unit='photon',
                          # mask=self.mask, missing_axes=wcs_mask)
                          mask=self.mask)
    return output_cube
def sum_spectra(self, wave_range=None, units=u.Angstrom):
    """Sum the data along the spectral axis.

    Parameters
    ----------
    wave_range : list of ints, floats, or Quantity instances
        Wavelength range to sum over. Values can be input as either
        [min, max] or [center, half width]. Units can be specified using
        either Astropy units instances or by inputting a pair of ints or
        floats and then also using the "units" keyword. If wave_range is set
        to None, then entire spectra will be summed over. Default is None.
    units : str or Quantity instance
        Units to be used for the wavelength range if wave_range is given a
        list of ints or floats. Will be ignored if either wave_range is None
        or is given a list with Astropy units. Default is 'Angstrom'.

    Returns
    -------
    output_cube : NDCube class instance
        A new NDCube class instance containing the summed data
    """
    if wave_range is None:
        # Sum over entire wavelength axis and return an NDCube
        try:
            new_wcs = self.wcs.dropaxis(0)
        except Exception:
            # Some WCS objects lack dropaxis(); fall back to slicing
            # out a single-wavelength WCS instead
            new_wcs = copy.deepcopy(self[:,:,0].wcs)
        sum_data = np.sum(self.data, axis=2)
        new_meta = copy.deepcopy(self.meta)
        new_meta['notes'].append('Summed over entire wavelength axis.')
        return NDCube(sum_data, new_wcs, meta=new_meta)

    # Validate input wavelength range
    if isinstance(wave_range, (list, tuple)):
        use_range = [0, 0]
        range_units = ['unknown', 'unknown']
        print('Summing EISCube spectra over a select wavelength range.')
        if len(wave_range) != 2:
            print('Error: invalid number of wave_range values. Please input'
                  +' a list or tuple with exactly two elements.',
                  file=sys.stderr)
            return None
    else:
        print('Error: invalid wave_range type. Please input either None or'
              +' a list (or tuple) with two elements.', file=sys.stderr)
        return None

    for w in range(2):
        if isinstance(wave_range[w], u.Quantity):
            # Parse an astropy.units.Quantity and convert as needed
            # Note: this will overwrite any inputs to the "units" kwarg
            if wave_range[w].unit == u.pix:
                use_range[w] = wave_range[w].value
                range_units[w] = u.pix
            elif wave_range[w].unit.physical_type == 'length':
                use_range[w] = wave_range[w].to('Angstrom').value
                range_units[w] = u.Angstrom
            else:
                print('Error: invalid wavelength unit. Please input a pixel'
                      +' or length unit.', file=sys.stderr)
                return None
        else:
            # Assume default or user inputted units (still convert if needed)
            input_units = u.Unit(units)
            if input_units == u.pix:
                use_range[w] = float(wave_range[w])
                range_units[w] = u.pix
            elif input_units.physical_type == 'length':
                u_scale = input_units.to('Angstrom')
                use_range[w] = float(wave_range[w])*u_scale
                range_units[w] = u.Angstrom
            else:
                print('Error: invalid wavelength unit. Please input a pixel'
                      +' or length unit.', file=sys.stderr)
                return None

    # Check for consistent units
    if range_units[0] != range_units[1]:
        print('Error: mismatched units. Please input the same units for'
              +' both wave_range elements or use the "units" keyword',
              file=sys.stderr)
        return None

    # If given values of [center, half width], compute the actual range
    if use_range[1] < use_range[0]:
        temp_center = use_range[0]
        temp_half_wid = use_range[1]
        use_range[0] = temp_center - temp_half_wid
        use_range[1] = temp_center + temp_half_wid

    # Get indices to be summed over
    w_indices = [0, -1]
    if range_units[0] == u.pix:
        # BUGFIX: loop over BOTH endpoints. Previously only the stale
        # loop variable `w` (left at 1 by the validation loop above) was
        # rounded, leaving w_indices[0] stuck at 0.
        for w in range(2):
            # Round pixel values to nearest whole index
            w_indices[w] = int(round(use_range[w]))
    elif range_units[0] == u.Angstrom:
        # Find the closest pixel location on the average wavelength axis
        try:
            # Note: the corrected wavelength has units of [Angstrom]
            w_coords = np.mean(self.wavelength, axis=(0,1))
        except KeyError:
            print('Error: missing or invalid corrected wavelength array.')
            return None
        for w in range(2):
            abs_w_diff = np.abs(w_coords - use_range[w])
            w_indices[w] = np.argmin(abs_w_diff)

    try:
        new_wcs = self.wcs.dropaxis(0)
    except Exception:
        new_wcs = copy.deepcopy(self[:,:,0].wcs)
    # Inclusive upper bound: +1 so the end pixel is part of the sum
    sum_data = np.sum(self.data[:,:,w_indices[0]:w_indices[1]+1], axis=2)
    new_meta = copy.deepcopy(self.meta)
    new_meta['notes'].append('Summed wavelength axis over the range of '
                             +str(use_range)+' '+str(range_units[0]))
    return NDCube(sum_data, new_wcs, meta=new_meta)
def smooth_cube(self, width=3, **kwargs):
    """Smooth the data along one or more spatial axes.

    Parameters
    ----------
    width : list or single value of ints, floats, or Quantity instances
        Number of pixels or angular distance to smooth over. If given a
        single value, only the y-axis will be smoothed. Floats and angular
        distances will be converted to the nearest whole pixel value.
        If a width value is even, width + 1 will be used instead.
        Default is width = 3
    **kwargs : keywords or dict
        Keyword arguments to be passed to the astropy.convolution.convolve()
        function.

    Returns
    -------
    output_cube : EISCube class instance
        A new EISCube class instance containing the smoothed data
    """
    # Validate input width
    num_dims = len(self.dimensions)
    wid_list = [1]*num_dims # NB: a width of 1 results in no smoothing
    if isinstance(width, (list, tuple)):
        # Note: we assume the last dim is always wavelength
        wid_list[0] = width[0]
        if num_dims > 2:
            wid_list[1] = width[1]
            print('Warning: smoothing over the x-axis can yield unexpected'
                  +' results due to the time interval between observations.'
                  +' Use with care.')
        if len(width) >= num_dims:
            print('Warning: smoothing over the wavelength axis is not'
                  +' supported. Only widths for the Y & X axes will be used')
    elif isinstance(width, (int, float, u.Quantity)):
        wid_list[0] = width # Only smooth along y-axis
    else:
        print('Error: invalid width data type. Please input an int, float,'
              +' or astropy.units.Quantity instance', file=sys.stderr)
        return None

    coord_ax = ['y', 'x', 'w']
    for w in range(len(wid_list)-1):
        # Parse a astropy.units.Quantity and convert to units of pixels
        if isinstance(wid_list[w], u.Quantity):
            if wid_list[w].unit == u.pix:
                wid_list[w] = wid_list[w].value
            elif not wid_list[w].unit.physical_type == 'angle':
                print('Error: invalid width unit. Please input a pixel or'
                      +' angular unit.', file=sys.stderr)
                return None
            else:
                try:
                    # Note: y & x scales are in units of [arcsec]/[pixel]
                    ax_scale = self.meta['pointing'][coord_ax[w]+'_scale']
                except KeyError:
                    print('Error: missing '+coord_ax[w]+'-axis scale.')
                    return None
                angular_wid_str = str(wid_list[w])
                wid_list[w] = wid_list[w].to('arcsec').value / ax_scale
                print('Note: on the '+coord_ax[w]+'-axis, '+angular_wid_str
                      +' is equivalent to '+str(wid_list[w])+' pixels.')
        # Round to nearest pixel and add 1 to even values
        wid_list[w] = int(round(wid_list[w]))
        if wid_list[w] % 2 == 0:
            wid_list[w] = wid_list[w] + 1

    # Create smoothing kernel with normalized weights (i.e. sum to 1)
    # Note: Using a 2D or 3D kernel allows us to smooth everything at once
    sm_weights = np.ones(wid_list) / (wid_list[0]*wid_list[1])
    sm_kernel = CustomKernel(sm_weights)

    # Calculate smoothed data and uncertainty values
    sm_data = convolve(self.data, sm_kernel, **kwargs)
    if self.uncertainty is not None:
        sm_errs = np.sqrt(convolve(self.uncertainty.array**2,
                                   sm_kernel, **kwargs))
    else:
        # BUGFIX: this was `sm_errs = none` (lowercase), which raised a
        # NameError whenever the cube had no uncertainty array
        sm_errs = None
    sm_data_mask = np.logical_or(np.isnan(sm_data), sm_data < 0)

    # Pack everything up in a new EISCube
    old_radcal = self.radcal
    new_meta = copy.deepcopy(self.meta)
    new_meta['notes'].append('Smoothed using pixel widths of '+str(wid_list))
    output_cube = EISCube(sm_data, wcs=self.wcs, uncertainty=sm_errs,
                          wavelength=self.wavelength, radcal=old_radcal,
                          meta=new_meta, unit=self.unit,
                          mask=sm_data_mask)
    return output_cube
| 46.00409 | 95 | 0.581526 |
d27bad13c9c160040228fa36a65d4924909e6f0d | 5,380 | py | Python | stylee/comments/serializers.py | jbaek7023/Stylee-API | ff0397ba2dc1ed17ff22c33f80eef5d13e6ae097 | [
"MIT"
] | 1 | 2020-03-06T00:34:39.000Z | 2020-03-06T00:34:39.000Z | stylee/comments/serializers.py | jbaek7023/Stylee-API | ff0397ba2dc1ed17ff22c33f80eef5d13e6ae097 | [
"MIT"
] | null | null | null | stylee/comments/serializers.py | jbaek7023/Stylee-API | ff0397ba2dc1ed17ff22c33f80eef5d13e6ae097 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from profiles.serializers import UserRowSerializer
from .models import Comment
User = get_user_model()
# content, user
# class CommentsOnPostSerializer(serializers.ModelSerializer):
# reply_count = serializers.SerializerMethodField()
# user = UserRowSerializer(read_only=True)
#
# class Meta:
# model = Comment
# fields = (
# 'id',
# 'user',
# 'content',
# 'created_at',
# 'reply_count',
# )
#
# def get_reply_count(self, obj):
# if obj.is_parent:
# return obj.children().count()
# return 0
| 29.723757 | 96 | 0.561152 |
d27bf22d6bf897be8eacbdeb8156e0811b013b5d | 3,497 | py | Python | stressTest/stressTestPV.py | bhill-slac/epics-stress-tests | bf895cdf84e3ef16819204fbf49f2dd54c9473fb | [
"BSD-3-Clause-LBNL"
] | null | null | null | stressTest/stressTestPV.py | bhill-slac/epics-stress-tests | bf895cdf84e3ef16819204fbf49f2dd54c9473fb | [
"BSD-3-Clause-LBNL"
] | null | null | null | stressTest/stressTestPV.py | bhill-slac/epics-stress-tests | bf895cdf84e3ef16819204fbf49f2dd54c9473fb | [
"BSD-3-Clause-LBNL"
] | null | null | null | #!/usr/bin/env python3
| 38.01087 | 98 | 0.543323 |
d27c6795141864bd67b93ea1ed9caca681ced3fd | 10,246 | py | Python | pysoundcloud/client.py | omarcostahamido/PySoundCloud | 1ca53a280c77f6b5f52868adefa332c4de56858f | [
"MIT"
] | 4 | 2021-09-15T06:40:02.000Z | 2022-01-16T03:31:59.000Z | pysoundcloud/client.py | AnthonyMakesStuff/PySoundCloud | 1ca53a280c77f6b5f52868adefa332c4de56858f | [
"MIT"
] | 1 | 2021-04-22T04:18:42.000Z | 2021-05-09T09:22:59.000Z | pysoundcloud/client.py | AnthonyMakesStuff/PySoundCloud | 1ca53a280c77f6b5f52868adefa332c4de56858f | [
"MIT"
] | 1 | 2020-09-05T02:14:37.000Z | 2020-09-05T02:14:37.000Z | import re
import requests
from typing import Union
from pysoundcloud.soundcloudplaylists import SoundCloudPlaylists
from pysoundcloud.soundcloudsearchresults import SoundCloudSearchResults
from pysoundcloud.soundcloudlikedtracks import SoundCloudLikedTracks
from pysoundcloud.soundcloudplaylist import SoundCloudPlaylist
from pysoundcloud.soundcloudtrack import SoundCloudTrack
from pysoundcloud.soundcloudrelatedtracks import SoundCloudRelatedTracks
| 44.547826 | 119 | 0.576127 |
d27cc7e2f11f688e99e9542aba655008056fb669 | 859 | py | Python | rojak-analyzer/generate_stopwords.py | pyk/rojak | 0dd69efedb58ee5d951e1a43cdfa65b60f8bb7c7 | [
"BSD-3-Clause"
] | 107 | 2016-10-02T05:54:42.000Z | 2021-08-05T00:20:51.000Z | rojak-analyzer/generate_stopwords.py | pyk/rojak | 0dd69efedb58ee5d951e1a43cdfa65b60f8bb7c7 | [
"BSD-3-Clause"
] | 134 | 2016-10-02T21:21:08.000Z | 2016-12-27T02:46:34.000Z | rojak-analyzer/generate_stopwords.py | pyk/rojak | 0dd69efedb58ee5d951e1a43cdfa65b60f8bb7c7 | [
"BSD-3-Clause"
] | 54 | 2016-10-02T08:47:56.000Z | 2020-03-08T00:56:03.000Z | # Run this script to create stopwords.py based on stopwords.txt
import json
if __name__ == '__main__':
    # NOTE(review): generate() is expected to be defined earlier in this
    # module (reading stopwords.txt and emitting stopwords.py per the
    # header comment) -- it is not visible in this chunk; confirm.
    generate('stopwords.txt', 'stopwords.py')
| 29.62069 | 71 | 0.622817 |
d27d80f790828621c13dad6b6615e88b5261c7f1 | 33,166 | py | Python | bcbio/structural/cnvkit.py | YTLogos/bcbio-nextgen | f964a25ab74a31551273b7e50518f3451c90f473 | [
"MIT"
] | 1 | 2019-08-29T07:55:48.000Z | 2019-08-29T07:55:48.000Z | bcbio/structural/cnvkit.py | YTLogos/bcbio-nextgen | f964a25ab74a31551273b7e50518f3451c90f473 | [
"MIT"
] | null | null | null | bcbio/structural/cnvkit.py | YTLogos/bcbio-nextgen | f964a25ab74a31551273b7e50518f3451c90f473 | [
"MIT"
] | null | null | null | """Copy number detection with CNVkit with specific support for targeted sequencing.
http://cnvkit.readthedocs.org
"""
import copy
import math
import operator
import os
import sys
import tempfile
import subprocess
import pybedtools
import numpy as np
import toolz as tz
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed.multi import run_multicore, zeromq_aware_logging
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import chromhacks
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import bedutils, effects, ploidy, population, vcfutils
from bcbio.structural import annotate, shared, plot
def run(items, background=None):
    """Detect copy number variations from batched set of samples using CNVkit.

    Normalizes a missing background to an empty list before dispatching
    to the type-specific runner.
    """
    return _cnvkit_by_type(items, background or [])
def _cnvkit_by_type(items, background):
    """Route samples to the single, tumor/normal or population CNVkit runner.

    Dispatch rules, in order: a lone sample (items plus background total
    one) runs standalone; a sample with a paired tumor/normal phenotype
    runs the somatic workflow; everything else is a population analysis.
    """
    if len(items) + len(background) == 1:
        return _run_cnvkit_single(items[0])
    if vcfutils.get_paired_phenotype(items[0]):
        return _run_cnvkit_cancer(items, background)
    return _run_cnvkit_population(items, background)
def _associate_cnvkit_out(ckouts, items, is_somatic=False):
    """Associate cnvkit output with individual items.

    ckouts and items are parallel lists; each output dict is deep-copied,
    tagged with the caller name, optionally annotated with derived files
    (segments, gain/loss, variant calls, bedgraphs, plots) and appended
    to the sample's "sv" list.
    """
    assert len(ckouts) == len(items)
    out = []
    for ckout, data in zip(ckouts, items):
        # Copy so annotation does not mutate the shared input dict
        ckout = copy.deepcopy(ckout)
        ckout["variantcaller"] = "cnvkit"
        # Only annotate when segmentation produced real calls
        if utils.file_exists(ckout["cns"]) and _cna_has_values(ckout["cns"]):
            ckout = _add_seg_to_output(ckout, data)
            ckout = _add_gainloss_to_output(ckout, data)
            ckout = _add_segmetrics_to_output(ckout, data)
            ckout = _add_variantcalls_to_output(ckout, data, is_somatic)
            # ckout = _add_coverage_bedgraph_to_output(ckout, data)
            ckout = _add_cnr_bedgraph_and_bed_to_output(ckout, data)
            # Plot generation is opt-in via the svplots tool flag
            if "svplots" in dd.get_tools_on(data):
                ckout = _add_plots_to_output(ckout, data)
        if "sv" not in data:
            data["sv"] = []
        data["sv"].append(ckout)
        out.append(data)
    return out
def _run_cnvkit_single(data, background=None):
    """Run CNVkit on one sample, optionally against background BAMs.

    Returns a one-element list: either the sample annotated with CNVkit
    output, or the unchanged input when CNVkit produced nothing.
    """
    ckouts = _run_cnvkit_shared([data], background or [])
    if not ckouts:
        return [data]
    assert len(ckouts) == 1
    return _associate_cnvkit_out(ckouts, [data])
def _run_cnvkit_cancer(items, background):
    """Run CNVkit on a tumor/normal pair.

    The tumor is evaluated against the matched normal (when present) as
    background; only the tumor sample gets CNVkit annotations, with the
    normal samples passed through unchanged.
    """
    paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
    # Everything that is not the tumor is treated as normal/passthrough
    normal_data = [x for x in items if dd.get_sample_name(x) != paired.tumor_name]
    tumor_ready, normal_ready = _match_batches(paired.tumor_data, normal_data[0] if normal_data else None)
    ckouts = _run_cnvkit_shared([tumor_ready], [normal_ready] if normal_ready else [])
    if not ckouts:
        return items
    assert len(ckouts) == 1
    # is_somatic=True enables tumor-specific variant call annotation
    tumor_data = _associate_cnvkit_out(ckouts, [paired.tumor_data], is_somatic=True)
    return tumor_data + normal_data
def _match_batches(tumor, normal):
    """Ensure a tumor/normal pair shares a single batch name.

    With no normal sample both inputs are returned untouched. Otherwise
    deep copies are made and the one batch common to both samples is
    written into each copy's metadata; exactly one shared batch must
    exist or an AssertionError is raised.
    """
    if not normal:
        return tumor, normal
    tumor = copy.deepcopy(tumor)
    normal = copy.deepcopy(normal)
    shared_batches = set(_get_batch(tumor)) & set(_get_batch(normal))
    assert len(shared_batches) == 1, \
        "No batch overlap: %s and %s" % (_get_batch(tumor), _get_batch(normal))
    batch = shared_batches.pop()
    tumor["metadata"]["batch"] = batch
    normal["metadata"]["batch"] = batch
    return tumor, normal
def _run_cnvkit_population(items, background):
    """Run CNVkit on a population of samples.

    Tries to calculate background based on case/controls, otherwise
    uses samples from the same batch as background.
    """
    if background and len(background) > 0:
        inputs = items
    else:
        # No explicit background: derive one from case/control metadata
        inputs, background = shared.find_case_control(items)

    # if we have case/control organized background or a single sample
    if len(inputs) == 1 or len(background) > 0:
        ckouts = _run_cnvkit_shared(inputs, background)
        return _associate_cnvkit_out(ckouts, inputs) + background
    # otherwise run each sample with the others in the batch as background
    else:
        out = []
        for cur_input in items:
            # Leave-one-out: all other batch members become background
            background = [d for d in items if dd.get_sample_name(d) != dd.get_sample_name(cur_input)]
            ckouts = _run_cnvkit_shared([cur_input], background)
            out.extend(_associate_cnvkit_out(ckouts, [cur_input]))
        return out
def _prep_cmd(cmd, tx_out_file):
"""Wrap CNVkit commands ensuring we use local temporary directories.
"""
cmd = " ".join(cmd) if isinstance(cmd, (list, tuple)) else cmd
return "export TMPDIR=%s && %s" % (os.path.dirname(tx_out_file), cmd)
def _bam_to_outbase(bam_file, work_dir, data):
    """Map a BAM file to the base path CNVkit outputs are written under.

    Returns a pair (batched_base, legacy_base): the batch-suffixed path
    used for new runs, plus the older non-batch path so results from
    previous versions can be reused instead of recalculated.
    """
    batch = dd.get_batch(data) or dd.get_sample_name(data)
    stem = os.path.basename(bam_file).split(".")[0]
    legacy_base = os.path.join(work_dir, stem)
    return "%s-%s" % (legacy_base, batch), legacy_base
def _run_cnvkit_shared(inputs, backgrounds):
    """Shared functionality to run CNVkit, parallelizing over multiple BAM files.

    Returns a list of per-input dicts with "cnr"/"cns"/"back_cnn" paths,
    or {} when no target regions remain after filtering.

    NOTE(review): this is Python 2 era code -- `zip(...) + zip(...)`
    relies on zip returning lists and `reduce` on the py2 builtin;
    confirm before porting to Python 3.
    """
    work_dir = _sv_workdir(inputs[0])
    raw_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
    background_name = dd.get_sample_name(backgrounds[0]) if backgrounds else "flat"
    background_cnn = os.path.join(raw_work_dir, "%s_background.cnn" % (background_name))
    ckouts = []
    for cur_input in inputs:
        cur_raw_work_dir = utils.safe_makedir(os.path.join(_sv_workdir(cur_input), "raw"))
        out_base, out_base_old = _bam_to_outbase(dd.get_align_bam(cur_input), cur_raw_work_dir, cur_input)
        # Reuse pre-batch-naming outputs from older runs when present
        if utils.file_exists(out_base_old + ".cns"):
            out_base = out_base_old
        ckouts.append({"cnr": "%s.cnr" % out_base,
                       "cns": "%s.cns" % out_base,
                       "back_cnn": background_cnn})
    # Only (re)compute when the first sample's segmentation is missing
    if not utils.file_exists(ckouts[0]["cns"]):
        cov_interval = dd.get_coverage_interval(inputs[0])
        raw_target_bed, access_bed = _get_target_access_files(cov_interval, inputs[0], work_dir)
        # bail out if we ended up with no regions
        if not utils.file_exists(raw_target_bed):
            return {}
        raw_target_bed = annotate.add_genes(raw_target_bed, inputs[0])
        parallel = {"type": "local", "cores": dd.get_cores(inputs[0]), "progs": ["cnvkit"]}
        target_bed, antitarget_bed = _cnvkit_targets(raw_target_bed, access_bed, cov_interval,
                                                     raw_work_dir, inputs[0])
        # Tag each sample with its role so coverage files can be split later
        samples_to_run = zip(["background"] * len(backgrounds), backgrounds) + \
                         zip(["evaluate"] * len(inputs), inputs)
        # One coverage .cnn per sample per (target, antitarget) BED
        raw_coverage_cnns = [_cnvkit_coverage(cdata, bed, itype) for itype, cdata in samples_to_run
                             for bed in [target_bed, antitarget_bed]]
        # Attach noise metrics per BAM (grouped target+antitarget pairs)
        coverage_cnns = reduce(operator.add,
                               [_cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, inputs + backgrounds)
                                for cnns in tz.groupby("bam", raw_coverage_cnns).values()])
        # Build the pooled (or flat) reference from the chosen backgrounds
        background_cnn = _cnvkit_background(_select_background_cnns(coverage_cnns),
                                            background_cnn, target_bed, antitarget_bed, inputs[0])
        fixed_cnrs = run_multicore(_cnvkit_fix,
                                   [(cnns, background_cnn, inputs + backgrounds) for cnns in
                                    tz.groupby("bam", [x for x in coverage_cnns
                                                       if x["itype"] == "evaluate"]).values()],
                                   inputs[0]["config"], parallel)
        # Side-effect only: write the .cns segmentation for each sample
        [_cnvkit_segment(cnr, cov_interval, data) for cnr, data in fixed_cnrs]
    return ckouts
def _cnvkit_segment(cnr_file, cov_interval, data):
    """Perform segmentation and copy number calling on normalized inputs

    Produces <cnr_base>.cns next to the input .cnr file; writes a
    header-only file when the .cnr has no usable values.
    """
    out_file = "%s.cns" % os.path.splitext(cnr_file)[0]
    if not utils.file_uptodate(out_file, cnr_file):
        with file_transaction(data, out_file) as tx_out_file:
            if not _cna_has_values(cnr_file):
                # Empty input: emit just the .cns header so downstream
                # steps find a well-formed (if empty) file
                with open(tx_out_file, "w") as out_handle:
                    out_handle.write("chromosome\tstart\tend\tgene\tlog2\tprobes\tCN1\tCN2\tbaf\tweight\n")
            else:
                cmd = [_get_cmd(), "segment", "-p", str(dd.get_cores(data)),
                       "-o", tx_out_file, cnr_file]
                small_vrn_files = _compatible_small_variants(data)
                # Feed SNV calls for BAF-aware segmentation on targeted runs
                if len(small_vrn_files) > 0 and _cna_has_values(cnr_file) and cov_interval != "genome":
                    cmd += ["-v", small_vrn_files[0]]
                # Whole genome: lower the significance threshold
                if cov_interval == "genome":
                    cmd += ["--threshold", "0.00001"]
                # preferentially use conda installed Rscript
                export_cmd = ("%s && export TMPDIR=%s && "
                              % (utils.get_R_exports(), os.path.dirname(tx_out_file)))
                do.run(export_cmd + " ".join(cmd), "CNVkit segment")
    return out_file
def _cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, items):
    """Estimate noise of a sample using a flat background.

    Only used for panel/targeted data due to memory issues with whole genome
    samples. Mutates each input cnn dict, attaching the parsed metrics
    under the "metrics" key, and returns the annotated list.
    """
    if cov_interval == "genome":
        # Skip for WGS: metrics against a flat reference are too costly
        return cnns
    target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0]
    background_file = "%s-flatbackground.cnn" % utils.splitext_plus(target_cnn)[0]
    background_file = _cnvkit_background([], background_file, target_bed, antitarget_bed, items[0])
    # Normalize + segment against the flat reference to measure noise
    cnr_file, data = _cnvkit_fix_base(cnns, background_file, items, "-flatbackground")
    cns_file = _cnvkit_segment(cnr_file, cov_interval, data)
    metrics_file = "%s-metrics.txt" % utils.splitext_plus(target_cnn)[0]
    if not utils.file_exists(metrics_file):
        with file_transaction(data, metrics_file) as tx_metrics_file:
            cmd = [_get_cmd(), "metrics", "-o", tx_metrics_file, "-s", cns_file, "--", cnr_file]
            do.run(_prep_cmd(cmd, tx_metrics_file), "CNVkit metrics")
    metrics = _read_metrics_file(metrics_file)
    out = []
    for cnn in cnns:
        # Same metrics dict shared across the sample's target/antitarget pair
        cnn["metrics"] = metrics
        out.append(cnn)
    return out
def _select_background_cnns(cnns):
"""Select cnns to use for background calculations.
Uses background samples in cohort, and will remove CNNs with high
on target variability. Uses (number of segments * biweight midvariance) as metric
for variability with higher numbers being more unreliable.
"""
min_for_variability_analysis = 20
pct_keep = 0.10
b_cnns = [x for x in cnns if x["itype"] == "background" and x.get("metrics")]
assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background"
if len(b_cnns) >= min_for_variability_analysis:
b_cnns_w_metrics = []
for b_cnn in b_cnns:
unreliability = b_cnn["metrics"]["segments"] * b_cnn["metrics"]["bivar"]
b_cnns_w_metrics.append((unreliability, b_cnn))
b_cnns_w_metrics.sort()
to_keep = int(math.ceil(pct_keep * len(b_cnns) / 2.0) * 2)
b_cnns = [x[1] for x in b_cnns_w_metrics][:to_keep]
assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background"
return [x["file"] for x in b_cnns]
def _cnvkit_background(background_cnns, out_file, target_bed, antitarget_bed, data):
    """Calculate the CNVkit background reference.

    Handles the flat case: with no normal-sample CNNs, builds the reference
    from the target/antitarget regions instead.
    """
    if utils.file_exists(out_file):
        return out_file
    with file_transaction(data, out_file) as tx_out_file:
        cmd = [_get_cmd(), "reference", "-f", dd.get_ref_file(data), "-o", tx_out_file]
        if background_cnns:
            cmd += background_cnns
        else:
            cmd += ["-t", target_bed, "-a", antitarget_bed]
        do.run(_prep_cmd(cmd, tx_out_file), "CNVkit background")
    return out_file
def _cnvkit_coverage(data, bed_file, input_type):
    """Calculate coverage in a BED file for CNVkit.

    Returns a dict describing the resulting coverage CNN: input type
    ("itype"), output file, source BAM, CNN type and sample name.
    """
    bam_file = dd.get_align_bam(data)
    work_dir = utils.safe_makedir(os.path.join(_sv_workdir(data), "raw"))
    # Map the BED suffix to the CNN type and output extension CNVkit expects.
    exts = {".target.bed": ("target", "targetcoverage.cnn"),
            ".antitarget.bed": ("antitarget", "antitargetcoverage.cnn")}
    cnntype = None
    for orig, (cur_cnntype, ext) in exts.items():
        if bed_file.endswith(orig):
            cnntype = cur_cnntype
            break
    if cnntype is None:
        assert bed_file.endswith(".bed"), "Unexpected BED file extension for coverage %s" % bed_file
        cnntype = ""
        # NOTE(review): in this fallback `ext` keeps whatever value the last
        # loop iteration left behind rather than being set explicitly --
        # confirm this is the intended output extension for plain .bed inputs.
    base, base_old = _bam_to_outbase(bam_file, work_dir, data)
    out_file = "%s.%s" % (base, ext)
    out_file_old = "%s.%s" % (base_old, ext)
    # back compatible with previous runs to avoid re-calculating
    if utils.file_exists(out_file_old):
        out_file = out_file_old
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cmd = [_get_cmd(), "coverage", "-p", str(dd.get_cores(data)), bam_file, bed_file, "-o", tx_out_file]
            do.run(_prep_cmd(cmd, tx_out_file), "CNVkit coverage")
    return {"itype": input_type, "file": out_file, "bam": bam_file, "cnntype": cnntype,
            "sample": dd.get_sample_name(data)}
def _cnvkit_targets(raw_target_bed, access_bed, cov_interval, work_dir, data):
    """Create target and antitarget regions from target and access files.

    Output files are batch-suffixed; pre-existing un-suffixed files from
    older runs are reused to avoid recalculating. Bin sizes come from the
    coverage-based estimate when available.
    """
    batch = dd.get_batch(data) or dd.get_sample_name(data)
    basename = os.path.splitext(os.path.basename(raw_target_bed))[0]
    target_bed = os.path.join(work_dir, "%s-%s.target.bed" % (basename, batch))
    # back compatible with previous runs to avoid re-calculating
    target_bed_old = os.path.join(work_dir, "%s.target.bed" % basename)
    if utils.file_exists(target_bed_old):
        target_bed = target_bed_old
    if not utils.file_exists(target_bed):
        with file_transaction(data, target_bed) as tx_out_file:
            cmd = [_get_cmd(), "target", raw_target_bed, "--split", "-o", tx_out_file]
            bin_estimates = _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data)
            if bin_estimates.get("target"):
                cmd += ["--avg-size", str(bin_estimates["target"])]
            do.run(_prep_cmd(cmd, tx_out_file), "CNVkit target")
    antitarget_bed = os.path.join(work_dir, "%s-%s.antitarget.bed" % (basename, batch))
    antitarget_bed_old = os.path.join(work_dir, "%s.antitarget.bed" % basename)
    # back compatible with previous runs to avoid re-calculating
    if os.path.exists(antitarget_bed_old):
        antitarget_bed = antitarget_bed_old
    if not os.path.exists(antitarget_bed):
        # NOTE(review): this branch checks os.path.exists while the target
        # branch above uses utils.file_exists -- confirm whether zero-size
        # antitarget files should also trigger recalculation.
        with file_transaction(data, antitarget_bed) as tx_out_file:
            cmd = [_get_cmd(), "antitarget", "-g", access_bed, target_bed, "-o", tx_out_file]
            bin_estimates = _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data)
            if bin_estimates.get("antitarget"):
                cmd += ["--avg-size", str(bin_estimates["antitarget"])]
            do.run(_prep_cmd(cmd, tx_out_file), "CNVkit antitarget")
    return target_bed, antitarget_bed
def _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data):
    """Estimate good coverage bin sizes for target regions based on coverage.

    Runs CNVkit's coverage_bin_size.py script and parses its tab-separated
    output, clamping the suggested sizes into per-method ranges. Returns a
    dict with optional 'target'/'antitarget' average bin sizes; empty when
    estimation failed.
    """
    batch = dd.get_batch(data) or dd.get_sample_name(data)
    out_file = os.path.join(work_dir, "%s-%s-bin_estimate.txt" % (
        os.path.splitext(os.path.basename(raw_target_bed))[0], batch))
    method_map = {"genome": "wgs", "regional": "hybrid", "amplicon": "amplicon"}
    if not os.path.exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cmd = [_get_cmd("coverage_bin_size.py"), dd.get_align_bam(data),
                   "-m", method_map[cov_interval], "-t", raw_target_bed,
                   "-g", access_bed]
            # Joined into a shell string so stdout can be redirected to the output.
            cmd = " ".join(cmd) + " > " + tx_out_file
            try:
                do.run(_prep_cmd(cmd, tx_out_file), "CNVkit coverage bin estimation", log_error=False)
            except subprocess.CalledProcessError:
                # Estimation is best effort; write a marker file so we do not retry.
                logger.info("Bin size estimate failed, using default values")
                with open(tx_out_file, "w") as out_handle:
                    out_handle.write("Bin size estimate failed, using default values")
    avg_bin_sizes = {}
    # Map the script's report labels onto target/antitarget categories.
    estimate_map = {"On-target": "target", "Off-target": "antitarget",
                    "Genome": "target", "Targets (sampling)": "target"}
    # Allowed (min, max) bin size per (coverage interval, category).
    # NOTE(review): there is no ("genome", "antitarget") entry, so an
    # "Off-target" line in a whole-genome report would raise KeyError --
    # confirm the script never emits that combination for wgs runs.
    range_map = {("genome", "target"): (500, 1000),
                 ("regional", "target"): (50, 267), ("regional", "antitarget"): (20000, 200000),
                 ("amplicon", "target"): (50, 267)}
    with open(out_file) as in_handle:
        for line in in_handle:
            if line.startswith(tuple(estimate_map.keys())):
                name, depth, bin_size = line.strip().split("\t")
                name = estimate_map[name.replace(":", "").strip()]
                try:
                    bin_size = int(bin_size)
                except ValueError:
                    # Non-numeric estimate (e.g. the failure marker line).
                    bin_size = None
                if bin_size and bin_size > 0:
                    cur_min, cur_max = range_map[(cov_interval, name)]
                    # Clamp into the allowed range for this method/category.
                    avg_bin_sizes[name] = max(min(bin_size, cur_max), cur_min)
    return avg_bin_sizes
def _get_target_access_files(cov_interval, data, work_dir):
    """Retrieve target and access files based on the type of data to process.

    Pick targets, anti-targets and access files based on analysis type:
    http://cnvkit.readthedocs.org/en/latest/nonhybrid.html

    Returns a (target_bed, access_file) tuple.
    """
    base_regions = shared.get_base_cnv_regions(data, work_dir)
    target_bed = bedutils.sort_merge(base_regions, data, out_dir=work_dir)
    if cov_interval in ("amplicon", "genome"):
        # Non-hybrid protocols use the target regions as the accessible
        # regions (the two branches previously duplicated this return).
        return target_bed, target_bed
    else:
        access_file = _create_access_file(dd.get_ref_file(data), _sv_workdir(data), data)
        return target_bed, access_file
def _add_seg_to_output(out, data):
    """Export outputs to 'seg' format compatible with IGV and GenePattern."""
    seg_file = "%s.seg" % os.path.splitext(out["cns"])[0]
    if not utils.file_exists(seg_file):
        with file_transaction(data, seg_file) as tx_seg_file:
            cnvkit = os.path.join(os.path.dirname(sys.executable), "cnvkit.py")
            do.run([cnvkit, "export", "seg", "-o", tx_seg_file, out["cns"]],
                   "CNVkit export seg")
    out["seg"] = seg_file
    return out
def _compatible_small_variants(data):
"""Retrieve small variant (SNP, indel) VCFs compatible with CNVkit.
"""
supported = set(["vardict", "freebayes", "gatk-haplotype", "mutect2", "vardict"])
out = []
for v in data.get("variants", []):
vrn_file = v.get("vrn_file")
if vrn_file and v.get("variantcaller") in supported:
base, ext = utils.splitext_plus(os.path.basename(vrn_file))
if vcfutils.get_paired_phenotype(data):
out.append(vrn_file)
else:
sample_vrn_file = os.path.join(dd.get_work_dir(data), v["variantcaller"],
"%s-%s%s" % (base, dd.get_sample_name(data), ext))
sample_vrn_file = vcfutils.select_sample(vrn_file, dd.get_sample_name(data), sample_vrn_file,
data["config"])
out.append(sample_vrn_file)
return out
def _add_variantcalls_to_output(out, data, is_somatic=False):
    """Call ploidy and convert into VCF and BED representations.

    Runs `cnvkit.py call` on the segmented output, optionally using a
    compatible small variant VCF for BAF-based adjustment, then exports the
    calls as BED and (snpEff-annotated) VCF.
    """
    call_file = "%s-call%s" % os.path.splitext(out["cns"])
    gender = population.get_gender(data)
    if not utils.file_exists(call_file):
        with file_transaction(data, call_file) as tx_call_file:
            filters = ["--filter", "cn"]
            cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "call"] + \
                  filters + \
                  ["--ploidy", str(ploidy.get_ploidy([data])),
                   "-o", tx_call_file, out["cns"]]
            small_vrn_files = _compatible_small_variants(data)
            if len(small_vrn_files) > 0 and _cna_has_values(out["cns"]):
                # Use B-allele frequencies from the first compatible VCF.
                cmd += ["-v", small_vrn_files[0]]
                if not is_somatic:
                    # Germline-only analyses assume a single clonal population.
                    cmd += ["-m", "clonal"]
            if gender and gender.lower() != "unknown":
                cmd += ["--gender", gender]
                if gender.lower() == "male":
                    cmd += ["--male-reference"]
            do.run(cmd, "CNVkit call ploidy")
    calls = {}
    for outformat in ["bed", "vcf"]:
        out_file = "%s.%s" % (os.path.splitext(call_file)[0], outformat)
        calls[outformat] = out_file
        if not os.path.exists(out_file):
            with file_transaction(data, out_file) as tx_out_file:
                cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
                       outformat, "--sample-id", dd.get_sample_name(data),
                       "--ploidy", str(ploidy.get_ploidy([data])),
                       "-o", tx_out_file, call_file]
                if gender and gender.lower() == "male":
                    cmd += ["--male-reference"]
                do.run(cmd, "CNVkit export %s" % outformat)
    out["call_file"] = call_file
    out["vrn_bed"] = annotate.add_genes(calls["bed"], data)
    # Prefer the snpEff-annotated VCF when annotation succeeds.
    effects_vcf, _ = effects.add_to_vcf(calls["vcf"], data, "snpeff")
    out["vrn_file"] = effects_vcf or calls["vcf"]
    return out
def _add_segmetrics_to_output(out, data):
    """Add metrics for measuring reliability of CNV estimates."""
    segmetrics_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0]
    if not utils.file_exists(segmetrics_file):
        with file_transaction(data, segmetrics_file) as tx_out_file:
            cnvkit = os.path.join(os.path.dirname(sys.executable), "cnvkit.py")
            # Use less fine grained bootstrapping intervals for whole genome runs
            is_wgs = dd.get_coverage_interval(data) == "genome"
            alpha, bootstrap = ("0.1", "50") if is_wgs else ("0.01", "500")
            cmd = [cnvkit, "segmetrics", "--ci", "--pi",
                   "-s", out["cns"], "-o", tx_out_file, out["cnr"],
                   "--alpha", alpha, "--bootstrap", bootstrap]
            do.run(cmd, "CNVkit segmetrics")
    out["segmetrics"] = segmetrics_file
    return out
def _add_gainloss_to_output(out, data):
    """Add gainloss based on genes, helpful for identifying changes in smaller genes."""
    gainloss_file = "%s-gainloss.txt" % os.path.splitext(out["cns"])[0]
    if not utils.file_exists(gainloss_file):
        with file_transaction(data, gainloss_file) as tx_out_file:
            cnvkit = os.path.join(os.path.dirname(sys.executable), "cnvkit.py")
            do.run([cnvkit, "gainloss", "-s", out["cns"], "-o", tx_out_file, out["cnr"]],
                   "CNVkit gainloss")
    out["gainloss"] = gainloss_file
    return out
def _add_coverage_bedgraph_to_output(out, data):
    """Add BedGraph representation of coverage to the output.

    Restricts the BAM to the called segment regions, then converts the
    aligned reads into bedGraph coverage with bedtools.
    """
    out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0]
    if utils.file_exists(out_file):
        out["bedgraph"] = out_file
        return out
    bam_file = dd.get_align_bam(data)
    bedtools = config_utils.get_program("bedtools", data["config"])
    samtools = config_utils.get_program("samtools", data["config"])
    cns_file = out["cns"]
    # NOTE(review): the temporary BED is only removed on success; a failing
    # do.run leaves it behind -- confirm whether cleanup on error matters.
    bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name
    with file_transaction(data, out_file) as tx_out_file:
        # Strip the cns header, keep chrom/start/end, then compute bedGraph
        # coverage over reads falling in those regions.
        cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; "
               "{samtools} view -b -L {bed_file} {bam_file} | "
               "{bedtools} genomecov -bg -ibam - -g {bed_file} >"
               "{tx_out_file}").format(**locals())
        do.run(cmd, "CNVkit bedGraph conversion")
        os.remove(bed_file)
    out["bedgraph"] = out_file
    return out
def _add_plots_to_output(out, data):
    """Add CNVkit plots summarizing called copy number values."""
    plotters = [("diagram", _add_diagram_plot),
                ("scatter", _add_scatter_plot),
                ("scatter_global", _add_global_scatter_plot)]
    out["plot"] = {}
    for plot_name, plot_fn in plotters:
        plot_file = plot_fn(out, data)
        # Only record plots that were actually produced.
        if plot_file:
            out["plot"][plot_name] = plot_file
    return out
def _get_larger_chroms(ref_file):
    """Retrieve larger chromosomes, avoiding the smaller ones for plotting.

    Separates contig sizes with two rounds of 2-means clustering: the first
    splits off a small-size group, the second splits that group again to
    derive a size threshold above the haplotype/random contigs. Returns the
    names of contigs above that threshold.
    """
    from scipy.cluster.vq import kmeans, vq
    all_sizes = []
    for c in ref.file_contigs(ref_file):
        all_sizes.append(float(c.size))
    all_sizes.sort()
    # separate out smaller chromosomes and haplotypes with kmeans
    centroids, _ = kmeans(np.array(all_sizes), 2)
    idx, _ = vq(np.array(all_sizes), centroids)
    # Sizes are sorted ascending, so the first run of identical cluster
    # labels is the small-size group.
    little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes)))
    little_sizes = [x[1] for x in little_sizes]
    # create one more cluster with the smaller, removing the haplotypes
    centroids2, _ = kmeans(np.array(little_sizes), 2)
    idx2, _ = vq(np.array(little_sizes), centroids2)
    little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes)))
    little_sizes2 = [x[1] for x in little_sizes2]
    # get any chromosomes not in haplotype/random bin
    thresh = max(little_sizes2)
    larger_chroms = []
    for c in ref.file_contigs(ref_file):
        if c.size > thresh:
            larger_chroms.append(c.name)
    return larger_chroms
def _remove_haplotype_chroms(in_file, data):
    """Remove shorter haplotype chromosomes from cns/cnr files for plotting."""
    keep_chroms = set(_get_larger_chroms(dd.get_ref_file(data)))
    out_file = "%s-chromfilter%s" % utils.splitext_plus(in_file)
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            with open(in_file) as in_handle, open(tx_out_file, "w") as out_handle:
                # Keep the header plus rows on the larger chromosomes.
                wanted = (line for line in in_handle
                          if line.startswith("chromosome")
                          or line.split()[0] in keep_chroms)
                out_handle.writelines(wanted)
    return out_file
def _cnx_is_empty(in_file):
"""Check if cnr or cns files are empty (only have a header)
"""
with open(in_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return False
return True
def _create_access_file(ref_file, out_dir, data):
    """Create genome access file for CNVlib to define available genomic regions.

    XXX Can move to installation/upgrade process if too slow here.
    """
    base = os.path.splitext(os.path.basename(ref_file))[0]
    out_file = os.path.join(out_dir, "%s-access.bed" % base)
    if utils.file_exists(out_file):
        return out_file
    with file_transaction(data, out_file) as tx_out_file:
        cmd = [_get_cmd(), "access", ref_file, "-s", "10000", "-o", tx_out_file]
        do.run(_prep_cmd(cmd, tx_out_file), "Create CNVkit access file")
    return out_file
# ## Theta support
def export_theta(ckout, data):
    """Provide updated set of data with export information for TheTA2 input."""
    # Restrict both segment and ratio files to standard chromosomes first.
    cns_file = chromhacks.bed_to_standardonly(ckout["cns"], data, headers="chromosome")
    cnr_file = chromhacks.bed_to_standardonly(ckout["cnr"], data, headers="chromosome")
    out_file = "%s-theta.input" % utils.splitext_plus(cns_file)[0]
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            export_cmd = [_get_cmd(), "export", "theta", cns_file, cnr_file,
                          "-o", tx_out_file]
            do.run(_prep_cmd(export_cmd, tx_out_file),
                   "Export CNVkit calls as inputs for TheTA2")
    ckout["theta_input"] = out_file
    return ckout
| 46.910891 | 117 | 0.637792 |
d27d939d04d3b5253e8adbdbae402c28328bae05 | 31,862 | py | Python | pyexchange/exchange2010/__init__.py | tedeler/pyexchange | 58042f473cbd4f00769249ce9ca20c6a376eddb6 | [
"Apache-2.0"
] | 128 | 2015-01-11T10:29:40.000Z | 2021-06-25T05:27:45.000Z | pyexchange/exchange2010/__init__.py | tedeler/pyexchange | 58042f473cbd4f00769249ce9ca20c6a376eddb6 | [
"Apache-2.0"
] | 52 | 2015-01-02T15:24:28.000Z | 2020-08-07T04:49:49.000Z | pyexchange/exchange2010/__init__.py | tedeler/pyexchange | 58042f473cbd4f00769249ce9ca20c6a376eddb6 | [
"Apache-2.0"
] | 96 | 2015-01-02T15:16:20.000Z | 2021-12-25T01:37:46.000Z | """
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import logging
from ..base.calendar import BaseExchangeCalendarEvent, BaseExchangeCalendarService, ExchangeEventOrganizer, ExchangeEventResponse
from ..base.folder import BaseExchangeFolder, BaseExchangeFolderService
from ..base.soap import ExchangeServiceSOAP
from ..exceptions import FailedExchangeException, ExchangeStaleChangeKeyException, ExchangeItemNotFoundException, ExchangeInternalServerTransientErrorException, ExchangeIrresolvableConflictException, InvalidEventType
from ..compat import BASESTRING_TYPES
from . import soap_request
from lxml import etree
from copy import deepcopy
from datetime import date
import warnings
log = logging.getLogger("pyexchange")
| 34.745911 | 216 | 0.701211 |
d27e18ed16bd406812b85f4af214631d5d9da65c | 8,982 | py | Python | rnacentral_pipeline/rnacentral/r2dt/should_show.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | [
"Apache-2.0"
] | 1 | 2018-08-09T14:41:16.000Z | 2018-08-09T14:41:16.000Z | rnacentral_pipeline/rnacentral/r2dt/should_show.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | [
"Apache-2.0"
] | 60 | 2015-02-04T16:43:53.000Z | 2022-01-27T10:28:43.000Z | rnacentral_pipeline/rnacentral/r2dt/should_show.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright [2009-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import enum
import logging
import typing as ty
from pathlib import Path
import joblib
from more_itertools import chunked
import pandas as pd
from pypika import Table, Query
import psycopg2
import psycopg2.extras
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
LOGGER = logging.getLogger(__name__)
SOURCE_MAP = {
"crw": 0,
"ribovision": 1,
"gtrnadb": 2,
"rnase_p": 3,
"rfam": 4,
}
MODEL_COLUMNS: ty.List[str] = Attributes.model_columns()
def infer_columns(frame: pd.DataFrame):
    """Populate derived columns on *frame* in place.

    Adds diagram sequence/model lengths and a numeric ``source_index``
    mapped from ``model_source`` via SOURCE_MAP. Raises ValueError when any
    model source has no known index.
    """
    for prefix in ("diagram_sequence", "diagram_model"):
        frame[prefix + "_length"] = frame[prefix + "_stop"] - frame[prefix + "_start"]
    frame["source_index"] = frame["model_source"].map(SOURCE_MAP)
    if frame["source_index"].isnull().any():
        raise ValueError("Could not build source_index for all training data")
d27e557da62812d946f0019863efdd827d603a76 | 1,024 | py | Python | model.py | nitro-code/inception-api | 0ee40b1bdc7cccec8e15921ff835ce29070a66f6 | [
"MIT"
] | 1 | 2017-08-18T09:13:47.000Z | 2017-08-18T09:13:47.000Z | model.py | nitroventures/inception-api | 0ee40b1bdc7cccec8e15921ff835ce29070a66f6 | [
"MIT"
] | null | null | null | model.py | nitroventures/inception-api | 0ee40b1bdc7cccec8e15921ff835ce29070a66f6 | [
"MIT"
] | null | null | null | import tensorflow as tf
from keras.preprocessing import image
from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
import numpy as np
import h5py
model = InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None)
graph = tf.get_default_graph()
| 34.133333 | 131 | 0.775391 |
d2824e51dfb32b914b5d61d7c72ec4e8a213bab5 | 4,959 | py | Python | models/recall/word2vec/static_model.py | ziyoujiyi/PaddleRec | bcddcf46e5cd8d4e6b2c5ee8d0d5521e292a2a81 | [
"Apache-2.0"
] | 2,739 | 2020-04-28T05:12:48.000Z | 2022-03-31T16:01:49.000Z | models/recall/word2vec/static_model.py | jiangcongxu/PaddleRec | 9a107c56af2d1ee282975bcc8edb1ad5fb7e7973 | [
"Apache-2.0"
] | 205 | 2020-05-14T13:29:14.000Z | 2022-03-31T13:01:50.000Z | models/recall/word2vec/static_model.py | jiangcongxu/PaddleRec | 9a107c56af2d1ee282975bcc8edb1ad5fb7e7973 | [
"Apache-2.0"
] | 545 | 2020-05-14T13:19:13.000Z | 2022-03-24T07:53:05.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import math
from net import Word2VecLayer, Word2VecInferLayer
| 40.983471 | 79 | 0.617867 |
d282949426f3fae8441528c5a9f321ae9b759d68 | 324 | py | Python | transformers/transformers/data/processors/split_sentences.py | richardbaihe/segatron_aaai | 1739b667f2bee53541f00d227da8375543fe5f11 | [
"MIT"
] | 16 | 2020-12-22T07:35:20.000Z | 2022-02-09T19:49:02.000Z | transformers/transformers/data/processors/split_sentences.py | richardbaihe/segatron_aaai | 1739b667f2bee53541f00d227da8375543fe5f11 | [
"MIT"
] | 1 | 2021-12-21T14:33:15.000Z | 2021-12-27T20:40:39.000Z | transformers/transformers/data/processors/split_sentences.py | richardbaihe/segatron_aaai | 1739b667f2bee53541f00d227da8375543fe5f11 | [
"MIT"
] | 2 | 2020-12-22T08:46:01.000Z | 2021-01-09T14:50:12.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time : 2020-04-13 21:19
# @Author : Richard Bai
# @EMail : he.bai@uwaterloo.ca
import nltk
import os
import json
| 19.058824 | 49 | 0.608025 |
d2831bfec38a388ec3a1badd4f034aaa55b158a5 | 1,661 | py | Python | sfdata/posts/migrations/0001_initial.py | adjspecies/sfdata | 9522176c1c80e9f0aeecf77da6576e8465238383 | [
"MIT"
] | 1 | 2019-01-24T01:57:21.000Z | 2019-01-24T01:57:21.000Z | sfdata/posts/migrations/0001_initial.py | adjspecies/sfdata | 9522176c1c80e9f0aeecf77da6576e8465238383 | [
"MIT"
] | null | null | null | sfdata/posts/migrations/0001_initial.py | adjspecies/sfdata | 9522176c1c80e9f0aeecf77da6576e8465238383 | [
"MIT"
] | 1 | 2018-12-22T02:20:39.000Z | 2018-12-22T02:20:39.000Z | # Generated by Django 2.1.4 on 2018-12-21 21:55
from django.db import migrations, models
import django.db.models.deletion
| 33.22 | 114 | 0.526791 |
d28385fbbbc8e61ca535b580e9a5d1d70c77fe44 | 1,361 | py | Python | test/test_tools.py | cokelaer/sequana | da35de12b45f38b4fa488c7a15a6d9829890b44e | [
"BSD-3-Clause"
] | 138 | 2016-07-13T06:24:45.000Z | 2022-03-28T13:12:03.000Z | test/test_tools.py | cokelaer/sequana | da35de12b45f38b4fa488c7a15a6d9829890b44e | [
"BSD-3-Clause"
] | 655 | 2016-03-10T17:33:40.000Z | 2022-03-30T16:10:45.000Z | test/test_tools.py | cokelaer/sequana | da35de12b45f38b4fa488c7a15a6d9829890b44e | [
"BSD-3-Clause"
] | 39 | 2016-11-04T11:40:58.000Z | 2022-03-15T08:12:29.000Z | from sequana.tools import bam_to_mapped_unmapped_fastq, reverse_complement, StatsBAM2Mapped
from sequana import sequana_data
from sequana.tools import bam_get_paired_distance, GZLineCounter, PairedFastQ
from sequana.tools import genbank_features_parser
| 26.686275 | 91 | 0.739897 |
d285ef91afbd55e6c2220bf3361249aade406f01 | 16,219 | py | Python | sxl/sxl.py | giriannamalai/sxl | 37db962bbbc978053375ffcc51e298ee6f5272d8 | [
"MIT"
] | null | null | null | sxl/sxl.py | giriannamalai/sxl | 37db962bbbc978053375ffcc51e298ee6f5272d8 | [
"MIT"
] | null | null | null | sxl/sxl.py | giriannamalai/sxl | 37db962bbbc978053375ffcc51e298ee6f5272d8 | [
"MIT"
] | null | null | null | """
xl.py - python library to deal with *big* Excel files.
"""
from abc import ABC
from collections import namedtuple, ChainMap
from contextlib import contextmanager
import datetime
import io
from itertools import zip_longest
import os
import re
import string
import xml.etree.cElementTree as ET
from zipfile import ZipFile
# ISO/IEC 29500:2011 in Part 1, section 18.8.30
STANDARD_STYLES = {
'0' : 'General',
'1' : '0',
'2' : '0.00',
'3' : '#,##0',
'4' : '#,##0.00',
'9' : '0%',
'10' : '0.00%',
'11' : '0.00E+00',
'12' : '# ?/?',
'13' : '# ??/??',
'14' : 'mm-dd-yy',
'15' : 'd-mmm-yy',
'16' : 'd-mmm',
'17' : 'mmm-yy',
'18' : 'h:mm AM/PM',
'19' : 'h:mm:ss AM/PM',
'20' : 'h:mm',
'21' : 'h:mm:ss',
'22' : 'm/d/yy h:mm',
'37' : '#,##0 ;(#,##0)',
'38' : '#,##0 ;[Red](#,##0)',
'39' : '#,##0.00;(#,##0.00)',
'40' : '#,##0.00;[Red](#,##0.00)',
'45' : 'mm:ss',
'46' : '[h]:mm:ss',
'47' : 'mmss.0',
'48' : '##0.0E+0',
'49' : '@',
}
ExcelErrorValue = namedtuple('ExcelErrorValue', 'value')
def head(self, num_rows=10):
"Return first 'num_rows' from this worksheet"
return self.rows[:num_rows+1] # 1-based
def cat(self, tab=1):
"Return/yield all rows from this worksheet"
dat = self.rows[1] # 1 based!
XLRec = namedtuple('XLRec', dat[0], rename=True) # pylint: disable=C0103
for row in self.rows[1:]:
yield XLRec(*row)
class Range(ExcelObj):
"""
Excel ranges
"""
class Workbook(ExcelObj):
"""
Excel workbook
"""
def get_date_system(self):
"Determine the date system used by the current workbook"
with self.xls.open('xl/workbook.xml') as xml_doc:
tree = ET.parse(io.TextIOWrapper(xml_doc, self.encoding))
tag = self.tag_with_ns('workbookPr', self.main_ns)
tag_element = tree.find(tag)
if tag_element and tag_element.get('date1904') == '1':
return 1904
return 1900
def num_to_date(self, number):
"""
Return date of "number" based on the date system used in this workbook.
The date system is either the 1904 system or the 1900 system depending
on which date system the spreadsheet is using. See
http://bit.ly/2He5HoD for more information on date systems in Excel.
"""
if self.date_system == 1900:
# Under the 1900 base system, 1 represents 1/1/1900 (so we start
# with a base date of 12/31/1899).
base = datetime.datetime(1899, 12, 31)
# BUT (!), Excel considers 1900 a leap-year which it is not. As
# such, it will happily represent 2/29/1900 with the number 60, but
# we cannot convert that value to a date so we throw an error.
if number == 60:
raise ValueError("Bad date in Excel file - 2/29/1900 not valid")
# Otherwise, if the value is greater than 60 we need to adjust the
# base date to 12/30/1899 to account for this leap year bug.
elif number > 60:
base = base - datetime.timedelta(days=1)
else:
# Under the 1904 system, 1 represent 1/2/1904 so we start with a
# base date of 1/1/1904.
base = datetime.datetime(1904, 1, 1)
days = int(number)
partial_days = number - days
seconds = int(round(partial_days * 86400000.0))
seconds, milliseconds = divmod(seconds, 1000)
date = base + datetime.timedelta(days, seconds, 0, milliseconds)
if days == 0:
return date.time()
return date
# Some helper functions
def num2col(num):
    """Convert an Excel column number to its column letters (1 -> 'A', 27 -> 'AA').

    (The previous docstring described the inverse conversion.)
    """
    result = []
    while num:
        # Shift to 0-based before dividing so 26 maps to 'Z', not 'A' + carry.
        num, rem = divmod(num - 1, 26)
        result[:0] = string.ascii_uppercase[rem]
    return ''.join(result)
| 35.803532 | 106 | 0.533818 |
9627bff44e51fdfda5ec4883f22ddd53286fedc6 | 5,156 | py | Python | sdk-python/awaazde/base.py | ashwini-ad/awaazde-api-client-sdk | c966f24d1e4b11fb9b0878d7e20c80b19cc04628 | [
"Apache-2.0"
] | null | null | null | sdk-python/awaazde/base.py | ashwini-ad/awaazde-api-client-sdk | c966f24d1e4b11fb9b0878d7e20c80b19cc04628 | [
"Apache-2.0"
] | null | null | null | sdk-python/awaazde/base.py | ashwini-ad/awaazde-api-client-sdk | c966f24d1e4b11fb9b0878d7e20c80b19cc04628 | [
"Apache-2.0"
] | null | null | null | import logging
import urllib.parse
from .api_client import ApiClient
from .constants import APIConstants
from .exceptions import APIException
from .utils import CommonUtils
| 33.480519 | 145 | 0.597944 |
96296ccad66334cb3060947522a0c3b215f8f83c | 774 | py | Python | configs/mmrotate/rotated-detection_tensorrt_dynamic-320x320-1024x1024.py | grimoire/mmdeploy | e84bc30f4a036dd19cb3af854203922a91098e84 | [
"Apache-2.0"
] | 746 | 2021-12-27T10:50:28.000Z | 2022-03-31T13:34:14.000Z | configs/mmrotate/rotated-detection_tensorrt_dynamic-320x320-1024x1024.py | grimoire/mmdeploy | e84bc30f4a036dd19cb3af854203922a91098e84 | [
"Apache-2.0"
] | 253 | 2021-12-28T05:59:13.000Z | 2022-03-31T18:22:25.000Z | configs/mmrotate/rotated-detection_tensorrt_dynamic-320x320-1024x1024.py | grimoire/mmdeploy | e84bc30f4a036dd19cb3af854203922a91098e84 | [
"Apache-2.0"
] | 147 | 2021-12-27T10:50:33.000Z | 2022-03-30T10:44:20.000Z | _base_ = ['./rotated-detection_static.py', '../_base_/backends/tensorrt.py']
onnx_config = dict(
output_names=['dets', 'labels'],
input_shape=None,
dynamic_axes={
'input': {
0: 'batch',
2: 'height',
3: 'width'
},
'dets': {
0: 'batch',
1: 'num_dets',
},
'labels': {
0: 'batch',
1: 'num_dets',
},
},
)
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 320, 320],
opt_shape=[1, 3, 1024, 1024],
max_shape=[1, 3, 1024, 1024])))
])
| 23.454545 | 76 | 0.432817 |
96298d33b6be77c822487f95da02b8e986a6434c | 4,053 | py | Python | rainy/envs/parallel_wrappers.py | alexmlamb/blocks_rl_gru_setup | fe462f79518d14f828e2c7cbf210cd105ff982f4 | [
"Apache-2.0"
] | null | null | null | rainy/envs/parallel_wrappers.py | alexmlamb/blocks_rl_gru_setup | fe462f79518d14f828e2c7cbf210cd105ff982f4 | [
"Apache-2.0"
] | null | null | null | rainy/envs/parallel_wrappers.py | alexmlamb/blocks_rl_gru_setup | fe462f79518d14f828e2c7cbf210cd105ff982f4 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from typing import Any, Iterable, Tuple
from .ext import EnvSpec
from .parallel import ParallelEnv
from ..prelude import Action, Array, State
from ..utils.rms import RunningMeanStd
class FrameStackParallel(ParallelEnvWrapper):
"""Parallel version of atari_wrappers.FrameStack
"""
| 32.685484 | 97 | 0.603504 |
962a9c50351cba1947f6e3a1a14ce2f159196743 | 1,205 | py | Python | oldp/apps/search/templatetags/search.py | docsuleman/oldp | 8dcaa8e6e435794c872346b5014945ace885adb4 | [
"MIT"
] | 66 | 2018-05-07T12:34:39.000Z | 2022-02-23T20:14:24.000Z | oldp/apps/search/templatetags/search.py | Justice-PLP-DHV/oldp | eadf235bb0925453d9a5b81963a0ce53afeb17fd | [
"MIT"
] | 68 | 2018-06-11T16:13:17.000Z | 2022-02-10T08:03:26.000Z | oldp/apps/search/templatetags/search.py | Justice-PLP-DHV/oldp | eadf235bb0925453d9a5b81963a0ce53afeb17fd | [
"MIT"
] | 15 | 2018-06-23T19:41:13.000Z | 2021-08-18T08:21:49.000Z | from datetime import datetime
from dateutil.relativedelta import relativedelta
from django import template
from django.template.defaultfilters import urlencode
from django.urls import reverse
from haystack.models import SearchResult
from haystack.utils.highlighting import Highlighter
register = template.Library()
| 28.690476 | 117 | 0.73444 |
962b1992cdd2dfaf0952dfed1c1a16307ccc9f57 | 372 | py | Python | interview/leet/1029_Two_City_Scheduling.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2020-10-12T13:33:29.000Z | 2020-10-12T13:33:29.000Z | interview/leet/1029_Two_City_Scheduling.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | null | null | null | interview/leet/1029_Two_City_Scheduling.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2016-11-09T07:28:45.000Z | 2016-11-09T07:28:45.000Z | #!/usr/bin/env python
costs = [[10,20],[30,200],[400,50],[30,20]]
sol = Solution()
print(sol.twoCitySchedCost(costs))
| 24.8 | 60 | 0.553763 |
962b25ef7d6c6efe9c549cbaf3d04d00594f4f6d | 23,818 | py | Python | Metrics/plots.py | liorfrenkel1992/focal_calibration | 4f020e022be501ee3f723e6105afe793a1e522f0 | [
"MIT"
] | null | null | null | Metrics/plots.py | liorfrenkel1992/focal_calibration | 4f020e022be501ee3f723e6105afe793a1e522f0 | [
"MIT"
] | null | null | null | Metrics/plots.py | liorfrenkel1992/focal_calibration | 4f020e022be501ee3f723e6105afe793a1e522f0 | [
"MIT"
] | null | null | null | '''
This file contains method for generating calibration related plots, eg. reliability plots.
References:
[1] C. Guo, G. Pleiss, Y. Sun, and K. Q. Weinberger. On calibration of modern neural networks.
arXiv preprint arXiv:1706.04599, 2017.
'''
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import math
import torch
from torch.nn import functional as F
from scipy.interpolate import make_interp_spline
plt.rcParams.update({'font.size': 20})
# Some keys used for the following dictionaries
COUNT = 'count'
CONF = 'conf'
ACC = 'acc'
BIN_ACC = 'bin_acc'
BIN_CONF = 'bin_conf'
def reliability_plot(confs, preds, labels, save_plots_loc, dataset, model, trained_loss, num_bins=15, scaling_related='before', save=False):
    """Draw a reliability diagram: per-bin accuracy versus confidence.

    The pink bars show perfect calibration (accuracy == confidence); the blue
    bars show the accuracy actually observed in each confidence bin.  When
    ``save`` is true the figure is written as a PDF under ``save_plots_loc``,
    otherwise it is displayed interactively.
    """
    bin_dict = _populate_bins(confs, preds, labels, num_bins)
    bin_edges = [idx / float(num_bins) for idx in range(num_bins)]
    accuracies = [bin_dict[idx][BIN_ACC] for idx in range(num_bins)]
    plt.figure(figsize=(10, 8))  # width:20, height:3
    plt.bar(bin_edges, bin_edges, align='edge', width=0.05, color='pink', label='Expected')
    plt.bar(bin_edges, accuracies, align='edge', width=0.05,
            color='blue', alpha=0.5, label='Actual')
    plt.ylabel('Accuracy')
    plt.xlabel('Confidence')
    plt.legend()
    if save:
        plt.savefig(os.path.join(save_plots_loc, '{}_{}'.format(dataset, model), 'reliability_plot_{}_{}_{}_{}.pdf'.format(scaling_related, dataset, model, trained_loss)), dpi=40)
    else:
        plt.show()
def bin_strength_plot(confs, preds, labels, num_bins=15):
    """Plot the percentage of samples that fall into each confidence bin."""
    bin_dict = _populate_bins(confs, preds, labels, num_bins)
    bin_edges = [idx / float(num_bins) for idx in range(num_bins)]
    total = len(labels)
    # Share of the dataset landing in each bin, as a percentage.
    percentages = [(bin_dict[idx][COUNT] / float(total)) * 100
                   for idx in range(num_bins)]
    plt.figure(figsize=(10, 8))  # width:20, height:3
    plt.bar(bin_edges, percentages, align='edge', width=0.05,
            color='blue', alpha=0.5, label='Percentage samples')
    plt.ylabel('Percentage of samples')
    plt.xlabel('Confidence')
    plt.show()
| 54.00907 | 234 | 0.674196 |
962cd88d6f79f8b3352c0cd041ccfcff6c478fe5 | 11,137 | py | Python | sdk/python/pulumi_oci/sch/get_service_connector.py | EladGabay/pulumi-oci | 6841e27d4a1a7e15c672306b769912efbfd3ba99 | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2021-08-17T11:14:46.000Z | 2021-12-31T02:07:03.000Z | sdk/python/pulumi_oci/sch/get_service_connector.py | pulumi-oci/pulumi-oci | 6841e27d4a1a7e15c672306b769912efbfd3ba99 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-09-06T11:21:29.000Z | 2021-09-06T11:21:29.000Z | sdk/python/pulumi_oci/sch/get_service_connector.py | pulumi-oci/pulumi-oci | 6841e27d4a1a7e15c672306b769912efbfd3ba99 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-08-24T23:31:30.000Z | 2022-01-02T19:26:54.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetServiceConnectorResult',
'AwaitableGetServiceConnectorResult',
'get_service_connector',
]
def get_service_connector(service_connector_id: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceConnectorResult:
    """
    This data source provides details about a specific Service Connector resource in Oracle Cloud Infrastructure Service Connector Hub service.
    Gets the specified service connector's configuration information.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_oci as oci
    test_service_connector = oci.sch.get_service_connector(service_connector_id=oci_sch_service_connector["test_service_connector"]["id"])
    ```
    :param str service_connector_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the service connector.
    """
    # Build the raw argument dict the provider invoke expects (camelCase keys).
    __args__ = dict()
    __args__['serviceConnectorId'] = service_connector_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Pin the invoke to this SDK's provider version when the caller did not.
        opts.version = _utilities.get_version()
    # typ= tells the pulumi runtime how to deserialize the provider response.
    __ret__ = pulumi.runtime.invoke('oci:sch/getServiceConnector:getServiceConnector', __args__, opts=opts, typ=GetServiceConnectorResult).value
    # Re-wrap into the awaitable result type so both sync and async callers work.
    return AwaitableGetServiceConnectorResult(
        compartment_id=__ret__.compartment_id,
        defined_tags=__ret__.defined_tags,
        description=__ret__.description,
        display_name=__ret__.display_name,
        freeform_tags=__ret__.freeform_tags,
        id=__ret__.id,
        lifecyle_details=__ret__.lifecyle_details,
        service_connector_id=__ret__.service_connector_id,
        source=__ret__.source,
        state=__ret__.state,
        system_tags=__ret__.system_tags,
        target=__ret__.target,
        tasks=__ret__.tasks,
        time_created=__ret__.time_created,
        time_updated=__ret__.time_updated)
| 43.846457 | 347 | 0.679806 |
962dd9983600a5e9baec739d1eaccc092f1e2982 | 3,258 | py | Python | manim_demo/srcs/new_Scene_demo.py | shujunge/manim_tutorial | 8e320373f0404dcc0a200ab3750ee70784dc1345 | [
"MIT"
] | null | null | null | manim_demo/srcs/new_Scene_demo.py | shujunge/manim_tutorial | 8e320373f0404dcc0a200ab3750ee70784dc1345 | [
"MIT"
] | null | null | null | manim_demo/srcs/new_Scene_demo.py | shujunge/manim_tutorial | 8e320373f0404dcc0a200ab3750ee70784dc1345 | [
"MIT"
] | null | null | null | from manimlib.imports import *
| 24.313433 | 58 | 0.482198 |
962e6f77c6888ab263ac0737fad6faa36799e3b3 | 4,720 | py | Python | prototyping/OpenCv/robot_tracking.py | ssnover/msd-p18542 | 32bef466f9d5ba55429da2119a14081b3e411d0b | [
"MIT"
] | 3 | 2021-01-07T07:46:50.000Z | 2021-11-17T10:48:39.000Z | prototyping/OpenCv/robot_tracking.py | ssnover/msd-p18542 | 32bef466f9d5ba55429da2119a14081b3e411d0b | [
"MIT"
] | 3 | 2018-02-19T20:30:30.000Z | 2018-04-20T23:25:29.000Z | prototyping/OpenCv/robot_tracking.py | ssnover95/msd-p18542 | 32bef466f9d5ba55429da2119a14081b3e411d0b | [
"MIT"
] | 1 | 2021-01-07T07:46:52.000Z | 2021-01-07T07:46:52.000Z | import imutils
import cv2
import numpy as np
import math
from math import sqrt
'''
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
cv2.imwrite('messigray.png', img)
cv2.destroyAllWindows()
'''
| 39.333333 | 114 | 0.587288 |
962e9ec005fd784de4a3baab20160d8df9ba9898 | 7,404 | py | Python | ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_host_ntp.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 1 | 2019-04-16T21:23:15.000Z | 2019-04-16T21:23:15.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_host_ntp.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 5 | 2020-02-26T20:10:50.000Z | 2021-09-23T23:23:18.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_host_ntp.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 1 | 2020-02-13T14:24:57.000Z | 2020-02-13T14:24:57.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_ntp
short_description: Manage NTP configurations about an ESXi host
description:
- This module can be used to manage NTP configuration information about an ESXi host.
- User can specify an ESXi hostname or Cluster name. In case of cluster name, all ESXi hosts are updated.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- NTP settings are applied to every ESXi host system in the given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- NTP settings are applied to this ESXi host system.
- If C(cluster_name) is not given, this parameter is required.
ntp_servers:
description:
- "IP or FQDN of NTP server/s."
- This accepts a list of NTP servers. For multiple servers, please look at the examples.
required: True
state:
description:
- "present: Add NTP server/s, if it specified server/s are absent else do nothing."
- "absent: Remove NTP server/s, if specified server/s are present else do nothing."
default: present
choices: [ present, absent ]
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Set NTP setting for all ESXi Host in given Cluster
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
state: present
ntp_servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
delegate_to: localhost
- name: Set NTP setting for an ESXi Host
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: present
ntp_servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
delegate_to: localhost
- name: Remove NTP setting for an ESXi Host
vmware_host_ntp:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: absent
ntp_servers:
- bad.server.ntp.org
delegate_to: localhost
'''
RETURN = r'''#
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
if __name__ == "__main__":
main()
| 35.768116 | 122 | 0.66356 |
962f56d3ff295087050794dbedace7481235e971 | 337 | py | Python | molecule/default/tests/test_creation.py | stackhpc/ansible-role-luks | 8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05 | [
"Apache-1.1"
] | 3 | 2020-04-14T19:57:25.000Z | 2021-01-11T09:09:16.000Z | molecule/default/tests/test_creation.py | stackhpc/ansible-role-luks | 8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05 | [
"Apache-1.1"
] | 4 | 2020-08-12T10:24:25.000Z | 2022-01-17T17:48:28.000Z | molecule/default/tests/test_creation.py | stackhpc/ansible-role-luks | 8c4b5f472ab0aef3d2a776d4fcd37ca17c6eac05 | [
"Apache-1.1"
] | 2 | 2021-06-17T21:57:42.000Z | 2022-02-20T08:02:43.000Z | import os
import testinfra.utils.ansible_runner
# Run these tests against every host in the Molecule-generated inventory file.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
962ff9f355899a8526a5df34e1ab89d319623352 | 796 | py | Python | test_weakref.py | xzfn/retroreload | 79466ef013a2a0892e096e510b847e222478caeb | [
"MIT"
] | null | null | null | test_weakref.py | xzfn/retroreload | 79466ef013a2a0892e096e510b847e222478caeb | [
"MIT"
] | null | null | null | test_weakref.py | xzfn/retroreload | 79466ef013a2a0892e096e510b847e222478caeb | [
"MIT"
] | null | null | null | """
weakref should be valid.
"""
import gc
import importlib
import autoreload
import retroreload
# Select which reload implementation to exercise:
#   0 = importlib.reload (builtin), 1 = autoreload.superreload,
#   2 = retroreload.retroreload.
switch = 2
if switch == 0:
    reload_module = importlib.reload
elif switch == 1:
    reload_module = autoreload.superreload
elif switch == 2:
    reload_module = retroreload.retroreload
import mod3
if __name__ == '__main__':
    dispatcher = mod3.Dispatcher()
    c = mod3.C()
    # Register a bound method, then reload mod3 and dispatch again to see
    # whether references held by the dispatcher survive the reload
    # (per the module docstring: "weakref should be valid").
    dispatcher.register(c.func)
    dispatcher.dispatch()
    input('modify mod3.py if you like, and press enter')
    reload_module(mod3)
    print('gc before')
    # Force a collection so any references broken by the reload are exposed.
    gc.collect()
    print('gc after')
    dispatcher.dispatch()
# builtin: preserve weakref, but result is bad
# autoreload: loses weakref when gc.collect is called, cb() returns None
# retroreload: preserve weakref, result is good
| 18.090909 | 76 | 0.688442 |
963000235c468e48e66c69225a889ee4596a7a88 | 814 | py | Python | model/model.py | Okestro-symphony/Log-Anomaly-Detection | ab6548ca93c8d6073faf96d8a39bf4517139d8ea | [
"Apache-2.0"
] | null | null | null | model/model.py | Okestro-symphony/Log-Anomaly-Detection | ab6548ca93c8d6073faf96d8a39bf4517139d8ea | [
"Apache-2.0"
] | 1 | 2021-11-03T04:17:55.000Z | 2021-11-03T04:17:55.000Z | model/model.py | Okestro-symphony/Log-Anomaly-Detection | ab6548ca93c8d6073faf96d8a39bf4517139d8ea | [
"Apache-2.0"
] | 1 | 2021-11-03T04:15:33.000Z | 2021-11-03T04:15:33.000Z | def train_isolation_forest(df, padding_data):
'''
* Isolation Forest model setting
- n_estimators=100
- max_samples='auto'
- n_jobs=-1
- max_features=2
- contamination=0.01
'''
#padding data load
data_df = padding_data
# model
model = IsolationForest(n_estimators=100, max_samples='auto', n_jobs=-1,
max_features=2, contamination=0.01)
try:
model = model.fit(data_df)
except Exception as ex:
print(' : ', ex)
try:
# score & anomaly
score = model.decision_function(data_df)
anomaly = model.predict(data_df)
except Exception as ex:
print(' : ', ex)
# anomaly_data = df.loc[df['is_anomaly'] == -1] # -1 .
return df
| 24.666667 | 76 | 0.589681 |
963036c125a200d1a52d12e53f35e03ad2ffc294 | 1,619 | py | Python | datasets.py | Tracesource/DCEC | 8b9dca56bc032fb81d18dd9709c170802600e06b | [
"MIT"
] | 154 | 2017-10-01T22:32:26.000Z | 2022-03-08T14:09:38.000Z | datasets.py | Tracesource/DCEC | 8b9dca56bc032fb81d18dd9709c170802600e06b | [
"MIT"
] | 10 | 2017-12-28T11:38:14.000Z | 2020-07-22T04:46:27.000Z | datasets.py | Tracesource/DCEC | 8b9dca56bc032fb81d18dd9709c170802600e06b | [
"MIT"
] | 59 | 2017-12-18T11:50:53.000Z | 2022-03-16T17:42:18.000Z | import numpy as np
| 34.446809 | 113 | 0.618901 |
9630b2873f1800433cfbc8d045129730577eb455 | 121,027 | py | Python | balto_gui.py | peckhams/balto_gui | 1c599bce4e90569f34aab1546d1adfd9dcaad943 | [
"MIT"
] | 8 | 2020-07-27T16:16:50.000Z | 2022-03-09T19:42:27.000Z | balto_gui.py | peckhams/balto_gui | 1c599bce4e90569f34aab1546d1adfd9dcaad943 | [
"MIT"
] | 1 | 2020-05-21T01:36:16.000Z | 2020-05-21T01:36:16.000Z | balto_gui.py | peckhams/balto_gui | 1c599bce4e90569f34aab1546d1adfd9dcaad943 | [
"MIT"
] | 5 | 2020-05-07T13:16:42.000Z | 2021-02-18T18:57:42.000Z | """
This module defines a class called "balto_gui" that can be used to
create a graphical user interface (GUI) for downloading data from
OpenDAP servers from and into a Jupyter notebook. If used with Binder,
this GUI runs in a browser window and does not require the user to
install anything on their computer. However, this module should be
included in the same directory as the Jupyter notebook.
"""
#------------------------------------------------------------------------
#
# Copyright (C) 2020. Scott D. Peckham
#
#------------------------------------------------------------------------
from ipyleaflet import Map, basemaps, FullScreenControl
from ipyleaflet import MeasureControl, Rectangle
## from ipyleaflet import ScaleControl # (doesn't work)
from traitlets import Tuple
## import ipyleaflet as ipyl
import ipywidgets as widgets
from ipywidgets import Layout
from IPython.display import display, HTML
## from IPython.core.display import display
## from IPython.lib.display import display
import pydap.client # (for open_url, etc.)
import requests # (used by get_filenames() )
import json
import datetime # (used by get_duration() )
import copy
import numpy as np
import balto_plot as bp
#------------------------------------------------------------------------
#
# class balto_gui
# __init__()
# pix_str()
# show_gui()
# make_acc_gui()
# make_tab_gui()
# make_data_panel()
# reset_data_panel()
# make_map_panel()
# make_dates_panel()
# make_download_panel()
# make_prefs_panel()
# #--------------------------
# get_map_bounds()
# replace_map_bounds()
# replace_map_bounds2()
# update_map_bounds()
# zoom_out_to_new_bounds()
# --------------------------
# get_url_dir_filenames()
# update_filename_list()
# get_opendap_file_url()
# open_dataset()
# update_data_panel()
# --------------------------
# update_var_info()
# get_all_var_shortnames()
# get_all_var_longnames()
# get_all_var_units()
# --------------------------
# get_var_shortname()
# get_var_longname()
# get_var_units()
# get_var_shape()
# get_var_dimensions()
# get_var_dtype()
# get_var_attributes()
# get_var_time_attributes()
# -------------------------------
# update_datetime_panel()
# get_years_from_time_since()
# clear_datetime_notes()
# append_datetime_notes()
# list_to_string()
# -------------------------------
# pad_with_zeros()
# get_actual_time_units()
# get_time_delta_str()
# get_datetime_obj_from_str()
# get_datetime_obj_from_one_str()
# get_start_datetime_obj()
# get_end_datetime_obj()
# get_dt_from_datetime_str()
# split_datetime_str()
# split_date_str()
# split_time_str()
# get_datetime_from_time_since()
# get_time_since_from_datetime()
# get_month_difference()
# -------------------------------
# get_new_time_index_range()
# get_new_lat_index_range()
# get_new_lon_index_range()
# -------------------------------
# get_duration() ## not used yet
# ----------------------------
# get_download_format()
# clear_download_log()
# append_download_log()
# print_user_choices()
# download_data()
# show_grid()
# -------------------------------
# get_opendap_package() # (in prefs panel)
# ----------------------------
# get_abbreviated_var_name()
# get_possible_svo_names()
#
#------------------------------
# Example GES DISC opendap URL
#------------------------------
# https://gpm1.gesdisc.eosdis.nasa.gov/opendap/GPM_L3/GPM_3IMERGHHE.05/2014/091/
# 3B-HHR-E.MS.MRG.3IMERG.20140401-S000000-E002959.0000.V05B.HDF5.nc
# ?HQprecipitation[1999:2200][919:1049],lon[1999:2200],lat[919:1049]
#------------------------------------------------------------------------
| 42.811107 | 96 | 0.432118 |
9632975c75b20b8d1e791a57c8e86aa3a4d6057f | 586 | py | Python | w0rplib/url.py | w0rp/w0rpzone | 06aa9f8871cefcbefbbfdfcba0abfd4fa2629d0c | [
"BSD-2-Clause"
] | null | null | null | w0rplib/url.py | w0rp/w0rpzone | 06aa9f8871cefcbefbbfdfcba0abfd4fa2629d0c | [
"BSD-2-Clause"
] | 13 | 2019-07-05T18:44:46.000Z | 2021-06-19T12:19:46.000Z | w0rplib/url.py | w0rp/w0rpzone | 06aa9f8871cefcbefbbfdfcba0abfd4fa2629d0c | [
"BSD-2-Clause"
] | null | null | null | from django.views.generic.base import RedirectView
from django.conf.urls import re_path
def redir(regex, redirect_url, name=None):
    """
    Shorthand for a permanent (301) redirect URL pattern.
    """
    view = RedirectView.as_view(url=redirect_url, permanent=True)
    return re_path(regex, view, name=name)
def redir_temp(regex, redirect_url, name=None):
    """
    Shorthand for a temporary (302) redirect URL pattern.
    """
    view = RedirectView.as_view(url=redirect_url, permanent=False)
    return re_path(regex, view, name=name)
| 23.44 | 64 | 0.663823 |
963361945d482a5cef7d35e152993ddbfadb7240 | 1,889 | py | Python | launcher.py | lucaso60/DiscordDMSpammer | 336a20195cf32013cf50c98c2a400ec79750758b | [
"MIT"
] | 1 | 2021-08-15T13:21:22.000Z | 2021-08-15T13:21:22.000Z | launcher.py | lucaso60/DiscordDMSpammer | 336a20195cf32013cf50c98c2a400ec79750758b | [
"MIT"
] | 1 | 2021-09-14T15:29:30.000Z | 2021-09-14T15:42:01.000Z | launcher.py | lucaso60/DiscordDMSpammer | 336a20195cf32013cf50c98c2a400ec79750758b | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021 lucaso60
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVUSER_IDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import discord
from discord import *
from datetime import datetime
from time import sleep
from extensions import on_start_screen
# NOTE(review): time_now() is not imported above (only on_start_screen is
# imported from extensions), so this line will raise NameError at runtime --
# confirm the intended import in extensions.
TOKEN = input(f"{time_now()} Please input your bot token: ")
bot = discord.Bot(command_prefix=".")
bot.run(TOKEN)
| 33.732143 | 79 | 0.741133 |
9633daded62c203085e90cc7105a91f913793c8c | 2,393 | py | Python | src/pipelines.py | charnley/bayes-mndo | 38662dd738af7cba73f98ffacc5c719aaa9a036d | [
"CC0-1.0"
] | null | null | null | src/pipelines.py | charnley/bayes-mndo | 38662dd738af7cba73f98ffacc5c719aaa9a036d | [
"CC0-1.0"
] | null | null | null | src/pipelines.py | charnley/bayes-mndo | 38662dd738af7cba73f98ffacc5c719aaa9a036d | [
"CC0-1.0"
] | null | null | null | import multiprocessing as mp
import os
import shutil
from functools import partial
from tqdm import tqdm
import data
from chemhelp import mndo
# def calculate(binary, filename, scr=None):
# """
# Collect sets of lines for each molecule as they become available
# and then call a parser to extract the dictionary of properties.
# DEPRECIATED
# """
# props_list = mndo.calculate_file(filename, scr=scr, mndo_cmd=binary)
# props_list = list(props_list) # NOTE that calculate_file returns an iterator
# return props_list
def worker(*args, **kwargs):
    """
    Run one MNDO property calculation in an isolated per-process scratch dir.

    args[0] -- list of parameter values, forwarded to ``data.set_params``.
    Required kwargs:
        scr -- scratch root directory containing ``filename``.
        filename -- MNDO input file name (relative to ``scr``).
        param_keys, mean_params, scale_params -- forwarded to
            ``data.set_params`` to write the scaled parameters.
        binary -- path to the mndo executable.

    Returns the fully parsed list of property dicts from
    ``mndo.calculate_file``.
    """
    scr = kwargs["scr"]
    filename = kwargs["filename"]
    param_keys = kwargs["param_keys"]
    mean_params = kwargs["mean_params"]
    scale_params = kwargs["scale_params"]
    binary = kwargs["binary"]
    # Ensure unique directory for this worker in scratch directory
    # (keyed on the process id so pool workers never collide).
    pid = os.getpid()
    cwd = os.path.join(scr, str(pid))
    if not os.path.exists(cwd):
        os.mkdir(cwd)
    if not os.path.exists(os.path.join(cwd, filename)):
        shutil.copy2(os.path.join(scr, filename), os.path.join(cwd, filename))
    # Set params in worker dir
    param_list = args[0]
    data.set_params(
        param_list, param_keys, mean_params, scale_params, scr=cwd,
    )
    # Calculate properties
    properties_list = mndo.calculate_file(filename, scr=cwd, mndo_cmd=binary)
    # NOTE JCK properties_list is a generator, so complete parsing on worker
    properties_list = list(properties_list)
    # Remove the per-worker scratch dir once results are fully in memory.
    shutil.rmtree(cwd)
    return properties_list
| 25.457447 | 88 | 0.670288 |
96343356750cc9fb146f0fb6d55a57fd12b0dbb2 | 2,915 | py | Python | lib/googlecloudsdk/api_lib/logging/common.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/logging/common.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/logging/common.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that contains common logging commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import properties
def FetchLogs(log_filter=None,
              order_by='DESC',
              limit=None,
              parent=None):
  """Fetches log entries via the Cloud Logging V2 API.

  https://cloud.google.com/logging/docs/api/introduction_v2

  Entries are sorted on the timestamp field, and afterwards filter is applied.
  If limit is passed, returns only up to that many matching entries.
  If neither log_filter nor log_ids are passed, no filtering is done.

  Args:
    log_filter: filter expression used in the request.
    order_by: the sort order, either DESC or ASC.
    limit: how many entries to return.
    parent: the name of the log's parent resource, e.g. "projects/foo" or
      "organizations/123" or "folders/123". Defaults to the current project.

  Returns:
    A generator that returns matching log entries.
    Callers are responsible for handling any http exceptions.
  """
  if not parent:
    # No parent given: default to the currently configured project.
    parent = 'projects/%s' % properties.VALUES.core.project.Get(required=True)
  elif not any(kind in parent for kind in
               ('projects/', 'organizations/', 'folders/', 'billingAccounts/')):
    raise exceptions.InvalidArgumentException(
        'parent', 'Unknown parent type in parent %s' % parent)
  # The backend caps page_size at 1000; never request more than the limit.
  page_size = min(limit or 1000, 1000)
  order_by = ('timestamp desc' if order_by.upper() == 'DESC'
              else 'timestamp asc')
  client = util.GetClient()
  request = client.MESSAGES_MODULE.ListLogEntriesRequest(
      resourceNames=[parent], filter=log_filter, orderBy=order_by)
  return list_pager.YieldFromList(
      client.entries, request, field='entries', limit=limit,
      batch_size=page_size, batch_size_attribute='pageSize')
| 38.355263 | 80 | 0.704974 |
96346b689665119bb71187a849bd5ed61453fc88 | 23,288 | py | Python | hkdataminer/utils/plot_.py | stephenliu1989/HK_DataMiner | 312d8244d33337971d81305ec7a9986427c669d9 | [
"Apache-2.0"
] | 3 | 2020-06-12T21:25:05.000Z | 2021-03-02T09:38:24.000Z | hkdataminer/utils/plot_.py | stephenliu1989/HK_DataMiner | 312d8244d33337971d81305ec7a9986427c669d9 | [
"Apache-2.0"
] | 1 | 2018-01-30T09:52:01.000Z | 2018-01-30T09:52:01.000Z | hkdataminer/utils/plot_.py | stephenliu1989/HK_DataMiner | 312d8244d33337971d81305ec7a9986427c669d9 | [
"Apache-2.0"
] | 1 | 2021-01-16T13:07:50.000Z | 2021-01-16T13:07:50.000Z | __author__ = 'stephen'
import numpy as np
import scipy.io
import scipy.sparse
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.pylab as pylab
from .utils import get_subindices
import matplotlib.ticker as mtick
from collections import Counter
from sklearn.neighbors.kde import KernelDensity
from scipy import stats
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1, potential=False):
    '''
    Scatter-plot each cluster in (phi, psi) space and save the figure.

    :param labels: the assignments after clustering or lumping
    :param phi_angles: the phi angles
    :param psi_angles: the psi angles
    :param name: the name of the result pictures
    :param outliers: outliers default is -1 (points with this label are skipped)
    :param step: plot only every ``step``-th cluster id when > 1
    :param potential: if True, use the 2D-potential axis limits instead of
        the Alanine Dipeptide layout
    :return: None
    '''
    clusters = np.unique(labels)
    plt.rc("font", size=10)
    if step > 1:
        # Thin out the cluster list so only every step-th state is drawn.
        clusters = clusters[0:len(clusters):step]
    # NOTE(review): colors_jet is computed but the explicit color is commented
    # out below, so matplotlib's default color cycle is used instead.
    colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
    if potential is False: #plot Alanine Dipeptide
        for i in clusters:
            if i != outliers:
                point = np.where(labels == i)
                plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7)#, color=colors_jet[i])
            #else:
            #    point = np.where(labels == i)
            #    plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7, color='black') # , color=colors_jet[i])
        plt.title("Alanine Dipeptide " + name + " states", fontsize=10)
        # plt.xlim([-180, 180])
        # plt.ylim([-180, 180])
        # plt.xticks([-110, -60, 0, 60, 120])
        # plt.yticks([-120, -60, 0, 60, 120])
    else: # if plot 2D potential
        plt.figure(figsize=(10, 10))
        for i in clusters:
            if i != outliers:
                plt.plot(phi_angles[np.where(labels == i)],
                         psi_angles[np.where(labels == i)], '.', markersize=1.0, alpha=0.7) #markersize=20.0, color=colors_jet[i])
                #plt.plot(phi_angles[np.where(labels == i)],
                #         psi_angles[np.where(labels == i)],
                #         '.', color=colors_jet[i], label='State %d' % i)
        #plt.title("2D potential " + name + " states", fontsize=20)
        plt.xlim([-75, 75])
        plt.ylim([-75, 75])
        plt.xticks([-50, 0, 50])
        plt.yticks([-50, 0, 50])
    plt.xlabel(r"$\phi$", fontsize=25)
    plt.ylabel(r"$\psi$", fontsize=25)
    # Save the result figure
    plt.savefig('./'+name+'.png', dpi=400)
    plt.close()
    #plt.show()
def plot_each_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1):
    '''
    Save one separate (phi, psi) scatter plot per cluster state.

    :param labels: the assignments after clustering or lumping
    :param phi_angles: the phi angles
    :param psi_angles: the psi angles
    :param name: the name of the result pictures
    :param outliers: outliers default is -1 (this label is skipped)
    :param step: plot only every ``step``-th cluster id when > 1
    :return: None
    '''
    clusters = np.unique(labels)
    if step > 1:
        clusters = clusters[0:len(clusters):step]
    colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
    for i in np.unique(clusters):
        if i != outliers:
            plt.plot(phi_angles[np.where(labels == i)],
                     psi_angles[np.where(labels == i)],
                     'x', color=colors_jet[i], label='State %d' % i)
            #plt.title("Alanine Dipeptide " + name + " state_" + str(i))
            plt.xlabel(r"$\phi$")
            plt.ylabel(r"$\psi$")
            plt.xlim([-180, 180])
            plt.ylim([-180, 180])
            plt.xticks([-120, -60, 0, 60, 120])
            plt.yticks([-120, -60, 0, 60, 120])
            # Save the result figure (one file per state)
            plt.savefig('./'+ name + " state_" + str(i)+'.png', dpi = 400)
            plt.close()
            #plt.show()
def contour_cluster(labels, phi_angles, psi_angles, name, outliers=-1):
    '''
    Draw a KDE-based contour plot of cluster density in (phi, psi) space.

    :param labels: the assignments after clustering or lumping
    :param phi_angles: the phi angles
    :param psi_angles: the psi angles
    :param name: the name of the result pictures
    :param outliers: outliers default is -1
    :return: None
    '''
    # lables_array = np.array(labels)
    # colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(lables_array)+1))
    for i in np.unique(labels):
        #if i != outliers:
        # NOTE(review): only state 1 is contoured -- the outlier check above
        # is commented out and replaced with a hard-coded id; confirm intent.
        if i == 1:
            print("i=", i)
            x = phi_angles[np.where(labels == i)]
            y = psi_angles[np.where(labels == i)]
            # Subsample to at most 1000 points to keep the KDE tractable.
            indices = get_subindices(assignments=x, state=None, samples=1000)
            x = x[indices]
            y = y[indices]
            X, Y= np.meshgrid(x, y)
            positions = np.vstack([X.ravel(), Y.ravel()])
            values = np.vstack([x, y])
            # Gaussian kernel density estimate evaluated on the mesh grid.
            kernel = stats.gaussian_kde(values)
            Z = np.reshape(kernel(positions).T, X.shape)
            #kde = KernelDensity(kernel='gaussian', bandwidth=0.2)
            #kde_results = kde.score_samples([x,y])
            #X, Y, Z = np.meshgrid(x, y, kde_results)
            #Z = np.reshape(kernel([x,y]).T, x.shape)
            #Z1 = mlab.bivariate_normal(X, Y, 5.0, 5.0, 0.0, 0.0)
            #Z2 = mlab.bivariate_normal(X, Y, 7.5, 2.5, 5, 5)
            # difference of Gaussians
            #Z = 10.0 * (Z2 - Z1)
            #step = Z.max()-Z.min()/10
            #print "Z min:",Z.min(), "Z.max:", Z.max(), "step:", step
            #levels = np.arange(Z.min(), Z.min(), Z.max())
            #print levels
            plt.contour(X, Y, Z, origin='lower') #, linewidths=Z.min(), levels=levels)
    plt.title("Alanine Dipeptide " + name + " states")
    plt.xlabel(r"$\phi$")
    plt.ylabel(r"$\psi$")
    plt.xlim([-180, 180])
    plt.ylim([-180, 180])
    # Save the result figure
    plt.savefig('./'+name+'.png', dpi=400)
    plt.close()
    #plt.show()
def plot_matrix(tProb_=None, name=None):
    """Render a (transition probability) matrix as a heat map.

    The figure is saved to ``./<name>Matrix.png`` at 400 dpi and the figure
    is closed afterwards; nothing is returned.
    """
    pylab.matshow(tProb_, cmap=plt.cm.OrRd)
    plt.colorbar()
    out_path = './' + name + 'Matrix.png'
    plt.savefig(out_path, dpi=400)
    plt.close()
#From Wang Wei's code
| 38.556291 | 209 | 0.591635 |
9634bfc41291ba70d3cb8d6d1b58e82b77a84ebf | 494 | py | Python | commanderbot_lib/database/yaml_file_database.py | CommanderBot-Dev/commanderbot-lib | 2716279b059056eaf0797085149b61f71b175ed5 | [
"MIT"
] | 1 | 2020-09-25T19:22:47.000Z | 2020-09-25T19:22:47.000Z | commanderbot_lib/database/yaml_file_database.py | CommanderBot-Dev/commanderbot-lib | 2716279b059056eaf0797085149b61f71b175ed5 | [
"MIT"
] | 1 | 2021-01-06T00:22:56.000Z | 2021-08-29T20:54:50.000Z | commanderbot_lib/database/yaml_file_database.py | CommanderBot-Dev/commanderbot-lib | 2716279b059056eaf0797085149b61f71b175ed5 | [
"MIT"
] | 2 | 2020-09-25T19:23:07.000Z | 2020-09-25T21:06:11.000Z | from typing import IO
from commanderbot_lib.database.abc.file_database import FileDatabase
from commanderbot_lib.database.mixins.yaml_file_database_mixin import (
YamlFileDatabaseMixin,
)
| 29.058824 | 71 | 0.755061 |
9634f1c7c56270380c2632695615ddd30a7c567d | 1,663 | py | Python | git_verkefni_forritun.py | JonasFreyrB/Forritun | 61cfda738693b255131bf6fb4ebea3af6f3a4ecf | [
"MIT"
] | null | null | null | git_verkefni_forritun.py | JonasFreyrB/Forritun | 61cfda738693b255131bf6fb4ebea3af6f3a4ecf | [
"MIT"
] | null | null | null | git_verkefni_forritun.py | JonasFreyrB/Forritun | 61cfda738693b255131bf6fb4ebea3af6f3a4ecf | [
"MIT"
] | null | null | null | #Jnas Freyr Bjarnason
#25.01.2017
#Forritun
#Liur 1
#By notanda um tlu 1
tala1=int(input("Slu inn tlu 1 "))
#By notanda um tlu 2
tala2=int(input("Slu inn tlu 2 "))
#Birti tlu 1 og 2 lagar saman
print("Tlurnar lagar saman ",tala1+tala2)
#Birti tlu 1 og 2 margfaldaar saman
print("Tlurnar margfaldaar saman ",tala1*tala2)
#Liur 2
#By notanda um fornafn
fornafn=input("Slu inn fornafni itt ")
#By notanda um eftirnafn
eftirnafn=input("Slu inn eftirnafni itt ")
#Birti skilabo samt bi nfnin lg saman
print("Hall",fornafn,eftirnafn)
#Liur 3
#By notanda um texta
text=input("Slu inn texta ")
#B til teljara fyrir lgstafi
tellagstafi=0
#B til teljara fyrir hgstafi
telhastafi=0
#B til teljara fyrir lgstafi eftir hstafi
tellagstafieftir=0
#B til for lykkju sem keyrir gegnum textann
for x in range(len(text)):
#Ef stafurinn texta er bkstafur og er hstaf
if (text[x].isalpha() and text[x].isupper()):
#Bti 1 vi teljara fyrir hgstafi
telhastafi=telhastafi+1
#Ef nsti stafur er lgstafur
if (text[x +1].islower()):
#Bti 1 vi teljara fyrir lgstafi eftir hstafi
tellagstafieftir=tellagstafieftir+1
#Ef stafurinn texta er bkstafur og er lgstaf
elif(text[x].isalpha() and text[x].islower()):
#Bti 1 vi teljara fyrir lgstafi
tellagstafi=tellagstafi+1
#Birti fjlda hstafi
print("a komu",telhastafi,"hstafir")
#Birti fjlda lgstafi
print("a komu",tellagstafi,"lgstafir")
#Birti fjlda lgstafi eftir hstafi
print("a komu",tellagstafieftir,"lgstafir koma strax eftir hstaf")
| 29.175439 | 72 | 0.723391 |
96359eac01afe317df5fd3c215b39bdd662a534c | 14,568 | py | Python | test/pdu.py | praekelt/python-smpp | 8a0753fc498ab6bcd6243aed5953cddd69cef2c0 | [
"BSD-3-Clause"
] | 36 | 2015-01-15T09:38:06.000Z | 2021-06-14T15:27:34.000Z | test/pdu.py | komuW/smpp_server | 10ef5c2ebc09e2ef88bdd62c55a4280a187d1eb2 | [
"BSD-3-Clause"
] | 8 | 2015-02-12T15:52:53.000Z | 2017-05-22T12:28:45.000Z | test/pdu.py | komuW/smpp_server | 10ef5c2ebc09e2ef88bdd62c55a4280a187d1eb2 | [
"BSD-3-Clause"
] | 22 | 2015-04-29T15:06:17.000Z | 2021-05-25T11:19:41.000Z |
pdu_objects = [
{
'header': {
'command_length': 0,
'command_id': 'bind_transmitter',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
'system_type': '',
'interface_version': '34',
'addr_ton': 1,
'addr_npi': 1,
'address_range': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_transmitter_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_receiver',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
'system_type': '',
'interface_version': '34',
'addr_ton': 1,
'addr_npi': 1,
'address_range': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_receiver_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_transceiver',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
'system_type': '',
'interface_version': '34',
'addr_ton': 1,
'addr_npi': 1,
'address_range': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'bind_transceiver_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'outbind',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'system_id': 'test_system',
'password': 'abc123',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'unbind',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'unbind_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'generic_nack',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': 'testing 123',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 0,
'short_message': None,
# 'short_message' can be of zero length
},
'optional_parameters': [
{
'tag': 'message_payload',
'length': 0,
'value': '5666',
},
],
},
},
# ]
# breaker = [
{
'header': {
'command_length': 0,
'command_id': 'submit_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_sm_resp',
'command_status': 'ESME_RSYSERR',
'sequence_number': 0,
},
# submit_sm_resp can have no body for failures
},
{
'header': {
'command_length': 0,
'command_id': 'submit_multi',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'number_of_dests': 0,
'dest_address': [
{
'dest_flag': 1,
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': 'the address'
},
{
'dest_flag': 2,
'dl_name': 'the list',
},
{
'dest_flag': 2,
'dl_name': 'the other list',
},
# {}
],
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': 'testing 123',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'submit_multi_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'no_unsuccess': 5,
'unsuccess_sme': [
{
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'error_status_code': 0,
},
{
'dest_addr_ton': 3,
'dest_addr_npi': 1,
'destination_addr': '555',
'error_status_code': 0,
},
],
},
},
},
# ]
# breaker = [
{
'header': {
'command_length': 0,
'command_id': 'deliver_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'protocol_id': 0,
'priority_flag': 0,
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'deliver_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'data_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
'esm_class': 0,
'registered_delivery': 0,
'data_coding': 0,
},
'optional_parameters': [
{
'tag': 'message_payload',
'length': 0,
'value': '',
},
],
},
},
{
'header': {
'command_length': 0,
'command_id': 'data_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'query_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'query_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'final_date': '',
'message_state': 0,
'error_code': 0,
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'cancel_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'service_type': '',
'message_id': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'dest_addr_ton': 1,
'dest_addr_npi': 1,
'destination_addr': '',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'cancel_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'replace_sm',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'message_id': '',
'source_addr_ton': 1,
'source_addr_npi': 1,
'source_addr': '',
'schedule_delivery_time': '',
'validity_period': '',
'registered_delivery': 0,
'replace_if_present_flag': 0,
'data_coding': 0,
'sm_default_msg_id': 0,
'sm_length': 1,
'short_message': 'is this an = sign?',
},
},
},
{
'header': {
'command_length': 0,
'command_id': 'replace_sm_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'enquire_link',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'enquire_link_resp',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
},
{
'header': {
'command_length': 0,
'command_id': 'alert_notification',
'command_status': 'ESME_ROK',
'sequence_number': 0,
},
'body': {
'mandatory_parameters': {
'source_addr_ton': 'international',
'source_addr_npi': 1,
'source_addr': '',
'esme_addr_ton': 9,
'esme_addr_npi': '',
'esme_addr': '',
},
},
},
]
| 28.17795 | 57 | 0.376922 |
96361040937f80ec08a4104661139247c1a2e9f9 | 3,616 | py | Python | arxiv/release/dist_version.py | cul-it/arxiv-base | a5beadf44c24f72e21313299bfafc1ffb9d28ac7 | [
"MIT"
] | 23 | 2019-01-10T22:01:18.000Z | 2022-02-02T10:28:25.000Z | arxiv/release/dist_version.py | arXiv/arxiv-base | b59490abc1656c240025e19af86d6a246926914a | [
"MIT"
] | 57 | 2018-12-17T16:45:38.000Z | 2021-12-14T14:20:58.000Z | arxiv/release/dist_version.py | cul-it/arxiv-base-ui | a5beadf44c24f72e21313299bfafc1ffb9d28ac7 | [
"MIT"
] | 5 | 2019-01-10T22:01:28.000Z | 2021-11-05T12:25:31.000Z | """
Functions to deal with arxiv package versions.
It can be used in the setup.py file:
from arxiv.release.dist_version import get_version
setup(
version=get_version('arxiv-filemanager'),
....
)
"""
import sys
import pathlib
from subprocess import Popen, PIPE
from datetime import datetime
import pkg_resources
from pathlib import Path
from typing import Any, Optional
def get_version(dist_name: str) -> Optional[str]:
"""Get the version written by write_version(), or the git describe version.
Parameters
----------
dist_name: str
Which arxiv distribution to get. ex arxiv-base
arxiv-filemanager. This should be the name from setup.py or
pypi. These will be mapped to arxiv.base.version and
arxiv.filemanager.version.
Returns
-------
str
The version.__version__ value if it exists or the git describe
version if it exists or the string 'no-git-or-release-version'
"""
# TODO We might want to make it an error if we are under git
# and there is a version.py file? It doesn't seem like a good state.
pkg = ".".join(dist_name.split("-")) + ".version"
try:
name = "__version__"
dist_version = str(getattr(__import__(pkg, fromlist=[name]), name))
return dist_version
except ModuleNotFoundError:
pass
pkv=get_pkg_version(dist_name)
if pkv is not None:
return pkv
try:
return get_git_version()
except ValueError:
pass
return "0.0.1+no-git-or-release-version"
def write_version(dist_name: str, version: str) -> Path:
"""Write version to version.py in package corresponding with dist_name.
Parameters
----------
dist_name: str
Which arxiv distribution to get. ex arxiv-base
arxiv-filemanager. These will be mapped to arxiv.base.version
and arxiv.filemanager.version.
version: str
A string with a semantic version.
Returns
-------
Path
This returns the path to the version.py file.
"""
dir = "/".join(dist_name.split("-")) + "/version.py"
path = pathlib.Path(dir)
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, "w+") as ff: # overwrite existing version
when = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
ff.write("'Created by tag_check.write_version'\n")
ff.write("# NEVER CHECK THIS version.py file INTO GIT.\n")
ff.write(
"# Generated when the package was build for distribution.\n"
)
ff.write(f"__when__ = '{when}'\n")
ff.write(f"__version__ = '{version}'\n")
return path
def get_pkg_version(pkg: Any) -> Optional[str]:
"""Get the python package version.
pkg needs to be the package name from setup.py or the name used to
install from pypi.
"""
try:
return pkg_resources.get_distribution(pkg).version
except:
return None
def get_git_version(abbrev: int = 7) -> str:
"""Get the current version using `git describe`."""
try:
p = Popen(
["git", "describe", "--dirty", "--abbrev=%d" % abbrev],
stdout=PIPE,
stderr=PIPE,
)
p.stderr.close()
line = p.stdout.readlines()[0]
return str(line.strip().decode("utf-8"))
except Exception:
raise ValueError("Cannot get the version number from git")
# Below is intended to let this module be used in CI scripts:
# ``export APP_VER=$(python -m arxiv.release.get_version arxiv-hatsize-agent)``
if __name__ == "__main__":
print(get_version(sys.argv[1]))
| 29.16129 | 79 | 0.634679 |
9636f9d68fd104dbb3836b714d906a33ec4f48ed | 15,812 | py | Python | rssynergia/base_diagnostics/read_bunch.py | radiasoft/rs_synergia | b43509de7f4a938354dc127762d8e723463e0e95 | [
"Apache-2.0"
] | null | null | null | rssynergia/base_diagnostics/read_bunch.py | radiasoft/rs_synergia | b43509de7f4a938354dc127762d8e723463e0e95 | [
"Apache-2.0"
] | null | null | null | rssynergia/base_diagnostics/read_bunch.py | radiasoft/rs_synergia | b43509de7f4a938354dc127762d8e723463e0e95 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""?
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
#import argparse
#import tables
from mpi4py import MPI
import h5py
import inspect
import numpy as np
import os
import synergia
# load the particles that will be used for the simulation
# The particles file is a text file with particle coordinates
# defined with the MAD-X conventions: X PX Y PY T PT
# Read this in using numpy's loadtxt command
# particle coordinates are converted to Synergia conventions
# input arguments:
# particles_file: the file name
# reference particle: the lattice reference particle for kinematic conversions
# num_real_particles: the number of real particles in the bunch
# bucket_length: the longitudinal length of the bucket
# comm: the Commxx communicator object for this bunch
# verbose: be chatty about what's happening
#
def read_bunch(particles, refpart, real_particles, comm, bucket_length = None, verbose=False):
'''
Read a bunch from file (either .txt, .h5, or .mxtxt (MAD-X txt file)) and construct a Synergia bunch object.
Arguments:
- particles (string or np.ndarray): EITHER a file containing particles coordinates OR an ndarray of coordinates
- refpart (synergia.foundation.foundation.Reference_particle): the Synergia reference particle describing the bunch
- num_real_particles (float): the number of real particles
- comm (synergia.utils.parallel_utils.Commxx): the Commxx communicator object for this bunch
- bucket_length (Optional[float]): if specified, the longitudinal length of the bucket in m
- verbose (Optional[Boolean]): Flag for verbose feedback
Returns:
-bunch: A Synergia bunch object is created in the current session
'''
#first attempt to load the particles as an h5 file
try:
return read_h5_particles(particles, refpart, real_particles, bucket_length, comm, verbose)
#it's not an h5 file - then there are two possibilities:
#1. It's another sort of file, in which case, an IOError will be thrown
#2. It's a numpy array, in which case a TypeError will be thrown
#Therefore, we will catch the IOErrror and process it as an input file to check if it's a legible text file
#Then we will catch the possible TypeError and process it for being a numpy array
except IOError:
#IOError, so it's a file but not an .h5 file
name,extension = os.path.splitext(particles)
#assuming no error is thrown, we continue processing the file - whihc should be now either a .txt or .mxtxt
assert extension == '.txt' or extension == '.mxtxt', \
"Supported file types are hdf5 (.h5) and plain text (.txt/.mxtxt)"
return read_txt_particles(particles, refpart, real_particles, bucket_length, comm, extension == '.mxtxt', verbose)
except TypeError:
#TypeError, so it's not a file - so we should check if it's a numpy array
#Had we checked the .txt read first, it would have return an AttributeError
assert isinstance(particles, np.ndarray), \
"Supported data types are numpy arrays only."
return read_array_particles(particles, refpart, real_particles, bucket_length, comm, verbose)
#====================================================================
# if madx_format is True, the particles are in madX units, otherwise they are in
# synergia units
def read_txt_particles(particles_file, refpart, real_particles, bucket_length, comm, madx_format, verbose):
"""Read an array of particles from a text file"""
four_momentum = refpart.get_four_momentum()
pmass = four_momentum.get_mass()
E_0 = four_momentum.get_total_energy()
p0c = four_momentum.get_momentum()
myrank = comm.get_rank()
mpisize = comm.get_size()
if myrank==0 and verbose:
if madx_format:
print("Loading madX particles from txt file: ", particles_file)
else:
print("Loading Synergia particles from txt file: ", particles_file)
if myrank == 0:
particles = np.loadtxt(particles_file)
num_total_particles = particles.shape[0]
# broadcast num particles to all nodes
MPI.COMM_WORLD.bcast(num_total_particles, root=0)
else:
num_total_particles = None
num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)
if myrank == 0:
# make sure the data has the correct shape, either [n,6] without
# particles IDs or [n,7] with particle IDs.
if (particles.shape[1] != 6) and (particles.shape[1] != 7):
raise RuntimeError, "input data shape %shas incorrect number of particle coordinates"%repr(particles.shape)
if madx_format:
# numpy manipulations to convert kinematics
# convert MAD-X T=-c*dt to Synergia c*ct
particles[:,4] = -particles[:,4]
# convert MAD-X Delta-E/pc to Synergia delta-p/p
# sqrt(((dE/p0c)+(E0/p0c))**2 - (m/p0c)**2) - (p0c/p0c)
m_over_pc = pmass/p0c
E_0_over_pc = E_0/p0c
particles[:,5] = np.sqrt( (particles[:,5] + E_0_over_pc) *
(particles[:,5] + E_0_over_pc) - m_over_pc**2 ) - 1.0
# if there are no IDs, append particle ID column
if particles.shape[1] != 7:
particles_w_id = np.column_stack((particles,
np.arange(num_total_particles, dtype='d')))
else:
particles_w_id = particles
if myrank == 0:
print("Read ", num_total_particles, " particles")
#Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016
#Using old constructor throws an ArgumentError of a non-standard type.
# Using a try and except to handle both instances.
try:
# try the original constructor
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm,
bucket_length)
except Exception, e:
#look to see if it's an ArgumentError by evaluating the traceback
if (not str(e).startswith("Python argument types in")):
raise
else:
# use the new constructor
if verbose:
print("Using updated bunch constructor")
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm)
# now set the new parameter 'z_period_length'
if bucket_length is not None:
bunch.set_z_period_length(bucket_length)
else:
bucket_length = 1. #fix this quantity
local_num = bunch.get_local_num()
local_particles = bunch.get_local_particles()
# Each processor will have a possibly different number of local particles.
# rank 0 has to find out how many each of them has and distribute them
n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)
if myrank == 0:
# copy in my particles
this_rank_start = 0
local_particles[:,:] = particles_w_id[0:local_num, :]
this_rank_start += local_num
# send particles out to other ranks
for r in range(1, mpisize):
this_rank_end = this_rank_start+n_particles_by_proc[r]
MPI.COMM_WORLD.send(obj=particles_w_id[this_rank_start:this_rank_end, :],
dest=r)
this_rank_start += n_particles_by_proc[r]
else:
# I'm not rank 0. Receive my particles
lp = MPI.COMM_WORLD.recv(source=0)
local_particles[:,:] = lp[:,:]
return bunch
#==========================================================
def read_h5_particles(particles_file, refpart, real_particles, bucket_length, comm, verbose):
"""Read an array of particles from an HDF-5 file"""
four_momentum = refpart.get_four_momentum()
pmass = four_momentum.get_mass()
E_0 = four_momentum.get_total_energy()
p0c = four_momentum.get_momentum()
myrank = comm.get_rank()
mpisize = comm.get_size()
if myrank==0 and verbose:
print("Loading particles from h5 file: ", particles_file)
if myrank == 0:
#h5 = tables.open_file(particles_file)
h5 = h5py.File(particles_file)
# use explicit int conversion otherwise there seems to
# be a typepython->C++ type mismatch of numpy.int64->int
#num_total_particles = int(h5.root.particles.shape[0])
num_total_particles = int(h5['particles'].shape[0])
if verbose:
print("Total of ", num_total_particles, " particles from file")
# broadcast num particles to all nodes
MPI.COMM_WORLD.bcast(num_total_particles, root=0)
else:
num_total_particles = None
num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)
if myrank == 0:
particles = h5['particles']
# make sure the data has the correct shape, either [n,6] without
# particles IDs or [n,7] with particle IDs.
if (particles.shape[1] != 7):
raise RuntimeError, "input data shape %shas incorrect number of particle coordinates"%repr(particles.shape)
#Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016
#Using old constructor throws an ArgumentError of a non-standard type.
# Using a try and except to handle both instances.
try:
# try the original constructor
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm,
bucket_length)
except Exception, e:
#look to see if it's an ArgumentError by evaluating the traceback
if (not str(e).startswith("Python argument types in")):
raise
else:
# use the new constructor
if verbose:
print("Using updated bunch constructor")
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm)
# now set the new parameter 'z_period_length'
if bucket_length is not None:
bunch.set_z_period_length(bucket_length)
else:
bucket_length = 1. #fix this quantity
local_num = bunch.get_local_num()
local_particles = bunch.get_local_particles()
# Each processor will have a possibly different number of local particles.
# rank 0 has to find out how many each of them has and distribute them
n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)
if myrank == 0:
# copy in my particles
this_rank_start = 0
local_particles[:,:] = particles[0:local_num, :]
this_rank_start += local_num
# send particles out to other ranks
for r in range(1, mpisize):
this_rank_end = this_rank_start+n_particles_by_proc[r]
MPI.COMM_WORLD.send(obj=particles[this_rank_start:this_rank_end, :],
dest=r)
this_rank_start += n_particles_by_proc[r]
else:
# I'm not rank 0. Receive my particles
lp = MPI.COMM_WORLD.recv(source=0)
local_particles[:,:] = lp[:,:]
return bunch
#==========================================================
def read_array_particles(particle_array, refpart, real_particles, bucket_length, comm, verbose):
"""Read an array of particles coordinates from memory"""
four_momentum = refpart.get_four_momentum()
pmass = four_momentum.get_mass()
E_0 = four_momentum.get_total_energy()
p0c = four_momentum.get_momentum()
myrank = comm.get_rank()
mpisize = comm.get_size()
if myrank==0 and verbose:
print("Loading particles from: ".format(particle_array))
if myrank == 0:
# use explicit int conversion otherwise there seems to
# be a typepython->C++ type mismatch of numpy.int64->int
#num_total_particles = int(h5.root.particles.shape[0])
num_total_particles = particle_array.shape[0]
if verbose:
print("Total of ", num_total_particles, " particles")
# broadcast num particles to all nodes
MPI.COMM_WORLD.bcast(num_total_particles, root=0)
else:
num_total_particles = None
num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)
if myrank == 0:
particles = particle_array
# make sure the data has the correct shape, either [n,6] without
# particles IDs or [n,7] with particle IDs.
if (particle_array.shape[1] != 7):
raise RuntimeError, "input data shape %shas incorrect number of particle coordinates"%repr(particles.shape)
#Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016
#Using old constructor throws an ArgumentError of a non-standard type.
# Using a try and except to handle both instances.
try:
# try the original constructor
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm,
bucket_length)
except Exception, e:
#look to see if it's an ArgumentError by evaluating the traceback
if (not str(e).startswith("Python argument types in")):
raise
else:
# use the new constructor
if verbose:
print("Using updated bunch constructor")
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm)
# now set the new parameter 'z_period_length'
if bucket_length is not None:
bunch.set_z_period_length(bucket_length)
else:
bucket_length = 1. #fix this quantity
local_num = bunch.get_local_num()
local_particles = bunch.get_local_particles()
# Each processor will have a possibly different number of local particles.
# rank 0 has to find out how many each of them has and distribute them
n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)
if myrank == 0:
# copy in my particles
this_rank_start = 0
local_particles[:,:] = particle_array[0:local_num, :]
this_rank_start += local_num
# send particles out to other ranks
for r in range(1, mpisize):
this_rank_end = this_rank_start+n_particles_by_proc[r]
MPI.COMM_WORLD.send(obj=particles[this_rank_start:this_rank_end, :],
dest=r)
this_rank_start += n_particles_by_proc[r]
else:
# I'm not rank 0. Receive my particles
lp = MPI.COMM_WORLD.recv(source=0)
local_particles[:,:] = lp[:,:]
return bunch
#================================================================
#=========================================================
| 41.177083 | 142 | 0.623893 |
9637f8491206f394aea0791103d4e2cc75fcd07e | 15,043 | py | Python | funfolding/binning/classic_binning.py | tudo-astroparticlephysics/funfolding | 2f485b04f8d79698527fcaab015baf708505e8dd | [
"MIT"
] | 1 | 2019-05-22T13:46:46.000Z | 2019-05-22T13:46:46.000Z | funfolding/binning/classic_binning.py | tudo-astroparticlephysics/funfolding | 2f485b04f8d79698527fcaab015baf708505e8dd | [
"MIT"
] | null | null | null | funfolding/binning/classic_binning.py | tudo-astroparticlephysics/funfolding | 2f485b04f8d79698527fcaab015baf708505e8dd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ._binning import Binning
import itertools
import numpy as np
import copy
try:
from astroML.density_estimation.bayesian_blocks import bayesian_blocks
got_astroML = True
except ImportError:
got_astroML = False
| 38.670951 | 79 | 0.515456 |
96395cbf1fcfecb1f1e6a9078b9555cfe006e998 | 2,625 | py | Python | basic_algorithm/draw/draw.py | Quanfita/ImageProcessing | 2a7c1d093a003c43d6d259f6e8db3b4e6163839b | [
"MIT"
] | null | null | null | basic_algorithm/draw/draw.py | Quanfita/ImageProcessing | 2a7c1d093a003c43d6d259f6e8db3b4e6163839b | [
"MIT"
] | null | null | null | basic_algorithm/draw/draw.py | Quanfita/ImageProcessing | 2a7c1d093a003c43d6d259f6e8db3b4e6163839b | [
"MIT"
] | null | null | null | import cv2
import numpy as np
if __name__ == '__main__':
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawLine(canvas,800,100,100,600)
cv2.imwrite('line.png',canvas)
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawCircle(canvas,500,500,300)
cv2.imwrite('circle.png',canvas)
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawEllipse(canvas,500,500,100,200)
cv2.imwrite('ellipse.png',canvas) | 27.061856 | 67 | 0.459429 |
9639600eac70c10e9cb5e5d9d76147c7dda0b313 | 888 | py | Python | PyTCI/models/alfentanil.py | jcia2192/PyTCI | 952ac6312015514c8609af5d9a61cc3397758c94 | [
"MIT"
] | 6 | 2019-02-16T22:29:42.000Z | 2020-10-17T17:22:52.000Z | PyTCI/models/alfentanil.py | jcia2192/PyTCI | 952ac6312015514c8609af5d9a61cc3397758c94 | [
"MIT"
] | 91 | 2019-03-04T06:11:07.000Z | 2022-03-30T01:31:27.000Z | PyTCI/models/alfentanil.py | jcia2192/PyTCI | 952ac6312015514c8609af5d9a61cc3397758c94 | [
"MIT"
] | 3 | 2019-05-14T15:09:30.000Z | 2020-02-19T13:03:03.000Z | from .base import Three
| 24 | 83 | 0.516892 |
963a4d3128c84db58d2f454e777068e2515b774e | 307 | py | Python | cooee/actions.py | yschimke/cooee-cli-py | 74edeb58ee5cfd0887b73de4f90ffa28892e24df | [
"Apache-2.0"
] | null | null | null | cooee/actions.py | yschimke/cooee-cli-py | 74edeb58ee5cfd0887b73de4f90ffa28892e24df | [
"Apache-2.0"
] | null | null | null | cooee/actions.py | yschimke/cooee-cli-py | 74edeb58ee5cfd0887b73de4f90ffa28892e24df | [
"Apache-2.0"
] | null | null | null | import webbrowser
from typing import Dict, Any
from prompt_toolkit import print_formatted_text
from .format import todo_string
| 21.928571 | 49 | 0.745928 |
963a7890170f483f8139e5e50f0f73025935d302 | 4,114 | py | Python | custom_components/panasonic_smart_app/sensor.py | clspeter/panasonic_smart_app | 22cf7e64f3b9685b94b38e4d7ffeb5deb900a8af | [
"MIT"
] | null | null | null | custom_components/panasonic_smart_app/sensor.py | clspeter/panasonic_smart_app | 22cf7e64f3b9685b94b38e4d7ffeb5deb900a8af | [
"MIT"
] | null | null | null | custom_components/panasonic_smart_app/sensor.py | clspeter/panasonic_smart_app | 22cf7e64f3b9685b94b38e4d7ffeb5deb900a8af | [
"MIT"
] | null | null | null | from datetime import timedelta
import logging
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (
STATE_UNAVAILABLE,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
PERCENTAGE,
)
from .entity import PanasonicBaseEntity
from .const import (
DOMAIN,
UPDATE_INTERVAL,
DEVICE_TYPE_DEHUMIDIFIER,
DEVICE_TYPE_AC,
DATA_CLIENT,
DATA_COORDINATOR,
LABEL_PM25,
LABEL_HUMIDITY,
LABEL_OUTDOOR_TEMPERATURE,
ICON_PM25,
ICON_THERMOMETER,
ICON_HUMIDITY,
)
_LOGGER = logging.getLogger(__package__)
SCAN_INTERVAL = timedelta(seconds=UPDATE_INTERVAL)
| 26.714286 | 75 | 0.622265 |
963b386535b3cdad7d06852710557f50ea31610a | 5,664 | py | Python | fluent.runtime/tests/format/test_placeables.py | jakub-szczepaniak/python-fluent | 2b751220e4ced57fc256df0f25adc72400e5ce9a | [
"Apache-2.0"
] | null | null | null | fluent.runtime/tests/format/test_placeables.py | jakub-szczepaniak/python-fluent | 2b751220e4ced57fc256df0f25adc72400e5ce9a | [
"Apache-2.0"
] | null | null | null | fluent.runtime/tests/format/test_placeables.py | jakub-szczepaniak/python-fluent | 2b751220e4ced57fc256df0f25adc72400e5ce9a | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, unicode_literals
import unittest
from fluent.runtime import FluentBundle
from fluent.runtime.errors import FluentCyclicReferenceError, FluentReferenceError
from ..utils import dedent_ftl
| 37.76 | 82 | 0.596751 |
963c560293977e228cb5a3afa7c8d254adb111f7 | 956 | py | Python | ads/feature_engineering/adsstring/parsers/base.py | oracle/accelerated-data-science | d594ed0c8c1365daf4cf9e860daebc760fa9a24b | [
"UPL-1.0",
"Apache-2.0"
] | 20 | 2022-02-22T19:07:09.000Z | 2022-03-16T17:21:42.000Z | ads/feature_engineering/adsstring/parsers/base.py | oracle/accelerated-data-science | d594ed0c8c1365daf4cf9e860daebc760fa9a24b | [
"UPL-1.0",
"Apache-2.0"
] | null | null | null | ads/feature_engineering/adsstring/parsers/base.py | oracle/accelerated-data-science | d594ed0c8c1365daf4cf9e860daebc760fa9a24b | [
"UPL-1.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*--
# Copyright (c) 2021, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
| 19.916667 | 104 | 0.639121 |
963c73799bbdb00fb97205577d7028cae7121d80 | 1,535 | py | Python | tests/conftest.py | banteg/lido-vault | 702fa9c58a26c01c61b24489d18ea099e22e8b09 | [
"MIT"
] | 22 | 2020-12-19T10:07:38.000Z | 2022-01-03T03:28:08.000Z | tests/conftest.py | banteg/lido-vault | 702fa9c58a26c01c61b24489d18ea099e22e8b09 | [
"MIT"
] | 1 | 2020-12-23T22:32:42.000Z | 2020-12-23T22:35:56.000Z | tests/conftest.py | banteg/lido-vault | 702fa9c58a26c01c61b24489d18ea099e22e8b09 | [
"MIT"
] | 1 | 2020-12-21T08:45:07.000Z | 2020-12-21T08:45:07.000Z | import pytest
from brownie import Wei
class Helpers:
| 25.583333 | 84 | 0.721173 |
963e0a388ab593079d1ff2e77544ecf12fa56919 | 112 | py | Python | reqto/__init__.py | DovaX/reqto | 4d3cc03535297fb0d5c946632e9de6a3a1ec5420 | [
"MIT"
] | null | null | null | reqto/__init__.py | DovaX/reqto | 4d3cc03535297fb0d5c946632e9de6a3a1ec5420 | [
"MIT"
] | null | null | null | reqto/__init__.py | DovaX/reqto | 4d3cc03535297fb0d5c946632e9de6a3a1ec5420 | [
"MIT"
] | null | null | null | from reqto.core.reqto import get, post, delete, put, patch, head
__all__=[get, post, delete, put, patch, head] | 28 | 64 | 0.723214 |
963e5b6e89e0809787a1d58ba17bc95ac8ccc84f | 911 | py | Python | setup.py | sgillies/fio-taxa | a278f366c23d1e0946bc4675de905bda712c2490 | [
"MIT"
] | 2 | 2018-05-20T06:31:44.000Z | 2021-12-02T20:59:46.000Z | setup.py | sgillies/fio-taxa | a278f366c23d1e0946bc4675de905bda712c2490 | [
"MIT"
] | 1 | 2018-12-19T17:05:05.000Z | 2018-12-19T17:05:05.000Z | setup.py | sgillies/fio-taxa | a278f366c23d1e0946bc4675de905bda712c2490 | [
"MIT"
] | null | null | null | from codecs import open as codecs_open
from setuptools import setup, find_packages
# Get the long description from the relevant file
with codecs_open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(name='fio_taxa',
version='1.0.0',
description=u"Classification of GeoJSON features",
long_description=long_description,
classifiers=[],
keywords='',
author=u"Sean Gillies",
author_email='sean.gillies@gmail.com',
url='https://github.com/sgillies/fio-taxa',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'click', 'fiona'
],
extras_require={
'test': ['pytest'],
},
entry_points="""
[fiona.fio_commands]
taxa=fio_taxa.scripts.cli:taxa
"""
)
| 26.794118 | 72 | 0.630077 |
963e81d1f86297198f40e8bbac901cbb13572805 | 829 | py | Python | python/q04.py | holisound/70-math-quizs-for-programmers | 746d98435a496fd8313a233fe4c2a59fd11d3823 | [
"MIT"
] | null | null | null | python/q04.py | holisound/70-math-quizs-for-programmers | 746d98435a496fd8313a233fe4c2a59fd11d3823 | [
"MIT"
] | null | null | null | python/q04.py | holisound/70-math-quizs-for-programmers | 746d98435a496fd8313a233fe4c2a59fd11d3823 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from collections import deque
print cutBar(3, 8)
print cutBar(3, 20)
print cutBar(5, 100)
print cutBar(1, 1)
| 19.738095 | 43 | 0.472859 |
963ee23183336e553e81e0efa85833a77f9df80d | 6,108 | py | Python | numba/core/itanium_mangler.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | [
"BSD-2-Clause"
] | null | null | null | numba/core/itanium_mangler.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | [
"BSD-2-Clause"
] | 1 | 2020-07-28T20:47:24.000Z | 2020-07-28T20:47:24.000Z | numba/core/itanium_mangler.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | [
"BSD-2-Clause"
] | null | null | null | """
Itanium CXX ABI Mangler
Reference: http://mentorembedded.github.io/cxx-abi/abi.html
The basics of the mangling scheme.
We are hijacking the CXX mangling scheme for our use. We map Python modules
into CXX namespace. A `module1.submodule2.foo` is mapped to
`module1::submodule2::foo`. For parameterized numba types, we treat them as
templated types; for example, `array(int64, 1d, C)` becomes an
`array<int64, 1, C>`.
All mangled names are prefixed with "_Z". It is followed by the name of the
entity. A name contains one or more identifiers. Each identifier is encoded
as "<num of char><name>". If the name is namespaced and, therefore,
has multiple identifiers, the entire name is encoded as "N<name>E".
For functions, arguments types follow. There are condensed encodings for basic
built-in types; e.g. "i" for int, "f" for float. For other types, the
previously mentioned name encoding should be used.
For templated types, the template parameters are encoded immediately after the
name. If it is namespaced, it should be within the 'N' 'E' marker. Template
parameters are encoded in "I<params>E", where each parameter is encoded using
the mentioned name encoding scheme. Template parameters can contain literal
values like the '1' in the array type shown earlier. There is special encoding
scheme for them to avoid leading digits.
"""
import re
from numba.core import types
# According the scheme, valid characters for mangled names are [a-zA-Z0-9_].
# We borrow the '_' as the escape character to encode invalid char into
# '_xx' where 'xx' is the hex codepoint.
_re_invalid_char = re.compile(r'[^a-z0-9_]', re.I)
PREFIX = "_Z"
# Numba types to mangled type code. These correspond with the codes listed in
# https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling-builtin
N2CODE = {
types.void: 'v',
types.boolean: 'b',
types.uint8: 'h',
types.int8: 'a',
types.uint16: 't',
types.int16: 's',
types.uint32: 'j',
types.int32: 'i',
types.uint64: 'y',
types.int64: 'x',
types.float16: 'Dh',
types.float32: 'f',
types.float64: 'd'
}
def _escape_string(text):
"""Escape the given string so that it only contains ASCII characters
of [a-zA-Z0-9_$].
The dollar symbol ($) and other invalid characters are escaped into
the string sequence of "$xx" where "xx" is the hex codepoint of the char.
Multibyte characters are encoded into utf8 and converted into the above
hex format.
"""
ret = re.sub(_re_invalid_char, repl, text)
# Return str if we got a unicode (for py2)
if not isinstance(ret, str):
return ret.encode('ascii')
return ret
def _fix_lead_digit(text):
"""
Fix text with leading digit
"""
if text and text[0].isdigit():
return '_' + text
else:
return text
def _len_encoded(string):
"""
Prefix string with digit indicating the length.
Add underscore if string is prefixed with digits.
"""
string = _fix_lead_digit(string)
return '%u%s' % (len(string), string)
def mangle_identifier(ident, template_params='', *, abi_tags=(), uid=None):
"""
Mangle the identifier with optional template parameters and abi_tags.
Note:
This treats '.' as '::' in C++.
"""
if uid is not None:
# Add uid to abi-tags
abi_tags = (f"v{uid}", *abi_tags)
parts = [_len_encoded(_escape_string(x)) for x in ident.split('.')]
enc_abi_tags = list(map(mangle_abi_tag, abi_tags))
extras = template_params + ''.join(enc_abi_tags)
if len(parts) > 1:
return 'N%s%sE' % (''.join(parts), extras)
else:
return '%s%s' % (parts[0], extras)
def mangle_type_or_value(typ):
"""
Mangle type parameter and arbitrary value.
"""
# Handle numba types
if isinstance(typ, types.Type):
if typ in N2CODE:
return N2CODE[typ]
else:
return mangle_templated_ident(*typ.mangling_args)
# Handle integer literal
elif isinstance(typ, int):
return 'Li%dE' % typ
# Handle str as identifier
elif isinstance(typ, str):
return mangle_identifier(typ)
# Otherwise
else:
enc = _escape_string(str(typ))
return _len_encoded(enc)
# Alias
mangle_type = mangle_type_or_value
mangle_value = mangle_type_or_value
def mangle_templated_ident(identifier, parameters):
"""
Mangle templated identifier.
"""
template_params = ('I%sE' % ''.join(map(mangle_type_or_value, parameters))
if parameters else '')
return mangle_identifier(identifier, template_params)
def mangle_args(argtys):
"""
Mangle sequence of Numba type objects and arbitrary values.
"""
return ''.join([mangle_type_or_value(t) for t in argtys])
def mangle(ident, argtys, *, abi_tags=(), uid=None):
"""
Mangle identifier with Numba type objects and abi-tags.
"""
return ''.join([PREFIX,
mangle_identifier(ident, abi_tags=abi_tags, uid=uid),
mangle_args(argtys)])
def prepend_namespace(mangled, ns):
"""
Prepend namespace to mangled name.
"""
if not mangled.startswith(PREFIX):
raise ValueError('input is not a mangled name')
elif mangled.startswith(PREFIX + 'N'):
# nested
remaining = mangled[3:]
ret = PREFIX + 'N' + mangle_identifier(ns) + remaining
else:
# non-nested
remaining = mangled[2:]
head, tail = _split_mangled_ident(remaining)
ret = PREFIX + 'N' + mangle_identifier(ns) + head + 'E' + tail
return ret
def _split_mangled_ident(mangled):
"""
Returns `(head, tail)` where `head` is the `<len> + <name>` encoded
identifier and `tail` is the remaining.
"""
ct = int(mangled)
ctlen = len(str(ct))
at = ctlen + ct
return mangled[:at], mangled[at:]
| 29.650485 | 79 | 0.655861 |
963ee361844cacc5317b943abf161599e3643da8 | 1,247 | py | Python | DRAFTS/CookieStealer.py | henryza/Python | 34af4a915e7bec27268b619246833e65e48d1cb8 | [
"MIT"
] | null | null | null | DRAFTS/CookieStealer.py | henryza/Python | 34af4a915e7bec27268b619246833e65e48d1cb8 | [
"MIT"
] | null | null | null | DRAFTS/CookieStealer.py | henryza/Python | 34af4a915e7bec27268b619246833e65e48d1cb8 | [
"MIT"
] | null | null | null | import requests
import json
f = test()
f.login(ip,username, password) | 30.414634 | 81 | 0.567763 |
964088ff23e1f89499e5fcf0c7eaef0bac779407 | 15,483 | py | Python | viewers/trpl_h5.py | ScopeFoundry/FoundryDataBrowser | 604506a2e9cabe757f1c5430b688fb98788b6251 | [
"BSD-3-Clause"
] | 6 | 2017-01-10T20:13:38.000Z | 2019-05-23T16:25:12.000Z | viewers/trpl_h5.py | ScopeFoundry/FoundryDataBrowser | 604506a2e9cabe757f1c5430b688fb98788b6251 | [
"BSD-3-Clause"
] | null | null | null | viewers/trpl_h5.py | ScopeFoundry/FoundryDataBrowser | 604506a2e9cabe757f1c5430b688fb98788b6251 | [
"BSD-3-Clause"
] | null | null | null | from ScopeFoundry.data_browser import DataBrowser
from FoundryDataBrowser.viewers.hyperspec_base_view import HyperSpectralBaseView
import numpy as np
import h5py
from qtpy import QtWidgets
from ScopeFoundry.logged_quantity import LQCollection
import time
from FoundryDataBrowser.viewers.plot_n_fit import MonoExponentialFitter, BiExponentialFitter, SemiLogYPolyFitter, TauXFitter
"""class TRPL3dNPZView(HyperSpectralBaseView):
name = 'trpl_3d_npz'
def setup(self):
HyperSpectralBaseView.setup(self)
TRPLNPZView.scan_specific_setup(self)
self.settings.New('plane', dtype=str, initial='xy', choices=('xy', 'yz', 'xz'))
self.settings.New('index', dtype=int)
self.settings.New('auto_level', dtype=bool, initial=True)
for name in ['plane', 'index', 'auto_level']:
self.settings.get_lq(name).add_listener(self.update_display)
#self.ui = QtWidgets.QWidget()
#self.ui.setLayout(QtWidgets.QVBoxLayout())
self.dockarea.addDock(name='Image', widget=self.settings.New_UI())
self.info_label = QtWidgets.QLabel()
self.dockarea.addDock(name='info', widget=self.info_label)
#self.imview = pg.ImageView()
#self.ui.layout().addWidget(self.imview, stretch=1)
#self.graph_layout = pg.GraphicsLayoutWidget()
#self.graph_layout.addPlot()
def on_change_data_filename(self, fname):
try:
TRPLNPZView.load_data(self, fname)
self.update_display()
except Exception as err:
self.imview.setImage(np.zeros((10,10)))
self.databrowser.ui.statusbar.showMessage("failed to load %s:\n%s" %(fname, err))
raise(err)
def is_file_supported(self, fname):
return "trpl_scan3d.npz" in fname
def update_display(self):
ii = self.settings['index']
plane = self.settings['plane']
if plane == 'xy':
arr_slice = np.s_[ii,:,:]
index_max = self.dat['integrated_count_map'].shape[0]-1
elif plane == 'yz':
arr_slice = np.s_[:,:,ii]
index_max = self.dat['integrated_count_map'].shape[2]-1
elif plane == 'xz':
arr_slice = np.s_[:,ii,:]
index_max = self.dat['integrated_count_map'].shape[1]-1
self.settings.index.change_min_max(0, index_max)
self.hyperspec_data = self.time_trace_map[:,:,:,0:self.num_hist_chans][arr_slice]+1
self.display_image = self.integrated_count_map[arr_slice]
#self.imview.setImage(self.dat['integrated_count_map'][arr_slice], autoLevels=self.settings['auto_level'], )
other_ax = dict(xy='z', yz='x', xz='y' )[plane]
self.info_label.setText("{} plane {}={} um (index={})".format(
plane, other_ax, self.dat[other_ax+'_array'][ii], ii))
HyperSpectralBaseView.update_display(self)"""
if __name__ == '__main__':
import sys
app = DataBrowser(sys.argv)
app.load_view(TRPLH5View(app))
sys.exit(app.exec_()) | 39.497449 | 124 | 0.580831 |
964093368998bbfce9f4d1b33cd4d8d11bcb3ef0 | 854 | py | Python | python/matrices.py | silvajhonatan/robotics | d1097809e88c744658dab6d661092b6ea8f0e13a | [
"MIT"
] | 3 | 2017-11-16T18:34:27.000Z | 2021-01-28T15:33:46.000Z | python/matrices.py | sjhonatan/robotics | d1097809e88c744658dab6d661092b6ea8f0e13a | [
"MIT"
] | null | null | null | python/matrices.py | sjhonatan/robotics | d1097809e88c744658dab6d661092b6ea8f0e13a | [
"MIT"
] | null | null | null | import numpy as np
import numpy.matlib
# soma das matrizes
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = A + B
print(C)
# soma das linhas
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
s_linha = sum(A)
print(s_linha)
# soma dos elementos
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
soma = sum(sum(A))
print(soma)
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = A - B
print(C)
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = np.matmul(A,B)
print(C)
# transposta
A = np.array([[1,0],[0,2]])
A_transposta = A.T
print(A_transposta)
# inversa
from numpy.linalg import *
from numpy import linalg as LA
A = np.array([[1,3],[2,0]])
A_inv = inv(A)
print(A_inv)
I = np.matmul(A,A_inv)
print(I)
A = ([2,2],[4,8])
A_det = LA.det(A)
print(A_det)
A = ([[1,2],[1,2]])
A_n = LA.matrix_power(A, 2)
| 16.423077 | 30 | 0.564403 |
9640feccc968d6a78b76b7178f48a08b2309b36c | 5,459 | py | Python | builder/action.py | nagisc007/storybuilder | 54b28934de8acedbe35930ce27e12a7e75f91be0 | [
"MIT"
] | null | null | null | builder/action.py | nagisc007/storybuilder | 54b28934de8acedbe35930ce27e12a7e75f91be0 | [
"MIT"
] | 176 | 2019-03-07T13:31:26.000Z | 2019-11-02T12:38:23.000Z | builder/action.py | nagisc007/storybuilder | 54b28934de8acedbe35930ce27e12a7e75f91be0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Define action class.
"""
from enum import Enum
from . import assertion
from .basedata import BaseData
from .description import Description, NoDesc, DescType
from .flag import Flag, NoFlag, NoDeflag
from .basesubject import NoSubject
from .person import Person
from .chara import Chara
from .who import Who
def setLayer(self, layer: str):
self._layer = assertion.is_str(layer)
return self
def flag(self, val: [str, NoFlag]):
if isinstance(val, Flag):
self._flag = val
elif isinstance(val, str):
self._flag = Flag(val)
else:
self._flag = NoFlag()
return self
def deflag(self, val: [str, NoDeflag]):
if isinstance(val, Flag):
self._deflag = val
elif isinstance(val, str):
self._deflag = Flag(val, True)
else:
self._deflag = NoDeflag()
return self
def getFlag(self): return self._flag
# methods
# private
class TagAction(Action):
def inherited(self):
return TagAction(self, self.info, self.subinfo, self.tag_type)
| 28.432292 | 87 | 0.605972 |
9641ba7ef69f2c86256af69c136b624ad8b36e71 | 1,025 | py | Python | pype/hosts/fusion/plugins/publish/increment_current_file_deadline.py | simonebarbieri/pype | a6dc83aa1300738749cbe8e5e2e6d2d1794e0289 | [
"MIT"
] | null | null | null | pype/hosts/fusion/plugins/publish/increment_current_file_deadline.py | simonebarbieri/pype | a6dc83aa1300738749cbe8e5e2e6d2d1794e0289 | [
"MIT"
] | null | null | null | pype/hosts/fusion/plugins/publish/increment_current_file_deadline.py | simonebarbieri/pype | a6dc83aa1300738749cbe8e5e2e6d2d1794e0289 | [
"MIT"
] | null | null | null | import pyblish.api
| 29.285714 | 76 | 0.66439 |
9642f267112ae3cb7eec037a994d03366ec2da1a | 2,346 | py | Python | tests/integration_tests/framework/flask_utils.py | ilan-WS/cloudify-manager | 510d8a277c848db351f38fc5b264806b2cb36d0b | [
"Apache-2.0"
] | 124 | 2015-01-22T22:28:37.000Z | 2022-02-26T23:12:06.000Z | tests/integration_tests/framework/flask_utils.py | cloudify-cosmo/cloudify-manager | 4a3f44ceb49d449bc5ebc8766b1c7b9c174ff972 | [
"Apache-2.0"
] | 345 | 2015-01-08T15:49:40.000Z | 2022-03-29T08:33:00.000Z | tests/integration_tests/framework/flask_utils.py | ilan-WS/cloudify-manager | 510d8a277c848db351f38fc5b264806b2cb36d0b | [
"Apache-2.0"
] | 77 | 2015-01-07T14:04:35.000Z | 2022-03-07T22:46:00.000Z | #########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from cloudify.utils import setup_logger
from integration_tests.framework.docker import (execute,
copy_file_to_manager)
from integration_tests.tests.constants import MANAGER_PYTHON
from integration_tests.tests.utils import get_resource
logger = setup_logger('Flask Utils', logging.INFO)
security_config = None
PREPARE_SCRIPT_PATH = '/tmp/prepare_reset_storage.py'
SCRIPT_PATH = '/tmp/reset_storage.py'
CONFIG_PATH = '/tmp/reset_storage_config.json'
| 36.65625 | 75 | 0.738704 |
9643beb9c22472b136ce8bcd1f8f9fb526f1f46a | 11,096 | py | Python | dependencies/FontTools/Lib/fontTools/misc/bezierTools.py | charlesmchen/typefacet | 8c6db26d0c599ece16f3704696811275120a4044 | [
"Apache-2.0"
] | 21 | 2015-01-16T05:10:02.000Z | 2021-06-11T20:48:15.000Z | dependencies/FontTools/Lib/fontTools/misc/bezierTools.py | charlesmchen/typefacet | 8c6db26d0c599ece16f3704696811275120a4044 | [
"Apache-2.0"
] | 1 | 2019-09-09T12:10:27.000Z | 2020-05-22T10:12:14.000Z | dependencies/FontTools/Lib/fontTools/misc/bezierTools.py | charlesmchen/typefacet | 8c6db26d0c599ece16f3704696811275120a4044 | [
"Apache-2.0"
] | 2 | 2015-05-03T04:51:08.000Z | 2018-08-24T08:28:53.000Z | """fontTools.misc.bezierTools.py -- tools for working with bezier path segments."""
__all__ = [
"calcQuadraticBounds",
"calcCubicBounds",
"splitLine",
"splitQuadratic",
"splitCubic",
"splitQuadraticAtT",
"splitCubicAtT",
"solveQuadratic",
"solveCubic",
]
from fontTools.misc.arrayTools import calcBounds
import numpy
epsilon = 1e-12
def calcQuadraticBounds(pt1, pt2, pt3):
"""Return the bounding rectangle for a qudratic bezier segment.
pt1 and pt3 are the "anchor" points, pt2 is the "handle".
>>> calcQuadraticBounds((0, 0), (50, 100), (100, 0))
(0.0, 0.0, 100.0, 50.0)
>>> calcQuadraticBounds((0, 0), (100, 0), (100, 100))
(0.0, 0.0, 100.0, 100.0)
"""
a, b, c = calcQuadraticParameters(pt1, pt2, pt3)
# calc first derivative
ax, ay = a * 2
bx, by = b
roots = []
if ax != 0:
roots.append(-bx/ax)
if ay != 0:
roots.append(-by/ay)
points = [a*t*t + b*t + c for t in roots if 0 <= t < 1] + [pt1, pt3]
return calcBounds(points)
def calcCubicBounds(pt1, pt2, pt3, pt4):
"""Return the bounding rectangle for a cubic bezier segment.
pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles".
>>> calcCubicBounds((0, 0), (25, 100), (75, 100), (100, 0))
(0.0, 0.0, 100.0, 75.0)
>>> calcCubicBounds((0, 0), (50, 0), (100, 50), (100, 100))
(0.0, 0.0, 100.0, 100.0)
>>> calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0))
(35.5662432703, 0.0, 64.4337567297, 75.0)
"""
a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4)
# calc first derivative
ax, ay = a * 3.0
bx, by = b * 2.0
cx, cy = c
xRoots = [t for t in solveQuadratic(ax, bx, cx) if 0 <= t < 1]
yRoots = [t for t in solveQuadratic(ay, by, cy) if 0 <= t < 1]
roots = xRoots + yRoots
points = [(a*t*t*t + b*t*t + c * t + d) for t in roots] + [pt1, pt4]
return calcBounds(points)
def splitLine(pt1, pt2, where, isHorizontal):
"""Split the line between pt1 and pt2 at position 'where', which
is an x coordinate if isHorizontal is False, a y coordinate if
isHorizontal is True. Return a list of two line segments if the
line was successfully split, or a list containing the original
line.
>>> printSegments(splitLine((0, 0), (100, 100), 50, True))
((0, 0), (50.0, 50.0))
((50.0, 50.0), (100, 100))
>>> printSegments(splitLine((0, 0), (100, 100), 100, True))
((0, 0), (100, 100))
>>> printSegments(splitLine((0, 0), (100, 100), 0, True))
((0, 0), (0.0, 0.0))
((0.0, 0.0), (100, 100))
>>> printSegments(splitLine((0, 0), (100, 100), 0, False))
((0, 0), (0.0, 0.0))
((0.0, 0.0), (100, 100))
"""
pt1, pt2 = numpy.array((pt1, pt2))
a = (pt2 - pt1)
b = pt1
ax = a[isHorizontal]
if ax == 0:
return [(pt1, pt2)]
t = float(where - b[isHorizontal]) / ax
if 0 <= t < 1:
midPt = a * t + b
return [(pt1, midPt), (midPt, pt2)]
else:
return [(pt1, pt2)]
def splitQuadratic(pt1, pt2, pt3, where, isHorizontal):
"""Split the quadratic curve between pt1, pt2 and pt3 at position 'where',
which is an x coordinate if isHorizontal is False, a y coordinate if
isHorizontal is True. Return a list of curve segments.
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 150, False))
((0, 0), (50, 100), (100, 0))
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, False))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, False))
((0.0, 0.0), (12.5, 25.0), (25.0, 37.5))
((25.0, 37.5), (62.5, 75.0), (100.0, 0.0))
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, True))
((0.0, 0.0), (7.32233047034, 14.6446609407), (14.6446609407, 25.0))
((14.6446609407, 25.0), (50.0, 75.0), (85.3553390593, 25.0))
((85.3553390593, 25.0), (92.6776695297, 14.6446609407), (100.0, -7.1054273576e-15))
>>> # XXX I'm not at all sure if the following behavior is desirable:
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, True))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (50.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
"""
a, b, c = calcQuadraticParameters(pt1, pt2, pt3)
solutions = solveQuadratic(a[isHorizontal], b[isHorizontal],
c[isHorizontal] - where)
solutions = [t for t in solutions if 0 <= t < 1]
solutions.sort()
if not solutions:
return [(pt1, pt2, pt3)]
return _splitQuadraticAtT(a, b, c, *solutions)
def splitCubic(pt1, pt2, pt3, pt4, where, isHorizontal):
"""Split the cubic curve between pt1, pt2, pt3 and pt4 at position 'where',
which is an x coordinate if isHorizontal is False, a y coordinate if
isHorizontal is True. Return a list of curve segments.
>>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 150, False))
((0, 0), (25, 100), (75, 100), (100, 0))
>>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 50, False))
((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
((50.0, 75.0), (68.75, 75.0), (87.5, 50.0), (100.0, 0.0))
>>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 25, True))
((0.0, 0.0), (2.2937927384, 9.17517095361), (4.79804488188, 17.5085042869), (7.47413641001, 25.0))
((7.47413641001, 25.0), (31.2886200204, 91.6666666667), (68.7113799796, 91.6666666667), (92.52586359, 25.0))
((92.52586359, 25.0), (95.2019551181, 17.5085042869), (97.7062072616, 9.17517095361), (100.0, 1.7763568394e-15))
"""
a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4)
solutions = solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal],
d[isHorizontal] - where)
solutions = [t for t in solutions if 0 <= t < 1]
solutions.sort()
if not solutions:
return [(pt1, pt2, pt3, pt4)]
return _splitCubicAtT(a, b, c, d, *solutions)
def splitQuadraticAtT(pt1, pt2, pt3, *ts):
"""Split the quadratic curve between pt1, pt2 and pt3 at one or more
values of t. Return a list of curve segments.
>>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
>>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5, 0.75))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (62.5, 50.0), (75.0, 37.5))
((75.0, 37.5), (87.5, 25.0), (100.0, 0.0))
"""
a, b, c = calcQuadraticParameters(pt1, pt2, pt3)
return _splitQuadraticAtT(a, b, c, *ts)
def splitCubicAtT(pt1, pt2, pt3, pt4, *ts):
"""Split the cubic curve between pt1, pt2, pt3 and pt4 at one or more
values of t. Return a list of curve segments.
>>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5))
((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
((50.0, 75.0), (68.75, 75.0), (87.5, 50.0), (100.0, 0.0))
>>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75))
((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
((50.0, 75.0), (59.375, 75.0), (68.75, 68.75), (77.34375, 56.25))
((77.34375, 56.25), (85.9375, 43.75), (93.75, 25.0), (100.0, 0.0))
"""
a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4)
return _splitCubicAtT(a, b, c, d, *ts)
#
# Equation solvers.
#
from math import sqrt, acos, cos, pi
def solveQuadratic(a, b, c,
sqrt=sqrt):
"""Solve a quadratic equation where a, b and c are real.
a*x*x + b*x + c = 0
This function returns a list of roots. Note that the returned list
is neither guaranteed to be sorted nor to contain unique values!
"""
if abs(a) < epsilon:
if abs(b) < epsilon:
# We have a non-equation; therefore, we have no valid solution
roots = []
else:
# We have a linear equation with 1 root.
roots = [-c/b]
else:
# We have a true quadratic equation. Apply the quadratic formula to find two roots.
DD = b*b - 4.0*a*c
if DD >= 0.0:
rDD = sqrt(DD)
roots = [(-b+rDD)/2.0/a, (-b-rDD)/2.0/a]
else:
# complex roots, ignore
roots = []
return roots
def solveCubic(a, b, c, d,
abs=abs, pow=pow, sqrt=sqrt, cos=cos, acos=acos, pi=pi):
"""Solve a cubic equation where a, b, c and d are real.
a*x*x*x + b*x*x + c*x + d = 0
This function returns a list of roots. Note that the returned list
is neither guaranteed to be sorted nor to contain unique values!
"""
#
# adapted from:
# CUBIC.C - Solve a cubic polynomial
# public domain by Ross Cottrell
# found at: http://www.strangecreations.com/library/snippets/Cubic.C
#
if abs(a) < epsilon:
# don't just test for zero; for very small values of 'a' solveCubic()
# returns unreliable results, so we fall back to quad.
return solveQuadratic(b, c, d)
a = float(a)
a1 = b/a
a2 = c/a
a3 = d/a
Q = (a1*a1 - 3.0*a2)/9.0
R = (2.0*a1*a1*a1 - 9.0*a1*a2 + 27.0*a3)/54.0
R2_Q3 = R*R - Q*Q*Q
if R2_Q3 < 0:
theta = acos(R/sqrt(Q*Q*Q))
rQ2 = -2.0*sqrt(Q)
x0 = rQ2*cos(theta/3.0) - a1/3.0
x1 = rQ2*cos((theta+2.0*pi)/3.0) - a1/3.0
x2 = rQ2*cos((theta+4.0*pi)/3.0) - a1/3.0
return [x0, x1, x2]
else:
if Q == 0 and R == 0:
x = 0
else:
x = pow(sqrt(R2_Q3)+abs(R), 1/3.0)
x = x + Q/x
if R >= 0.0:
x = -x
x = x - a1/3.0
return [x]
#
# Conversion routines for points to parameters and vice versa
#
def _segmentrepr(obj):
"""
>>> _segmentrepr([1, [2, 3], [], [[2, [3, 4], numpy.array([0.1, 2.2])]]])
'(1, (2, 3), (), ((2, (3, 4), (0.1, 2.2))))'
"""
try:
it = iter(obj)
except TypeError:
return str(obj)
else:
return "(%s)" % ", ".join([_segmentrepr(x) for x in it])
def printSegments(segments):
"""Helper for the doctests, displaying each segment in a list of
segments on a single line as a tuple.
"""
for segment in segments:
print _segmentrepr(segment)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30.31694 | 114 | 0.594809 |
9643d280cf21edb06bbf96df561f04888a36f82e | 19,403 | py | Python | pymach/present.py | EnriqueU/pymach | b9918dcb0964fc4645f548639a762ef03c3c2e13 | [
"MIT"
] | null | null | null | pymach/present.py | EnriqueU/pymach | b9918dcb0964fc4645f548639a762ef03c3c2e13 | [
"MIT"
] | null | null | null | pymach/present.py | EnriqueU/pymach | b9918dcb0964fc4645f548639a762ef03c3c2e13 | [
"MIT"
] | null | null | null | # Standard Libraries
import subprocess
import datetime
import sys # print en consola
import os
import json
# Local Libraries
import define
import analyze
import prepare
import fselect
import evaluate
import improve
import tools
import pandas as pd
from flask_login import LoginManager, login_required, login_user, logout_user, current_user, UserMixin
from flask import Flask, render_template, redirect, request, url_for, jsonify, flash, session
from requests_oauthlib import OAuth2Session
from requests.exceptions import HTTPError
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from werkzeug.utils import secure_filename
from collections import OrderedDict
"""
basedir = os.path.abspath(os.path.dirname(__file__))
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
"""
"""App Configuration""" ########################################################
"""
class Auth:
# Google Project Credentials
CLIENT_ID = ('814931001809-tch3d62bdn7f0j3qkdu7dmp21n7t87ra'
'.apps.googleusercontent.com')
CLIENT_SECRET = 'M9s6kUQ3MYllNAl4t2NAv_9V'
REDIRECT_URI = 'http://127.0.0.1:8002/oauth2callback'
AUTH_URI = 'https://accounts.google.com/o/oauth2/auth'
TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
USER_INFO = 'https://www.googleapis.com/userinfo/v2/me'
SCOPE = ['https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile']
class Config:
# Base config
APP_NAME = "Pymach"
SECRET_KEY = os.environ.get("SECRET_KEY") or os.urandom(24)
class DevConfig(Config):
# Dev config
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, "test.db")
class ProdConfig(Config):
# Production config
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, "prod.db")
config = {
"dev": DevConfig,
"prod": ProdConfig,
"default": DevConfig
}
"""
"""APP creation and configuration""" ###########################################
app = Flask(__name__)
#app.config.from_object(config['dev'])
#app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
#app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
#app.secret_key = os.urandom(24)
#db = SQLAlchemy(app)
#login_manager = LoginManager(app)
#login_manager.login_view = "login"
#login_manager.session_protection = "strong"
APP_PATH = os.path.dirname(os.path.abspath(__file__))
app.config['UPLOAD_DIR'] = os.path.join(APP_PATH, 'uploads')
app.config['MODELS_DIR'] = os.path.join(APP_PATH, 'models')
app.config['MARKET_DIR'] = os.path.join(APP_PATH, 'market')
ALLOWED_EXTENSIONS = ['txt', 'csv', 'ml', 'html']
""" DB Models """ ##############################################################
"""
class User(db.Model, UserMixin):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(100), unique=True, nullable=False)
name = db.Column(db.String(100), nullable=True)
avatar = db.Column(db.String(200))
tokens = db.Column(db.Text)
created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow())
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
"""
""" OAuth Session creation """ #################################################
"""
def get_google_auth(state=None, token=None):
if token:
return OAuth2Session(Auth.CLIENT_ID, token=token)
if state:
return OAuth2Session(Auth.CLIENT_ID, state=state, redirect_uri=Auth.REDIRECT_URI)
oauth = OAuth2Session(Auth.CLIENT_ID, redirect_uri=Auth.REDIRECT_URI, scope=Auth.SCOPE)
return oauth
"""
########################### Start Upload Button ##################################
########################### End Upload Button ##################################
# ########################## Start Analyze Button ##################################
########################### End Analyze Button ##################################
########################### Start Model Button ##################################
########################### End Model Button ##################################
########################### Start Improve Button ##################################
########################### End Improve Button ##################################
########################### Start Model Button ##################################
########################### End Market Button ##################################
################################################################################
if __name__ == '__main__':
    #db.create_all()
    # A fresh random secret key is generated on every start, so any existing
    # sessions are invalidated whenever the server restarts.
    app.secret_key = os.urandom(24)
    # NOTE(review): debug=True enables the interactive Werkzeug debugger --
    # not safe for production deployments; confirm this is dev-only.
    app.run(host='0.0.0.0', debug=True, port=8002)
#TODO (translated from Spanish): missing: for flush/flash message
#app.secret_key = 'some_secret'
| 35.931481 | 124 | 0.611091 |
96442ec34b3f08fd8d2dea36e730470c13f2a4b5 | 7,344 | py | Python | api/ddu/management-zone-calculation/dduConsumptionPerMZ.py | pawelsiwek/snippets | 6b551bf98e1ca514c0176363acfcb7dd20288b30 | [
"Apache-2.0"
] | 11 | 2019-07-26T08:35:08.000Z | 2021-11-04T11:25:28.000Z | api/ddu/management-zone-calculation/dduConsumptionPerMZ.py | pawelsiwek/snippets | 6b551bf98e1ca514c0176363acfcb7dd20288b30 | [
"Apache-2.0"
] | 12 | 2019-07-09T07:55:36.000Z | 2022-03-10T22:26:42.000Z | api/ddu/management-zone-calculation/dduConsumptionPerMZ.py | pawelsiwek/snippets | 6b551bf98e1ca514c0176363acfcb7dd20288b30 | [
"Apache-2.0"
] | 46 | 2019-04-24T13:35:46.000Z | 2022-03-23T01:00:17.000Z | import sys, requests, json, time
# Metric that tracks Davis Data Unit (ddu) consumption per entity.
METRIC_NAME = "builtin:billing.ddu.metrics.byEntity"
PAGE_SIZE = 500

# Suppress tracebacks so users only see the intentional error messages below.
sys.tracebacklimit = 0

# Usage:
# python .\dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60
# python .\dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60 MyManagementZone
arguments = len(sys.argv) - 1
if arguments != 5 and arguments != 6:
    print(
        "The script was called with {} arguments but expected 5 or 6: \nFROM_DATE_AND_TIME TO_DATE_AND_TIME URL_TO_ENVIRONMENT API_TOKEN MAX_REQUESTS_PER_MINUTE [SELECTED_MANAGEMENT_ZONE]\n"
        "Example: python dduConsumptionPerMZ.py 2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00 https://mySampleEnv.live.dynatrace.com/api/ abcdefghijklmnop 60 [myManagementZone]\n"
        "Note: The SELECTED_MANAGEMENT_ZONE is optional. Specify it if you only want the calculate the ddu consumption for a single management zone.".format(
            arguments
        )
    )
    exit()

# Positional CLI arguments.
FROM = str(sys.argv[1])
TO = str(sys.argv[2])
BASE_URL = str(sys.argv[3])
API_TOKEN = str(sys.argv[4])
MAX_REQUESTS_PER_MINUTE = int(sys.argv[5])
if arguments == 6:
    SELECTED_MANAGEMENT_ZONE_NAME = str(sys.argv[6])
else:
    SELECTED_MANAGEMENT_ZONE_NAME = None

# Get all available management zones
# https://mySampleEnv.live.dynatrace.com/api/config/v1/managementZones
response = requests.get(
    BASE_URL + "config/v1/managementZones",
    headers={"Authorization": "Api-Token " + API_TOKEN},
)
# Show error message when a connection cant be established. Terminates the script when theres an error.
response.raise_for_status()
allManagemementZones = json.loads(response.content)["values"]
# print("Amount of different management zones: ", len(allManagemementZones))

# If the management zone is specified: Get the index of the occurrence
if SELECTED_MANAGEMENT_ZONE_NAME != None:
    SELECTED_MANAGEMENT_ZONE_INDEX = None
    for mzIndex, managementZone in enumerate(allManagemementZones):
        if allManagemementZones[mzIndex].get("name") == SELECTED_MANAGEMENT_ZONE_NAME:
            SELECTED_MANAGEMENT_ZONE_INDEX = mzIndex
    # Fail fast with a clear message if the zone does not exist, instead of
    # crashing with a NameError much later in the metrics loop.
    if SELECTED_MANAGEMENT_ZONE_INDEX is None:
        print(
            "Management zone '{}' was not found in this environment.".format(
                SELECTED_MANAGEMENT_ZONE_NAME
            )
        )
        exit()

# Get all different entityTypes. Due to the high number of different types you can't fetch all at once => Loop through every page with nextPageKey
# https://mySampleEnv.live.dynatrace.com/api/v2/entityTypes
# https://mySampleEnv.live.dynatrace.com/api/v2/entityTypes?nextPageKey=AQAAADIBAAAAMg==
response = requests.get(
    BASE_URL + "v2/entityTypes", headers={"Authorization": "Api-Token " + API_TOKEN}
)
response.raise_for_status()
allEntityTypes = json.loads(response.content)["types"]
# Use .get() here: the response carries no "nextPageKey" at all when every
# entity type fits on a single page, so direct indexing raised a KeyError.
nextPage = (json.loads(response.content)).get("nextPageKey", None)
while nextPage != None:
    response = requests.get(
        BASE_URL + "v2/entityTypes?nextPageKey=" + nextPage,
        headers={"Authorization": "Api-Token " + API_TOKEN},
    )
    response.raise_for_status()
    nextPage = (json.loads(response.content)).get("nextPageKey", None)
    allEntityTypes.extend(json.loads(response.content)["types"])
# print("Amount of different entity types: ", len(allEntityTypes))
# print()

dduConsumptionObjectOfManagementZone = {}
# Result JSON Object with Array of dduConsumption for each management zone
dduConsumptionPerManagementZone = "[ "
dduConsumptionOfEntityType = 0
dduConsumptionOfManagementZone = 0

# https://mySampleEnv.live.dynatrace.com/api/v2/metrics/query?metricSelector=builtin:billing.ddu.metrics.byEntity&entitySelector=type(HOST),mzId(123456789)&from=2020-08-01T12:00:00+02:00 2020-08-10T12:00:00+02:00
# Loop through every entityType of every management zone
# If there is a specific management zone selected: "loop through" the single management zone
for managementZoneIndex, managementZone in (
    enumerate([allManagemementZones[SELECTED_MANAGEMENT_ZONE_INDEX]])
    if SELECTED_MANAGEMENT_ZONE_NAME != None
    else enumerate(allManagemementZones)
):
    # If a management zone got specified: access it via the index in all management zones
    if SELECTED_MANAGEMENT_ZONE_NAME != None:
        managementZoneIndex = SELECTED_MANAGEMENT_ZONE_INDEX
    for entityTypeIndex, entityType in enumerate(allEntityTypes):
        """
        print(
            "MZId: {:21} MZName: {:20} ET Name: {:5}".format(
                allManagemementZones[managementZoneIndex]["id"],
                allManagemementZones[managementZoneIndex]["name"],
                allEntityTypes[entityTypeIndex]["type"],
            )
        )
        """
        # Replace the "+" of Timezone to the encoded %2B
        response = requests.get(
            "{}v2/metrics/query?metricSelector={}:splitBy()&entitySelector=mzId({}),type({})&pageSize={}&from={}&to={}".format(
                BASE_URL,
                METRIC_NAME,
                allManagemementZones[managementZoneIndex]["id"],
                allEntityTypes[entityTypeIndex]["type"],
                str(PAGE_SIZE),
                FROM.replace("+", "%2B", 1),
                TO.replace("+", "%2B", 1),
            ),
            headers={"Authorization": "Api-Token " + API_TOKEN},
        )
        response.raise_for_status()
        # print("Waiting for ", 60 / MAX_REQUESTS_PER_MINUTE, " seconds")
        # Simple client-side rate limiting between API requests.
        time.sleep(60 / MAX_REQUESTS_PER_MINUTE)
        dduConsumptionOfMZandETDict = json.loads(response.content)["result"][0]["data"]
        # If there are any results
        if dduConsumptionOfMZandETDict:
            # Filter out every empty usage values and create the sum of ddu usage
            dduConsumptionOfMZandET = sum(
                filter(None, dduConsumptionOfMZandETDict[0]["values"])
            )
            """
            print(
                "Ddu consumption of manangement zone {} and entityType {}: {}".format(
                    allManagemementZones[managementZoneIndex]["name"],
                    allEntityTypes[entityTypeIndex]["type"],
                    round(dduConsumptionOfMZandET, 3),
                )
            )
            """
            dduConsumptionOfManagementZone += dduConsumptionOfMZandET
            dduConsumptionOfMZandET = 0
    """
    print(
        "Ddu consumption of management zone {}: {}".format(
            allManagemementZones[managementZoneIndex]["name"],
            round(dduConsumptionOfManagementZone, 3),
        )
    )
    """
    # print()
    # Populate JSON Object
    dduConsumptionObjectOfManagementZone["MZId"] = allManagemementZones[
        managementZoneIndex
    ]["id"]
    dduConsumptionObjectOfManagementZone["MZName"] = allManagemementZones[
        managementZoneIndex
    ]["name"]
    dduConsumptionObjectOfManagementZone["dduConsumption"] = round(
        dduConsumptionOfManagementZone, 3
    )
    dduConsumptionOfManagementZone = 0
    # <[ > takes 2 chars
    if len(dduConsumptionPerManagementZone) > 2:
        dduConsumptionPerManagementZone = (
            dduConsumptionPerManagementZone
            + ", "
            + json.dumps(dduConsumptionObjectOfManagementZone)
        )
    else:
        dduConsumptionPerManagementZone = dduConsumptionPerManagementZone + json.dumps(
            dduConsumptionObjectOfManagementZone
        )

dduConsumptionPerManagementZone = dduConsumptionPerManagementZone + " ]"
print(dduConsumptionPerManagementZone)
| 42.697674 | 212 | 0.687228 |
964481acbba226d6bc1f722acc9bd0960d9cebe5 | 936 | py | Python | Lesson3/coefficient_of_determination2.py | rmhyman/DataScience | c839c97c76f104ab298563a5c8b48f6d90be5f60 | [
"MIT"
] | 1 | 2015-09-17T18:49:09.000Z | 2015-09-17T18:49:09.000Z | Lesson3/coefficient_of_determination2.py | rmhyman/DataScience | c839c97c76f104ab298563a5c8b48f6d90be5f60 | [
"MIT"
] | null | null | null | Lesson3/coefficient_of_determination2.py | rmhyman/DataScience | c839c97c76f104ab298563a5c8b48f6d90be5f60 | [
"MIT"
] | null | null | null | import numpy as np
import scipy
import matplotlib.pyplot as plt
import sys
def compute_r_squared(data, predictions):
    """
    Return the coefficient of determination (R^2) of `predictions` vs `data`.

    R^2 = 1 - SS_res / SS_tot, where SS_res is the sum of squared residuals
    and SS_tot is the total sum of squares around the mean of `data`.

    Parameters
    ----------
    data : numpy.ndarray
        Observed values.
    predictions : numpy.ndarray
        Predicted values, broadcastable against `data`.

    Returns
    -------
    float
        1.0 for a perfect fit, 0.0 when predictions do no better than the
        mean. Note: undefined (division by zero) when `data` is constant.
    """
    mean = data.mean()
    # Residual sum of squares (what the model failed to explain).
    ss_res = np.sum((data - predictions) ** 2)
    # Total sum of squares (variability of the data around its mean).
    ss_tot = np.sum((data - mean) ** 2)
    return 1 - ss_res / ss_tot
964490d0c12237f4b5b63e54d5ed293032299a1f | 414 | py | Python | setup.py | 10sr/pyltsv | d31286cef6caca941d20d364863bf3bd0d95b008 | [
"Apache-2.0"
] | null | null | null | setup.py | 10sr/pyltsv | d31286cef6caca941d20d364863bf3bd0d95b008 | [
"Apache-2.0"
] | 16 | 2020-06-15T11:04:39.000Z | 2022-01-11T15:34:14.000Z | setup.py | 10sr/pyltsv | d31286cef6caca941d20d364863bf3bd0d95b008 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# type: ignore
"""Setup script."""
from setuptools import setup
setup(
    # NOTE(review): _get_version is not defined in this view of the file;
    # presumably it is defined earlier in setup.py -- confirm before packaging.
    version=_get_version(),
)
| 19.714286 | 56 | 0.589372 |
96461908df787c4f715fcb78e9b9a2b6846a1ccf | 15,328 | py | Python | hypha/apply/projects/models/project.py | slifty/hypha | 93313933c26589858beb9a861e33431658cd3b24 | [
"BSD-3-Clause"
] | null | null | null | hypha/apply/projects/models/project.py | slifty/hypha | 93313933c26589858beb9a861e33431658cd3b24 | [
"BSD-3-Clause"
] | null | null | null | hypha/apply/projects/models/project.py | slifty/hypha | 93313933c26589858beb9a861e33431658cd3b24 | [
"BSD-3-Clause"
] | null | null | null | import collections
import decimal
import json
import logging
from django.apps import apps
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import Count, F, Max, OuterRef, Subquery, Sum, Value
from django.db.models.functions import Cast, Coalesce
from django.db.models.signals import post_delete
from django.dispatch.dispatcher import receiver
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.core.fields import StreamField
from addressfield.fields import ADDRESS_FIELDS_ORDER
from hypha.apply.funds.models.mixins import AccessFormData
from hypha.apply.stream_forms.blocks import FormFieldsBlock
from hypha.apply.stream_forms.files import StreamFieldDataEncoder
from hypha.apply.stream_forms.models import BaseStreamForm
from hypha.apply.utils.storage import PrivateStorage
from .vendor import Vendor
logger = logging.getLogger(__name__)
# Project lifecycle states, listed in chronological order.
COMMITTED = 'committed'
CONTRACTING = 'contracting'
IN_PROGRESS = 'in_progress'
CLOSING = 'closing'
COMPLETE = 'complete'
# (stored value, translated human-readable label) pairs for a status field.
PROJECT_STATUS_CHOICES = [
    (COMMITTED, _('Committed')),
    (CONTRACTING, _('Contracting')),
    (IN_PROGRESS, _('In Progress')),
    (CLOSING, _('Closing')),
    (COMPLETE, _('Complete')),
]
def unpaid_value(self):
    # Total value of invoices not yet paid; delegates to unpaid_value() on
    # `self.invoices` (presumably a related invoice queryset/manager).
    return self.invoices.unpaid_value()
def clean(self):
    """Reject date ranges where the proposed end precedes the proposed start."""
    start = self.proposed_start
    end = self.proposed_end
    # Nothing to validate until both dates have been supplied.
    if start is None or end is None:
        return
    if start > end:
        raise ValidationError(_('Proposed End Date must be after Proposed Start Date'))
def can_request_funding(self):
    """Return True when this Project's funding block should be shown."""
    # Funding requests only make sense while the project is active or closing.
    fundable_states = {CLOSING, IN_PROGRESS}
    return self.status in fundable_states
def get_missing_document_categories(self):
    """
    Yield each DocumentCategory that is below its recommended minimum,
    together with how many more documents are needed to reach it.
    """
    # Tally how many supporting documents this project already has, keyed by
    # the category instance itself.
    tally = collections.Counter(
        DocumentCategory.objects.filter(packet_files__project=self)
    )
    for category in DocumentCategory.objects.all():
        shortfall = category.recommended_minimum - tally[category]
        if shortfall > 0:
            yield {
                'category': category,
                'difference': shortfall,
            }
# def send_to_compliance(self, request):
# """Notify Compliance about this Project."""
# messenger(
# MESSAGES.SENT_TO_COMPLIANCE,
# request=request,
# user=request.user,
# source=self,
# )
# self.sent_to_compliance_at = timezone.now()
# self.save(update_fields=['sent_to_compliance_at'])
class Approval(models.Model):
    """Records that a user approved a Project, and when."""
    # Project being approved; deleting the project removes its approvals.
    project = models.ForeignKey("Project", on_delete=models.CASCADE, related_name="approvals")
    # User who gave the approval; deleting the user removes their approvals.
    by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="approvals")
    created_at = models.DateTimeField(auto_now_add=True)
class ContractQuerySet(models.QuerySet):
    # NOTE(review): this queryset's body appears to have been elided from this
    # copy of the file -- as written the class has an empty suite (a syntax
    # error). Restore the original method definitions from upstream.
class Contract(models.Model):
    """A contract file attached to a Project, tracking signing and approval."""
    # User who approved the contract; kept (SET_NULL) if that user is deleted.
    approver = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL, related_name='contracts')
    project = models.ForeignKey("Project", on_delete=models.CASCADE, related_name="contracts")
    # contract_path (defined elsewhere in this file) builds the upload location;
    # PrivateStorage keeps the file out of publicly served media.
    file = models.FileField(upload_to=contract_path, storage=PrivateStorage())
    is_signed = models.BooleanField("Signed?", default=False)
    created_at = models.DateTimeField(auto_now_add=True)
    # Set when the contract is approved; null until then.
    approved_at = models.DateTimeField(null=True)
    objects = ContractQuerySet.as_manager()
class PacketFile(models.Model):
    """A supporting document uploaded to a Project, grouped by category."""
    # Category this document belongs to (nullable). NOTE(review): CASCADE here
    # means deleting a category deletes its documents -- confirm intended.
    category = models.ForeignKey("DocumentCategory", null=True, on_delete=models.CASCADE, related_name="packet_files")
    project = models.ForeignKey("Project", on_delete=models.CASCADE, related_name="packet_files")
    # Display title for the document.
    title = models.TextField()
    # document_path (defined elsewhere in this file) builds the upload location;
    # PrivateStorage keeps the file out of publicly served media.
    document = models.FileField(upload_to=document_path, storage=PrivateStorage())
    def get_remove_form(self):
        """
        Get an instantiated RemoveDocumentForm with this class as `instance`.
        This allows us to build instances of the RemoveDocumentForm for each
        instance of PacketFile in the supporting documents template. The
        standard Delegated View flow makes it difficult to create these forms
        in the view or template.
        """
        # Imported locally to avoid a circular import with the forms module.
        from ..forms import RemoveDocumentForm
        return RemoveDocumentForm(instance=self)
| 32.892704 | 123 | 0.666819 |
9646e8e2458a9b399cec0bf5ce7ece6cbbdffad6 | 1,938 | py | Python | temp/src/square.py | wvu-irl/smart-2 | b39b6d477b5259b3bf0d96180a154ee1dafae0ac | [
"MIT"
] | null | null | null | temp/src/square.py | wvu-irl/smart-2 | b39b6d477b5259b3bf0d96180a154ee1dafae0ac | [
"MIT"
] | null | null | null | temp/src/square.py | wvu-irl/smart-2 | b39b6d477b5259b3bf0d96180a154ee1dafae0ac | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from math import radians
import os
import numpy as np
from nav_msgs.msg import Odometry
if __name__ == '__main__':
    try:
        DrawASquare()
    # Catch only the expected shutdown signal (e.g. Ctrl-C / node shutdown);
    # the original bare `except:` silently swallowed every error, hiding bugs.
    except rospy.ROSInterruptException:
        rospy.loginfo("node terminated.")
| 27.685714 | 120 | 0.583591 |