| max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
hw-appium/page_object/testcase/test_self_choice.py
|
ZitherPeng/CodeRecord_Python
| 0
|
12777551
|
import pytest
from page_object.page.MainPage import MainPage
class TestSelfChoice(object):
def test_price(self):
main = MainPage()
assert main.click_self_choice()
| 1.882813
| 2
|
question_bank/permutation-i-lcci/permutation-i-lcci.py
|
yatengLG/leetcode-python
| 9
|
12777552
|
# -*- coding: utf-8 -*-
# @Author : LG
"""
Runtime: 288 ms, beats 5.29% of Python3 submissions
Memory usage: 20.6 MB, beats 33.22% of Python3 submissions
Approach:
    Backtracking.
    Keep a list of the indices of characters already used.
"""
from typing import List
class Solution:
def permutation(self, S: str) -> List[str]:
n = len(S)
result = []
def backtrack(current, used):
            if len(current) >= n:
                result.append(''.join(current))
                return
for i in range(n):
if i not in used:
backtrack(current+[S[i]], used+[i])
backtrack([], [])
return result
"""
Runtime: 152 ms, beats 71.82% of Python3 submissions
Memory usage: 20.7 MB, beats 13.36% of Python3 submissions
Approach:
    Backtracking.
    Shrink the remaining string at each step instead of tracking used indices.
"""
class Solution:
def permutation(self, S: str) -> List[str]:
result = []
        def backtrack(s, current):  # s: characters not yet placed; current: permutation built so far
if s == '':
result.append(current)
for i in range(len(s)):
backtrack(s[:i]+s[i+1:], current+s[i])
backtrack(S, '')
return result
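# Quick sanity check (hypothetical usage of the classes defined above):
if __name__ == '__main__':
    print(sorted(Solution().permutation("qwe")))
    # expected: ['eqw', 'ewq', 'qew', 'qwe', 'weq', 'wqe']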
"""
"""
| 3.703125
| 4
|
wagtail_advanced_form_builder/forms/widgets/side_by_side_radio_select_widget.py
|
octavenz/wagtail-advanced-form-builder
| 11
|
12777553
|
from django.forms import RadioSelect
class SideBySideRadioSelectWidget(RadioSelect):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.display_side_by_side = True
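# Illustrative (hypothetical) use in a Django form definition:
#   from django import forms
#   colour = forms.ChoiceField(choices=[("r", "Red"), ("b", "Blue")],
#                              widget=SideBySideRadioSelectWidget())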
| 2.046875
| 2
|
stsdas/pkg/hst_calib/nicmos/runcalsaa.py
|
iraf-community/stsdas
| 1
|
12777554
|
#! /usr/bin/env python
"""
runcalsaa.py - Module to perform SAA correction in the CALNIC pipeline
(After CALNICA, before CALNICB) by running the PEDSUB, BEP, and SAACLEAN tasks.
PEDSUB is run only to improve the calculations of the SAA persistence and BEP
signature; no pedestal correction is actually applied to the final output
image.
USAGE: runcalsaa.py [-d] ipppssoot_raw.fits
Alternative USAGE: python
import runcalsaa
status=runcalsaa.run('ipppssoot_raw.fits')
RETURN VALUES:
It will return status codes to indicate completion status:
0 = successful completion with correction applied
4 = successful completion with no correction applied
1 = failed gracefully with exception
3 = aborted gracefully based on self-diagnostic
REQUIRED INPUT FILES:
Although these files are not specified on the command line, they
must be available for the script to succeed.
In the working directory:
ipppssoot_cal.fits
The association file specified in SAA_DARK
The _raw files specified in that association file
As specified in the _cal file header:
SAACNTAB
PEDSBTAB
FLATFILE
As specified in the post-SAA exposure file headers:
MASKFILE
SAADFILE
OUTPUT FILES & EFFECTS:
The ipppssoot_cal.fits file may be replaced.
The SAADONE keyword in the ipppssoot_cal.fits file is updated.
The BEPDONE keyword in the ipppssoot_cal.fits file is updated.
The ipppssoot_trl.txt file is appended to.
INTERIM FILES:
A _psb.fits file is created temporarily, but removed by the script.
A _ped2.fits file is created temporarily, but removed by the script.
@author: <NAME>, <NAME>
@version: 0.4 (3-Jul-2006)
0.5 (13-Aug-2008)
1.0 (26-Jan-2009)
1.1 (29-Jan-2009)
1.2 (25-Mar-2009)
1.3 (15-Jun-2010)
1.4.2 (5-Nov-2013) MLS: changed return codes for opus
"""
from __future__ import print_function
import os,time,sys
from pyraf import iraf
from iraf import stsdas, hst_calib, nicmos,ctools
from iraf import saaclean
from nictools import nic_rem_persist
from astropy.io import fits as pyfits
import numpy as N
__version__ = '1.4.2'
__vdate__ = '25-Nov-2013'
__trlmarker__ = '*** CALNIC RUNCALSAA Processing Version %s %s ***\n'%(__version__,__vdate__)
"""
These return codes were changed at the request of OPUS so that the calling shell
script can treat a return value of 1 as a real error; see #1078.
"""
_success = 0
_none = 4
_error = 1
_abort = 3
# Constants relevant to saaclean
statdict_saaclean = {'none':_none,'low only':_success,'high only':_success,
'both':_success,'n/a':_none,'aborted':_abort}
donestring = {_none:'OMITTED',_success:'PERFORMED',_abort:'SKIPPED',
_error:'SKIPPED'}
def run(rawname,debug=False):
#............................................................
# Setup
#............................................................
saadone = _none
bepdone = _none
    if '_raw' not in rawname:
        print("""ERROR: this script takes an ipppssoot_raw.fits file as input;
        you provided %s""" % rawname)
        return _error
# Define file names
calname = rawname.replace('_raw','_cal')
pedname = rawname.replace('_raw','_ped')
pedname2 = rawname.replace('_raw','_ped2')
outname = rawname.replace('_raw','_scn_applied')
saapername = rawname.replace('_raw','_spr')
pedtrlname = rawname.replace('_raw.fits','_pedsb_trl.txt')
F_A = calname
F_B = pedname
F_C = outname
F_D = pedname2
# Establish connection to the trailer file
trlname = rawname.replace('_raw.fits','_trl.txt')
Trl = open( trlname,'a')
Trl.write(_timestamp('RUNCALSAA starting'))
Trl.write(__trlmarker__)
# Open the calfile header and determine whether the script should run
f = pyfits.open(calname)
prihdr = f[0].header
# Get some things from the calfile header
saaparname = f[0].header['saacntab']
pedparname = f[0].header['pedsbtab']
camera = f[0].header['camera']
# Trap the case where no PEDSBTAB was provided, as this reference file is
# required for running PEDSUB.
if pedparname == 'N/A':
# No PEDSUB reference file, so turn off all processing.
dosaa=False
saadone=_abort
dobep=False
bepdone=_abort
else:
if 'saacorr' in prihdr:
dosaa = (prihdr['saacorr'] == 'PERFORM')
else:
dosaa = False
saadone = _abort
if 'bepcorr' in prihdr:
dobep = (prihdr['bepcorr'] == 'PERFORM')
else:
dobep = False
bepdone = _abort
if ((dosaa or dobep) and (f[0].header['flatdone'] == 'PERFORMED') and (f[0].header['flatfile'] != 'N/A')):
pass # keep running
else:
Trl.write(_timestamp('RUNCALSAA omitted'))
Trl.close()
set_keys_final( _abort, _abort, F_A, donestring, saapername)
# No files to delete
f.close()
return _none
f.close()
try: # get pedsub pars for SAACLEAN, BEP, or both
kwpars = get_pedsub_pars( camera, pedparname, Trl, F_A, saapername, debug=debug)
except Exception as e:
handle_exception(e, Trl, [], debug = debug)
set_keys_final( _abort, _abort, F_A, donestring, saapername )
# no copy to final as it already is cal, no files to delete
return _abort
if (dosaa):
if (f[0].header['saadone'] == 'PERFORMED'):
saadone = _abort
F_S1 = F_A # set file that is the final for 'stage 1' to file F_A
else: # f[0].header['saadone'] != 'PERFORMED'):
try: # for do_pedsub
do_pedsub(pedparname, Trl, pedtrlname, F_A, F_B, kwpars, saapername)
except Exception as e:
handle_exception(e, Trl, [], debug = debug)
set_keys_final( _abort, _abort, F_A, donestring,saapername )
# no copy to final as it already is cal, no files to delete
return _abort
saadone, F_S1 = do_saaclean(F_B, F_A, F_C, trlname, saaparname, camera, saapername, Trl, debug=debug)
else: # dosaa is False
F_S1 = F_A # set file that is the final for 'stage 1' to file F_A
if (dobep):
try:
do_pedsub(pedparname, Trl, pedtrlname, F_S1, F_D, kwpars,saapername)
except Exception as e:
handle_exception(e, Trl, [], debug = debug)
set_keys_final(_abort,_abort, F_A, donestring,saapername )
# no copy to final as it already is cal, no files to delete
return _abort
bepdone, F_Final = do_bright_ep( F_D, F_S1, Trl, donestring, debug=debug )
else: # dobep is False
F_Final = F_S1
set_keys_final(saadone, bepdone, F_S1, donestring, saapername)
os.rename( F_Final, calname)
Trl.write(_timestamp('RUNCALSAA completed'))
Trl.close()
return _success
def set_keys_final(saadone, bepdone, F_Final, donestring, saapername):
""" Set values for saadone and bepdone in the final cal file
@param saadone: value of key SAADONE
@type saadone: string
@param bepdone: value of key BEPDONE
@type bepdone: string
@param F_Final: name of final cal file
@type F_Final: string
@param donestring: mapping of strings for done keys
@type donestring: dict
@param saapername: name of persistence model created by SAACLEAN
@type saapername: string
"""
fh = pyfits.open( F_Final, mode = 'update' )
fh[0].header.update('saadone',donestring[saadone])
fh[0].header.update('bepdone',donestring[bepdone])
if saapername != None:
fh[0].header.update('SAACRMAP',saapername)
fh.close()
def get_pedsub_pars( camera, pedparname, Trl, pedsub_file, saapername, debug=False ):
""" Get keyword parameter values for pedsub
@param camera: camera number
@type camera: int
@param pedparname: parameter file name
@type pedparname: string
@param Trl: trailer file name
@type Trl: string
@param pedsub_file: name of file with pedsub pars
@type pedsub_file: string
@param saapername: name of file for SAA persistence image
@type saapername: string
@return: kwpars
@rtype: dict
"""
# Get params from the pedsubtab
try:
kwpars = getkwpars(camera,iraf.osfn(pedparname))
except Exception as e:
set_keys_final(_error,_error, pedsub_file, donestring,saapername)
handle_exception(e, Trl, [], debug = debug)
return _error
return kwpars
def do_pedsub( pedparname, Trl, pedtrlname, file_1, file_2, kwpars, saapername):
""" Call pedsub
@param pedparname: parameter file name
@type pedparname: string
@param Trl: trailer file name
@type Trl: string
@param pedtrlname: pedsub's trailer file name
@type pedtrlname: string
@param file_1: name of input cal file
@type file_1: string
@param file_2: name of output ped file
@type file_2: string
@param kwpars: keyword params for pedsub
@type kwpars: dict
@param saapername: name of file for SAA persistence image
@type saapername: string
"""
pedsub_complete='=== PEDSUB finished'
# Timestamp the trailer file
    Trl.write(_timestamp('PEDSUB starting with params from %s' % pedparname))
# Run pedsub with output directed to special file
iraf.flprcache()
iraf.pedsub.unlearn()
iraf.pedsub(input = file_1, output = file_2, Stdout = pedtrlname, **kwpars)
# Examine task output & append to trailer file
pedout = open( pedtrlname )
for line in pedout:
Trl.write( line )
pedout.close()
os.remove(pedtrlname)
if not line.startswith(pedsub_complete):
raise PedsubError
def do_saaclean( calcimage, targimage, output, trlname, saaparname, camera, saapername, Trl, debug=False):
""" Call saaclean
@param calcimage: calc file name
    @type calcimage: string
@param targimage: target file name
@type targimage: string
@param trlname: trailer file name
@type trlname: string
@param saaparname: file name for SAACLEAN pars
@type saaparname: string
@param camera: camera number
@type camera: int
@param saapername: file name for SAACLEAN persistence
@type saapername: string
@param Trl: trailer file
@type Trl: string
@return: saadone, stage 1 file
@rtype: int, string
"""
Trl.write(_timestamp('SAACLEAN starting from pars in %s'%saaparname))
# Get the task parameters from the saacntab
try:
kwpars = getkwpars( camera,iraf.osfn(saaparname) )
except Exception as e:
handle_exception( e, Trl, [calcimage], debug=debug )
saadone = _error
return saadone, targimage
#
# Run the saaclean task
try:
iraf.saaclean.unlearn()
iraf.saaclean(calcimage = calcimage,
targimage = targimage,
output = output,
saaperfile = saapername,
Stderr = Trl, **kwpars)
retstat = statdict_saaclean[ iraf.saaclean.applied ]
if not debug:
if retstat == _abort:
saadone = _abort
F_S1 = targimage # set file that is the final for 'stage 1' to file targimage
Trl.write(_timestamp('SAACLEAN aborted'))
if os.path.exists(output): os.remove(output)
elif retstat == _none:
saadone = _none
F_S1 = targimage # set file that is the final for 'stage 1' to file targimage
Trl.write(_timestamp('SAACLEAN omitted'))
if os.path.exists(output): os.remove(output)
else: # retstat is SUCCESS
saadone = _success
F_S1 = output # set file that is the final for 'stage 1'
Trl.write(_timestamp('SAACLEAN completed'))
fh_targ = pyfits.open(targimage, mode='update')
fh_targ[0].header.update(key = 'SAACRMAP', value = saapername )
fh_targ.close()
else:
saadone = retstat
if retstat == _abort or retstat == _none:
F_S1 = targimage
else:
F_S1 = output
os.rename( targimage,targimage.replace('_cal.','_orig_cal.'))
os.rename( output,targimage )
os.remove( calcimage) # remove ped file (calcimage) because 2nd pedsub will need to write to it
# Return end of phase 1 final file
return saadone, F_S1
except Exception as e:
if os.path.exists( calcimage ):
os.remove( calcimage) # remove ped file (calcimage) because 2nd pedsub will need to write to it
handle_exception(e, Trl, [calcimage, output], debug = debug)
saadone = _error
F_S1 = targimage
return saadone, targimage
def do_bright_ep( calcimage, targimage, Trl, donestring, debug=False):
""" Do bright earth persistence correction
@param calcimage: calc file name
    @type calcimage: string
@param targimage: target file name
@type targimage: string
@param Trl: trailer file name
@type Trl: string
@return: bepdone, final cal file
@rtype: int, string
"""
Trl.write(_timestamp('BEP starting' ))
# Run the nic_rem_persist task
try:
# When nic_rem_persist reset sys.stdout, IPython did not pick up on the
# change back when nrp.persist() completed, and shut down the entire IPython
# session when Trl.close() was called.
# We need to manage sys.stdout here to allow IPython to recognize that
# we are resetting it back before closing the Trl file.
sys.orig_stdout = sys.stdout
sys.stdout = Trl
nrp = nic_rem_persist.NicRemPersist( calcfile = calcimage, targfile = targimage, run_stdout = None) # set task's stdout to trailer file
nrp_stat = nrp.persist()
bepdone = nrp_stat
if (donestring[nrp_stat] == 'OMITTED'):
Trl.write(_timestamp('BEP aborted'))
elif (donestring[nrp_stat] == 'PERFORMED'):
Trl.write(_timestamp('BEP completed'))
else:
Trl.write(_timestamp('BEP skipped'))
# Set sys.stdout back to normal now that all Trl messages have been written out
sys.stdout = sys.orig_stdout
if os.path.exists( calcimage ):
os.remove( calcimage) # remove ped file (calcimage)
return bepdone, targimage
# If nic_rem_persist fails, we can't proceed. End with an error.
except Exception as e:
if os.path.exists( calcimage ):
os.remove( calcimage) # remove ped file (calcimage)
handle_exception(e, Trl, [calcimage], debug = debug)
# Reset sys.stdout back to normal...
sys.stdout = sys.orig_stdout
bepdone = _none
return bepdone, targimage
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class PedsubError(Exception):
def __str__(self):
return "PEDSUB ended with error"
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Utility functions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def handle_exception(e,trl,files_to_delete,debug=False):
""" Print various useful information to various useful places """
print(str(e))
trl.write(_timestamp("Encountered exception"))
trl.write(str(e))
if not debug:
trl.write('\n Cleaning up interim files \n')
#Clean up files
for fname in files_to_delete:
if os.path.isfile(fname):
os.remove(fname)
trl.write(_timestamp('RUNCALSAA completed with errors'))
def getkwpars(camera,parname):
"""Extract the correct row of the parameter file based on the
value of CAMERA. Parameters are returned as a keyword:value
dictionary."""
d={}
f=pyfits.open(parname)
t=f[1].data
cols=f[1].columns
# Pick out the matching row of the "camera" column.
cams = t.field('camera')
idx = N.where(cams == camera)[0][0]
#..........................^^^^^^
# (The ugly [0][0] syntax is because numarray.where returns
# a tuple of arrays, and in this case we just want the
# actual scalar value that can be used to index the other
# columns in the table).
for k in cols:
d[k.name] = t.field(k.name)[idx]
del d['camera']
f.close()
return d
def _timestamp(_process_name):
"""Create formatted time string recognizable by OPUS."""
_prefix = time.strftime("\n%Y%j%H%M%S-I-----",time.localtime())
_lenstr = 60 - len(_process_name)
return _prefix+_process_name+(_lenstr*'-')+'\n'
def _getTime():
# Format time values for keywords IRAF-TLM, and DATE
_ltime = time.localtime(time.time())
time_str = time.strftime('%H:%M:%S (%d-%b-%Y)',_ltime)
return time_str
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Run from the shell.
if __name__ == '__main__':
# Look for debug flag
debug = " -d " in sys.argv
# Handle arguments
if len(sys.argv) > 3 or len(sys.argv) < 2:
print("syntax: runcalsaa.py [-d] inputfilename")
sys.exit(_error)
rawname = sys.argv[-1]
# Run script with error checking
try:
retstat = run(rawname,debug=debug)
except Exception as e:
print(str(e))
print("ERROR: RUNCALSAA failed on %s"%rawname)
retstat = _error
# Return status
sys.exit(retstat)
| 1.859375
| 2
|
waves_gateway/storage/mongo_key_value_storage_impl.py
|
NeolithEra/WavesGatewayFramework
| 25
|
12777555
|
"""
MongoKeyValueStorageImpl
"""
from waves_gateway.common import Injectable, KEY_VALUE_STORAGE_COLLECTION
from waves_gateway.model import PollingState
from waves_gateway.serializer import PollingStateSerializer
from waves_gateway.storage.key_value_storage import KeyValueStorage
from pymongo.collection import Collection # type: ignore
from typing import Optional, Any
from doc_inherit import method_doc_inherit # type: ignore
@Injectable(deps=[KEY_VALUE_STORAGE_COLLECTION, PollingStateSerializer], provides=KeyValueStorage)
class MongoKeyValueStorageImpl(KeyValueStorage):
"""
Implements a key value storage with a MongoDB collection.
"""
_COIN_BLOCK_HEIGHT_KEY = 'coin_block_height'
_WAVES_BLOCK_HEIGHT_KEY = 'waves_block_height'
_VALUE_PROPERTY_KEY = 'value'
_KEY_PROPERTY_KEY = 'key'
_COIN_POLLING_STATE_KEY = 'coin_polling_state'
_WAVES_POLLING_STATE_KEY = 'waves_polling_state'
def _set_value(self, key: str, value: Any) -> None:
"""
Inserts the key/value pair. Overwrites existing entries.
"""
query = dict()
query[MongoKeyValueStorageImpl._KEY_PROPERTY_KEY] = key
replacement = dict()
replacement[MongoKeyValueStorageImpl._KEY_PROPERTY_KEY] = key
replacement[MongoKeyValueStorageImpl._VALUE_PROPERTY_KEY] = value
self._collection.replace_one(filter=query, replacement=replacement, upsert=True)
def _get_value(self, key: str) -> Any:
"""
Returns the value or None if no value was found.
"""
query = dict()
query[MongoKeyValueStorageImpl._KEY_PROPERTY_KEY] = key
query_result = self._collection.find_one(filter=query)
if query_result is None:
return None
else:
return query_result[MongoKeyValueStorageImpl._VALUE_PROPERTY_KEY]
@method_doc_inherit
def set_last_checked_waves_block_height(self, block_height: int) -> None:
self._set_value(MongoKeyValueStorageImpl._WAVES_BLOCK_HEIGHT_KEY, block_height)
@method_doc_inherit
def get_last_checked_waves_block_height(self) -> Optional[int]:
return self._get_value(MongoKeyValueStorageImpl._WAVES_BLOCK_HEIGHT_KEY)
def __init__(self, collection: Collection, polling_state_serializer: PollingStateSerializer) -> None:
self._collection = collection
self._polling_state_serializer = polling_state_serializer
@method_doc_inherit
def set_last_checked_coin_block_height(self, block_height: int) -> None:
self._set_value(MongoKeyValueStorageImpl._COIN_BLOCK_HEIGHT_KEY, block_height)
@method_doc_inherit
def get_last_checked_coin_block_height(self) -> Optional[int]:
return self._get_value(MongoKeyValueStorageImpl._COIN_BLOCK_HEIGHT_KEY)
def set_waves_polling_state(self, polling_state: PollingState) -> None:
self._set_value(MongoKeyValueStorageImpl._WAVES_POLLING_STATE_KEY,
self._polling_state_serializer.as_dict(polling_state))
def get_coin_polling_state(self) -> Optional[PollingState]:
data = self._get_value(MongoKeyValueStorageImpl._COIN_POLLING_STATE_KEY)
if data is None:
return None
else:
return self._polling_state_serializer.from_dict(data)
def get_waves_polling_state(self) -> Optional[PollingState]:
data = self._get_value(MongoKeyValueStorageImpl._WAVES_POLLING_STATE_KEY)
if data is None:
return None
else:
return self._polling_state_serializer.from_dict(data)
def set_coin_polling_state(self, polling_state: PollingState) -> None:
self._set_value(MongoKeyValueStorageImpl._COIN_POLLING_STATE_KEY,
self._polling_state_serializer.as_dict(polling_state))
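# Illustrative wiring (hypothetical names; in practice the Injectable decorator and
# the framework's dependency injection supply the collection and serializer):
#   from pymongo import MongoClient
#   collection = MongoClient()["gateway"]["key_value_storage"]
#   storage = MongoKeyValueStorageImpl(collection, PollingStateSerializer())
#   storage.set_last_checked_coin_block_height(1234)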
| 2.34375
| 2
|
tests/test_api.py
|
ignpelloz/fuji
| 25
|
12777556
|
# -*- coding: utf-8 -*-
"""
A collection of tests for the responses of a Flask test fuji client,
i.e. whether the app is working and there are no swagger problems.
"""
def test_ui(fujiclient):
"""Basic smoke test to see if app is buildable"""
response = fujiclient.get('/fuji/api/v1/ui/')
print(response.data)
assert response.status_code == 200
def test_ui_break(fujiclient):
"""Basic test if a path not in the UI gives a 404"""
response = fujiclient.get('/fuji/500api/v1/ui/')
print(response.data)
assert response.status_code == 404
def test_get_metrics(fujiclient):
"""Test if a client get returns the metric"""
response = fujiclient.get('/fuji/api/v1/metrics',
headers={
'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
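                                      # 'dXNlcm5hbWU6cGFzc3dvcmQ=' is base64 for "username:password"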
'accept': 'application/json'
})
print(response)
assert response.status_code == 200
result = response.json
assert result != {}
'''
from swagger_tester import swagger_test
def test_swagger_api(swagger_yaml):
swagger_test(swagger_yaml)
def test_swagger_api2(app_url):
swagger_test(app_url=app_url)
'''
'''
def test_login_logout(client):
"""Make sure login and logout works."""
username = flaskr.app.config["USERNAME"]
password = <PASSWORD>.config["PASSWORD"]
rv = login(client, username, password)
assert b'You were logged in' in rv.data
rv = logout(client)
assert b'You were logged out' in rv.data
rv = login(client, f"{username}x", password)
assert b'Invalid username' in rv.data
rv = login(client, username, f'{password}x')
assert b'Invalid password' in rv.data
'''
| 2.40625
| 2
|
toga/optimization_state/datadict.py
|
JPLMLIA/TOGA
| 0
|
12777557
|
"""
Author: <NAME>
Date : 12/4/19
Brief : Handles the pareto frontier dictionary updates and accessing
Notes :
Copyright 2019 California Institute of Technology. ALL RIGHTS RESERVED.
U.S. Government Sponsorship acknowledged.
"""
import collections
from collections.abc import Mapping
import copy
import json
import numpy
from operator import add
import os
import threading
import string
import random
import yaml
class DataDict(object):
_FLAG_FIRST = object()
def __init__(self, fitness_metrics=[], maximize=True, amount_per_bin=1, history_log=""):
self.fitness_metrics = fitness_metrics
self.maximize = maximize
self.amount_per_bin = amount_per_bin
self.dictionary = self.create_initial()
self.trial_count = 0
self.trial_count_lock = threading.Lock()
self.track_history = history_log is not None and len(history_log) > 0
self.history_log = history_log
def get_dictionary(self):
"""
:return:
"""
return self.dictionary
def update_from_datadict(self, other):
"""
:param other:
:return:
"""
self.deep_update(self.dictionary, other)
def add_trials(self, trials):
self.trial_count_lock.acquire()
self.trial_count += trials
self.trial_count_lock.release()
def update_from_population(self, population=[]):
"""
:param population:
:return:
"""
updated_individuals = []
def update(_dict={}, key_path=[], value=None):
_sub = _dict
for index, item in enumerate(key_path):
if item in _sub:
if index == len(key_path) - 1:
items = _sub[item]
if not items:
_sub[item] = [value]
else:
items.append(value)
items.sort(key=lambda x: x['metrics'][key_path[-1]], reverse=self.maximize)
_sub[item] = items[:self.amount_per_bin]
if any(x['uuid'] == value['uuid'] for x in _sub[item]):
updated_individuals.append(value)
else:
_sub = _sub[item]
return _dict
for individual in population:
if self.has_metrics(individual):
key_path = self.get_corresponding_bin(individual)
self.dictionary = update(_dict=self.dictionary, key_path=key_path, value=individual)
if self.track_history and len(updated_individuals) > 0:
for new_item in updated_individuals:
with open(self.history_log, "a") as f:
f.write(str(self.trial_count) + ": " + str(new_item['metrics']) + "\n")
return self.dictionary, updated_individuals
def has_metrics(self, individual):
"""
:param individual:
:return:
"""
individual_metrics = individual.get('metrics')
if not individual_metrics:
return False
for metrics in self.fitness_metrics:
if individual_metrics.get(metrics.name) is None:
return False
return True
def update_from_previous_run(self, files):
"""
:param files:
:return:
"""
population = []
for file in files:
population.append(yaml.safe_load(open(file)))
self.update_from_population(population)
def create_initial(self):
"""
name, fixed_axis, axis_range, index
fitness_metrics = [Metric(name='banana', axis_range=[0, 1],index=0, partitions=10),
Metric(name='sinc', axis_range=[0,100], index=1, partitions=20),
Metric(name='foo', axis_range=[2.5, math.pi], index=2, partitions=20)] <-- last in list is free axis
datadict = {'banana':
{0:{'sinc':{
0: {'foo': []},
100: {'foo': []}
}
},
            1:{'sinc':{
0: {'foo': []},
100: {'foo': []}}
}
}
"""
input_arr = copy.deepcopy(self.fitness_metrics)
if not input_arr:
raise Exception("No metrics exist\nName metrics inside the Metrics: fitness: section in the run_config yml")
def helper(dictionary, array):
_dict = {}
if not array:
return dictionary
_ = array[-1]
if not _.fixed_axis:
_dict[_.name] = []
return helper(_dict, array[:-1])
else:
_range = _.axis_range
partitions = array[-1].partitions
#Solve fencepost problem here
#We need N bins so we create N+1 evenly spaced fenceposts with numpy.linspace
#Only need left endpoint of each bin, so throwaway the last one
bin_labels = numpy.linspace(min(_range), max(_range), num=partitions+1)[:-1]
_dict[_.name] = {round(el, 2): dictionary for el in bin_labels}
return helper(_dict, array[:-1])
return json.loads(json.dumps(helper({}, input_arr)))
def serialize(self, basedir):
"""
:param basedir:
:return:
"""
population = []
def walk(node, best_dir):
for key, item in node.items():
if isinstance(item, dict):
walk(item, best_dir)
else:
if item:
for i in item:
population.append(i)
walk(self.dictionary, basedir)
return population
def deep_update(self, source, overrides):
"""
:param source:
:param overrides:
:return:
"""
for key, value in overrides.items():
            if isinstance(value, Mapping) and value:
returned = self.deep_update(source.get(key, {}), value)
source[key] = returned
else:
items = []
if source.get(key):
items = source[key]
items.extend(overrides[key])
items = sorted(items, key=lambda x: x['metrics'][key], reverse=self.maximize)
items = items[:self.amount_per_bin]
source[key] = items
return source
def get_corresponding_bin(self, individual):
"""
:param individual:
:return:
"""
key_path = []
_dict = self.dictionary
for metric in self.fitness_metrics:
_dict = _dict[metric.name]
key_path.append(metric.name)
if metric.fixed_axis:
# get the bins for this value and sort by float if they're stored as strings for some reason
bins = sorted([float(i) for i in list(_dict.keys())])
_bin = bins[0]
for _ in bins:
if individual['metrics'][metric.name] > _:
_bin = _
_dict = _dict[str(_bin)]
key_path.append(str(_bin))
else:
return key_path
def flatten_dict(self, d):
"""
:param d:
:param join:
:param lift:
:return:
"""
results = []
def visit(subdict, results, partialKey):
for k, v in subdict.items():
newKey = partialKey + (k,)
if isinstance(v, Mapping):
visit(v, results, newKey)
else:
results.append((newKey, v))
empty_key = ()
visit(d, results, empty_key)
return results
def get_non_empty_bins(self):
"""
:return:
"""
self._FLAG_FIRST = object()
original = dict(self.flatten_dict(self.dictionary))
filtered = {k: v for k, v in original.items() if len(v) > 0}
return filtered
def _get_best_metric(self, trials):
trials = sorted(trials, key=lambda x : x['metrics'][self.fitness_metrics[-1].name], reverse=self.maximize)
best = trials[0]
return best['metrics'][self.fitness_metrics[-1].name]
def get_points(self):
"""
:return:
"""
self._FLAG_FIRST = object()
flattened = self.flatten_dict(self.dictionary)
points = []
for key, trials in flattened:
if trials:
i = self._get_best_metric(trials)
else:
i = None
if len(key) > 1:
points.append((key[-2], i))
else:
points.append((key[-1], i))
return points
| 2.3125
| 2
|
metaworld/envs/gym_UR3/example/mujoco/ur3_gripper_test.py
|
dscho1234/metaworld
| 0
|
12777558
|
import gym
import numpy as np
from gym_UR3.envs.mujoco import MujocoUR3Env
import time
def main():
env = gym.make('UR3-v0')
Da = env.action_space.shape[0]
obs=env.reset()
start = time.time()
for i in range(100):
env.reset()
print('{}th episode'.format(i+1))
for j in range(100):
env.render()
# env.step(env.action_space.sample())
a = np.zeros(8)
a[:6] = 0.01*np.random.uniform(size = 6)
a[-1] = 1
a[-2] = 1
env.step(a)
end = time.time()
print('Done! {}'.format(end-start))
#action[0] : qpos[0] radian
#action[4] : qpos[4] radian
#action[5] : qpos[5] radian
#action[6] : qpos[7] radian(?) -- anyway, the lower finger
#action[7] : qpos[11] radian(?) -- anyway, the upper finger
#action[8] : qpos[15] radian(?) -- anyway, the middle finger
#action[9] : qpos[6], qpos[10] radian(?) -- anyway, the lower and upper fingers spread apart vertically
if __name__=="__main__":
main()
| 2.6875
| 3
|
eth2/beacon/types/blocks.py
|
hwwhww/trinity
| 2
|
12777559
|
<gh_stars>1-10
from abc import (
ABC,
abstractmethod,
)
from typing import (
Sequence,
TYPE_CHECKING,
)
from eth_typing import (
BLSSignature,
Hash32,
)
from eth_utils import (
encode_hex,
)
import ssz
from ssz.sedes import (
List,
bytes32,
bytes96,
uint64,
)
from eth._utils.datatypes import (
Configurable,
)
from eth2.beacon._utils.hash import hash_eth2
from eth2.beacon.constants import EMPTY_SIGNATURE
from eth2.beacon.typing import (
Slot,
FromBlockParams,
)
from .attestations import Attestation
from .attester_slashings import AttesterSlashing
from .deposits import Deposit
from .eth1_data import Eth1Data
from .transfers import Transfer
from .voluntary_exits import VoluntaryExit
from .proposer_slashings import ProposerSlashing
if TYPE_CHECKING:
from eth2.beacon.db.chain import BaseBeaconChainDB # noqa: F401
class BeaconBlockBody(ssz.Serializable):
fields = [
('proposer_slashings', List(ProposerSlashing)),
('attester_slashings', List(AttesterSlashing)),
('attestations', List(Attestation)),
('deposits', List(Deposit)),
('voluntary_exits', List(VoluntaryExit)),
('transfers', List(Transfer)),
]
def __init__(self,
proposer_slashings: Sequence[ProposerSlashing],
attester_slashings: Sequence[AttesterSlashing],
attestations: Sequence[Attestation],
deposits: Sequence[Deposit],
voluntary_exits: Sequence[VoluntaryExit],
transfers: Sequence[Transfer])-> None:
super().__init__(
proposer_slashings=proposer_slashings,
attester_slashings=attester_slashings,
attestations=attestations,
deposits=deposits,
voluntary_exits=voluntary_exits,
transfers=transfers,
)
@classmethod
def create_empty_body(cls) -> 'BeaconBlockBody':
return cls(
proposer_slashings=(),
attester_slashings=(),
attestations=(),
deposits=(),
voluntary_exits=(),
transfers=(),
)
@property
def is_empty(self) -> bool:
return (
self.proposer_slashings == () and
self.attester_slashings == () and
self.attestations == () and
self.deposits == () and
self.voluntary_exits == () and
self.transfers == ()
)
@classmethod
def cast_block_body(cls,
body: 'BeaconBlockBody') -> 'BeaconBlockBody':
return cls(
proposer_slashings=body.proposer_slashings,
attester_slashings=body.attester_slashings,
attestations=body.attestations,
deposits=body.deposits,
voluntary_exits=body.voluntary_exits,
transfers=body.transfers,
)
class BaseBeaconBlock(ssz.Serializable, Configurable, ABC):
fields = [
#
# Header
#
('slot', uint64),
('parent_root', bytes32),
('state_root', bytes32),
('randao_reveal', bytes96),
('eth1_data', Eth1Data),
('signature', bytes96),
#
# Body
#
('body', BeaconBlockBody),
]
def __init__(self,
slot: Slot,
parent_root: Hash32,
state_root: Hash32,
randao_reveal: BLSSignature,
eth1_data: Eth1Data,
body: BeaconBlockBody,
signature: BLSSignature=EMPTY_SIGNATURE) -> None:
super().__init__(
slot=slot,
parent_root=parent_root,
state_root=state_root,
randao_reveal=randao_reveal,
eth1_data=eth1_data,
signature=signature,
body=body,
)
def __repr__(self) -> str:
return '<Block #{0} {1}>'.format(
self.slot,
encode_hex(self.root)[2:10],
)
_hash = None
@property
def hash(self) -> Hash32:
if self._hash is None:
self._hash = hash_eth2(ssz.encode(self))
return self._hash
@property
def root(self) -> Hash32:
# Alias of `hash`.
# Using flat hash, might change to SSZ tree hash.
return self.hash
@property
def num_attestations(self) -> int:
return len(self.body.attestations)
@property
def block_without_signature_root(self) -> Hash32:
return self.copy(
signature=EMPTY_SIGNATURE
).root
@classmethod
@abstractmethod
def from_root(cls, root: Hash32, chaindb: 'BaseBeaconChainDB') -> 'BaseBeaconBlock':
"""
Return the block denoted by the given block root.
"""
raise NotImplementedError("Must be implemented by subclasses")
class BeaconBlock(BaseBeaconBlock):
block_body_class = BeaconBlockBody
@classmethod
def from_root(cls, root: Hash32, chaindb: 'BaseBeaconChainDB') -> 'BeaconBlock':
"""
Return the block denoted by the given block ``root``.
"""
block = chaindb.get_block_by_root(root, cls)
body = cls.block_body_class(
proposer_slashings=block.body.proposer_slashings,
attester_slashings=block.body.attester_slashings,
attestations=block.body.attestations,
deposits=block.body.deposits,
voluntary_exits=block.body.voluntary_exits,
            transfers=block.body.transfers,
)
return cls(
slot=block.slot,
parent_root=block.parent_root,
state_root=block.state_root,
randao_reveal=block.randao_reveal,
eth1_data=block.eth1_data,
signature=block.signature,
body=body,
)
@classmethod
def from_parent(cls,
parent_block: 'BaseBeaconBlock',
block_params: FromBlockParams) -> 'BaseBeaconBlock':
"""
        Initialize a new block whose ``parent_root`` is the root of the
        given ``parent_block``.
"""
if block_params.slot is None:
slot = parent_block.slot + 1
else:
slot = block_params.slot
return cls(
slot=slot,
parent_root=parent_block.root,
state_root=parent_block.state_root,
randao_reveal=EMPTY_SIGNATURE,
eth1_data=parent_block.eth1_data,
signature=EMPTY_SIGNATURE,
body=cls.block_body_class.create_empty_body(),
)
@classmethod
def convert_block(cls,
block: 'BaseBeaconBlock') -> 'BeaconBlock':
return cls(
slot=block.slot,
parent_root=block.parent_root,
state_root=block.state_root,
randao_reveal=block.randao_reveal,
eth1_data=block.eth1_data,
signature=block.signature,
body=block.body,
)
| 2.15625
| 2
|
astronet/astronet/data/generate_kepler_subset.py
|
ch8644760/models
| 2
|
12777560
|
# Written by <NAME> (GitHub: OneAndOnlySeabass) 15-10-2018
# This script generates a stratified random subset of n size from a Kepler TCE csv.
import pandas as pd
import numpy as np
# Adjustable variables can be changed here
read_loc = r""  # r"tce csv location" -- fill in the input TCE csv path before running
pc_subset = 1000
fp_subset = 1000 # Both AFPs and NTPs
write_loc = r""  # r"desired output csv location" -- fill in before running
my_seed = 114639
# Reading the csv file from read_loc
kepler_df = pd.read_csv(read_loc, index_col="rowid", comment="#")
# Removing rows with av_training_set=='UNK'
kepler_df = kepler_df[kepler_df.av_training_set != 'UNK']
# Dividing the dataset in PCs and FPs(AFPs & NTPs)
PC_df = kepler_df[kepler_df.av_training_set == 'PC']
FP_df = kepler_df[kepler_df.av_training_set != 'PC']
# Random selection of pc_subset PCs and fp_subset FPs (AFPs & NTPs)
np.random.seed(my_seed)
PC_random = PC_df.sample(n=pc_subset)
FP_random = FP_df.sample(n=fp_subset)
sample_df = pd.concat((PC_random, FP_random))
sample_df = sample_df.sample(frac=1) # Shuffles the data
# Writing a new csv to write_loc
sample_df.to_csv(write_loc, index=False)
| 2.796875
| 3
|
testMath.py
|
SLongofono/448_Project3
| 0
|
12777561
|
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import Variance
import math
def testVariance():
print ("1. Testing Variance")
weighting = [2,2,2,2,2,2,2,2,2,2]
test1 = [['artist1', 'artist2', 'artist3'],['genre1', 'genre2', 'genre3'],0,0,0,0,0,0,0,0]
test2 = [['artist1'],['genre1', 'genre2'],1,2,3,4,5,6,7,8]
test3 = [['artist1'],['genre1','genre2'],6,7,8,9,2,3,4,5]
test4 = []
emptylist = -1
diffList1 = []
diffList2 = []
knownVal1 = [0,0,1,2,3,4,5,6,7,8]
knownVal2 = [0,0,5,5,5,5,3,3,3,3]
print "\t A. Variance between a populated list and a list of zeros ..."
for i in range(len(test1)):
diffList1.append(Variance.getVariance(test1,test2)[i] -knownVal1[i])
print "\t B. Variance between 2 populated lists ..."
for i in range(len(test2)):
diffList2.append(Variance.getVariance(test3,test2)[i] - knownVal2[i])
print "\t C. Variance calculated on an empty List ..."
emptylistValue = Variance.getVariance(test3,test4)
if emptylistValue == emptylist:
for i in range (len(diffList1)):
if ((diffList1[i] or diffList2[i]) > .0000001):
return False
return True
def testWeightedDifference():
print "2. Testing Weighted Difference"
weighting = [2,2,2,2,2,2,2,2,2,2]
badWeighting = []
test1 = [['artist1', 'artist2', 'artist3'],['genre1', 'genre2', 'genre3'],0,0,0,0,0,0,0,0]
test2 = [['artist1'],['genre1', 'genre2'],1,2,3,4,5,6,7,8]
test3 = [['artist1'],['genre1', 'genre2'],6,7,8,9,2,3,4,5]
test4 = []
diffList1 = []
diffList2 = []
diffList3 = []
knownVal1 = [0,0,2,4,6,8,10,12,14,16]
knownVal2 = [0,0,10,10,10,10,6,6,6,6]
emptylistValue = -1
print "\t A. Weighted Difference between a populated list and a list of zeros ..."
for i in range(len(test1)):
diffList1.append(Variance.getWeightedDifference(test2, test1, weighting)[i] - knownVal1[i])
print "\t B. Weighted Difference between 2 populated lists ..."
for i in range(len(test1)):
diffList2.append(Variance.getWeightedDifference(test3, test2, weighting)[i] - knownVal2[i])
print "\t C. Testing when Weighting is an empty list ..."
diffList3 = Variance.getWeightedDifference(test3,test2,badWeighting)
print "\t D.Testing when one of the lists is an empty list ..."
emptylist = Variance.getWeightedDifference(test4,test2,weighting)
if emptylist == emptylistValue:
for i in range(len(diffList1)):
if((diffList1[i] or diffList2[i])> .0000001):
return False
return True
def testgetNewWeight():
print "3. Testing getNewWeight"
badstddevs = []
stddevs = [1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0]
knownVal1 = [1, 1, 1, 0.5, 0.333, 0.25, 0.2, 0.167, 0.143, 0.125]
emptylistValue = -1
diffList = []
print "\t A. getNewWeight when stddevs is empty ..."
emptylist = Variance.getNewWeight(badstddevs)
print "\t B. getNewWeight when stddevs is populated ..."
for i in range(len(knownVal1)):
diffList.append(Variance.getNewWeight(stddevs)[i] - knownVal1[i])
if emptylist == emptylistValue:
for i in range(len(diffList)):
if(diffList[i] > .0000001):
return False
return True
def filter2sigmaTest():
print("4. Testing Filter2Sigma")
averages = [[],[],10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0]
stddevs = [2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0]
knownVal = [1, 1, 1, 0, 0, 0, 0]
testSongs = [
[[],[], 10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 6.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 10.0,10.0,10.0,10.0,10.0,10.0,10.0,14.0],
[[],[], 5.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],
[[],[], 15.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0],
[[],[], 10.0,10.0,10.0,10.0,10.0,10.0,10.0,15.0],
]
val = Variance.filter2Sigma(testSongs, averages, stddevs)
return val == knownVal
def teststdDev():
print("5. Testing Standard Deviation")
stdDev = []
diffList = []
listWithRowsAsColumns = [[1,2,3,4,5,6,7,8],
[6,1,9,0,5,7,3,4],
[5,5,5,5,5,5,5,5],
[23,100,1,0,8,9,5,6],
[7,5,4,3,2,1,9,6]
]
listofCalculatedStdDevs = [2.449,3.0,0.0,33.481,2.645]
for column in listWithRowsAsColumns:
vals = [x for x in column]
Nval = len(vals)
mean = sum(vals)/Nval
stdDev.append((sum([(x-mean)**2 for x in vals])/(Nval-1))**0.5)
for i in range(len(listofCalculatedStdDevs)):
diffList.append(stdDev[i] - listofCalculatedStdDevs[i])
for i in range(len(diffList)):
if(diffList[i] > .001):
return False
return True
def go():
numTests = 0
numPassed = 0
print "**************************************"
print "********MATH FUNCTION TESTING*********"
print "**************************************"
numTests +=1
if testVariance():
print "\t Variance test passed! \n\n"
numPassed += 1
numTests +=1
if testWeightedDifference():
print "\tWeightedDifference test passed!\n\n"
numPassed +=1
numTests +=1
if testgetNewWeight():
print "\t getNewWeight test passed!\n\n"
numPassed +=1
numTests +=1
if (filter2sigmaTest()):
print "\t f2sigma test passed!\n\n"
numPassed+=1
numTests +=1
if(teststdDev()):
print "\t Standard Deviation Test Passed!"
numPassed +=1
print "Tests: %d\nTests passed: %d\nPercentage: %f\n\n" % (numTests,numPassed, (float(numPassed)/numTests)*100)
return numTests,numPassed
if __name__ == "__main__":
x,y = go()
print "Tests: %d\nTests passed: %d\nPercentage: %f\n\n" % (x,y, (float(y)/x)*100)
| 2.90625
| 3
|
skp_edu_docker/code/tfrest/celery.py
|
TensorMSA/hoyai_docker
| 8
|
12777562
|
<gh_stars>1-10
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
import logging
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tfrest.settings')
app = Celery('tfrest')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
CELERYD_HIJACK_ROOT_LOGGER = False
| 1.6875
| 2
|
variable_and_data_type/string_demo/string_concatenation.py
|
pysga1996/python-basic-programming
| 0
|
12777563
|
<gh_stars>0
# To concatenate, or combine, two strings you can use the + operator.
a = "Hello"
b = "World"
c = a + b
print(c)
a = "Hello"
b = "World"
c = a + " " + b
print(c)
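# Two common alternatives to "+" (illustrative additions, not from the original lesson):
print(" ".join([a, b]))   # str.join concatenates a list of strings with a separator
print(f"{a} {b}")         # f-strings interpolate values into a template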
| 3.921875
| 4
|
quickstart-jython/src/main/java/org/quickstart/jython/calculator_func.py
|
youngzil/quickstart-framework
| 6
|
12777564
|
<gh_stars>1-10
# coding=utf-8
import math
# Functional-style (function-oriented) programming example
def power(x, y):
return math.pow(x, y)
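# Illustrative call (hypothetical usage): power(2, 10) returns 1024.0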
| 2.28125
| 2
|
tests/test_portfolio_handler.py
|
ivanliu1989/qstrader
| 113
|
12777565
|
import datetime
from decimal import Decimal
import unittest
from qstrader.event import FillEvent, OrderEvent, SignalEvent
from qstrader.portfolio_handler import PortfolioHandler
from qstrader.price_handler.base import AbstractTickPriceHandler
from qstrader.compat import queue
class PriceHandlerMock(AbstractTickPriceHandler):
def __init__(self):
pass
def get_best_bid_ask(self, ticker):
prices = {
"MSFT": (Decimal("50.28"), Decimal("50.31")),
"GOOG": (Decimal("705.46"), Decimal("705.46")),
"AMZN": (Decimal("564.14"), Decimal("565.14")),
}
return prices[ticker]
class PositionSizerMock(object):
def __init__(self):
pass
def size_order(self, portfolio, initial_order):
"""
This PositionSizerMock object simply modifies
the quantity to be 100 of any share transacted.
"""
initial_order.quantity = 100
return initial_order
class RiskManagerMock(object):
def __init__(self):
pass
def refine_orders(self, portfolio, sized_order):
"""
This RiskManagerMock object simply lets the
sized order through, creates the corresponding
OrderEvent object and adds it to a list.
"""
order_event = OrderEvent(
sized_order.ticker,
sized_order.action,
sized_order.quantity
)
return [order_event]
class TestSimpleSignalOrderFillCycleForPortfolioHandler(unittest.TestCase):
"""
Tests a simple Signal, Order and Fill cycle for the
PortfolioHandler. This is, in effect, a sanity check.
"""
def setUp(self):
"""
Set up the PortfolioHandler object supplying it with
$500,000.00 USD in initial cash.
"""
initial_cash = Decimal("500000.00")
events_queue = queue.Queue()
price_handler = PriceHandlerMock()
position_sizer = PositionSizerMock()
risk_manager = RiskManagerMock()
# Create the PortfolioHandler object from the rest
self.portfolio_handler = PortfolioHandler(
initial_cash, events_queue, price_handler,
position_sizer, risk_manager
)
def test_create_order_from_signal_basic_check(self):
"""
Tests the "_create_order_from_signal" method
as a basic sanity check.
"""
signal_event = SignalEvent("MSFT", "BOT")
order = self.portfolio_handler._create_order_from_signal(signal_event)
self.assertEqual(order.ticker, "MSFT")
self.assertEqual(order.action, "BOT")
self.assertEqual(order.quantity, 0)
def test_place_orders_onto_queue_basic_check(self):
"""
Tests the "_place_orders_onto_queue" method
as a basic sanity check.
"""
order = OrderEvent("MSFT", "BOT", 100)
order_list = [order]
self.portfolio_handler._place_orders_onto_queue(order_list)
ret_order = self.portfolio_handler.events_queue.get()
self.assertEqual(ret_order.ticker, "MSFT")
self.assertEqual(ret_order.action, "BOT")
self.assertEqual(ret_order.quantity, 100)
def test_convert_fill_to_portfolio_update_basic_check(self):
"""
Tests the "_convert_fill_to_portfolio_update" method
as a basic sanity check.
"""
fill_event_buy = FillEvent(
datetime.datetime.utcnow(), "MSFT", "BOT",
100, "ARCA", Decimal("50.25"), Decimal("1.00")
)
self.portfolio_handler._convert_fill_to_portfolio_update(fill_event_buy)
# Check the Portfolio values within the PortfolioHandler
port = self.portfolio_handler.portfolio
self.assertEqual(port.cur_cash, Decimal("494974.00"))
# TODO: Finish this off and check it works via Interactive Brokers
fill_event_sell = FillEvent(
datetime.datetime.utcnow(), "MSFT", "SLD",
100, "ARCA", Decimal("50.25"), Decimal("1.00")
)
self.portfolio_handler._convert_fill_to_portfolio_update(fill_event_sell)
def test_on_signal_basic_check(self):
"""
Tests the "on_signal" method as a basic sanity check.
"""
signal_event = SignalEvent("MSFT", "BOT")
self.portfolio_handler.on_signal(signal_event)
ret_order = self.portfolio_handler.events_queue.get()
self.assertEqual(ret_order.ticker, "MSFT")
self.assertEqual(ret_order.action, "BOT")
self.assertEqual(ret_order.quantity, 100)
if __name__ == "__main__":
unittest.main()
| 2.828125
| 3
|
tick_track/src/helpers/time.py
|
dmenezesgabriel/tick_track
| 0
|
12777566
|
<gh_stars>0
import datetime
import pytz
def now():
"""
Returns UTC timestamp with time zone
"""
return pytz.UTC.localize(datetime.datetime.utcnow())
def now_br():
"""
Returns America - São Paulo timestamp with time zone
"""
return now().astimezone(pytz.timezone("America/Sao_Paulo"))
def timestamptz_to_unix(timestamptz):
"""
Converts timestamp with time zone to epoch
"""
return timestamptz.timestamp()
def unix_to_timestamp_utc(unix):
"""
    Converts epoch to a naive UTC timestamp (no time zone attached)
"""
return datetime.datetime.utcfromtimestamp(unix)
def timestamptz_to_text(timestamptz):
"""
Converts timestamp with time zone to string
"""
return datetime.datetime.strftime(timestamptz, "%Y-%m-%d %H:%M:%S.%f%z")
def timestamptz_text_to_date(text):
"""
Converts string date to date object
"""
return datetime.datetime.strptime(text, "%Y-%m-%d %H:%M:%S.%f%z")
def date_trunc_day(timestamptz):
"""
Trunc timestamp at day
"""
return timestamptz.replace(hour=0, minute=0, second=0, microsecond=0)
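# Minimal round-trip sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    ts = now_br()
    as_text = timestamptz_to_text(ts)
    # Parsing the text form back yields the same instant (aware datetimes compare by UTC time).
    assert timestamptz_text_to_date(as_text) == ts
    print(as_text, "->", timestamptz_to_unix(ts))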
| 3.125
| 3
|
systematicity.py
|
adamdotdev/font-systematicity
| 1
|
12777567
|
<reponame>adamdotdev/font-systematicity<filename>systematicity.py
import io
from itertools import combinations
import json
from typing import NamedTuple
import numpy as np
from scipy.stats.stats import pearsonr
from peewee import DoesNotExist
import data
from data import Font, GlyphSet, Glyph, SoundDistance, ShapeDistance, Correlation
import shapes
"""
Delete any glyph sets that match the specified criteria. All glyphs, shapedistances,
and correlations will be deleted as well.
"""
def delete_glyph_set(chars, font, size, coords=None):
coords_serial = json.dumps(coords)
chars_serial = json.dumps(chars)
with data.db.atomic():
glyph_sets = (GlyphSet
.select()
.where(
GlyphSet.font_id == font.id,
GlyphSet.size == size,
GlyphSet.coords == coords_serial,
GlyphSet.chars == chars_serial))
print("Found {0} matching glyph sets".format(len(glyph_sets)))
for glyph_set in glyph_sets:
print("Deleting glyph set {0}".format(glyph_set.id))
result = glyph_set.delete_instance(recursive=True)
print(result, "glyph sets deleted")
"""
Gets or creates a set of glyphs using the specified criteria. If a glyph set for this
criteria already exists, the glyphset id is loaded and returned. If a set does not
exist, a new glyphset is created and glyphs are rendered and saved.
"""
def get_glyphs(chars, font, size, coords=None):
coords_serial = None if (coords is None or len(coords) == 0) else json.dumps(coords)
chars_serial = json.dumps(chars)
# Check if glyphs already exist
glyph_sets = (GlyphSet
.select()
.where(
GlyphSet.font_id == font.id,
GlyphSet.size == size,
GlyphSet.coords == coords_serial,
GlyphSet.chars == chars_serial)
.execute())
if len(glyph_sets) > 0:
return glyph_sets[0].id
renderer = shapes.GlyphRenderer(io.BytesIO(font.font_file))
bitmaps = renderer.bitmaps(chars, size, coords)
glyph_set = GlyphSet(font=font, size=size, coords=coords_serial, chars=chars_serial)
glyph_set.save()
glyphs = []
for i in range(len(chars)):
glyph = Glyph(
glyph_set_id = glyph_set.id,
character = chars[i],
bitmap = bitmaps[i]
)
glyphs.append(glyph)
with data.db.atomic():
Glyph.bulk_create(glyphs, batch_size=100)
return glyph_set.id
"""
Calculate all visual distance measures between all possible combinations
of glyphs belonging to the specified set. If the calculations already
exist, the existing records are returned.
"""
def get_and_save_shape_distances(glyph_set_id):
glyph_query = Glyph.select().where(Glyph.glyph_set_id == glyph_set_id)
glyphs = [glyph for glyph in glyph_query]
# Get existing glyph distances
Glyph1 = Glyph.alias()
Glyph2 = Glyph.alias()
shape_query = (ShapeDistance
.select()
.join(Glyph1, on=ShapeDistance.glyph1)
.switch(ShapeDistance)
.join(Glyph2, on=ShapeDistance.glyph2)
.where(
(Glyph1.glyph_set_id == glyph_set_id) &
(Glyph2.glyph_set_id == glyph_set_id)))
if len(shape_query) > 0:
# distances already calculated, return existing values
return [s for s in shape_query]
shape_distances = get_shape_distances(glyphs)
with data.db.atomic():
ShapeDistance.bulk_create(shape_distances, batch_size=100)
return shape_distances
def get_shape_distances(glyphs):
shape_distances = []
# Generate all pairs of chars and calculate distance
pairs = list(combinations(range(len(glyphs)),2))
for pair in pairs:
i = pair[0]
j = pair[1]
glyph_1 = glyphs[i]
glyph_2 = glyphs[j]
bitmap_1 = glyph_1.bitmap
bitmap_2 = glyph_2.bitmap
haus = shapes.hausdorff_distance(bitmap_1, bitmap_2)
if haus is None:
raise FailedRenderException("Unable to determine distance and correlation because at least one glyph failed to render.")
contrib_points1 = json.dumps([haus[0][1], haus[1][2]])
contrib_points2 = json.dumps([haus[0][2], haus[1][1]])
s = ShapeDistance(
glyph1 = glyph_1.id,
glyph2 = glyph_2.id,
metric = "hausdorff",
distance = max(haus[0][0], haus[1][0]),
points1 = contrib_points1,
points2 = contrib_points2
)
shape_distances.append(s)
return shape_distances
"""
Calculate correlation between the sound and shape distances for the
specified glyph set, using the distance metric specified. If the
correlation has already been calculated, the existing results are
returned.
"""
def get_correlation(glyph_set_id, sound_metric, shape_metric):
# Fetch from db if it's already calculated
query = (Correlation
.select()
.where(
(Correlation.glyph_set_id == glyph_set_id) &
(Correlation.sound_metric == sound_metric) &
(Correlation.shape_metric == shape_metric)))
if len(query) > 0:
return query.first()
sound_query = (SoundDistance
.select()
.where(SoundDistance.metric == sound_metric)
.order_by(SoundDistance.char1, SoundDistance.char2))
Glyph1 = Glyph.alias()
Glyph2 = Glyph.alias()
shape_query = (ShapeDistance
.select()
.join(Glyph1, on=ShapeDistance.glyph1)
.switch(ShapeDistance)
.join(Glyph2, on=ShapeDistance.glyph2)
.where(
(Glyph1.glyph_set_id == glyph_set_id) &
(Glyph2.glyph_set_id == glyph_set_id) &
(ShapeDistance.metric == shape_metric))
.order_by(Glyph1.character, Glyph2.character))
sound_distances = [s.distance for s in sound_query]
shape_distances = [s.distance for s in shape_query]
if (len(sound_distances) != len(shape_distances)):
raise Exception("Numer of shape ({0}) and sound ({1}) distances are not equal for glyph set {2}, sound metric {3}, shape metric {4}".format(
len(shape_distances), len(sound_distances), glyph_set_id, sound_metric, shape_metric))
if np.std(shape_distances) == 0:
raise Exception("Unable to calculate correlation for glyph set {0}: standard deviation of shape distances is zero."
.format(glyph_set_id))
corr_value = pearsonr(shape_distances, sound_distances)
correlation = Correlation(
glyph_set = glyph_set_id,
shape_metric = shape_metric,
sound_metric = sound_metric,
r_value = corr_value[0],
p_value = corr_value[1]
)
correlation.save()
return correlation
"""
Perform a complete measurement of systematiciy for the font, characters,
size, and variation coordinates specified. Renders and saves a set of glyphs,
measures their visual distances, and calculates the correlation between their
visual (shape) and phonological (sound) distances, using a variety of measures.
This method returns the correlations for the Edit, Edit_Sum, and Euclidean sound metrics.
"""
def evaluate(chars, font, font_size, coords=None, overwrite=False):
if (overwrite):
delete_glyph_set(chars, font, font_size, coords)
glyph_set_id = get_glyphs(chars, font, font_size, coords)
get_and_save_shape_distances(glyph_set_id)
euclidean_corr = get_correlation(glyph_set_id, "Euclidean", "hausdorff")
edit_sum_corr = get_correlation(glyph_set_id, "Edit_Sum", "hausdorff")
edit_corr = get_correlation(glyph_set_id, "Edit", "hausdorff")
return SystematicityResult(
glyph_set_id = glyph_set_id,
edit_correlation = edit_corr.r_value,
edit_sum_correlation = edit_sum_corr.r_value,
euclidean_correlation = euclidean_corr.r_value
)
class SystematicityResult(NamedTuple):
"""Class to represent the results of a systematiciy evaluation. """
glyph_set_id: int
edit_correlation: float
edit_sum_correlation: float
euclidean_correlation: float
class FailedRenderException(Exception):
"""Exception for when a glyph renders with no pixels"""
pass
| 2.703125
| 3
|
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/corpus/reader/string_category.py
|
hectormartinez/rougexstem
| 0
|
12777568
|
# Natural Language Toolkit: String Category Corpus Reader
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read tuples from a corpus consisting of categorized strings.
For example, from the question classification corpus:
NUM:dist How far is it from Denver to Aspen ?
LOC:city What county is Modesto , California in ?
HUM:desc Who was Galileo ?
DESC:def What is an atom ?
NUM:date When did Hawaii become a state ?
"""
# based on PPAttachmentCorpusReader
from util import *
from api import *
import os
class StringCategoryCorpusReader(CorpusReader):
    def __init__(self, root, files, delimiter=' '):
"""
@param root: The root directory for this corpus.
@param files: A list or regexp specifying the files in this corpus.
@param delimiter: Field delimiter
"""
CorpusReader.__init__(self, root, files)
self._delimiter = delimiter
def tuples(self, files):
return concat([StreamBackedCorpusView(filename, self._read_tuple_block)
for filename in self.abspaths(files)])
def raw(self, files):
return concat([open(filename).read()
for filename in self.abspaths(files)])
def _read_tuple_block(self, stream):
line = stream.readline().strip()
if line:
return [tuple(line.split(self._delimiter, 1))]
else:
return []
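# For example, with the default delimiter the line
#   "NUM:dist How far is it from Denver to Aspen ?"
# is read as the tuple ("NUM:dist", "How far is it from Denver to Aspen ?").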
| 3.515625
| 4
|
tests/resource/test_uriquote.py
|
ekoka/halo
| 0
|
12777569
|
<reponame>ekoka/halo
from halo.resource import URIEncode
def test_can_encode_uri():
urq = URIEncode()
decoded = 'foo and bar/{baz}'
encoded = 'foo%20and%20bar/%7bbaz%7d'
assert urq.enc(decoded).uri==encoded
def test_can_accept_plain_strings():
ur = URIEncode('abc')
assert ur.plain(': and :').uri =='abc: and :'
def test_can_decode_uri():
urq = URIEncode()
decoded = 'foo and bar/{baz}'
encoded = 'foo%20and%20bar/%7bbaz%7d'
assert urq.dec(encoded).uri==decoded
def test_uri_normalized_to_lowercase():
encoded = 'FU/baR/baz%2A'
decoded = 'FU/baR/baz*'
assert URIEncode(encoded).uri==encoded.lower()
assert URIEncode().dec(encoded).uri==decoded.lower()
def test_can_encode_space_to_plus():
urq = URIEncode()
decoded = 'foo and bar'
encoded = 'foo+and+bar'
assert urq.encp(decoded).uri==encoded
def test_can_decode_plus_to_space():
urq = URIEncode()
encoded = 'foo+and+bar'
decoded = 'foo and bar'
assert urq.decp(encoded).uri==decoded
def test_URIEncode_chainable():
urq = URIEncode()
result = 'foo/bar and baz%20'
assert urq.enc('foo').enc('/bar').dec('%20').enc('and').dec('%20').enc('baz ').uri==result
def test_URIEncode_uri_string_not_mutated():
urq1 = URIEncode('abc')
urq2 = urq1.enc('/def')
urq3 = urq2.enc('/ghi')
urq4 = urq1.enc('/jkl')
assert urq1.uri=='abc'
assert urq2.uri=='abc/def'
assert urq3.uri=='abc/def/ghi'
assert urq4.uri=='abc/jkl'
| 2.4375
| 2
|
mod_custom.py
|
shariqmalik/seeker
| 1
|
12777570
|
<gh_stars>1-10
#!/usr/bin/env python3
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # white
old = input(G + '[+]' + C + ' Do you want to reuse previous configs? (Y/N) : ' + W)
if old.lower() != 'y':
redirect = input(G + '[+]' + C + ' Enter Target URL (YouTube,Blog etc) : ' + W)
sitename = input(G + '[+]' + C + ' Site Name: ' + W)
title = input(G + '[+]' + C + ' Title : ' + W)
image_url = input(G + '[+]' + C + ' Image URL: ' + W)
description = input(G + '[+]' + C + ' Description: ' + W)
with open('template/custom/js/location_temp.js', 'r') as js:
reader = js.read()
update = reader.replace('REDIRECT_URL', redirect)
with open('template/custom/js/location.js', 'w') as js_update:
js_update.write(update)
with open('template/custom/index_temp.html', 'r') as index_temp:
code = index_temp.read()
code = code.replace('$SITE_NAME$', sitename)
code = code.replace('$TITLE$', title)
code = code.replace('$IMG_URL$', image_url)
code = code.replace('$DESCRIPTION$', description)
with open('template/custom/index.html', 'w') as new_index:
new_index.write(code)
| 2.390625
| 2
|
eazy/igm.py
|
albertfxwang/eazy-py
| 20
|
12777571
|
import os
import numpy as np
from . import __file__ as filepath
__all__ = ["Inoue14"]
class Inoue14(object):
def __init__(self, scale_tau=1.):
"""
IGM absorption from Inoue et al. (2014)
Parameters
----------
scale_tau : float
Parameter multiplied to the IGM :math:`\tau` values (exponential
in the linear absorption fraction).
I.e., :math:`f_\mathrm{igm} = e^{-\mathrm{scale\_tau} \tau}`.
"""
self._load_data()
self.scale_tau = scale_tau
def _load_data(self):
path = os.path.join(os.path.dirname(filepath),'data')
#print path
LAF_file = os.path.join(path, 'LAFcoeff.txt')
DLA_file = os.path.join(path, 'DLAcoeff.txt')
data = np.loadtxt(LAF_file, unpack=True)
ix, lam, ALAF1, ALAF2, ALAF3 = data
self.lam = lam[:,np.newaxis]
self.ALAF1 = ALAF1[:,np.newaxis]
self.ALAF2 = ALAF2[:,np.newaxis]
self.ALAF3 = ALAF3[:,np.newaxis]
data = np.loadtxt(DLA_file, unpack=True)
ix, lam, ADLA1, ADLA2 = data
self.ADLA1 = ADLA1[:,np.newaxis]
self.ADLA2 = ADLA2[:,np.newaxis]
return True
@property
def NA(self):
"""
Number of Lyman-series lines
"""
return self.lam.shape[0]
def tLSLAF(self, zS, lobs):
"""
Lyman series, Lyman-alpha forest
"""
z1LAF = 1.2
z2LAF = 4.7
l2 = self.lam #[:, np.newaxis]
tLSLAF_value = np.zeros_like(lobs*l2).T
x0 = (lobs < l2*(1+zS))
x1 = x0 & (lobs < l2*(1+z1LAF))
x2 = x0 & ((lobs >= l2*(1+z1LAF)) & (lobs < l2*(1+z2LAF)))
x3 = x0 & (lobs >= l2*(1+z2LAF))
tLSLAF_value = np.zeros_like(lobs*l2)
tLSLAF_value[x1] += ((self.ALAF1/l2**1.2)*lobs**1.2)[x1]
tLSLAF_value[x2] += ((self.ALAF2/l2**3.7)*lobs**3.7)[x2]
tLSLAF_value[x3] += ((self.ALAF3/l2**5.5)*lobs**5.5)[x3]
return tLSLAF_value.sum(axis=0)
def tLSDLA(self, zS, lobs):
"""
Lyman Series, DLA
"""
z1DLA = 2.0
l2 = self.lam #[:, np.newaxis]
tLSDLA_value = np.zeros_like(lobs*l2)
x0 = (lobs < l2*(1+zS)) & (lobs < l2*(1.+z1DLA))
x1 = (lobs < l2*(1+zS)) & ~(lobs < l2*(1.+z1DLA))
tLSDLA_value[x0] += ((self.ADLA1/l2**2)*lobs**2)[x0]
tLSDLA_value[x1] += ((self.ADLA2/l2**3)*lobs**3)[x1]
return tLSDLA_value.sum(axis=0)
def tLCDLA(self, zS, lobs):
"""
Lyman continuum, DLA
"""
z1DLA = 2.0
lamL = 911.8
tLCDLA_value = np.zeros_like(lobs)
x0 = lobs < lamL*(1.+zS)
if zS < z1DLA:
tLCDLA_value[x0] = 0.2113 * _pow(1.0+zS, 2) - 0.07661 * _pow(1.0+zS, 2.3) * _pow(lobs[x0]/lamL, (-3e-1)) - 0.1347 * _pow(lobs[x0]/lamL, 2)
else:
x1 = lobs >= lamL*(1.+z1DLA)
tLCDLA_value[x0 & x1] = 0.04696 * _pow(1.0+zS, 3) - 0.01779 * _pow(1.0+zS, 3.3) * _pow(lobs[x0 & x1]/lamL, (-3e-1)) - 0.02916 * _pow(lobs[x0 & x1]/lamL, 3)
tLCDLA_value[x0 & ~x1] =0.6340 + 0.04696 * _pow(1.0+zS, 3) - 0.01779 * _pow(1.0+zS, 3.3) * _pow(lobs[x0 & ~x1]/lamL, (-3e-1)) - 0.1347 * _pow(lobs[x0 & ~x1]/lamL, 2) - 0.2905 * _pow(lobs[x0 & ~x1]/lamL, (-3e-1))
return tLCDLA_value
def tLCLAF(self, zS, lobs):
"""
Lyman continuum, LAF
"""
z1LAF = 1.2
z2LAF = 4.7
lamL = 911.8
tLCLAF_value = np.zeros_like(lobs)
x0 = lobs < lamL*(1.+zS)
if zS < z1LAF:
tLCLAF_value[x0] = 0.3248 * (_pow(lobs[x0]/lamL, 1.2) - _pow(1.0+zS, -9e-1) * _pow(lobs[x0]/lamL, 2.1))
elif zS < z2LAF:
x1 = lobs >= lamL*(1+z1LAF)
tLCLAF_value[x0 & x1] = 2.545e-2 * (_pow(1.0+zS, 1.6) * _pow(lobs[x0 & x1]/lamL, 2.1) - _pow(lobs[x0 & x1]/lamL, 3.7))
tLCLAF_value[x0 & ~x1] = 2.545e-2 * _pow(1.0+zS, 1.6) * _pow(lobs[x0 & ~x1]/lamL, 2.1) + 0.3248 * _pow(lobs[x0 & ~x1]/lamL, 1.2) - 0.2496 * _pow(lobs[x0 & ~x1]/lamL, 2.1)
else:
x1 = lobs > lamL*(1.+z2LAF)
x2 = (lobs >= lamL*(1.+z1LAF)) & (lobs < lamL*(1.+z2LAF))
x3 = lobs < lamL*(1.+z1LAF)
tLCLAF_value[x0 & x1] = 5.221e-4 * (_pow(1.0+zS, 3.4) * _pow(lobs[x0 & x1]/lamL, 2.1) - _pow(lobs[x0 & x1]/lamL, 5.5))
tLCLAF_value[x0 & x2] = 5.221e-4 * _pow(1.0+zS, 3.4) * _pow(lobs[x0 & x2]/lamL, 2.1) + 0.2182 * _pow(lobs[x0 & x2]/lamL, 2.1) - 2.545e-2 * _pow(lobs[x0 & x2]/lamL, 3.7)
tLCLAF_value[x0 & x3] = 5.221e-4 * _pow(1.0+zS, 3.4) * _pow(lobs[x0 & x3]/lamL, 2.1) + 0.3248 * _pow(lobs[x0 & x3]/lamL, 1.2) - 3.140e-2 * _pow(lobs[x0 & x3]/lamL, 2.1)
return tLCLAF_value
def full_IGM(self, z, lobs):
"""Get full Inoue IGM absorption
Parameters
----------
z : float
Redshift to evaluate IGM absorption
lobs : array
Observed-frame wavelength(s) in Angstroms.
Returns
-------
abs : array
IGM absorption
"""
tau_LS = self.tLSLAF(z, lobs) + self.tLSDLA(z, lobs)
tau_LC = self.tLCLAF(z, lobs) + self.tLCDLA(z, lobs)
### Upturn at short wavelengths, low-z
#k = 1./100
#l0 = 600-6/k
#clip = lobs/(1+z) < 600.
#tau_clip = 100*(1-1./(1+np.exp(-k*(lobs/(1+z)-l0))))
tau_clip = 0.
return np.exp(-self.scale_tau*(tau_LC + tau_LS + tau_clip))
def build_grid(self, zgrid, lrest):
"""Build a spline interpolation object for fast IGM models
Returns: self.interpolate
"""
from scipy.interpolate import CubicSpline
igm_grid = np.zeros((len(zgrid), len(lrest)))
for iz in range(len(zgrid)):
igm_grid[iz,:] = self.full_IGM(zgrid[iz], lrest*(1+zgrid[iz]))
self.interpolate = CubicSpline(zgrid, igm_grid)
def _pow(a, b):
"""C-like power, a**b
"""
return a**b
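# A minimal usage sketch (assumption: the redshift and the wavelength grid below
# are arbitrary illustration values, not taken from the original module).
def _example_full_igm():
    igm = Inoue14(scale_tau=1.0)
    lobs = np.linspace(3000., 9000., 5)  # observed-frame wavelengths, Angstroms
    return igm.full_IGM(3.0, lobs)       # IGM transmission at z = 3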
| 2.5625
| 3
|
hex2file/hex2file.py
|
mattixtech/hex2file
| 0
|
12777572
|
<reponame>mattixtech/hex2file
"""
hex2file.py
<NAME>, 2018
Utility for writing hex to a file.
"""
import argparse
import binascii
import deprecation
import sys
def _sanitize(hex_str, comment_strings=None, ignore_strings=None):
"""
Sanitize string input before attempting to write to file.
:param hex_str: the string input to sanitize
:param comment_strings: a tuple of strings identifying comment characters
:param ignore_strings: a tuple of strings to ignore in the input
:return: the sanitized string or None
"""
# Remove whitespace
hex_str = hex_str.strip()
if not hex_str:
return None
# Ignore lines beginning with a comment string and any content after a
# comment string
if comment_strings:
for comment_string in comment_strings:
if hex_str.startswith(comment_string):
return None
else:
hex_str = hex_str.split(comment_string)[0]
hex_id = "0x"
# Ignore strings
if ignore_strings:
ignore_strings += (hex_id,)
else:
ignore_strings = (hex_id,)
for string_to_remove in ignore_strings:
hex_str = hex_str.replace(string_to_remove, "")
return "".join(hex_str.split())
def _str2hexbin(hex_str):
"""
Converts a hex string to hex binary.
:param hex_str: the string to convert
:return: the string converted to hex or None if we were passed an empty
string
"""
if hex_str:
try:
int(hex_str, 16)
except (TypeError, ValueError):
raise ValueError("Invalid hex input '{}'".format(hex_str))
return binascii.unhexlify(hex_str)
def write(hex_input, file_path, append=False, comment_strings=None,
ignore_strings=None, from_file=False):
"""
Write a hex string to a file as hex.
:param hex_input: the string containing the hex or the file containing the
hex if 'from_file' is set
:param file_path: the path to the file to write
:param append: whether to append or overwrite
:param comment_strings: a tuple of strings identifying comment characters
:param ignore_strings: a tuple of strings to ignore in the input
:param from_file: specifies to read from the given file rather than a
string
:return: None
"""
if hex_input is not None:
if append:
mode = "a"
else:
mode = "w"
mode += "b"
if from_file:
# TODO: Should we check for exceptions here?
with open(hex_input, 'r') as input_file:
hex_input = input_file.read()
with open(file_path, mode) as f:
for line in iter(hex_input.splitlines()):
hexbin_line = _str2hexbin(
_sanitize(line, comment_strings=comment_strings,
ignore_strings=ignore_strings))
if hexbin_line:
f.write(hexbin_line)
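# A minimal usage sketch (assumption: the hex string and the output path
# "out.bin" are illustrative only).
def _example_write_usage():
    # Strips the trailing "#" comment and the "0x" prefix, then writes the
    # four bytes DE AD BE EF to out.bin.
    write("0xDEADBEEF  # header bytes", "out.bin",
          comment_strings=("#",), append=False)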
@deprecation.deprecated(deprecated_in="1.1.0",
details="Use the write() function instead")
def write_str(hex_input, file_path, append=False):
"""
Write a hex string to a file as hex.
:param hex_input: the string containing the hex or the file containing the
hex if 'from_file' is set
:param file_path: the path to the file to write
:param append: whether to append or overwrite
:return: None
"""
write(hex_input, file_path, append)
@deprecation.deprecated(deprecated_in="1.1.0",
details="Use the write() function instead and set"
" 'from_file'")
def write_from_file(text_file_path, file_path, append=False):
"""
Writes the hex (in ascii format) contained in the given file to the given
output file path.
:param text_file_path: the path to the input file
:param file_path: the path to the file to write
:param append: whether to append or overwrite
:return: None
"""
with open(text_file_path, 'r') as input_file:
write(input_file.read(), file_path, append=append)
def _parse_arguments():
"""
Parse the command line arguments.
:return: the parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--append",
help="Append to the file rather than overwrite it.",
action="store_true")
parser.add_argument("-c", "--comments",
help="Ignore lines starting with any of the supplied"
" comment strings (space separated) and any"
" content preceded by any of those strings")
parser.add_argument("-f", "--file",
help="Get the hex contents from the specified file")
parser.add_argument("-i", "--ignore",
help="Ignore any of the given strings (space separated)"
" in the input")
parser.add_argument("output_path",
help="The path to the output file to write hex to.")
return parser.parse_args()
def _cmd_line():
"""
Writes hex from a file or from stdin.
:return: None
"""
append = False
from_file = False
parsed_args = _parse_arguments()
# Check if we are copying from a file
if parsed_args.file:
hex_input = parsed_args.file
from_file = True
# Check if stdin has anything for us
elif not sys.stdin.isatty():
hex_input = sys.stdin.read()
else:
sys.stderr.write("ERROR: No input provided via stdin\n")
sys.exit(1)
if parsed_args.append:
append = True
# Convert the comments strings into a tuple
if parsed_args.comments:
parsed_args.comments = tuple(parsed_args.comments.split())
# Convert the ignore strings into a tuple
if parsed_args.ignore:
parsed_args.ignore = tuple(parsed_args.ignore.split())
try:
write(hex_input, parsed_args.output_path, append=append,
comment_strings=parsed_args.comments,
ignore_strings=parsed_args.ignore, from_file=from_file)
except ValueError as e:
sys.stderr.write("ERROR: {}\n".format(e))
sys.exit(1)
except IOError as e:
sys.stderr.write(
"ERROR: {} '{}'\n".format("".join(e.args[1:]), e.filename))
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
_cmd_line()
| 3.578125
| 4
|
main/config/management/commands/download_geolite.py
|
TunedMystic/url-shortener
| 0
|
12777573
|
<gh_stars>0
import gzip
import os
import shutil
import urllib.request
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Download the geolite binaries and store in GEOIP_PATH'
def download_and_extract_file(self, item):
filename = item.get('filename')
# Download file.
urllib.request.urlretrieve(item.get('location'), filename)
# Extract file.
with gzip.open(filename, 'rb') as gzip_file:
file_data = gzip_file.read()
with open(item.get('extract_name'), 'wb') as f:
f.write(file_data)
self.stdout.write(self.style.SUCCESS('Downloaded and extracted \'{}\''.format(filename)))
def handle(self, *args, **options):
path = settings.GEOIP_PATH
files = [
{
'location': 'http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.mmdb.gz',
'filename': os.path.join(path, 'GeoLite2-Country.mmdb.gz'),
'extract_name': os.path.join(path, 'GeoLite2-Country.mmdb'),
},
{
'location': 'http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz',
'filename': os.path.join(path, 'GeoLite2-City.mmdb.gz'),
'extract_name': os.path.join(path, 'GeoLite2-City.mmdb'),
}
]
# Create GeoIP directory if it doesn't exist.
try:
shutil.rmtree(path)
os.makedirs(path)
except FileNotFoundError:
os.makedirs(path)
# Download geolite binaries
for item in files:
self.download_and_extract_file(item)
os.remove(item.get('filename'))
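# Usage sketch (assumption: this app is listed in INSTALLED_APPS and
# settings.GEOIP_PATH points at a writable directory):
#   python manage.py download_geolite
# downloads both GeoLite2 .mmdb.gz archives, extracts them into GEOIP_PATH
# and removes the compressed files.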
| 2.21875
| 2
|
lib/Utils/fitnessmatrixuploadUtilClient.py
|
OGalOz/poolfileupload
| 0
|
12777574
|
<gh_stars>0
import os
import logging
import re
import shutil
import datetime
import pandas as pd
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.WorkspaceClient import Workspace
class fitnessmatrixuploadUtil:
def __init__(self, params):
self.params = params
self.callback_url = os.environ["SDK_CALLBACK_URL"]
self.dfu = DataFileUtil(self.callback_url)
self.data_folder = os.path.abspath("/kb/module/data/")
# This is where files from staging area exist
self.staging_folder = os.path.abspath("/staging/")
self.shared_folder = params["shared_folder"]
self.scratch_folder = os.path.join(params["shared_folder"], "scratch")
def upload_fitnessmatrix(self):
"""
The upload method
We perform a number of steps:
Get name of fitnessmatrix as it is in staging.
Find the fitnessmatrix in /staging/op_datatype_name
Get the output name for the fitnessmatrix
Get the column headers for the pool file for
data and testing purposes. Should be len 12.
Test if fitnessmatrix is well-formed.
We send the file to shock using dfu.
We get the handle and save the object with all
the necessary information- including related genome.
params should include:
output_names,
staging_file_names,
ws_obj,
workspace_id,
"""
print("params: ", self.params)
self.validate_import_fitnessmatrix_from_staging_params()
# Double checking number of files we want from staging
strain_fit_bool = False
stg_fs = self.params["staging_file_names"]
if not len(stg_fs) in [2, 3]:
raise Exception(
"Expecting between 2/3 staging files, got a different number"
f" of staging files: {len(stg_fs)}. Files: " + ", ".join(sgf_fs)
)
else:
staging_fitness_matrix_fp_name = stg_fs[0]
staging_t_score_matrix_fp_name = stg_fs[1]
logging.info(
"Using this file for the fitness matrix: "
+ staging_fitness_matrix_fp_name
+ ". "
)
logging.info(
"Using this file for the t_score matrix: "
+ staging_t_score_matrix_fp_name
+ "."
)
if len(stg_fs) == 3:
strain_fit_bool = True
staging_strain_fit_table_fp_name = stg_fs[2]
logging.info(
"Using this file for the strain fit matrix: "
                + staging_strain_fit_table_fp_name
+ "."
)
op_nms = self.params["output_names"]
if len(op_nms) != 1:
raise Exception(
"Expecting a single output name, got a different number"
f": {len(op_nms)}. Output Names: " + ", ".join(op_nms)
)
else:
op_datatype_name = op_nms[0]
print("op_datatype_name: ", op_datatype_name)
print("top dir /:", os.listdir("/"))
print("/kb/module/:", os.listdir("/kb/module"))
if not os.path.exists(self.staging_folder):
raise Exception("Staging dir does not exist yet! Cannot continue.")
else:
print("Succesfully recognized staging directory")
# This is the path to the pool file
fitnessmatrix_fp = os.path.join(
self.staging_folder, staging_fitness_matrix_fp_name
)
t_scorematrix_fp = os.path.join(
self.staging_folder, staging_t_score_matrix_fp_name
)
if strain_fit_bool:
strain_fitmatrix_fp = os.path.join(
self.staging_folder, staging_strain_fit_table_fp_name
)
# CHECK FILES:
column_header_list, num_lines = self.check_matrix_files(
fitnessmatrix_fp, t_scorematrix_fp, self.params["sep_type"]
)
if strain_fit_bool:
self.check_strain_fit_table(strain_fitmatrix_fp, self.params["sep_type"])
# We copy the files from staging to scratch
new_fitness_matrix_fp = os.path.join(
self.shared_folder, op_datatype_name + ".fit.tsv"
)
new_t_score_fp = os.path.join(
self.shared_folder, op_datatype_name + ".t_score.tsv"
)
if strain_fit_bool:
new_strain_fp = os.path.join(
self.shared_folder, op_datatype_name + ".strain_fit.tsv"
)
if self.params["sep_type"] == "TSV":
shutil.copyfile(fitnessmatrix_fp, new_fitness_matrix_fp)
shutil.copyfile(t_scorematrix_fp, new_t_score_fp)
if strain_fit_bool:
shutil.copyfile(strain_fitmatrix_fp, new_strain_fp)
else:
# sep type is comma (CSVs)
fit_df = pd.read_table(fitnessmatrix_fp, sep=",", keep_default_na=False)
t_score_df = pd.read_table(t_scorematrix_fp, sep=",", keep_default_na=False)
fit_df.to_csv(new_fitness_matrix_fp, sep="\t", index=False)
t_score_df.to_csv(new_t_score_fp, sep="\t", index=False)
if strain_fit_bool:
strain_df = pd.read_table(
strain_fitmatrix_fp, sep=",", keep_default_na=False
)
strain_df.to_csv(new_strain_fp, sep="\t", index=False)
# We create the handles for the objects:
fitness_file_to_shock_result = self.dfu.file_to_shock(
{"file_path": new_fitness_matrix_fp, "make_handle": True, "pack": "gzip"}
)
t_score_file_to_shock_result = self.dfu.file_to_shock(
{"file_path": new_t_score_fp, "make_handle": True, "pack": "gzip"}
)
fitness_res_handle = fitness_file_to_shock_result["handle"]
t_score_res_handle = t_score_file_to_shock_result["handle"]
if strain_fit_bool:
strain_fit_file_to_shock_result = self.dfu.file_to_shock(
{"file_path": new_strain_fp, "make_handle": True, "pack": "gzip"}
)
strain_fit_res_handle = strain_fit_file_to_shock_result["handle"]
# We create a better Description by adding date time and username
date_time = datetime.datetime.utcnow()
# We create the data for the object
matrices_data = {
"file_type": "KBaseRBTnSeq.RBTS_Gene_Fitness_T_Matrix",
"fit_scores_handle": fitness_res_handle["hid"],
"t_scores_handle": t_score_res_handle["hid"],
# below should be shock
"handle_type": fitness_res_handle["type"],
"fitness_shock_url": fitness_res_handle["url"],
"t_scores_shock_url": t_score_res_handle["url"],
"fitness_shock_node_id": fitness_res_handle["id"],
"t_scores_shock_node_id": t_score_res_handle["id"],
"compression_type": "gzip",
"fitness_file_name": fitness_res_handle["file_name"],
"t_scores_file_name": t_score_res_handle["file_name"],
"utc_created": str(date_time),
"column_header_list": column_header_list,
"num_cols": str(len(column_header_list)),
"num_lines": str(num_lines),
"related_genome_ref": self.params["genome_ref"],
"poolcounts_used": [],
"related_experiments_ref": self.params["experiments_ref"],
"related_organism_scientific_name": self.get_genome_organism_name(
self.params["genome_ref"]
),
"description": "Manual Upload: " + self.params["description"],
}
if strain_fit_bool:
matrices_data["strain_fit_handle"] = strain_fit_res_handle["hid"]
matrices_data["strain_fit_shock_url"] = strain_fit_res_handle["url"]
matrices_data["strain_fit_shock_node_id"] = strain_fit_res_handle["id"]
matrices_data["strain_fit_file_name"] = strain_fit_res_handle["file_name"]
# To get workspace id:
ws_id = self.params["workspace_id"]
save_object_params = {
"id": ws_id,
"objects": [
{
"type": "KBaseRBTnSeq.RBTS_Gene_Fitness_T_Matrix",
"data": matrices_data,
"name": op_datatype_name,
}
],
}
# save_objects returns a list of object_infos
dfu_object_info = self.dfu.save_objects(save_object_params)[0]
print("dfu_object_info: ")
print(dfu_object_info)
return {
"Name": dfu_object_info[1],
"Type": dfu_object_info[2],
"Date": dfu_object_info[3],
}
def validate_import_fitnessmatrix_from_staging_params(self):
prms = self.params
# check for required parameters
for p in [
"username",
"staging_file_names",
"genome_ref",
"experiments_ref",
"description",
"output_names",
]:
if p not in prms:
raise ValueError(
'When uploading a fitness matrix, "{}" parameter is required, but missing'.format(
p
)
)
def check_matrix_files(self, fitness_matrix_fp, t_score_matrix_fp, separator):
"""
Args:
fitness_matrix_fp (str): Path to fitness matrix file
t_score_matrix_fp (str): Path to t score matrix file
separator (str): "," or "\t"
Returns:
list<list<column_names (str)>, num_rows (int)>
Description:
We check the matrix files by initializing into dict format
"""
sep = "\t" if separator == "TSV" else ","
"""
dtypes = {
"orgId", "locusId", "sysName", "geneName", "desc" All strings
}
"""
req_cols = ["locusId", "sysName", "geneName", "desc"]
fitness_df = pd.read_table(fitness_matrix_fp, sep=sep, keep_default_na=False)
t_score_df = pd.read_table(t_score_matrix_fp, sep=sep, keep_default_na=False)
for x in req_cols:
if x not in fitness_df.columns:
raise Exception(
f"Required column name {x} not found in fitness file {fitness_matrix_fp}."
)
if x not in t_score_df.columns:
raise Exception(
f"Required column name {x} not found in t score file {t_score_matrix_fp}."
)
for i in range(len(fitness_df.columns)):
if fitness_df.columns[i] != t_score_df.columns[i]:
raise Exception(
"Columns don't match up (fitness, t_score):"
f"{fitness_df.columns[i]} != {t_score_df.columns[i]} at column {i}"
)
# Making sure all non numerical values are the same for both files, and locusIds are unique.
locusIds_dict = {}
for ix, locusId in fitness_df["locusId"].iteritems():
if locusId != t_score_df["locusId"].iloc[ix]:
raise Exception(
f"locusIds not equal at index {ix} in fitness and t score files."
f"{str(fitness_df['locusId'])} != {str(t_score_df['locusId'])}"
)
if fitness_df["sysName"].iloc[ix] != t_score_df["sysName"].iloc[ix]:
if not (
pd.isnull(fitness_df["sysName"].iloc[ix])
and pd.isnull(fitness_df["sysName"].iloc[ix])
):
raise Exception(
f"sysNames not equal at index {ix} in fitness and t score files."
f"{str(fitness_df['sysName'])} != {str(t_score_df['sysName'])}"
)
if fitness_df["geneName"].iloc[ix] != t_score_df["geneName"].iloc[ix]:
if not (
pd.isnull(fitness_df["geneName"].iloc[ix])
and pd.isnull(fitness_df["geneName"].iloc[ix])
):
raise Exception(
f"geneNames not equal at index {ix} in fitness and t score files."
f"{str(fitness_df['geneName'])} != {str(t_score_df['geneName'])}"
)
if fitness_df["desc"].iloc[ix] != t_score_df["desc"].iloc[ix]:
if not (
pd.isnull(fitness_df["desc"].iloc[ix])
and pd.isnull(fitness_df["desc"].iloc[ix])
):
raise Exception(
f"descriptions not equal at index {ix} in fitness and t score files."
f"{str(fitness_df['desc'])} != {str(t_score_df['desc'])}"
)
if locusId in locusIds_dict:
raise Exception(f"Duplicate locusIds at index {ix}")
else:
locusIds_dict[locusId] = 1
logging.info("Matrices columns are: " + ", ".join(fitness_df.columns))
return [list(fitness_df.columns), fitness_df.shape[0]]
def get_genome_organism_name(self, genome_ref):
# Getting the organism name using WorkspaceClient
ws = self.params['ws_obj']
res = ws.get_objects2(
{
"objects": [
{
"ref": genome_ref,
"included": ["scientific_name"],
}
]
}
)
scientific_name = res["data"][0]["data"]["scientific_name"]
return scientific_name
def get_genome_organism_name_from_genes_table(self, gene_table_ref):
# Getting the organism name using WorkspaceClient
ws = self.params['ws_obj']
res = ws.get_objects2(
{
"objects": [
{
"ref": gene_table_ref,
"included": ["related_organism_scientific_name"],
}
]
}
)
scientific_name = res["data"][0]["data"]["related_organism_scientific_name"]
return scientific_name
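# A minimal usage sketch (assumption: every value below is a placeholder; real
# values come from the KBase narrative/staging environment, and the
# SDK_CALLBACK_URL environment variable must be set).
def _example_upload(ws_client, shared_folder):
    params = {
        "username": "some_user",
        "staging_file_names": ["example.fit.tsv", "example.t_score.tsv"],
        "output_names": ["example_fitness_matrix"],
        "genome_ref": "1/2/3",
        "experiments_ref": "4/5/6",
        "description": "example upload",
        "workspace_id": 12345,
        "sep_type": "TSV",
        "ws_obj": ws_client,
        "shared_folder": shared_folder,
    }
    return fitnessmatrixuploadUtil(params).upload_fitnessmatrix()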
| 2.390625
| 2
|
main.py
|
mthompson-lab/xray_thermometer
| 0
|
12777575
|
<gh_stars>0
import subprocess
directory = "/reg/d/psdm/mfx/mfxo1916/scratch/tmp_training/results/r0020/000_rg001/out/debug"
print(set(line.strip() for line in subprocess.check_output("sh generate_hitlist.sh {}".format(directory), shell=True).split()))
log_direct = '/reg/d/psdm/mfx/mfxo1916/scratch/tmp_training/results/r0020/010_rg001/stdout'
from glob import glob
def logfiles(directory):
log_list = glob(directory+"/*log*")
return log_list
# print(logfiles(log_direct))
| 2.203125
| 2
|
tests/timstamp.py
|
zibous/ha-miscale2
| 25
|
12777576
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
try:
from datetime import datetime, timezone
import pytz
except Exception as e:
print('Import error {}, check requirements.txt'.format(e))
sys.exit(1)
DATEFORMAT_MISCAN = '%Y-%m-%d %H:%M:%S'
DATEFORMAT_UTC = '%Y-%m-%dT%H:%M:%SZ'
LAST_TIMESTAMP = str(datetime.today().strftime(DATEFORMAT_UTC))
mi_timestamp = "{}-{}-{} {}:{}:{}".format(
2000 + 20,
9, 23,
12, 10,
5)
# current timestamp from the mi scale
mi_datetime = datetime.strptime(mi_timestamp,DATEFORMAT_MISCAN)
print(mi_datetime)
# convert this to utc time
utc = pytz.utc
mytz = pytz.timezone('Europe/Vaduz')
utc_dt = mytz.localize(mi_datetime)
print (utc_dt.astimezone(utc).strftime(DATEFORMAT_UTC))
| 2.765625
| 3
|
mayan/apps/sources/handlers.py
|
garrans/mayan-edms
| 0
|
12777577
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from converter.models import Transformation
from .literals import SOURCE_UNCOMPRESS_CHOICE_ASK
from .models import POP3Email, IMAPEmail, WatchFolderSource, WebFormSource
def create_default_document_source(sender, **kwargs):
if not WebFormSource.objects.count():
WebFormSource.objects.create(
label=_('Default'), uncompress=SOURCE_UNCOMPRESS_CHOICE_ASK
)
def copy_transformations_to_version(sender, **kwargs):
instance = kwargs['instance']
# TODO: Fix this, source should be previous version
# TODO: Fix this, shouldn't this be at the documents app
Transformation.objects.copy(
source=instance.document, targets=instance.pages.all()
)
def initialize_periodic_tasks(**kwargs):
for source in POP3Email.objects.filter(enabled=True):
source.save()
for source in IMAPEmail.objects.filter(enabled=True):
source.save()
for source in WatchFolderSource.objects.filter(enabled=True):
source.save()
| 1.664063
| 2
|
day7.py
|
seblars/AdventOfCode2020
| 1
|
12777578
|
import fileinput
import re
data = ''.join(fileinput.input()).split('\n')
def searchData(target):
return [d for d in data if re.search(target, d) is not None]
# part 1
targets = ['shiny gold']
searched = []
all_bags = []
converged = False
while not converged:
new_targets = []
for t in targets:
if t not in searched:
# search data
bags = searchData(t)
bags = [" ".join(b.split()[:2]) for b in bags]
# remove target
while t in bags: bags.remove(t)
searched.append(t)
if len(bags) > 0:
new_targets.extend(bags)
all_bags.extend(bags)
targets = new_targets
if len(targets) == 0:
converged = True
print(len(set(all_bags)))
# part 2
pattern1 = ' bags contain'
pattern2 = r'([0-9]+)\s([a-z]+\s[a-z]+)\sbag'
targets = [(1, 'shiny gold')]
n_bags = 0
converged = False
d_bags = {}
while not converged:
new_targets = []
for t in targets:
for d in searchData(t[1] + pattern1):
bags = d.split("contain ")[1].split(', ')
for b in bags:
m = re.match(pattern2, b)
if m:
n_bag, type_bag = m.groups()
n_bags += t[0]*int(n_bag)
new_targets.append((t[0]*int(n_bag), type_bag))
if len(new_targets) == 0:
converged = True
else:
targets = new_targets
print(n_bags)
| 3.171875
| 3
|
prodcal_ics.py
|
ffix/prodcal_ics
| 26
|
12777579
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from icalendar import Calendar, Event
from datetime import datetime, timedelta
from lxml import html
import requests
import argparse
import logging
import secrets
def get_holidays_grouped_by_months(year):
page = requests.get(
"http://www.consultant.ru/law/ref/calendar/proizvodstvennye/{0}/".format(year)
)
if "404 Ресурс не найден!" in page.text:
return None
tree = html.fromstring(page.content)
months = tree.xpath("//th[@class='month']/../../..")
if len(months) != 12:
logging.warning(f"Number of months in {year} don't equal to 12")
holidays = []
for m in months:
holidays_in_month = m.xpath(
".//td[@class='holiday weekend' or @class='weekend' or @class='nowork']/text()"
)
holidays.append([int(day) for day in holidays_in_month])
return holidays
def create_dayoff_event(year, month, day_start, day_end):
event = Event()
event.add("summary", "Выходной")
event.add("dtstart", datetime(year, month, day_start, 0, 0, 0).date())
event.add(
"dtend", datetime(year, month, day_end, 0, 0, 0).date() + timedelta(days=1)
)
# UID is REQUIRED https://tools.ietf.org/html/rfc5545#section-3.6.1
uid = secrets.token_hex(64)
event.add("uid", uid)
return event
def generate_events(year, holidays_by_months):
import more_itertools as mit
events = []
for month, holidays in enumerate(holidays_by_months, start=1):
holidays_groups = [list(group) for group in mit.consecutive_groups(holidays)]
for g in holidays_groups:
e = create_dayoff_event(year, month, g[0], g[-1])
events.append(e)
return events
def parse_args():
parser = argparse.ArgumentParser(
description="This script fetches data about production calendar and generates .ics file with it."
)
default_output_file = "test.ics"
parser.add_argument(
"-o",
dest="output_file",
metavar="out",
default=default_output_file,
help="output file (default: {0})".format(default_output_file),
)
parser.add_argument(
"--start-year",
metavar="yyyy",
type=int,
default=datetime.today().year,
help="year calendar starts (default: current year)",
)
parser.add_argument(
"--end-year",
metavar="yyyy",
type=int,
default=(datetime.today().year + 1),
help="year calendar ends (default: next year)",
)
parser.add_argument("--log-level", metavar="level", default="INFO")
return parser.parse_args()
def generate_calendar(events):
cal = Calendar()
cal.add("prodid", "-//My calendar product//mxm.dk//")
cal.add("version", "2.0")
cal.add("NAME", "Производственный календарь")
cal.add("X-WR-CALNAME", "Производственный календарь")
for e in events:
cal.add_component(e)
return cal
def setup_logging(log_level):
logging_level = getattr(logging, log_level.upper(), None)
if not isinstance(logging_level, int):
raise ValueError("Invalid log level: {0}".format(log_level))
logging.basicConfig(
level=logging_level,
format="%(asctime)s [%(levelname)s] %(message)s",
datefmt="[%d/%m/%Y:%H:%M:%S %z]",
)
if __name__ == "__main__":
args = parse_args()
setup_logging(args.log_level)
events = []
# (args.end_year + 1) because range() function doesn't include right margin
for year in range(args.start_year, args.end_year + 1, 1):
holidays_by_months = get_holidays_grouped_by_months(year)
if not holidays_by_months:
break
events += generate_events(year, holidays_by_months)
cal = generate_calendar(events)
with open(args.output_file, "w") as f:
f.write(cal.to_ical().decode("utf-8"))
| 2.828125
| 3
|
AItest.py
|
owattenmaker/PythonFighter
| 0
|
12777580
|
<gh_stars>0
import pygame
import random
from pygame.locals import *
pygame.init()
screen=pygame.display.set_mode((640,480))
clock=pygame.time.Clock()
px=35
py=35
prect=pygame.Rect(px-10,py-10,20,20)
class Enemy(object):
def __init__(self,x,y):
self.x=x
self.y=y
self.rad=random.randint(5,10)
self.rect=pygame.Rect(0,0,0,0)
self.x_dir = random.choice(('left','right'))
self.y_dir = random.choice(('up','down'))
def move(self, mode='chase'):
if mode=='chase':
if self.x>px:
self.x-=1
elif self.x<px:
self.x+=1
if self.y<py:
self.y+=1
elif self.y>py:
self.y-=1
else: # roam around
# Move for x direction
if self.x_dir == 'left':
if self.x > 1:
self.x -= 1
else:
self.x_dir = 'right'
self.x += 1
else:
if self.x < px - 1:
self.x += 1
else:
self.x_dir = 'left'
self.x -= 1
# Now move for y direction
if self.y_dir == 'up':
if self.y > 1:
self.y -= 1
else:
self.y_dir = 'down'
self.y += 1
else:
if self.y < py - 1:
self.y += 1
else:
self.y_dir = 'up'
self.y -= 1
enemies=[Enemy(50,60),Enemy(200,100), Enemy(200,400), Enemy(200,200), Enemy(200,400), Enemy(200,200)]
roam = {} # Dict to track relative roam/chase
roam_count = {} # Dict to track time for which roaming
max_roam = {}
max_chasing = len(enemies) // 3
cur_chasing = 0
for i, enmy in enumerate(enemies):
if cur_chasing < max_chasing:
roam[i] = 'chase'
cur_chasing += 1
else:
roam[i] = 'roam'
roam_count[i] = 0
max_roam[i] = random.randint(100, 500)
while True:
screen.fill((200,230,200))
key=pygame.key.get_pressed()
if key[K_UP]:
py-=2
if key[K_DOWN]:
py+=2
if key[K_RIGHT]:
px+=2
if key[K_LEFT]:
px-=2
for e in pygame.event.get():
if e.type==QUIT:
exit()
prect=pygame.Rect(px-20,py-20,20,20)
for e_1, enmy in enumerate(enemies):
pygame.draw.circle(screen, (255,0,0), (enmy.x-enmy.rad,enmy.y-enmy.rad), enmy.rad, 0)
moved_once = False
for e_2, enmy2 in enumerate(enemies):
if enmy2 is not enmy:
if enmy.rect.colliderect(enmy2.rect):
if roam[e_2] == roam[e_1] == 'roam':
if cur_chasing < max_chasing:
roam[e_1] = 'chase'
elif roam[e_2] == roam[e_1] == 'chase':
roam[e_2] = 'roam'
cur_chasing -= 1
if roam[e_1] == 'roam':
roam_count[e_1] += 1
enmy.move('roam')
if roam_count[e_1] > max_roam[e_1]:
roam_count[e_1] = 0
if cur_chasing < max_chasing:
roam[e_1] = 'chase'
else:
enmy.move('chase')
else:
if not moved_once:
if roam[e_1] == 'roam':
roam_count[e_1] += 1
enmy.move('roam')
if roam_count[e_1] > max_roam[e_1]:
roam_count[e_1] = 0
if cur_chasing < max_chasing:
roam[e_1] = 'chase'
else:
enmy.move('chase')
moved_once = True
enmy.rect=pygame.Rect(enmy.x-enmy.rad*2,enmy.y-enmy.rad*2,enmy.rad*2,enmy.rad*2)
pygame.draw.rect(screen, (0,0,255), enmy.rect, 2)
pygame.draw.circle(screen, (0,0,255), (px-10,py-10), 10, 0)
pygame.draw.rect(screen, (255,0,0), prect, 2)
clock.tick(80)
pygame.display.flip()
| 3.28125
| 3
|
tests/geometry/test_utm.py
|
jhonykaesemodel/av2-api
| 26
|
12777581
|
<filename>tests/geometry/test_utm.py
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Unit tests on utilities for converting AV2 city coordinates to UTM or WGS84 coordinate systems."""
import numpy as np
import av2.geometry.utm as geo_utils
from av2.geometry.utm import CityName
from av2.utils.typing import NDArrayFloat
def test_convert_city_coords_to_wgs84_atx() -> None:
"""Convert city coordinates from Austin, TX to GPS coordinates."""
points_city: NDArrayFloat = np.array(
[
[1745.37, -1421.37],
[1738.54, -1415.03],
[1731.53, -1410.81],
]
)
wgs84_coords = geo_utils.convert_city_coords_to_wgs84(points_city, city_name=CityName.ATX)
expected_wgs84_coords: NDArrayFloat = np.array(
[
[30.261642967615092, -97.72246957081633],
[30.26170086362131, -97.72253982250783],
[30.261739638233472, -97.72261222631731],
]
)
assert np.allclose(wgs84_coords, expected_wgs84_coords, atol=1e-4)
def test_convert_city_coords_to_wgs84_wdc() -> None:
"""Convert city coordinates from Washington, DC to GPS coordinates."""
points_city: NDArrayFloat = np.array(
[
[1716.85, 4470.38],
[2139.70, 4606.14],
]
)
wgs84_coords = geo_utils.convert_city_coords_to_wgs84(points_city, city_name=CityName.WDC)
expected_wgs84_coords: NDArrayFloat = np.array(
[
[38.9299801515994, -77.0168603173312],
[38.931286945069985, -77.0120195048271],
]
)
assert np.allclose(wgs84_coords, expected_wgs84_coords, atol=1e-4)
def test_convert_gps_to_utm() -> None:
"""Convert Pittsburgh city origin (given in WGS84) to UTM coordinates."""
lat, long = 40.44177902989321, -80.01294377242584
utm_coords = geo_utils.convert_gps_to_utm(lat, long, city_name=CityName.PIT)
expected_utm_coords = 583710, 4477260
assert np.allclose(utm_coords, expected_utm_coords, atol=0.01)
| 2.5625
| 3
|
Lessons/source/strings.py
|
jayceazua/CS-1.3-Core-Data-Structures
| 0
|
12777582
|
<reponame>jayceazua/CS-1.3-Core-Data-Structures
#!python
def contains(text, pattern):
"""Return a boolean indicating whether pattern occurs in text."""
assert isinstance(text, str), 'text is not a string: {}'.format(text)
assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
# TODO: Implement contains here (iteratively and/or recursively)
# Base Case
if pattern == '':
return True
# Edge Case
if text == '':
return False
# return true if there was an index found
if find_index_recursive(text, pattern) != None:
return True
return False
def find_index(text, pattern):
"""Return the starting index of the first occurrence of pattern in text,
or None if not found."""
assert isinstance(text, str), 'text is not a string: {}'.format(text)
assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
# TODO: Implement find_index here (iteratively and/or recursively)
if pattern == '':
return 0
if text == '':
return None
# preset indexes to zero
text_index = 0 # index position to return that stays when a pattern is detected
pattern_index = 0 # iterator of the pattern to check if the pattern is being met
ghost_index = 0 # iterator of the text to match the pattern
# make sure we are within range
while text_index < (len(text)):
# if there is a match move on to the next index of the pattern
if text[ghost_index] == pattern[pattern_index]:
ghost_index += 1
pattern_index += 1
# return the start of the index pattern only if the pattern is fully met
if pattern_index == len(pattern):
return text_index
else: # move on to the next and restart from zero but with the start indexes plus one
pattern_index = 0
text_index += 1
ghost_index = text_index
return None
def find_index_recursive(text, pattern, text_index=None, pattern_index=None, ghost_index=None):
#
if text_index is None and pattern_index is None and ghost_index is None:
text_index = 0
pattern_index = 0
ghost_index = 0
# make sure the indexes are within range
if text_index < len(text) and ghost_index <= (len(text) -1):
# check that there is pattern starting
if text[ghost_index] == pattern[pattern_index]:
# return the index once we found the entire pattern
if pattern_index == (len(pattern) - 1):
return text_index
# check the following indexes of the pattern
ghost_index += 1
pattern_index += 1
return find_index_recursive(text, pattern, text_index, pattern_index, ghost_index)
else:
# move the text index from its current index plus one and start the pattern from 0
pattern_index = 0
text_index += 1
ghost_index = text_index
return find_index_recursive(text, pattern, text_index, pattern_index, ghost_index)
return None
def find_all_indexes(text, pattern):
"""Return a list of starting indexes of all occurrences of pattern in text,
or an empty list if not found."""
assert isinstance(text, str), 'text is not a string: {}'.format(text)
assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
# TODO: Implement find_all_indexes here (iteratively and/or recursively)
# Base Case returns a value without making any subsequent recursive calls.
# It does this for one or more special input values for which the function can be evaluated without recursion.
if pattern == '':
return [x for x in range(0, len(text))]
# an empty array to store indexes found
indexes = []
# get the initial index of the pattern
result = find_index_recursive(text, pattern)
while result != None:
indexes.append(result)
# move the indexes over by one to make sure we are not starting from its previous index
start_index = result + 1
result = find_index_recursive(text, pattern, start_index, 0, start_index)
return indexes
def test_string_algorithms(text, pattern):
found = contains(text, pattern)
print('contains({!r}, {!r}) => {}'.format(text, pattern, found))
# TODO: Uncomment these lines after you implement find_index
index = find_index(text, pattern)
print('find_index({!r}, {!r}) => {}'.format(text, pattern, index))
# TODO: Uncomment these lines after you implement find_all_indexes
indexes = find_all_indexes(text, pattern)
print('find_all_indexes({!r}, {!r}) => {}'.format(text, pattern, indexes))
def main():
"""Read command-line arguments and test string searching algorithms."""
import sys
args = sys.argv[1:] # Ignore script file name
if len(args) == 2:
text = args[0]
pattern = args[1]
test_string_algorithms(text, pattern)
else:
script = sys.argv[0]
print('Usage: {} text pattern'.format(script))
print('Searches for occurrences of pattern in text')
print("\nExample: {} 'abra cadabra' 'abra'".format(script))
print("contains('abra cadabra', 'abra') => True")
print("find_index('abra cadabra', 'abra') => 0")
print("find_all_indexes('abra cadabra', 'abra') => [0, 8]")
if __name__ == '__main__':
main()
| 4.03125
| 4
|
plugins/sed.py
|
martinkirch/tofbot
| 1
|
12777583
|
<reponame>martinkirch/tofbot<filename>plugins/sed.py<gh_stars>1-10
# This file is part of tofbot, a friendly IRC bot.
# You may redistribute it under the Simplified BSD License.
# If we meet some day, and you think this stuff is worth it,
# you can buy us a beer in return.
#
# Copyright (c) 2011 <NAME> <<EMAIL>>
"See PluginSed"
from toflib import Plugin
import re
import sre_constants
class PluginSed(Plugin):
"That's what she sed"
def __init__(self, bot):
Plugin.__init__(self, bot)
self.msg = None
def handle_msg(self, msg_text, chan, nick):
r = 's/(.*?)/(.*?)/?$'
m = re.match(r, msg_text)
if m is not None and self.msg is not None:
regexp = m.group(1)
to = m.group(2)
msg_who = self.msg[0]
msg_what = self.msg[1]
try:
new_msg = re.sub(regexp, to, msg_what)
if new_msg != msg_what:
self.say("<%s> : %s" % (msg_who, new_msg))
self.msg = (nick, new_msg)
except sre_constants.error:
pass
else:
self.msg = (nick, msg_text)
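# A brief interaction sketch (assumption: the nicknames and messages are
# illustrative; "tofbot" is the bot's reply emitted via self.say()):
#   <alice>  I love pyhton
#   <bob>    s/pyhton/python/
#   <tofbot> <alice> : I love python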
| 2.359375
| 2
|
yahoo_finance_pynterface/__init__.py
|
mellon85/yahoo-finance-pynterface
| 15
|
12777584
|
<filename>yahoo_finance_pynterface/__init__.py
#!/usr/bin/env python
#
# Yahoo Finance Python Interface
# https://github.com/andrea-dm/yahoo-finance-pynterface
#
# Copyright (c) 2018 <NAME>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__name__ = "yahoo_finance_pynterface";
__version__ = "1.0.3";
__author__ = "<NAME>";
__all__ = ['Get'];
from . import api
from . import core
import requests
import datetime as dt
import concurrent.futures as cf
import pandas as pd
from typing import Tuple, Dict, List, Union, ClassVar, Any, Optional, Type
TickerType = Union[str, List[str]];
PeriodType = Optional[Union[str,List[Union[str,dt.datetime]]]];
AccessModeType = Type[api.AccessModeInQuery];
QueryType = Type[api.Query];
class Get():
"""
Class container that exposes the methods available
to interact with the Yahoo Finance API.
Such methods are:
- With(...) : to enable/disable parallel calculations;
- CurrentProcessingMode() : to get the current processing mode;
- Info(...) : to retrieve basic informations about the ticker such as trading periods, base currency, ...;
- Prices(...) : to get the time series of OHLC prices together with Volumes (and adjusted close prices, when available);
- Dividends(...) : to get the time series of dividends;
- Splits(...) : to get the time series of splits;
The above methods should be sufficient for any standard usage.
To gain much more control over the data sent back by Yahoo, the following method is implemented:
- Data(...) : the basic method that is actually pushing the request for data.
All the other methods are somewhat relying on it.
"""
__processing_mode__:Type[core.ProcessingMode] = core.ProcessingMode.AUTO;
@classmethod
def With(cls, mode:Type[core.ProcessingMode]) -> None:
if not isinstance(mode,core.ProcessingMode):
raise TypeError(f"invalid type for the argument 'mode'! <class 'core.ProcessingMode'> expected; got '{type(mode)}'");
else:
cls.__processing_mode__ = mode;
@classmethod
def CurrentProcessingMode(cls) -> str:
return str(cls.__processing_mode__);
@classmethod
def Info(cls, tickers:TickerType) -> Dict[str,Any]:
r= cls.Data(tickers, "1d", "1y", using_api=api.AccessModeInQuery.CHART);
return { ticker:core.parser({k:v for k,v in data['meta'].items() if k not in ['dataGranularity', 'validRanges']}) for ticker,data in r.items()};
@classmethod
def Prices(cls, tickers:TickerType,
interval:str="1d",
period:PeriodType=None,
using_api:AccessModeType=api.AccessModeInQuery.CHART) -> Optional[Union[Dict[str,Any],pd.DataFrame]]:
r = cls.Data(tickers, interval, period, events=api.EventsInQuery.HISTORY, using_api=using_api);
k = 'quotes' if using_api is api.AccessModeInQuery.CHART else 'data';
return {ticker:data[k] for ticker,data in r.items()} if isinstance(tickers,list) else r[tickers][k];
@classmethod
def Dividends(cls, tickers:TickerType,
interval:str="1d",
period:PeriodType=None,
using_api:AccessModeType=api.AccessModeInQuery.CHART) -> Optional[Union[Dict[str,Any],pd.DataFrame]]:
r = cls.Data(tickers, interval, period, events=api.EventsInQuery.DIVIDENDS, using_api=using_api);
k = 'events' if using_api is api.AccessModeInQuery.CHART else 'data';
return {ticker:data[k] for ticker,data in r.items()} if isinstance(tickers,list) else r[tickers][k];
@classmethod
def Splits(cls, tickers:TickerType,
interval:str="1d",
period:PeriodType=None,
using_api:AccessModeType=api.AccessModeInQuery.CHART) -> Optional[Union[Dict[str,Any],pd.DataFrame]]:
r = cls.Data(tickers, interval, period, events=api.EventsInQuery.SPLITS, using_api=using_api);
k = 'events' if using_api is api.AccessModeInQuery.CHART else 'data';
return {ticker:data[k] for ticker,data in r.items()} if isinstance(tickers,list) else r[tickers][k]
@classmethod
def Data(cls, tickers:TickerType,
interval:str="1d",
period:Optional[Union[str,dt.datetime,List[Union[str,dt.datetime]]]]=None,
events:Type[api.EventsInQuery]=api.EventsInQuery.HISTORY,
using_api:AccessModeType=api.AccessModeInQuery.DEFAULT) -> Dict[str,Any]:
if isinstance(tickers,str) or (isinstance(tickers,list) and all(isinstance(ticker,str) for ticker in tickers)):
tickers = tickers if isinstance(tickers, list) else list([tickers]);
tickers = [x.upper() for x in tickers];
else:
raise TypeError(f"invalid type for the argument 'tickers'! {type(str)} or a list of {type(str)} expected; got {type(tickers)}");
if period is None:
t = dt.datetime.now();
period = [t-dt.timedelta(weeks=52),t] if using_api is api.AccessModeInQuery.DOWNLOAD else "1y";
params = api.Query(using_api);
params.SetPeriod(period);
params.SetInterval(interval);
params.SetEvents(events);
if not isinstance(using_api,api.AccessModeInQuery):
raise TypeError(f"invalid type for the argument 'using_api'! <class 'api.AccessModeInQuery'> expected; got {type(api)}");
else:
if cls.__processing_mode__ is core.ProcessingMode.PARALLEL:
get = cls.__parallel__;
elif cls.__processing_mode__ is core.ProcessingMode.SERIAL:
get = cls.__serial__;
else:
get = cls.__serial__ if len(tickers)==1 else cls.__parallel__;
return get(tickers, params, using_api);
@classmethod
def __serial__(cls, tickers:list, params:QueryType, using_api:AccessModeType) -> Dict[str,Any]:
data = dict();
for ticker in tickers:
response = cls.__get__(ticker, params, using_api, timeout=2);
data[ticker] = response if response else None;
return data;
@classmethod
def __parallel__(cls, tickers:list, params:QueryType, using_api:AccessModeType) -> Dict[str,Any]:
data = dict();
with cf.ProcessPoolExecutor(max_workers=len(tickers)) as executor:
results = { executor.submit(cls.__get__, ticker, params, using_api, timeout=2) : ticker for ticker in tickers};
for result in cf.as_completed(results):
data[results[result]] = result.result() if result.result() else None;
return data;
@staticmethod
def __get__(ticker:str, params:QueryType, this_api:AccessModeType, timeout:int=5) -> Optional[dict]:
err, res = api.Session.With(this_api).Get(ticker, params, timeout=timeout);
if err:
err_msg = "*ERROR: {0:s}.\n{1:s}";
if res['code']=='Unprocessable Entity':
print(err_msg.format(res['code'], res['description']));
print("please, check whether the parameters you have set are correct!");
elif res['code']=="-1":
print(err_msg.format("A request exception occured", res['description']));
elif res['code']=="-2":
print(err_msg.format(res['description'], "Aborting the task..."));
else:
print(err_msg.format(res['code'], res['description']));
return None;
else:
return res;
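# A minimal usage sketch (assumption: the tickers and period are illustrative;
# a network connection to Yahoo Finance is required).
def _example_prices():
    Get.With(core.ProcessingMode.SERIAL);
    return Get.Prices(["AAPL", "MSFT"], interval="1d", period="1y");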
| 1.609375
| 2
|
Easy/392. Is Subsequence/solution (3).py
|
czs108/LeetCode-Solutions
| 3
|
12777585
|
# 392. Is Subsequence
# Runtime: 28 ms, faster than 90.94% of Python3 online submissions for Is Subsequence.
# Memory Usage: 14.2 MB, less than 74.36% of Python3 online submissions for Is Subsequence.
class Solution:
# Two Pointers
def isSubsequence(self, s: str, t: str) -> bool:
left, right = 0, 0
while left < len(s) and right < len(t):
if s[left] == t[right]:
left += 1
right += 1
return left == len(s)
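# A brief usage sketch (assumption: the example strings are illustrative):
#   Solution().isSubsequence("abc", "ahbgdc")  # -> True
#   Solution().isSubsequence("axc", "ahbgdc")  # -> False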
| 3.671875
| 4
|
sarpy/io/complex/other_nitf.py
|
ngageoint/SarPy
| 0
|
12777586
|
"""
Work in progress for reading some other kind of complex NITF.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
import logging
from typing import Union, Tuple, List, Optional, Callable, Sequence
import copy
from datetime import datetime
import numpy
from scipy.constants import foot
from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf
from sarpy.geometry.latlon import num as lat_lon_parser
from sarpy.io.general.base import SarpyIOError
from sarpy.io.general.data_segment import DataSegment, SubsetSegment
from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction
from sarpy.io.general.nitf import extract_image_corners, NITFDetails, NITFReader
from sarpy.io.general.nitf_elements.security import NITFSecurityTags
from sarpy.io.general.nitf_elements.image import ImageSegmentHeader, ImageSegmentHeader0
from sarpy.io.general.nitf_elements.nitf_head import NITFHeader, NITFHeader0
from sarpy.io.general.nitf_elements.base import TREList
from sarpy.io.general.nitf_elements.tres.unclass.CMETAA import CMETAA
from sarpy.io.general.utils import is_file_like
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType
from sarpy.io.complex.sicd_elements.ImageData import ImageDataType
from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType
from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType
from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType
from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, \
TxFrequencyType, WaveformParametersType, ChanParametersType
from sarpy.io.complex.sicd_elements.SCPCOA import SCPCOAType
from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, TxFrequencyProcType
from sarpy.io.complex.sicd_elements.ImageCreation import ImageCreationType
from sarpy.io.complex.sicd_elements.PFA import PFAType
logger = logging.getLogger(__name__)
_iso_date_format = '{}-{}-{}T{}:{}:{}'
# NB: DO NOT implement is_a() here.
# This will explicitly happen after other readers
########
# Define sicd structure from image sub-header information
def extract_sicd(
img_header: Union[ImageSegmentHeader, ImageSegmentHeader0],
        transpose: bool,
nitf_header: Optional[Union[NITFHeader, NITFHeader0]] = None) -> SICDType:
"""
Extract the best available SICD structure from relevant nitf header structures.
Parameters
----------
img_header : ImageSegmentHeader|ImageSegmentHeader0
transpose : bool
nitf_header : None|NITFHeader|NITFHeader0
Returns
-------
SICDType
"""
def get_collection_info() -> CollectionInfoType:
isorce = img_header.ISORCE.strip()
collector_name = None if len(isorce) < 1 else isorce
iid2 = img_header.IID2.strip()
core_name = img_header.IID1.strip() if len(iid2) < 1 else iid2
class_str = img_header.Security.CLAS
if class_str == 'T':
classification = 'TOPSECRET'
elif class_str == 'S':
classification = 'SECRET'
elif class_str == 'C':
classification = 'CONFIDENTIAL'
elif class_str == 'U':
classification = 'UNCLASSIFIED'
else:
classification = ''
ctlh = img_header.Security.CTLH.strip()
        if len(ctlh) > 0:
            classification += '//' + ctlh
        code = img_header.Security.CODE.strip()
        if len(code) > 0:
            classification += '//' + code
return CollectionInfoType(
CollectorName=collector_name,
CoreName=core_name,
Classification=classification)
def get_image_data() -> ImageDataType:
pvtype = img_header.PVTYPE
if pvtype == 'C':
if img_header.NBPP != 64:
logger.warning(
'This NITF has complex bands that are not 64-bit.\n\t'
'This is not currently supported.')
pixel_type = 'RE32F_IM32F'
elif pvtype == 'R':
if img_header.NBPP == 64:
logger.warning(
'The real/imaginary data in the NITF are stored as 64-bit floating point.\n\t'
'The closest Pixel Type, RE32F_IM32F, will be used,\n\t'
'but there may be overflow issues if converting this file.')
pixel_type = 'RE32F_IM32F'
elif pvtype == 'SI':
pixel_type = 'RE16I_IM16I'
else:
raise ValueError('Got unhandled PVTYPE {}'.format(pvtype))
if transpose:
rows = img_header.NCOLS
cols = img_header.NROWS
else:
rows = img_header.NROWS
cols = img_header.NCOLS
return ImageDataType(
PixelType=pixel_type,
NumRows=rows,
NumCols=cols,
FirstRow=0,
FirstCol=0,
FullImage=(rows, cols),
SCPPixel=(0.5 * rows, 0.5 * cols))
def append_country_code(cc) -> None:
if len(cc) > 0:
if the_sicd.CollectionInfo is None:
the_sicd.CollectionInfo = CollectionInfoType(CountryCodes=[cc, ])
elif the_sicd.CollectionInfo.CountryCodes is None:
the_sicd.CollectionInfo.CountryCodes = [cc, ]
elif cc not in the_sicd.CollectionInfo.CountryCodes:
the_sicd.CollectionInfo.CountryCodes.append(cc)
def set_image_corners(icps: numpy.ndarray, override: bool = False) -> None:
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType(ImageCorners=icps)
elif the_sicd.GeoData.ImageCorners is None or override:
the_sicd.GeoData.ImageCorners = icps
def set_arp_position(arp_ecf: numpy.ndarray, override: bool = False) -> None:
if the_sicd.SCPCOA is None:
the_sicd.SCPCOA = SCPCOAType(ARPPos=arp_ecf)
elif override:
# prioritize this information first - it should be more reliable than other sources
the_sicd.SCPCOA.ARPPos = arp_ecf
def set_scp(scp_ecf: numpy.ndarray, scp_pixel: Union[numpy.ndarray, list, tuple], override: bool = False) -> None:
def set_scppixel():
if the_sicd.ImageData is None:
the_sicd.ImageData = ImageDataType(SCPPixel=scp_pixel)
else:
the_sicd.ImageData.SCPPixel = scp_pixel
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType(SCP=SCPType(ECF=scp_ecf))
set_scppixel()
elif the_sicd.GeoData.SCP is None or override:
the_sicd.GeoData.SCP = SCPType(ECF=scp_ecf)
set_scppixel()
def set_collect_start(
collect_start: Union[str, datetime, numpy.datetime64], override: bool = False) -> None:
if the_sicd.Timeline is None:
the_sicd.Timeline = TimelineType(CollectStart=collect_start)
elif the_sicd.Timeline.CollectStart is None or override:
the_sicd.Timeline.CollectStart = collect_start
def set_uvects(row_unit: numpy.ndarray, col_unit: numpy.ndarray) -> None:
if the_sicd.Grid is None:
the_sicd.Grid = GridType(
Row=DirParamType(UVectECF=row_unit),
Col=DirParamType(UVectECF=col_unit))
return
if the_sicd.Grid.Row is None:
the_sicd.Grid.Row = DirParamType(UVectECF=row_unit)
elif the_sicd.Grid.Row.UVectECF is None:
the_sicd.Grid.Row.UVectECF = row_unit
if the_sicd.Grid.Col is None:
the_sicd.Grid.Col = DirParamType(UVectECF=col_unit)
elif the_sicd.Grid.Col.UVectECF is None:
the_sicd.Grid.Col.UVectECF = col_unit
def try_CMETAA() -> None:
# noinspection PyTypeChecker
tre = None if tres is None else tres['CMETAA'] # type: CMETAA
if tre is None:
return
cmetaa = tre.DATA
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType()
if the_sicd.SCPCOA is None:
the_sicd.SCPCOA = SCPCOAType()
if the_sicd.Grid is None:
the_sicd.Grid = GridType()
if the_sicd.Timeline is None:
the_sicd.Timeline = TimelineType()
if the_sicd.RadarCollection is None:
the_sicd.RadarCollection = RadarCollectionType()
if the_sicd.ImageFormation is None:
the_sicd.ImageFormation = ImageFormationType()
the_sicd.SCPCOA.SCPTime = 0.5*float(cmetaa.WF_CDP)
the_sicd.GeoData.SCP = SCPType(ECF=tre.get_scp())
the_sicd.SCPCOA.ARPPos = tre.get_arp()
the_sicd.SCPCOA.SideOfTrack = cmetaa.CG_LD.strip().upper()
the_sicd.SCPCOA.SlantRange = float(cmetaa.CG_SRAC)
the_sicd.SCPCOA.DopplerConeAng = float(cmetaa.CG_CAAC)
the_sicd.SCPCOA.GrazeAng = float(cmetaa.CG_GAAC)
the_sicd.SCPCOA.IncidenceAng = 90 - float(cmetaa.CG_GAAC)
if hasattr(cmetaa, 'CG_TILT'):
the_sicd.SCPCOA.TwistAng = float(cmetaa.CG_TILT)
if hasattr(cmetaa, 'CG_SLOPE'):
the_sicd.SCPCOA.SlopeAng = float(cmetaa.CG_SLOPE)
the_sicd.ImageData.SCPPixel = [int(cmetaa.IF_DC_IS_COL), int(cmetaa.IF_DC_IS_ROW)]
img_corners = tre.get_image_corners()
if img_corners is not None:
the_sicd.GeoData.ImageCorners = img_corners
if cmetaa.CMPLX_SIGNAL_PLANE.upper() == 'S':
the_sicd.Grid.ImagePlane = 'SLANT'
elif cmetaa.CMPLX_SIGNAL_PLANE.upper() == 'G':
the_sicd.Grid.ImagePlane = 'GROUND'
else:
            logger.warning(
                'Got unexpected CMPLX_SIGNAL_PLANE value {},\n\t'
                'setting ImagePlane to SLANT'.format(cmetaa.CMPLX_SIGNAL_PLANE))
            the_sicd.Grid.ImagePlane = 'SLANT'
the_sicd.Grid.Row = DirParamType(
SS=float(cmetaa.IF_RSS),
ImpRespWid=float(cmetaa.IF_RGRES),
Sgn=1 if cmetaa.IF_RFFTS.strip() == '-' else -1, # opposite sign convention
ImpRespBW=float(cmetaa.IF_RFFT_SAMP)/(float(cmetaa.IF_RSS)*float(cmetaa.IF_RFFT_TOT)))
the_sicd.Grid.Col = DirParamType(
SS=float(cmetaa.IF_AZSS),
ImpRespWid=float(cmetaa.IF_AZRES),
Sgn=1 if cmetaa.IF_AFFTS.strip() == '-' else -1, # opposite sign convention
ImpRespBW=float(cmetaa.IF_AZFFT_SAMP)/(float(cmetaa.IF_AZSS)*float(cmetaa.IF_AZFFT_TOT)))
cmplx_weight = cmetaa.CMPLX_WEIGHT.strip().upper()
if cmplx_weight == 'UWT':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='UNIFORM')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='UNIFORM')
elif cmplx_weight == 'HMW':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='HAMMING')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='HAMMING')
elif cmplx_weight == 'HNW':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='HANNING')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='HANNING')
elif cmplx_weight == 'TAY':
the_sicd.Grid.Row.WgtType = WgtTypeType(
WindowName='TAYLOR',
Parameters={
'SLL': '-{0:d}'.format(int(cmetaa.CMPLX_RNG_SLL)),
'NBAR': '{0:d}'.format(int(cmetaa.CMPLX_RNG_TAY_NBAR))})
the_sicd.Grid.Col.WgtType = WgtTypeType(
WindowName='TAYLOR',
Parameters={
'SLL': '-{0:d}'.format(int(cmetaa.CMPLX_AZ_SLL)),
'NBAR': '{0:d}'.format(int(cmetaa.CMPLX_AZ_TAY_NBAR))})
else:
logger.warning(
'Got unsupported CMPLX_WEIGHT value {}.\n\tThe resulting SICD will '
'not have valid weight array populated'.format(cmplx_weight))
the_sicd.Grid.Row.define_weight_function()
the_sicd.Grid.Col.define_weight_function()
# noinspection PyBroadException
try:
date_str = cmetaa.T_UTC_YYYYMMMDD
time_str = cmetaa.T_HHMMSSUTC
date_time = _iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
time_str[:2], time_str[2:4], time_str[4:6])
the_sicd.Timeline.CollectStart = numpy.datetime64(date_time, 'us')
except Exception:
logger.info('Failed extracting start time from CMETAA')
pass
the_sicd.Timeline.CollectDuration = float(cmetaa.WF_CDP)
the_sicd.Timeline.IPP = [
IPPSetType(TStart=0,
TEnd=float(cmetaa.WF_CDP),
IPPStart=0,
IPPEnd=numpy.floor(float(cmetaa.WF_CDP)*float(cmetaa.WF_PRF)),
IPPPoly=[0, float(cmetaa.WF_PRF)])]
the_sicd.RadarCollection.TxFrequency = TxFrequencyType(
Min=float(cmetaa.WF_SRTFR),
Max=float(cmetaa.WF_ENDFR))
the_sicd.RadarCollection.TxPolarization = cmetaa.POL_TR.upper()
the_sicd.RadarCollection.Waveform = [WaveformParametersType(
TxPulseLength=float(cmetaa.WF_WIDTH),
TxRFBandwidth=float(cmetaa.WF_BW),
TxFreqStart=float(cmetaa.WF_SRTFR),
TxFMRate=float(cmetaa.WF_CHRPRT)*1e12)]
tx_rcv_pol = '{}:{}'.format(cmetaa.POL_TR.upper(), cmetaa.POL_RE.upper())
the_sicd.RadarCollection.RcvChannels = [
ChanParametersType(TxRcvPolarization=tx_rcv_pol)]
the_sicd.ImageFormation.TxRcvPolarizationProc = tx_rcv_pol
if_process = cmetaa.IF_PROCESS.strip().upper()
if if_process == 'PF':
the_sicd.ImageFormation.ImageFormAlgo = 'PFA'
scp_ecf = tre.get_scp()
fpn_ned = numpy.array(
[float(cmetaa.CG_FPNUV_X), float(cmetaa.CG_FPNUV_Y), float(cmetaa.CG_FPNUV_Z)], dtype='float64')
ipn_ned = numpy.array(
[float(cmetaa.CG_IDPNUVX), float(cmetaa.CG_IDPNUVY), float(cmetaa.CG_IDPNUVZ)], dtype='float64')
fpn_ecf = ned_to_ecf(fpn_ned, scp_ecf, absolute_coords=False)
ipn_ecf = ned_to_ecf(ipn_ned, scp_ecf, absolute_coords=False)
the_sicd.PFA = PFAType(FPN=fpn_ecf, IPN=ipn_ecf)
elif if_process in ['RM', 'CD']:
the_sicd.ImageFormation.ImageFormAlgo = 'RMA'
# the remainder of this is guesswork to define required fields
the_sicd.ImageFormation.TStartProc = 0 # guess work
the_sicd.ImageFormation.TEndProc = float(cmetaa.WF_CDP)
the_sicd.ImageFormation.TxFrequencyProc = TxFrequencyProcType(
MinProc=float(cmetaa.WF_SRTFR), MaxProc=float(cmetaa.WF_ENDFR))
# all remaining guess work
the_sicd.ImageFormation.STBeamComp = 'NO'
the_sicd.ImageFormation.ImageBeamComp = 'SV' if cmetaa.IF_BEAM_COMP[0] == 'Y' else 'NO'
the_sicd.ImageFormation.AzAutofocus = 'NO' if cmetaa.AF_TYPE[0] == 'N' else 'SV'
the_sicd.ImageFormation.RgAutofocus = 'NO'
def try_AIMIDA() -> None:
tre = None if tres is None else tres['AIMIDA']
if tre is None:
return
aimida = tre.DATA
append_country_code(aimida.COUNTRY.strip())
create_time = datetime.strptime(aimida.CREATION_DATE, '%d%b%y')
if the_sicd.ImageCreation is None:
the_sicd.ImageCreation = ImageCreationType(DateTime=create_time)
elif the_sicd.ImageCreation.DateTime is None:
the_sicd.ImageCreation.DateTime = create_time
collect_start = datetime.strptime(aimida.MISSION_DATE+aimida.TIME, '%d%b%y%H%M')
set_collect_start(collect_start, override=False)
def try_AIMIDB() -> None:
tre = None if tres is None else tres['AIMIDB']
if tre is None:
return
aimidb = tre.DATA
append_country_code(aimidb.COUNTRY.strip())
if the_sicd.ImageFormation is not None and the_sicd.ImageFormation.SegmentIdentifier is None:
the_sicd.ImageFormation.SegmentIdentifier = aimidb.CURRENT_SEGMENT.strip()
date_str = aimidb.ACQUISITION_DATE
collect_start = numpy.datetime64(_iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
date_str[8:10], date_str[10:12], date_str[12:14]), 'us')
set_collect_start(collect_start, override=False)
def try_ACFT() -> None:
if tres is None:
return
tre = tres['ACFTA']
if tre is None:
tre = tres['ACFTB']
if tre is None:
return
acft = tre.DATA
sensor_id = acft.SENSOR_ID.strip()
if len(sensor_id) > 1:
if the_sicd.CollectionInfo is None:
the_sicd.CollectionInfo = CollectionInfoType(CollectorName=sensor_id)
elif the_sicd.CollectionInfo.CollectorName is None:
the_sicd.CollectionInfo.CollectorName = sensor_id
row_ss = float(acft.ROW_SPACING)
col_ss = float(acft.COL_SPACING)
if hasattr(acft, 'ROW_SPACING_UNITS') and acft.ROW_SPACING_UNITS.strip().lower() == 'f':
row_ss *= foot
if hasattr(acft, 'COL_SPACING_UNITS') and acft.COL_SPACING_UNITS.strip().lower() == 'f':
col_ss *= foot
# NB: these values are actually ground plane values, and should be
# corrected to slant plane if possible
if the_sicd.SCPCOA is not None:
if the_sicd.SCPCOA.GrazeAng is not None:
col_ss *= numpy.cos(numpy.deg2rad(the_sicd.SCPCOA.GrazeAng))
if the_sicd.SCPCOA.TwistAng is not None:
row_ss *= numpy.cos(numpy.deg2rad(the_sicd.SCPCOA.TwistAng))
if the_sicd.Grid is None:
the_sicd.Grid = GridType(Row=DirParamType(SS=row_ss), Col=DirParamType(SS=col_ss))
return
if the_sicd.Grid.Row is None:
the_sicd.Grid.Row = DirParamType(SS=row_ss)
elif the_sicd.Grid.Row.SS is None:
the_sicd.Grid.Row.SS = row_ss
if the_sicd.Grid.Col is None:
the_sicd.Grid.Col = DirParamType(SS=col_ss)
elif the_sicd.Grid.Col.SS is None:
the_sicd.Grid.Col.SS = col_ss
def try_BLOCKA() -> None:
tre = None if tres is None else tres['BLOCKA']
if tre is None:
return
blocka = tre.DATA
icps = []
for fld_name in ['FRFC_LOC', 'FRLC_LOC', 'LRLC_LOC', 'LRFC_LOC']:
value = getattr(blocka, fld_name)
# noinspection PyBroadException
try:
lat_val = float(value[:10])
lon_val = float(value[10:21])
except ValueError:
lat_val = lat_lon_parser(value[:10])
lon_val = lat_lon_parser(value[10:21])
icps.append([lat_val, lon_val])
set_image_corners(icps, override=False)
def try_MPDSRA() -> None:
def valid_array(arr):
return numpy.all(numpy.isfinite(arr)) and numpy.any(arr != 0)
tre = None if tres is None else tres['MPDSRA']
if tre is None:
return
mpdsra = tre.DATA
scp_ecf = foot*numpy.array(
[float(mpdsra.ORO_X), float(mpdsra.ORO_Y), float(mpdsra.ORO_Z)], dtype='float64')
if valid_array(scp_ecf):
set_scp(scp_ecf, (int(mpdsra.ORP_COLUMN) - 1, int(mpdsra.ORP_ROW) - 1), override=False)
arp_pos_ned = foot*numpy.array(
[float(mpdsra.ARP_POS_N), float(mpdsra.ARP_POS_E), float(mpdsra.ARP_POS_D)], dtype='float64')
arp_vel_ned = foot*numpy.array(
[float(mpdsra.ARP_VEL_N), float(mpdsra.ARP_VEL_E), float(mpdsra.ARP_VEL_D)], dtype='float64')
arp_acc_ned = foot*numpy.array(
[float(mpdsra.ARP_ACC_N), float(mpdsra.ARP_ACC_E), float(mpdsra.ARP_ACC_D)], dtype='float64')
arp_pos = ned_to_ecf(arp_pos_ned, scp_ecf, absolute_coords=True) if valid_array(arp_pos_ned) else None
set_arp_position(arp_pos, override=False)
arp_vel = ned_to_ecf(arp_vel_ned, scp_ecf, absolute_coords=False) if valid_array(arp_vel_ned) else None
if the_sicd.SCPCOA.ARPVel is None:
the_sicd.SCPCOA.ARPVel = arp_vel
arp_acc = ned_to_ecf(arp_acc_ned, scp_ecf, absolute_coords=False) if valid_array(arp_acc_ned) else None
if the_sicd.SCPCOA.ARPAcc is None:
the_sicd.SCPCOA.ARPAcc = arp_acc
if the_sicd.PFA is not None and the_sicd.PFA.FPN is None:
# TODO: is this already in meters?
fpn_ecf = numpy.array(
[float(mpdsra.FOC_X), float(mpdsra.FOC_Y), float(mpdsra.FOC_Z)], dtype='float64') # *foot
if valid_array(fpn_ecf):
the_sicd.PFA.FPN = fpn_ecf
def try_MENSRB() -> None:
tre = None if tres is None else tres['MENSRB']
if tre is None:
return
mensrb = tre.DATA
arp_llh = numpy.array(
[lat_lon_parser(mensrb.ACFT_LOC[:12]),
lat_lon_parser(mensrb.ACFT_LOC[12:25]),
foot*float(mensrb.ACFT_ALT)], dtype='float64')
scp_llh = numpy.array(
[lat_lon_parser(mensrb.RP_LOC[:12]),
lat_lon_parser(mensrb.RP_LOC[12:25]),
foot*float(mensrb.RP_ELV)], dtype='float64')
# TODO: handle the conversion from msl to hae
arp_ecf = geodetic_to_ecf(arp_llh)
scp_ecf = geodetic_to_ecf(scp_llh)
set_arp_position(arp_ecf, override=True)
set_scp(scp_ecf, (int(mensrb.RP_COL)-1, int(mensrb.RP_ROW)-1), override=False)
row_unit_ned = numpy.array(
[float(mensrb.C_R_NC), float(mensrb.C_R_EC), float(mensrb.C_R_DC)], dtype='float64')
col_unit_ned = numpy.array(
[float(mensrb.C_AZ_NC), float(mensrb.C_AZ_EC), float(mensrb.C_AZ_DC)], dtype='float64')
set_uvects(ned_to_ecf(row_unit_ned, scp_ecf, absolute_coords=False),
ned_to_ecf(col_unit_ned, scp_ecf, absolute_coords=False))
def try_MENSRA() -> None:
tre = None if tres is None else tres['MENSRA']
if tre is None:
return
mensra = tre.DATA
arp_llh = numpy.array(
[lat_lon_parser(mensra.ACFT_LOC[:10]),
lat_lon_parser(mensra.ACFT_LOC[10:21]),
foot*float(mensra.ACFT_ALT)], dtype='float64')
scp_llh = numpy.array(
[lat_lon_parser(mensra.CP_LOC[:10]),
lat_lon_parser(mensra.CP_LOC[10:21]),
foot*float(mensra.CP_ALT)], dtype='float64')
# TODO: handle the conversion from msl to hae
arp_ecf = geodetic_to_ecf(arp_llh)
scp_ecf = geodetic_to_ecf(scp_llh)
set_arp_position(arp_ecf, override=True)
# TODO: is this already zero based?
set_scp(geodetic_to_ecf(scp_llh), (int(mensra.CCRP_COL), int(mensra.CCRP_ROW)), override=False)
row_unit_ned = numpy.array(
[float(mensra.C_R_NC), float(mensra.C_R_EC), float(mensra.C_R_DC)], dtype='float64')
col_unit_ned = numpy.array(
[float(mensra.C_AZ_NC), float(mensra.C_AZ_EC), float(mensra.C_AZ_DC)], dtype='float64')
set_uvects(ned_to_ecf(row_unit_ned, scp_ecf, absolute_coords=False),
ned_to_ecf(col_unit_ned, scp_ecf, absolute_coords=False))
def extract_corners() -> None:
icps = extract_image_corners(img_header)
if icps is None:
return
# TODO: include symmetry transform issue
set_image_corners(icps, override=False)
def extract_start() -> None:
# noinspection PyBroadException
try:
date_str = img_header.IDATIM
collect_start = numpy.datetime64(
_iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
date_str[8:10], date_str[10:12], date_str[12:14]), 'us')
except Exception:
logger.info('failed extracting start time from IDATIM tre')
return
set_collect_start(collect_start, override=False)
# noinspection PyUnresolvedReferences
tres = None if img_header.ExtendedHeader.data is None \
else img_header.ExtendedHeader.data # type: Union[None, TREList]
collection_info = get_collection_info()
image_data = get_image_data()
the_sicd = SICDType(
CollectionInfo=collection_info,
ImageData=image_data)
# apply the various tres and associated logic
# NB: this should generally be in order of preference
try_CMETAA()
try_AIMIDB()
try_AIMIDA()
try_ACFT()
try_BLOCKA()
try_MPDSRA()
try_MENSRA()
try_MENSRB()
extract_corners()
extract_start()
return the_sicd
# Helper methods for transforming data
def get_linear_magnitude_scaling(scale_factor: float):
"""
Get a linear magnitude scaling function, to correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
def scaler(data):
return data/scale_factor
return scaler
def get_linear_power_scaling(scale_factor):
"""
Get a linear power scaling function, to derive correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
def scaler(data):
return numpy.sqrt(data/scale_factor)
return scaler
def get_log_magnitude_scaling(scale_factor, db_per_step):
"""
Gets the log magnitude scaling function, to derive correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
db_per_step : float
        The db_per_step factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
lin_scaler = get_linear_magnitude_scaling(scale_factor)
def scaler(data):
return lin_scaler(numpy.exp(0.05*numpy.log(10)*db_per_step*data))
return scaler
def get_log_power_scaling(scale_factor, db_per_step):
"""
Gets the log power scaling function, to derive correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
db_per_step : float
        The db_per_step factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
power_scaler = get_linear_power_scaling(scale_factor)
def scaler(data):
return power_scaler(numpy.exp(0.1*numpy.log(10)*db_per_step*data))
return scaler
def get_linlog_magnitude_scaling(scale_factor, tipping_point):
"""
Gets the magnitude scaling function for the model which
is initially linear, and then switches to logarithmic beyond a fixed
tipping point.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
tipping_point : float
The tipping point between the two models.
Returns
-------
callable
"""
db_per_step = 20*numpy.log10(tipping_point)/tipping_point
log_scaler = get_log_magnitude_scaling(scale_factor, db_per_step)
def scaler(data):
out = data/scale_factor
above_tipping = (out > tipping_point)
out[above_tipping] = log_scaler(data[above_tipping])
return out
return scaler
class ApplyAmplitudeScalingFunction(ComplexFormatFunction):
__slots__ = ('_scaling_function', )
_allowed_ordering = ('MP', 'PM')
has_inverse = False
def __init__(
self,
raw_dtype: Union[str, numpy.dtype],
order: str,
scaling_function: Optional[Callable] = None,
raw_shape: Optional[Tuple[int, ...]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Tuple[int, ...]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
band_dimension: int = -1):
"""
Parameters
----------
raw_dtype : str|numpy.dtype
The raw datatype. Valid options dependent on the value of order.
order : str
One of `('MP', 'PM')`, with allowable raw_dtype
`('uint8', 'uint16', 'uint32', 'float32', 'float64')`.
scaling_function : Optional[Callable]
raw_shape : None|Tuple[int, ...]
formatted_shape : None|Tuple[int, ...]
reverse_axes : None|Tuple[int, ...]
transpose_axes : None|Tuple[int, ...]
band_dimension : int
Which band is the complex dimension, **after** the transpose operation.
"""
self._scaling_function = None
ComplexFormatFunction.__init__(
self, raw_dtype, order, raw_shape=raw_shape, formatted_shape=formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes, band_dimension=band_dimension)
self._set_scaling_function(scaling_function)
@property
def scaling_function(self) -> Optional[Callable]:
"""
The magnitude scaling function.
Returns
-------
None|Callable
"""
return self._scaling_function
def _set_scaling_function(self, value: Optional[Callable]):
if value is None:
self._scaling_function = None
return
if not isinstance(value, Callable):
raise TypeError('scaling_function must be callable')
self._scaling_function = value
def _forward_magnitude_theta(
self,
data: numpy.ndarray,
out: numpy.ndarray,
magnitude: numpy.ndarray,
theta: numpy.ndarray,
subscript: Tuple[slice, ...]) -> None:
if self._scaling_function is not None:
magnitude = self._scaling_function(magnitude)
ComplexFormatFunction._forward_magnitude_theta(
self, data, out, magnitude, theta, subscript)
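# Illustrative sketch, not part of the original module: for magnitude/phase
# ('MP') data stored as uint8 with a linear magnitude remap, a format function
# might be constructed as below; the scale factor of 2.0 is hypothetical.
#     fmt = ApplyAmplitudeScalingFunction(
#         'uint8', 'MP', scaling_function=get_linear_magnitude_scaling(2.0))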
def _extract_transform_data(
image_header: Union[ImageSegmentHeader, ImageSegmentHeader0],
band_dimension: int):
"""
Helper function for defining necessary transform_data definition for
interpreting image segment data.
Parameters
----------
image_header : ImageSegmentHeader|ImageSegmentHeader0
Returns
-------
None|str|callable
"""
if len(image_header.Bands) != 2:
raise ValueError('Got unhandled case of {} image bands'.format(len(image_header.Bands)))
complex_order = image_header.Bands[0].ISUBCAT+image_header.Bands[1].ISUBCAT
if complex_order not in ['IQ', 'QI', 'MP', 'PM']:
raise ValueError('Got unhandled complex order `{}`'.format(complex_order))
bpp = int(image_header.NBPP/8)
pv_type = image_header.PVTYPE
if pv_type == 'INT':
raw_dtype = '>u{}'.format(bpp)
elif pv_type == 'SI':
raw_dtype = '>i{}'.format(bpp)
elif pv_type == 'R':
raw_dtype = '>f{}'.format(bpp)
else:
raise ValueError('Got unhandled PVTYPE {}'.format(pv_type))
# noinspection PyUnresolvedReferences
    tre = None if image_header.ExtendedHeader.data is None else \
        image_header.ExtendedHeader.data['CMETAA']  # type: Optional[CMETAA]
if tre is None:
return ComplexFormatFunction(raw_dtype, complex_order, band_dimension=band_dimension)
cmetaa = tre.DATA
if cmetaa.CMPLX_PHASE_SCALING_TYPE.strip() != 'NS':
raise ValueError(
'Got unsupported CMPLX_PHASE_SCALING_TYPE {}'.format(
cmetaa.CMPLX_PHASE_SCALING_TYPE))
remap_type = cmetaa.CMPLX_MAG_REMAP_TYPE.strip()
if remap_type == 'NS':
if complex_order in ['IQ', 'QI']:
return ComplexFormatFunction(raw_dtype, complex_order, band_dimension=band_dimension)
else:
raise ValueError(
'Got unexpected state where cmetaa.CMPLX_MAG_REMAP_TYPE is "NS",\n\t '
'but Band[0].ISUBCAT/Band[1].ISUBCAT = `{}`'.format(complex_order))
elif remap_type not in ['LINM', 'LINP', 'LOGM', 'LOGP', 'LLM']:
raise ValueError('Got unsupported CMETAA.CMPLX_MAG_REMAP_TYPE {}'.format(remap_type))
if complex_order not in ['MP', 'PM']:
raise ValueError(
'Got unexpected state where cmetaa.CMPLX_MAG_REMAP_TYPE is `{}`,\n\t'
'but Band[0].ISUBCAT/Band[1].ISUBCAT = `{}`'.format(
remap_type, complex_order))
scale_factor = float(cmetaa.CMPLX_LIN_SCALE)
if remap_type == 'LINM':
scaling_function = get_linear_magnitude_scaling(scale_factor)
elif remap_type == 'LINP':
scaling_function = get_linear_power_scaling(scale_factor)
elif remap_type == 'LOGM':
# NB: there is nowhere in the CMETAA structure to define
# the db_per_step value. Strangely, the use of this value is laid
# out in the STDI-0002 standards document, which defines CMETAA
# structure. We will generically use a value which maps the
# max uint8 value to the max int16 value.
db_per_step = 300*numpy.log(2)/255.0
scaling_function = get_log_magnitude_scaling(scale_factor, db_per_step)
elif remap_type == 'LOGP':
db_per_step = 300*numpy.log(2)/255.0
scaling_function = get_log_power_scaling(scale_factor, db_per_step)
elif remap_type == 'LLM':
scaling_function = get_linlog_magnitude_scaling(
scale_factor, int(cmetaa.CMPLX_LINLOG_TP))
else:
raise ValueError('Got unhandled CMETAA.CMPLX_MAG_REMAP_TYPE {}'.format(remap_type))
return ApplyAmplitudeScalingFunction(raw_dtype, complex_order, scaling_function, band_dimension=band_dimension)
######
# The interpreter and reader objects
class ComplexNITFDetails(NITFDetails):
"""
Details object for NITF file containing complex data.
"""
__slots__ = (
'_segment_status', '_segment_bands', '_sicd_meta', '_reverse_axes', '_transpose_axes')
def __init__(
self,
file_name: str,
reverse_axes: Union[None, int, Sequence[int]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None):
"""
Parameters
----------
file_name : str
file name for a NITF file containing a complex SICD
reverse_axes : None|Sequence[int]
Any entries should be restricted to `{0, 1}`. The presence of
`0` means to reverse the rows (in the raw sense), and the presence
of `1` means to reverse the columns (in the raw sense).
transpose_axes : None|Tuple[int, ...]
If presented this should be only `(1, 0)`.
"""
self._reverse_axes = reverse_axes
self._transpose_axes = transpose_axes
self._segment_status = None
self._sicd_meta = None
self._segment_bands = None
NITFDetails.__init__(self, file_name)
self._find_complex_image_segments()
if len(self.sicd_meta) == 0:
raise SarpyIOError(
'No complex valued image segments found in file {}'.format(file_name))
@property
def reverse_axes(self) -> Union[None, int, Sequence[int]]:
return self._reverse_axes
@property
def transpose_axes(self) -> Optional[Tuple[int, ...]]:
return self._transpose_axes
@property
def segment_status(self) -> Tuple[bool, ...]:
"""
Tuple[bool, ...]: Where each image segment is viable for use.
"""
return self._segment_status
@property
def sicd_meta(self) -> Tuple[SICDType, ...]:
"""
Tuple[SICDType, ...]: The best inferred sicd structures.
"""
return self._sicd_meta
@property
def segment_bands(self) -> Tuple[Tuple[int, Optional[int]], ...]:
"""
This describes the structure for the output data segments from the NITF,
with each entry of the form `(image_segment, output_band)`, where
`output_band` will be `None` if the image segment has exactly one
complex band.
Returns
-------
Tuple[Tuple[int, Optional[int]], ...]
The band details for use.
"""
return self._segment_bands
def _check_band_details(
self,
index: int,
sicd_meta: List,
segment_status: List,
segment_bands: List):
if len(segment_status) != index:
raise ValueError('Inconsistent status checking state')
image_header = self.img_headers[index]
if image_header.ICAT.strip() not in ['SAR', 'SARIQ']:
segment_status.append(False)
return
# construct a preliminary sicd
sicd = extract_sicd(image_header, self._transpose_axes is not None)
bands = image_header.Bands
pvtype = image_header.PVTYPE
# handle odd bands
if (len(bands) % 2) == 1:
if image_header.PVTYPE != 'C':
# it's not complex, so we're done
segment_status.append(False)
return
segment_status.append(True)
sicd_meta.append(sicd)
segment_bands.append((index, len(bands)))
return
# we have an even number of bands - ensure that the bands are marked
# IQ/QI/MP/PM
order = bands[0].ISUBCAT + bands[1].ISUBCAT
if order not in ['IQ', 'QI', 'MP', 'PM']:
segment_status.append(False)
return
if len(bands) == 2:
# this should be the most common by far
segment_status.append(True)
sicd_meta.append(sicd)
segment_bands.append((index, 1))
return
for i in range(2, len(bands), 2):
if order != bands[i].ISUBCAT + bands[i+1].ISUBCAT:
logging.error(
                    'Image segment appears to be multiband with switched complex ordering')
segment_status.append(False)
return
if order in ['IQ', 'QI']:
if pvtype not in ['SI', 'R']:
logging.error(
'Image segment appears to be complex of order `{}`, \n\t'
'but PVTYPE is `{}`'.format(order, pvtype))
                segment_status.append(False)
                return
if order in ['MP', 'PM']:
if pvtype not in ['INT', 'R']:
logging.error(
'Image segment appears to be complex of order `{}`, \n\t'
'but PVTYPE is `{}`'.format(order, pvtype))
                segment_status.append(False)
                return
segment_status.append(True)
sicd_meta.append(sicd)
segment_bands.append((index, int(len(bands)/2)))
def _find_complex_image_segments(self):
"""
Find complex image segments.
Returns
-------
None
"""
sicd_meta = []
segment_status = []
segment_bands = []
for index in range(len(self.img_headers)):
self._check_band_details(index, sicd_meta, segment_status, segment_bands)
self._segment_status = tuple(segment_status)
use_sicd_meta = []
use_segment_bands = []
for (the_index, out_bands), sicd in zip(segment_bands, sicd_meta):
if out_bands == 1:
use_sicd_meta.append(sicd)
use_segment_bands.append((the_index, None))
else:
for j in range(out_bands):
use_sicd_meta.append(sicd.copy())
use_segment_bands.append((the_index, j))
self._sicd_meta = tuple(use_sicd_meta)
self._segment_bands = tuple(use_segment_bands)
class ComplexNITFReader(NITFReader, SICDTypeReader):
"""
A reader for complex valued NITF elements, this should be explicitly tried AFTER
the SICDReader.
"""
def __init__(
self,
nitf_details: Union[str, ComplexNITFDetails],
reverse_axes: Union[None, int, Sequence[int]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None):
"""
Parameters
----------
nitf_details : str|ComplexNITFDetails
reverse_axes : None|Sequence[int]
Any entries should be restricted to `{0, 1}`. The presence of
`0` means to reverse the rows (in the raw sense), and the presence
of `1` means to reverse the columns (in the raw sense).
transpose_axes : None|Tuple[int, ...]
If presented this should be only `(1, 0)`.
"""
if isinstance(nitf_details, str):
nitf_details = ComplexNITFDetails(
nitf_details, reverse_axes=reverse_axes, transpose_axes=transpose_axes)
if not isinstance(nitf_details, ComplexNITFDetails):
raise TypeError('The input argument for ComplexNITFReader must be a filename or '
'ComplexNITFDetails object.')
SICDTypeReader.__init__(self, None, nitf_details.sicd_meta)
NITFReader.__init__(
self,
nitf_details,
reader_type="SICD",
reverse_axes=nitf_details.reverse_axes,
transpose_axes=nitf_details.transpose_axes)
self._check_sizes()
@property
def nitf_details(self) -> ComplexNITFDetails:
"""
ComplexNITFDetails: The NITF details object.
"""
# noinspection PyTypeChecker
return self._nitf_details
def get_nitf_dict(self):
"""
Populate a dictionary with the pertinent NITF header information. This
is for use in more faithful preservation of NITF header information
in copying or rewriting sicd files.
Returns
-------
dict
"""
out = {}
security = {}
security_obj = self.nitf_details.nitf_header.Security
# noinspection PyProtectedMember
for field in NITFSecurityTags._ordering:
value = getattr(security_obj, field).strip()
if value != '':
security[field] = value
if len(security) > 0:
out['Security'] = security
out['OSTAID'] = self.nitf_details.nitf_header.OSTAID
out['FTITLE'] = self.nitf_details.nitf_header.FTITLE
return out
def populate_nitf_information_into_sicd(self):
"""
Populate some pertinent NITF header information into the SICD structure.
This provides more faithful copying or rewriting options.
"""
nitf_dict = self.get_nitf_dict()
for sicd_meta in self._sicd_meta:
sicd_meta.NITF = copy.deepcopy(nitf_dict)
def depopulate_nitf_information(self):
"""
Eliminates the NITF information dict from the SICD structure.
"""
for sicd_meta in self._sicd_meta:
sicd_meta.NITF = {}
def get_format_function(
self,
raw_dtype: numpy.dtype,
complex_order: Optional[str],
lut: Optional[numpy.ndarray],
band_dimension: int,
image_segment_index: Optional[int] = None,
**kwargs) -> Optional[FormatFunction]:
image_header = self.nitf_details.img_headers[image_segment_index]
bands = len(image_header.Bands)
if complex_order is not None and bands == 2:
return _extract_transform_data(image_header, band_dimension)
# TODO: strange nonstandard float16 handling?
return NITFReader.get_format_function(
self, raw_dtype, complex_order, lut, band_dimension, image_segment_index, **kwargs)
def _check_image_segment_for_compliance(
self,
index: int,
img_header: Union[ImageSegmentHeader, ImageSegmentHeader0]) -> bool:
return self.nitf_details.segment_status[index]
def find_image_segment_collections(self) -> Tuple[Tuple[int, ...]]:
return tuple((entry[0], ) for entry in self.nitf_details.segment_bands)
def create_data_segment_for_collection_element(self, collection_index: int) -> DataSegment:
the_index, the_band = self.nitf_details.segment_bands[collection_index]
if the_index not in self._image_segment_data_segments:
data_segment = self.create_data_segment_for_image_segment(the_index, apply_format=True)
else:
data_segment = self._image_segment_data_segments[the_index]
if the_band is None:
return data_segment
else:
return SubsetSegment(data_segment, (slice(None, None, 1), slice(None, None, 1), slice(the_band, the_band+1, 1)), 'formatted', close_parent=True)
def final_attempt(file_name: str) -> Optional[ComplexNITFReader]:
"""
Contingency check to open for some other complex NITF type file.
Returns a reader instance, if so.
Parameters
----------
file_name : str|BinaryIO
the file_name to check
Returns
-------
ComplexNITFReader|None
"""
if is_file_like(file_name):
return None
try:
nitf_details = ComplexNITFDetails(file_name)
        logger.info('File {} is determined to be some other format complex NITF.'.format(file_name))
return ComplexNITFReader(nitf_details)
except (SarpyIOError, ValueError):
return None
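# Illustrative usage sketch, not part of the original module; the file name is
# hypothetical, and final_attempt returns None when the file cannot be read as
# a complex NITF:
#     reader = final_attempt('/path/to/candidate_file.ntf')
#     if reader is not None:
#         print(reader.nitf_details.sicd_meta)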
| 1.773438
| 2
|
babble/__init__.py
|
billchenxi/babble
| 130
|
12777587
|
from .explanation import Explanation
from .parsing import Rule, Grammar, Parse, SemanticParser
from .filter_bank import FilterBank
from .utils import ExplanationIO, link_explanation_candidates
from .babbler import Babbler, BabbleStream
| 0.980469
| 1
|
medium/129_sum_root_to_leaf_nodes.py
|
Sukhrobjon/leetcode
| 0
|
12777588
|
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def sumNumbers(self, root):
"""
:type root: TreeNode
:rtype: int
"""
all_path = []
n_sum = 0
path_sum = self.root_to_leaf(root, n_sum, all_path.append)
        print("all_paths:", all_path, n_sum)
        print(sum(all_path))
        return sum(all_path)
def root_to_leaf(self, node, curr_sum, all_path, path=None):
"""
Helper function to traverse the tree and calculate the path
sum
"""
if path is None:
path = []
if node is None:
# sum of all
print(f"when it hit the none: allpath: {(all_path)}, sum: {curr_sum}, path: {path}")
return all_path
path.append(node.val)
# go the left
if node.left is None and node.right is None:
# calculate the sum
path_val = int("".join(str(num) for num in path))
all_path(path_val)
print('curr path:', path_val)
curr_sum += path_val
print("curr_sum:", curr_sum)
self.root_to_leaf(node.left, curr_sum, all_path, path)
self.root_to_leaf(node.right, curr_sum, all_path, path)
path.pop()
if __name__ == "__main__":
root = TreeNode(4)
root.left = TreeNode(9)
root.right = TreeNode(0)
root.left.left = TreeNode(5)
root.left.right = TreeNode(1)
tree = Solution()
result = tree.sumNumbers(root)
print(result)
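# Note (added for clarity, not in the original): for the tree built above the
# root-to-leaf numbers are 495, 491 and 40, so the expected result is 1026.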
| 3.984375
| 4
|
airobot/ee_tool/simple_gripper_mimic_pybullet.py
|
rhett-chen/airobot
| 51
|
12777589
|
import threading
import time
import airobot.utils.common as arutil
from airobot.ee_tool.simple_gripper_pybullet import SimpleGripperPybullet
from airobot.utils.arm_util import wait_to_reach_jnt_goal
class SimpleGripperMimicPybullet(SimpleGripperPybullet):
"""
A base class for gripper with mimic joints in pybullet.
Args:
cfgs (YACS CfgNode): configurations for the gripper.
pb_client (BulletClient): pybullet client.
Attributes:
cfgs (YACS CfgNode): configurations for the gripper.
gripper_close_angle (float): position value corresponding to the
fully closed position of the gripper.
gripper_open_angle (float): position value corresponding to the
fully open position of the gripper.
jnt_names (list): names of the gripper joints.
gripper_jnt_ids (list): pybullet joint ids of the gripper joints.
robot_id (int): robot id in Pybullet.
jnt_to_id (dict): mapping from the joint name to joint id.
"""
def __init__(self, cfgs, pb_client):
super(SimpleGripperMimicPybullet, self).__init__(cfgs=cfgs, pb_client=pb_client)
self._gripper_mimic_coeff = self.cfgs.EETOOL.MIMIC_COEFF
self._mthread_started = False
def feed_robot_info(self, robot_id, jnt_to_id):
"""
Setup the gripper, pass the robot info from the arm to the gripper.
Args:
robot_id (int): robot id in Pybullet.
jnt_to_id (dict): mapping from the joint name to joint id.
"""
super().feed_robot_info(robot_id, jnt_to_id)
# if the gripper has been activated once,
# the following code is used to prevent starting
# a new thread after the arm reset if a thread has been started
if not self._mthread_started:
self._mthread_started = True
# gripper thread
self._th_gripper = threading.Thread(target=self._th_mimic_gripper)
self._th_gripper.daemon = True
self._th_gripper.start()
else:
return
def set_jpos(self, pos, wait=True, ignore_physics=False):
"""
Set the gripper position.
Args:
pos (float): joint position.
wait (bool): wait until the joint position is set
to the target position.
Returns:
bool: A boolean variable representing if the action is
successful at the moment when the function exits.
"""
joint_name = self.jnt_names[0]
tgt_pos = arutil.clamp(
pos,
min(self.gripper_open_angle, self.gripper_close_angle),
max(self.gripper_open_angle, self.gripper_close_angle))
jnt_id = self.jnt_to_id[joint_name]
if ignore_physics:
self._zero_vel_mode()
mic_pos = self._mimic_gripper(pos)
self._hard_reset(mic_pos)
success = True
else:
self._pb.setJointMotorControl2(self.robot_id,
jnt_id,
self._pb.POSITION_CONTROL,
targetPosition=tgt_pos,
force=self._max_torque)
if not self._pb.in_realtime_mode():
self._set_rest_joints(tgt_pos)
success = False
if self._pb.in_realtime_mode() and wait:
success = wait_to_reach_jnt_goal(
tgt_pos,
get_func=self.get_jpos,
joint_name=joint_name,
get_func_derv=self.get_jvel,
timeout=self.cfgs.ARM.TIMEOUT_LIMIT,
max_error=self.cfgs.ARM.MAX_JOINT_ERROR
)
return success
def get_jpos(self):
"""
Return the joint position(s) of the gripper.
Returns:
float: joint position.
"""
if not self._is_activated:
raise RuntimeError('Call activate function first!')
jnt_id = self.jnt_to_id[self.jnt_names[0]]
pos = self._pb.getJointState(self.robot_id, jnt_id)[0]
return pos
def get_jvel(self):
"""
Return the joint velocity of the gripper.
Returns:
float: joint velocity.
"""
if not self._is_activated:
raise RuntimeError('Call activate function first!')
jnt_id = self.jnt_to_id[self.jnt_names[0]]
vel = self._pb.getJointState(self.robot_id, jnt_id)[1]
return vel
def _mimic_gripper(self, joint_val):
"""
Given the value for the first joint,
mimic the joint values for the rest joints.
"""
jnt_vals = [joint_val]
for i in range(1, len(self.jnt_names)):
jnt_vals.append(joint_val * self._gripper_mimic_coeff[i])
return jnt_vals
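    # Worked example for the method above (hypothetical coefficients, not part
    # of the original module): with MIMIC_COEFF = [1.0, -1.0, 1.0] and
    # joint_val = 0.4, _mimic_gripper returns [0.4, -0.4, 0.4].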
def _th_mimic_gripper(self):
"""
Make all the other joints of the gripper
follow the motion of the first joint of the gripper.
"""
while True:
if self._is_activated and self._pb.in_realtime_mode():
self._set_rest_joints()
time.sleep(0.005)
def _set_rest_joints(self, gripper_pos=None):
max_torq = self._max_torque
max_torques = [max_torq] * (len(self.jnt_names) - 1)
if gripper_pos is None:
gripper_pos = self.get_jpos()
gripper_poss = self._mimic_gripper(gripper_pos)[1:]
gripper_vels = [0.0] * len(max_torques)
self._pb.setJointMotorControlArray(self.robot_id,
self.gripper_jnt_ids[1:],
self._pb.POSITION_CONTROL,
targetPositions=gripper_poss,
targetVelocities=gripper_vels,
forces=max_torques)
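# Illustrative usage sketch, not part of the original module; cfgs, pb_client,
# robot_id and jnt_to_id are assumed to come from the surrounding airobot
# robot setup:
#     gripper = SimpleGripperMimicPybullet(cfgs, pb_client)
#     gripper.feed_robot_info(robot_id, jnt_to_id)
#     gripper.set_jpos(gripper.gripper_open_angle, wait=True)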
| 2.375
| 2
|
old/Agent.py
|
Leonard1904/reinforcement-learning
| 0
|
12777590
|
<filename>old/Agent.py
import threading
import gym
import time
import cv2
import numpy as np
from Network import Network
from scipy.misc import imresize
from scipy.signal import lfilter
class Memory:
def __init__(self):
self.states = []
self.actions = []
self.rewards = []
def store(self, state, action, reward):
self.states.append(state)
self.actions.append(action)
self.rewards.append(reward)
def clear(self):
self.states.clear()
self.actions.clear()
self.rewards.clear()
def size(self):
return len(self.actions)
class Agent(threading.Thread):
save_lock = threading.Lock()
global_episode = 0
global_step = 0
global_max = -21
global_moving_average = -21
global_moving_update = 0
def __init__(self, name,
game, state_size, action_size,
global_net, _sess,
args,
feature_layers=None, critic_layers=None, actor_layers=None
):
super(Agent, self).__init__()
self.args = args
self.phi_length = 4
self.name = name
self.state_size = state_size
self.action_size = action_size
self.sess = _sess
self.global_net = global_net
self.env = gym.make(game)
self.local = Network(name, state_size, action_size, global_net.optimizer,
feature_layers, critic_layers, actor_layers)
self.copy_to_local_op = self.local.transfer_weights('global')
self.mem = Memory()
def _discounted_reward(self, rewards):
return lfilter([1], [1, -self.args.gamma], x=rewards[::-1])[::-1]
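    # Note (added for clarity, not in the original): the lfilter call above
    # evaluates the recurrence R_t = r_t + gamma * R_{t+1} over the reversed
    # reward sequence, i.e. it returns the discounted return at every step.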
def act_post_func(self, action):
return action + 1
def _preprocess(self, image, height_range=(35, 193), bg=(144, 72, 17)):
image = image[height_range[0]:height_range[1], ...]
image = imresize(image, (self.state_size[0], self.state_size[1]), interp="nearest")
H, W, _ = image.shape
R = image[..., 0]
G = image[..., 1]
B = image[..., 2]
cond = (R == bg[0]) & (G == bg[1]) & (B == bg[2])
image = np.zeros((H, W))
image[~cond] = 1
image = np.expand_dims(image, axis=2)
return image
def play_episode(self):
env, local, mem, args, global_net, sess = self.env, self.local, self.mem, self.args, self.global_net, self.sess
s, done, step, counting_step, ep_reward, update_count = env.reset(), False, 0, 0, 0, 0
s = self._preprocess(s)
state_diff = s
mem.clear()
start = time.time()
self.sess.run(self.copy_to_local_op)
while not done:
# a = local.get_action(np.array(mem.states + [s])[-4:], sess)
a = self.local.get_action(state_diff, self.sess)
s_, r, done, _ = env.step(a + 1)
s_ = self._preprocess(s_)
ep_reward, step, counting_step = ep_reward + r, step + 1, counting_step + 1
mem.store(state_diff, a, r)
state_diff = s_ - s
s = s_
# if counting_step >= args.update_freq or done:
# if counting_step >= args.update_freq or r != 0 or done:
if r == -1 or r == 1 or done:
# states = np.array(mem.states + [s])
# obs = [mem.states[i:i + 4] for i in range(mem.size())]
values = np.squeeze(self.local.get_values(mem.states, self.sess))
discounted_reward = self._discounted_reward(mem.rewards)
discounted_reward -= np.mean(discounted_reward)
discounted_reward /= np.std(discounted_reward)
# A(s_t) = R_t = gamma ** t * V(s') - V(s)
# advantages = discounted_reward + (1 - np.array(mem.rewards) ** 2) * self.args.gamma * values[1:] - values[:-1]
advantages = discounted_reward - values
advantages -= np.mean(advantages)
advantages /= np.std(advantages) + 1e-8
gradients = self.sess.run(
self.local.gradients,
feed_dict={
self.local.state: np.array(mem.states),
self.local.selected_action: np.array(mem.actions),
self.local.discounted_reward: discounted_reward,
self.local.advantages: advantages
}
)
feed = []
for (grad, _), (placeholder, _) in zip(gradients, self.global_net.gradients_placeholders):
feed.append((placeholder, grad))
feed = dict(feed)
self.sess.run(self.global_net.apply_gradients, feed)
self.sess.run(self.copy_to_local_op)
update_count, counting_step = update_count + 1, 0
mem.clear()
# mem.states.extend([s, s, s])
if done:
episode_time = update_count / (time.time() - start)
with Agent.save_lock:
Agent.global_moving_average = Agent.global_moving_average * .99 + ep_reward * .01
Agent.global_moving_update = Agent.global_moving_update * .99 + episode_time * .01 \
if Agent.global_moving_update != 0 else episode_time
print(
# f'{Agent.global_episode}|{Agent.global_step:,}/{int(self.args.max_steps):,}|'
f'{Agent.global_episode}|{Agent.global_step:,}|'
f' Average: {int(Agent.global_moving_average)}|{(self.args.num_agents * Agent.global_moving_update):.2f} up/sec. '
f'{self.name} gets {ep_reward} in {step} steps. '
)
self.global_net.summary(sess, Agent.global_moving_average, Agent.global_episode)
Agent.global_episode += 1
Agent.global_step += step
if Agent.global_max < ep_reward:
Agent.global_max = ep_reward
return ep_reward, step, update_count, time.time() - start
def run(self):
while True:
# while Agent.global_step < self.args.max_steps:
self.play_episode()
print(Agent.global_max)
class AgentPong(Agent):
def __init__(self, name,
game, state_size, action_size,
global_net, _sess,
args,
feature_layers=None, critic_layers=None, actor_layers=None):
super().__init__(name,
game, state_size, action_size,
global_net, _sess,
args,
feature_layers, critic_layers, actor_layers)
def act_post_func(self, a):
return a + 1
def _discounted_reward(self, rewards):
return lfilter([1], [1, -self.args.gamma], x=rewards[::-1])[::-1]
def _preprocess(self, image, height_range=(84, 84)):
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = cv2.resize(image, (height_range[0], height_range[1]), interpolation=cv2.INTER_LINEAR)
return image / 255.
# def _preprocess(self, image, height_range=(35, 193), bg=(144, 72, 17)):
# image = image[height_range[0]:height_range[1], ...]
# image = imresize(image, (80, 80), interp="nearest")
#
# H, W, _ = image.shape
#
# R = image[..., 0]
# G = image[..., 1]
# B = image[..., 2]
#
# cond = (R == bg[0]) & (G == bg[1]) & (B == bg[2])
#
# image = np.zeros((H, W))
# image[~cond] = 1
#
# image = np.expand_dims(image, axis=2)
#
# return image
| 2.3125
| 2
|
tools/accuracy_checker/accuracy_checker/representation/segmentation_representation.py
|
zhoub/dldt
| 0
|
12777591
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from enum import Enum
import numpy as np
from .base_representation import BaseRepresentation
from ..data_readers import BaseReader
class GTMaskLoader(Enum):
PILLOW = 0
OPENCV = 1
SCIPY = 2
NIFTI = 3
class SegmentationRepresentation(BaseRepresentation):
pass
class SegmentationAnnotation(SegmentationRepresentation):
LOADERS = {
GTMaskLoader.PILLOW: 'pillow_imread',
GTMaskLoader.OPENCV: 'opencv_imread',
GTMaskLoader.SCIPY: 'scipy_imread',
GTMaskLoader.NIFTI: 'nifti_reader'
}
def __init__(self, identifier, path_to_mask, mask_loader=GTMaskLoader.PILLOW):
"""
Args:
identifier: object identifier (e.g. image name).
path_to_mask: path where segmentation mask should be loaded from. The path is relative to data source.
mask_loader: back-end, used to load segmentation masks.
"""
super().__init__(identifier)
self._mask_path = path_to_mask
self._mask_loader = mask_loader
self._mask = None
@property
def mask(self):
return self._mask if self._mask is not None else self._load_mask()
@mask.setter
def mask(self, value):
self._mask = value
def _load_mask(self):
if self._mask is None:
loader = BaseReader.provide(self.LOADERS.get(self._mask_loader), self.metadata['data_source'])
if self._mask_loader == GTMaskLoader.PILLOW:
loader.convert_to_rgb = False
mask = loader.read(self._mask_path)
return mask.astype(np.uint8)
return self._mask
class SegmentationPrediction(SegmentationRepresentation):
def __init__(self, identifiers, mask):
"""
Args:
identifiers: object identifier (e.g. image name).
mask: array with shape (n_classes, height, width) of probabilities at each location.
"""
super().__init__(identifiers)
self.mask = mask
class BrainTumorSegmentationAnnotation(SegmentationAnnotation):
def __init__(self, identifier, path_to_mask):
super().__init__(identifier, path_to_mask, GTMaskLoader.NIFTI)
class BrainTumorSegmentationPrediction(SegmentationPrediction):
pass
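# Illustrative usage sketch, not part of the original module; the identifier
# and mask shape are made up: a prediction holding per-class probabilities for
# a 3-class segmentation of a 4x4 image could be built as
#     prediction = SegmentationPrediction('image_0001.png', np.random.rand(3, 4, 4))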
| 2.171875
| 2
|
Website/forms.py
|
Astatine404/spiritus
| 1
|
12777592
|
<filename>Website/forms.py<gh_stars>1-10
from django import forms
from django.forms import ModelForm
from .models import Music
class MusicForm(forms.ModelForm):
video = forms.FileField(label='Video file')
class Meta:
model = Music
fields = {'video'}
| 2.125
| 2
|
noxfile.py
|
larryturner/diamondback
| 4
|
12777593
|
<reponame>larryturner/diamondback<gh_stars>1-10
""" **Description**
Nox project management.
**Example**
::
nox --list
nox --sessions clean dist docs image notebook push status tag tests
**License**
© 2020 - 2021 Schneider Electric Industries SAS. All rights reserved.
**Author**
<NAME>, Schneider Electric, Analytics & AI, 2020-10-12.
"""
import glob
import nox
import os
import requests
import shutil
import time
repository = os.getcwd( ).split( os.path.sep )[ -1 ]
@nox.session( venv_backend = 'none' )
def clean( session ) -> None :
""" Clean repository.
"""
for x in ( '.mypy_cache', '.nox', '.pytest_cache', 'build', 'dist', 'docs' ) :
shutil.rmtree( x, ignore_errors = True )
for x in [ x for x in glob.glob( '**/', recursive = True ) if ( '__pycache__' in x ) ] :
shutil.rmtree( x, ignore_errors = True )
@nox.session( venv_backend = 'none' )
def dist( session ) -> None :
""" Build distribution.
"""
if ( os.path.exists( 'setup.py' ) ) :
shutil.rmtree( 'dist', ignore_errors = True )
session.run( 'python', 'setup.py', 'sdist', 'bdist_wheel', 'build' )
if ( os.path.exists( 'service' ) ) :
session.install( glob.glob( 'dist/*.whl' )[ 0 ] )
session.run( 'git', 'add', './dist/*' )
@nox.session( venv_backend = 'none' )
def docs( session ) -> None :
""" Build documentation.
"""
if ( os.path.exists( 'sphinx' ) ) :
dist( session )
shutil.rmtree( 'docs', ignore_errors = True )
os.makedirs( 'docs' )
session.run( 'sphinx-apidoc', '--force', '--output', './sphinx', '.', 'tests' )
session.run( 'sphinx-build', './sphinx', './docs' )
session.run( 'git', 'add', './docs/*' )
session.run( 'git', 'add', './sphinx/*' )
@nox.session( venv_backend = 'none' )
def image( session ) -> None :
""" Build image.
"""
if ( os.path.exists( 'dockerfile' ) ) :
dist( session )
try :
session.run( 'az', 'acr', 'login', '--name', 'ecaregistry' )
except Exception :
pass
session.run( 'docker', 'build', '--tag', repository, '--build-arg', 'FEED_LOGIN', '--build-arg', 'FEED_PASSWORD', '.' )
@nox.session( venv_backend = 'none' )
def notebook( session ) -> None :
""" Run jupyter notebook.
"""
if ( os.path.exists( 'jupyter' ) ) :
os.chdir( 'jupyter' )
value = [ x for x in glob.glob( '*.ipynb', recursive = True ) ]
if ( value ) :
session.run( 'jupyter', 'notebook', value[ 0 ] )
@nox.session( venv_backend = 'none' )
def push( session ) -> None :
""" Push repository.
"""
if ( os.path.exists( '.git' ) ) :
package = repository.split( '-' )
package = package[ max( len( package ) - 2, 0 ) ]
if ( os.path.exists( package ) ) :
session.run( 'git', 'add', './' + package + '/*' )
if ( os.path.exists( 'service' ) ) :
session.run( 'git', 'add', './service/*' )
if ( os.path.exists( 'tests' ) ) :
session.run( 'git', 'add', './tests/*' )
status( session )
value = input( '[ ' + repository + ' ] message : ' )
if ( value ) :
try :
if ( session.run( 'git', 'commit', '--all', '--message', value ) ) :
session.run( 'git', 'push', 'origin', 'master' )
except Exception :
pass
try :
url = 'https://github.schneider-electric.com'
requests.request( method = 'head', url = url, timeout = 2 )
value = input( '[ ' + repository + ' ] mirror : ' )
if ( value ) :
session.run( 'git', 'push', '--mirror', url + '/' + value + '/' + repository + '.git' )
except Exception :
pass
@nox.session( venv_backend = 'none' )
def status( session ) -> None :
""" Check status.
"""
if ( os.path.exists( '.git' ) ) :
print( '[ ' + repository + ' ]' )
session.run( 'git', 'status', '--short' )
@nox.session( venv_backend = 'none' )
def tag( session ) -> None :
""" Push tag.
"""
if ( os.path.exists( '.git' ) ) :
session.run( 'git', 'tag', '--list' )
value = input( '[ ' + repository + ' ] annotate : ' )
if ( value ) :
session.run( 'git', 'tag', '--annotate', value, '--force', '--message', '.' )
try :
session.run( 'git', 'push', '--force', '--tags' )
except Exception :
pass
@nox.session( venv_backend = 'none' )
def tests( session ) -> None :
""" Run tests.
"""
if ( os.path.exists( 'tests' ) ) :
if ( os.listdir( 'tests' ) ) :
if ( os.path.exists( 'docker-compose.yml' ) ) :
try :
session.run( 'az', 'acr', 'login', '--name', 'ecaregistry' )
except Exception :
pass
try :
session.run( 'docker', 'compose', 'up', '--detach' )
time.sleep( 10.0 )
except Exception :
pass
try :
session.run( 'pytest', '--capture=no', '--verbose' )
shutil.rmtree( '.pytest_cache', ignore_errors = True )
except Exception :
pass
if ( os.path.exists( 'docker-compose.yml' ) ) :
try :
session.run( 'docker', 'compose', 'down' )
except Exception :
pass
| 1.96875
| 2
|
Others/[TCS CodeVita] - Perry the Platypus.py
|
yashbhatt99/HackerRank-Problems
| 10
|
12777594
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 01:52:02 2020
@author: Ravi
"""
def PerryThisIsForYouMyFriend(arr,n):
index = []
prev = n*n-n+1
index.append(prev)
counter = 1
for i in range(n-1):
if counter < n//2+1 :
prev = prev - 2*n + 1
index.append(prev)
counter+=1
else:
counter+=1
prev = prev + 2*n +1
index.append(prev)
finalLi = []
for i in index:
finalLi.append(arr[i-1])
encrypted_msg = ''
for i in finalLi:
encrypted_msg += chr(96+(i%26))
print(encrypted_msg)
t = int(input())
for i in range(t):
arr = list(map(int,input().split(" ")))
PerryThisIsForYouMyFriend(arr[2:],arr[0])
| 3.0625
| 3
|
src/bkl/interpreter/__init__.py
|
johnwbyrd/brakefile
| 118
|
12777595
|
#
# This file is part of Bakefile (http://bakefile.org)
#
# Copyright (C) 2008-2013 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module contains the very core of Bakefile -- the interpreter,
:class:`bkl.interpreter.Interpreter`, and its supporting classes.
"""
import logging
import bkl.parser
import bkl.model
import bkl.api
import bkl.expr
import passes
from builder import Builder
from bkl.error import Error, warning
from bkl.parser import parse_file
logger = logging.getLogger("bkl.interpreter")
class Interpreter(object):
"""
The interpreter is responsible for doing everything necessary to
"translate" input ``.bkl`` files into generated native makefiles. This
includes building a project model from the input, checking it for
correctness, optimizing it and creating outputs for all enabled toolsets.
:class:`Interpreter` provides both high-level interface for single-call
usage (see :meth:`process`) and other methods with finer granularity that
allows you to inspect individual steps (most useful for the test suite).
.. attribute:: model
Model of the project, as :class:`bkl.model.Project`. It's state always
reflects current state of processing.
.. attribute:: toolsets_to_use
Set of toolsets to generate for. This list may contain only a subset of
toolsets the bakefile is written for and may even contain toolsets not
specified in the bakefile.
If :const:`None` (the default), then the toolsets listed in the bakefile
are used.
"""
def __init__(self):
self.model = bkl.model.Project()
self.toolsets_to_use = None
def limit_toolsets(self, toolsets):
"""Sets :attr:`toolsets_to_use`."""
self.toolsets_to_use = set(toolsets)
def process(self, ast):
"""
Interprets input file and generates the outputs.
:param ast: AST of the input file, as returned by
:func:`bkl.parser.parse_file`.
Processing is done in several phases:
1. Basic model is built (see :class:`bkl.interpreter.builder.Builder`).
No optimizations or checks are performed at this point.
2. Several generic optimization and checking passes are run on the
model. Among other things, types correctness and other constraints
are checked, variables are substituted and evaluated.
3. The model is split into several copies, one per output toolset.
4. Further optimization passes are done.
5. Output files are generated.
Step 1 is done by :meth:`add_module`. Steps 2-4 are done by
:meth:`finalize` and step 5 is implemented in :meth:`generate`.
"""
self.add_module(ast, self.model)
self.finalize()
self.generate()
def process_file(self, filename):
"""Like :meth:`process()`, but takes filename as its argument."""
self.process(parse_file(filename))
def add_module(self, ast, parent):
"""
Adds parsed AST to the model, without doing any optimizations. May be
called more than once, with different parsed files.
:param ast: AST of the input file, as returned by
:func:`bkl.parser.parse_file`.
"""
logger.info("processing %s", ast.filename)
submodules = []
b = Builder(on_submodule=lambda fn, pos: submodules.append((fn,pos)))
module = b.create_model(ast, parent)
while submodules:
sub_filename, sub_pos = submodules[0]
submodules.pop(0)
try:
sub_ast = parse_file(sub_filename)
except IOError as e:
if e.filename:
msg = "%s: %s" % (e.strerror, e.filename)
else:
msg = e.strerror
raise Error(msg, pos=sub_pos)
self.add_module(sub_ast, module)
def _call_custom_steps(self, model, func):
for step in bkl.api.CustomStep.all():
logger.debug("invoking custom step %s.%s()", step.name, func)
getattr(step, func)(model)
def finalize(self):
"""
Finalizes the model, i.e. checks it for validity, optimizes, creates
per-toolset models etc.
"""
logger.debug("finalizing the model")
# call any custom steps first:
self._call_custom_steps(self.model, "finalize")
# then apply standard processing:
passes.detect_potential_problems(self.model)
passes.normalize_and_validate_bool_subexpressions(self.model)
passes.normalize_vars(self.model)
passes.validate_vars(self.model)
passes.normalize_paths_in_model(self.model, toolset=None)
passes.simplify_exprs(self.model)
def finalize_for_toolset(self, toolset_model, toolset):
"""
Finalizes after "toolset" variable was set.
"""
passes.remove_disabled_model_parts(toolset_model, toolset)
# TODO: do this in finalize() instead
passes.make_variables_for_missing_props(toolset_model, toolset)
passes.eliminate_superfluous_conditionals(toolset_model)
# This is done second time here (in addition to finalize()) to deal
# with paths added by make_variables_for_missing_props() and paths with
# @builddir (which is toolset specific and couldn't be resolved
# earlier). Ideally we wouldn't do it, but hopefully it's not all that
# inefficient, as no real work is done for paths that are already
# normalized:
passes.normalize_paths_in_model(toolset_model, toolset)
def make_toolset_specific_model(self, toolset, skip_making_copy=False):
"""
Returns toolset-specific model, i.e. one that works only with
*toolset*, has the ``toolset`` property set to it. The caller
still needs to call finalize_for_toolset() on it.
"""
if skip_making_copy:
model = self.model
else:
model = self.model.clone()
# don't use Variable.from_property(), because it's read-only
model.add_variable(bkl.model.Variable.from_property(
model.get_prop("toolset"),
bkl.expr.LiteralExpr(toolset)))
return model
def generate(self):
"""
Generates output files.
"""
# collect all requested toolsets:
toolsets = set()
for module in self.model.modules:
module_toolsets = module.get_variable("toolsets")
if module_toolsets:
toolsets.update(module_toolsets.value.as_py())
if self.toolsets_to_use:
for t in self.toolsets_to_use:
if t not in toolsets:
try:
bkl.api.Toolset.get(t)
except KeyError:
raise Error("unknown toolset \"%s\" given on command line" % t)
warning("toolset \"%s\" is not supported by the project, there may be issues", t)
# Add the forced toolset to all submodules:
for module in self.model.modules:
module_toolsets = module.get_variable("toolsets")
if module_toolsets:
module_toolsets.value.items.append(bkl.expr.LiteralExpr(t))
toolsets = self.toolsets_to_use
toolsets = list(toolsets)
logger.debug("toolsets to generate for: %s", toolsets)
if not toolsets:
raise Error("nothing to generate, \"toolsets\" property is empty")
# call any custom steps first:
self._call_custom_steps(self.model, "generate")
# and generate the outputs (notice that we can avoid making a
# (expensive!) deepcopy of the model for one of the toolsets and can
# reuse the current model):
for toolset in toolsets[:-1]:
self.generate_for_toolset(toolset)
self.generate_for_toolset(toolsets[-1], skip_making_copy=True)
def generate_for_toolset(self, toolset, skip_making_copy=False):
"""
Generates output for given *toolset*.
"""
logger.debug("****** preparing model for toolset %s ******", toolset)
model = self.make_toolset_specific_model(toolset, skip_making_copy)
self.finalize_for_toolset(model, toolset)
logger.debug("****** generating for toolset %s ********", toolset)
bkl.api.Toolset.get(toolset).generate(model)
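# Illustrative usage sketch, not part of the original module; the toolset name
# and bakefile name are hypothetical:
#     interpreter = Interpreter()
#     interpreter.limit_toolsets(['gnu'])
#     interpreter.process_file('project.bkl')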
| 1.757813
| 2
|
connections/rs232Connection.py
|
IKKUengine/EtaNetPythonClients
| 2
|
12777596
|
import time
import threading
import serial
import parameter
class Rs232Connection(threading.Thread):
exit = True
stop = True
try:
__ser = serial.Serial(
port='/dev/ttyS0', # Open RPI buit-in serial port
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
except:
print ("RS232-Port could not be opened!")
def __init__(self):
threading.Thread.__init__(self)
if parameter.printMessages:
print("init rs232")
threading.Thread.start(self)
def run(self):
#self.lock.acquire()
        while self.exit:  # the thread only terminates once this while loop is exited
if self.stop:
self.request()
time.sleep(parameter.timeTriggerPowerAnalayser)
#self.lock.release()
def request(self):
pass
def getSerialPort(self):
return self.__ser
def setStop(self):
self.stop = False
def setStart(self):
self.stop = True
def setExit(self):
self.exit = False
        self.__ser.close()
def __exit__(self):
pass
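# Illustrative sketch only: how a concrete connection class is expected to
# override request(). The query command below is a hypothetical placeholder,
# and the sketch assumes the class-level serial port was opened successfully.
class ExampleMeterConnection(Rs232Connection):
    def request(self):
        ser = self.getSerialPort()
        ser.write(b'#01\r')        # hypothetical query command for the device
        reply = ser.readline()     # returns b'' after the 1 s timeout if the device is silent
        if parameter.printMessages:
            print("meter reply: {}".format(reply))
if __name__ == "__main__":
    conn = ExampleMeterConnection()   # starts polling in its own thread
    time.sleep(10)                    # let it run for a few cycles
    conn.setExit()                    # leave the loop and close the port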
| 2.859375
| 3
|
setup.py
|
dcramer/jinja1-djangosupport
| 2
|
12777597
|
<filename>setup.py
# -*- coding: utf-8 -*-
"""
jinja
~~~~~
Jinja is a `sandboxed`_ template engine written in pure Python. It
provides a `Django`_ like non-XML syntax and compiles templates into
executable python code. It's basically a combination of Django templates
and python code.
Nutshell
--------
Here is a small example of a Jinja template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url|e }}">{{ user.username|e }}</a></li>
{% endfor %}
</ul>
{% endblock %}
Philosophy
----------
Application logic is for the controller, but don't make life too hard
for the template designer by giving them too little functionality.
For more information visit the new `jinja webpage`_ and `documentation`_.
Note
----
This is the Jinja 1.0 release which is completely incompatible with the
old "pre 1.0" branch. The old branch will still receive security updates
and bugfixes but the 1.0 branch will be the only version that receives
support.
If you have an application that uses Jinja 0.9 and won't be updated in
the near future the best idea is to ship a Jinja 0.9 checkout together
with the application.
The `Jinja tip`_ is installable via `easy_install` with ``easy_install
Jinja==dev``.
.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
.. _Django: http://www.djangoproject.com/
.. _jinja webpage: http://jinja.pocoo.org/
.. _documentation: http://jinja.pocoo.org/documentation/index.html
.. _Jinja tip: http://dev.pocoo.org/hg/jinja-main/archive/tip.tar.gz#egg=Jinja-dev
"""
import os
import sys
import ez_setup
ez_setup.use_setuptools()
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsError
from setuptools import setup, Extension, Feature
def list_files(path):
for fn in os.listdir(path):
if fn.startswith('.'):
continue
fn = os.path.join(path, fn)
if os.path.isfile(fn):
yield fn
class optional_build_ext(build_ext):
def run(self):
try:
build_ext.run(self)
except DistutilsError, e:
self.compiler = None
self._setup_error = e
def build_extension(self, ext):
try:
if self.compiler is None:
raise self._setup_error
build_ext.build_extension(self, ext)
except CCompilerError, e:
print '=' * 79
print 'INFORMATION'
print ' the speedup extension could not be compiled, Jinja will'
print ' fall back to the native python classes.'
print '=' * 79
except:
e = sys.exc_info()[1]
print '=' * 79
print 'WARNING'
print ' could not compile optional speedup extension. This is'
            print ' not a real problem because Jinja provides a native'
print ' implementation of those classes but for best performance'
print ' you could try to reinstall Jinja after fixing this'
print ' problem: %s' % e
print '=' * 79
setup(
name='Jinja',
version='1.33373907',
url='http://jinja.pocoo.org/',
license='BSD',
author='<NAME>',
author_email='<EMAIL>',
description='A small but fast and easy to use stand-alone template '
'engine written in pure python.',
long_description = __doc__,
# jinja is egg safe. But because we distribute the documentation
# in form of html and txt files it's a better idea to extract the files
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
],
keywords=['python.templating.engines'],
packages=['jinja', 'jinja.translators', 'jinja.contrib'],
data_files=[
('docs/html', list(list_files('docs/html'))),
('docs/txt', list(list_files('docs/src')))
],
entry_points='''
[python.templating.engines]
jinja = jinja.plugin:BuffetPlugin
''',
extras_require={'plugin': ['setuptools>=0.6a2']},
features={
'speedups': Feature(
'optional C-speed enhancements',
standard=True,
ext_modules=[
Extension('jinja._speedups', ['jinja/_speedups.c'])
]
),
'extended-debugger': Feature(
'extended debugger',
standard=True,
ext_modules=[
Extension('jinja._debugger', ['jinja/_debugger.c'])
]
)
},
cmdclass={'build_ext': optional_build_ext}
)
| 2.328125
| 2
|
examples/get_fact_simulations.py
|
mbrner/funfolding
| 1
|
12777598
|
<filename>examples/get_fact_simulations.py<gh_stars>1-10
import os
import requests
URL = 'http://www.blog.pythonlibrary.org/wp-content/uploads/2012/06/wxDbViewer.zip'
script_dir = os.path.dirname(os.path.abspath(__file__))
def download(url=URL):
path = os.path.join(script_dir, "fact_simulations.hdf")
r = requests.get(url)
with open(path, "wb") as f:
f.write(r.content)
| 2.828125
| 3
|
app/core/views.py
|
ariksidney/Webleaf
| 5
|
12777599
|
from flask import render_template, session, redirect, url_for
from flask_login import login_required
from . import core
@core.route('/', methods=['GET', 'POST'])
@login_required
def index():
return redirect(url_for('aurora.aurora_overview'))
@core.route('/offline.html')
def offline():
return core.send_static_file('offline.html')
@core.route('/service-worker.js')
def sw():
return core.send_static_file('service-worker.js')
| 2.15625
| 2
|
16/16b.py
|
jamOne-/adventofcode2018
| 0
|
12777600
|
<filename>16/16b.py
import re
import sys
OPERATIONS = {
'addr': lambda a, b, c, registers: registers[a] + registers[b],
'addi': lambda a, b, c, registers: registers[a] + b,
'mulr': lambda a, b, c, registers: registers[a] * registers[b],
'muli': lambda a, b, c, registers: registers[a] * b,
'banr': lambda a, b, c, registers: registers[a] & registers[b],
'bani': lambda a, b, c, registers: registers[a] & b,
'borr': lambda a, b, c, registers: registers[a] | registers[b],
'bori': lambda a, b, c, registers: registers[a] | b,
'setr': lambda a, b, c, registers: registers[a],
'seti': lambda a, b, c, registers: a,
    'gtir': lambda a, b, c, registers: 1 if a > registers[b] else 0,
'gtri': lambda a, b, c, registers: 1 if registers[a] > b else 0,
'gtrr': lambda a, b, c, registers: 1 if registers[a] > registers[b] else 0,
'eqir': lambda a, b, c, registers: 1 if a == registers[b] else 0,
'eqri': lambda a, b, c, registers: 1 if registers[a] == b else 0,
'eqrr': lambda a, b, c, registers: 1 if registers[a] == registers[b] else 0
}
def find_numbers(line):
return list(map(int, re.findall('\d+', line)))
def perform_operation(operation, instruction, registers):
op_code, a, b, c = instruction
registers[c] = operation(a, b, c, registers)
def matching_operations(before, instruction, after):
matching = []
for key, operation in OPERATIONS.items():
registers = list(before)
perform_operation(operation, instruction, registers)
if registers == after:
matching.append(key)
return matching
def reduce_codes(codes):
calculated_codes = dict()
while len(calculated_codes) < 16:
for code, ops in codes.items():
rest = ops.difference(set(calculated_codes.values()))
if len(rest) == 1:
op = list(rest)[0]
calculated_codes[code] = op
return calculated_codes
def solve(puzzle_input):
lines = list(puzzle_input)
line_i = 0
codes = { code: set(OPERATIONS.keys()) for code in range(16) }
while lines[line_i].startswith('Before'):
before = find_numbers(lines[line_i])
instruction = find_numbers(lines[line_i + 1])
after = find_numbers(lines[line_i + 2])
line_i += 4
matching_keys = matching_operations(before, instruction, after)
codes[instruction[0]].intersection_update(set(matching_keys))
codes = reduce_codes(codes)
line_i += 2
registers = [0, 0, 0, 0]
while line_i < len(lines):
instruction = find_numbers(lines[line_i])
op_code = instruction[0]
perform_operation(OPERATIONS[codes[op_code]], instruction, registers)
line_i += 1
return registers[0]
print(solve(sys.stdin))
| 3.3125
| 3
|
captcha_predict.py
|
junryan/pytorch-captcha-recognition
| 0
|
12777601
|
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
import torch
import time
from torch.autograd import Variable
import captcha_setting
import my_dataset
from captcha_cnn_model import CNN
def main():
    print('Starting prediction on the images')
cnn = CNN()
cnn.eval()
cnn.load_state_dict(torch.load('model.pkl'))
print("加载神经网络训练的模型.")
result = []
predict_dataloader = my_dataset.get_predict_data_loader()
for i, (image_name, images, labels) in enumerate(predict_dataloader):
start = time.time()
image = images
vimage = Variable(image)
predict_label = cnn(vimage)
c0 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[0, 0:captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
c1 = captcha_setting.ALL_CHAR_SET[np.argmax(
predict_label[0, captcha_setting.ALL_CHAR_SET_LEN:2 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
c2 = captcha_setting.ALL_CHAR_SET[np.argmax(
predict_label[0, 2 * captcha_setting.ALL_CHAR_SET_LEN:3 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
c3 = captcha_setting.ALL_CHAR_SET[np.argmax(
predict_label[0, 3 * captcha_setting.ALL_CHAR_SET_LEN:4 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
res = '%s%s%s%s' % (c0, c1, c2, c3)
cost = '%.2f ms' % ((time.time() - start) * 1000)
result.append([image_name[0],res, cost])
    print('Prediction results of the trained neural network:')
data = np.hstack([result])
    res = pd.DataFrame(data, columns=['image name', 'prediction', 'time cost'])
print(res)
if __name__ == '__main__':
main()
| 2.8125
| 3
|
example/issues/449_django_lazy_path/pulpsettings.py
|
sephiartlist/dynaconf
| 2,293
|
12777602
|
REST_FRAMEWORK__DEFAULT_AUTHENTICATION_CLASSES = (
"rest_framework.authentication.SessionAuthentication",
"pulpcore.app.authentication.PulpRemoteUserAuthentication",
"foo_bar1",
)
| 1.070313
| 1
|
dianhua/worker/crawler/china_mobile/heilongjiang/main.py
|
Svolcano/python_exercise
| 6
|
12777603
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import json
import random
import re
import sys
import time
import traceback
import datetime
import hashlib
import urllib
from dateutil.parser import *
from dateutil.relativedelta import relativedelta
from pwd_change import des_encode, pw_query
reload(sys)
sys.setdefaultencoding("utf8")
if __name__ == '__main__':
sys.path.append('../..')
sys.path.append('../../..')
sys.path.append('../../../..')
from crawler.base_crawler import BaseCrawler
else:
from worker.crawler.base_crawler import BaseCrawler
class Crawler(BaseCrawler):
def __init__(self, **kwargs):
super(Crawler, self).__init__(**kwargs)
def need_parameters(self, **kwargs):
return ['pin_pwd']
def get_login_verify_type(self, **kwargs):
return ''
def send_login_verify_request(self, **kwargs):
# get cookies
url = "http://hl.10086.cn/apps/login/unifylogin.html"
code, key, resp = self.get(url)
if code != 0:
return code, key, ''
# check tel
url = "http://hl.10086.cn/rest/common/validate/validateHLPhone/?phone_no={}".format(kwargs['tel'])
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/json; charset=utf-8",
"Referer": "http://hl.10086.cn/apps/login/unifylogin.html"
}
code, key, resp = self.get(url, headers=headers)
if code != 0:
return code, key, ''
if u"校验成功" not in resp.text:
self.log("user", u"手机号码校验失败", resp)
return 1, "invalid_tel", ""
st_captcha_time = time.time()
for i in range(1,6):
capture_url = 'http://hl.10086.cn/rest/authImg?type=0&rand=' + str(random.random())
headers = {
"Accept": "image/webp,image/apng,image/*,*/*;q=0.8",
"Referer": "http://hl.10086.cn/apps/login/unifylogin.html"
}
code, key, resp = self.get(capture_url, headers=headers)
if code != 0:
continue
            # Solve the captcha with the yundama coding service
codetype = 3004
key, result, cid = self._dama(resp.content, codetype)
self.cid = cid
if key == "success" and result != "":
captcha_code = str(result)
else:
self.log("website", "website_busy_error: 云打码失败{}".format(result), '')
code, key = 9, "auto_captcha_code_error"
continue
            # Validate the captcha code with the site
url = "http://hl.10086.cn/rest/common/vali/valiImage?imgCode={}&_={}".format(captcha_code, int(time.time()*1000))
headers = {
"X-Requested-With": "XMLHttpRequest",
"Referer": "http://hl.10086.cn/apps/login/unifylogin.html"
}
code, key, resp = self.get(url, headers=headers)
if code != 0:
continue
try:
result = resp.json()
retCode = result.get("retCode")
if retCode not in ["000000", "0"]:
self._dama_report(self.cid)
end_captcha_time = time.time() - st_captcha_time
self.log("crawler", "验证图片第 {} 次,{} 错误 用时:'{}'s cid:'{}'".format(i,captcha_code,end_captcha_time,self.cid), resp)
code, key = 9, "auto_captcha_code_error"
continue
return 0, "success", captcha_code
except:
error = traceback.format_exc()
self.log("crawler", "解析结果错误{}".format(error), "")
continue
else:
return code, key, ""
def get_info(self, serviceName, channelId="12034"):
tim = str(time.time())
l_tim, r_tim = tim.split('.')
r_tim = r_tim.ljust(3, '0')
dd = l_tim + r_tim[:4]
en_str = base64.b64encode(hashlib.md5(dd + 'CM_201606').hexdigest())
ymd_hms_m = time.strftime("%Y%m%d%H%M%S", time.localtime(int(l_tim))) + r_tim[:4]
ran = str(random.randint(100, 999)) + str(random.randint(100, 999))
info = """{"serviceName":"%s","header":{"version":"1.0","timestamp":%s,"digest":"%s","conversationId":"%s"},"data":{"channelId":"%s"}}""" % (
serviceName, dd, en_str, ymd_hms_m + ran, channelId)
return urllib.quote(info)
def login(self, **kwargs):
code, key, captcha_code = self.send_login_verify_request(tel=kwargs['tel'])
if code != 0:
return code, key
url = "http://hl.10086.cn/rest/login/sso/doUnifyLogin/"
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/json; charset=UTF-8",
"Referer": "http://hl.10086.cn/apps/login/unifylogin.html"
}
# key_url = 'http://hl.10086.cn/rest/rsa/new-key?_={}'.format(str(time.time()).replace('.', ''))
key_url = 'http://hl.10086.cn/rest/rsa/aes-key?_={}'.format(str(time.time()).replace('.', ''))
code, key, resp = self.get(key_url)
if code != 0:
return code, key
try:
json_data = resp.json()
exponent = json_data['data']['exponent']
modulus = json_data['data']['modulus']
            pass_word = des_encode(kwargs['pin_pwd'], modulus=modulus, exponent=exponent)
except:
error = traceback.format_exc()
self.log("crawler", "加密密码失败{}".format(error), resp)
return 9, "crawl_error"
data = {
"userName": kwargs['tel'],
"passWord": <PASSWORD>,
"pwdType": "01",
"clientIP": captcha_code
}
code, key, resp = self.post(url, headers=headers, data=json.dumps(data))
if code != 0:
return code, key
try:
js = resp.json()
code = js.get('retCode')
msg = js.get('retMsg')
if code == '2036' or code == "400000" or code == "9006":
                # 400000: the submitted request is invalid
                # 9006: authentication request message format error
self.log("user", "账户密码不匹配", resp)
return 9, "pin_pwd_error"
elif code == '2046':
self.log("user", "账户被锁定", resp)
return 9, "account_locked"
# "retCode":"800000","retMsg":"统一认证中心返回信息为null"
elif code == '8014' or code == '800000' or code == "9008" or code == "5001" or code =='100000':
                # 9008: signature verification error; the exact cause is unknown
                # 5001, 100000: system busy, please try again later
self.log("website", "系统繁忙", resp)
return 9, "website_busy_error"
elif code == "4005":
self.log("user", "invalid_tel", resp)
return 9, "invalid_tel"
elif code == "000001":
                # The captcha was already verified in the previous step, so a site-side glitch cannot be ruled out
self.log("crawler", "运营商提示:{}".format(msg), resp)
self._dama_report(self.cid)
return 2, "verify_error"
elif code != "000000":
self.log("crawler", "未知原因错误", resp)
return 9, "unknown_error"
artifact = js.get('data')
except:
error = traceback.format_exc()
self.log("crawler", "获取artifact信息失败{}".format(error), resp)
return 9, "crawl_error"
url = "http://hl.10086.cn/rest/login/unified/callBack/"
params = {
"artifact": artifact,
"backUrl": "" # 这个就是空的
}
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Referer": "http://hl.10086.cn/apps/login/unifylogin.html"
}
code, key, resp = self.get(url, headers=headers, params=params)
if code != 0:
return code, key
if u"账号当前可用余额" not in resp.text:
if u"没有访问权限,您尚未登录" in resp.text:
self.log("website", "官网偶发异常", resp)
return 9, "website_busy_error"
self.log("crawler", "未知原因导致异常", resp)
return 9, "unknown_error"
        # Log in again
url = "https://login.10086.cn/SSOCheck.action"
params = {
"channelID": "12034",
"backUrl": "http://hl.10086.cn/apps/login/my.html"
}
headers= {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Referer": "http://hl.10086.cn/apps/login/my.html"
}
code, key, resp_login = self.get(url, headers=headers, params=params)
if code != 0:
return code, key
url = "http://www1.10086.cn/web-Center/authCenter/assertionQuery.do"
headers = {
"Origin": "http://hl.10086.cn",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Referer": "http://hl.10086.cn/apps/qry/bill-detail-query/bill-detail-query.html"
}
data = "requestJson=" + self.get_info("if008_query_user_assertion")
code, key, resp_test = self.post(url, headers=headers, data=data)
if code != 0:
return code, key
url = "http://www1.10086.cn/web-Center/authCenter/assertionQuery.do"
data = "requestJson=" + self.get_info("if008_query_user_assertion")
headers = {
"Accept": "*/*",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Referer": "http://hl.10086.cn/apps/qry/bill-detail-query/bill-detail-query.html"
}
code, key, resp = self.post(url, headers=headers, data=data)
if code != 0:
return code, key
if u"用户已登录" in resp.text:
return 0, "success"
elif u'8101' in resp.text or u'Artifact无效' in resp.text or u'无效的artifact' in resp.text or u'9999' in resp.text \
or '"response_code":"0000"' in resp.text:
self.log('website', 'website_busy_error', resp)
self.log('website', '为什么会出现这种情况?', resp_login)
self.log('website', '为什么会出现这种情况????', resp_test)
return 9, 'website_busy_error'
elif '"response_code":"-100"' in resp.text or u"连接超时" in resp.text:
# "response_code":"-100" 连接超时
self.log("crawler", u"连接超时", resp)
return 9, 'website_busy_error'
else:
self.log("crawler", u"未知异常", resp)
return 9, "unknown_error"
def get_verify_type(self, **kwargs):
return 'SMS'
def send_verify_request(self, **kwargs):
today = datetime.datetime.now()
year_month = "{}{:0>2}".format(today.year, today.month)
url = "http://hl.10086.cn/rest/qry/billdetailquery/s1526_1"
params = {
"select_type": "72",
"cxfs": "1",
"timeStr1": "",
"timeStr2": "",
"time_string": year_month,
"_": "{}".format(int(time.time()))
}
headers = {
"X-Requested-With": "XMLHttpRequest",
"Referer": "http://hl.10086.cn/apps/qry/bill-detail-query/bill-detail-query.html"
}
code, key, resp = self.get(url, headers=headers, params=params)
if code != 0:
return code, key, ""
url = "http://hl.10086.cn/rest/sms/sendSmsMsg"
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/json; charset=UTF-8",
"Referer": "http://hl.10086.cn/apps/qry/bill-detail-query/bill-detail-query.html"
}
data = {
"func_code": "000004",
"sms_type": "2",
"phone_no": kwargs['tel'],
"sms_params": ""
}
code, key, resp = self.post(url, headers=headers, data=json.dumps(data))
if code != 0:
return code, key, ""
if u"发送成功" in resp.text:
return 0, "success", ''
elif u'尊敬的用户,请勿在1分钟内重复下发短信' in resp.text:
self.log("user", 'send_sms_too_quick_error', resp)
return 9, 'send_sms_too_quick_error', ''
elif u'短信下发失败,手机号码为空' in resp.text or '100001' in resp.text:
self.log("user", 'invalid_tel', resp)
return 9, 'invalid_tel', ''
else:
self.log("crawler", 'request_error', resp)
return 9, 'request_error', ''
def verify(self, **kwargs):
url = "http://hl.10086.cn/rest/sms/checkSmsCode"
headers = {
"Referer": "http://hl.10086.cn/apps/qry/bill-detail-query/bill-detail-query.html",
"X-Requested-With": "XMLHttpRequest"
}
params = {
'func_code': '000004',
'sms_type': '2',
'phone_no': '',
'sms_code': kwargs['sms_code'],
'_': "{}".format(int(time.time()))
}
code, key, resp = self.get(url, headers=headers, params=params)
if code != 0:
return code, key
if u"输入正确" in resp.text:
return 0, "success"
elif u"输入错误" in resp.text:
self.log("user", u"验证码输入错误", resp)
return 9, "verify_error"
elif u"获取短信验证码" in resp.text:
self.log("crawler", u"尊敬的用户,请您获取短信验证码", resp)
return 9, "website_busy_error"
else:
self.log("crawler", u"未知异常", resp)
return 9, "unknown_error"
def crawl_call_log(self, **kwargs):
call_logs, miss_list, pos_miss_list = [], [], []
error_num = 0
# def getPscToken():
# getPscToken_url = "http://hl.10086.cn/rest/session/getPscToken/?_={}".format(int(time.time()))
# headers = {
# "X-Requested-With": "XMLHttpRequest",
# "Content-Type": "application/json; charset=utf-8",
# "Referer": "http://hl.10086.cn/apps/qry/bill-detail-query/bill-detail-query.html"
# }
# code, key, resp = self.get(getPscToken_url, headers=headers)
# if code != 0:
# return False, key, resp
# try:
# res_json = json.loads(resp.text)
# except:
# error = traceback.format_exc()
# self.log("crawler", error, resp)
# return False, error, resp
# if "000000" == res_json['retCode']:
# return True, res_json['data'], resp
# return True, "成功", resp
        aes_key_url = 'http://hl.10086.cn/rest/rsa/aes-key?_={}'.format(str(time.time()).replace('.', ''))
        code, key, resp = self.get(aes_key_url)
        if code != 0:
            return code, key
        try:
            resp_json = resp.json()
            exponent = resp_json['data']['exponent']
            modulus = resp_json['data']['modulus']
            xuser_word = des_encode(kwargs['pin_pwd'], modulus=modulus, exponent=exponent)
        except:
            error = traceback.format_exc()
            self.log("crawler", u"Failed to encrypt the password for the detail query: {}".format(error), resp)
            return 9, "crawl_error"
message, response = "", ""
month_retry_list = [(x, self.max_retry) for x in self.monthly_period(6, strf='%Y%m')]
# for month in self.monthly_period(6, strf='%Y%m'):
full_time = 60.0
retrys_limit = 4
st_time = time.time()
time_fee = 0
rand_time = random.randint(20, 40)/ 10.0
log_for_retrys = []
while month_retry_list:
month, retrys = month_retry_list.pop(0)
retrys -= 1
if retrys < -retrys_limit:
self.log("crawler", "重试次数完毕", "")
miss_list.append(month)
continue
log_for_retrys.append((month, retrys, time_fee))
# result, pscToken, r = getPscToken()
# if not result:
# if retrys >= 0:
# time_fee += time.time() - st_time
# month_retry_list.append((month, retrys))
# elif time_fee < full_time:
# time.sleep(rand_time)
# time_fee += time.time() - st_time
# month_retry_list.append((month, retrys))
# else:
# self.log("crawler", u"获取信息错误", r)
# miss_list.append(month)
# continue
# xuser_word = pw_query(kwargs['pin_pwd'], pscToken).encode('utf8')
re_url = "http://hl.10086.cn/apps/qry/bill-detail-query/bill-detail-query-attr.html"
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Referer": "http://hl.10086.cn/apps/qry/bill-detail-query/bill-detail-query.html"
}
params = {
"select_type": "72",
"time_string": month,
"feny_flag": "N",
# "xuser_word": kwargs['pin_pwd'],
"xuser_word": xuser_word,
"recordPass":"",
"{}".format(random.random()): ""
}
code, key, resp = self.get(re_url, headers=headers, params=params)
if code != 0:
if retrys >= 0:
time_fee = time.time() - st_time
month_retry_list.append((month, retrys))
elif time_fee < full_time:
time.sleep(rand_time)
time_fee = time.time() - st_time
month_retry_list.append((month, retrys))
else:
self.log("crawler", u"获取前置url失败{}".format(key), resp)
miss_list.append(month)
continue
url = "http://hl.10086.cn/rest/qry/billdetailquery/channelQuery"
params = {
"select_type": "72",
"time_string": month,
"xuser_word": xuser_word,
"recordPass":"",
"_": "{}".format(int(time.time()))
}
headers = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"X-Requested-With": "XMLHttpRequest",
"Referer": re_url
}
code, key, resp = self.get(url, headers=headers, params=params)
if code != 0:
if retrys >= 0:
time_fee = time.time() - st_time
month_retry_list.append((month, retrys))
elif time_fee < full_time:
time.sleep(rand_time)
time_fee = time.time() - st_time
month_retry_list.append((month, retrys))
else:
self.log("crawler", u"获取详单失败{}".format(key), resp)
miss_list.append(month)
continue
code, key, msg, result = self.call_log_get(resp.text, month)
if code == 0:
if result:
call_logs.extend(result)
else:
if retrys >= 0:
time_fee = time.time() - st_time
month_retry_list.append((month, retrys))
elif time_fee < full_time:
time.sleep(rand_time)
time_fee = time.time() - st_time
month_retry_list.append((month, retrys))
else:
self.log("crawler", u"详单或许缺失", resp)
pos_miss_list.append(month)
continue
else:
message, response = key, resp
if retrys >= 0:
time_fee = time.time() - st_time
month_retry_list.append((month, retrys))
elif time_fee < full_time:
time.sleep(rand_time)
time_fee = time.time() - st_time
month_retry_list.append((month, retrys))
else:
self.log("crawler", u"获取详单失败{}".format(key), resp)
miss_list.append(month)
if message == "html_error":
self.log("crawler", message, response)
error_num += 1
self.log("crawler", "重试记录{}".format(log_for_retrys), "")
self.log("crawler", "缺失: {}, 可能缺失: {}, 部分缺失: {}".format(miss_list, pos_miss_list, []), "")
if len(pos_miss_list) + len(miss_list) == 6:
if error_num > 0:
return 9, "crawl_error", [], [], []
else:
return 9, "website_busy_error", [], [], []
return 0, "success", call_logs, miss_list, pos_miss_list
def call_log_get(self, text_resp, month):
call_log = []
if "errCode:10111109814220003" in text_resp:
self.log("user", "不允许查询开户前的详单", "")
return 0, "success", "", []
try:
js = json.loads(text_resp)
if "java.lang.NullPointerException" in text_resp:
return 9, "website_busy_error", "", []
text_list = js.get("data").get("detailList")[0].get("DETAIL_LINES")
for text in text_list:
single_call_log = {}
info_list = text
if len(info_list) < 4:
continue
# '2017/11/01 03:39:48', '北京', '主叫', '10086', '1分58秒', '国内异地主叫', '标准资费', '0.00', '2G网络', ''
# 0 1 2 3 4 5 6 7 8 9
single_call_log['call_tel'] = info_list[3]
single_call_log['call_cost'] = info_list[7]
call_time = info_list[0]
result, call_time = self.time_stamp(call_time)
if not result:
self.log("crawler", "转换时间失败{}{}".format(call_time, text_resp), "")
return 9, 'html_error', 'html_error when transform call_time to time_stamp : %s' % call_time, []
single_call_log['call_time'] = call_time
single_call_log['month'] = month
single_call_log['call_method'] = info_list[2]
single_call_log['call_type'] = info_list[5]
raw_call_from = info_list[1]
call_from, error = self.formatarea(raw_call_from)
if not call_from:
call_from = raw_call_from
single_call_log['call_from'] = call_from
single_call_log['call_to'] = ''
single_call_log['call_duration'] = self.time_format(info_list[4])
call_log.append(single_call_log)
except:
error = traceback.format_exc()
return 9, 'html_error', 'html_error when parse call log : %s' % error, []
return 0, 'success', '成功', call_log
def time_format(self, time_str, **kwargs):
exec_type = 1
time_str = time_str.encode('utf-8')
if 'exec_type' in kwargs:
exec_type = kwargs['exec_type']
if (exec_type == 1):
xx = re.match(r'(.*时)?(.*分)?(.*秒)?', time_str)
h, m, s = 0, 0, 0
if xx.group(1):
hh = re.findall('\d+', xx.group(1))[0]
h = int(hh)
if xx.group(2):
mm = re.findall('\d+', xx.group(2))[0]
m = int(mm)
if xx.group(3):
ss = re.findall('\d+', xx.group(3))[0]
s = int(ss)
real_time = h * 60 * 60 + m * 60 + s
if (exec_type == 2):
xx = re.findall(r'\d*', time_str)
h, m, s = map(int, xx[::2])
real_time = h * 60 * 60 + m * 60 + s
return str(real_time)
def time_stamp(self, time_str):
try:
timeArray = time.strptime(time_str, "%Y/%m/%d %H:%M:%S")
timeStamp = int(time.mktime(timeArray))
return True, str(timeStamp)
except:
error = traceback.format_exc()
return False, error
def crawl_info(self, **kwargs):
tel_info = {}
url = "http://www1.10086.cn/web-Center/interfaceService/custInfoQry.do"
headers = {
"Accept": "*/*",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
}
data = "requestJson=" + self.get_info("if007_query_user_info", "0001")
code, key, resp = self.post(url, headers=headers, data=data)
if code != 0:
return code, key, {}
try:
js = resp.json()
code = js.get("result").get("response_code")
if code == "0000":
info = js.get("result").get("data").get("userInfo")
tel_info['address'] = info.get("userAddr")
# 20170207143512
open_date_str = info.get("userBegin")
open_date = self.time_stamp(open_date_str[:4]+'/'+open_date_str[4:6]+'/'+open_date_str[6:8]+' '+open_date_str[8:10]+":"+open_date_str[10:12]+":"+open_date_str[12:])
if open_date[0]:
tel_info['open_date'] = open_date[1]
else:
tel_info['open_date'] = ""
tel_info['id_card'] = ''
tel_info['full_name'] = info.get("userName")
else:
self.log("crawler", "未知原因导致获取个人信息失败", resp)
return 9, "html_error", {}
except:
error = traceback.format_exc()
self.log("crawler", u"解析用户信息失败{}".format(error), resp)
return 9, "html_error", {}
return 0, "success", tel_info
def crawl_phone_bill(self, **kwargs):
miss_list = []
data_list = []
error_num = 0
message = ""
for month in list(self.monthly_period())[1:]:
for i in range(self.max_retry):
url = "http://hl.10086.cn/rest/qry/billquery/qryBillHome?user_seq=000003&yearMonth={}&_={}".format(month, int(time.time()))
code, key, resp = self.get(url)
if code != 0:
message = "network_error"
continue
try:
js = resp.json()
dd = js.get("data").get("ROOT").get("BODY").get("OUT_DATA").get("PCAS_03")
data = {}
data['bill_month'] = month
data['bill_amount'] = str(float(dd.get("PCAS_03_12").get("REAL_FEE", "0.0"))/100)
data['bill_package'] = str(float(dd.get("PCAS_03_01").get("REAL_FEE", "0.0"))/100)
data['bill_ext_calls'] = str(float(dd.get("PCAS_03_02").get("REAL_FEE", "0.0"))/100)
data['bill_ext_data'] = str(float(dd.get("PCAS_03_04").get("REAL_FEE", "0.0"))/100)
data['bill_ext_sms'] = str(float(dd.get("PCAS_03_05").get("REAL_FEE", "0.0"))/100)
data['bill_zengzhifei'] = str(float(dd.get("PCAS_03_06").get("REAL_FEE", "0.0"))/100)
data['bill_daishoufei'] = str(float(dd.get("PCAS_03_09").get("REAL_FEE", "0.0"))/100)
data['bill_qita'] = str(float(dd.get("PCAS_03_10").get("REAL_FEE", "0.0"))/100)
data_list.append(data)
break
except:
error = traceback.format_exc()
message = u"解析账单数据失败{}".format(error)
continue
else:
if message != "network_error":
error_num += 1
miss_list.append(month)
if len(miss_list) == 5:
if error_num > 0:
return 9, "crawl_error", [], []
else:
return 9, "website_busy_error", [], []
return 0, "success", data_list, miss_list
def monthly_period(self, length=6, strf='%Y%m'):
current_time = datetime.datetime.now()
monthly_period_list = []
for month_offset in range(0, length):
monthly_period_list.append((current_time - relativedelta(months=month_offset)).strftime(strf))
return monthly_period_list
if __name__ == "__main__":
c = Crawler()
USER_ID = "13846194712"
USER_PASSWORD = "<PASSWORD>"
    c.self_test(tel=USER_ID, pin_pwd=USER_PASSWORD)
| 2.328125
| 2
|
job_server/src/job_server/app.py
|
jessicalucci/EB-Worker-RDS-VPC
| 3
|
12777604
|
import os
import yaml
import tornado.ioloop
import tornado.gen
import tornado.web
from job_server.context import JobServerContext
from job_server.routes import PostJobHandler, RunJobHandler
from job_server.db import init_db
def job_server(context):
return tornado.web.Application([
(r'/job/run', RunJobHandler, dict(
context=context
)),
(r'/job/post/([A-z]+)', PostJobHandler, dict(
context=context
))
])
if __name__ == "__main__":
context = JobServerContext(yaml.load(file(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'config.yaml'),
'r')))
init_db(context)
app = job_server(context)
app.listen(8080)
tornado.ioloop.IOLoop.current().start()
| 2.171875
| 2
|
tests/test_util.py
|
mongodb-labs/mongo-web-shell
| 22
|
12777605
|
<gh_stars>10-100
# Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
from bson.json_util import dumps
import mock
from werkzeug.exceptions import NotFound, InternalServerError
from webapps.lib.db import get_db
from webapps.lib.util import UseResId, get_collection_names
from webapps.lib import CLIENTS_COLLECTION
from webapps.lib.MWSServerError import MWSServerError
from tests import MongoWSTestCase
class UseResIdTestCase(MongoWSTestCase):
def test_mangles_collection_names_automatically(self):
with self.real_app.app_context():
with UseResId('myresid.') as db:
coll = db.foo
self.assertEqual(coll.name, 'myresid.foo')
def test_updates_collection_list(self):
with self.real_app.app_context():
db = get_db()
res_id = 'myresid.'
# Setup resource id record
clients_collection = db[CLIENTS_COLLECTION]
clients_collection.remove({'res_id': res_id})
clients_collection.insert({
'res_id': res_id,
'collections': []
})
with UseResId(res_id) as db:
self.assertItemsEqual(get_collection_names(res_id), [])
db.foo.insert({'message': 'test'})
self.assertItemsEqual(get_collection_names(res_id), ['foo'])
self.assertItemsEqual(list(db.foo.find({}, {'_id': 0})),
[{'message': 'test'}])
db.bar.update({}, {'message': 'test'}, upsert=True)
self.assertItemsEqual(get_collection_names(res_id), ['foo', 'bar'])
self.assertItemsEqual(list(db.bar.find({}, {'_id': 0})),
[{'message': 'test'}])
db.foo.drop()
self.assertItemsEqual(get_collection_names(res_id), ['bar'])
self.assertNotIn(res_id + 'foo', get_collection_names(res_id))
class QuotaCollectionsTestCase(UseResIdTestCase):
def setUp(self):
super(QuotaCollectionsTestCase, self).setUp()
self.old_quota = self.real_app.config['QUOTA_NUM_COLLECTIONS']
self.res_id = 'myresid.'
with self.real_app.app_context():
collections = get_collection_names(self.res_id)
with UseResId(self.res_id) as db:
for c in collections:
db.drop_collection(c)
def tearDown(self):
self.real_app.config['QUOTA_NUM_COLLECTIONS'] = self.old_quota
def test_quota_collections(self):
self.real_app.config['QUOTA_NUM_COLLECTIONS'] = 2
with self.real_app.app_context():
with UseResId(self.res_id) as db:
db.a.insert({'a': 1})
db.b.insert({'b': 1})
with self.assertRaises(MWSServerError) as cm:
db.c.insert({'c': 1})
self.assertEqual(cm.exception.error, 429)
for c in ['a', 'b']:
db.drop_collection(c)
def test_quota_collections_zero(self):
self.real_app.config['QUOTA_NUM_COLLECTIONS'] = 0
with self.real_app.app_context():
with UseResId(self.res_id) as db:
with self.assertRaises(MWSServerError) as cm:
db.a.insert({'a': 1})
self.assertEqual(cm.exception.error, 429)
db.drop_collection('a')
| 1.929688
| 2
|
products/tests/test_views.py
|
Kaique425/ecommerce
| 0
|
12777606
|
<filename>products/tests/test_views.py
from pytest_django.asserts import assertTemplateUsed, assertQuerysetEqual
from products.tests.factories import ProductFactory
from django.urls import resolve, reverse
from ..models import Product
import pytest
pytestmark = pytest.mark.django_db
@pytest.fixture
def list_response(client):
return client.get(reverse('product:list'))
class TestProductList():
def test_status_code(self, list_response):
assert list_response.status_code == 200
def test_reverse_resolve(self):
assert reverse('product:list') == '/'
assert resolve('/').view_name == ('product:list')
def test_template(self, list_response):
assertTemplateUsed(list_response, 'products/product_list.html')
@pytest.fixture
def detail_response(client, product):
return client.get(reverse('product:detail', kwargs={'slug':product.slug}))
class TestProductDetail():
def test_status_code(self, client):
product = ProductFactory(is_available=True)
url = reverse('product:detail', kwargs={'slug': product.slug})
response = client.get(url)
assert response.status_code == 200
def test_reverse_resolve(self, product):
assert reverse('product:detail', kwargs={'slug':product.slug}) == f"/product/{product.slug}"
assert resolve(f'/product/{product.slug}').view_name == "product:detail"
def test_template(self, detail_response):
assertTemplateUsed(detail_response, "products/product_detail.html")
| 2.21875
| 2
|
python/ex039.py
|
deniseicorrea/Aulas-de-Python
| 0
|
12777607
|
<reponame>deniseicorrea/Aulas-de-Python<filename>python/ex039.py
from datetime import date
atual = date.today().year
nasc = int(input('What year were you born? '))
idade = atual - nasc
print(f'You are {idade} years old')
if idade == 18:
    print('You must enlist for military service immediately')
elif idade < 18:
    saldo = 18 - idade
    ano = atual + saldo
    print(f'You do not have to enlist yet; {saldo} year(s) to go.')
    print(f'You will enlist in the year {ano}')
elif idade > 18:
    saldo = idade - 18
    ano = atual - saldo
    print(f'You should have enlisted {saldo} year(s) ago.')
    print(f'You should have enlisted in the year {ano}')
| 3.859375
| 4
|
moving_message_g009dh/examples/dict_test.py
|
Kurocon/moving_message_g009dh
| 0
|
12777608
|
<reponame>Kurocon/moving_message_g009dh<filename>moving_message_g009dh/examples/dict_test.py
from moving_message_g009dh.ledbar import *
if __name__ == "__main__":
bar = LEDBar()
bar.data_from_dict(data={
'files': [{
'number': 1,
'lines': [{
'fade': 'pacman',
'speed': 'speed_8',
'texts': [{
'color': 'bright_red',
'font': 'extra_wide',
'text': 'X',
}, {
'color': 'bright_orange',
'font': 'extra_wide',
'text': 'T',
}, {
'color': 'bright_yellow',
'font': 'extra_wide',
'text': 'R',
}, {
'color': 'bright_green',
'font': 'extra_wide',
'text': 'A',
}, ]
}, {
'fade': 'open_from_center',
'texts': [{
'color': 'bright_layer_mix_rainbow',
'font': 'small',
'text': 'smol',
}]
}]
}]
}, clear=True)
print(" ".join(["0x{:02x}".format(x) for x in bar._data_buffer]))
bar.send_data()
| 1.976563
| 2
|
configuration-client/configurator/thriftgen/ConfigurationService/__init__.py
|
manimaul/xio
| 40
|
12777609
|
__all__ = ['ttypes', 'constants', 'ConfigurationService']
| 1.070313
| 1
|
o365harvest.py
|
jmpalk/o365harvest
| 3
|
12777610
|
#!/usr/bin/env python3
import requests
import sys
import argparse
import uuid
from time import sleep
from string import Template
def Spray(domain, users, target_url, output_file, wait, verbose, more_verbose, debug):
i = 0
results = []
if verbose or more_verbose:
print("Targeting: " + target_url + "\n")
for user in users:
if more_verbose:
print("\ntesting " + user)
body = '{"Username": "%s@%s"}' % (user, domain)
r = requests.post(target_url, data=body)
#print(target_url)
if debug:
print("Time elapsed: " + str(r.elapsed) + "\n")
if more_verbose:
print("Status: " + str(r.status_code))
print(r.headers)
print(r.text)
if 'ThrottleStatus' in r.headers.keys():
print("Throttling detected => ThrottleStatus: " + r.headers('ThrottleStatus'))
if '"IfExistsResult":0' in r.content.decode('UTF-8'):
output_file.write(user + "@" + domain +" - VALID\n")
if verbose or more_verbose:
print("Found " + user + "@" + domain)
continue
sleep(wait)
i = i + 1
if i % 50 == 0:
print("Tested " + str(i) + " possible users")
return results
def main():
parser = argparse.ArgumentParser(description="Enumerate users against Office365")
target_group = parser.add_argument_group(title="Attack Target")
target_group.add_argument('-d', dest='domain', type=str, help='Target domain - required')
target_group.add_argument('-l', dest='user_list', type=argparse.FileType('r'), help='File with list of target usernames (without domain)')
target_group.add_argument('-u', '--url', type=str, dest='url', help='Target URL if using something like fireprox; otherwise will directly call the O365 login endpoint')
target_group.add_argument('-w', '--wait', type=int, dest='wait', help='Number of seconds to sleep between individual user attempts', default=0)
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=False)
parser.add_argument('-vv', '--more-verbose', action='store_true', dest='more_verbose', default=False)
parser.add_argument('-D', '--debug', action='store_true', dest='debug', default=False)
parser.add_argument('-o', '--output', type=argparse.FileType('w'), dest='output_file', default='spray_results.txt', help='Output file for results (txt). Default is spray_results.txt')
args = parser.parse_args()
if not args.domain:
parser.print_help()
print('\nNo target domain provided')
sys.exit()
if not args.user_list:
parser.print_help()
print('\nNo list of target users provided')
sys.exit()
if not args.url:
target_url = 'https://login.microsoftonline.com/common/GetCredentialType'
else:
target_url = args.url + 'common/GetCredentialType'
if args.debug:
print("*** DEBUG MESSAGING ENABLED ***")
users = []
for line in args.user_list:
users.append(line.split('@')[0].strip())
results = Spray(args.domain, users, target_url, args.output_file, args.wait, args.verbose, args.more_verbose, args.debug)
if __name__ == '__main__':
main()
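# Example invocation (illustrative only; the domain, wordlist and output file
# names below are placeholders, not values shipped with the tool):
#   python3 o365harvest.py -d example.com -l users.txt -w 2 -v -o found_users.txt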
| 2.546875
| 3
|
Src/DockerMMODES/data_gen.py
|
beatrizgj/MDPbiomeGEM
| 0
|
12777611
|
#!/usr/bin/python3
# Script to shape the desired output to be processed (MMODES)
# the datatable way
# @author: <NAME>
# Creation: 09/06/2019
import os
import re
import numpy as np
import datatable as dt
from datatable import f
def log(cons, media):
'''
Writes information of consortium object to file
'''
logf = 'simulations.txt'
p = re.compile(r'#+ SIMULATION (\d+) #+')
if os.path.isfile(logf): # parse last simulation number
with open(logf) as l:
for line in l.readlines():
num_sim = p.search(line)
if num_sim:
head = " SIMULATION "+str(int(num_sim.group(1))+1)+" "
else:
head = " SIMULATION 1 "
lines = '{:{fill}{align}{width}}'.format(head,
fill = '#',
align = '^',
width = 30) + "\n"
lines += cons.__str__()
pers = ', '.join([per["PERTURBATION"] for per in media])
lines += "\nPERTURBATIONS: " + pers + "\n\n"
with open(logf, "a") as l:
l.write(lines)
return
def equidistant(df, n):
sample = np.linspace(df.nrows-1,1,n).astype('int')
sample.sort()
return df[sample, :]
def tsv_filter(medium = "", flux = "", txpers = {}, inplace = False, v = 0, equif = True, bin = False):
    '''
    Function that filters medium and fluxes TSV files based on perturbation times.
    INPUTS -> medium: string, path to medium file;
              flux: string, path to fluxes file;
              txpers: dictionary, time : perturbation;
              inplace: bool, whether to overwrite the input paths (default False);
              v: float, volume magnitude used to obtain medium concentrations;
              equif: bool, whether to write an additional filtered fluxes file
                     with 100 equidistant points (default True)
    OUTPUT -> returns None; writes 2 (or 3) TSV files
dfs = []
if not medium:
print("Medium parameter wasn't supplied, it won't be generated.")
else:
dfs.append([dt.fread(medium), medium, 0])
if v != 0:
for i in range(1,dfs[0][0].ncols): dfs[0][0][:,i] = dfs[0][0][:,f[i]/v]
if not flux:
print("Medium parameter wasn't supplied, it won't be generated.")
else:
dfs.append([dt.fread(flux), flux, 1])
    if not txpers:
        print("You must supply a txpers parameter. Exiting function...")
        return
for log, path, n in dfs:
log[:,'Perturbations'] = "FALSE" # now last column (-1)
log[-1,-1] = "END"
if len(txpers) > 1:
for tp, per in txpers.items():
if tp == 0:
log[0,-1] = per
else:
# take last time that matches <= perturbation time
log[f.time == log[f.time < tp, f.time][-1,-1], -1] = per
# if per == 'START':
# log[0,-1] = 'START'
# else:
# # take last index that matches <= perturbation time
# log[f.time == log[f.time <= tp, f.time][-1,-1], -1] = per
else:
log[0, -1] = 'START'
if n != 0 and equif:
log_equif = equidistant(log,100) # take 100 equidistant rows
log_equif.to_csv(path[:-4] + '_equi' + '.tsv')
del(log_equif)
# TODO: I don't know how to implement a condroll with datatable
        # We aren't currently using it, anyway
log = log[f.Perturbations != "FALSE", :]
if inplace:
log.to_csv(path)
else:
log.to_csv(path[:-4] + '_filtered' + '.tsv')
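# Minimal usage sketch of tsv_filter(); the file names, perturbation times and
# volume below are hypothetical placeholders, not values from the MMODES
# pipeline itself. The TSVs are expected to contain a 'time' column.
if __name__ == '__main__':
    example_txpers = {0: 'START', 2.5: 'FEED'}   # perturbation time -> label
    tsv_filter(medium='media_log.tsv',
               flux='flux_log.tsv',
               txpers=example_txpers,
               v=0.05,                           # divide media amounts by culture volume
               inplace=False)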
| 2.546875
| 3
|
secret_breakout/breakout.py
|
LaRiffle/private-RL
| 4
|
12777612
|
<filename>secret_breakout/breakout.py
from gym import logger
class Rect(object):
def __init__(self, left, top, width, height):
self.left = left
self.top = top
self.width = width
self.height = height
self.right = left + self.width
self.bottom = top + self.height
def move(self, x):
return Rect(self.left+x, self.top, self.width, self.height)
def destroyed(self):
self.left = -1
self.top = -1
self.width = -1
self.height = -1
self.right = -1
self.bottom = -1
def __repr__(self):
return 'Rect({}, {}, {}, {})'.format(
self.left, self.top, self.width, self.height)
class Blocks(object):
"""Implements blocks as a collection instead of
as individual block objects """
def __init__(self, args):
self.args = args
self.width = 100
self.height = 20
self.blocks = self.make_blocks()
self.num_blocks_start = len(self.blocks)
self.num_blocks_destroyed = 0
def make_blocks(self):
rects = []
rows = 5
self.rows_height = self.args.env_height // rows
for i in range(0, self.args.env_width, self.width):
for j in range(0, self.rows_height, self.height):
rects.append(Rect(i, j, self.width, self.height))
return rects
# removes single block from blocks list when it is hit by ball
# ball being the ball object
def collided(self, ball_object):
collided = False
for block in self.blocks:
if ball_object.collided(block, 'block'):
# set the block to destroyed if collision occured
block.destroyed()
collided = True
self.num_blocks_destroyed += 1
return collided
def block_locations(self):
block_locs = [-1] * (2*self.num_blocks_start)
for i,block in enumerate(self.blocks):
block_locs[2*i] = block.left
block_locs[(2*i)+1] = block.top
return block_locs
class Paddle(Rect):
def __init__(self, args):
self.args = args
# TODO(korymath): what is the correct size for the paddle
self.width = self.args.env_width // 3
self.height = 20
self.initial_x = (self.args.env_width // 2) - (self.width // 2)
self.initial_y = self.args.env_height - 50
self.rect = Rect(self.initial_x, self.initial_y,
self.width, self.height)
def move(self, speed):
# check if the move would collide paddle with edge
if ((self.rect.right + speed > self.args.env_width) or
(self.rect.left + speed < 0)):
# out of bounds, do not update the paddle position
# TODO[jason] handle reward corruption
return True
self.rect = self.rect.move(speed)
return False
class Ball(object):
"""Ball object that takes initial speed in x direction (speedx)
and initial speed in y direction(speedy)"""
def __init__(self, args, speedx, speedy):
self.args = args
self.radius = 10
self.x = self.args.env_width//2
self.y = self.args.env_height - 70
self.speed_magnitude = 5
self.speedx = speedx
self.speedy = speedy
def move(self):
# check for collision with the right side of the game screen
if self.x + self.radius + self.speedx >= self.args.env_width:
logger.debug('ball collide with right side of screen')
self.speedx = -self.speed_magnitude
# check for collision with the left hand side of the game screen
elif self.x + self.speedx <= 0:
logger.debug('ball collide with left side of screen')
self.speedx = self.speed_magnitude
# check for collision with the bottom of the game screen
if self.y + self.radius + self.speedy >= self.args.env_height:
logger.debug('ball collide with bottom of screen')
self.speedy = -self.speed_magnitude
return True
# check for collision with the top of the game screen
elif self.y + self.radius + self.speedy <= 0:
logger.debug('ball collide with top of screen')
self.speedy = self.speed_magnitude
# update the ball position
self.x += self.speedx
self.y += self.speedy
return False
# checks if ball has collided with the rect_obj
# which may block or paddle
def collided(self, rect, collider):
if collider == 'paddle':
left_temp = rect.rect.left
right_temp = rect.rect.right
bottom_temp = rect.rect.bottom
top_temp = rect.rect.top
else:
left_temp = rect.left
            right_temp = rect.right
bottom_temp = rect.bottom
top_temp = rect.top
if ((left_temp <= self.x + self.radius) and
(self.x - self.radius <= right_temp)):
if top_temp < self.y + self.radius < bottom_temp:
logger.debug('ball collide with {}'.format(collider))
self.speedy = -self.speedy
# add an extra displacement to avoid double collision
self.y += self.speedy
return True
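# Illustrative sketch: wiring the pieces above into a tiny step loop.
# `EnvArgs` is a hypothetical stand-in for the argparse namespace the real
# environment passes in; only env_width and env_height are assumed here.
class EnvArgs(object):
    env_width = 600
    env_height = 400
def _demo_episode(steps=200):
    args = EnvArgs()
    blocks, paddle, ball = Blocks(args), Paddle(args), Ball(args, 5, -5)
    for _ in range(steps):
        paddle.move(0)                  # a real agent would pick -speed/0/+speed
        done = ball.move()              # True when the ball hits the bottom
        ball.collided(paddle, 'paddle')
        blocks.collided(ball)
        if done or blocks.num_blocks_destroyed == blocks.num_blocks_start:
            break
    return blocks.num_blocks_destroyed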
| 3.59375
| 4
|
pydatamailbox/__init__.py
|
optimdata/pydatamailbox
| 1
|
12777613
|
<reponame>optimdata/pydatamailbox
from .client import * # NOQA
from .exceptions import * # NOQA
| 0.929688
| 1
|
products/migrations/0011_product_products_pr_name_9ff0a3_idx.py
|
MattiMatt8/ship-o-cereal
| 1
|
12777614
|
<reponame>MattiMatt8/ship-o-cereal<gh_stars>1-10
# Generated by Django 3.2 on 2021-05-10 16:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0010_alter_brand_options'),
]
operations = [
migrations.AddIndex(
model_name='product',
index=models.Index(fields=['name'], name='products_pr_name_9ff0a3_idx'),
),
]
| 1.617188
| 2
|
tensorflow/python/autograph/pyct/static_analysis/type_inference.py
|
grasskin/tensorflow
| 2
|
12777615
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type inference.
This analysis annotates all symbols nodes of an AST with type information
extracted from static sources:
* type annotations
* global and local symbols visible to the function at analysis time
* literals
Requires reaching function definitions analysis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Tuple
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import annos
class Resolver(object):
"""Resolver objects handle the process of looking up actual names and types.
All resolve_* methods:
* have a first namespace argument, mapping string to actual values
* specify names as QN objects
* specify types as a Set of inferred types
All resolve_* methods must return either:
* a set of `type` objects
* None
"""
def res_name(self, ns, name):
"""Resolves the type an external (e.g. closure, global) variable."""
raise NotImplementedError('subclasses must implement')
def res_value(self, ns, value):
"""Resolves the type a literal value."""
raise NotImplementedError('subclasses must implement')
# TODO(mdan): Allow caller to model side effects.
def res_call(self, ns, name, target, args, keywords, starargs, kwargs):
"""Resolves the return type an external function or method call.
Args:
ns: namespace
name: str, the function name
target: if this is a method call, the types of the method target, None
otherwise
      args: list of argument types
keywords: dict of name to argument types
starargs: list of types of the *args arguments (should be at most one)
kwargs: list of types of the **kwargs arguments (in order of appearance)
"""
raise NotImplementedError('subclasses must implement')
def res_arg(self, ns, f_name, arg_name, type_anno):
"""Resolves the type of a (possibly annotated) function argument."""
raise NotImplementedError('subclasses must implement')
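# Illustrative sketch (not part of the TensorFlow sources): a deliberately
# naive Resolver that only understands literal values and plain globals. It
# exists to show the contract stated in the Resolver docstring, namely that
# every res_* method returns either a set of `type` objects or None.
class _NaiveExampleResolver(Resolver):
  def res_name(self, ns, name):
    if str(name) in ns:
      return {type(ns[str(name)])}
    return None
  def res_value(self, ns, value):
    return {type(value)}
  def res_call(self, ns, name, target, args, keywords, starargs, kwargs):
    return None  # the return type of calls is left unknown in this sketch
  def res_arg(self, ns, f_name, arg_name, type_anno):
    return None  # no annotation handling in this sketch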
class _SymbolTable(object):
"""Abstraction for the state of the CFG walk for type inference.
This is a value type. Only implements the strictly necessary operators.
Attributes:
value: Dict[qual_names.QN, Set[Type]], mapping symbols to the set of
possible types.
"""
def __init__(self, init_from=None):
if init_from:
assert isinstance(init_from, _SymbolTable)
self.value = {
s: set(other_types) for s, other_types in init_from.value.items()
}
else:
self.value = {}
def __eq__(self, other):
if frozenset(self.value.keys()) != frozenset(other.value.keys()):
return False
ret = all(self.value[s] == other.value[s] for s in self.value)
return ret
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
assert isinstance(other, _SymbolTable)
result = _SymbolTable(self)
for s, other_types in other.value.items():
if s not in result.value:
self_types = set()
result.value[s] = self_types
else:
self_types = result.value[s]
self_types.update(other_types)
return result
def __repr__(self):
return 'SymbolTable {}'.format(self.value)
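# A small illustration of the value-type semantics above (not executed by the
# analysis): merging two tables with `|` unions the possible types per symbol
# without mutating either operand.
#
#   a = _SymbolTable(); a.value = {qual_names.QN('x'): {int}}
#   b = _SymbolTable(); b.value = {qual_names.QN('x'): {float},
#                                  qual_names.QN('y'): {str}}
#   (a | b).value  ==  {QN('x'): {int, float}, QN('y'): {str}}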
_GETITEM = qual_names.QN('__getitem__')
_HANDLERS = {
gast.Eq: qual_names.QN('__eq__'),
gast.NotEq: qual_names.QN('__ne__'),
gast.Lt: qual_names.QN('__lt__'),
gast.LtE: qual_names.QN('__le__'),
gast.Gt: qual_names.QN('__gt__'),
gast.GtE: qual_names.QN('__ge__'),
gast.In: qual_names.QN('__contains__'),
# TODO(mdan): Is this actually correct?
# NotIn(*) = Not(In(*))
gast.NotIn: qual_names.QN('__not__'),
gast.Add: qual_names.QN('__add__'),
gast.Sub: qual_names.QN('__sub__'),
gast.Mult: qual_names.QN('__mul__'),
gast.Div: qual_names.QN('__div__'),
gast.FloorDiv: qual_names.QN('__floordiv__'),
gast.Mod: qual_names.QN('__mod__'),
gast.Pow: qual_names.QN('__pow__'),
gast.LShift: qual_names.QN('__lshift__'),
gast.RShift: qual_names.QN('__rshift__'),
gast.BitOr: qual_names.QN('__or__'),
gast.BitXor: qual_names.QN('__xor__'),
gast.BitAnd: qual_names.QN('__and__'),
gast.MatMult: qual_names.QN('__matmul__'),
}
_FIXED_RETTYPES = {
gast.Is: bool,
gast.IsNot: bool,
}
class StmtInferrer(gast.NodeVisitor):
"""Runs type inference on a single AST statement.
This visitor annotates most nodes with type information. It also sets types
for the symbols modified by this statement in its types_out property.
"""
def __init__(self, resolver, scope, namespace, closure_types, types_in):
self.resolver = resolver
self.scope = scope
self.namespace = namespace
self.closure_types = closure_types
self.types_in = types_in
self.new_symbols = {}
def visit(self, node):
types = super().visit(node)
if types is not None:
# TODO(mdan): Normalize by removing subtypes.
anno.setanno(node, anno.Static.TYPES, tuple(types))
return types
def visit_FunctionDef(self, node):
# Skip local function definitions. They are analyzed separately.
return None
def visit_Constant(self, node):
return self.resolver.res_value(self.namespace, node.value)
def visit_Tuple(self, node):
if isinstance(node.ctx, gast.Load):
for elt in node.elts:
self.visit(elt)
# TODO(mdan): Parameterize it.
return {Tuple}
assert isinstance(node.ctx, gast.Store)
# TODO(mdan): Implement tuple unpacking.
return None
def visit_List(self, node):
if isinstance(node.ctx, gast.Load):
el_types = []
for elt in node.elts:
el_types.append(self.visit(elt))
return {list}
raise NotImplementedError('list unpacking')
def visit_Set(self, node):
raise NotImplementedError()
def visit_Name(self, node):
name = anno.getanno(node, anno.Basic.QN)
if isinstance(node.ctx, gast.Load):
types = self.types_in.value.get(name, None)
if (types is None) and (name not in self.scope.bound):
if name in self.closure_types:
types = self.closure_types[name]
else:
types = self.resolver.res_name(self.namespace, name)
return types
elif isinstance(node.ctx, gast.Param):
type_name = anno.getanno(node.annotation, anno.Basic.QN, None)
types = self.resolver.res_arg(self.namespace, self.scope.function_name,
name, type_name)
if types is not None:
self.new_symbols[name] = types
return types
elif isinstance(node.ctx, gast.Store):
if self.rvalue is not None:
self.new_symbols[name] = self.rvalue
else:
# No type information, assume Any.
self.new_symbols[name] = {Any}
return self.rvalue
assert False, 'unknown ctx'
def visit_Call(self, node):
f_name = anno.getanno(node.func, anno.Basic.QN)
kwargs = [self.visit(kw.value) for kw in node.keywords if kw.arg is None]
keywords = {
kw.arg: self.visit(kw.value)
for kw in node.keywords
if kw.arg is not None
}
is_starred = [isinstance(a, gast.Starred) for a in node.args]
args = [
self.visit(a)
for a, starred in zip(node.args, is_starred)
if not starred
]
starargs = [
self.visit(a.value)
for a, starred in zip(node.args, is_starred)
if starred
]
if f_name in self.scope.bound:
# Don't attempt external resolution of local functions.
# TODO(mdan): Use type annotations of the local definition.
return None
return self.resolver.res_call(
self.namespace, f_name, None, args, keywords, starargs, kwargs)
def visit_Index(self, node):
return self.visit(node.value)
def visit_Assign(self, node):
self.rvalue = self.visit(node.value)
for t in node.targets:
self.visit(t)
self.rvalue = None
def visit_Subscript(self, node):
val_type = self.visit(node.value)
slice_type = self.visit(node.slice)
if val_type is None or slice_type is None:
return None
return self.resolver.res_call(self.namespace, _GETITEM, val_type,
(slice_type,), {}, (), ())
def visit_Compare(self, node):
right_types = [self.visit(c) for c in node.comparators]
op_types = [type(o) for o in node.ops]
if len(op_types) > 1:
raise NotImplementedError('chained comparisons')
assert len(right_types) == 1
left_type = self.visit(node.left)
right_type, = right_types
op_type, = op_types
if left_type is None or right_type is None:
return None
f_name = _HANDLERS.get(op_type, None)
if f_name is None:
# Python doesn't allow overriding these operators. Their return types are
# fixed.
return {_FIXED_RETTYPES[op_type]}
return self.resolver.res_call(self.namespace, _HANDLERS[op_type],
left_type, (right_type,), {}, (), ())
def visit_BinOp(self, node):
left_type = self.visit(node.left)
right_type = self.visit(node.right)
if left_type is None or right_type is None:
return None
# TODO(mdan): This does not fully follow Python operator semantics.
# For example, in `a + b` Python will try `a.__add__`, but also `b.__radd__`
return self.resolver.res_call(self.namespace, _HANDLERS[type(node.op)],
left_type, (right_type,), {}, (), ())
class Analyzer(cfg.GraphVisitor):
"""CFG visitor that propagates type information across statements."""
def __init__(self, graph, resolver, namespace, scope, closure_types):
"""Creates a new analyzer.
Args:
graph: cfg.Graph
resolver: Resolver
namespace: Dict[str, Any]
scope: activity.Scope
closure_types: Dict[QN, Set]
"""
super(Analyzer, self).__init__(graph)
self.resolver = resolver
self.namespace = namespace
self.scope = scope
self.closure_types = closure_types
def init_state(self, _):
return _SymbolTable()
def _update_closure_types(self, ast_node, types):
existing_types = anno.getanno(ast_node, anno.Static.CLOSURE_TYPES, None)
if existing_types is None:
existing_types = {}
anno.setanno(ast_node, anno.Static.CLOSURE_TYPES, existing_types)
for k, v in types.value.items():
if k in existing_types:
existing_types[k].update(v)
else:
existing_types[k] = set(v)
def visit_node(self, node):
prev_types_out = self.out[node]
types_in = _SymbolTable()
for n in node.prev:
types_in |= self.out[n]
types_out = _SymbolTable(types_in)
ast_node = node.ast_node
inferrer = StmtInferrer(
self.resolver, self.scope, self.namespace, self.closure_types, types_in)
inferrer.visit(ast_node)
types_out.value.update(inferrer.new_symbols)
reaching_fndefs = anno.getanno(ast_node, anno.Static.DEFINED_FNS_IN)
node_scope = anno.getanno(ast_node, anno.Static.SCOPE, None)
if node_scope is not None:
# TODO(mdan): Check that it's actually safe to skip nodes without scope.
reads = {str(qn) for qn in node_scope.read}
for def_node in reaching_fndefs:
if def_node.name in reads:
self._update_closure_types(def_node, types_out)
self.in_[node] = types_in
self.out[node] = types_out
return prev_types_out != types_out
class FunctionVisitor(transformer.Base):
"""AST visitor that applies type inference to each function separately."""
def __init__(self, source_info, graphs, resolver):
super(FunctionVisitor, self).__init__(source_info)
self.graphs = graphs
self.resolver = resolver
def visit_FunctionDef(self, node):
subgraph = self.graphs[node]
scope = anno.getanno(node, annos.NodeAnno.ARGS_AND_BODY_SCOPE)
closure_types = anno.getanno(node, anno.Static.CLOSURE_TYPES, {})
analyzer = Analyzer(
subgraph, self.resolver, self.ctx.info.namespace, scope, closure_types)
analyzer.visit_forward()
# Recursively process any remaining subfunctions.
node.body = self.visit_block(node.body)
return node
def resolve(node, source_info, graphs, resolver):
"""Performs type inference.
Args:
node: ast.AST
source_info: transformer.SourceInfo
graphs: Dict[ast.FunctionDef, cfg.Graph]
resolver: Resolver
Returns:
ast.AST
"""
visitor = FunctionVisitor(source_info, graphs, resolver)
node = visitor.visit(node)
return node
| 1.945313
| 2
|
ERAutomation/steps/manager_login_steps.py
|
dboudreau4/ReimbursementSystemAutomation
| 0
|
12777616
|
<filename>ERAutomation/steps/manager_login_steps.py
from behave import given, when, then
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@given('The Manager is on the Manager Login Page')
def open_manager_login(context):
context.driver.get("C:\\Users\\david\\Documents\\ExpenseReimbursementFrontend\\managerlogin.html")
@when('The Manager types {username} into the username bar')
def type_m_username(context, username: str):
context.manager_login.username().send_keys(username)
@when('The Manager types {password} into the password bar')
def type_m_password(context, password: str):
context.manager_login.password().send_keys(password)
@when('The Manager clicks the login button')
def m_login(context):
context.manager_login.login_button().click()
@then('The page title should be {title}')
def verify_on_manager_page(context, title: str):
WebDriverWait(context.driver, 3).until(
EC.title_is(title))
assert context.driver.title == title
| 2.53125
| 3
|
third_party/maya/lib/usdMaya/testenv/testUsdMayaAdaptorGeom.py
|
YuqiaoZhang/USD
| 88
|
12777617
|
#!/pxrpythonsubst
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from maya import cmds
from maya import standalone
import os
import unittest
from pxr import Usd, UsdGeom
class testUsdMayaAdaptorGeom(unittest.TestCase):
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
@classmethod
def setUpClass(cls):
standalone.initialize('usd')
cmds.loadPlugin('pxrUsd')
usdFile = os.path.abspath('UsdAttrs.usda')
cmds.usdImport(file=usdFile, shadingMode='none')
def testImportImageable(self):
"""
Tests that UsdGeomImageable.purpose is properly imported.
"""
# Testing for the different purpose attributes
self.assertEqual(cmds.getAttr('pCube1.USD_ATTR_purpose'), 'default')
self.assertEqual(cmds.getAttr('pCube2.USD_ATTR_purpose'), 'render')
self.assertEqual(cmds.getAttr('pCube3.USD_ATTR_purpose'), 'proxy')
# pCube4 does not have a purpose attribute
self.assertFalse(cmds.objExists('pCube4.USD_ATTR_purpose'))
self.assertFalse(cmds.objExists('pCube4.USD_purpose')) # alias
def testExportImageable(self):
"""
Test that UsdGeomImageable.purpose is properly exported.
"""
newUsdFilePath = os.path.abspath('UsdAttrsNew.usda')
cmds.usdExport(file=newUsdFilePath, shadingMode='none')
newUsdStage = Usd.Stage.Open(newUsdFilePath)
# Testing the exported purpose attributes
geom1 = UsdGeom.Imageable(newUsdStage.GetPrimAtPath('/World/pCube1'))
self.assertEqual(geom1.GetPurposeAttr().Get(), 'default')
geom2 = UsdGeom.Imageable(newUsdStage.GetPrimAtPath('/World/pCube2'))
self.assertEqual(geom2.GetPurposeAttr().Get(), 'render')
geom3 = UsdGeom.Imageable(newUsdStage.GetPrimAtPath('/World/pCube3'))
self.assertEqual(geom3.GetPurposeAttr().Get(), 'proxy')
# Testing that there is no authored attribute
geom4 = UsdGeom.Imageable(newUsdStage.GetPrimAtPath('/World/pCube4'))
self.assertFalse(geom4.GetPurposeAttr().HasAuthoredValue())
if __name__ == '__main__':
unittest.main(verbosity=2)
| 2.015625
| 2
|
examples/QuantosDataService/runService.py
|
hzypage/TestPy
| 18
|
12777618
|
# encoding: UTF-8
"""
Scheduled service that can run unattended; it automatically downloads and updates historical market data into the database every day.
"""
import datetime as ddt
from dataService import *
if __name__ == '__main__':
taskCompletedDate = None
# Pick the daily task download time; randomizing it avoids every user hitting the data server at the same moment
taskTime = ddt.time(hour=17, minute=0)
# Enter the main loop
while True:
t = ddt.datetime.now()
# Once the task time is reached each day, perform the data download
if t.time() > taskTime and (taskCompletedDate is None or t.date() != taskCompletedDate):
# Create the API object
api = DataApi(DATA_SERVER)
info, msg = api.login(USERNAME, TOKEN)
if not info:
print u'Data server login failed, reason: %s' % msg
# Download the data
downloadAllMinuteBar(api)
# Record the date on which the task completed
taskCompletedDate = t.date()
else:
print u'Current time %s, task scheduled for %s' % (t, taskTime)
sleep(60)
| 2.453125
| 2
|
python/image_processing/closing.py
|
SayanGhoshBDA/code-backup
| 16
|
12777619
|
<filename>python/image_processing/closing.py
import cv2
import numpy as np
img = cv2.imread('closing.png',0)
kernel = np.ones((5,5),np.uint8)
closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
blackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)
gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
cv2.imshow('blackhat',blackhat)
cv2.imshow('image cv2',img)
cv2.imshow('closing',closing)
cv2.imshow('tophat',tophat)
cv2.imshow('gradient',gradient)
cv2.waitKey(0)
# to save the image
# cv2.imwrite('image1.png',img)
cv2.destroyAllWindows()
| 3.0625
| 3
|
seq_utils/construct_graph/construct_graph_v2.py
|
PTYin/ESRT
| 0
|
12777620
|
<filename>seq_utils/construct_graph/construct_graph_v2.py
import dgl
import pygraphviz as pyg
import torch
import pandas as pd
from argparse import ArgumentParser
import os
def construct_graph(df: pd.DataFrame):
users = df['userID'].unique()
items = df['asin'].unique()
item_map = dict(zip(items, range(len(users), len(users) + len(items))))
current_review_id = 0
current_query_id = 0
tier1_src_r, tier1_des_r = [], []
tier1_src_q, tier1_des_q = [], []
tier2_src, tier2_des = [], []
tier3_u, tier3_i = [], []
e_data = []
# words, reviews, users, items = [], [], [], []
for index, series in df.iterrows():
if index == 3:
break
if series['filter'] == 'Train':
# ------------------------Tier 1------------------------
# ********word->query********
current_words = eval(series['queryWords'])
tier1_src_q += current_words
tier1_des_q += [current_query_id] * len(current_words)
if len(eval(series['reviewText'])) != 0:
# ********word->review********
current_words = eval(series['reviewWords'])
tier1_src_r += current_words
tier1_des_r += [current_review_id] * len(current_words)
# ------------------------Tier 2------------------------
# ********review->entity********
tier2_src += [current_review_id, current_review_id]
tier2_des += [series['userID'], item_map[series['asin']]]
current_review_id += 1
# ------------------------Tier 3------------------------
# ********user<->item********
tier3_u.append(series['userID'])
tier3_i.append(item_map[series['asin']])
e_data.append(current_query_id)
current_query_id += 1
graph_data = {('word', 'composes', 'review'): (tier1_src_r, tier1_des_r),
('word', 'composes', 'query'): (tier1_src_q, tier1_des_q),
('review', 'profiles', 'entity'): (tier2_src, tier2_des),
('entity', 'purchased', 'entity'): (tier3_u, tier3_i),
('entity', 'purchased_by', 'entity'): (tier3_i, tier3_u)}
# num_nodes_dict = {'word': word_num, 'review': current_review_id, 'entity': len(users) + len(items)}
# graph: dgl.DGLHeteroGraph = dgl.heterograph(graph_data, num_nodes_dict)
graph: dgl.DGLHeteroGraph = dgl.heterograph(graph_data)
graph.edges['purchased'].data['q_id'] = torch.tensor(e_data, dtype=torch.long)
plot_meta(graph)
plot(graph)
return users, item_map, graph
def plot(graph: dgl.DGLHeteroGraph):
ag = pyg.AGraph(strict=False, directed=True)
for etype in [('word', 'composes', 'review'),
('word', 'composes', 'query'),
('review', 'profiles', 'entity'),
('entity', 'purchased', 'entity'),
('entity', 'purchased_by', 'entity')]:
src, des = graph.edges(etype=etype)
for i in range(len(src)):
ag.add_edge(etype[0] + repr(src[i]), etype[2] + repr(des[i]), label=etype[1])
ag.layout('dot')
ag.draw('graph.png', prog='dot')
def plot_meta(graph: dgl.DGLHeteroGraph):
meta_graph = graph.metagraph()
ag = pyg.AGraph(strict=False, directed=True)
for u, v, k in meta_graph.edges(keys=True):
ag.add_edge(u, v, label=k)
ag.layout('dot')
ag.draw('meta.png', prog='dot')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--dataset',
default='Musical_Instruments',
help='name of the dataset')
parser.add_argument('--processed_path',
default='/disk/yxk/processed/test/',
help="preprocessed path of the raw data")
config = parser.parse_args()
full_path = os.path.join(config.processed_path, "{}_full.csv".format(config.dataset))
full_df = pd.read_csv(full_path)
construct_graph(full_df)
| 2.609375
| 3
|
minpy/numpy/random.py
|
yuhonghong66/minpy
| 1,271
|
12777621
|
<reponame>yuhonghong66/minpy
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Mock numpy random module """
#pylint: disable= invalid-name
from __future__ import absolute_import
import sys
from minpy.numpy.mocking import Module
_old = {
'__name__' : __name__,
}
sys.modules[__name__] = Module(_old, 'random')
| 1.945313
| 2
|
maxentropy/maxentutils.py
|
bluerobe25/maxentropy
| 0
|
12777622
|
"""
Utility routines for the maximum entropy module.
Most of them are either Python replacements for the corresponding Fortran
routines or wrappers around matrices to allow the maxent module to
manipulate ndarrays, scipy sparse matrices, and PySparse matrices a
common interface.
Perhaps the logsumexp() function belongs under the utils/ branch where other
modules can access it more easily.
Copyright: <NAME>, 2003-2006
License: BSD-style (see LICENSE.txt in main source directory)
"""
# Future imports must come before any code in 2.5
from __future__ import division
from __future__ import print_function
from builtins import range
__author__ = "<NAME>"
__version__ = '2.0'
import random
import math
import cmath
import numpy as np
#from numpy import log, exp, asarray, ndarray, empty
import scipy.sparse
from scipy.misc import logsumexp
def feature_sampler(vec_f, auxiliary_sampler):
"""
A generator function for tuples (F, log_q_xs, xs)
Parameters
----------
vec_f : function
Pass `vec_f` as a (vectorized) function that operates on a vector of
samples xs = {x1,...,xn} and returns a feature matrix (m x n), where m
is some number of feature components.
auxiliary_sampler : function
Pass `auxiliary_sampler` as a function that returns a tuple
(xs, log_q_xs) representing a sample to use for sampling (e.g.
importance sampling) on the sample space of the model.
xs : list, 1d ndarray, or 2d matrix (n x d)
We require len(xs) == n.
Yields
------
tuples (F, log_q_xs, xs)
F : matrix (m x n)
log_q_xs : as returned by auxiliary_sampler
xs : as returned by auxiliary_sampler
"""
while True:
xs, log_q_xs = auxiliary_sampler()
F = vec_f(xs) # compute feature matrix from points
yield F, log_q_xs, xs
def dictsample(freq, size=None, return_probs=None):
"""
Create a sample of the given size from the specified discrete distribution.
Parameters
----------
freq : a dictionary
A mapping from values x_j in the sample space to probabilities (or
unnormalized frequencies).
size : a NumPy size parameter (like a shape tuple)
Something passable to NumPy as a size argument to np.random.choice(...)
return_probs : str or None, optional (default None)
None: don't return pmf values at each sample point
'prob': return pmf values at each sample point
'logprob': return log pmf values at each sample point
Returns
-------
Returns a sample of the given size from the keys of the given
dictionary `freq` with probabilities given according to the
values (normalized to 1). Optionally returns the probabilities
under the distribution of each observation.
Example
-------
>>> freq = {'a': 10, 'b': 15, 'c': 20}
>>> dictsample(freq, size=10)
array([c, b, b, b, b, b, c, b, b, b], dtype=object)
"""
n = len(freq)
probs = np.fromiter(freq.values(), float)
probs /= probs.sum()
indices = np.random.choice(np.arange(n), size=size, p=probs)
labels = np.empty(n, dtype=object)
for i, label in enumerate(freq.keys()):
labels[i] = label
sample = labels[indices]
if return_probs is None:
return sample
sampleprobs = probs[indices]
if return_probs == 'prob':
return sample, sampleprobs
elif return_probs == 'logprob':
return sample, np.log(sampleprobs)
else:
raise ValueError('return_probs must be "prob", "logprob", or None')
def dictsampler(freq, size=None, return_probs=None):
"""
A generator of samples of the given size from the specified discrete
distribution.
Parameters
----------
freq : a dictionary
A mapping from values x_j in the sample space to probabilities (or
unnormalized frequencies).
size : a NumPy size parameter (like a shape tuple)
Something passable to NumPy as a size argument to np.random.choice(...)
return_probs : str or None, optional (default None)
None: don't return pmf values at each sample point
'prob': return pmf values at each sample point
'logprob': return log pmf values at each sample point
Returns
-------
Returns a sample of the given size from the keys of the given
dictionary `freq` with probabilities given according to the
values (normalized to 1). Optionally returns the probabilities
under the distribution of each observation.
Example
-------
>>> freq = {'a': 10, 'b': 15, 'c': 20}
>>> g = dictsampler(freq, size=10)
>>> next(g)
array([c, b, b, b, b, b, c, b, b, b], dtype=object)
"""
while True:
yield dictsample(freq, size=size, return_probs=return_probs)
def auxiliary_sampler_scipy(auxiliary, dimensions=1, n=10**5):
"""
Sample (once) from the given scipy.stats distribution
Parameters
----------
auxiliary : a scipy.stats distribution object (rv_frozen)
Returns
-------
sampler : function
sampler(), when called with no parameters, returns a tuple
(xs, log_q_xs), where:
xs : matrix (n x d): [x_1, ..., x_n]: a sample
log_q_xs: log pdf values under the auxiliary sampler for each x_j
"""
def sampler():
xs = auxiliary.rvs(size=(n, dimensions))
log_q_xs = np.log(auxiliary.pdf(xs.T)).sum(axis=0)
return (xs, log_q_xs)
return sampler
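# Illustrative usage sketch (comment only; assumes a frozen scipy.stats distribution,
# e.g. a standard normal, is available):
# from scipy.stats import norm
# sampler = auxiliary_sampler_scipy(norm(0, 1), dimensions=2, n=1000)
# xs, log_q_xs = sampler()   # xs has shape (1000, 2), log_q_xs has shape (1000,)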
def _logsumexpcomplex(values):
"""A version of logsumexp that should work if the values passed are
complex-numbered, such as the output of robustarraylog(). So we
expect:
cmath.exp(logsumexpcomplex(robustarraylog(values))) ~= sum(values,axis=0)
except for a small rounding error in both real and imag components.
The output is complex. (To recover just the real component, use
A.real, where A is the complex return value.)
"""
if len(values) == 0:
return 0.0
iterator = iter(values)
# Get the first element
while True:
# Loop until we have a value greater than -inf
try:
b_i = next(iterator) + 0j
except StopIteration:
# empty
return float('-inf')
if b_i.real != float('-inf'):
break
# Now the rest
for a_i in iterator:
a_i += 0j
if b_i.real > a_i.real:
increment = robustlog(1.+cmath.exp(a_i - b_i))
# print "Increment is " + str(increment)
b_i = b_i + increment
else:
increment = robustlog(1.+cmath.exp(b_i - a_i))
# print "Increment is " + str(increment)
b_i = a_i + increment
return b_i
def logsumexp_naive(values):
"""For testing logsumexp(). Subject to numerical overflow for large
values (e.g. 720).
"""
s = 0.0
for x in values:
s += math.exp(x)
return math.log(s)
def robustlog(x):
"""Returns log(x) if x > 0, the complex log cmath.log(x) if x < 0,
or float('-inf') if x == 0.
"""
if x == 0.:
return float('-inf')
elif type(x) is complex or (type(x) is float and x < 0):
return cmath.log(x)
else:
return math.log(x)
def _robustarraylog(x):
""" An array version of robustlog. Operates on a real array x.
"""
arraylog = np.empty(len(x), np.complex64)
for i in range(len(x)):
xi = x[i]
if xi > 0:
arraylog[i] = math.log(xi)
elif xi == 0.:
arraylog[i] = float('-inf')
else:
arraylog[i] = cmath.log(xi)
return arraylog
# def arrayexp(x):
# """
# OBSOLETE?
#
# Returns the elementwise antilog of the real array x.
#
# We try to exponentiate with np.exp() and, if that fails, with
# python's math.exp(). np.exp() is about 10 times faster but throws
# an OverflowError exception for numerical underflow (e.g. exp(-800),
# whereas python's math.exp() just returns zero, which is much more
# helpful.
# """
# try:
# ex = np.exp(x)
# except OverflowError:
# print("Warning: OverflowError using np.exp(). Using slower Python"\
# " routines instead!")
# ex = np.empty(len(x), float)
# for j in range(len(x)):
# ex[j] = math.exp(x[j])
# return ex
#
# def arrayexpcomplex(x):
# """
# OBSOLETE?
#
# Returns the elementwise antilog of the vector x.
#
# We try to exponentiate with np.exp() and, if that fails, with python's
# math.exp(). np.exp() is about 10 times faster but throws an
# OverflowError exception for numerical underflow (e.g. exp(-800),
# whereas python's math.exp() just returns zero, which is much more
# helpful.
#
# """
# try:
# ex = np.exp(x).real
# except OverflowError:
# ex = np.empty(len(x), float)
# try:
# for j in range(len(x)):
# ex[j] = math.exp(x[j])
# except TypeError:
# # Perhaps x[j] is complex. If so, try using the complex
# # exponential and returning the real part.
# for j in range(len(x)):
# ex[j] = cmath.exp(x[j]).real
# return ex
def sample_wr(population, k):
"""Chooses k random elements (with replacement) from a population.
(From the Python Cookbook).
"""
n = len(population)
_random, _int = random.random, int # speed hack
return [population[_int(_random() * n)] for i in range(k)]
def evaluate_feature_matrix(feature_functions,
xs,
vectorized=True,
format='csc_matrix',
dtype=float,
verbose=False):
"""Evaluate a (m x n) matrix of features `F` of the sample `xs` as:
F[i, :] = f_i(xs[:])
if xs is 1D, or as:
F[i, j] = f_i(xs[:, j])
if xs is 2D, for each feature function `f_i` in `feature_functions`.
Parameters
----------
feature_functions : a list of m feature functions f_i.
xs : either:
1. a (n x d) matrix representing n d-dimensional
observations xs[j, :] for j=1,...,n.
2. a 1d array or sequence (e.g list) of observations xs[j]
for j=1,...,n.
vectorized : bool (default True)
If True, the feature functions f_i are assumed to be vectorized;
then these will be passed all observations xs at once, in turn.
If False, the feature functions f_i will be evaluated one at a time.
format : str (default 'csc_matrix')
Options: 'ndarray', 'csc_matrix', 'csr_matrix', 'dok_matrix'.
If you have enough memory, it may be faster to create a dense
ndarray and then construct a e.g. CSC matrix from this.
Returns
-------
F : (m x n) matrix (in the given format: ndarray / csc_matrix / etc.)
Matrix of evaluated features.
"""
m = len(feature_functions)
if isinstance(xs, np.ndarray) and xs.ndim == 2:
n, d = xs.shape
if d == 1 and vectorized:
# xs may be a column vector, i.e. (n x 1) array.
# In this case, reshape it to a 1d array. This
# makes it easier to define functions that
# operate on only one variable (the usual case)
# given that sklearn's interface now forces 2D
# arrays X when calling .transform(X) and .fit(X).
xs = np.reshape(xs, n)
else:
n, d = len(xs), 1
if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
F = scipy.sparse.dok_matrix((m, n), dtype=dtype)
elif format == 'ndarray':
F = np.empty((m, n), dtype=dtype)
else:
raise ValueError('matrix format not recognized')
for i, f_i in enumerate(feature_functions):
if verbose:
print('Computing feature {i} of {m} ...'.format(i=i, m=m))
if vectorized:
F[i::m, :] = f_i(xs)
else:
for j in range(n):
f_i_x = f_i(xs[j])
if f_i_x != 0:
F[i,j] = f_i_x
if format == 'csc_matrix':
return F.tocsc()
elif format == 'csr_matrix':
return F.tocsr()
else:
return F
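# Illustrative example (comment only; a minimal sketch with two vectorized features):
# fs = [lambda x: x, lambda x: x ** 2]
# F = evaluate_feature_matrix(fs, np.array([1., 2., 3.]), format='ndarray')
# # F == array([[1., 2., 3.],
# #             [1., 4., 9.]])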
# def densefeatures(f, x):
# """Returns a dense array of non-zero evaluations of the vector
# functions fi in the list f at the point x.
# """
#
# return np.array([fi(x) for fi in f])
# def densefeaturematrix(f, sample, verbose=False):
# """Compute an (m x n) dense array of non-zero evaluations of the
# scalar functions fi in the list f at the points x_1,...,x_n in the
# list sample.
# """
#
# # Was: return np.array([[fi(x) for fi in f] for x in sample])
#
# m = len(f)
# n = len(sample)
#
# F = np.empty((m, n), float)
# for i in range(m):
# f_i = f[i]
# for j in range(n):
# x = sample[j]
# F[i,j] = f_i(x)
# return F
# def sparsefeatures(f, x, format='csc_matrix'):
# """Compute an mx1 sparse matrix of non-zero evaluations of the
# scalar functions f_1,...,f_m in the list f at the point x.
#
# """
# m = len(f)
# if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
# sparsef = scipy.sparse.dok_matrix((m, 1))
# else:
# raise ValueError("sparse matrix format not recognized")
#
# for i in range(m):
# f_i_x = f[i](x)
# if f_i_x != 0:
# sparsef[i, 0] = f_i_x
#
# if format == 'csc_matrix':
# print("Converting to CSC matrix ...")
# return sparsef.tocsc()
# elif format == 'csr_matrix':
# print("Converting to CSR matrix ...")
# return sparsef.tocsr()
# else:
# return sparsef
# def sparsefeaturematrix(f, sample, format='csc_matrix', verbose=False):
# """Compute an (m x n) sparse matrix of non-zero evaluations of the
# scalar functions f_1,...,f_m in the list f at the points x_1,...,x_n
# in the sequence 'sample'.
#
# """
# m = len(f)
# n = len(sample)
# if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
# sparseF = scipy.sparse.dok_matrix((m, n))
# else:
# raise ValueError("sparse matrix format not recognized")
#
# for i in range(m):
# if verbose:
# print('Computing feature {i} of {m}'.format(i=i, m=m))
# f_i = f[i]
# for j in range(n):
# x = sample[j]
# f_i_x = f_i(x)
# if f_i_x != 0:
# sparseF[i,j] = f_i_x
#
# if format == 'csc_matrix':
# return sparseF.tocsc()
# elif format == 'csr_matrix':
# return sparseF.tocsr()
# else:
# return sparseF
# def sparsefeaturematrix_vectorized(feature_functions, xs, format='csc_matrix'):
# """
# Evaluate a (m x n) matrix of features `F` of the sample `xs` as:
#
# F[i, j] = f_i(xs[:, j])
#
# Parameters
# ----------
# feature_functions : a list of feature functions f_i.
#
# xs : either:
# 1. a (d x n) matrix representing n d-dimensional
# observations xs[: ,j] for j=1,...,n.
# 2. a 1d array or sequence (e.g list) of observations xs[j]
# for j=1,...,n.
#
# The feature functions f_i are assumed to be vectorized. These will be
# passed all observations xs at once, in turn.
#
# Note: some samples may be more efficient / practical to compute
# features one sample observation at a time (e.g. generated). For these
# cases, use sparsefeaturematrix().
#
# Only pass sparse=True if you need the memory savings. If you want a
# sparse matrix but have enough memory, it may be faster to
# pass dense=True and then construct a CSC matrix from the dense NumPy
# array.
#
# """
# m = len(feature_functions)
#
# if isinstance(xs, np.ndarray) and xs.ndim == 2:
# d, n = xs.shape
# else:
# n = len(xs)
# if not sparse:
# F = np.empty((m, n), float)
# else:
# import scipy.sparse
# F = scipy.sparse.lil_matrix((m, n), dtype=float)
#
# for i, f_i in enumerate(feature_functions):
# F[i::m, :] = f_i(xs)
#
# if format == 'csc_matrix':
# return F.tocsc()
# elif format == 'csr_matrix':
# return F.tocsr()
# else:
# return F
def old_vec_feature_function(feature_functions, sparse=False):
"""
Create and return a vectorized function `features(xs)` that
evaluates an (n x m) matrix of features `F` of the sample `xs` as:
F[j, i] = f_i(xs[:, j])
Parameters
----------
feature_functions : a list of feature functions f_i.
`xs` will be passed to these functions as either:
1. an (n x d) matrix representing n d-dimensional
observations xs[j, :] for j=1,...,n.
2. a 1d array or sequence (e.g list) of observations xs[j]
for j=1,...,n.
The feature functions f_i are assumed to be vectorized. These will be
passed all observations xs at once, in turn.
Note: some samples may be more efficient / practical to compute
features of one sample observation at a time (e.g. generated).
Only pass sparse=True if you need the memory savings. If you want a
sparse matrix but have enough memory, it may be faster to
pass sparse=False and then construct a CSC matrix from the dense NumPy
array.
"""
if sparse:
import scipy.sparse
m = len(feature_functions)
def vectorized_features(xs):
if isinstance(xs, np.ndarray) and xs.ndim == 2:
n, d = xs.shape
else:
n = len(xs)
if not sparse:
F = np.empty((n, m), float)
else:
F = scipy.sparse.lil_matrix((n, m), dtype=float)
# Equivalent:
# for i, f_i in enumerate(feature_functions):
# for k in range(len(xs)):
# F[len(feature_functions)*k+i, :] = f_i(xs[k])
for i, f_i in enumerate(feature_functions):
F[:, i::m] = f_i(xs)
if not sparse:
return F
else:
return scipy.sparse.csc_matrix(F)
return vectorized_features
def dotprod(u,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Returns the dot product of the (1 x m) sparse array u with the
(m x 1) (dense) numpy array v.
"""
#print "Taking the dot product u.v, where"
#print "u has shape " + str(u.shape)
#print "v = " + str(v)
try:
dotprod = np.array([0.0]) # a 1x1 array. Required by spmatrix.
u.matvec(v, dotprod)
return dotprod[0] # extract the scalar
except AttributeError:
# Assume u is a dense array.
return np.dot(u,v)
def innerprod(A,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Returns the inner product of the (m x n) dense or sparse matrix A
with the n-element dense array v. This is a wrapper for A.dot(v) for
dense arrays and spmatrix objects, and for A.matvec(v, result) for
PySparse matrices.
"""
# We assume A is sparse.
(m, n) = A.shape
vshape = v.shape
try:
(p,) = vshape
except ValueError:
(p, q) = vshape
if n != p:
raise TypeError("matrix dimensions are incompatible")
if isinstance(v, np.ndarray):
try:
# See if A is sparse
A.matvec
except AttributeError:
# It looks like A is dense
return np.dot(A, v)
else:
# Assume A is sparse
if scipy.sparse.isspmatrix(A):
innerprod = A.matvec(v) # This returns a float32 type. Why???
return innerprod
else:
# Assume PySparse format
innerprod = np.empty(m, float)
A.matvec(v, innerprod)
return innerprod
elif scipy.sparse.isspmatrix(v):
return A * v
else:
raise TypeError("unsupported types for inner product")
def innerprodtranspose(A,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Computes A^T V, where A is a dense or sparse matrix and V is a numpy
array. If A is sparse, V must be a rank-1 array, not a matrix. This
function is efficient for large matrices A. This is a wrapper for
A.T.dot(v) for dense arrays and spmatrix objects, and for
A.matvec_transp(v, result) for pysparse matrices.
"""
(m, n) = A.shape
#pdb.set_trace()
if hasattr(A, 'matvec_transp'):
# A looks like a PySparse matrix
if len(v.shape) == 1:
innerprod = np.empty(n, float)
A.matvec_transp(v, innerprod)
else:
raise TypeError("innerprodtranspose(A,v) requires that v be "
"a vector (rank-1 dense array) if A is sparse.")
return innerprod
elif scipy.sparse.isspmatrix(A):
return (A.conj().transpose() * v).transpose()
else:
# Assume A is dense
if isinstance(v, np.ndarray):
# v is also dense
if len(v.shape) == 1:
# We can't transpose a rank-1 matrix into a row vector, so
# we reshape it.
vm = v.shape[0]
vcolumn = np.reshape(v, (1, vm))
x = np.dot(vcolumn, A)
return np.reshape(x, (n,))
else:
#(vm, vn) = v.shape
# Assume vm == m
x = np.dot(np.transpose(v), A)
return np.transpose(x)
else:
raise TypeError("unsupported types for inner product")
def rowmeans(A):
"""
This is a wrapper for general dense or sparse dot products.
It is only necessary as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (m x 1) vector representing the mean of the rows of A,
which can be an (m x n) sparse or dense matrix.
>>> a = np.array([[1,2],[3,4]], float)
>>> rowmeans(a)
array([ 1.5, 3.5])
"""
if type(A) is np.ndarray:
return A.mean(1)
else:
# Assume it's sparse
try:
n = A.shape[1]
except AttributeError:
raise TypeError("rowmeans() only works with sparse and dense "
"arrays")
rowsum = innerprod(A, np.ones(n, float))
return rowsum / float(n)
def columnmeans(A):
"""
This is a wrapper for general dense or sparse dot products.
It is only necessary as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (1 x n) vector with the column averages of A, which can
be an (m x n) sparse or dense matrix.
>>> a = np.array([[1,2],[3,4]],'d')
>>> columnmeans(a)
array([ 2., 3.])
"""
if type(A) is np.ndarray:
return A.mean(0)
else:
# Assume it's sparse
try:
m = A.shape[0]
except AttributeError:
raise TypeError("columnmeans() only works with sparse and dense "
"arrays")
columnsum = innerprodtranspose(A, np.ones(m, float))
return columnsum / float(m)
def columnvariances(A):
"""
This is a wrapper for general dense or sparse dot products.
It is not necessary except as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (1 x n) vector with unbiased estimators for the column
variances for each column of the (m x n) sparse or dense matrix A. (The
normalization is by (m - 1).)
>>> a = np.array([[1,2], [3,4]], 'd')
>>> columnvariances(a)
array([ 2., 2.])
"""
if type(A) is np.ndarray:
return np.std(A,0)**2
else:
try:
m = A.shape[0]
except AttributeError:
raise TypeError("columnvariances() only works with sparse "
"and dense arrays")
means = columnmeans(A)
return columnmeans((A-means)**2) * (m/(m-1.0))
def flatten(a):
"""Flattens the sparse matrix or dense array/matrix 'a' into a
1-dimensional array
"""
if scipy.sparse.isspmatrix(a):
return a.A.flatten()
else:
return np.asarray(a).flatten()
class DivergenceError(Exception):
"""Exception raised if the entropy dual has no finite minimum.
"""
def __init__(self, message):
self.message = message
Exception.__init__(self)
def __str__(self):
return repr(self.message)
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
| 2.71875
| 3
|
tests/persistence/test_persistence.py
|
daniel-thom/ditto
| 44
|
12777623
|
import six
if six.PY2:
from backports import tempfile
else:
import tempfile
import pytest as pt
import os
from ditto.readers.opendss.read import Reader as Reader_opendss
from ditto.readers.cyme.read import Reader as Reader_cyme
from ditto.writers.json.write import Writer
from ditto.store import Store
import logging
import json_tricks
logger = logging.getLogger(__name__)
test_list = os.walk('data')
for (dirpath, dirname, files) in test_list:
if files !=[]:
reader_type = dirpath.split('\\')[2]
m = Store()
if reader_type == 'opendss':
reader = Reader_opendss(master_file = os.path.join('..',dirpath,'master.dss'), buscoordinates_file = os.path.join('..',dirpath,'buscoord.dss'))
elif reader_type == 'cyme':
reader = Reader_cyme(data_folder_path=os.path.join('..',dirpath))
else:
#Update with other tests if they get added to the persistence tests
continue
reader.parse(m)
m.set_names()
output_path = tempfile.TemporaryDirectory()
w = Writer(output_path=output_path.name, log_path=output_path.name)
w.write(m)
original = json_tricks.load(open(os.path.join(dirpath,files[0]),'r'))
update = json_tricks.load(open(os.path.join(output_path.name,'Model.json'),'r'))
try:
assert update["model"] == original["model"]
except AssertionError as e:
logger.error("Model differs for usecase {loc}".format(loc = dirpath))
e.args += ("Model differs for usecase {loc}".format(loc = dirpath),)
raise
| 1.976563
| 2
|
2522.py
|
BACCHUS-S/Baekjoon
| 0
|
12777624
|
<filename>2522.py
i = int(input())
for j in range(1,i+1):
print(" "*(i-j) + "*"*j)
for k in range(1,i):
print(" "*k + "*"*(i-k))
| 3.25
| 3
|
Utils/process_valencic04.py
|
karllark/fuv_mir_rv_relationship
| 0
|
12777625
|
<filename>Utils/process_valencic04.py
import glob
# import numpy as np
from measure_extinction.extdata import ExtData
if __name__ == "__main__":
fpath = "data/valencic04/"
files = glob.glob(f"{fpath}*bin.fits")
for fname in files:
ifile = fname
ext = ExtData(ifile)
# get A(V) values
ext.calc_AV()
if "AV" in ext.columns.keys():
ext.calc_RV()
ext.type = "elx"
ext.type_rel_band = "V"
ofile = ifile.replace("valencic04/", "val04_")
ext.save(ofile)
| 2.125
| 2
|
pyfitterbap/entry_points/crc.py
|
jetperch/fitterbap
| 21
|
12777626
|
<filename>pyfitterbap/entry_points/crc.py<gh_stars>10-100
# Copyright 2020-2021 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pyfitterbap import crc
def parser_config(p):
"""compute CRC."""
p.add_argument('--data',
help='The CRC data.')
return on_cmd
def on_cmd(args):
if args.data is not None:
x = np.array([int(x, 0) for x in args.data.split(',')], dtype=np.uint8)
y = crc.crc32(0, x)
print(f'0x{y:08x}')
return 0
return 1
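# Illustrative sketch (assumption: a standalone argparse parser mirrors how the
# fitterbap CLI wires this entry point):
# import argparse
# p = argparse.ArgumentParser()
# handler = parser_config(p)
# handler(p.parse_args(['--data', '0x01,0x02,0x03']))  # prints the CRC-32 of bytes 1, 2, 3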
| 2.609375
| 3
|
pyatlas/unit_tests/test_identifier_converters.py
|
yazad3/atlas
| 188
|
12777627
|
import unittest
from pyatlas import identifier_converters
class IdentifierConvertersTest(unittest.TestCase):
def setUp(self):
pass
def test_osm_conversion(self):
atlas_id = 222222000000
osm_id = 222222
self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))
atlas_id = 123001002
osm_id = 123
self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))
atlas_id = 3101220
osm_id = 3
self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))
atlas_id = -222222000001
osm_id = 222222
self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))
def test_country_code_conversion(self):
atlas_id = 222222000000
country_code = 0
self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))
atlas_id = 123001002
country_code = 1
self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))
atlas_id = 3101220
country_code = 101
self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))
atlas_id = -222222002001
country_code = 2
self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))
def test_way_section_conversion(self):
atlas_id = 222222000000
way_section = 0
self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))
atlas_id = 123001002
way_section = 2
self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))
atlas_id = 3101220
way_section = 220
self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))
atlas_id = -222222002001
way_section = 1
self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))
| 2.71875
| 3
|
bkcore/strdistlib.py
|
accidental-bebop/BkStringMatch
| 1
|
12777628
|
"""
String distance algorithm implementations
"""
# --- Imports
# --- String Distance Algorithms
def calculate_levenshtein_distance(string1, string2):
"""
Compute the minimum number of substitutions, deletions, and additions
needed to change string1 into string2.
Parameters
----------
string1 : str
string to calculate distance from
string2 : str
string to calculate distance to
Return value
------------
prev[-1] : int
levenshtein distance
"""
if len(string1) < len(string2):
return calculate_levenshtein_distance(string2, string1)
if not string2:
return len(string1)
prev = list(range(len(string2) + 1))
for i, curr1 in enumerate(string1):
curr = [i + 1]
for j, curr2 in enumerate(string2):
insertions = prev[j + 1] + 1
deletions = curr[j] + 1
substitutions = prev[j] + (curr1 != curr2)
curr.append(min(insertions, deletions, substitutions))
prev = curr
return prev[-1]
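# For instance, calculate_levenshtein_distance('kitten', 'sitting') returns 3
# (substitute k->s, substitute e->i, insert g).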
def calculate_lc_substring_length(string1, string2):
"""
Calculate the length of the longest common substring, i.e. the maximum
number of consecutive symbols shared between the two input strings.
Parameters
----------
string1 : str
string to calculate distance from
string2 : str
string to calculate distance to
Return value
------------
lcsd : int
longest common substring length
"""
matrix = [[0] * (1 + len(string2)) for i in range(1 + len(string1))]
lcsd = 0
for cursor1 in range(1, 1 + len(string1)):
for cursor2 in range(1, 1 + len(string2)):
if string1[cursor1 - 1] == string2[cursor2 - 1]:
matrix[cursor1][cursor2] = matrix[cursor1 - 1][cursor2 - 1] + 1
if matrix[cursor1][cursor2] > lcsd:
lcsd = matrix[cursor1][cursor2]
else:
matrix[cursor1][cursor2] = 0
return lcsd
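# For instance, calculate_lc_substring_length('academy', 'abracadabra') returns 4,
# since the longest common substring is 'acad'.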
def calculate_hamming_distance(string1, string2):
"""
Count the number of positions at which the two strings match, i.e. the
complement of the Hamming distance between string1 and string2.
Parameters
----------
string1 : str
string to calculate distance from
string2 : str
string to calculate distance to
Return value
------------
hamming : int
number of matching positions (len(string1) minus the Hamming distance)
Errors
------
returns the string 'Error: different string lengths' if the lengths of
string1 and string2 differ
"""
hamming = 0
s1len = len(string1)
s2len = len(string2)
if s1len == s2len:
for i in range(0, s1len):
if string1[i] == string2[i]:
hamming = hamming + 1
else:
return 'Error: different string lengths'
return hamming
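# For instance, calculate_hamming_distance('karolin', 'kathrin') returns 4:
# the strings agree in 4 of their 7 positions (the classic Hamming distance is 3).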
def generate_q_gram_matrix(string1, string2, q_value):
"""
Generate a vector of q-gram occurences in two strings given a
window size of q.
Parameters
----------
string1 : str
string to calculate distance from
string2 : str
string to calculate distance to
q_value : int
size of q-gram window
Return values
-------------
q_gram_matrix1, q_gram_matrix2 : array
q-gram matrices of respective input strings
Errors
------
returns an error string if q_value is greater than the length of
string1 or string2
"""
s1len = len(string1)
s2len = len(string2)
i = 0
j = 0
q_gram_matrix1 = []
q_gram_matrix2 = []
if q_value > s1len or q_value > s2len:
return 'Error: q_value larger than string length'
for i in range(s1len - q_value + 1):
q_gram_matrix1.append(string1[i:i + q_value])
for j in range(s2len - q_value + 1):
q_gram_matrix2.append(string2[j:j + q_value])
return q_gram_matrix1, q_gram_matrix2
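# For instance, generate_q_gram_matrix('abcd', 'bcde', 2) returns
# (['ab', 'bc', 'cd'], ['bc', 'cd', 'de']).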
def calculate_q_gram_distance(string1, string2, q_value):
"""
Calculate the sum of the absolute differences between two q-gram matricies
from strings.
Parameters
----------
string1 : str
string to calculate distance from
string2 : str
string to calculate distance to
q_value : int
size of q-gram window
Return value
------------
q_gram_distance : int
q-gram distance
"""
s1len = len(string1)
s2len = len(string2)
q_gram_count = 0
q_gram_matricies = generate_q_gram_matrix(string1, string2, q_value)
q_gram_matrix1 = q_gram_matricies[0]
q_gram_matrix2 = q_gram_matricies[1]
for qgram1 in q_gram_matrix1:
for qgram2 in q_gram_matrix2:
if qgram1 == qgram2:
q_gram_count = q_gram_count + 1
q_gram_distance = ((s1len - q_value + 1) +
(s2len - q_value + 1)) - (2 * q_gram_count)
return q_gram_distance
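# For instance, calculate_q_gram_distance('abcd', 'bcde', 2) returns 2:
# each string has 3 bigrams, 2 of which ('bc' and 'cd') are shared, so (3 + 3) - 2 * 2 = 2.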
def calculate_jaccard_distance(string1, string2, q_value):
"""
Calculate Jaccard distance, where distance is one minus the quotient of
the number of shared q-grams to the total number of unique q-grams between
two strings.
Parameters
----------
string1 : str
string to calculate distance from
string2 : str
string to calculate distance to
q_value : int
size of q-gram window
Return value
------------
jaccard_distance : float
jaccard distance
"""
s1len = len(string1)
s2len = len(string2)
q_gram_count = 0
q_gram_matricies = generate_q_gram_matrix(string1, string2, q_value)
q_gram_matrix1 = q_gram_matricies[0]
q_gram_matrix2 = q_gram_matricies[1]
for qgram1 in q_gram_matrix1:
for qgram2 in q_gram_matrix2:
if qgram1 == qgram2:
q_gram_count = q_gram_count + 1
observed_q_gram = ((s1len - q_value + 1) +
(s2len - q_value + 1)) - (q_gram_count)
jaccard_distance = (1 - (float(q_gram_count)) / observed_q_gram)
return jaccard_distance
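# For instance, calculate_jaccard_distance('abcd', 'bcde', 2) returns 0.5:
# 2 shared bigrams out of (3 + 3) - 2 = 4 observed q-grams gives 1 - 2/4.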
| 3.921875
| 4
|
util_func/math_utils.py
|
ltoppyl/Zissen_team1_AGE
| 1
|
12777629
|
import numpy as np
def softmax(x, axis=None):
x_max = np.max(x, axis=axis, keepdims=True)  # subtract the max for numerical stability
e_x = np.exp(x - x_max)
e_sum = np.sum(e_x, axis=axis, keepdims=True)
f_x = e_x / e_sum
return f_x
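# For instance, softmax(np.array([1.0, 2.0, 3.0])) is approximately
# array([0.09003057, 0.24472847, 0.66524096]), and the entries sum to 1.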
| 3.015625
| 3
|
braillingo-demo/obr.py
|
code-coffee-ufcg/braillingo-backend
| 1
|
12777630
|
import cv2
import numpy as np
import statistics as stat
class optical_braille_recognition():
def __init__(self) -> None:
pass
def make_histogram_y(self, img):
'''
Organizes the horizontal projection data of the image
Input:
img -> Image array
Output:
hist -> Array with the horizontal projection histogram values
'''
height, width = img.shape
hist = np.zeros(height)
for x in range(height):
for y in range(width):
if (img[x][y] == 1):
hist[x] += 1
return hist
def make_histogram_x(self, img):
'''
Organizes the vertical projection data of the image; this projection can only
be done if the input image contains a single line of braille
characters
Input:
img -> Image array
Output:
hist -> Array with the vertical projection histogram values
'''
height, width = img.shape
hist = np.zeros(width)
for x in range(height):
for y in range(width):
if (img[x][y] == 1):
hist[y] += 1
return hist
def get_delimiters(self, hist):
'''
Finds the vertical and horizontal delimiters of the positions where the
braille character dots are located, using the histogram
Input:
hist --> Array with the histogram values
Output:
delimiters --> Array with the dot position delimiters
'''
delimiters = list()
for i in range(1, len(hist)-1):
if (hist[i] > 0) and (hist[i-1] == 0) and (hist[i+1] > 0):
delimiters.append(i-1)
if (hist[i] > 0) and (hist[i-1] > 0) and (hist[i+1] == 0):
delimiters.append(i+1)
return delimiters
def get_line_delimiters(self, delimiters):
'''
Finds the delimiters that determine where the lines of braille text in
the image begin and end
Input:
delimiters --> Array with the dot position delimiters
Output:
line_delimiters --> Array with the line delimiters
'''
distances = list()
for i in range(len(delimiters)-1):
distances.append(delimiters[i+1] - delimiters[i])
# print(f"{delimiters[i+1]} - {delimiters[i]}", end='\n')
distances = np.array(distances)
# print(distances)
min = distances.min() # Distance between rows of dots within the same character
mode = stat.mode(distances) # Dot diameter
# print(mode)
if (mode - min) > 2:
limiar = min+2
else:
limiar = min+1
line_delimiters = list()
for i in range(1, len(delimiters)-2):
if (distances[i] > mode and distances[i+1] > limiar and distances[i-1] > limiar):
line_delimiters.append(delimiters[i])
line_delimiters.append(delimiters[i+1])
if i-1 == 0:
line_delimiters.append(delimiters[i-1])
if i+1 == len(delimiters)-2:
line_delimiters.append(delimiters[i+2])
return line_delimiters
def get_character_delimiters(self, delimiters):
'''
Uses the position delimiters to determine the delimiters of the braille
characters by computing the distances between them
Input:
delimiters --> Array with the dot position delimiters
Output:
character_delimiters --> Array with the character delimiters
'''
distances = list()
for i in range(len(delimiters)-1):
distances.append(delimiters[i+1] - delimiters[i])
# print(f"{delimiters[i+1]} - {delimiters[i]}", end='\n')
distances = np.array(distances)
min = distances.min()
mode=stat.mode(distances)
if (mode - min) > 2:
limiar = min+2
else:
limiar = min+1
# print(limiar)
# print(distances)
character_delimiters = list()
for i in range(len(delimiters)-1):
# Delimiting characters that have dots in both columns
diameter = mode
if (distances[i] <= limiar and distances[i] != mode-1 ):
if i != 0:
diameter = delimiters[i] - delimiters[i-1]
character_delimiters.append(delimiters[i] - diameter)
character_delimiters.append(delimiters[i+1] + diameter)
# Delimiting the characters at the start and end of the line
elif i == 0 and distances[i+1] > limiar:
# Case where the character has dots only in the left column
if (distances[i+1] > mode+limiar):
character_delimiters.append(delimiters[i+1] + min + mode)
character_delimiters.append(delimiters[i])
# Case where the character has dots only in the right column
else:
character_delimiters.append(delimiters[i] - min - mode)
character_delimiters.append(delimiters[i+1])
elif (i == len(distances)-1) and distances[i-1] > limiar:
# Case where the character has dots only in the right column
if (distances[i-1] > mode+limiar and distances[i-3] > limiar):
character_delimiters.append(delimiters[i-1] - min - mode)
character_delimiters.append(delimiters[i])
# Case where the character has dots only in the left column
else:
character_delimiters.append(delimiters[i+1] + min + mode)
character_delimiters.append(delimiters[i])
# Delimiting characters that have dots only in the left column
if (distances[i] > 1.5*mode+min):
if i > 1 and distances[i-2] > limiar:
character_delimiters.append(delimiters[i] + min + mode)
character_delimiters.append(delimiters[i-1])
# Delimiting characters that have dots only in the right column
elif ((distances[i] > 1.5*mode+min) and (i < len(delimiters)-3) and
(distances[i+2] > limiar)):
# if (i < len(delimiters_x)-3) and distances[i+2] > min+1:
character_delimiters.append(delimiters[i+2])
character_delimiters.append(delimiters[i+1] - min - mode)
# elif i == len(delimiters)-2:
# character_delimiters.append(delimiters[i+2])
# character_delimiters.append(delimiters[i+1] - min - mode)
# Delimiting blank (space) characters
if (distances[i] >= 3*mode+min):
character_delimiters.append(delimiters[i] + mode)
character_delimiters.append(delimiters[i+1] - mode)
return character_delimiters
def get_line_subimages(self, img, line_delimiters):
'''
Uses the line delimiters to crop the image into subimages, each one
containing a single line of braille characters
Input:
img -> Array of the image to be cropped
line_delimiters --> Array with the line delimiters
Output:
line_subimages --> Array with the cropped line subimages
'''
line_delimiters = sorted(line_delimiters)
line_subimages = list()
for i in range(len(line_delimiters)//2):
line_subimages.append(img[line_delimiters[2*i]:line_delimiters[2*i+1],:])
return line_subimages
def get_character_subimages(self, img, char_delimiters):
'''
Crops the image containing a line of braille characters into subimages
holding the individual characters, which are stored in an array in
reading order
Input:
img --> Array of the image containing a line of characters
char_delimiters --> Array with the character delimiters
Output:
subimages --> Array with the character subimages
'''
char_delimiters = sorted(char_delimiters)
for i in range(len(char_delimiters)):
if char_delimiters[i] < 0:
char_delimiters[i] = 0
char_subimages = list()
for i in range(len(char_delimiters)//2):
char_subimages.append(img[:,char_delimiters[2*i]:char_delimiters[2*i+1]])
return char_subimages
def optical_braille_recognition(self, img):
'''
Receives a preprocessed image containing braille text, detects the position
of the characters in the image and, from that, builds a matrix of
subimages containing one word of the text per row
Input:
img --> Array of the preprocessed image
Output:
subimages --> matrix of subimages, where each row holds the characters of
one word
'''
hist_y = self.make_histogram_y(img)
delimiters_y = self.get_delimiters(hist_y)
line_delimiters = self.get_line_delimiters(delimiters_y)
line_subimages = self.get_line_subimages(img, line_delimiters)
subimages = list()
for i in range(len(line_subimages)):
hist_x = self.make_histogram_x(line_subimages[i])
delimiters_x = self.get_delimiters(hist_x)
char_delimiters = self.get_character_delimiters(delimiters_x)
char_subimages = self.get_character_subimages(line_subimages[i], char_delimiters)
word_subimages = list()
for j in range(len(char_subimages)):
hist_x = self.make_histogram_x(char_subimages[j])
if np.max(hist_x) != 0:
word_subimages.append(char_subimages[j])
else:
subimages.append(word_subimages)
word_subimages = list()
if np.max(hist_x) != 0 and j == len(char_subimages)-1:
subimages.append(word_subimages)
word_subimages = list()
return subimages
def tilt_correction(self, img):
max = 0
dst_img = img # fall back to the original image if no rotation yields any delimiters
rows, cols = img.shape
for theta in np.arange(-6, 6, 0.1):
Mr = cv2.getRotationMatrix2D( (cols/2, rows/2), theta , 1)
aux_img = cv2.warpAffine(img, Mr, (cols, rows))
hist_y = self.make_histogram_y(aux_img)
delimiters_y = self.get_delimiters(hist_y)
if len(delimiters_y) > max:
max = len(delimiters_y)
dst_img = aux_img
return dst_img
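# Illustrative usage sketch (comment only; assumes a binarized image in which
# braille dots have value 1 and the background is 0):
# obr = optical_braille_recognition()
# binary = (cv2.imread('braille.png', 0) < 128).astype(np.uint8)
# words = obr.optical_braille_recognition(obr.tilt_correction(binary))
# # 'words' is a list of words, each a list of single-character subimages.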
| 3.390625
| 3
|
app/tasks/forms.py
|
3dnygm4/titanium
| 1
|
12777631
|
<reponame>3dnygm4/titanium
#forms.py - help forms handing and data validation
#/app/tasks/forms.py
from wtforms import Form, TextField, DateField, IntegerField, \
SelectField, PasswordField, validators, RadioField
class AddTask(Form):
task_id = IntegerField('Priority')
name = TextField('Task Name', [validators.Required()])
due_date = DateField('Date Due (mm/dd/yyyy)', [validators.Required()],
format = '%m/%d/%Y')
priority = SelectField('Priority', [validators.Required()],choices=[('1','1'),
('2','2'),('3','3'),('4','4'),('5','5'),('6','6'),
('7','7'),('8','8'),('9','9'),('10','10')])
posted_date = DateField('Posted Date (mm/dd/yyyy)', [validators.Required()], format='%m/%d/%Y')
status = IntegerField('Status')
| 2.234375
| 2
|
open_fmri/apps/dataset/migrations/0018_auto_20151021_2215.py
|
rwblair/open_fmri
| 5
|
12777632
|
<reponame>rwblair/open_fmri
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('dataset', '0017_auto_20151020_2027'),
]
operations = [
migrations.AddField(
model_name='revision',
name='aws_link_title',
field=models.CharField(max_length=255, blank=True),
),
migrations.AddField(
model_name='revision',
name='aws_link_url',
field=models.TextField(validators=[django.core.validators.URLValidator()], blank=True),
),
]
| 1.734375
| 2
|
bluetoothctl.py
|
ArcanoxDragon/SwitchProConProxy
| 0
|
12777633
|
import time
import pexpect
import re
import subprocess
from pexpect_strip_ansi import StripAnsiSpawn
class BluetoothctlError(Exception):
"""This exception is raised when bluetoothctl fails to start."""
pass
class Bluetoothctl:
"""A wrapper for bluetoothctl utility."""
def __init__(self, log=False):
out = subprocess.check_output("rfkill unblock bluetooth", shell = True)
logfile = open("bluetoothctl.log", "w") if log else None
self.child = StripAnsiSpawn("bluetoothctl", echo = False, encoding="utf-8", logfile=logfile)
def get_output(self, command, pause = 0):
"""Run a command in bluetoothctl prompt, return output as a list of lines."""
self.child.send(command + "\n")
time.sleep(pause)
start_failed = self.child.expect([r"\[[^\]]+\]#", pexpect.EOF])
if start_failed:
raise BluetoothctlError("Bluetoothctl failed after running " + command)
return self.child.before.split("\r\n")
def start_scan(self):
"""Start bluetooth scanning process."""
try:
out = self.get_output("scan on")
except BluetoothctlError as e:
print(e)
return None
def make_discoverable(self):
"""Make device discoverable."""
try:
out = self.get_output("discoverable on")
except BluetoothctlError as e:
print(e)
return None
def parse_device_info(self, info_string):
"""Parse a string corresponding to a device."""
device = {}
block_list = ["[\x1b[0;", "removed"]
string_valid = not any(keyword in info_string for keyword in block_list)
if string_valid:
try:
device_position = info_string.index("Device")
except ValueError:
pass
else:
if device_position > -1:
attribute_list = info_string[device_position:].split(" ", 2)
device = {
"mac_address": attribute_list[1],
"name": attribute_list[2]
}
return device
def get_available_devices(self):
"""Return a list of tuples of paired and discoverable devices."""
try:
out = self.get_output("devices")
except BluetoothctlError as e:
print(e)
return None
else:
available_devices = []
for line in out:
device = self.parse_device_info(line)
if device:
available_devices.append(device)
return available_devices
def get_paired_devices(self):
"""Return a list of tuples of paired devices."""
try:
out = self.get_output("paired-devices")
except BluetoothctlError as e:
print(e)
return None
else:
paired_devices = []
for line in out:
device = self.parse_device_info(line)
if device:
paired_devices.append(device)
return paired_devices
def get_discoverable_devices(self):
"""Filter paired devices out of available."""
available = self.get_available_devices()
paired = self.get_paired_devices()
return [d for d in available if d not in paired]
def get_device_info(self, mac_address):
"""Get device info by mac address."""
try:
out = self.get_output("info " + mac_address)
except BluetoothctlError as e:
print(e)
return None
else:
info_lines: list[str] = [line for line in out if not re.match(r"^\s*Device", line)]
info = {}
for line in info_lines:
try:
attr_name, attr_value = [part.strip() for part in line.split(":", maxsplit=1)]
info[attr_name] = attr_value
except:
pass
return info
def pair(self, mac_address):
"""Try to pair with a device by mac address."""
try:
out = self.get_output("pair " + mac_address, 4)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["Failed to pair", "Pairing successful", pexpect.EOF])
success = True if res == 1 else False
return success
def remove(self, mac_address):
"""Remove paired device by mac address, return success of the operation."""
try:
out = self.get_output("remove " + mac_address, 3)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["not available", "Device has been removed", pexpect.EOF])
success = True if res == 1 else False
return success
def connect(self, mac_address):
"""Try to connect to a device by mac address."""
try:
out = self.get_output("connect " + mac_address, 2)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["Failed to connect", r".*Connection successful", pexpect.EOF])
success = True if res == 1 else False
return success
def disconnect(self, mac_address):
"""Try to disconnect to a device by mac address."""
try:
out = self.get_output("disconnect " + mac_address, 2)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["Failed to disconnect", "Successful disconnected", pexpect.EOF])
success = True if res == 1 else False
return success
def trust(self, mac_address):
"""Try to trust a device by mac address."""
try:
out = self.get_output("trust " + mac_address, 2)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["not available", r"Changing ([A-Z0-9:]+) trust succeeded", pexpect.EOF])
success = True if res == 1 else False
return success
def untrust(self, mac_address):
"""Try to untrust a device by mac address."""
try:
out = self.get_output("untrust " + mac_address, 2)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["not available", r"Changing ([A-Z0-9:]+) untrust succeeded", pexpect.EOF])
success = True if res == 1 else False
return success
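
# Example session (a sketch, not part of the original module). It assumes
# bluetoothctl and rfkill are installed and that the address below is
# replaced with a real device found by the scan.
if __name__ == "__main__":
    bl = Bluetoothctl(log=True)
    bl.start_scan()
    time.sleep(10)  # give the controller time to discover devices
    for device in bl.get_discoverable_devices():
        print(device["mac_address"], device["name"])
    # mac = "AA:BB:CC:DD:EE:FF"  # hypothetical address
    # if bl.pair(mac) and bl.trust(mac):
    #     print("connected:", bl.connect(mac))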
| 2.875
| 3
|
webapp/__init__.py
|
PASTAplus/dex-deprecated
| 0
|
12777634
|
<filename>webapp/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Mod: __init__
:Synopsis:
Initialize webapp, including working directories (see Config.ROOT_DIR).
:Author:
servilla
:Created:
4/12/20
"""
import os
import daiquiri
from webapp.config import Config
logger = daiquiri.getLogger(__name__)
path = Config.ROOT_DIR + "/static"
os.makedirs(path, exist_ok=True)
logger.info(f"Created root working directories: {Config.ROOT_DIR} and {path} ")
| 1.960938
| 2
|
clumioapi/models/ebs_restore_target_v1.py
|
clumio-code/clumio-python-sdk
| 0
|
12777635
|
<filename>clumioapi/models/ebs_restore_target_v1.py<gh_stars>0
#
# Copyright 2021. Clumio, Inc.
#
from typing import Any, Dict, Mapping, Optional, Sequence, Type, TypeVar
from clumioapi.models import aws_tag_common_model
T = TypeVar('T', bound='EBSRestoreTargetV1')
class EBSRestoreTargetV1:
"""Implementation of the 'EBSRestoreTargetV1' model.
The configuration of the EBS volume to be restored.
Attributes:
aws_az:
The availability zone into which the EBS volume is restored. For example, `us-
west-2a`.
Use the [GET /datasources/aws/environments](#operation/list-aws-environments)
endpoint to fetch valid values.
environment_id:
The Clumio-assigned ID of the AWS environment to be used as the restore
destination. Use the [GET /datasources/aws/environments](#operation/list-aws-
environments) endpoint to fetch valid values.
kms_key_native_id:
The KMS encryption key ID used to encrypt the EBS volume data. The KMS
encryption key ID is stored in the AWS cloud as part of your AWS account.
tags:
The AWS tags to be applied to the restored volume. The tags are stored in the
AWS cloud as part of your AWS account.
            An EBS volume can have multiple tags. The target volume will not inherit any
tags that were applied
to the original volume. To find the tags that were applied to the original
volume,
use the [GET /backups/aws/ebs-volumes](#operation/list-aws-ebs-volumes) endpoint
to display the original volume's tag keys (`tags.key`) and tag values
(`tags.value`).
"""
# Create a mapping from Model property names to API property names
_names = {
'aws_az': 'aws_az',
'environment_id': 'environment_id',
'kms_key_native_id': 'kms_key_native_id',
'tags': 'tags',
}
def __init__(
self,
aws_az: str = None,
environment_id: str = None,
kms_key_native_id: str = None,
tags: Sequence[aws_tag_common_model.AwsTagCommonModel] = None,
) -> None:
"""Constructor for the EBSRestoreTargetV1 class."""
# Initialize members of the class
self.aws_az: str = aws_az
self.environment_id: str = environment_id
self.kms_key_native_id: str = kms_key_native_id
self.tags: Sequence[aws_tag_common_model.AwsTagCommonModel] = tags
@classmethod
def from_dictionary(cls: Type, dictionary: Mapping[str, Any]) -> Optional[T]:
"""Creates an instance of this model from a dictionary
Args:
dictionary: A dictionary representation of the object as obtained
from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if not dictionary:
return None
# Extract variables from the dictionary
aws_az = dictionary.get('aws_az')
environment_id = dictionary.get('environment_id')
kms_key_native_id = dictionary.get('kms_key_native_id')
tags = None
if dictionary.get('tags'):
tags = list()
for value in dictionary.get('tags'):
tags.append(aws_tag_common_model.AwsTagCommonModel.from_dictionary(value))
# Return an object of this model
return cls(aws_az, environment_id, kms_key_native_id, tags)
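
# Minimal round-trip sketch (not part of the SDK). The zone and IDs below are
# placeholders; `tags` is omitted because it needs AwsTagCommonModel instances.
if __name__ == '__main__':
    payload = {
        'aws_az': 'us-west-2a',
        'environment_id': 'env-00000000',      # hypothetical environment ID
        'kms_key_native_id': 'kms-11111111',   # hypothetical KMS key ID
    }
    target = EBSRestoreTargetV1.from_dictionary(payload)
    print(target.aws_az, target.environment_id)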
| 2.125
| 2
|
planning/path_generator/astar.py
|
HybridRobotics/cbf
| 9
|
12777636
|
import heapq as hq
import math
import numpy as np
from models.geometry_utils import *
# TODO: Generalize to 3D?
class Node:
def __init__(self, pos, parent=None, g_cost=math.inf, f_cost=math.inf):
self.pos = pos
self.parent = parent
self.g_cost = g_cost
self.f_cost = f_cost
def __eq__(self, other):
return all(self.pos == other.pos)
def __le__(self, other):
if self.pos[0] == other.pos[0]:
return self.pos[1] <= other.pos[1]
else:
return self.pos[0] <= other.pos[0]
def __lt__(self, other):
if self.pos[0] == other.pos[0]:
return self.pos[1] < other.pos[1]
else:
return self.pos[0] < other.pos[0]
# TODO: Generalize to 3D
class GridMap:
# cell_size > 0; don't make cell_size too small
def __init__(self, bounds=((0.0, 0.0), (10.0, 10.0)), cell_size=0.1, quad=True):
self.bounds = bounds
self.cell_size = cell_size
self.quad = quad
self.Nx = math.ceil((bounds[1][0] - bounds[0][0]) / cell_size)
self.Ny = math.ceil((bounds[1][1] - bounds[0][1]) / cell_size)
pos = lambda i, j: np.array([bounds[0][0] + (i + 0.5) * cell_size, bounds[0][1] + (j + 0.5) * cell_size])
self.grid = [[Node(pos(i, j)) for j in range(self.Ny)] for i in range(self.Nx)]
# pos should be within bounds
def set_node(self, pos, parent, g_cost, f_cost):
i_x = math.floor((pos[0] - self.bounds[0][0]) / self.cell_size)
i_y = math.floor((pos[1] - self.bounds[0][1]) / self.cell_size)
self.grid[i_x][i_y].parent = parent
self.grid[i_x][i_y].g_cost = g_cost
self.grid[i_x][i_y].f_cost = f_cost
return self.grid[i_x][i_y]
# pos should be within bounds
def get_node(self, pos):
i_x = math.floor((pos[0] - self.bounds[0][0]) / self.cell_size)
i_y = math.floor((pos[1] - self.bounds[0][1]) / self.cell_size)
return self.grid[i_x][i_y]
def get_neighbours(self, node):
i_x = math.floor((node.pos[0] - self.bounds[0][0]) / self.cell_size)
i_y = math.floor((node.pos[1] - self.bounds[0][1]) / self.cell_size)
neighbours = []
for i in range(i_x - 1, i_x + 2):
for j in range(i_y - 1, i_y + 2):
if i == i_x and j == i_y:
continue
if self.quad:
if 0 <= i <= self.Nx - 1 and 0 <= j <= self.Ny - 1 and abs(i - i_x) + abs(j - i_y) <= 1:
neighbours.append(self.grid[i][j])
else:
if 0 <= i <= self.Nx - 1 and 0 <= j <= self.Ny - 1:
neighbours.append(self.grid[i][j])
return neighbours
class GraphSearch:
def __init__(self, graph, obstacles, margin):
self.graph = graph
self.obstacles = obstacles
self.margin = margin
def a_star(self, start_pos, goal_pos):
h_cost = lambda pos: np.linalg.norm(goal_pos - pos)
edge_cost = lambda n1, n2: np.linalg.norm(n1.pos - n2.pos)
openSet = []
start = self.graph.set_node(start_pos, None, 0.0, h_cost(start_pos))
goal = self.graph.get_node(goal_pos)
hq.heappush(openSet, (start.f_cost, start))
while len(openSet) > 0:
current = openSet[0][1]
if current == goal:
return self.reconstruct_path(current)
hq.heappop(openSet)
for n in self.graph.get_neighbours(current):
if self.check_collision(n.pos):
continue
g_score = current.g_cost + edge_cost(current, n)
if g_score < n.g_cost:
n_ = self.graph.set_node(n.pos, current, g_score, g_score + h_cost(n.pos))
if not n in (x[1] for x in openSet):
hq.heappush(openSet, (n_.f_cost, n_))
return []
def theta_star(self, start_pos, goal_pos):
h_cost = lambda pos: np.linalg.norm(goal_pos - pos)
edge_cost = lambda n1, n2: np.linalg.norm(n1.pos - n2.pos)
openSet = []
start = self.graph.set_node(start_pos, None, 0.0, h_cost(start_pos))
goal = self.graph.get_node(goal_pos)
hq.heappush(openSet, (start.f_cost, start))
while len(openSet) > 0:
current = openSet[0][1]
if current == goal:
return self.reconstruct_path(current)
hq.heappop(openSet)
for n in self.graph.get_neighbours(current):
if self.check_collision(n.pos):
continue
if (not current.parent is None) and self.line_of_sight(current.parent, n):
g_score = current.parent.g_cost + edge_cost(current.parent, n)
if g_score < n.g_cost:
n_ = self.graph.set_node(n.pos, current.parent, g_score, g_score + h_cost(n.pos))
# delete n from min-heap
for i in range(len(openSet)):
if openSet[i][1] == n:
openSet[i] = openSet[-1]
openSet.pop()
if i < len(openSet):
hq._siftup(openSet, i)
hq._siftdown(openSet, 0, i)
break
hq.heappush(openSet, (n_.f_cost, n_))
else:
g_score = current.g_cost + edge_cost(current, n)
if g_score < n.g_cost:
n_ = self.graph.set_node(n.pos, current, g_score, g_score + h_cost(n.pos))
# delete n from min-heap
for i in range(len(openSet)):
if openSet[i][1] == n:
openSet[i] = openSet[-1]
openSet.pop()
if i < len(openSet):
hq._siftup(openSet, i)
hq._siftdown(openSet, 0, i)
break
hq.heappush(openSet, (n_.f_cost, n_))
return []
# TODO: optimize
def line_of_sight(self, n1, n2):
e = self.graph.cell_size
div = np.linalg.norm(n2.pos - n1.pos) / e
for i in range(1, math.floor(div) + 1):
if self.check_collision((n2.pos * i + n1.pos * (div - i)) / div):
return False
return True
def check_collision(self, pos):
for o in self.obstacles:
A, b = o.get_convex_rep()
b = b.reshape((len(b),))
if all(A @ pos - b - self.margin * np.linalg.norm(A, axis=1) <= 0):
return True
return False
def reconstruct_path(self, node):
path = [node]
while not node.parent is None:
node = node.parent
path.append(node)
return [path[len(path) - i - 1] for i in range(len(path))]
def reduce_path(self, path):
red_path = []
if len(path) > 1:
for i in range(1, len(path)):
if (not path[i].parent.parent is None) and self.line_of_sight(path[i], path[i].parent.parent):
path[i].parent = path[i].parent.parent
else:
red_path.append(path[i].parent)
red_path.append(path[-1])
return red_path
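
# Usage sketch (not part of the original module). The real project takes its
# obstacles from models.geometry_utils; here a small hypothetical axis-aligned
# box that exposes the same get_convex_rep() interface (A, b with A @ x <= b
# inside the obstacle) is enough to exercise the planner.
if __name__ == "__main__":
    class BoxObstacle:
        def __init__(self, lo, hi):
            self.lo, self.hi = np.array(lo), np.array(hi)

        def get_convex_rep(self):
            # half-space representation: x <= hi and x >= lo
            A = np.vstack([np.eye(2), -np.eye(2)])
            b = np.concatenate([self.hi, -self.lo]).reshape(-1, 1)
            return A, b

    grid = GridMap(bounds=((0.0, 0.0), (10.0, 10.0)), cell_size=0.25)
    search = GraphSearch(grid, [BoxObstacle((4.0, 2.0), (6.0, 8.0))], margin=0.2)
    path = search.a_star(np.array([0.5, 0.5]), np.array([9.5, 9.5]))
    print(len(path), "waypoints:", [tuple(np.round(n.pos, 2)) for n in path[:3]], "...")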
| 2.8125
| 3
|
main.py
|
Seokky/avito-flats-parser
| 0
|
12777637
|
<reponame>Seokky/avito-flats-parser
import requests
from bs4 import BeautifulSoup
from constants import BASE_URL, AD_ITEM_CLASS, RESULT_FNAME
from helpers import getAdContent, writeAdContentToFile
req = requests.get(BASE_URL)
soup = BeautifulSoup(req.text, features="lxml")
ads = soup.findAll('div', AD_ITEM_CLASS)
last_floor_apartments = []
def print_ad_content(data):
text, address, url = data
print(f'{text}, {address}\n{url}\n')
def print_regular_apartments():
    for ad in ads:
        text, address, url, last_floor = getAdContent(ad)
        if last_floor:
            # set last-floor ads aside so they can be listed separately below
            last_floor_apartments.append(ad)
        else:
            print_ad_content([text, address, url])
            writeAdContentToFile(f, [text, address, url])
def print_last_floor_apartments():
for ad in last_floor_apartments:
text, address, url, last_floor = getAdContent(ad)
print_ad_content([text, address, url])
writeAdContentToFile(f, [text, address, url])
with open(RESULT_FNAME, encoding='utf-8', mode="w") as f:
f.write(f'Fetching from: {BASE_URL}\n\n')
print_regular_apartments()
print_last_floor_apartments()
| 2.703125
| 3
|
examples/pyqtgraph_plot_block.py
|
Sout/pyrf
| 0
|
12777638
|
#!/usr/bin/env python
# import required libraries
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import sys
import numpy as np
from pyrf.devices.thinkrf import WSA
from pyrf.util import read_data_and_context
from pyrf.numpy_util import compute_fft
# plot constants
CENTER_FREQ = 2450 * 1e6
SAMPLE_SIZE = 1024
ATTENUATOR = 1
DECIMATION = 1
RFE_MODE = 'ZIF'
# connect to WSA device
dut = WSA()
ip = sys.argv[1]
dut.connect(ip)
class MainApplication(pg.GraphicsWindow):
def __init__(self, dut):
super(MainApplication, self).__init__()
self.dut = dut
def keyPressEvent(self, event):
if event.text() == ';':
            cmd, ok = QtGui.QInputDialog.getText(self, 'Enter SCPI Command',
                                                 'Enter SCPI Command:')
            if ok and '?' not in cmd:
                self.dut.scpiset(cmd)
win = MainApplication(dut)
win.resize(1000,600)
win.setWindowTitle("PYRF FFT Plot Example")
# initialize WSA configurations
dut.reset()
dut.request_read_perm()
dut.freq(CENTER_FREQ)
dut.decimation(DECIMATION)
dut.attenuator(ATTENUATOR)
dut.rfe_mode(RFE_MODE)
BANDWIDTH = dut.properties.FULL_BW[RFE_MODE]
# initialize plot
fft_plot = win.addPlot(title="Power Vs. Frequency")
# initialize x-axes limits
plot_xmin = (CENTER_FREQ) - (BANDWIDTH / 2)
plot_xmax = (CENTER_FREQ) + (BANDWIDTH / 2)
# initialize the y-axis of the plot
plot_ymin = -130
plot_ymax = 20
fft_plot.setYRange(plot_ymin ,plot_ymax)
fft_plot.setLabel('left', text= 'Power', units = 'dBm', unitPrefix=None)
# disable auto size of the x-y axis
fft_plot.enableAutoRange('xy', False)
# initialize a curve for the plot
curve = fft_plot.plot(pen='g')
def update():
global dut, curve, fft_plot, plot_xmin, plot_xmax
# read data
data, context = read_data_and_context(dut, SAMPLE_SIZE)
# compute the fft and plot the data
pow_data = compute_fft(dut, data, context)
# update the frequency range (Hz)
freq_range = np.linspace(plot_xmin , plot_xmax, len(pow_data))
# initialize the x-axis of the plot
fft_plot.setXRange(plot_xmin,plot_xmax)
fft_plot.setLabel('bottom', text= 'Frequency', units = 'Hz', unitPrefix=None)
curve.setData(freq_range,pow_data, pen = 'g')
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| 2.453125
| 2
|
tests/test_util.py
|
D-PLACE/pydplace
| 1
|
12777639
|
from pydplace.util import *
def test_remove_subdirs(tmpdir):
tmpdir.join('a').mkdir()
tmpdir.join('a', 'b').mkdir()
assert tmpdir.join('a', 'b').check()
remove_subdirs(str(tmpdir))
assert not tmpdir.join('a').check()
| 2.671875
| 3
|
setup.py
|
amehtaSF/QualtricsData
| 0
|
12777640
|
<gh_stars>0
import setuptools
import os
with open(f"{os.path.dirname(os.path.realpath(__file__))}/README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="QualtricsData",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
description="A package to read and preprocess Qualtrics Data",
long_description=long_description,
url="https://github.com/amehtaSF/QualtricsData",
packages=setuptools.find_packages(),
license = "License :: OSI Approved :: MIT License",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 1.367188
| 1
|
jauth/repository/token_base.py
|
pjongy/jauth
| 1
|
12777641
|
<filename>jauth/repository/token_base.py<gh_stars>1-10
import abc
from jauth.model.token import Token
from jauth.repository import BaseRepository
class TokenRepository(BaseRepository, abc.ABC):
@abc.abstractmethod
async def find_token_by_id(self, _id: str) -> Token:
pass
@abc.abstractmethod
async def create_token(self, user_id: str) -> Token:
pass
@abc.abstractmethod
async def delete_token(self, token_id: str) -> int:
pass
| 2.265625
| 2
|
UnitTest/RPi_CameraTest/Camera+View.py
|
kullken/Pet-Mk-IV
| 1
|
12777642
|
import picamera
from time import sleep
import os
# Xlib: extension "RANDR" missing on display ":10.0".
#(gpicview:2869):
# GLib-GObject-WARNING **:
# Attempt to add property GtkSettings:
# :gtk-scrolled-window-placement after class was initialised
camera = picamera.PiCamera()
camera.rotation = 180
print ('klick1.py: Take picture')
camera.capture('python-camera.jpg')
print ('klick1.py: Launch Viewer')
os.system('gpicview python-camera.jpg &')
print ('klick1.py: Wait 1')
sleep(2)
print ('klick1.py: Wait 2')
sleep(2)
print ('klick1.py: Wait 3')
sleep(2)
print ('klick1.py: Close')
os.system('killall gpicview')
#camera.start_preview()
#sleep(5)
#camera.stop_preview()
| 2.765625
| 3
|
dev/scripts/process-starter.py
|
kohkimakimoto/hq
| 62
|
12777643
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import, unicode_literals
import argparse, os, sys, re, fcntl, time, subprocess, textwrap, threading, signal
# utilities for compatibility.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
input = raw_input
def as_bytes(s, encoding='utf-8'):
if isinstance(s, str):
return s
else:
return s.encode(encoding)
def as_string(s, encoding='utf-8'):
if isinstance(s, unicode):
return s
else:
return s.decode(encoding)
else:
input = input
def as_bytes(s, encoding='utf8'):
if isinstance(s, bytes):
return s
else:
return s.encode(encoding)
def as_string(s, encoding='utf8'):
if isinstance(s, str):
return s
else:
return s.decode(encoding)
def shell_escape(s):
return "'" + s.replace("'", "'\"'\"'") + "'"
def run(cmd):
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
print(e, file=sys.stderr)
def sig_handler(signum, frame):
sys.exit(0)
def start(args):
run_commands = args.run
pre_commands = args.pre
post_commands = args.post
    # handle SIGTERM/SIGINT so the finally block below still runs on termination.
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
try:
# run pre command
for cmd in pre_commands:
run(cmd)
# start run commands
threads = []
for cmd in run_commands:
t = threading.Thread(target=run, args=(cmd,))
threads.append(t)
t.start()
# wait for all run command threads finish
for t in threads:
t.join()
finally:
# run post command
for cmd in post_commands:
run(cmd)
def main():
parser = argparse.ArgumentParser(
description="process-starter.py is a utility to start multiple processes",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
description:
A utility to start multiple processes
example:
process-starter.py --run "your-file-watcher-command" "your-dev-server-start-command"
process-starter.py --pre "your-build-command" --run "your-dev-server-start-command"
Copyright (c) <NAME> <<EMAIL>>
The MIT License (MIT)
'''))
parser.add_argument("--pre", dest="pre", metavar="COMMAND", nargs='*', help="Set commands that are executed before run commands", default=[])
parser.add_argument("--post", dest="post", metavar="COMMAND", nargs='*',help="Set commands that are executed after run commands", default=[])
parser.add_argument("--run", "-r", dest="run", metavar="COMMAND", nargs='*', help="Set commands to run concurrently", default=[])
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
start(args)
if __name__ == '__main__': main()
| 2.421875
| 2
|
tree.py
|
SIshikawa1106/planner
| 0
|
12777644
|
<gh_stars>0
import kdtree
from collections import deque
import numpy as np
DEBUG_VIEW = True
class Tree(object):
def __init__(self, node):
self.root = node
self.node_list = node[np.newaxis, :]
self._tree = kdtree.create([node], dimensions=node.size)
def get_root(self):
return self.root
def add_node(self, node):
self._tree.add(node)
self.node_list = np.append(self.node_list, node[np.newaxis,:], axis=0)
def find_nearest_node(self, target_node):
nearest_node = self._tree.search_nn(target_node)
if DEBUG_VIEW:
print(nearest_node)
return nearest_node[0].data
def get_node_list(self):
return self.node_list
if __name__ == "__main__":
import numpy as np
import sys, os
sys.path.append("../")
import Plot3DViewer
print("TEST")
node_tree = None
for n in range(100):
node = np.random.rand(3)
if node_tree is None:
node_tree = Tree(node)
else:
node_tree.add_node(node=node)
points = node_tree.get_node_list()
Plot3DViewer.Plot_3D(node_tree.get_node_list(), pause_time=0.1)
| 2.859375
| 3
|
frappe/website/template.py
|
cadencewatches/frappe
| 0
|
12777645
|
<reponame>cadencewatches/frappe
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import strip_html
from frappe.website.utils import scrub_relative_urls
from jinja2.utils import concat
from jinja2 import meta
import re
def render_blocks(context):
"""returns a dict of block name and its rendered content"""
out = {}
env = frappe.get_jenv()
def _render_blocks(template_path):
source = frappe.local.jloader.get_source(frappe.local.jenv, template_path)[0]
for referenced_template_path in meta.find_referenced_templates(env.parse(source)):
if referenced_template_path:
_render_blocks(referenced_template_path)
template = frappe.get_template(template_path)
for block, render in template.blocks.items():
out[block] = scrub_relative_urls(concat(render(template.new_context(context))))
_render_blocks(context["template_path"])
# default blocks if not found
if "title" not in out and out.get("header"):
out["title"] = out["header"]
if "title" not in out:
out["title"] = context.get("title")
if "header" not in out and out.get("title"):
out["header"] = out["title"]
if not out["header"].startswith("<h"):
out["header"] = "<h2>" + out["header"] + "</h2>"
if "breadcrumbs" not in out:
out["breadcrumbs"] = scrub_relative_urls(
frappe.get_template("templates/includes/breadcrumbs.html").render(context))
if "<!-- no-sidebar -->" in out.get("content", ""):
out["no_sidebar"] = 1
if "sidebar" not in out and not out.get("no_sidebar"):
out["sidebar"] = scrub_relative_urls(
frappe.get_template("templates/includes/sidebar.html").render(context))
out["title"] = strip_html(out.get("title") or "")
# remove style and script tags from blocks
out["style"] = re.sub("</?style[^<>]*>", "", out.get("style") or "")
out["script"] = re.sub("</?script[^<>]*>", "", out.get("script") or "")
return out
| 1.882813
| 2
|
gym_game_nim/envs/__init__.py
|
hfwittmann/gym_game_nim
| 1
|
12777646
|
<reponame>hfwittmann/gym_game_nim<filename>gym_game_nim/envs/__init__.py
from gym_game_nim.envs.game_nim_env import GameNimEnv
| 1.203125
| 1
|
data/test_drawing_box.py
|
vuanh96/Thesis
| 0
|
12777647
|
import cv2
import os
import sys
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
if __name__ == "__main__":
for line in open("MOT17/train/ImageSets/Main/trainval.txt", "r"):
line = line.rstrip()
img_path = os.path.join("MOT17/train/JPEGImages", line + ".jpg")
anno_path = os.path.join("MOT17/train/Annotations", line + ".xml")
img = cv2.imread(img_path)
anno = ET.parse(anno_path).getroot()
file_name = anno.find('filename').text.lower().strip()
pts = ['xmin', 'ymin', 'xmax', 'ymax']
for obj in anno.iter('object'):
bbox = obj.find('bndbox')
box = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
box.append(cur_pt)
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2)
cv2.imshow("MOT17", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
| 2.625
| 3
|
MAIN/LOGIC/board.py
|
SI-Jeson-Mor-NineHorses/NineHorses
| 0
|
12777648
|
from MAIN.LOGIC.pieces import *
class Board:
    # The board is represented by a 9x9 array; empty squares hold Empty placeholder pieces.
def __init__(self):
self.recently_highlighted = []
self.empty = [[None for x in range(9)] for y in range(9)]
self.array = [
[Knight("b", 0, i) for i in range(9)],
[Empty("_", 1, x) for x in range(9)],
[Empty("_", 2, x) for x in range(9)],
[Empty("_", 3, x) for x in range(9)],
[Empty("_", 4, x) for x in range(9)],
[Empty("_", 5, x) for x in range(9)],
[Empty("_", 6, x) for x in range(9)],
[Empty("_", 7, x) for x in range(9)],
[Knight("w", 8, i) for i in range(9)],
]
def get_all_legal_moves(self, color):
moves_list = []
for i in self.array:
for j in i:
if j.color == color:
for move in j.gen_legal_moves(self):
moves_list.append({color: {'from': (j.y, j.x), 'to': move}})
return moves_list
def move_piece(self, piece, y, x):
oldx = piece.x
oldy = piece.y
piece.x = x
piece.y = y
piece.rect.x = x * 60
piece.rect.y = y * 60
self.array[oldy][oldx] = Empty('_', oldy, oldx)
self.array[y][x] = piece
piece.unhighlight()
def get_piece(self, x, y):
return self.array[y][x]
    # Print the board array to the console
def print_to_terminal(self):
for j in range(9):
arr = []
for piece in self.array[j]:
if piece != None:
arr.append(piece.color + piece.symbol)
else:
arr.append("--")
print(arr)
    # highlight the optional moves
def highlight_optional_moves(self, moves):
self.recently_highlighted = moves
for x in moves:
self.array[x[0]][x[1]].highlight()
    # remove the highlighting of the optional moves
def unhighlight_optional_moves(self):
for x in self.recently_highlighted:
self.array[x[0]][x[1]].unhighlight()
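
# Quick usage sketch (not part of the original module). It assumes the
# Knight/Empty pieces imported above can be constructed without a running
# pygame display.
if __name__ == "__main__":
    board = Board()
    board.print_to_terminal()
    print(len(board.get_all_legal_moves("w")), "legal moves for white")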
| 3.15625
| 3
|
src/spaceone/inventory/model/region_model.py
|
whdalsrnt/inventory
| 9
|
12777649
|
<gh_stars>1-10
from mongoengine import *
from spaceone.core.model.mongo_model import MongoModel
class RegionTag(EmbeddedDocument):
key = StringField(max_length=255)
value = StringField(max_length=255)
class Region(MongoModel):
region_id = StringField(max_length=40, generate_id='region', unique=True)
name = StringField(max_length=255)
region_key = StringField(max_length=255)
region_code = StringField(max_length=255, unique_with=['provider', 'domain_id'])
provider = StringField(max_length=255)
ref_region = StringField(max_length=255)
tags = ListField(EmbeddedDocumentField(RegionTag))
domain_id = StringField(max_length=255)
updated_by = StringField(default=None, null=True)
created_at = DateTimeField(auto_now_add=True)
updated_at = DateTimeField(auto_now=True)
meta = {
'updatable_fields': [
'name',
'region_key',
'tags',
'updated_by',
'updated_at'
],
'minimal_fields': [
'region_id',
'name',
'region_code',
'provider'
],
'ordering': [
'name'
],
'indexes': [
'region_id',
'region_key',
'region_code',
'provider',
'ref_region',
'domain_id',
('tags.key', 'tags.value')
]
}
| 2.25
| 2
|
src/passpredict/satellites/__init__.py
|
samtx/pass-predictor
| 0
|
12777650
|
from .base import LLH
from .sgp4 import SGP4Propagator
from .kepler import KeplerPropagator
__all__ = [
'LLH',
'SGP4Propagator',
'KeplerPropagator',
]
| 1.007813
| 1
|