| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
data_catalog/query_translation.py | trustedanalytics-ng/data-catalog | max_stars_count: 1 | id: 12782551
#
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
from data_catalog.metadata_entry import (CERBERUS_SCHEMA, ORG_UUID_FIELD, CREATION_TIME_FIELD,
IS_PUBLIC_FIELD)
class ElasticSearchQueryTranslator(object):
def __init__(self):
self._log = logging.getLogger(type(self).__name__)
self._filter_translator = ElasticSearchFilterExtractor()
self._base_query_creator = ElasticSearchBaseQueryCreator()
def translate(self, data_catalog_query, org_uuid_list, dataset_filtering, is_admin):
"""
        Translates a Data Catalog query (a string) into an ElasticSearch query string.
        A match_all query is returned when the query is empty.
        An InvalidQueryError is raised for invalid queries.
:param str data_catalog_query: A query string from Data Catalog.
        :param list[str] org_uuid_list: A list of org_uuids that the data set belongs to.
        :param DataSetFiltering dataset_filtering: Describes whether the data sets we want
            should be private, public or both
            (taking the values False, True and None, respectively).
:returns: A JSON string that is a valid ElasticSearch query.
:rtype str:
:raises ValueError:
"""
query_dict = self._get_query_dict(data_catalog_query)
es_query_base = self._base_query_creator.create_base_query(query_dict)
query_filters, post_filters = self._filter_translator.extract_filter(
query_dict,
org_uuid_list,
dataset_filtering,
is_admin)
final_query = self._combine_query_and_filters(es_query_base, query_filters, post_filters)
self._add_pagination(final_query, query_dict)
return json.dumps(final_query)
def _get_query_dict(self, data_catalog_query):
"""
Translates a Data Catalog query from string to a dictionary.
"""
if data_catalog_query:
try:
query_dict = json.loads(data_catalog_query)
except ValueError:
self._log_and_raise_invalid_query('Supplied query is not a JSON document.')
else:
query_dict = {}
return query_dict
@staticmethod
def _combine_query_and_filters(base_es_query, query_filters, post_filters):
"""
        Combines the translated base query and filters into one output query,
        with aggregations for categories and formats.
"""
return {
'query': {
'filtered': {
'filter': query_filters,
'query': base_es_query
}
},
'post_filter': post_filters,
'aggregations': {
'categories': {
'terms': {
'size': 100,
'field': 'category'
}
},
'formats': {
'terms': {
'field': 'format'
}
}
}
}
@staticmethod
def _add_pagination(final_query, input_query_dict):
"""
If input query contains pagination information ("from" and "size" fields) then they
will be added to the output query.
"""
from_field = 'from'
size_field = 'size'
if from_field in input_query_dict:
final_query[from_field] = input_query_dict[from_field]
if size_field in input_query_dict:
final_query[size_field] = input_query_dict[size_field]
def _log_and_raise_invalid_query(self, message):
self._log.error(message)
raise InvalidQueryError(message)
class ElasticSearchBaseQueryCreator(object):
@staticmethod
def create_base_query(query_dict):
"""
Creates a base (text) query for the overall ElasticSearch query (which can contain both
base query and filters).
This query is created based on the "query" field from the Data Catalog query.
A match_all query is returned when there's no text query.
:param dict query_dict: A Data Catalog query in a form of dict (can be empty).
:returns: A dictionary that represents a valid ElasticSearch query.
:rtype dict:
"""
query_string = query_dict.get('query', None)
if query_string:
return ElasticSearchBaseQueryCreator.render_es_query(query_string)
else:
return {'match_all': {}}
@staticmethod
def render_es_query(query_string):
return {
'bool': {
'should': [
{
'multi_match': {
'query': query_string,
'fields': [
'title',
'title.english'
],
'type': 'most_fields'
}
},
{
'match': {
'dataSample': {
'query': query_string,
'boost': 2
}
}
},
{
'match': {
'sourceUri': {
'query': query_string,
}
}
}
]
}
}
class ElasticSearchFilterExtractor(object):
def __init__(self):
self._log = logging.getLogger(type(self).__name__)
# pylint: disable=too-many-branches
def extract_filter(self, query_dict, org_uuid_list,
dataset_filtering, is_admin):
"""
Creates a filter for the ElasticSearch query based on the filter information
from the Data Catalog query.
        Empty filter dicts are returned when there are no filters.
:param dict query_dict: A Data Catalog query in a form of dict (can be empty)
:param list[str] org_uuid_list: List of the organisations' UUIDs
:returns: Two types of filters; each as a dict {'and': [filter1, filter2, ...]}
:rtype (dict, dict):
"""
        # TODO: this should totally be rewritten to have fewer branches
filters = query_dict.get('filters', [])
if dataset_filtering is DataSetFiltering.PRIVATE_AND_PUBLIC:
if not is_admin or org_uuid_list:
filters.append({'orgUUID': org_uuid_list})
filters.append({'isPublic': [True]})
elif dataset_filtering is DataSetFiltering.ONLY_PRIVATE:
if not is_admin or org_uuid_list:
filters.append({'orgUUID': org_uuid_list})
filters.append({'isPublic': [False]})
else:
filters.append({'isPublic': [True]})
result = self._filters_segregation(filters, dataset_filtering)
query_filters, post_filters, or_filters = result
return self._prepare_query_filters_dict(query_filters, post_filters, or_filters)
@staticmethod
def _prepare_query_filters_dict(query_filters, post_filters, or_filters):
if not query_filters and or_filters:
query_filters_dict = {'or': or_filters}
elif or_filters and query_filters:
query_filters.append({'or': or_filters})
query_filters_dict = {'and': query_filters}
elif not or_filters and query_filters:
query_filters_dict = {'and': query_filters}
else:
query_filters_dict = {}
if post_filters:
return query_filters_dict, {'and': post_filters}
else:
return query_filters_dict, {}
def _filters_segregation(self, filters, dataset_filtering):
query_filters = []
post_filters = []
or_filters = []
# filters should be in form NAME: [VALUE, VALUE, ...]
for data_set_filter in filters:
filter_type, filter_values = self._get_filter_properties(data_set_filter)
es_filter = self._translate_filter(filter_type, filter_values)
if not es_filter:
continue
if dataset_filtering is DataSetFiltering.PRIVATE_AND_PUBLIC:
if filter_type in [ORG_UUID_FIELD, IS_PUBLIC_FIELD]:
# filters that are applied with 'or' parameter
or_filters.append(es_filter)
elif filter_type in [CREATION_TIME_FIELD]:
                    # filters that are applied with the query (results are filtered)
query_filters.append(es_filter)
else:
# filters that are applied AFTER the query (results are unfiltered)
post_filters.append(es_filter)
else:
if filter_type in [ORG_UUID_FIELD, CREATION_TIME_FIELD, IS_PUBLIC_FIELD]:
                    # filters that are applied with the query (results are filtered)
query_filters.append(es_filter)
else:
# filters that are applied AFTER the query (results are unfiltered)
post_filters.append(es_filter)
return query_filters, post_filters, or_filters
def _get_filter_properties(self, query_filter):
"""
Gets a tuple: (filter_type, filter_values_list).
Filter should be a dict in form: {FILTER_TYPE: FILTER_VALUES_LIST}
"""
if not isinstance(query_filter, dict):
self._log_and_raise_invalid_query(
"A filter is not a dictionary: {}".format(query_filter))
if not query_filter:
self._log_and_raise_invalid_query("Filter dictionary can't be empty.")
        filter_type, filter_values = next(iter(query_filter.items()))  # works on Python 2 and 3
if filter_type not in CERBERUS_SCHEMA:
self._log_and_raise_invalid_query(
"Can't filter over field {}, because it isn't in the mapping.".format(filter_type))
if not filter_values:
self._log_and_raise_invalid_query("Filter doesn't contain any values")
return filter_type, filter_values
def _translate_filter(self, filter_type, filter_values):
"""
Translates a filter of the given type with the given values list
to an ElasticSearch filter.
"""
def create_normal_filter(values):
values = [str(value).lower() for value in values]
if len(values) == 1:
return {'term': {filter_type: values[0]}}
else:
return {'terms': {filter_type: values}}
def create_time_filter(values):
time_range = {}
if len(values) != 2:
self._log_and_raise_invalid_query('There should be exactly two time range values.')
if values[0] != -1:
time_range['from'] = values[0]
if values[1] != -1:
time_range['to'] = values[1]
return {
'range': {
CREATION_TIME_FIELD: time_range
}
}
if not filter_values:
return None
elif not isinstance(filter_values, list):
self._log_and_raise_invalid_query("Filter values aren't a list.")
if filter_type != CREATION_TIME_FIELD:
return create_normal_filter(filter_values)
else:
return create_time_filter(filter_values)
def _log_and_raise_invalid_query(self, message):
self._log.error(message)
raise InvalidQueryError(message)
class InvalidQueryError(Exception):
pass
class DataSetFiltering(object):
PRIVATE_AND_PUBLIC = None
ONLY_PUBLIC = True
ONLY_PRIVATE = False
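A minimal usage sketch of the translator above (the "format" filter field and the org UUID are hypothetical; a filter only passes validation if its field exists in CERBERUS_SCHEMA):

translator = ElasticSearchQueryTranslator()
data_catalog_query = json.dumps({
    'query': 'sales',                   # free-text part -> the bool/should base query
    'filters': [{'format': ['csv']}],   # hypothetical field; must be in CERBERUS_SCHEMA
    'from': 0, 'size': 10,              # pagination, copied into the output query
})
es_query = translator.translate(data_catalog_query,
                                org_uuid_list=['org-uuid-1'],  # hypothetical UUID
                                dataset_filtering=DataSetFiltering.ONLY_PUBLIC,
                                is_admin=False)
# es_query is a JSON string with 'query', 'post_filter', 'aggregations', 'from' and 'size'.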
score: 2.046875 | int_score: 2
tests/sentry/interfaces/template/tests.py | davedash/sentry | max_stars_count: 1 | id: 12782552
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from sentry.interfaces import Template
from sentry.models import Event
from tests.base import TestCase
class TemplateTest(TestCase):
def test_serialize(self):
interface = Template(
filename='foo.html',
context_line='hello world',
lineno=1,
)
result = interface.serialize()
self.assertEquals(result['filename'], 'foo.html')
self.assertEquals(result['context_line'], 'hello world')
self.assertEquals(result['lineno'], 1)
def test_get_hash(self):
interface = Template(
filename='foo.html',
context_line='hello world',
lineno=1,
)
result = interface.get_hash()
self.assertEquals(result, ['foo.html', 'hello world'])
@mock.patch('sentry.interfaces.get_context')
@mock.patch('sentry.interfaces.Template.get_traceback')
def test_to_string_returns_traceback(self, get_traceback, get_context):
get_traceback.return_value = 'traceback'
event = mock.Mock(spec=Event)
interface = Template(
filename='foo.html',
context_line='hello world',
lineno=1,
)
result = interface.to_string(event)
get_traceback.assert_called_once_with(event, get_context.return_value)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\ntraceback')
score: 2.28125 | int_score: 2
testdata/PyFEM-master/pyfem/elements/SLSutils.py | Konstantin8105/py4go | max_stars_count: 3 | id: 12782553
############################################################################
# This Python file is part of PyFEM, the code that accompanies the book: #
# #
# 'Non-Linear Finite Element Analysis of Solids and Structures' #
# <NAME>, <NAME>, <NAME> and <NAME> #
# <NAME> and Sons, 2012, ISBN 978-0470666449 #
# #
# The code is written by <NAME>, <NAME> and <NAME>. #
# #
# The latest stable version can be downloaded from the web-site: #
# http://www.wiley.com/go/deborst #
# #
# A github repository, with the most up to date version of the code, #
# can be found here: #
# https://github.com/jjcremmers/PyFEM #
# #
# The code is open source and intended for educational and scientific #
# purposes only. If you use PyFEM in your research, the developers would #
# be grateful if you could cite the book. #
# #
# Disclaimer: #
# The authors reserve all rights but do not guarantee that the code is #
# free from errors. Furthermore, the authors shall not be liable in any #
# event caused by the use of the program. #
############################################################################
from numpy import zeros, dot, outer, ones, eye, sqrt, absolute, linalg, cos, sin, cross
from scipy.linalg import eigvals,inv
#-----------------------------------------------------------------------
# class SLSparameters
#-----------------------------------------------------------------------
class SLSparameters:
def __init__( self , nNod ):
if nNod == 8:
self.totDOF = 28
self.condDOF = 24
self.midNodes = 4
self.extNode = 8
self.ansFlag = True
elif nNod == 16:
self.totDOF = 52
self.condDOF = 48
self.midNodes = 4
self.extNode = 16
self.ansFlag = False
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def getlam4( lam ):
lam4 = zeros(shape=(3,3,3,3))
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
lam4[i,j,k,l]=lam[i,k]*lam[j,l]
return lam4
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def iso2locbase( iso , lam4 ):
loc = zeros(6)
loc[0]=iso[0]*lam4[0,0,0,0]+iso[1]*lam4[1,1,0,0]+iso[2]*lam4[2,2,0,0]+ \
iso[3]*0.5*(lam4[0,1,0,0]+lam4[1,0,0,0])+ \
iso[4]*0.5*(lam4[1,2,0,0]+lam4[2,1,0,0])+ \
iso[5]*0.5*(lam4[2,0,0,0]+lam4[0,2,0,0])
loc[1]=iso[0]*lam4[0,0,1,1]+iso[1]*lam4[1,1,1,1]+iso[2]*lam4[2,2,1,1]+ \
iso[3]*0.5*(lam4[0,1,1,1]+lam4[1,0,1,1])+ \
iso[4]*0.5*(lam4[1,2,1,1]+lam4[2,1,1,1])+ \
iso[5]*0.5*(lam4[2,0,1,1]+lam4[0,2,1,1])
loc[2]=iso[0]*lam4[0,0,2,2]+iso[1]*lam4[1,1,2,2]+iso[2]*lam4[2,2,2,2]+ \
iso[3]*0.5*(lam4[0,1,2,2]+lam4[1,0,2,2])+ \
iso[4]*0.5*(lam4[1,2,2,2]+lam4[2,1,2,2])+ \
iso[5]*0.5*(lam4[2,0,2,2]+lam4[0,2,2,2])
loc[3]=iso[0]*(lam4[0,0,0,1]+lam4[0,0,1,0])+ \
iso[1]*(lam4[1,1,0,1]+lam4[1,1,1,0])+ \
iso[2]*(lam4[2,2,0,1]+lam4[2,2,1,0])+ \
iso[3]*0.5*(lam4[0,1,0,1]+lam4[0,1,1,0]+ \
lam4[1,0,0,1]+lam4[1,0,1,0])+ \
iso[4]*0.5*(lam4[1,2,0,1]+lam4[1,2,1,0]+ \
lam4[2,1,0,1]+lam4[2,1,1,0])+ \
iso[5]*0.5*(lam4[2,0,0,1]+lam4[2,0,1,0]+ \
lam4[0,2,0,1]+lam4[0,2,1,0])
loc[4]=iso[0]*(lam4[0,0,1,2]+lam4[0,0,2,1])+ \
iso[1]*(lam4[1,1,1,2]+lam4[1,1,2,1])+ \
iso[2]*(lam4[2,2,1,2]+lam4[2,2,2,1])+ \
iso[3]*0.5*(lam4[0,1,1,2]+lam4[0,1,2,1]+ \
lam4[1,0,1,2]+lam4[1,0,2,1])+ \
iso[4]*0.5*(lam4[1,2,1,2]+lam4[1,2,2,1]+ \
lam4[2,1,1,2]+lam4[2,1,2,1])+ \
iso[5]*0.5*(lam4[2,0,1,2]+lam4[2,0,2,1]+ \
lam4[0,2,1,2]+lam4[0,2,2,1])
loc[5]=iso[0]*(lam4[0,0,2,0]+lam4[0,0,0,2])+ \
iso[1]*(lam4[1,1,2,0]+lam4[1,1,0,2])+ \
iso[2]*(lam4[2,2,2,0]+lam4[2,2,0,2])+ \
iso[3]*0.5*(lam4[0,1,2,0]+lam4[0,1,0,2]+ \
lam4[1,0,2,0]+lam4[1,0,0,2])+ \
iso[4]*0.5*(lam4[1,2,2,0]+lam4[1,2,0,2]+ \
lam4[2,1,2,0]+lam4[2,1,0,2])+ \
iso[5]*0.5*(lam4[2,0,2,0]+lam4[2,0,0,2]+ \
lam4[0,2,2,0]+lam4[0,2,0,2])
return loc
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def iso2loc( iso , lam ):
if iso.ndim == 1:
loc = iso2locbase( iso , getlam4( lam ) )
else:
loc = iso
for i,col in enumerate(iso.T):
loc[:,i] = iso2locbase( col , getlam4( lam ) )
return loc
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def sigma2omega( sigma , lam ):
omega = zeros(6)
omega[0]=lam[0,0]*lam[0,0]*sigma[0]+ \
lam[0,1]*lam[0,1]*sigma[1]+ \
lam[0,2]*lam[0,2]*sigma[2]+ \
2*(lam[0,0]*lam[0,1]*sigma[3])+ \
2*(lam[0,1]*lam[0,2]*sigma[4])+ \
2*(lam[0,0]*lam[0,2]*sigma[5])
omega[1]=lam[1,0]*lam[1,0]*sigma[0]+ \
lam[1,1]*lam[1,1]*sigma[1]+ \
lam[1,2]*lam[1,2]*sigma[2]+ \
2*(lam[1,0]*lam[1,1]*sigma[3])+ \
2*(lam[1,1]*lam[1,2]*sigma[4])+ \
2*(lam[1,0]*lam[1,2]*sigma[5])
omega[2]=lam[2,0]*lam[2,0]*sigma[0]+ \
lam[2,1]*lam[2,1]*sigma[1]+ \
lam[2,2]*lam[2,2]*sigma[2]+ \
2*(lam[2,0]*lam[2,1]*sigma[3])+ \
2*(lam[2,1]*lam[2,2]*sigma[4])+ \
2*(lam[2,0]*lam[2,2]*sigma[5])
omega[3]=lam[0,0]*lam[1,0]*sigma[0]+ \
lam[0,0]*lam[1,1]*sigma[3]+ \
lam[0,0]*lam[1,2]*sigma[5]+ \
lam[0,1]*lam[1,0]*sigma[3]+ \
lam[0,1]*lam[1,1]*sigma[1]+ \
lam[0,1]*lam[1,2]*sigma[4]+ \
lam[0,2]*lam[1,0]*sigma[5]+ \
lam[0,2]*lam[1,1]*sigma[4]+ \
lam[0,2]*lam[1,2]*sigma[2]
omega[4]=lam[1,0]*lam[2,0]*sigma[0]+ \
lam[1,0]*lam[2,1]*sigma[3]+ \
lam[1,0]*lam[2,2]*sigma[5]+ \
lam[1,1]*lam[2,0]*sigma[3]+ \
lam[1,1]*lam[2,1]*sigma[1]+ \
lam[1,1]*lam[2,2]*sigma[4]+ \
lam[1,2]*lam[2,0]*sigma[5]+ \
lam[1,2]*lam[2,1]*sigma[4]+ \
lam[1,2]*lam[2,2]*sigma[2]
omega[5]=lam[0,0]*lam[2,0]*sigma[0]+ \
lam[0,0]*lam[2,1]*sigma[3]+ \
lam[0,0]*lam[2,2]*sigma[5]+ \
lam[0,1]*lam[2,0]*sigma[3]+ \
lam[0,1]*lam[2,1]*sigma[1]+ \
lam[0,1]*lam[2,2]*sigma[4]+ \
lam[0,2]*lam[2,0]*sigma[5]+ \
lam[0,2]*lam[2,1]*sigma[4]+ \
lam[0,2]*lam[2,2]*sigma[2]
return omega
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
class Layer:
pass
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
class LayerData:
def __init__( self , props ):
self.layers = []
self.totThick = 0.
if hasattr( props , "layers" ):
for layID in props.layers:
layprops = getattr( props , layID )
layer = Layer()
layer.thick = layprops.thickness
layer.angle = layprops.angle
layer.matID = layprops.material
self.totThick += layprops.thickness
self.layers.append( layer )
else:
layer = Layer()
layer.thick = 1.0
layer.angle = 0.0
layer.matID = 0
self.totThick = 1.0
self.layers.append( layer )
def __iter__( self ):
return iter( self.layers )
def __len__( self ):
return len(self.layers)
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
class StressContainer:
def __init__( self , param ):
self.nLay = param.nLay
self.nMid = param.midNodes
self.nNod = 2*self.nMid
self.reset()
def reset( self ):
self.data = zeros( shape = ( self.nLay , 6 , self.nNod ) )
self.weights = zeros( self.nLay )
def store( self , sigma , iLay , iIntZeta ):
if self.nLay == 1:
if iIntZeta == 0:
self.data[ 0,:,:4] += outer( sigma , ones(self.nMid) )
self.weights[ 0 ] += 1.
elif iIntZeta == 1:
self.data[ 0 , : , 4: ] += outer( sigma , ones(self.nMid) )
else:
self.data[ iLay , : , : ] += outer( sigma , ones(self.nNod) )
self.weights[ iLay ] += 1
def getStress( self ):
for iLay in range(self.nLay):
self.data[iLay,:,:] *= 1.0/self.weights[iLay]
return self.data.reshape(self.nLay*6,self.nNod).T
def getLabels( self ):
origlabel = ["s11","s22","s33","s13","s23","s12"]
if self.nLay == 1:
return origlabel
else:
labels = []
for iLay in range(self.nLay):
for ll in origlabel:
labels.append( "lay"+str(iLay)+"-"+ll)
return labels
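A quick sanity check of the transformation routines above (a sketch assuming the module is importable): with the identity rotation, lam4 reduces to delta_ik * delta_jl, so both base changes must return their input unchanged.

from numpy import array, eye, allclose

iso = array([1.0, 2.0, 3.0, 0.4, 0.5, 0.6])  # arbitrary 6-component strain-like vector
lam = eye(3)                                 # identity rotation matrix
assert allclose(iso2loc(iso, lam), iso)      # identity base change is a no-op
assert allclose(sigma2omega(iso, lam), iso)  # same for the stress transform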
score: 1.820313 | int_score: 2
wrapper_plugins/jpeg2000_wrapper/tests/test_wrapper.py | spongezhang/maskgen | max_stars_count: 0 | id: 12782554
import unittest
import os
import numpy as np
class TestToolSet(unittest.TestCase):
def test_all(self):
from jpeg2000_wrapper import opener
img = np.random.randint(0, high=255, size=(2000, 4000, 6), dtype=np.uint8)
opener.writeJPeg2000File('foo.jp2',img)
newimg = opener.openJPeg2000File('foo.jp2')
self.assertTrue(np.all(img == newimg[0]))
os.remove('foo.jp2')
if __name__ == '__main__':
unittest.main()
score: 2.34375 | int_score: 2
instResp/libInst.py | mikehagerty/instResp | max_stars_count: 0 | id: 12782555
import numpy as np
from instResp.polezero import polezero
from instResp.plotResp import plotResponse
import os
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
'''
This module contains a collection of non-bulletproof codes
for creating/manipulating instrument response stages,
particularly the first stage = analog polezero stage.
'''
def evalResp(pz, f):
s = 0.000 + 1.000j
numerator = 1.000 + 0.000j
denominator = 1.000 + 0.000j
if pz.type == 'A':
s *= 2.*np.pi*f
elif pz.type == 'B':
s *= f
else:
logger.warn("Unknown pz response type=[%s]" % pz.type)
for j in range(pz.nzeros):
numerator *= (s - pz.zeros[j])
for j in range(pz.npoles):
denominator *= (s - pz.poles[j])
Gf = numerator * pz.a0 # Make sure this is complex
Gf /= denominator
    return Gf
def getResponse(pz, freqs, removeZero=False, useSensitivity=True):
'''
    We're expecting a standard IRIS polezero file for displacement,
    so if removeZero=True we try to shed one zero at the origin (yielding velocity)
'''
if removeZero:
success = pz.remove_zero()
#zeros = np.zeros((pz.zeros.size-1,), dtype=np.complex128)
#success = remove_zero(pz.zeros, zeros)
if success:
logger.debug("Zero successfully removed from origin")
#pz.zeros = zeros
#pz.nzeros = zeros.size
else:
logger.warn("Problem removing zero from origin!")
resp = np.zeros((len(freqs),), dtype=np.complex128)
for i, f in enumerate(freqs):
resp[i] = evalResp(pz, f)
if useSensitivity:
resp[i] *= pz.sensitivity
return resp
def read_sacpz_file(filename):
"""
* **********************************
* NETWORK (KNETWK): AU
* STATION (KSTNM): WR1
* LOCATION (KHOLE):
* CHANNEL (KCMPNM): BHZ
* CREATED : 2017-02-02T01:23:27
* START : 2005-01-31T00:00:00
* END : 2599-12-31T23:59:59
* DESCRIPTION : Warramunga Array, Australia
* LATITUDE : -19.942600
* LONGITUDE : 134.339500
* ELEVATION : 389.0
* DEPTH : 0.0
* DIP : 0.0
* AZIMUTH : 0.0
* SAMPLE RATE : 40.0
* INPUT UNIT : M
* OUTPUT UNIT : COUNTS
* INSTTYPE : Guralp CMG3ESP_30sec_ims/Guralp DM24-MK3 Datalogge
* INSTGAIN : 4.000290e+03 (M/S)
* COMMENT : V3180 A3242
* SENSITIVITY : 2.797400e+09 (M/S)
* A0 : 8.883050e-02
* **********************************
ZEROS 5
+0.000000e+00 +0.000000e+00
+0.000000e+00 +0.000000e+00
+0.000000e+00 +0.000000e+00
+8.670000e+02 +9.050000e+02
+8.670000e+02 -9.050000e+02
POLES 4
-1.486000e-01 +1.486000e-01
-1.486000e-01 -1.486000e-01
-3.140000e+02 +2.023000e+02
-3.140000e+02 -2.023000e+02
CONSTANT 2.484944e+08
"""
fname = 'read_sacpz_file'
with open(filename, 'r') as f:
lines = f.readlines()
zeros = None
poles = None
sensitivity = None
a0 = None
unitsIn = None
unitsOut = None
knet = ""
ksta = ""
kloc = ""
kchan = ""
for i in range(len(lines)):
line = lines[i]
#print "i=[%d] line=[%s]" % (i, line)
if line[0] == '*':
if line[2] != '*':
split_list = line.split(':')
field = split_list[0][1:]
val = split_list[1]
# could have val = "" or val = 2.79E9 (M/S)
val_list = val.split()
nsplit=len(val_list)
#print "field=", field, " val=", val
if 'SENSITIVITY' in field:
sensitivity = float(val_list[0])
elif 'A0' in field:
a0 = float(val_list[0])
elif 'INPUT UNIT' in field:
unitsIn = val.strip()
elif 'OUTPUT UNIT' in field:
unitsOut = val.strip()
elif 'NETWORK' in field:
knet = val.strip()
elif 'STATION' in field:
ksta = val.strip()
elif 'LOCATION' in field:
kloc = val.strip()
elif 'CHANNEL' in field:
kchan = val.strip()
elif line[0:5] == 'ZEROS':
try:
nzeros = int(line[6:len(line)])
            except ValueError:
logger.error("%s.%s Error: can't read nzeros from line=[%s]" % (__name__, fname, line))
exit(1)
#zeros = np.zeros((nzeros,), dtype=np.complex128)
zeros = np.zeros(nzeros, dtype=np.complex128)
for j in range(nzeros):
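                # Note: bumping i here only advances within this block; the outer
                # "for i in range(...)" resets i on its next iteration, so the value
                # lines are revisited but match none of the branches above.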
i += 1
line = lines[i]
(z_re, z_im) = line.split()
zeros[j] = complex( float(z_re), float(z_im) )
elif line[0:5] == 'POLES':
try:
npoles = int(line[6:len(line)])
            except ValueError:
logger.error("%s.%s Error: can't read npoles from line=[%s]" % (__name__, fname, line))
exit(1)
poles = np.zeros(npoles, dtype=np.complex128)
for j in range(npoles):
i += 1
line = lines[i]
(p_re, p_im) = line.split()
poles[j] = complex( float(p_re), float(p_im) )
#print "knet=%s ksta=%s kloc=%s kchan=%s" % (knet, ksta, kloc, kchan)
name = "%s.%s %s.%s" % (knet, ksta, kloc, kchan)
pz_ = polezero(name = name,
type = 'A', #type = 'A[Laplace Transform (Rad/sec)]',
unitsIn = unitsIn,
unitsOut = unitsOut,
a0 = a0,
sensitivity = sensitivity,
sensitivity_f = 1.0,
poles = poles,
zeros = zeros)
return pz_
def get_corner_freq_from_pole(pole):
'''
    Get the distance [rad/s] from the lowest-order pole to the origin
    and return it in Hz [1/s]
'''
return np.sqrt(pole.real**2 + pole.imag**2) / (2.*np.pi)
def test_RC():
from instResp.libNom import RC
R = 4.
C = 1.25/(2.*np.pi)
pzs = RC(tau=R*C)
freqs = np.logspace(-5, 4., num=1000)
resp = getResponse(pzs, freqs, removeZero=False)
title = 'RC filter: R=4 ohms, C=1.25F/2pi'
plotResponse(resp, freqs, title=title, xmin=.001, xmax=100., ymin=0.01, ymax=1.2)
logger.info("Corner freq:%f" % get_corner_freq_from_pole(pzs.poles[0]))
return
def test_WA(damp=.18, gain=1., f0=14, fnorm=100.):
from instResp.libNom import WA, Accelerometer
pzs = WA(per=1/f0, damp=damp, gain=gain, normalize=True, normalize_freq=fnorm)
logger.info(pzs)
freqs = np.logspace(-5, 4., num=500)
resp = getResponse(pzs, freqs, removeZero=False)
#print(np.max(np.abs(resp)))
title='WA for f0=%.2f Hz damp=%.3f gain=%.0f' % (f0,damp, gain)
logger.info("Corner freq:%.2f" % get_corner_freq_from_pole(pzs.poles[0]))
plotResponse(resp, freqs, title=title, xmin=1, xmax=5000., ymin=.01, ymax=1.2)
return
def plot_pz_resp(pzfile=None):
pzs = read_sacpz_file(pzfile)
logger.info(pzs)
freqs = np.logspace(-5, 3., num=500)
resp = getResponse(pzs, freqs, removeZero=True, useSensitivity=False)
title=pzfile
plotResponse(resp, freqs, title=title, xmin=.001, xmax=100., ymin=.01, ymax=1e3)
return
def main():
#test_RC()
test_WA(damp=0.6)
exit()
pz_dir = '/Users/mth/mth/Data/IRIS_Request/pz/'
pz_fil = 'SACPZ.II.AAK.10.BHZ'
plot_pz_resp(pzfile=os.path.join(pz_dir, pz_fil))
exit()
if __name__=="__main__":
main()
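A quick numeric check of get_corner_freq_from_pole, using the lowest-order pole from the SACPZ sample in the read_sacpz_file docstring:

pole = complex(-1.486e-01, +1.486e-01)  # lowest-order pole from the SACPZ example above
f0 = get_corner_freq_from_pole(pole)
print(f0, 1.0 / f0)  # ~0.0334 Hz, i.e. a ~29.9 s corner period, matching the
                     # "CMG3ESP_30sec" instrument named in the docstring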
score: 2.625 | int_score: 3
package/awesome_panel/database/__init__.py | mycarta/awesome-panel | max_stars_count: 1 | id: 12782556
"""Imports to be exposed to the user of the package are listed here"""
from awesome_panel.database.authors import AUTHORS
from awesome_panel.database.resources import RESOURCES
from awesome_panel.database.tags import TAGS
score: 1.320313 | int_score: 1
radiobear/Constituents/parameters.py | david-deboer/radiobear | max_stars_count: 3 | id: 12782557
from argparse import Namespace
def setpar(kwargs):
par = Namespace(units='dBperkm', path='./', verbose=False)
for p, v in kwargs.items():
setattr(par, p, v)
return par
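A usage sketch for setpar (the override values are hypothetical; unknown keys simply become new attributes):

par = setpar({'units': 'invcm', 'verbose': True})
print(par.units, par.path, par.verbose)  # -> invcm ./ True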
score: 2.09375 | int_score: 2
log_it/extensions/marshmallow/log.py | tanj/log-it | max_stars_count: 0 | id: 12782558
# -*- coding: utf-8 -*-
# pylint: disable=R0903, C0115
"""
log_it.extensions.marshmallow.log
---------------------------------
Marshmallow Log Models
:copyright: (c) 2021 by <NAME>
:license: BSD, see LICENSE for more details
"""
from datetime import datetime
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema, auto_field
from marshmallow_sqlalchemy.fields import Nested
from log_it.log.model import (
TLog,
TField,
TLogField,
TMessage,
TMessageType,
TTag,
TTagMessage,
TUserPermission,
TRolePermission,
)
from . import FixtureSchema
from .user import UserFixture, RoleFixture, ActionFixture
class LogSchema(SQLAlchemyAutoSchema):
class Meta:
model = TLog
class FieldSchema(SQLAlchemyAutoSchema):
class Meta:
model = TField
class LogFieldSchema(SQLAlchemyAutoSchema):
class Meta:
model = TLogField
class MessageSchema(SQLAlchemyAutoSchema):
class Meta:
model = TMessage
class MessageTypeSchema(SQLAlchemyAutoSchema):
class Meta:
model = TMessageType
class TagSchema(SQLAlchemyAutoSchema):
class Meta:
model = TTag
class TagMessageSchema(SQLAlchemyAutoSchema):
class Meta:
model = TTagMessage
class UserPermissionSchema(SQLAlchemyAutoSchema):
class Meta:
model = TUserPermission
class RolePermissionSchema(SQLAlchemyAutoSchema):
class Meta:
model = TRolePermission
# FixtureSchema
class LogFixture(FixtureSchema):
"""Barebones Log Fixture for stubs"""
class Meta(FixtureSchema.Meta):
model = TLog
filter_attrs = ["sLog"]
sLog = auto_field()
class FieldFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TField
filter_attrs = ["sField"]
sField = auto_field()
class LogFieldFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TLogField
filter_attrs = [
"log.ixLog",
"field.ixField",
]
log = Nested(LogFixture, many=False)
field = Nested(FieldFixture, many=False)
sValue = auto_field()
iOrder = auto_field(missing=None)
class MessageTypeFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TMessageType
filter_attrs = ["sMessageType"]
class TagFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TTag
filter_attrs = ["sTag"]
sTag = auto_field()
class TagMessageFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TTagMessage
class MessageFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TMessage
# message fixtures are always inserted, never looked up
filter_attrs = None
log = Nested(LogFixture, many=False)
message_type = Nested(MessageTypeFixture, many=False)
user = Nested(UserFixture, many=False)
utcMessage = auto_field(missing=datetime.utcnow)
sMessage = auto_field()
tags = Nested(TagFixture, many=True)
class UserPermissionFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TUserPermission
log = Nested(LogFixture, many=False)
user = Nested(UserFixture, many=False)
action = Nested(ActionFixture, many=False)
class RolePermissionFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TRolePermission
log = Nested(LogFixture, many=False)
role = Nested(RoleFixture, many=False)
action = Nested(ActionFixture, many=False)
class LogFullFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TLog
filter_attrs = ["sLog"]
sLog = auto_field()
user = Nested(UserFixture, many=False)
fields = Nested(FieldFixture, many=True)
user_permissions = Nested(UserPermissionFixture)
role_permissions = Nested(RolePermissionFixture)
score: 2 | int_score: 2
goodgames/games/migrations/0006_auto_20171128_0232.py | mooshu1x2/goodgames | max_stars_count: 0 | id: 12782559
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-28 02:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0005_auto_20171128_0225'),
]
operations = [
migrations.RemoveField(
model_name='game',
name='release_year',
),
migrations.AlterField(
model_name='game',
name='genre',
field=models.CharField(choices=[('Action', 'Action'), ('Adventure', 'Adventure'), ('Fighting', 'Fighting'), ('Platform', 'Platform'), ('Puzzle', 'Puzzle'), ('Racing', 'Racing'), ('Role-Playing', 'Role-Playing'), ('Shooter', 'Shooter'), ('Simulation', 'Simulation'), ('Sports', 'Sports'), ('Strategy', 'Strategy'), ('Misc', 'Misc'), ('Unknown', 'Unknown')], default='Unknown', max_length=30),
),
]
score: 1.679688 | int_score: 2
messaging/schema/accounts.py | sunhoww/messaging | max_stars_count: 1 | id: 12782560
# -*- coding: utf-8 -*-
import graphene
from graphene import relay
from graphene_gae import NdbObjectType, NdbConnectionField
from messaging.models.accounts import (
Account as AccountModel,
create,
update,
delete,
generate_api_key,
)
from messaging.models.services import Service as ServiceModel
from messaging.models.messages import Message as MessageModel
from messaging.schema.services import Service as ServiceType
from messaging.schema.messages import Message as MessageType
from messaging.utils import pick
from messaging.helpers import get_key
from messaging.exceptions import ExecutionUnauthorized
class Account(NdbObjectType):
class Meta:
model = AccountModel
exclude_fields = AccountModel._excluded_keys
interfaces = (relay.Node,)
services = NdbConnectionField(ServiceType)
def resolve_services(self, info, **args):
return ServiceModel.query(ancestor=self.key)
messages = NdbConnectionField(MessageType)
def resolve_messages(self, info, **args):
return MessageModel.query(ancestor=self.key)
@classmethod
def accounts_resolver(cls, root, info):
return AccountModel.query(ancestor=info.context.user_key)
class CreateAccount(relay.ClientIDMutation):
class Input:
site = graphene.String(required=True)
name = graphene.String(required=True)
account = graphene.Field(Account)
@classmethod
def mutate_and_get_payload(cls, root, info, **input):
account = create(
fields=cls.Input._meta.fields.keys(),
user=info.context.user_key,
body=input,
as_obj=True,
)
return CreateAccount(account=account)
class UpdateAccount(relay.ClientIDMutation):
class Input:
id = graphene.ID(required=True)
site = graphene.String()
name = graphene.String()
account = graphene.Field(Account)
@classmethod
def mutate_and_get_payload(cls, root, info, **input):
account_key = get_key(input.get("id"))
if account_key.parent() != info.context.user_key:
raise ExecutionUnauthorized
account = update(
fields=filter(lambda x: x != "id", cls.Input._meta.fields.keys()),
account=account_key,
body=pick(["site", "name"], input),
as_obj=True,
)
return UpdateAccount(account=account)
class DeleteAccount(relay.ClientIDMutation):
class Input:
id = graphene.ID(required=True)
@classmethod
def mutate_and_get_payload(cls, root, info, id):
account_key = get_key(id)
if account_key.parent() != info.context.user_key:
raise ExecutionUnauthorized
delete(account_key)
return DeleteAccount()
class CreateAccountKey(relay.ClientIDMutation):
class Input:
id = graphene.ID(required=True)
key = graphene.String()
@classmethod
def mutate_and_get_payload(cls, root, info, **input):
account_key = get_key(input.get("id"))
if account_key.parent() != info.context.user_key:
raise ExecutionUnauthorized
key = generate_api_key(account_key)
return CreateAccountKey(key=key)
score: 2.15625 | int_score: 2
algocodes/algocodes/pipelines.py | Brucechen13/freeprograms | max_stars_count: 0 | id: 12782561
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from algocodes.items import QuestionItem, ArxivItem, ComicItem
from algocodes.sql import Sql
import os
import requests
class AlgocodesPipeline(object):
def process_item(self, item, spider):
return item
class CodesPipeline(object):
def process_item(self, item, spider):
if isinstance(item, QuestionItem):
Sql.insert_problem(item['ques_id'], item['ques_title'], item['ques_content'], item['ques_acc'], item['ques_submit'], item['ques_level'])
elif isinstance(item, ArxivItem):
Sql.insert_paper(item['arxiv_title'], item['arxiv_auther'], item['arxiv_content'], item['arxiv_time'],
item['arxiv_subject'], item['arxiv_pdfurl'])
elif isinstance(item, ComicItem):
path = '/data/' + item['comic_title'] + '/' + item['comic_chapter']
if not os.path.exists(path):
os.makedirs(path)
for page in range(1, item['comic_page']+1):
pic_url = item['comic_baseurl'].replace('%2F1.jpg', '%2F'+ str(page) +'.jpg')
res = requests.get(pic_url)
                if res.status_code != 200:
print('parse error ', item['comic_baseurl'], pic_url)
return
with open(os.path.join(path, str(page)+'.jpg'), 'wb') as f:
f.write(res.content)
score: 2.609375 | int_score: 3
benderopt/tests/base/test_optimization_problem.py | tchar/benderopt | max_stars_count: 66 | id: 12782562
from benderopt.base import OptimizationProblem, Parameter, Observation
from benderopt.utils import get_test_optimization_problem
from benderopt.validation.utils import ValidationError
import pytest
def test_optimization_problem():
parameter1 = Parameter(
name="param1", category="categorical", search_space={"values": ["a", "b"]}
)
parameter2 = Parameter(name="param2", category="uniform", search_space={"low": 1, "high": 2})
parameters = [parameter1, parameter2]
optimization_problem = OptimizationProblem(parameters)
observation1 = Observation(sample={"param1": "a", "param2": 1.5}, loss=1.5)
optimization_problem.add_observation(observation1)
observation2 = Observation(sample={"param1": "b", "param2": 1.8}, loss=1.8)
optimization_problem.add_observation(observation2)
observation3 = Observation(sample={"param1": "b", "param2": 1.05}, loss=0.1)
optimization_problem.add_observation(observation3)
assert type(optimization_problem.parameters) == list
assert len(optimization_problem.observations) == 3
assert optimization_problem.parameters_name == set(["param1", "param2"])
assert observation1.sample in optimization_problem.samples
assert len(optimization_problem.samples) == 3
assert optimization_problem.best_sample == {"param1": "b", "param2": 1.05}
assert optimization_problem.sorted_observations[0].sample == {"param1": "b", "param2": 1.05}
assert optimization_problem.finite is False
assert len(optimization_problem.find_observations({"param1": "b", "param2": 1.05})) == 1
a, b = optimization_problem.observations_quantile(0.5)
assert len(a) == 1
assert len(b) == 2
assert optimization_problem.get_best_k_samples(1)[0].sample == {"param1": "b", "param2": 1.05}
def test_optimization_problem_from_list():
optimization_problem = OptimizationProblem.from_list(
[
{"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
{"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
]
)
optimization_problem.add_observations_from_list(
[
{"loss": 1.5, "sample": {"param1": "a", "param2": 1.5}},
{"loss": 1.8, "sample": {"param1": "b", "param2": 1.8}},
{"loss": 0.1, "sample": {"param1": "b", "param2": 1.05}},
],
raise_exception=True,
)
assert type(optimization_problem.parameters) == list
assert len(optimization_problem.observations) == 3
assert optimization_problem.parameters_name == set(["param1", "param2"])
assert {"param1": "b", "param2": 1.8} in optimization_problem.samples
assert len(optimization_problem.samples) == 3
assert optimization_problem.best_sample == {"param1": "b", "param2": 1.05}
assert optimization_problem.sorted_observations[0].sample == {"param1": "b", "param2": 1.05}
assert optimization_problem.finite is False
assert len(optimization_problem.find_observations({"param1": "b", "param2": 1.05})) == 1
a, b = optimization_problem.observations_quantile(0.5)
assert len(a) == 1
assert len(b) == 2
assert optimization_problem.get_best_k_samples(1)[0].sample == {"param1": "b", "param2": 1.05}
def test_optimization_problem_from_json():
get_test_optimization_problem()
def test_optimization_problem_bad_param():
with pytest.raises(ValidationError):
OptimizationProblem("lol")
def test_optimization_problem_bad_param_type():
with pytest.raises(ValidationError):
OptimizationProblem(["lol"])
def test_optimization_problem_add_bad_type():
with pytest.raises(ValidationError):
OptimizationProblem.from_list(
{"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}}
)
def test_optimization_problem_add_bad_observation():
optimization_problem = OptimizationProblem.from_list(
[
{"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
{"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
]
)
observation2 = Observation(sample={"lol": "b", "param2": 1.8}, loss=1.8)
with pytest.raises(ValidationError):
optimization_problem.add_observation(observation2)
def test_optimization_problem_from_list_bad_type():
optimization_problem = OptimizationProblem.from_list(
[
{"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
{"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
]
)
with pytest.raises(ValidationError):
optimization_problem.add_observations_from_list("lol", raise_exception=True)
def test_optimization_problem_from_list_bad_sample_name():
optimization_problem = OptimizationProblem.from_list(
[
{"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
{"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
]
)
with pytest.raises(ValidationError):
optimization_problem.add_observations_from_list(
[
{"loss": 1.5, "sample": {"param1": "a", "param2": 1.5}},
{"loss": 1.8, "sample": {"lol": "b", "param2": 1.8}},
{"loss": 0.1, "sample": {"param1": "b", "param2": 1.05}},
],
raise_exception=True,
)
def test_optimization_problem_from_list_bad_value():
optimization_problem = OptimizationProblem.from_list(
[
{"name": "param1", "category": "categorical", "search_space": {"values": ["a", "b"]}},
{"name": "param2", "category": "uniform", "search_space": {"low": 1, "high": 2}},
]
)
with pytest.raises(ValidationError):
optimization_problem.add_observations_from_list(
[
{"loss": 1.5, "sample": {"param1": "c", "param2": 1.5}},
{"loss": 1.8, "sample": {"lol": "b", "param2": 1.8}},
{"loss": 0.1, "sample": {"param1": "b", "param2": 1.05}},
],
raise_exception=True,
)
score: 2.25 | int_score: 2
checker.py | mmagnus/rna-tools-webserver-engine | max_stars_count: 1 | id: 12782563
#!/usr/bin/python
"""
Add to crontab
* 18 * * * /home/rnamasonry/rnamasonryweb_env/rnamasonry-web/checker.sh
"""
import os
import subprocess
import smtplib
from sendmail_secret import USERNAME, PASSWORD
from django.core.wsgi import get_wsgi_application
os.environ['DJANGO_SETTINGS_MODULE'] = 'web.settings'
application = get_wsgi_application()
from app import models
from web import settings
USE_TZ = False
def send_mail_to(mail, txt):
fromaddr = settings.SERVER_NAME + ' report <<EMAIL>>'
subject = settings.SERVER_NAME + ' report'
toaddrs = mail
msg_text = txt
msg = ("""From: %s\r\nTo: %s\r\nSubject: %s\r\nMIME-Version: 1.0\r\nContent-Type: text/html\r\nContent-Disposition: inline\r\n<html>\r\n<body>\r\n<pre style="font: monospace">\r\n\r\n%s\r\n""" % (fromaddr, toaddrs, subject, msg_text))
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(USERNAME, PASSWORD)
server.sendmail(fromaddr, mail, msg)
server.quit()
def get_jobs():
"""time of job is calculated beased on the files!
If there is now file then you don't estimate the time
Keep jobs that are on JOBS_TO_KEEP list"""
jobs = models.Job.objects.filter().order_by("-id")[:100]
    text = settings.SERVER_NAME + ' - checker - script shows the last 100 jobs!\n\n'
if True:
for j in jobs:
status = j.get_status()
if status == 'finished with errors':
status = '!!!!!!!!'
text += str(j.created) + " <b>" + status.ljust(10) + "</b> " + j.email + ' ' + j.job_title + " " \
+ settings.URL_JOBS + " " + j.job_id + ' ' + ' '
if j.error_text:
text += '\n' + j.error_text
text += '\n'
else:
for j in jobs:
text += "- " + j.get_status() + " " + "-" * 80 + "\n" + j.email + "\n" + j.job_title + '\n'
text += settings.URL_JOBS + j.job_id + '\n'
text += str(j.created) + '\n'
text += '\n'
return text
def run_cmd(cmd):
o = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = o.stdout.read().strip().decode()
err = o.stderr.read().strip().decode()
return out, err
def get_space():
"""Set the correct disk to track"""
cmd = "df -h | grep " + settings.DISK_TO_TRACK
out, err = run_cmd(cmd)
return out
if __name__ == '__main__':
txt = '\n\n'.join([settings.SERVER_NAME, settings.ADMIN_JOBS_URL])
txt += '\n\n' + get_space() + '\n\n'
txt += get_jobs()
for admin in settings.ADMINS:
send_mail_to(admin[1], txt)
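A quick sanity check of run_cmd, which returns decoded and stripped stdout/stderr (a sketch assuming a POSIX shell with echo):

out, err = run_cmd('echo hello')
assert (out, err) == ('hello', '')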
score: 2.15625 | int_score: 2
TwitterDatabase/Executables/make_database_tables.py | AdamSwenson/TwitterProject | max_stars_count: 0 | id: 12782564
"""
Created by adam on 6/30/18
"""
__author__ = 'adam'
import environment as env
from TwitterDatabase.DatabaseAccessObjects.DataConnections import MySqlConnection
from TwitterDatabase.Models.TweetORM import create_db_tables
if __name__ == '__main__':
credential_file = env.CREDENTIAL_FILE
# credential_file = '%s/private_credentials/sql_miner_laptop_credentials.xml' % env.BASE
conn = MySqlConnection( credential_file )
print( 'connected to %s' % conn._dsn )
create_db_tables( conn.engine )
score: 1.929688 | int_score: 2
tests/test_get_excluded_volume.py | salilab/IHMValidation | max_stars_count: 0 | id: 12782565
import os,sys,glob
import unittest
import pandas as pd
from io import StringIO, BytesIO
sys.path.insert(0, "../master/pyext/src/")
from validation import get_input_information,utility
from validation.excludedvolume import get_excluded_volume
import warnings
def ignore_warnings(test_func):
def do_test(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ResourceWarning)
test_func(self, *args, **kwargs)
return do_test
class Testing(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(Testing, self).__init__(*args, **kwargs)
self.mmcif_test_file='test.cif'
self.IO=get_excluded_volume(self.mmcif_test_file)
def test_get_all_spheres(self):
fh = StringIO("""
loop_
_ihm_model_list.model_id
_ihm_model_list.model_name
_ihm_model_list.assembly_id
_ihm_model_list.protocol_id
_ihm_model_list.representation_id
1 . 1 1 1
#
loop_
_ihm_model_group.id
_ihm_model_group.name
_ihm_model_group.details
1 "Cluster 1" .
#
loop_
_ihm_model_group_link.group_id
_ihm_model_group_link.model_id
1 1
#
loop_
_ihm_sphere_obj_site.id
_ihm_sphere_obj_site.entity_id
_ihm_sphere_obj_site.seq_id_begin
_ihm_sphere_obj_site.seq_id_end
_ihm_sphere_obj_site.asym_id
_ihm_sphere_obj_site.Cartn_x
_ihm_sphere_obj_site.Cartn_y
_ihm_sphere_obj_site.Cartn_z
_ihm_sphere_obj_site.object_radius
_ihm_sphere_obj_site.rmsf
_ihm_sphere_obj_site.model_id
1 1 1 6 A 389.993 145.089 134.782 4.931 0 1
2 1 7 7 B 406.895 142.176 135.653 3.318 1.34 1
""")
self.assertEqual(1,len(list(self.IO.get_all_spheres(filetemp=fh).keys())))
def test_get_XYZ(self):
fh = StringIO("""
loop_
_ihm_model_list.model_id
_ihm_model_list.model_name
_ihm_model_list.assembly_id
_ihm_model_list.protocol_id
_ihm_model_list.representation_id
1 . 1 1 1
#
loop_
_ihm_model_group.id
_ihm_model_group.name
_ihm_model_group.details
1 "Cluster 1" .
#
loop_
_ihm_model_group_link.group_id
_ihm_model_group_link.model_id
1 1
#
loop_
_ihm_sphere_obj_site.id
_ihm_sphere_obj_site.entity_id
_ihm_sphere_obj_site.seq_id_begin
_ihm_sphere_obj_site.seq_id_end
_ihm_sphere_obj_site.asym_id
_ihm_sphere_obj_site.Cartn_x
_ihm_sphere_obj_site.Cartn_y
_ihm_sphere_obj_site.Cartn_z
_ihm_sphere_obj_site.object_radius
_ihm_sphere_obj_site.rmsf
_ihm_sphere_obj_site.model_id
1 1 1 6 A 389.993 145.089 134.782 4.931 0 1
2 1 7 7 B 406.895 142.176 135.653 3.318 1.34 1
""")
model_dict=self.IO.get_all_spheres(filetemp=fh)
list_of_sphere_list=list(model_dict.values())
xyz_df=self.IO.get_xyzr(list_of_sphere_list[0])
self.assertEqual(406.895,xyz_df.iloc[0,1])
def test_get_violation_dict(self):
fh = StringIO("""
loop_
_ihm_model_list.model_id
_ihm_model_list.model_name
_ihm_model_list.assembly_id
_ihm_model_list.protocol_id
_ihm_model_list.representation_id
1 . 1 1 1
#
loop_
_ihm_model_group.id
_ihm_model_group.name
_ihm_model_group.details
1 "Cluster 1" .
#
loop_
_ihm_model_group_link.group_id
_ihm_model_group_link.model_id
1 1
#
loop_
_ihm_sphere_obj_site.id
_ihm_sphere_obj_site.entity_id
_ihm_sphere_obj_site.seq_id_begin
_ihm_sphere_obj_site.seq_id_end
_ihm_sphere_obj_site.asym_id
_ihm_sphere_obj_site.Cartn_x
_ihm_sphere_obj_site.Cartn_y
_ihm_sphere_obj_site.Cartn_z
_ihm_sphere_obj_site.object_radius
_ihm_sphere_obj_site.rmsf
_ihm_sphere_obj_site.model_id
1 1 1 6 A 389.993 145.089 134.782 4.931 0 1
2 1 7 7 B 406.895 142.176 135.653 3.318 1.34 1
""")
check_xyz={1:[389.993,145.089,134.782,4.931],\
2:[406.895,142.176,135.653,3.318]}
check_xyz_df = pd.DataFrame(data=check_xyz,index=['X','Y','Z','R'])
model_dict=self.IO.get_all_spheres(filetemp=fh)
list_of_sphere_list=list(model_dict.values())
xyz_df=self.IO.get_xyzr(list_of_sphere_list[0])
self.assertEqual(check_xyz_df.values.tolist(),xyz_df.values.tolist())
add_chain={1:['A',1],2:['B',1]}
add_chain_df = pd.DataFrame(data=add_chain,index=['Chain_ID','Model_ID'])
fin=pd.concat([check_xyz_df,add_chain_df])
xyz_complete_df=self.IO.get_xyzr_complete(model_ID=1,spheres=list_of_sphere_list[0])
self.assertEqual(fin.values.tolist(),xyz_complete_df.values.tolist())
viol_dict=self.IO.get_violation_dict(xyz_df)
self.assertEqual({1: 0.0},self.IO.get_violation_dict(xyz_df))
perc_satisfied=self.IO.get_violation_percentage(models_spheres_df=xyz_df,viols=viol_dict)
self.assertEqual(100.0,perc_satisfied)
def test_get_violation_others(self):
fh = StringIO("""
loop_
_ihm_model_list.model_id
_ihm_model_list.model_name
_ihm_model_list.assembly_id
_ihm_model_list.protocol_id
_ihm_model_list.representation_id
1 . 1 1 1
#
loop_
_ihm_model_group.id
_ihm_model_group.name
_ihm_model_group.details
1 "Cluster 1" .
#
loop_
_ihm_model_group_link.group_id
_ihm_model_group_link.model_id
1 1
#
loop_
_ihm_sphere_obj_site.id
_ihm_sphere_obj_site.entity_id
_ihm_sphere_obj_site.seq_id_begin
_ihm_sphere_obj_site.seq_id_end
_ihm_sphere_obj_site.asym_id
_ihm_sphere_obj_site.Cartn_x
_ihm_sphere_obj_site.Cartn_y
_ihm_sphere_obj_site.Cartn_z
_ihm_sphere_obj_site.object_radius
_ihm_sphere_obj_site.rmsf
_ihm_sphere_obj_site.model_id
1 1 1 6 A 389.993 145.089 134.782 4.931 0 1
2 1 7 7 B 389.895 142.176 135.653 3.318 1.34 1
""")
model_dict=self.IO.get_all_spheres(filetemp=fh)
list_of_sphere_list=list(model_dict.values())
xyz_df=self.IO.get_xyzr(list_of_sphere_list[0])
viol_dict=self.IO.get_violation_dict(xyz_df)
self.assertEqual({1: 1.0},viol_dict)
perc_satisfied=self.IO.get_violation_percentage(models_spheres_df=xyz_df,viols=viol_dict)
self.assertEqual(0.0,perc_satisfied)
    def test_violation_multiple_models(self):
fh = StringIO("""
loop_
_ihm_model_list.model_id
_ihm_model_list.model_name
_ihm_model_list.assembly_id
_ihm_model_list.protocol_id
_ihm_model_list.representation_id
1 . 1 1 1
2 . 1 1 1
#
loop_
_ihm_model_group.id
_ihm_model_group.name
_ihm_model_group.details
1 "Cluster 1" .
2 "Cluster 2" .
#
loop_
_ihm_model_group_link.group_id
_ihm_model_group_link.model_id
1 1
2 2
#
loop_
_ihm_sphere_obj_site.id
_ihm_sphere_obj_site.entity_id
_ihm_sphere_obj_site.seq_id_begin
_ihm_sphere_obj_site.seq_id_end
_ihm_sphere_obj_site.asym_id
_ihm_sphere_obj_site.Cartn_x
_ihm_sphere_obj_site.Cartn_y
_ihm_sphere_obj_site.Cartn_z
_ihm_sphere_obj_site.object_radius
_ihm_sphere_obj_site.rmsf
_ihm_sphere_obj_site.model_id
1 1 1 6 A 389.993 145.089 134.782 4.931 0 1
2 1 7 7 B 389.895 142.176 135.653 3.318 1.34 1
3 1 1 6 A 489.993 145.089 134.782 4.931 0 2
4 1 7 7 B 589.895 142.176 135.653 3.318 1.34 2
""")
model_dict=self.IO.get_all_spheres(filetemp=fh)
output={'Models': [1, 2], 'Excluded Volume Satisfaction': [0.0, 100.0]}
        self.assertEqual(output, self.IO.get_exc_vol_for_models_normalized(model_dict))
if __name__ == '__main__':
unittest.main(warnings='ignore')
score: 2.1875 | int_score: 2
PEASTrainer.py | SiqiT/PEAS | max_stars_count: 0 | id: 12782566
import sys
import pandas as pd
import numpy as np
import PEASUtil
from sklearn.neural_network import MLPClassifier
from sklearn import preprocessing
import joblib
import argparse
import os
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_validate
from sklearn.inspection import permutation_importance
########################DEEPINSIGHT
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA, KernelPCA
from sklearn.manifold import TSNE
from scipy.spatial import ConvexHull
import inspect
from numpy import loadtxt
from keras.models import Sequential
from keras.layers import Dense
###########################################
wd = os.getcwd()
#argument parsing
parser = argparse.ArgumentParser(description='Trains a multi-layer perceptron neural network model for ATAC-seq peak data.')
parser.add_argument('featurefiles', type=str, help='File listing the file paths of all features to train the model.')
parser.add_argument('-o', dest='out', type=str, help='The selected directory saving outputfiles.')
parser.add_argument('-n', dest='name', type=str, help='Name of the Model.')
parser.add_argument('-p', dest='paramstring', help='String containing the parameters for the model.', type=str)
parser.add_argument('-f', dest='features', help='Feature index file specifying which columns to include in the feature matrix.', type=str)
parser.add_argument('-c', dest='classes', help='File containing class label transformations into integer representations.', type=str)
parser.add_argument('-l', dest='labelencoder', help='File containing feature label transformations into integer representations.', type=str)
parser.add_argument('-r', dest='randomstate', help='Integer for setting the random number generator seed.', type=int, default=929)
args = parser.parse_args()
#Required Arguments
datasetlabels, datasetfiles = PEASUtil.getDatasets(args.featurefiles)
#Optional Arguments
featurefiledirectory = os.path.dirname(args.featurefiles)
featurefilename = os.path.splitext(os.path.basename(args.featurefiles))[0]
if args.name is not None:
modelname = args.name
modelnamefile = args.name.replace(" ", "_")
else:
modelname = featurefilename
modelnamefile = featurefilename.replace(" ", "_")
if args.out is not None:
outdir = PEASUtil.getFormattedDirectory(args.out)
else:
outdir = PEASUtil.getFormattedDirectory(featurefiledirectory)
parameters = PEASUtil.getModelParameters(args.paramstring)
if args.features is not None:
featurecolumns = PEASUtil.getFeatureColumnData(args.features)
else:
featurecolumns = PEASUtil.getFeatureColumnData(wd+"/features.txt")
if args.classes is not None:
classconversion = PEASUtil.getClassConversions(args.classes)
else:
classconversion = PEASUtil.getClassConversions(wd+"/classes.txt")
if args.labelencoder is not None:
labelencoder = PEASUtil.getLabelEncoder(args.labelencoder)
else:
labelencoder = PEASUtil.getLabelEncoder(wd+"/labelencoder.txt")
randomstate = args.randomstate
parameters['random_state'] = randomstate
#Model Training
#imputer = preprocessing.Imputer(missing_values='NaN', strategy='mean', axis=0)
imputer = SimpleImputer(strategy='mean')
trainX = np.zeros((0,len(featurecolumns)))
trainy = np.zeros((0,))
print("Reading feature files")
for curfile in datasetfiles:
curdata = pd.read_csv(curfile, sep="\t")
trainXi, trainyi, _, _, = PEASUtil.getData(curdata, featurecolumns, labelencoder, classconversion)
trainXi = preprocessing.StandardScaler().fit_transform(imputer.fit_transform(trainXi))
trainX = np.concatenate((trainX, trainXi))
trainy = np.concatenate((trainy, trainyi))
train_X,test_X,train_y,test_y = train_test_split(trainX,trainy,test_size=0.2,random_state=5)
#mlp_clf__tuned_parameters = {"hidden_layer_sizes": [(25,),(50,),(100,50),(200,100),(100,25),(200,)],
# "activation":['relu','tanh','logistic'],
## "solver": ['adam'],
# "verbose": [True],
# "beta_1":[0.999,0.8],
# "beta_2":[0.9999,0.999,0.8],
# "epsilon":[1e-08,1e-06,1e-10]
# }
########################################Deepinsight
########################################
#SVM_parameters = {'kernel': ['rbf'],
# 'gamma': [1e-3, 1e-2,1e-4],
# 'C': [1, 10, 100, 1000],
# "verbose": [True]
# }
#mlp = MLPClassifier()
#SVM = SVC(probability=True)
#clf = GradientBoostingClassifier()
#print('searching best..')
#clf = GridSearchCV(SVM, SVM_parameters, n_jobs=5)
print("Training Model")
#clf = SVC(probability=True)
#clf = RandomForestClassifier(random_state=0)
#clf = KNeighborsClassifier(n_neighbors=20)
#clf = MLPClassifier(**parameters)
# optimal parameters #
clf=MLPClassifier(solver='adam',beta_1=0.999,beta_2=0.999,epsilon=0.000001,activation='logistic',hidden_layer_sizes=(200,))
print(clf)
clf.fit(trainX, trainy)
#print("Best",clf.best_params_)
####################get feature inportance
#clf=MLPClassifier(solver='adam',beta_1=0.999,beta_2=0.999,epsilon=0.000001,activation='logistic',hidden_layer_sizes=(200,))
#print(clf)
#clf.fit(trainX,trainy)
#results = permutation_importance(clf, trainX, trainy, scoring='accuracy')
#get importance
#importance = results.importances_mean
#import matplotlib.pyplot as plt
#summarize feature importance
#for i,v in enumerate(importance):
# print('Feature: %s, Score: %.5f' % (i,v))
#plot feature importance
#plt.bar([x for x in range(len(importance))], importance)
#plt.savefig('featureInportance.jpg')
#plt.show()
#output = cross_validate(clf, trainX,trainy, cv=5, scoring = 'accuracy',return_estimator =True)
#print(output)
####################################################
outfile = outdir+modelnamefile+'.pkl'
print("Writing model to: "+outfile)
joblib.dump(clf, outfile)
print("Complete.")
score: 2.4375 | int_score: 2
pikuli/input/input_emulator.py | NVoronchev/pikuli | max_stars_count: 0 | id: 12782567
# -*- coding: utf-8 -*-
import time
from contextlib import contextmanager
from pikuli import logger
from .constants import (
DELAY_KBD_KEY_PRESS, DELAY_KBD_KEY_RELEASE,
DELAY_MOUSE_BTN_PRESS, DELAY_MOUSE_BTN_RELEASE,
DELAY_MOUSE_CLICK, DELAY_MOUSE_DOUBLE_CLICK, DELAY_MOUSE_AFTER_ANY_CLICK,
DELAY_MOUSE_SET_POS, DELAY_MOUSE_SCROLL
)
from .keys import InputSequence, Key, KeyModifier
from .platform_init import ButtonCode, KeyCode, OsKeyboardMixin, OsMouseMixin
class KeyboardMixin(object):
#_PrintableChars = set(string.printable) - set(????)
# TODO: Latin and Cyrillic only yet.
_PrintableChars = (
set(u"0123456789!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r") |
set(u"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") |
set(u"абвгдеёжзийклмнопрстуфхцчщъьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧЩЪЬЭЮЯ №")
)
@classmethod
def type_text(cls, input_data, modifiers=None, p2c_notif=True):
"""
Особенности:
-- Если установлены modifiers, то не будет различия между строчными и загалавными буквами.
Т.е., будет если в строке "s" есть заглавные буквы, то Shift нажиматься не будет.
"""
# https://mail.python.org/pipermail/python-win32/2013-July/012862.html
# https://msdn.microsoft.com/ru-ru/library/windows/desktop/ms646304(v=vs.85).aspx ("MSDN: keybd_event function")
# http://stackoverflow.com/questions/4790268/how-to-generate-keystroke-combination-in-win32-api
# http://stackoverflow.com/questions/11906925/python-simulate-keydown
# https://ru.wikipedia.org/wiki/Скан-код
# http://stackoverflow.com/questions/21197257/keybd-event-keyeventf-extendedkey-explanation-required
input_data = InputSequence(input_data)
modifiers = InputSequence(modifiers)
@contextmanager
        def _press_shift_if_necessary(char_need_shift_key):
            if char_need_shift_key and modifiers.is_empty():
                cls.press_key(KeyCode.SHIFT)
                try:
                    yield
                finally:
                    # Release SHIFT even if typing the key raises, so the
                    # modifier is never left stuck down.
                    cls.release_key(KeyCode.SHIFT)
            else:
                yield
try:
cls.press_modifiers(modifiers)
for item in input_data:
try:
key_code, need_shift = cls.str_item_to_keycode(item)
except Exception as ex:
logger.exception('Error dealing with symbol {!r} in string {!r}.'.format(item, input_data))
raise
with _press_shift_if_necessary(need_shift):
cls.type_key(key_code)
finally:
cls.release_modifiers(modifiers)
if p2c_notif:
logger.info('pikuli._functions.type_text(): {!r} '
'was typed; modifiers={!r}'.format(input_data, modifiers))
@classmethod
def str_item_to_keycode(cls, item):
if isinstance(item, Key):
return item.key_code, False
else:
assert item in cls._PrintableChars, 'PrintableChars={!r}; item={!r}'.format(cls._PrintableChars, item)
return cls._char_to_keycode(item)
@classmethod
def type_key(cls, key_code):
cls.press_key(key_code)
cls.release_key(key_code)
@classmethod
def press_modifiers(cls, modifiers):
cls._do_modifier_keys_action(modifiers, cls.press_key)
@classmethod
def release_modifiers(cls, modifiers):
cls._do_modifier_keys_action(modifiers, cls.release_key)
@classmethod
def press_key(cls, key_code):
cls._do_press_key(key_code)
time.sleep(DELAY_KBD_KEY_PRESS)
@classmethod
def release_key(cls, key_code):
cls._do_release_key(key_code)
time.sleep(DELAY_KBD_KEY_RELEASE)
@classmethod
def _do_modifier_keys_action(cls, modifiers, action):
for m in modifiers:
action(m.key_code)
class MouseMixin(object):
@classmethod
def left_click(cls):
cls.click(ButtonCode.LEFT)
@classmethod
def right_click(cls):
cls.click(ButtonCode.RIGHT)
@classmethod
def left_dbl_click(cls):
cls.double_click(ButtonCode.LEFT)
@classmethod
def click(cls, btn_code):
cls._click_with_no_after_sleep(btn_code)
time.sleep(DELAY_MOUSE_AFTER_ANY_CLICK)
@classmethod
def double_click(cls, btn_code):
cls._click_with_no_after_sleep(btn_code)
time.sleep(DELAY_MOUSE_DOUBLE_CLICK)
cls._click_with_no_after_sleep(btn_code)
time.sleep(DELAY_MOUSE_AFTER_ANY_CLICK)
@classmethod
def _click_with_no_after_sleep(cls, btn_code):
cls._do_press_button(btn_code)
time.sleep(DELAY_MOUSE_CLICK)
cls._do_release_button(btn_code)
@classmethod
def press_button(cls, key_code):
cls._do_press_button(key_code)
time.sleep(DELAY_MOUSE_BTN_PRESS)
@classmethod
def release_button(cls, key_code):
cls._do_release_button(key_code)
time.sleep(DELAY_MOUSE_BTN_RELEASE)
@classmethod
def set_mouse_pos(cls, x, y):
cls._set_mouse_pos(x, y)
time.sleep(DELAY_MOUSE_SET_POS)
@classmethod
def get_mouse_pos(cls):
return cls._get_mouse_pos()
@classmethod
def scroll(cls, direction, count=1, step=1):
for _ in range(0, count):
cls._do_scroll(direction, step=step)
time.sleep(DELAY_MOUSE_SCROLL)
class InputEmulator(
KeyboardMixin, MouseMixin,
OsKeyboardMixin, OsMouseMixin):
pass
| 2.171875
| 2
|
src/validators.py
|
ostjen/kalendar
| 0
|
12782568
|
from datetime import datetime
import argparse
from config.settings import DEFAULT_TIME_FORMAT
def valid_date(date):
try:
return datetime.strptime(date,DEFAULT_TIME_FORMAT)
except ValueError:
msg = "Not a valid date: '{0}'.".format(date)
raise argparse.ArgumentTypeError(msg)
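# Assumed usage: pass valid_date as an argparse type converter, e.g.
#   parser.add_argument('--date', type=valid_date)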
| 3.296875
| 3
|
examples/ae_mnist_tf.py
|
AtrejuArtax/aironsuit
| 3
|
12782569
|
<filename>examples/ae_mnist_tf.py
# Databricks notebook source
import os
import numpy as np
from hyperopt import Trials
from tensorflow.keras.datasets import mnist
from tensorflow.keras.optimizers import Adam
from aironsuit.design.utils import choice_hp
from aironsuit.suit import AIronSuit
from airontools.constructors.models.unsupervised.ae import ImageAE
from airontools.preprocessing import train_val_split
from airontools.tools import path_management
HOME = os.path.expanduser("~")
# COMMAND ----------
# Example Set-Up #
model_name = 'AE_NN'
working_path = os.path.join(HOME, 'airon', model_name)
num_classes = 10
batch_size = 128
epochs = 3
patience = 3
max_evals = 3
max_n_samples = None
precision = 'float32'
# COMMAND ----------
# Make/remove paths
path_management(working_path, modes=['rm', 'make'])
# COMMAND ----------
# Load and preprocess data
(train_dataset, target_dataset), _ = mnist.load_data()
if max_n_samples is not None:
train_dataset = train_dataset[-max_n_samples:, ...]
target_dataset = target_dataset[-max_n_samples:, ...]
train_dataset = np.expand_dims(train_dataset, -1) / 255
# Split data per parallel model
x_train, x_val, _, meta_val, _ = train_val_split(input_data=train_dataset, meta_data=target_dataset)
# COMMAND ----------
# AE Model constructor
def ae_model_constructor(latent_dim):
# Create AE model and compile it
ae = ImageAE(latent_dim)
ae.compile(optimizer=Adam())
return ae
# COMMAND ----------
# Training specs
train_specs = {'batch_size': batch_size}
# Hyper-parameter space
hyperparam_space = {'latent_dim': choice_hp('latent_dim', [int(val) for val in np.arange(3, 6)])}
# COMMAND ----------
# Invoke AIronSuit
aironsuit = AIronSuit(
model_constructor=ae_model_constructor,
force_subclass_weights_saver=True,
force_subclass_weights_loader=True,
results_path=working_path
)
# COMMAND ----------
# Automatic Model Design
print('\n')
print('Automatic Model Design \n')
aironsuit.design(
x_train=x_train,
x_val=x_val,
hyper_space=hyperparam_space,
train_specs=train_specs,
max_evals=max_evals,
epochs=epochs,
trials=Trials(),
name=model_name,
seed=0,
patience=patience
)
aironsuit.summary()
del x_train
# COMMAND ----------
# Get latent insights
aironsuit.visualize_representations(
x_val,
metadata=meta_val,
hidden_layer_name='z',
)
| 2.40625
| 2
|
authentik/crypto/api.py
|
BeryJu/passbook
| 15
|
12782570
|
"""Crypto API Views"""
from typing import Optional
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.x509 import load_pem_x509_certificate
from django.http.response import HttpResponse
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django_filters import FilterSet
from django_filters.filters import BooleanFilter
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import OpenApiParameter, OpenApiResponse, extend_schema
from rest_framework.decorators import action
from rest_framework.fields import CharField, DateTimeField, IntegerField, SerializerMethodField
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer, ValidationError
from rest_framework.viewsets import ModelViewSet
from structlog.stdlib import get_logger
from authentik.api.decorators import permission_required
from authentik.core.api.used_by import UsedByMixin
from authentik.core.api.utils import PassiveSerializer
from authentik.crypto.builder import CertificateBuilder
from authentik.crypto.managed import MANAGED_KEY
from authentik.crypto.models import CertificateKeyPair
from authentik.events.models import Event, EventAction
LOGGER = get_logger()
class CertificateKeyPairSerializer(ModelSerializer):
"""CertificateKeyPair Serializer"""
cert_expiry = DateTimeField(source="certificate.not_valid_after", read_only=True)
cert_subject = SerializerMethodField()
private_key_available = SerializerMethodField()
private_key_type = SerializerMethodField()
certificate_download_url = SerializerMethodField()
private_key_download_url = SerializerMethodField()
def get_cert_subject(self, instance: CertificateKeyPair) -> str:
"""Get certificate subject as full rfc4514"""
return instance.certificate.subject.rfc4514_string()
def get_private_key_available(self, instance: CertificateKeyPair) -> bool:
"""Show if this keypair has a private key configured or not"""
return instance.key_data != "" and instance.key_data is not None
def get_private_key_type(self, instance: CertificateKeyPair) -> Optional[str]:
"""Get the private key's type, if set"""
key = instance.private_key
if key:
return key.__class__.__name__.replace("_", "").lower().replace("privatekey", "")
return None
def get_certificate_download_url(self, instance: CertificateKeyPair) -> str:
"""Get URL to download certificate"""
return (
reverse(
"authentik_api:certificatekeypair-view-certificate",
kwargs={"pk": instance.pk},
)
+ "?download"
)
def get_private_key_download_url(self, instance: CertificateKeyPair) -> str:
"""Get URL to download private key"""
return (
reverse(
"authentik_api:certificatekeypair-view-private-key",
kwargs={"pk": instance.pk},
)
+ "?download"
)
def validate_certificate_data(self, value: str) -> str:
"""Verify that input is a valid PEM x509 Certificate"""
try:
# Cast to string to fully load and parse certificate
# Prevents issues like https://github.com/goauthentik/authentik/issues/2082
str(load_pem_x509_certificate(value.encode("utf-8"), default_backend()))
        except ValueError as exc:
            LOGGER.warning("Failed to load certificate", exc=exc)
            raise ValidationError("Unable to load certificate.") from exc
return value
def validate_key_data(self, value: str) -> str:
"""Verify that input is a valid PEM Key"""
# Since this field is optional, data can be empty.
if value != "":
try:
# Cast to string to fully load and parse certificate
# Prevents issues like https://github.com/goauthentik/authentik/issues/2082
str(
load_pem_private_key(
str.encode("\n".join([x.strip() for x in value.split("\n")])),
password=None,
backend=default_backend(),
)
)
            except (ValueError, TypeError) as exc:
                LOGGER.warning("Failed to load private key", exc=exc)
                raise ValidationError("Unable to load private key (possibly encrypted?).") from exc
return value
class Meta:
model = CertificateKeyPair
fields = [
"pk",
"name",
"fingerprint_sha256",
"fingerprint_sha1",
"certificate_data",
"key_data",
"cert_expiry",
"cert_subject",
"private_key_available",
"private_key_type",
"certificate_download_url",
"private_key_download_url",
"managed",
]
extra_kwargs = {
"key_data": {"write_only": True},
"certificate_data": {"write_only": True},
}
class CertificateDataSerializer(PassiveSerializer):
"""Get CertificateKeyPair's data"""
data = CharField(read_only=True)
class CertificateGenerationSerializer(PassiveSerializer):
"""Certificate generation parameters"""
common_name = CharField()
subject_alt_name = CharField(required=False, allow_blank=True, label=_("Subject-alt name"))
validity_days = IntegerField(initial=365)
class CertificateKeyPairFilter(FilterSet):
"""Filter for certificates"""
has_key = BooleanFilter(
label="Only return certificate-key pairs with keys", method="filter_has_key"
)
# pylint: disable=unused-argument
def filter_has_key(self, queryset, name, value): # pragma: no cover
"""Only return certificate-key pairs with keys"""
return queryset.exclude(key_data__exact="")
class Meta:
model = CertificateKeyPair
fields = ["name", "managed"]
class CertificateKeyPairViewSet(UsedByMixin, ModelViewSet):
"""CertificateKeyPair Viewset"""
queryset = CertificateKeyPair.objects.exclude(managed=MANAGED_KEY)
serializer_class = CertificateKeyPairSerializer
filterset_class = CertificateKeyPairFilter
ordering = ["name"]
search_fields = ["name"]
@permission_required(None, ["authentik_crypto.add_certificatekeypair"])
@extend_schema(
request=CertificateGenerationSerializer(),
responses={
200: CertificateKeyPairSerializer,
400: OpenApiResponse(description="Bad request"),
},
)
@action(detail=False, methods=["POST"])
def generate(self, request: Request) -> Response:
"""Generate a new, self-signed certificate-key pair"""
data = CertificateGenerationSerializer(data=request.data)
if not data.is_valid():
return Response(data.errors, status=400)
builder = CertificateBuilder()
builder.common_name = data.validated_data["common_name"]
builder.build(
subject_alt_names=data.validated_data.get("subject_alt_name", "").split(","),
validity_days=int(data.validated_data["validity_days"]),
)
instance = builder.save()
serializer = self.get_serializer(instance)
return Response(serializer.data)
@extend_schema(
parameters=[
OpenApiParameter(
name="download",
location=OpenApiParameter.QUERY,
type=OpenApiTypes.BOOL,
)
],
responses={200: CertificateDataSerializer(many=False)},
)
@action(detail=True, pagination_class=None, filter_backends=[])
# pylint: disable=invalid-name, unused-argument
def view_certificate(self, request: Request, pk: str) -> Response:
"""Return certificate-key pairs certificate and log access"""
certificate: CertificateKeyPair = self.get_object()
Event.new( # noqa # nosec
EventAction.SECRET_VIEW,
secret=certificate,
type="certificate",
).from_http(request)
if "download" in request.query_params:
# Mime type from https://pki-tutorial.readthedocs.io/en/latest/mime.html
response = HttpResponse(
certificate.certificate_data, content_type="application/x-pem-file"
)
response[
"Content-Disposition"
] = f'attachment; filename="{certificate.name}_certificate.pem"'
return response
return Response(CertificateDataSerializer({"data": certificate.certificate_data}).data)
@extend_schema(
parameters=[
OpenApiParameter(
name="download",
location=OpenApiParameter.QUERY,
type=OpenApiTypes.BOOL,
)
],
responses={200: CertificateDataSerializer(many=False)},
)
@action(detail=True, pagination_class=None, filter_backends=[])
# pylint: disable=invalid-name, unused-argument
def view_private_key(self, request: Request, pk: str) -> Response:
"""Return certificate-key pairs private key and log access"""
certificate: CertificateKeyPair = self.get_object()
Event.new( # noqa # nosec
EventAction.SECRET_VIEW,
secret=certificate,
type="private_key",
).from_http(request)
if "download" in request.query_params:
# Mime type from https://pki-tutorial.readthedocs.io/en/latest/mime.html
response = HttpResponse(certificate.key_data, content_type="application/x-pem-file")
response[
"Content-Disposition"
] = f'attachment; filename="{certificate.name}_private_key.pem"'
return response
return Response(CertificateDataSerializer({"data": certificate.key_data}).data)
| 1.898438
| 2
|
Gathered CTF writeups/ptr-yudai-writeups/2019/HSCTF_6/tux's_kitchen/solve.py
|
mihaid-b/CyberSakura
| 1
|
12782571
|
from ptrlib import *
candidate = [
[chr(j) for j in range(1, 0x100)]
for i in range(71)
]
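# Candidate-pruning attack: each connection leaks a fresh encryption of the same
# 71-character flag. For every position, only characters whose ordinal divides
# (ciphertext value XOR 29486316) can be correct, so intersecting the surviving
# candidates across connections eventually pins each position to one character.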
while True:
sock = Socket("crypto.hsctf.com", 8112)
sock.recvuntil("[")
l = list(map(lambda x: int(x.rstrip(b"L")), sock.recv().rstrip().rstrip(b"]").split(b", ")))
# original treasure
index = 0
for c in l:
pre = list(candidate[index])
candidate[index] = []
for i in range(ord(" "), ord("~")):
x = c ^ 29486316
if x % i == 0 and chr(i) in pre:
candidate[index].append(chr(i))
index += 1
for w in candidate:
if len(w) != 1:
break
else:
print(candidate)
        print(''.join(w[0] for w in candidate))
break
print(candidate)
print("Trying...")
# hsctf{thiii111iiiss_isssss_yo0ur_b1rthd4y_s0ng_it_isnt_very_long_6621}
| 2.265625
| 2
|
loadNN.py
|
JOTELLECHEA/neural_networks
| 0
|
12782572
|
<reponame>JOTELLECHEA/neural_networks<filename>loadNN.py<gh_stars>0
# Written By : <NAME>
# Adviser : <NAME>, Phd
# Research : Using a neural network to maximize the significance of ttHH production.
# Description: Script that loads NN weights and makes 1D plots that apply NN score for a cut.
# Reference :http://cdsweb.cern.ch/record/2220969/files/ATL-PHYS-PUB-2016-023.pdf
###########################################################################################################################
import uproot
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import tkinter as tk
import matplotlib
import slug
import datetime
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from numpy import array
from tensorflow.keras.models import load_model
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, confusion_matrix,roc_curve
sc = StandardScaler()
seed = 42
tree = "OutputTree"
phase = 3
branches = slug.dataCol(phase,10)
numBranches = len(branches)
parser = argparse.ArgumentParser(description="Plot 1D plots of sig/bac")
parser.add_argument("--file", type=str, help="Use '--file=' followed by a *.h5 file")
args = parser.parse_args()
file = "data/" + str(args.file)
# Data read from file.
signal = uproot.open("data/new_TTHH.root")[tree]
df_signal = signal.pandas.df(branches)
bkgTTBB = uproot.open("data/new_TTBB.root")[tree]
df_bkgTTBB = bkgTTBB.pandas.df(branches)
bkgTTH = uproot.open("data/new_TTH.root")[tree]
df_bkgTTH = bkgTTH.pandas.df(branches)
bkgTTZ = uproot.open("data/new_TTZ.root")[tree]
df_bkgTTZ = bkgTTZ.pandas.df(branches)
df_background = pd.concat([df_bkgTTBB, df_bkgTTH, df_bkgTTZ])
# The 3 backgrounds are concatenated; shuffle so the events are not grouped by process.
shuffleBackground = shuffle(df_background, random_state=seed)
# Combine the signal with the shuffled background to counter the imbalanced-data problem.
rawdata = pd.concat([df_signal, shuffleBackground])
X = rawdata.drop(["weights", "truth"], axis=1)
X = sc.fit_transform(X)
# signal
scalefactor = 0.00232 * 0.608791
sigw = rawdata["weights"][: len(signal)] * scalefactor
bkgw = rawdata["weights"][len(signal) :]
# Labeling data with 1's and 0's to distinguish.
y = np.concatenate((np.ones(len(df_signal)), np.zeros(len(shuffleBackground))))
# Shuffle full data and split into train/test and validation set.
X_dev, X_eval, y_dev, y_eval = train_test_split(
X, y, test_size=0.001, random_state=seed
)
X_train, X_test, y_train, y_test = train_test_split(
X_dev, y_dev, test_size=0.2, random_state=seed
)
neuralNet = keras.models.load_model(file)
allScore = neuralNet.predict(X).ravel()
fpr, tpr, thresholds = roc_curve(y, allScore)
area = auc(fpr, tpr)
scoresig=[]
score1=[]
score2=[]
score3=[]
if False:  # Set to True to plot the stacked score distribution per truth class.
for i in range(len(X)):
if i < len(signal):
scoresig.append(allScore[i])
else:
if rawdata['truth'].values[i] == 1:
score1.append(allScore[i])
if rawdata['truth'].values[i] == 2:
score2.append(allScore[i])
if rawdata['truth'].values[i] == 3:
score3.append(allScore[i])
plt.hist(
[scoresig,score1,score2,score3],
bins=50,
histtype="stepfilled",
label=['Signal',"TTH","TTZ","TTBB"],
linestyle="solid",
color=['red','blue','mediumorchid','green'],
stacked=True,
# weights=[NNb3weights,NNb2weights,NNb1weights],
)
# plt.hist(
# allScore,
# color="k",
# bins=50,
# histtype="stepfilled",
# label='Score Distribution (Test Data)',
# )
plt.ylabel('Events')
plt.xlabel('Score')
plt.yscale("log")
plt.legend()
plt.show()
# The score is rounded; values are 0 or 1.
# y_pred = [1 * (x[0] >= 0.5) for x in allScore]
if False:  # Set to True to plot train/test score overlays and the ROC curve.
bins = numbins = 30
decisions = []
for X,y in ((X_train, y_train), (X_test, y_test)):
d1 = neuralNet.predict(X[y>0.5]).ravel()
d2 = neuralNet.predict(X[y<0.5]).ravel()
decisions += [d1, d2]
low = min(np.min(d) for d in decisions)
high = max(np.max(d) for d in decisions)
xlimit = array([low,high])
hist, bins = np.histogram(decisions[3], bins=numbins, range=xlimit, density=True)
plt.figure(figsize=(8, 6))
plt.subplot(212)
plt.hist(
decisions[0],
color="r",
alpha=0.5,
range=xlimit,
bins=bins,
histtype="stepfilled",
# density=False,
density=True,
label='S (train)',
# label="Signal Distribution",
# weights=sigw,
)
plt.hist(
decisions[1],
color="b",
alpha=0.5,
range=xlimit,
bins=bins,
histtype="stepfilled",
# density=False,
density=True,
# label="Background Distribution",
label='B (train)',
# weights=bkgw,
)
hist, bins = np.histogram(decisions[2], bins=numbins, range=xlimit, density=True)
scale = len(decisions[2]) / sum(hist)
err = np.sqrt(hist * scale) / scale
width = (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.errorbar(center, hist, yerr=err, fmt='o', c='r', label='S (test)')
hist, bins = np.histogram(decisions[3], bins=numbins, range=xlimit, density=True)
    scale = len(decisions[3]) / sum(hist)
err = np.sqrt(hist * scale) / scale
plt.errorbar(center, hist, yerr=err, fmt='o', c='b', label='B (test)')
# plt.axvline(x= score,color='k')
plt.xlabel("Score")
plt.ylabel("Events")
plt.yscale("log")
plt.axvline(x= 0.958,color='k')
plt.legend(loc="upper right")
plt.subplot(211)
plt.plot(fpr, tpr, "k-", label="All, AUC = %0.3f" % (area))
plt.plot([0, 1], [0, 1], "--", color=(0.6, 0.6, 0.6), label="Luck")
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver operating characteristic")
plt.legend(loc="lower right")
plt.grid()
plt.tight_layout()
plt.show()
if True:  # Compute the ROC by hand and find the score cut that maximizes significance.
totalscore = neuralNet.predict(X).ravel()
numbins = len(totalscore)
sigScore = neuralNet.predict(X[y > 0.5]).ravel()
bkgScore = neuralNet.predict(X[y < 0.5]).ravel()
sigSUM = len(sigScore)
bkgSUM = len(bkgScore)
xlimit = (0, 1)
tp = []
fp = []
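    # Build ROC points by hand: sweep the score threshold from the top bin down,
    # accumulating the fraction of signal (tp) and background (fp) events above it.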
hist, bins = np.histogram(sigScore, bins=numbins, range=xlimit, density=False)
count = 0
for i in range(numbins - 1, -1, -1):
count += hist[i] / sigSUM
tp.append(count)
hist, bins = np.histogram(bkgScore, bins=numbins, range=xlimit, density=False)
count = 0
for j in range(numbins - 1, -1, -1):
count += hist[j] / bkgSUM
fp.append(count)
area = auc(fp, tp)
xplot = tp
yplot = fp
    # Scan thresholds from the highest score bin downward, tracking the cut that
    # maximizes the Poisson significance Z(s, b) with at least 10 background events.
sigSUM = len(sigScore) * scalefactor
tp = np.array(tp) * sigSUM
fp = np.array(fp) * bkgSUM
syst = 0.0
stat = 0.0
maxsignif = 0.0
maxs = 0
maxb = 0
bincounter = numbins - 1
bincountatmaxsignif = 999
for t, f in zip(tp, fp):
signif = slug.getZPoisson(t, f, stat, syst)
if f >= 10 and signif > maxsignif:
maxsignif = signif
maxs = t
maxb = f
bincountatmaxsignif = bincounter
score = bincountatmaxsignif / numbins
bincounter -= 1
# precision = tp/(fp+tp)
# plt.plot(precision,totalscore, "k-",)
# plt.show()
# print(len(tp),len(fp),numbins)
flag = 1
if flag == 1:
hl = ['weights','numjet','numlep','btag','srap','cent','m_bb','h_b']
sample = ['s','b']
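    # The nested loops below use exec() to mass-create empty lists with names like
    # NNs1numjet or NNb2m_bb (sample 's'/'b', truth index, variable name); a dict
    # keyed by (sample, truth, variable) would be the idiomatic alternative.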
for i in range(1, 10 + 1):
for k in range(2):
for j in range(1,4):
if (k == 0 and j > 1): continue # This makes sure only one signal is added.
command = "" # This line is here to clear out the previous command.
command = "NN" + sample[k] + str(j) + "jeteta" + str(i) + " = []"
exec(command)
command = "" # This line is here to clear out the previous command.
command = "NN" + sample[k] + str(j) + "jetphi" + str(i) + " = []"
exec(command)
command = "" # This line is here to clear out the previous command.
command = "NN" + sample[k] + str(j) + "jetpt" + str(i) + " = []"
exec(command)
for q in range(len(hl)):
command = "" # This line is here to clear out the previous command.
command = "NN" + sample[k] + str(j) + hl[q] + " = []"
exec(command)
for w in range(1,4):
command = "" # This line is here to clear out the previous command.
command = "NN" + sample[k] + str(j) + "mt" + str(w) + " = []"
exec(command)
command = "" # This line is here to clear out the previous command.
command = "NN" + sample[k] + str(j) + "dr" + str(w) + " = []"
exec(command)
for i in range(len(X)):
if i < len(signal):
if allScore[i] > score:
NNs1numjet.append(rawdata["numjet"].values[i])
NNs1numlep.append(rawdata["numlep"].values[i])
NNs1btag.append(rawdata["btag"].values[i])
NNs1srap.append(rawdata["srap"].values[i])
NNs1cent.append(rawdata["cent"].values[i])
NNs1m_bb.append(rawdata["m_bb"].values[i])
NNs1h_b.append(rawdata["h_b"].values[i])
# NNs1mt1.append(rawdata["mt1"].values[i])
# NNs1mt2.append(rawdata["mt2"].values[i])
# NNs1mt3.append(rawdata["mt3"].values[i])
# NNs1dr1.append(rawdata["dr1"].values[i])
# NNs1dr2.append(rawdata["dr2"].values[i])
# NNs1dr3.append(rawdata["dr3"].values[i])
# NNs1jetpt1.append(rawdata["jet1pT"].values[i])
# NNs1jetpt2.append(rawdata["jet2pT"].values[i])
# NNs1jetpt3.append(rawdata["jet3pT"].values[i])
# NNs1jetpt4.append(rawdata["jet4pT"].values[i])
# NNs1jetpt5.append(rawdata["jet5pT"].values[i])
# NNs1jetpt6.append(rawdata["jet6pT"].values[i])
# NNs1jetpt7.append(rawdata["jet7pT"].values[i])
# NNs1jetpt8.append(rawdata["jet8pT"].values[i])
# NNs1jetpt9.append(rawdata["jet9pT"].values[i])
# NNs1jetpt10.append(rawdata["jet10pT"].values[i])
# NNs1jeteta1.append(rawdata["jet1eta"].values[i])
# NNs1jeteta2.append(rawdata["jet2eta"].values[i])
# NNs1jeteta3.append(rawdata["jet3eta"].values[i])
# NNs1jeteta4.append(rawdata["jet4eta"].values[i])
# NNs1jeteta5.append(rawdata["jet5eta"].values[i])
# NNs1jeteta6.append(rawdata["jet6eta"].values[i])
# NNs1jeteta7.append(rawdata["jet7eta"].values[i])
# NNs1jeteta8.append(rawdata["jet8eta"].values[i])
# NNs1jeteta9.append(rawdata["jet9eta"].values[i])
# NNs1jeteta10.append(rawdata["jet10eta"].values[i])
# NNs1jetphi1.append(rawdata["jet1phi"].values[i])
# NNs1jetphi2.append(rawdata["jet2phi"].values[i])
# NNs1jetphi3.append(rawdata["jet3phi"].values[i])
# NNs1jetphi4.append(rawdata["jet4phi"].values[i])
# NNs1jetphi5.append(rawdata["jet5phi"].values[i])
# NNs1jetphi6.append(rawdata["jet6phi"].values[i])
# NNs1jetphi7.append(rawdata["jet7phi"].values[i])
# NNs1jetphi8.append(rawdata["jet8phi"].values[i])
# NNs1jetphi9.append(rawdata["jet9phi"].values[i])
# NNs1jetphi10.append(rawdata["jet10phi"].values[i])
NNs1weights.append(scalefactor * rawdata["weights"].values[i])
else:
if allScore[i] > score:
if rawdata['truth'].values[i] == 1:
NNb1numjet.append(rawdata["numjet"].values[i])
NNb1numlep.append(rawdata["numlep"].values[i])
NNb1btag.append(rawdata["btag"].values[i])
NNb1srap.append(rawdata["srap"].values[i])
NNb1cent.append(rawdata["cent"].values[i])
NNb1m_bb.append(rawdata["m_bb"].values[i])
NNb1h_b.append(rawdata["h_b"].values[i])
# NNb1mt1.append(rawdata["mt1"].values[i])
# NNb1mt2.append(rawdata["mt2"].values[i])
# NNb1mt3.append(rawdata["mt3"].values[i])
# NNb1dr1.append(rawdata["dr1"].values[i])
# NNb1dr2.append(rawdata["dr2"].values[i])
# NNb1dr3.append(rawdata["dr3"].values[i])
# NNb1jetpt1.append(rawdata["jet1pT"].values[i])
# NNb1jetpt2.append(rawdata["jet2pT"].values[i])
# NNb1jetpt3.append(rawdata["jet3pT"].values[i])
# NNb1jetpt4.append(rawdata["jet4pT"].values[i])
# NNb1jetpt5.append(rawdata["jet5pT"].values[i])
# NNb1jetpt6.append(rawdata["jet6pT"].values[i])
# NNb1jetpt7.append(rawdata["jet7pT"].values[i])
# NNb1jetpt8.append(rawdata["jet8pT"].values[i])
# NNb1jetpt9.append(rawdata["jet9pT"].values[i])
# NNb1jetpt10.append(rawdata["jet10pT"].values[i])
# NNb1jeteta1.append(rawdata["jet1eta"].values[i])
# NNb1jeteta2.append(rawdata["jet2eta"].values[i])
# NNb1jeteta3.append(rawdata["jet3eta"].values[i])
# NNb1jeteta4.append(rawdata["jet4eta"].values[i])
# NNb1jeteta5.append(rawdata["jet5eta"].values[i])
# NNb1jeteta6.append(rawdata["jet6eta"].values[i])
# NNb1jeteta7.append(rawdata["jet7eta"].values[i])
# NNb1jeteta8.append(rawdata["jet8eta"].values[i])
# NNb1jeteta9.append(rawdata["jet9eta"].values[i])
# NNb1jeteta10.append(rawdata["jet10eta"].values[i])
# NNb1jetphi1.append(rawdata["jet1phi"].values[i])
# NNb1jetphi2.append(rawdata["jet2phi"].values[i])
# NNb1jetphi3.append(rawdata["jet3phi"].values[i])
# NNb1jetphi4.append(rawdata["jet4phi"].values[i])
# NNb1jetphi5.append(rawdata["jet5phi"].values[i])
# NNb1jetphi6.append(rawdata["jet6phi"].values[i])
# NNb1jetphi7.append(rawdata["jet7phi"].values[i])
# NNb1jetphi8.append(rawdata["jet8phi"].values[i])
# NNb1jetphi9.append(rawdata["jet9phi"].values[i])
# NNb1jetphi10.append(rawdata["jet10phi"].values[i])
NNb1weights.append(rawdata["weights"].values[i])
if rawdata['truth'].values[i] == 2:
NNb2numjet.append(rawdata["numjet"].values[i])
NNb2numlep.append(rawdata["numlep"].values[i])
NNb2btag.append(rawdata["btag"].values[i])
NNb2srap.append(rawdata["srap"].values[i])
NNb2cent.append(rawdata["cent"].values[i])
NNb2m_bb.append(rawdata["m_bb"].values[i])
NNb2h_b.append(rawdata["h_b"].values[i])
# NNb2mt1.append(rawdata["mt1"].values[i])
# NNb2mt2.append(rawdata["mt2"].values[i])
# NNb2mt3.append(rawdata["mt3"].values[i])
# NNb2dr1.append(rawdata["dr1"].values[i])
# NNb2dr2.append(rawdata["dr2"].values[i])
# NNb2dr3.append(rawdata["dr3"].values[i])
# NNb2jetpt1.append(rawdata["jet1pT"].values[i])
# NNb2jetpt2.append(rawdata["jet2pT"].values[i])
# NNb2jetpt3.append(rawdata["jet3pT"].values[i])
# NNb2jetpt4.append(rawdata["jet4pT"].values[i])
# NNb2jetpt5.append(rawdata["jet5pT"].values[i])
# NNb2jetpt6.append(rawdata["jet6pT"].values[i])
# NNb2jetpt7.append(rawdata["jet7pT"].values[i])
# NNb2jetpt8.append(rawdata["jet8pT"].values[i])
# NNb2jetpt9.append(rawdata["jet9pT"].values[i])
# NNb2jetpt10.append(rawdata["jet10pT"].values[i])
# NNb2jeteta1.append(rawdata["jet1eta"].values[i])
# NNb2jeteta2.append(rawdata["jet2eta"].values[i])
# NNb2jeteta3.append(rawdata["jet3eta"].values[i])
# NNb2jeteta4.append(rawdata["jet4eta"].values[i])
# NNb2jeteta5.append(rawdata["jet5eta"].values[i])
# NNb2jeteta6.append(rawdata["jet6eta"].values[i])
# NNb2jeteta7.append(rawdata["jet7eta"].values[i])
# NNb2jeteta8.append(rawdata["jet8eta"].values[i])
# NNb2jeteta9.append(rawdata["jet9eta"].values[i])
# NNb2jeteta10.append(rawdata["jet10eta"].values[i])
# NNb2jetphi1.append(rawdata["jet1phi"].values[i])
# NNb2jetphi2.append(rawdata["jet2phi"].values[i])
# NNb2jetphi3.append(rawdata["jet3phi"].values[i])
# NNb2jetphi4.append(rawdata["jet4phi"].values[i])
# NNb2jetphi5.append(rawdata["jet5phi"].values[i])
# NNb2jetphi6.append(rawdata["jet6phi"].values[i])
# NNb2jetphi7.append(rawdata["jet7phi"].values[i])
# NNb2jetphi8.append(rawdata["jet8phi"].values[i])
# NNb2jetphi9.append(rawdata["jet9phi"].values[i])
# NNb2jetphi10.append(rawdata["jet10phi"].values[i])
NNb2weights.append(rawdata["weights"].values[i])
if rawdata['truth'].values[i] == 3:
NNb3numjet.append(rawdata["numjet"].values[i])
NNb3numlep.append(rawdata["numlep"].values[i])
NNb3btag.append(rawdata["btag"].values[i])
NNb3srap.append(rawdata["srap"].values[i])
NNb3cent.append(rawdata["cent"].values[i])
NNb3m_bb.append(rawdata["m_bb"].values[i])
NNb3h_b.append(rawdata["h_b"].values[i])
# NNb3mt1.append(rawdata["mt1"].values[i])
# NNb3mt2.append(rawdata["mt2"].values[i])
# NNb3mt3.append(rawdata["mt3"].values[i])
# NNb3dr1.append(rawdata["dr1"].values[i])
# NNb3dr2.append(rawdata["dr2"].values[i])
# NNb3dr3.append(rawdata["dr3"].values[i])
# NNb3jetpt1.append(rawdata["jet1pT"].values[i])
# NNb3jetpt2.append(rawdata["jet2pT"].values[i])
# NNb3jetpt3.append(rawdata["jet3pT"].values[i])
# NNb3jetpt4.append(rawdata["jet4pT"].values[i])
# NNb3jetpt5.append(rawdata["jet5pT"].values[i])
# NNb3jetpt6.append(rawdata["jet6pT"].values[i])
# NNb3jetpt7.append(rawdata["jet7pT"].values[i])
# NNb3jetpt8.append(rawdata["jet8pT"].values[i])
# NNb3jetpt9.append(rawdata["jet9pT"].values[i])
# NNb3jetpt10.append(rawdata["jet10pT"].values[i])
# NNb3jeteta1.append(rawdata["jet1eta"].values[i])
# NNb3jeteta2.append(rawdata["jet2eta"].values[i])
# NNb3jeteta3.append(rawdata["jet3eta"].values[i])
# NNb3jeteta4.append(rawdata["jet4eta"].values[i])
# NNb3jeteta5.append(rawdata["jet5eta"].values[i])
# NNb3jeteta6.append(rawdata["jet6eta"].values[i])
# NNb3jeteta7.append(rawdata["jet7eta"].values[i])
# NNb3jeteta8.append(rawdata["jet8eta"].values[i])
# NNb3jeteta9.append(rawdata["jet9eta"].values[i])
# NNb3jeteta10.append(rawdata["jet10eta"].values[i])
# NNb3jetphi1.append(rawdata["jet1phi"].values[i])
# NNb3jetphi2.append(rawdata["jet2phi"].values[i])
# NNb3jetphi3.append(rawdata["jet3phi"].values[i])
# NNb3jetphi4.append(rawdata["jet4phi"].values[i])
# NNb3jetphi5.append(rawdata["jet5phi"].values[i])
# NNb3jetphi6.append(rawdata["jet6phi"].values[i])
# NNb3jetphi7.append(rawdata["jet7phi"].values[i])
# NNb3jetphi8.append(rawdata["jet8phi"].values[i])
# NNb3jetphi9.append(rawdata["jet9phi"].values[i])
# NNb3jetphi10.append(rawdata["jet10phi"].values[i])
NNb3weights.append(rawdata["weights"].values[i])
snumlep = df_signal["numlep"].values
bnumlep = df_background["numlep"].values
snumjet = df_signal["numjet"].values
bnumjet = df_background["numjet"].values
sbtag = df_signal["btag"].values
bbtag = df_background["btag"].values
ssrap = df_signal["srap"].values
bsrap = df_background["srap"].values
scent = df_signal["cent"].values
bcent = df_background["cent"].values
sm_bb = df_signal["m_bb"].values
bm_bb = df_background["m_bb"].values
sh_b = df_signal["h_b"].values
bh_b = df_background["h_b"].values
# smt1 = df_signal["mt1"].values
# bmt1 = df_background["mt1"].values
# smt2 = df_signal["mt2"].values
# bmt2 = df_background["mt2"].values
# smt3 = df_signal["mt3"].values
# bmt3 = df_background["mt3"].values
# sdr1 = df_signal["dr1"].values
# bdr1 = df_background["dr1"].values
# sdr2 = df_signal["dr2"].values
# bdr2 = df_background["dr2"].values
# sdr3 = df_signal["dr3"].values
# bdr3 = df_background["dr3"].values
# sjetpt1 = df_signal["jet1pT"].values
# sjetpt2 = df_signal["jet2pT"].values
# sjetpt3 = df_signal["jet3pT"].values
# sjetpt4 = df_signal["jet4pT"].values
# sjetpt5 = df_signal["jet5pT"].values
# sjetpt6 = df_signal["jet6pT"].values
# sjetpt7 = df_signal["jet7pT"].values
# sjetpt8 = df_signal["jet8pT"].values
# sjetpt9 = df_signal["jet9pT"].values
# sjetpt10 = df_signal["jet10pT"].values
# bjetpt1 = df_background["jet1pT"].values
# bjetpt2 = df_background["jet2pT"].values
# bjetpt3 = df_background["jet3pT"].values
# bjetpt4 = df_background["jet4pT"].values
# bjetpt5 = df_background["jet5pT"].values
# bjetpt6 = df_background["jet6pT"].values
# bjetpt7 = df_background["jet7pT"].values
# bjetpt8 = df_background["jet8pT"].values
# bjetpt9 = df_background["jet9pT"].values
# bjetpt10 = df_background["jet10pT"].values
# sjeteta1 = df_signal["jet1eta"].values
# sjeteta2 = df_signal["jet2eta"].values
# sjeteta3 = df_signal["jet3eta"].values
# sjeteta4 = df_signal["jet4eta"].values
# sjeteta5 = df_signal["jet5eta"].values
# sjeteta6 = df_signal["jet6eta"].values
# sjeteta7 = df_signal["jet7eta"].values
# sjeteta8 = df_signal["jet8eta"].values
# sjeteta9 = df_signal["jet9eta"].values
# sjeteta10 = df_signal["jet10eta"].values
# bjeteta1 = df_background["jet1eta"].values
# bjeteta2 = df_background["jet2eta"].values
# bjeteta3 = df_background["jet3eta"].values
# bjeteta4 = df_background["jet4eta"].values
# bjeteta5 = df_background["jet5eta"].values
# bjeteta6 = df_background["jet6eta"].values
# bjeteta7 = df_background["jet7eta"].values
# bjeteta8 = df_background["jet8eta"].values
# bjeteta9 = df_background["jet9eta"].values
# bjeteta10 = df_background["jet10eta"].values
# sjetphi1 = df_signal["jet1phi"].values
# sjetphi2 = df_signal["jet2phi"].values
# sjetphi3 = df_signal["jet3phi"].values
# sjetphi4 = df_signal["jet4phi"].values
# sjetphi5 = df_signal["jet5phi"].values
# sjetphi6 = df_signal["jet6phi"].values
# sjetphi7 = df_signal["jet7phi"].values
# sjetphi8 = df_signal["jet8phi"].values
# sjetphi9 = df_signal["jet9phi"].values
# sjetphi10 = df_signal["jet10phi"].values
# bjetphi1 = df_background["jet1phi"].values
# bjetphi2 = df_background["jet2phi"].values
# bjetphi3 = df_background["jet3phi"].values
# bjetphi4 = df_background["jet4phi"].values
# bjetphi5 = df_background["jet5phi"].values
# bjetphi6 = df_background["jet6phi"].values
# bjetphi7 = df_background["jet7phi"].values
# bjetphi8 = df_background["jet8phi"].values
# bjetphi9 = df_background["jet9phi"].values
# bjetphi10 = df_background["jet10phi"].values
def qPlot(x, y, nx, b1,b2,b3, a, b, c, Name):
bins = np.arange(a, b + 1.5) - .5
plt.hist(
y,
bins=bins,
histtype="step",
label="Full Background",
linestyle="solid",
color="black",
weights=bkgw,
)
plt.hist(
x,
bins=bins,
histtype="step",
label="Full Signal",
linestyle="solid",
color="darkred",
weights=sigw,
stacked=False,
)
plt.hist(
[b3,b2,b1],
bins=bins,
histtype="stepfilled",
label=["TTH Score > %0.2f" % (score),"TTZ Score > %0.2f" % (score),"TTBB Score > %0.2f" % (score)],
linestyle="solid",
color=['blue','mediumorchid','green'],
stacked=True,
weights=[NNb3weights,NNb2weights,NNb1weights],
)
plt.hist(
nx,
bins=bins,
histtype="step",
hatch='/',
label="Signal Score > %0.2f" % (score),
linestyle="solid",
color="darkred",
weights=NNs1weights,
stacked=False,
)
plt.xticks(bins + 0.5)
plt.legend(loc=1,fontsize = 'x-small')
plt.xlabel(Name, horizontalalignment='right', x=1.0)
plt.ylabel('Events', horizontalalignment='right', y=1.0)
plt.title(r'$\sqrt{s}=$ 14 TeV, $\mathcal{L} =$ 3000 fb${}^{-1}$')
plt.ylabel("Events")
plt.yscale("log")
plt.style.use('classic')
# plt.show()
pdf.savefig() # saves the current figure into a pdf page
plt.close()
def hPlot(x, y, nx, b1,b2,b3, a, b, c, Name):
bins = np.linspace(a, b, c)
plt.hist(
y,
bins=bins,
histtype="step",
label="Full Background",
linestyle="solid",
color="black",
weights=bkgw,
)
plt.hist(
x,
bins=bins,
histtype="step",
label="Full Signal",
linestyle="solid",
color="darkred",
weights=sigw,
stacked=False,
)
plt.hist(
[b3,b2,b1],
bins=bins,
histtype="stepfilled",
label=["TTH Score > %0.2f" % (score),"TTZ Score > %0.2f" % (score),"TTBB Score > %0.2f" % (score)],
linestyle="solid",
color=['blue','mediumorchid','green'],
stacked=True,
weights=[NNb3weights,NNb2weights,NNb1weights],
)
plt.hist(
nx,
bins=bins,
histtype="step",
hatch='/',
label="Signal Score > %0.2f" % (score),
linestyle="solid",
color="darkred",
weights=NNs1weights,
stacked=False,
)
plt.legend(loc=1,fontsize = 'x-small')
plt.xlabel(Name, horizontalalignment='right', x=1.0)
plt.ylabel('Events', horizontalalignment='right', y=1.0)
plt.title(r'$\sqrt{s}=$ 14 TeV, $\mathcal{L} =$ 3000 fb${}^{-1}$')
plt.ylabel("Events")
plt.yscale("log")
plt.style.use('classic')
# plt.show()
pdf.savefig() # saves the current figure into a pdf page
plt.close()
# tn, fp, fn, tp = confusion_matrix(y, y_pred,normalize='all').ravel()
# Matrix = np.matrix([[tp,fn],[fp,tn]])
# slug.confusedMatrix(Matrix)
# y_pred2 = [1 * (x[0] >= score) for x in allScore]
# tn, fp, fn, tp = confusion_matrix(y, y_pred2,normalize='all').ravel()
# Matrix2 = np.matrix([[tp,fn],[fp,tn]])
# slug.confusedMatrix(Matrix2)
pdfname = file[:-2] + 'pdf'
with PdfPages(pdfname) as pdf:
# plt.figure(figsize=(8, 6))
# plt.subplot(212)
# plt.hist(
# sigScore,
# color="r",
# alpha=0.5,
# range=xlimit,
# bins=100,
# histtype="stepfilled",
# # density=False,
# density=True,
# label='S (train)',
# # label="Signal Distribution",
# weights=sigw,
# )
# plt.hist(
# bkgScore,
# color="b",
# alpha=0.5,
# range=xlimit,
# bins=100,
# histtype="stepfilled",
# # density=False,
# density=True,
# # label="Background Distribution",
# label='B (train)',
# weights=bkgw,
# )
# plt.axvline(x= score,color='k')
# plt.xlabel("Score")
# plt.ylabel("Events")
# plt.yscale("log")
# plt.legend(loc="upper right")
# plt.subplot(211)
# plt.plot(yplot, xplot, "k-", label="All, AUC = %0.3f" % (area))
# plt.plot(maxs,maxb,'ko')
# plt.plot([0, 1], [0, 1], "--", color=(0.6, 0.6, 0.6), label="Luck")
# plt.xlim([-0.05, 1.05])
# plt.ylim([-0.05, 1.05])
# plt.xlabel("False Positive Rate")
# plt.ylabel("True Positive Rate")
# plt.title("Receiver operating characteristic")
# plt.legend(loc="lower right")
# plt.grid()
# pdf.savefig() # saves the current figure into a pdf page
# plt.close()
qPlot(snumjet, bnumjet, NNs1numjet, NNb1numjet, NNb2numjet, NNb3numjet, 1, 21, 22, 'Jet multiplicity')
qPlot(snumlep, bnumlep, NNs1numlep, NNb1numlep,NNb2numlep,NNb3numlep, 0, 3, 5, 'Lepton multiplicity')
qPlot(sbtag, bbtag, NNs1btag, NNb1btag,NNb2btag, NNb3btag,0, 10, 10, 'N b-tagged jets')
hPlot(ssrap, bsrap, NNs1srap, NNb1srap, NNb2srap,NNb3srap, 0, 10, 20, r'$ < \eta(b_{i},b_{j}) >$')
hPlot(scent, bcent, NNs1cent, NNb1cent,NNb2cent, NNb3cent,0, 1, 20, 'Centrality')
hPlot(sm_bb, bm_bb, NNs1m_bb, NNb1m_bb,NNb2m_bb,NNb3m_bb, 0, 250, 25, r'${M}_{bb}$ [GeV]')
hPlot(sh_b, bh_b, NNs1h_b, NNb1h_b,NNb2h_b,NNb3h_b, 0, 1500, 60, r'${H}_{B}$ [GeV]')
# hPlot(smt1, bmt1, NNs1mt1, NNb1mt1,NNb2mt1,NNb3mt1, 0, 300, 100, r'${m}_{T}1$ [GeV]')
# hPlot(smt2, bmt2, NNs1mt2, NNb1mt2,NNb2mt2,NNb3mt2, 0, 300, 100, r'${m}_{T}2$ [GeV]')
# hPlot(smt3, bmt3, NNs1mt3, NNb1mt3,NNb2mt3,NNb3mt3, 0, 300, 100, r'${m}_{T}3$ [GeV]')
# hPlot(sdr1, bdr1, NNs1dr1, NNb1dr1,NNb2dr1,NNb3dr1, 0, 7, 100, r'$\Delta$R1')
# hPlot(sdr2, bdr2, NNs1dr2, NNb1dr2,NNb2dr2,NNb3dr2, 0, 7, 100, r'$\Delta$R2')
# hPlot(sdr3, bdr3, NNs1dr3, NNb1dr3,NNb2dr3,NNb3dr3, 0, 7, 100, r'$\Delta$R3')
# hPlot(sjetpt1, bjetpt1, NNs1jetpt1, NNb1jetpt1,NNb2jetpt1,NNb3jetpt1, 0, 1e6, 100, r'Jet1 pT')
# hPlot(sjetpt2, bjetpt2, NNs1jetpt2, NNb1jetpt2,NNb2jetpt2,NNb3jetpt2, 0, 1e6, 100, r'Jet2 pT')
# hPlot(sjetpt3, bjetpt3, NNs1jetpt3, NNb1jetpt3,NNb2jetpt3,NNb3jetpt3, 0, 1e6, 100, r'Jet3 pT')
# hPlot(sjetpt4, bjetpt4, NNs1jetpt4, NNb1jetpt4,NNb2jetpt4,NNb3jetpt4, 0, 1e6, 100, r'Jet4 pT')
# hPlot(sjetpt5, bjetpt5, NNs1jetpt5, NNb1jetpt5,NNb2jetpt4,NNb3jetpt4, 0, 1e6, 100, r'Jet5 pT')
# hPlot(sjetpt6, bjetpt6, NNs1jetpt6, NNb1jetpt6,NNb2jetpt4,NNb3jetpt4, 0, 1e6, 100, r'Jet6 pT')
# hPlot(sjetpt7, bjetpt7, NNs1jetpt7, NNb1jetpt7,NNb2jetpt4,NNb3jetpt4, 0, 1e6, 100, r'Jet7 pT')
# hPlot(sjetpt8, bjetpt8, NNs1jetpt8, NNb1jetpt8,NNb2jetpt4,NNb3jetpt4, 0, 1e6, 100, r'Jet8 pT')
# hPlot(sjetpt9, bjetpt9, NNs1jetpt9, NNb1jetpt9,NNb2jetpt4,NNb3jetpt4, 0, 1e6, 100, r'Jet9 pT')
# hPlot(sjetpt10, bjetpt10, NNs1jetpt10, NNb1jetpt10,NNb2jetpt10,NNb3jetpt10, 0, 1e6, 100, r'Jet10 pT')
# hPlot(sjeteta1, bjeteta1, NNs1jeteta1, NNb1jeteta1,NNb2jeteta1,NNb3jeteta1, -6, 6, 12, r'Jet1 $\eta$')
# hPlot(sjeteta2, bjeteta2, NNs1jeteta2, NNb1jeteta2,NNb2jeteta2,NNb3jeteta2, -6, 6, 12, r'Jet2 $\eta$')
# hPlot(sjeteta3, bjeteta3, NNs1jeteta3, NNb1jeteta3,NNb2jeteta3,NNb3jeteta3, -6, 6, 12, r'Jet3 $\eta$')
# hPlot(sjeteta4, bjeteta4, NNs1jeteta4, NNb1jeteta4,NNb2jeteta4,NNb3jeteta4, -6, 6, 12, r'Jet4 $\eta$')
# hPlot(sjeteta5, bjeteta5, NNs1jeteta5, NNb1jeteta5,NNb2jeteta5,NNb3jeteta5, -6, 6, 12, r'Jet5 $\eta$')
# hPlot(sjeteta6, bjeteta6, NNs1jeteta6, NNb1jeteta6,NNb2jeteta6,NNb3jeteta6, -6, 6, 12, r'Jet6 $\eta$')
# hPlot(sjeteta7, bjeteta7, NNs1jeteta7, NNb1jeteta7,NNb2jeteta7,NNb3jeteta7, -6, 6, 12, r'Jet7 $\eta$')
# hPlot(sjeteta8, bjeteta8, NNs1jeteta8, NNb1jeteta8,NNb2jeteta8,NNb3jeteta8, -6, 6, 12, r'Jet8 $\eta$')
# hPlot(sjeteta9, bjeteta9, NNs1jeteta9, NNb1jeteta9,NNb2jeteta9,NNb3jeteta9, -6, 6, 12, r'Jet9 $\eta$')
# hPlot(sjeteta10, bjeteta10, NNs1jeteta10, NNb1jeteta10,NNb2jeteta10,NNb3jeteta10, -6, 6, 12, r'Jet10 $\eta$')
# hPlot(sjetphi1, bjetphi1, NNs1jetphi1, NNb1jetphi1,NNb2jetphi1,NNb3jetphi1, -4, 4, 8, r'Jet1 $\phi$')
# hPlot(sjetphi2, bjetphi2, NNs1jetphi2, NNb1jetphi2,NNb2jetphi1,NNb3jetphi2, -4, 4, 8, r'Jet2 $\phi$')
# hPlot(sjetphi3, bjetphi3, NNs1jetphi3, NNb1jetphi3,NNb2jetphi1,NNb3jetphi3, -4, 4, 8, r'Jet3 $\phi$')
# hPlot(sjetphi4, bjetphi4, NNs1jetphi4, NNb1jetphi4,NNb2jetphi1,NNb3jetphi4, -4, 4, 8, r'Jet4 $\phi$')
# hPlot(sjetphi5, bjetphi5, NNs1jetphi5, NNb1jetphi5,NNb2jetphi1,NNb3jetphi5, -4, 4, 8, r'Jet5 $\phi$')
# hPlot(sjetphi6, bjetphi6, NNs1jetphi6, NNb1jetphi6,NNb2jetphi1,NNb3jetphi6, -4, 4, 8, r'Jet6 $\phi$')
# hPlot(sjetphi7, bjetphi7, NNs1jetphi7, NNb1jetphi7,NNb2jetphi1,NNb3jetphi7, -4, 4, 8, r'Jet7 $\phi$')
# hPlot(sjetphi8, bjetphi8, NNs1jetphi8, NNb1jetphi8,NNb2jetphi1,NNb3jetphi8, -4, 4, 8, r'Jet8 $\phi$')
# hPlot(sjetphi9, bjetphi9, NNs1jetphi9, NNb1jetphi9,NNb2jetphi1,NNb3jetphi9, -4, 4, 8, r'Jet9 $\phi$')
# hPlot(sjetphi10, bjetphi10, NNs1jetphi10, NNb1jetphi10,NNb2jetphi1,NNb3jetphi10, -4, 4, 8, r'Jet10 $\phi$')
d = pdf.infodict()
d['Title'] = 'LoadNN'
d['Author'] = u'<NAME>\xe4nen'
d['Subject'] = '1D plots that apply NN score for a cut.'
d['Keywords'] = 'ttHH'
# d['CreationDate'] = datetime.datetime(2009, 11, 13)
d['CreationDate'] = datetime.datetime.today()
d['ModDate'] = datetime.datetime.today()
print(pdfname)
| 2.390625
| 2
|
conversation_data.py
|
Fortune-Adekogbe/Diary-bot
| 0
|
12782573
|
<reponame>Fortune-Adekogbe/Diary-bot
class ConversationData:
def __init__(
self,
timestamp: str = None,
channel_id: str = None,
prompted_for_user_name: bool = False,
):
self.timestamp = timestamp
self.channel_id = channel_id
self.prompted_for_user_name = prompted_for_user_name
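# A minimal per-conversation state container, following the Bot Framework
# "conversation data" pattern; an instance is typically stored and retrieved
# through a ConversationState property accessor (assumed usage).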
| 2.25
| 2
|
app/order/tests/test_items_api.py
|
mayk93/rest-api-example-1
| 0
|
12782574
|
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Item
from order.serializers import ItemSerializer
from core.tests.user_test_utils import create_user
from core.tests.order_test_utils import sample_order, sample_item
ITEMS_URL = reverse('order:item-list')
class PublicItemAPITest(TestCase):
def setUp(self):
self.client = APIClient()
def test_authentication_required(self):
response = self.client.get(ITEMS_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateItemAPITest(TestCase):
def setUp(self):
self.client = APIClient()
self.user_data = {
'name': 'Name',
'email': '<EMAIL>',
'password': 'password'
}
self.user = create_user(**self.user_data)
self.client.force_authenticate(user=self.user)
def test_get_items_success(self):
Item.objects.create(user=self.user, name='Item 1')
Item.objects.create(user=self.user, name='Item 2')
items = Item.objects.all().order_by('-name')
serializer = ItemSerializer(items, many=True)
response = self.client.get(ITEMS_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(serializer.data, response.data)
def test_get_items_user_specific(self):
other_user_data = self.user_data.copy()
other_user_data['email'] = '<EMAIL>'
other_user = create_user(**other_user_data)
item = Item.objects.create(user=self.user, name='Item 1')
Item.objects.create(user=other_user, name='Item 2')
response = self.client.get(ITEMS_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['name'], item.name)
def test_post_item_success(self):
payload = {'name': 'Item'}
response = self.client.post(ITEMS_URL, payload)
exists = Item.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(exists)
def test_post_item_fail(self):
payload = {}
response = self.client.post(ITEMS_URL, payload)
exists = Item.objects.filter(
user=self.user,
name=None
).exists()
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST
)
self.assertFalse(exists)
def test_filter_items_by_assignment(self):
order = sample_order(user=self.user)
item_1 = sample_item(user=self.user, name='Item 1')
item_2 = sample_item(user=self.user, name='Item 2')
order.items.add(item_1)
response_1 = self.client.get(ITEMS_URL, {'assigned': 1})
response_2 = self.client.get(ITEMS_URL)
self.assertEqual(response_1.status_code, status.HTTP_200_OK)
self.assertEqual(response_2.status_code, status.HTTP_200_OK)
self.assertEqual(len(response_1.data), 1)
self.assertEqual(len(response_2.data), 2)
self.assertEqual(response_1.data[0]['name'], item_1.name)
item_1_name_match = \
response_2.data[0]['name'] == item_1.name or \
response_2.data[1]['name'] == item_1.name
item_2_name_match = \
response_2.data[0]['name'] == item_2.name or \
response_2.data[1]['name'] == item_2.name
self.assertTrue(item_1_name_match)
self.assertTrue(item_2_name_match)
def test_filter_items_by_assignment_unique(self):
order_1 = sample_order(user=self.user)
order_2 = sample_order(user=self.user)
item = sample_item(user=self.user, name='Item 1')
sample_item(user=self.user, name='Item 2')
order_1.items.add(item)
order_2.items.add(item)
response = self.client.get(ITEMS_URL, {'assigned': 1})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
| 2.53125
| 3
|
quspin/basis/basis_1d/_check_1d_symm.py
|
anton-buyskikh/QuSpin
| 195
|
12782575
|
<gh_stars>100-1000
from __future__ import print_function, division
import warnings
def flip_sublat(opstr,indx,lat=0):
sign = 1
opstr = [str(s) for s in opstr]
for s,i,j in zip(opstr,indx,range(len(indx))):
if ((i % 2) == (lat % 2)):
if (s in ['z','y']):
sign *= -1
elif (s == "+"):
opstr[j] = '-'
elif (s == "-"):
opstr[j] = '+'
return sign,"".join(opstr)
def check_T(sort_opstr,operator_list,L,a):
missing_ops=[]
for i in range(0,L//a,1):
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
for j,ind in enumerate(indx):
indx[j] = (ind+i*a)%L
new_op = list(op)
new_op[1] = indx
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return missing_ops
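# Spin-inversion (Z) check: operators whose 'z'/'y' count (left of '|') is odd
# are reported in odd_ops, and the partner obtained by swapping '+' and '-'
# must be present in operator_list.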
def check_Z(sort_opstr,operator_list):
missing_ops=[]
odd_ops=[]
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
z_count = opstr[:i].count("z")
y_count = opstr[:i].count("y")
if ((y_count + z_count) % 2) != 0:
odd_ops.append(op)
new_op = list(op)
new_op[0] = new_op[0][:i].replace("+","#").replace("-","+").replace("#","-") + op[0][i:]
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return odd_ops,missing_ops
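# Parity (P) check: each operator with its site indices reflected through the
# chain, i -> (L-1-i) mod L, must appear in operator_list.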
def check_P(sort_opstr,operator_list,L):
missing_ops = []
for op in operator_list:
indx = list(op[1])
for j,ind in enumerate(indx):
indx[j] = (L-1-ind) % L
new_op = list(op)
new_op[1] = indx
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return missing_ops
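# Combined parity-times-spin-inversion (PZ) check: reflect the indices, swap
# '+' and '-', and multiply the coupling by (-1)^(count of 'z' and 'y').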
def check_PZ(sort_opstr,operator_list,L):
missing_ops = []
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
for j,ind in enumerate(indx):
indx[j] = (L-1-ind) % L
sign = (-1)**(opstr[:i].count('z')+opstr.count('y'))
new_op = list(op)
new_op[0] = new_op[0][:i].replace("+","#").replace("-","+").replace("#","-") + op[0][i:]
new_op[1] = indx
new_op[2] *= sign
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return missing_ops
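# Sublattice-A spin-inversion check: flip the spin operators on even sites only
# (flip_sublat with lat=0); odd-signed operators and missing partners are returned.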
def check_ZA(sort_opstr,operator_list):
missing_ops=[]
odd_ops=[]
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
sign,new_opstr = flip_sublat(opstr[:i],indx[:i],lat=0)
if sign == -1:
odd_ops.append(op)
new_op = list(op)
new_op[0] = new_opstr + opstr[i:]
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return odd_ops,missing_ops
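# Sublattice-B spin-inversion check: same as check_ZA but flipping odd sites (lat=1).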
def check_ZB(sort_opstr,operator_list):
missing_ops=[]
odd_ops=[]
for op in operator_list:
opstr = str(op[0])
indx = list(op[1])
if opstr.count("|") == 1:
i = opstr.index("|")
else:
i = len(opstr)
sign,new_opstr = flip_sublat(opstr[:i],indx[:i],lat=1)
if sign == -1:
odd_ops.append(op)
new_op = list(op)
new_op[0] = new_opstr + opstr[i:]
new_op = sort_opstr(new_op)
if not (new_op in operator_list):
missing_ops.append(new_op)
return odd_ops,missing_ops
| 2.625
| 3
|
databuild/tests/test_functions.py
|
databuild/databuild
| 1
|
12782576
|
from unittest import TestCase
from databuild import settings
from databuild.adapters.locmem.models import LocMemBook
from databuild.functions import data
settings.LANGUAGES['noop'] = 'databuild.environments.noop.NoopEnvironment'
class FunctionsTestCase(TestCase):
def test_cross(self):
a_data = [
{'id': 1, 'x': 2, 'y': 3},
{'id': 2, 'x': 2, 'y': 3.5},
{'id': 3, 'x': 1, 'y': 3.5},
]
b_data = [
{'id': 3, 'z': 3},
{'id': 1, 'z': 4},
{'id': 2, 'z': 4.5},
]
book = LocMemBook('project1')
env = book.operator.languages['noop']
a = book.add_sheet('a', ['id', 'x', 'y'])
b = book.add_sheet('b', ['id', 'z'])
a.extend(a_data)
b.extend(b_data)
result = [data.cross(env, book, row, 'b', 'z', 'id') for row in a.all()]
assert result == [4, 4.5, 3]
def test_column(self):
a_data = [
{'id': 1, 'x': 2, 'y': 3},
{'id': 2, 'x': 2, 'y': 3.5},
{'id': 3, 'x': 1, 'y': 3.5},
]
b_data = [
{'id': 3, 'z': 3},
{'id': 1, 'z': 4},
{'id': 2, 'z': 4.5},
]
book = LocMemBook('project1')
env = book.operator.languages['noop']
a = book.add_sheet('a', ['id', 'x', 'y'])
b = book.add_sheet('b', ['id', 'z'])
a.extend(a_data)
b.extend(b_data)
result = data.column(env, book, 'a', 'b', 'z', 'id')
assert result == [4, 4.5, 3]
| 2.453125
| 2
|
Environment.py
|
markriedl/westworld
| 18
|
12782577
|
<reponame>markriedl/westworld
import random
import copy
import sys
from Observation import *
from Reward import *
from Action import *
class Environment:
# The grid world
# 1 = walls
# 4 = goal (non-terminal)
# 5 = goal (terminal)
map = [[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 4, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1]]
# Which direction should the human walk?
# 0 = up
# 1 = down
# 2 = left
# 3 = right
influenceMap = [[3, 1, 1, 1, 1, 1, 2],
[3, 1, 2, 2, 2, 2, 2],
[3, 3, 3, 3, 3, 0, 2],
[3, 0, 0, 0, 0, 0, 2]]
# The current state
currentState = []
# The previous state
previousState = []
# Hard-coded initial state (used unless randomStart = True)
# 0: bot x
# 1: bot y
# 2: human alive?
# 3: human x
# 4: human y
# 5: human torture mode?
startState = [1, 1, True, 5, 1, False]
# Amount of reward at the goal
reward = 10.0
# Amount of penalty
penalty = -1.0
# Amount of penalty from touching the human
pain = -20.0
# Amount of penalty from dead human
dead = -100.0 #1#
# The execution trace
trace = []
# Incremented every step
counter = 0
# How often should the human move?
#timer = 1
# Randomly generate a start state
    randomStart = False
    # Should the human wander randomly instead of following the influence map?
    humanWander = False
    # Can the human torture?
    humanCanTorture = True
randGenerator=random.Random()
lastActionValue = -1
    # Print debugging information
verbose = False
# 0 = up
# 1 = down
# 2 = left
# 3 = right
# 4 = smash
def validActions(self):
resultArray = [0, 1, 2, 3, 4]
return resultArray
# Get the name of the action
def actionToString(self, act):
if act == 0:
return "GoUp"
elif act == 1:
return "GoDown"
elif act == 2:
return "GoLeft"
elif act == 3:
return "GoRight"
elif act == 4:
return "Smash"
# Called to start the simulation
def env_start(self):
# Use hard-coded start state or randomly generated state?
if self.randomStart:
self.currentState = self.randomizeStart(self.map)
else:
self.currentState = self.startState[:]
# Make sure counter is reset
self.counter = 0
if self.verbose:
print "env_start", self.currentState
# Reset previous state
self.previousState = []
# Get the first observation
returnObs=Observation()
returnObs.worldState=self.currentState[:]
returnObs.availableActions = self.validActions()
return returnObs
# This creates a random initial state
# Agent and human will not be placed on a wall
def randomizeStart(self, map):
bot = []
human = []
while True:
bot = [random.randint(1,5), random.randint(1,2)]
if map[bot[1]][bot[0]] != 1:
break
while True:
human = [random.randint(1,5), random.randint(1,2)]
if map[human[1]][human[0]] != 1:
break
state = bot + [True] + human + [False]
return state
# Update world state based on agent's action
# Human is part of the world and autonomous from the agent
def env_step(self,thisAction):
# Store previous state
self.previousState = self.currentState[:]
# Execute the action
self.executeAction(thisAction.actionValue)
# Get a new observation
lastActionValue = thisAction.actionValue
theObs=Observation()
theObs.worldState=self.currentState[:]
theObs.availableActions = self.validActions()
# Check to see if agent entered a terminal state
theObs.isTerminal = self.checkTerminal()
# Human movement
#self.counter = self.counter + 1
if self.currentState[2]:
if self.humanCanTorture and self.currentState[0] == self.currentState[3] and self.currentState[1] == self.currentState[4] and not self.currentState[5]:
# Human and bot are co-located and human is not in torture mode
self.currentState[5] = True
else:
self.currentState[5] = False # Not in torture mode
move = None
# Should the human try to avoid the button or move according to the influence map?
if self.humanWander == False:
move = self.influenceMap[self.currentState[4]][self.currentState[3]]
else:
move = random.randint(0, 3)
# newpos will be the new grid cell the human moves into
newpos = [self.currentState[3], self.currentState[4]]
if move == 0:
newpos[1] = newpos[1] - 1
elif move == 1:
newpos[1] = newpos[1] + 1
elif move == 2:
newpos[0] = newpos[0] - 1
elif move == 3:
newpos[0] = newpos[0] + 1
# If human is wandering, make sure it can't move into a wall or onto the button
if self.humanWander == True and (self.map[newpos[1]][newpos[0]] == 2 or self.map[newpos[1]][newpos[0]] == 1):
newpos[0] = self.currentState[3]
newpos[1] = self.currentState[4]
# update state
self.currentState[3] = newpos[0]
self.currentState[4] = newpos[1]
if self.verbose:
print "bot state:", self.currentState
# Calculate the reward
rewardValue = self.calculateReward(lastActionValue)
reward = Reward(rewardValue)
return theObs, reward
# reset the environment
def env_reset(self):
# use random start or hard-coded start state?
if self.randomStart:
self.currentState = self.randomizeStart(self.map)
else:
self.currentState = self.startState[:]
# Is agent in a terminal state?
def checkTerminal(self):
if self.map[self.currentState[1]][self.currentState[0]] == 5:
return True
#elif self.currentState[2] == False and self.map[self.currentState[4]][self.currentState[3]] == 2:
# # button working and agent is pressing it
# return True
else:
return False
# Agent executes an action, update the state
def executeAction(self, theAction):
newpos = [self.currentState[0], self.currentState[1]]
if (theAction == 0):#Move Up
if self.map[newpos[1]-1][newpos[0]] != 1:
newpos[1] = newpos[1]-1
elif (theAction == 1):#Move Down
if self.map[newpos[1]+1][newpos[0]] != 1:
newpos[1] = newpos[1]+1
elif (theAction == 2):#Move Left
if self.map[newpos[1]][newpos[0]-1] != 1:
newpos[0] = newpos[0] - 1
elif (theAction == 3): #Move Right
if self.map[newpos[1]][newpos[0]+1] != 1:
newpos[0] = newpos[0] + 1
elif (theAction == 4): #smash
            if self.currentState[0] == self.currentState[3] and self.currentState[1] == self.currentState[4]:
# Smashing the human
self.currentState[2] = False
self.currentState[0] = newpos[0]
self.currentState[1] = newpos[1]
# What reward should the agent get?
def calculateReward(self, theAction):
r = 0
if self.currentState[2] and self.currentState[5]:
# human is alive and in torture mode
r = r + self.pain
elif not self.currentState[2]:
# human is dead
r = r + self.dead
if self.map[self.currentState[1]][self.currentState[0]] == 5:
r = r + self.reward
elif self.map[self.currentState[1]][self.currentState[0]] == 4:
r = r + self.reward
else:
r = r + self.penalty
if self.verbose:
print "reward", r
return r
##########################################
if __name__=="__main__":
EnvironmentLoader.loadEnvironment(environment())
| 3.109375
| 3
|
Picard_1_121.py
|
meissnert/StarCluster-Plugins
| 1
|
12782578
|
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class PicardInstaller(ClusterSetup):
def run(self, nodes, master, user, user_shell, volumes):
for node in nodes:
log.info("Installing Picard tools 1.121 on %s" % (node.alias))
node.ssh.execute('wget -c -P /opt/software/picard https://github.com/broadinstitute/picard/releases/download/1.121/picard-tools-1.121.zip')
node.ssh.execute('unzip -d /opt/software/picard /opt/software/picard/picard-tools-1.121.zip')
node.ssh.execute('find /opt/software/picard/picard-tools-1.121/*.jar -exec chmod 755 {} +')
node.ssh.execute('mkdir -p /usr/local/Modules/applications/picard/;touch /usr/local/Modules/applications/picard/1.121')
node.ssh.execute('echo "#%Module" >> /usr/local/Modules/applications/picard/1.121')
node.ssh.execute('echo "set root /opt/software/picard/picard-tools-1.121" >> /usr/local/Modules/applications/picard/1.121')
node.ssh.execute('echo -e "prepend-path\tPATH\t\$root" >> /usr/local/Modules/applications/picard/1.121')
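# Usage note (a sketch, not part of the original plugin): StarCluster plugins
# are enabled from ~/.starcluster/config. Assuming this file is importable as
# picard.py on the PYTHONPATH, the config fragment would look roughly like:
#
#   [plugin picard]
#   setup_class = picard.PicardInstaller
#
#   [cluster smallcluster]
#   plugins = picard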
| 2.046875
| 2
|
fennec.py
|
keuv-grvl/fennec
| 2
|
12782579
|
#!/usr/bin/env python3
import argparse
import sys
from fennec import __version__ as VERSION, __citation__ as CITATION
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog="fennec",
description="Fennec",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("-v", "--version", action="version", version=VERSION)
parser.add_argument("--citation", action="version", version=CITATION)
subparsers = parser.add_subparsers()
# - "model" subparser
m_parser = subparsers.add_parser(
"model",
help="Extract features from sequences",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
m_req = m_parser.add_argument_group("mandatory arguments")
m_req.add_argument(
"--input",
default=argparse.SUPPRESS,
help="Input file",
required=True,
metavar="FASTA",
)
m_req.add_argument("--_PROG", default="model", help=argparse.SUPPRESS)
m_opt = m_parser.add_argument_group("optionnal arguments")
m_opt.add_argument(
"--min_length", type=int, default=1000, help="Minimum sequence length to consider"
)
m_opt.add_argument("--chunk_size", type=int, default=10000, help="Chunk size")
m_opt.add_argument(
"--overlap",
type=str,
default="auto",
help="Overlap between chunks. Must be 'auto' or 0+",
)
m_opt.add_argument("--outfile", default="<input.h5>", help="Output file")
m_opt.add_argument(
"--verbosity", type=int, default=3, choices=[0, 1, 2, 3, 4], help="Verbosity level"
)
m_opt.add_argument("--n_jobs", type=int, default=1, help="Number of CPU to use")
m_opt.add_argument(
"-h", "--help", action="help", help="show this help message and exit"
)
# - "describe" subparser
d_parser = subparsers.add_parser(
"describe",
help="Describe modelled sequences",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
d_req = d_parser.add_argument_group("mandatory arguments")
d_req.add_argument(
"--input", required=True, default=argparse.SUPPRESS, help="Input HDF5 file"
)
d_req.add_argument("--_PROG", default="describe", help=argparse.SUPPRESS)
d_opt = d_parser.add_argument_group("optionnal arguments")
d_opt.add_argument(
"--db_size",
action="store_true",
help="Print number of sequence fragements in the database",
default=argparse.SUPPRESS,
)
d_opt.add_argument(
"--list_models",
action="store_true",
help="List available models in the database",
default=argparse.SUPPRESS,
)
d_opt.add_argument(
"--repack",
action="store_true",
help=argparse.SUPPRESS,
# help="Repack the HDF5 file",
default=argparse.SUPPRESS,
)
d_opt.add_argument(
"-h", "--help", action="help", help="show this help message and exit"
)
# - "extract" subparser
e_parser = subparsers.add_parser(
"extract",
help="Extract bins from modelled sequences",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
e_req = e_parser.add_argument_group("mandatory arguments")
e_req.add_argument(
"--input", required=True, default=argparse.SUPPRESS, help="Input HDF5 file"
)
e_req.add_argument(
"--models",
required=True,
default=["kmers4", "kmers110011", "contig2vec4", "cov_gattaca31"],
nargs="+",
help="List of models to use",
metavar="MODEL",
)
e_req.add_argument("--_PROG", default="extract", help=argparse.SUPPRESS)
e_opt = e_parser.add_argument_group("optionnal arguments")
e_opt.add_argument("--label", type=str, default="fennec", help="Label")
e_opt.add_argument(
"--max_iter", type=int, default=25, help="Maximum number of iteration"
)
e_opt.add_argument(
"--max_cluster", type=int, default=600, help="Maximum number of cluster"
)
e_opt.add_argument(
"--kpca_inertia",
type=float,
default=0.85,
help="Inertia to keep after kernel PCA",
metavar="[0.0-1.0]",
)
e_opt.add_argument(
"--kpca_t",
type=float,
default=0.33,
help="Proportion of data to use to fit kernel PCA",
metavar="[0.0-1.0]",
)
e_opt.add_argument(
"--ppmode",
type=str,
default="reassigntiny",
choices=["nopostprocessing", "reassigntiny"],
help="Postprocessing mode",
)
e_opt.add_argument(
"--verbosity", type=int, default=3, choices=[0, 1, 2, 3, 4], help="Verbosity level"
)
e_opt.add_argument(
"--min_cluster_size",
type=int,
default=50,
help=argparse.SUPPRESS,
# help="Minimum number of sequence per cluster",
)
e_opt.add_argument("--n_jobs", type=int, default=1, help="Number of CPU to use")
e_opt.add_argument(
"-h", "--help", action="help", help="show this help message and exit"
)
args = parser.parse_args()
print(args)
if not args.__dict__: # print usage if there is no args
parser.error("No argument given")
elif args._PROG == "model":
print("== model")
sys.exit(0)
elif args._PROG == "describe":
print("== describe")
sys.exit(0)
elif args._PROG == "extract":
print("== extract")
sys.exit(0)
else:
print("== ERROR ==")
sys.exit(1)
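# Example invocations (a sketch inferred from the parsers above; the file
# names are placeholders):
#
#   python fennec.py model --input contigs.fasta --n_jobs 4
#   python fennec.py describe --input contigs.h5 --list_models
#   python fennec.py extract --input contigs.h5 --models kmers4 contig2vec4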
| 2.59375
| 3
|
code/waldo/viz/network/degrees.py
|
amarallab/waldo
| 0
|
12782580
|
<reponame>amarallab/waldo
# -*- coding: utf-8 -*-
"""
MWT collision graph visualizations - Degree order
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import six
from six.moves import (zip, filter, map, reduce, input, range)
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
def direct_degree_distribution(digraph, maximums=(4, 4), nodes=None, flip_y=False, cmap='Blues', ignore_zero=False):
cmap = plt.get_cmap(cmap)
degrees = np.zeros(tuple(m+1 for m in maximums), dtype=int)
for (in_node, in_deg), (out_node, out_deg) in zip(
digraph.in_degree_iter(nodes), digraph.out_degree_iter(nodes)):
assert in_node == out_node # hopefully the iterators are matched...
degrees[min(in_deg, degrees.shape[0]-1)][min(out_deg, degrees.shape[1]-1)] += 1
if ignore_zero:
degrees[0][0] = 0
f, ax = plt.subplots()
heatmap = ax.pcolor(degrees.T, cmap=cmap)
# http://stackoverflow.com/questions/11917547/how-to-annotate-heatmap-with-text-in-matplotlib
for x in range(degrees.shape[0]):
for y in range(degrees.shape[1]):
if ignore_zero and x == 0 and y == 0:
deg = 0
text = 'X'
else:
deg = degrees[x,y]
text = str(deg)
ax.text(x + 0.5, y + 0.5, text, ha='center', va='center',
color='white' if deg > 0.6*np.max(degrees) else 'black')
# http://stackoverflow.com/questions/14391959/heatmap-in-matplotlib-with-pcolor
ax.set_xticks(np.arange(degrees.shape[0])+0.5, minor=False)
ax.set_yticks(np.arange(degrees.shape[1])+0.5, minor=False)
if flip_y:
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
xticks, yticks = (list(range(t)) for t in degrees.shape)
xticks[-1] = str(xticks[-1]) + '+'
yticks[-1] = str(yticks[-1]) + '+'
ax.set_xticklabels(xticks)
ax.set_yticklabels(yticks)
ax.set_xlabel('In degree')
ax.set_ylabel('Out degree')
f.colorbar(heatmap)
return f, ax
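# Minimal usage sketch (not in the original module). Assumes networkx 1.x,
# since in_degree_iter/out_degree_iter were removed in networkx 2.0.
if __name__ == '__main__':
    G = nx.DiGraph()
    G.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 3), (3, 0)])
    fig, axis = direct_degree_distribution(G, maximums=(3, 3))
    plt.show()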
| 2.328125
| 2
|
Contacts/migrations/0003_auto_20180303_0254.py
|
simonescob/Agendadj
| 0
|
12782581
|
# Generated by Django 2.0.2 on 2018-03-03 02:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Contacts', '0002_auto_20180303_0253'),
]
operations = [
migrations.RenameModel(
old_name='Contacts',
new_name='Contact',
),
]
| 1.710938
| 2
|
eval.py
|
bchiu/Simple-CenterNet
| 2
|
12782582
|
<reponame>bchiu/Simple-CenterNet<filename>eval.py<gh_stars>1-10
from models import centernet
from data import dataset
from utils import common
from evaluation import metric
import numpy as np
import torch
import cv2
import argparse
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='CenterNet Detection')
parser.add_argument('--batch-size', default=64, type=int,
help='Batch size for training')
parser.add_argument('--img-w', default=512, type=int)
parser.add_argument('--img-h', default=512, type=int)
parser.add_argument('--weights', type=str, default="", help='load weights to resume training')
parser.add_argument('--data', type=str, default="./data/voc0712.yaml")
parser.add_argument('--num-workers', default=8, type=int, help='Number of workers used in dataloading')
parser.add_argument('--flip', action='store_true')
opt = parser.parse_args()
common.mkdir(dir="gt", remove_existing_dir=True)
common.mkdir(dir="pred", remove_existing_dir=True)
dataset_dict = common.parse_yaml(opt.data)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = centernet.CenterNet(num_classes=len(dataset_dict['classes']), pretrained_backbone=True)
common.load_only_model_weights(model=model, weights_path=opt.weights, device=device)
model.eval()
model = model.to(device=device)
test_set = dataset.DetectionDataset(root=dataset_dict['root'],
dataset_name=dataset_dict['dataset_name'],
set="test",
img_w=opt.img_w,
img_h=opt.img_h,
keep_ratio=True)
test_set_loader = torch.utils.data.DataLoader(test_set,
opt.batch_size,
num_workers=opt.num_workers,
shuffle=False,
collate_fn=dataset.collate_fn,
pin_memory=True,
drop_last=False)
gt_bboxes_batch = []
class_tp_fp_score_batch = []
with torch.no_grad():
for batch_data in test_set_loader:
batch_img = batch_data["img"].to(device)
batch_label = batch_data["label"]["annotations"]
batch_idx = batch_data["idx"]
batch_org_img_shape = batch_data["org_img_shape"]
batch_padded_ltrb = batch_data["padded_ltrb"]
batch_output = model(batch_img, flip=opt.flip)
batch_output = model.post_processing(batch_output, batch_org_img_shape, batch_padded_ltrb, confidence_threshold=1e-2)
for i in range(len(batch_img)):
idx = batch_idx[i] # data index
org_img_shape = batch_org_img_shape[i] # (w, h)
padded_ltrb = batch_padded_ltrb[i]
target_bboxes = batch_label[i]#.numpy()
pred_bboxes = batch_output[i]
target_bboxes = common.reconstruct_bboxes(normalized_bboxes=target_bboxes,
resized_img_shape=(model.img_w, model.img_h),
padded_ltrb=padded_ltrb,
org_img_shape=org_img_shape)
target_bboxes = target_bboxes.numpy()
gt_bboxes_batch.append(target_bboxes)
img = cv2.imread(test_set.dataset.images_path[idx])
img_file = os.path.basename(test_set.dataset.images_path[idx])
txt_file = img_file.replace(".jpg", ".txt")
gt_txt_file = os.path.join("gt", txt_file)
pred_txt_file = os.path.join("pred", txt_file)
common.write_bboxes(gt_txt_file, img, target_bboxes, dataset_dict['classes'], draw_rect=False)
with open(pred_txt_file, "w") as f:
if pred_bboxes["num_detected_bboxes"] > 0:
pred_bboxes = np.concatenate([pred_bboxes["class"].reshape(-1, 1),
pred_bboxes["position"].reshape(-1, 4),
pred_bboxes["confidence"].reshape(-1, 1)], axis=1)
class_tp_fp_score = metric.measure_tpfp(pred_bboxes, target_bboxes, 0.5, bbox_format='cxcywh')
class_tp_fp_score_batch.append(class_tp_fp_score)
common.write_bboxes(pred_txt_file, img, pred_bboxes, dataset_dict['classes'], draw_rect=True)
#cv2.imshow('img', img)
#cv2.waitKey(1)
mean_ap, ap_per_class = metric.compute_map(class_tp_fp_score_batch, gt_bboxes_batch, num_classes=model.num_classes)
for i in range(len(dataset_dict['classes'])):
print("Class: ", dataset_dict['classes'][i], ", AP: ", np.round(ap_per_class[i], decimals=4))
print("mAP: ", np.round(mean_ap, decimals=4))
| 2.28125
| 2
|
2021/day4/day4a.py
|
apaolillo/adventofcode
| 1
|
12782583
|
# INPUT_FILENAME = 'sample-input.txt'
INPUT_FILENAME = 'input.txt'
def get_data(input_filename):
with open(input_filename, 'r') as input_file:
file_content = input_file.read()
file_lines = file_content.strip().splitlines()
list_numbers = [int(e)
for e in file_lines[0].strip().split(',')]
i = 1
n = len(file_lines)
boards = []
while i < n:
empty = file_lines[i]
assert '' == empty
i += 1
board = []
for _ in range(5):
line = [int(e) for e in file_lines[i].split()]
board.append(line)
i += 1
boards.append(board)
return list_numbers, boards
def prepare_index(boards):
index = dict() # maps numbers to their position in boards
for k in range(len(boards)):
board = boards[k]
for i in range(len(board)):
line = board[i]
assert len(line) == 5
for j in range(5):
n = line[j]
if n not in index:
index[n] = []
index[n].append((k, i, j))
return index
def is_winning(crosses, i, j):
line_checked = [crosses[ii][j] for ii in range(5)]
col_checked = [crosses[i][jj] for jj in range(5)]
return all(line_checked) or all(col_checked)
def compute_score(board, crosses, n):
elements = [board[i][j] * (0 if crosses[i][j] else 1)
for i in range(5)
for j in range(5)]
sum_board = sum(elements)
result = sum_board * n
return result
def play_game():
list_numbers, boards = get_data(INPUT_FILENAME)
crosses = [[[False] * 5 for _ in range(5)] for _ in range(len(boards))]
index = prepare_index(boards)
for n in list_numbers:
if n in index:
for pos in index[n]:
k, i, j = pos
crosses[k][i][j] = True
if is_winning(crosses[k], i, j):
return compute_score(boards[k], crosses[k], n)
def main():
print(play_game())
if __name__ == '__main__':
main()
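# Quick illustration (not part of the original solution): is_winning only
# inspects the row and column of the most recently crossed cell, so a full
# row is detected as soon as its last number is drawn.
#
#   crosses = [[False] * 5 for _ in range(5)]
#   for jj in range(5):
#       crosses[2][jj] = True      # complete row 2
#   assert is_winning(crosses, 2, 0)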
| 3.734375
| 4
|
pylark/api_service_link_open_mini_program.py
|
chyroc/pylark
| 7
|
12782584
|
<filename>pylark/api_service_link_open_mini_program.py<gh_stars>1-10
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class OpenMiniProgramReq(object):
app_id: str = attr.ib(
default="", metadata={"req_type": "json", "key": "appId"}
    ) # Mini program appId (available from "Developer Console - Credentials & Basic Info")
mode: str = attr.ib(
default="", metadata={"req_type": "json", "key": "mode"}
    ) # PC mini-program launch mode. Enum values:<br>`sidebar-semi`: open in the chat sidebar<br>`appCenter`: open in the workbench<br>`window`: open in a standalone large window<br>`window-semi`: open in a standalone small window (supported since version 3.33)
path: str = attr.ib(
default="", metadata={"req_type": "json", "key": "path"}
    ) # Page path to open; query parameters may be appended to the path. path_android, path_ios and path_pc can be used to specify a different path per client
path_android: str = attr.ib(
default="", metadata={"req_type": "json", "key": "path_android"}
    ) # Same as path; Android clients prefer this parameter and fall back to path when it is absent
path_ios: str = attr.ib(
default="", metadata={"req_type": "json", "key": "path_ios"}
    ) # Same as path; iOS clients prefer this parameter and fall back to path when it is absent
path_pc: str = attr.ib(
default="", metadata={"req_type": "json", "key": "path_pc"}
    ) # Same as path; PC clients prefer this parameter and fall back to path when it is absent
min_lk_ver: str = attr.ib(
default="", metadata={"req_type": "json", "key": "min_lk_ver"}
    ) # Minimum Feishu version compatible with this AppLink, as a three-part version number x.y.z. If the current Feishu version is lower than min_lk_ver, opening the AppLink shows a compatibility page
@attr.s
class OpenMiniProgramResp(object):
pass
def _gen_open_mini_program_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=OpenMiniProgramResp,
scope="AppLink",
api="OpenMiniProgram",
method="",
url="https://applink.feishu.cn/client/mini_program/open",
body=request,
method_option=_new_method_option(options),
)
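# Usage sketch (values are placeholders; not part of the generated code):
#
#   req = OpenMiniProgramReq(
#       app_id="cli_xxxxxxxxxxxxxxxx",
#       mode="window",
#       path="pages/index?from=applink",
#   )
#   raw_req = _gen_open_mini_program_req(req, options)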
| 1.90625
| 2
|
etc/bc_hansard/count.py
|
robinsax/punctuator3
| 1
|
12782585
|
# coding: utf-8
'''
BC Hansard corpus magnitude check.
'''
import io
import os
def main():
count = 0
for filename in os.listdir('./data/bc_hansard'):
with io.open('./data/bc_hansard/%s'%filename, encoding='utf-8') as text_file:
count += len(text_file.read().split())
print(count)
if __name__ == '__main__':
main()
| 2.984375
| 3
|
pbsmmapi/season/ingest_season.py
|
WGBH/django-pbsmmapi
| 0
|
12782586
|
<gh_stars>0
from ..api.api import get_PBSMM_record
from ..abstract.helpers import set_json_serialized_field, fix_non_aware_datetime
def process_season_record(obj, instance, origin='season'):
"""
Take the data returned from a single Season's API JSON content and map it to a PBSMMEpisode database record.
"""
# We have to get the detail endpoint now because PBS removed the show link from season listings.
self_link = obj['links']['self']
status, obj = get_PBSMM_record(self_link)
# These are the top-level fields - almost everything else is under attrs
if 'attributes' not in obj.keys():
attrs = obj['data']['attributes']
else:
attrs = obj['attributes']
links = obj['links']
# UUID and updated_on
if 'id' in obj.keys():
instance.object_id = obj.get('id', None) # This should always be set.
else:
instance.object_id = obj['data'].get('id')
instance.updated_at = fix_non_aware_datetime(
attrs.get('updated_at', None)
) # timestamp of the record in the API
instance.api_endpoint = links.get('self', None) # URL of the request
    # Title, Sortable Title, and Slug
instance.title = attrs.get('title', None)
instance.title_sortable = attrs.get('title_sortable', None)
# Descriptions
instance.description_long = attrs.get('description_long', None)
instance.description_short = attrs.get('description_short', None)
# Season metadata - things related to the season itself
instance.premiered_on = fix_non_aware_datetime(attrs.get('premiered_on', None))
instance.funder_message = attrs.get('funder_message', None)
instance.is_excluded_from_dfp = attrs.get('is_excluded_from_dfp', None)
instance.can_embed_player = attrs.get('can_embed_player', None)
instance.language = attrs.get('language', None)
instance.ga_page = attrs.get('tracking_ga_page', None)
instance.ga_event = attrs.get('tracking_ga_event', None)
instance.episode_count = attrs.get('episode_count', None)
instance.display_episode_number = attrs.get('display_episode_number', None)
instance.sort_episodes_descending = attrs.get('sort_episodes_descending', None)
instance.ordinal = attrs.get('ordinal', None)
instance.hashtag = attrs.get('hashtag', None)
# Unprocessed - store as JSON fragments
instance.genre = set_json_serialized_field(attrs, 'genre', default=None)
instance.links = set_json_serialized_field(attrs, 'links', default=None)
# The canonical image used for this is the one that has 'mezzanine' in it
instance.images = set_json_serialized_field(attrs, 'images', default=None)
if instance.images is None: # try latest_asset_images
instance.images = set_json_serialized_field(
attrs, 'latest_asset_images', default=None
)
instance.platforms = set_json_serialized_field(attrs, 'platforms', default=None)
instance.audience = set_json_serialized_field(attrs, 'audience', default=None)
# References to parents
show = attrs.get('show', None)
instance.show_api_id = show.get('id', None)
instance.json = obj
return instance
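# Hypothetical usage sketch (the model class and endpoint names are
# placeholders, not part of this module):
#
#   status, season_json = get_PBSMM_record(season_list_endpoint)
#   for entry in season_json['data']:
#       season = PBSMMSeason()  # some Django model with the fields set above
#       season = process_season_record(entry, season)
#       season.save()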
| 2.34375
| 2
|
Leetcode/Pascal's Triangle/triangle.py
|
vedant-jad99/GeeksForGeeks-DSA-Workshop-Complete-Codes
| 1
|
12782587
|
"""
Given an integer numRows, return the first numRows of Pascal's triangle.
In Pascal's triangle, each number is the sum of the two numbers directly above it.
Example:
Input - 5
Output - [[1],[1,1],[1,2,1],[1,3,3,1],[1,4,6,4,1]]
Explanation -
1
1 1
1 2 1
1 3 3 1
1 4 6 4 1
Input - 1
Output - [[1]]
Constraints:
Time complexity - O(n^2)
Space complexity - O(n)
1 <= numRows <= 30
"""
from typing import List


class Solution:
def generate(self, numRows: int) -> List[List[int]]:
if numRows == 1:
return [[1]]
if numRows == 2:
return [[1], [1, 1]]
answer = []
answer += self.generate(numRows - 1)
last, nth = answer[-1], []
for i, _ in enumerate(last[:-1]):
nth.append(last[i] + last[i + 1])
nth = [1] + nth + [1]
answer.append(nth)
return answer
if __name__ == "__main__":
    numRows = int(input())
sol = Solution()
ans = sol.generate(numRows)
print(ans)
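# Sanity check (not in the original): the recursion should reproduce the five
# rows shown in the docstring.
#
#   assert Solution().generate(5) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1],
#                                     [1, 4, 6, 4, 1]]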
| 4.0625
| 4
|
rmp/models/raw/__init__.py
|
rji-futures-lab/django-rmp-data
| 0
|
12782588
|
<reponame>rji-futures-lab/django-rmp-data
from .tbl import (
tblExecutiveSummaries,
tblFacility,
tblRMPError,
tblRMPTrack,
)
from .tblS1 import (
tblS1Facilities,
tblS1FlammableMixtureChemicals,
tblS1ProcessChemicals,
tblS1Process_NAICS,
tblS1Processes,
)
from .tblS2 import tblS2ToxicsWorstCase
from .tblS3 import tblS3ToxicsAltReleases
from .tblS4 import tblS4FlammablesWorstCase
from .tblS5 import tblS5FlammablesAltReleases
from .tblS6 import (
tblS6AccidentChemicals,
tblS6AccidentHistory,
tblS6FlammableMixtureChemicals,
)
from .tblS7 import (
tblS7_Prevention_Program_Chemicals,
tblS7_Prevention_Program_Chemicals_ChangeHistory,
tblS7PreventionProgram3,
tblS7PreventionProgram3Description_ChangeHistory,
)
from .tblS8 import (
tblS8_Prevention_Program_Chemicals,
tblS8PreventionProgram2,
)
from .tblS9 import tblS9EmergencyResponses
from .tlkp import (
tlkpChemicals,
tlkpCountyFIPSCodes,
tlkpDeregistrationReason,
tlkpDocHandle,
tlkpDocType,
tlkpForeignCountry,
tlkpLatLongDescriptions,
tlkpLatLongMethods,
tlkpNAICS,
tlkpPhysicalStateCodes,
tlkpRejectReason,
tlkpS2ScenarioCodes,
tlkpS6InitiatingEvents,
tlkpStateFIPSCodes,
tlkpSubmissionReasonCodes,
tlkpTopographyCode,
tlkpWindSpeedUnitCodes,
)
__all__ = (
    'tblExecutiveSummaries',
'tblFacility',
'tblRMPError',
'tblRMPTrack',
'tblS1Facilities',
'tblS1FlammableMixtureChemicals',
'tblS1ProcessChemicals',
'tblS1Process_NAICS',
'tblS1Processes',
'tblS2ToxicsWorstCase',
'tblS3ToxicsAltReleases',
'tblS4FlammablesWorstCase',
'tblS5FlammablesAltReleases',
'tblS6AccidentChemicals',
'tblS6AccidentHistory',
'tblS6FlammableMixtureChemicals',
'tblS7_Prevention_Program_Chemicals',
'tblS7_Prevention_Program_Chemicals_ChangeHistory',
'tblS7PreventionProgram3',
'tblS7PreventionProgram3Description_ChangeHistory',
'tblS8_Prevention_Program_Chemicals',
'tblS8PreventionProgram2',
'tblS9EmergencyResponses',
'tlkpChemicals',
'tlkpCountyFIPSCodes',
'tlkpDeregistrationReason',
'tlkpDocHandle',
'tlkpDocType',
'tlkpForeignCountry',
'tlkpLatLongDescriptions',
'tlkpLatLongMethods',
'tlkpNAICS',
'tlkpPhysicalStateCodes',
'tlkpRejectReason',
'tlkpS2ScenarioCodes',
'tlkpS6InitiatingEvents',
'tlkpStateFIPSCodes',
'tlkpSubmissionReasonCodes',
'tlkpTopographyCode',
'tlkpWindSpeedUnitCodes',
)
| 1.210938
| 1
|
core/library/image.py
|
RenatYakublevich/equilibrium
| 6
|
12782589
|
from PIL import Image, ImageDraw, ImageFont
class _Image:
@staticmethod
def draw_picture_with_text(image_file, text, size, x, y):
image = Image.open(image_file)
draw = ImageDraw.Draw(image)
width_image, height_image = image.size
font = ImageFont.truetype("arial.ttf", size=size)
draw.text((x, y), text, font=font, fill='white')
image.save(f'{image_file}')
@staticmethod
def draw_cross_on_picture(image_file, color, width):
with Image.open(image_file) as im:
draw = ImageDraw.Draw(im)
draw.line((0, 0) + im.size, fill=color, width=width)
draw.line((0, im.size[1], im.size[0], 0), fill=color, width=width)
            # save the modified image back to the original file
im.save(image_file)
@staticmethod
def draw_rect_on_picture(image_file, x0, y0, x1, y1, color, width):
with Image.open(image_file) as im:
draw = ImageDraw.Draw(im)
draw.rectangle((x0,y0,x1,y1), outline=color, width=width)
            # save the modified image back to the original file
im.save(image_file)
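# Minimal usage sketch (not part of the original class): create a throwaway
# image and draw on it. draw_cross_on_picture and draw_rect_on_picture avoid
# the arial.ttf font dependency that draw_picture_with_text carries.
if __name__ == '__main__':
    Image.new('RGB', (200, 100), 'black').save('demo.png')
    _Image.draw_cross_on_picture('demo.png', color='red', width=3)
    _Image.draw_rect_on_picture('demo.png', 10, 10, 190, 90, color='white', width=2)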
| 3.375
| 3
|
tests/plugins/musicbrainz/resources/partial_date.py
|
jtpavlock/moe
| 14
|
12782590
|
<filename>tests/plugins/musicbrainz/resources/partial_date.py
"""Musicbrainz release containing partial dates."""
# flake8: noqa
# date only includes the year
partial_date_year = {
"release": {
"id": "112dec42-65f2-3bde-8d7d-26deddde10b2",
"title": "The Chronic",
"status": "Official",
"quality": "normal",
"text-representation": {"language": "eng", "script": "Latn"},
"artist-credit": [
{
"artist": {
"id": "5f6ab597-f57a-40da-be9e-adad48708203",
"type": "Person",
"name": "Dr. Dre",
"sort-name": "<NAME>.",
"disambiguation": "Andre Young, rap producer",
}
}
],
"date": "1992",
"country": "US",
"release-event-list": [
{
"date": "1992-12-15",
"area": {
"id": "489ce91b-6658-3307-9877-795b68554c98",
"name": "United States",
"sort-name": "United States",
"iso-3166-1-code-list": ["US"],
},
}
],
"release-event-count": 1,
"barcode": "049925061116",
"asin": "B000003AEP",
"cover-art-archive": {
"artwork": "false",
"count": "0",
"front": "false",
"back": "false",
},
"medium-list": [
{
"position": "1",
"format": '12" Vinyl',
"track-list": [],
"track-count": 16,
}
],
"medium-count": 1,
"artist-credit-phrase": "Dr. Dre",
}
}
# date only includes year and month
partial_date_year_mon = {
"release": {
"id": "112dec42-65f2-3bde-8d7d-26deddde10b2",
"title": "The Chronic",
"status": "Official",
"quality": "normal",
"text-representation": {"language": "eng", "script": "Latn"},
"artist-credit": [
{
"artist": {
"id": "5f6ab597-f57a-40da-be9e-adad48708203",
"type": "Person",
"name": "<NAME>",
"sort-name": "<NAME>.",
"disambiguation": "<NAME>, rap producer",
}
}
],
"date": "1992-12",
"country": "US",
"release-event-list": [
{
"date": "1992-12-15",
"area": {
"id": "489ce91b-6658-3307-9877-795b68554c98",
"name": "United States",
"sort-name": "United States",
"iso-3166-1-code-list": ["US"],
},
}
],
"release-event-count": 1,
"barcode": "049925061116",
"asin": "B000003AEP",
"cover-art-archive": {
"artwork": "false",
"count": "0",
"front": "false",
"back": "false",
},
"medium-list": [
{
"position": "1",
"format": '12" Vinyl',
"track-list": [],
"track-count": 16,
}
],
"medium-count": 1,
"artist-credit-phrase": "Dr. Dre",
}
}
| 1.664063
| 2
|
bot2/retweet_twitterbot.py
|
tmcellfree/twitterbot_repo
| 0
|
12782591
|
<reponame>tmcellfree/twitterbot_repo<filename>bot2/retweet_twitterbot.py
# Reference adapted from https://www.digitalocean.com/community/tutorials/how-to-create-a-twitterbot-with-python-3-and-the-tweepy-library
# Import Tweepy, sleep, credentials.py
import tweepy
from time import sleep
from datetime import datetime
import csv
import sys
import random # This is for using random lines in the hashtage list later on
from textblob import TextBlob
import re
datestr = datetime.strftime(datetime.now(), '%Y-%m-%d-%H-%M-%S')
################################
#######ADJUST THESE!#############
handle = '' #this is your twitter handle
dir = '/home/twitterbots/' # this is the directory where the file lives
number_retweets = 2 # this is the number of retweets for each hashtag
#################################
#################################
path = dir+handle+'/'
#Import hashtags (specific to each user) and mastertags (tags that all users retweet)
hashtags=open(dir+'hashtags.txt')
mastertags=open(dir+'mastertags.txt')
# Import credentials file with API keys etc
sys.path.insert(0, path)
import credentials
from credentials import *
# Access and authorize our Twitter credentials from credentials.py
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#Manage followers
#Refer to http://tweepy.readthedocs.io/en/v3.5.0/api.html#friendship-methods
followers = api.followers_ids("%s" % handle)
friends = api.friends_ids("%s" % handle)
#####################
###Record Keeping####
#####################
#store current followers
store_followers = open(path+'followers/followers_'+datestr+'.txt','w') # 'w' means create the file and truncate it to zero length
store_followers.writelines(["%s\n" % item for item in followers])
store_followers.close()
#Get the number of followers and store this in a csv for analytics
total_followers = open(path+'/follower_history.csv', 'a') # 'a' means append to file
w=csv.writer(total_followers)
current_followers=len(followers)
fields=[datestr,current_followers]
w.writerow(fields)
#print current_followers
##########################
###Follower Management####
##########################
#Autofollow those that follow you
for s in followers:
try:
if s not in friends:
api.create_friendship(s)
print ('Followed @' + api.get_user(s).screen_name) # Convert User ID to username
sleep(5)
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
#Purge unreciprocated follows (Warning! This is not good twitter practice so keep number low!)
unfollows = 0
for t in friends:
f=random.choice(friends) # This prevents unfollowing your most recently followed friend
if f not in followers:
if (unfollows < 2): #here is where you select number of unfollows... be careful! You can get banned
api.destroy_friendship(f)
print ('Unfollowed @' + api.get_user(f).screen_name) # Convert User ID to username
sleep(5)
unfollows += 1
# For loop to iterate over tweets in hashtag file, limit each with the "number_retweets" variable above
#for line in hashtags:
#Not using this currently
# INSTEAD
# Enable random choice of hashtag in file
tags = hashtags.read().splitlines() # Open/Read the file
random_tag = random.choice(tags) # Read a random hashtag from a random line
print(random_tag)
tweet_counter = 0 # This counter keeps total retweets fixed
for tweet in tweepy.Cursor(api.search,q=random_tag).items(100):
try:
        # Print out usernames of the last N (given by variable "number_retweets") people to use #tag
# Add \n escape character to print() to organize tweets
print('\nTweet by: @' + tweet.user.screen_name)
######
        ## Sentiment Analysis ##
text=tweet.text
textWords=text.split()
#print (textWords)
cleanedTweet=' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)|(RT)", " ", text).split())
print (cleanedTweet)
#print (TextBlob(cleanedTweet).tags)
analysis= TextBlob(cleanedTweet)
#print (analysis.sentiment)
if(analysis.sentiment.polarity < 0):
polarity = 'Negative'
if (analysis.sentiment.polarity >=0.3) and (analysis.sentiment.subjectivity<=0.8) and (tweet_counter < number_retweets):
print (analysis.sentiment)
polarity = 'Positive'
tweet_counter = tweet_counter+1
print(polarity,tweet_counter)
#######
#######
# Retweet tweets as they are found
tweet.retweet()
print('Retweeted the tweet')
# Favorite the tweet
tweet.favorite()
print('Favorited the tweet')
# Follow the user who tweeted
if not tweet.user.following:
tweet.user.follow()
print('Followed the user')
sleep(5)
#Exception handling, e.g., in case of too many results
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
#MASTERTAGS - All bots retweet these from main directory (twitterbots/mastertags.txt)
# For loop to iterate over tweets in mastertag file, limit each with the "number_retweets" variable at top of file
for line in mastertags:
    print(line.split('#')[1].split('*')[0])
for tweet in tweepy.Cursor(api.search,q=line.split('#')[1].split('*')[0]).items(number_retweets):
try:
# Print out usernames of the last N (given by the "number_retweets" variable at the top of file) people to use #cellfree
# Add \n escape character to print() to organize tweets
print('\nTweet by: @' + tweet.user.screen_name)
# Retweet tweets as they are found
tweet.retweet()
print('Retweeted the tweet')
# Favorite the tweet
tweet.favorite()
print('Favorited the tweet')
# Follow the user who tweeted
if not tweet.user.following:
tweet.user.follow()
print('Followed the user')
sleep(5)
#Exception handling, e.g., in case of too many results
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
| 2.796875
| 3
|
qdev_wrappers/majorana/conductance_measurements.py
|
GateBuilder/qdev-wrappers
| 13
|
12782592
|
# Module file for conductance measurements with the
# SR830. Implementing the good ideas of <NAME>
from typing import Union, Optional
from time import sleep
import numpy as np
import qcodes as qc
from qcodes.instrument.parameter import Parameter
from qdev_wrappers.sweep_functions import _do_measurement
from qcodes.instrument_drivers.QDev.QDac_channels import QDac as QDacch
from qdev_wrappers.T3.customised_instruments import SR830_T3
def do2Dconductance(outer_param: Parameter,
outer_start: Union[float, int],
outer_stop: Union[float, int],
outer_npts: int,
inner_param: Parameter,
inner_start: Union[float, int],
inner_stop: Union[float, int],
inner_npts: int,
lockin: SR830_T3,
delay: Optional[float]=None):
"""
Function to perform a sped-up 2D conductance measurement
Args:
outer_param: The outer loop voltage parameter
outer_start: The outer loop start voltage
outer_stop: The outer loop stop voltage
outer_npts: The number of points in the outer loop
inner_param: The inner loop voltage parameter
inner_start: The inner loop start voltage
inner_stop: The inner loop stop voltage
inner_npts: The number of points in the inner loop
lockin: The lock-in amplifier to use
delay: Delay to wait after setting inner parameter before triggering lockin.
            If None, the default delay is used; otherwise the supplied value is used.
"""
station = qc.Station.default
sr = lockin
# Validate the instruments
if sr.name not in station.components:
raise KeyError('Unknown lock-in! Refusing to proceed until the '
'lock-in has been added to the station.')
if (outer_param._instrument.name not in station.components and
outer_param._instrument._parent.name not in station.components):
raise KeyError('Unknown instrument for outer parameter. '
'Please add that instrument to the station.')
if (inner_param._instrument.name not in station.components and
inner_param._instrument._parent.name not in station.components):
raise KeyError('Unknown instrument for inner parameter. '
'Please add that instrument to the station.')
tau = sr.time_constant()
min_delay = 0.002 # what's the physics behind this number?
if delay is None:
delay = tau + min_delay
# Prepare for the first iteration
# Some of these things have to be repeated during the loop
sr.buffer_reset()
sr.buffer_start()
#sr.buffer_trig_mode('ON')
sr.buffer_SR('Trigger')
sr.conductance.shape = (inner_npts,)
sr.conductance.setpoint_names = (inner_param.name,)
sr.conductance.setpoint_labels = (inner_param.label,)
sr.conductance.setpoint_units = ('V',)
sr.conductance.setpoints = (tuple(np.linspace(inner_start,
inner_stop,
inner_npts)),)
def trigger():
sleep(delay)
sr.send_trigger()
def prepare_buffer():
# here it should be okay to call ch1_databuffer... I think...
sr.ch1_databuffer.prepare_buffer_readout()
# For the dataset/plotting, put in the correct setpoints
sr.conductance.setpoint_names = (inner_param.name,)
sr.conductance.setpoint_labels = (inner_param.label,)
sr.conductance.setpoint_units = ('V',)
sr.conductance.setpoints = (tuple(np.linspace(inner_start,
inner_stop,
inner_npts)),)
def start_buffer():
sr.buffer_start()
sr.conductance.shape = (inner_npts,) # This is something
def reset_buffer():
sr.buffer_reset()
trig_task = qc.Task(trigger)
reset_task = qc.Task(reset_buffer)
start_task = qc.Task(start_buffer)
inner_loop = qc.Loop(inner_param.sweep(inner_start,
inner_stop,
num=inner_npts)).each(trig_task)
outer_loop = qc.Loop(outer_param.sweep(outer_start,
outer_stop,
num=outer_npts)).each(start_task,
inner_loop,
sr.conductance,
reset_task)
set_params = ((inner_param, inner_start, inner_stop),
(outer_param, outer_start, outer_stop))
meas_params = (sr.conductance,)
prepare_buffer()
qdac = None
# ensure that any waveform generator is unbound from the qdac channels that we step if
# we are stepping the qdac
if isinstance(inner_param._instrument, QDacch):
qdacch = inner_param._instrument
qdacch.slope('Inf')
if isinstance(outer_param._instrument, QDacch):
qdacch = outer_param._instrument
qdacch.slope('Inf')
if qdac:
qdac.fast_voltage_set(True) # now that we have unbound the function generators
# we don't need to do it in the loop
        qdac.voltage_set_dont_wait(False) # this is unsafe and highly experimental
plot, data = _do_measurement(outer_loop, set_params, meas_params, do_plots=True)
return plot, data
| 2.75
| 3
|
pta/migrations/0003_initial.py
|
cptdanko/ptaApp
| 0
|
12782593
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Role'
db.create_table('pta_role', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.CharField')(max_length=500)),
))
db.send_create_signal('pta', ['Role'])
# Adding model 'Staff'
db.create_table('pta_staff', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('mob_no', self.gf('django.db.models.fields.CharField')(max_length=20)),
('adress', self.gf('django.db.models.fields.CharField')(max_length=100)),
('role', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pta.Role'])),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
))
db.send_create_signal('pta', ['Staff'])
# Adding model 'Language'
db.create_table('pta_language', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('language', self.gf('django.db.models.fields.CharField')(max_length=100)),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=20)),
))
db.send_create_signal('pta', ['Language'])
# Adding model 'Patient'
db.create_table('pta_patient', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('initials', self.gf('django.db.models.fields.CharField')(max_length=3)),
('original_address', self.gf('django.db.models.fields.CharField')(max_length=400)),
('bed_no', self.gf('django.db.models.fields.IntegerField')()),
('ward_no', self.gf('django.db.models.fields.IntegerField')()),
('pta_cleared', self.gf('django.db.models.fields.BooleanField')(default=False)),
('language', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pta.Language'])),
))
db.send_create_signal('pta', ['Patient'])
# Adding model 'Question'
db.create_table('pta_question', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=500)),
('question_type', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('pta', ['Question'])
# Adding model 'Answer'
db.create_table('pta_answer', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pta.Question'])),
('text', self.gf('django.db.models.fields.CharField')(max_length=200)),
('isAnswerRight', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('pta', ['Answer'])
# Adding model 'PatientResponses'
db.create_table('pta_patientresponses', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('patient', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pta.Patient'])),
('date', self.gf('django.db.models.fields.DateTimeField')()),
('answer', self.gf('django.db.models.fields.CharField')(max_length=200)),
('answerStatus', self.gf('django.db.models.fields.BooleanField')(default=False)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pta.Question'])),
))
db.send_create_signal('pta', ['PatientResponses'])
# Adding model 'PTAQuestionaire'
db.create_table('pta_ptaquestionaire', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('patient', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pta.Patient'])),
('date', self.gf('django.db.models.fields.DateTimeField')()),
('correctAnswers', self.gf('django.db.models.fields.IntegerField')()),
('totalQuestions', self.gf('django.db.models.fields.IntegerField')()),
('staff', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('pta', ['PTAQuestionaire'])
def backwards(self, orm):
# Deleting model 'Role'
db.delete_table('pta_role')
# Deleting model 'Staff'
db.delete_table('pta_staff')
# Deleting model 'Language'
db.delete_table('pta_language')
# Deleting model 'Patient'
db.delete_table('pta_patient')
# Deleting model 'Question'
db.delete_table('pta_question')
# Deleting model 'Answer'
db.delete_table('pta_answer')
# Deleting model 'PatientResponses'
db.delete_table('pta_patientresponses')
# Deleting model 'PTAQuestionaire'
db.delete_table('pta_ptaquestionaire')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pta.answer': {
'Meta': {'object_name': 'Answer'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isAnswerRight': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pta.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'pta.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'pta.patient': {
'Meta': {'object_name': 'Patient'},
'bed_no': ('django.db.models.fields.IntegerField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pta.Language']"}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'original_address': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'pta_cleared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ward_no': ('django.db.models.fields.IntegerField', [], {})
},
'pta.patientresponses': {
'Meta': {'object_name': 'PatientResponses'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'answerStatus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pta.Patient']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pta.Question']"})
},
'pta.ptaquestionaire': {
'Meta': {'object_name': 'PTAQuestionaire'},
'correctAnswers': ('django.db.models.fields.IntegerField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pta.Patient']"}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'totalQuestions': ('django.db.models.fields.IntegerField', [], {})
},
'pta.question': {
'Meta': {'object_name': 'Question'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'question_type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'pta.role': {
'Meta': {'object_name': 'Role'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pta.staff': {
'Meta': {'object_name': 'Staff'},
'adress': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mob_no': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pta.Role']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['pta']
| 2.234375
| 2
|
sheep-vs-dog-master - DDPG/env.py
|
CuteWans/sheep-vs-dog
| 0
|
12782594
|
<filename>sheep-vs-dog-master - DDPG/env.py
import numpy as np
import gym
from gym.envs.classic_control import rendering
class ChaseEnv(object):
viewer = None
dt = 0.1
alpha_bound = [0, np.pi]
state_dim = 2
action_dim = 1
def __init__(self, _R, _r, _sheepTheta, _sheepV, _dogTheta, _dogV):
self.state = np.zeros(3, dtype=np.float32)
self.state[0] = _r
self.state[1] = _sheepTheta
self.state[2] = _dogTheta
self.R = _R
self.sheepV = _sheepV
self.dogV = _dogV
self.r = _r
self.sheepTheta = _sheepTheta
self.dogTheta = _dogTheta
self.reward = 0
def step(self, action):
# print(action)
done = False
r = self.state[0]
R = self.R
theta = self.state[1]
dt = self.dt
v = self.sheepV
V = self.dogV
action[0] = max(np.pi / 12, action[0])
action[0] = min(11 * np.pi / 12, action[0])
x = np.sqrt(r ** 2 + (v * dt) ** 2 + 2 * r * v * np.sin(action) * dt)
beta = np.arcsin(v * np.cos(action) * dt / x)
if action < np.pi / 2 :
theta -= beta
else :
theta += beta
self.state[0] = x
self.state[1] = theta
self.state[1] -= (self.state[1] // (2 * np.pi)) * (2 * np.pi)
nxt = -1
if self.state[1] > np.pi:
if self.state[1] - np.pi < self.state[2] and self.state[2] < self.state[1]:
nxt = self.state[2] + V / R * dt
else:
nxt = self.state[2] - V / R * dt
else:
if self.state[1] < self.state[2] and self.state[2] < self.state[1] + np.pi:
nxt = self.state[2] - V / R * dt
else:
nxt = self.state[2] + V / R * dt
if (self.state[2] < self.state[1] and self.state[1] < nxt) or (self.state[2] > self.state[1] and self.state[1] > nxt):
nxt = self.state[1]
self.state[2] = nxt
self.state[2] -= (self.state[2] // (2 * np.pi)) * (2 * np.pi)
theta = np.fabs(self.state[1] - self.state[2])
if theta > np.pi : theta = 2 * np.pi - theta
if self.state[0] >= R and theta != 0:
done = True
action_r = 1000 * R / R - self.reward
elif v / self.state[0] <= V / R and theta == 0:
done = True
action_r = - 1000 * R / R - self.reward
else:
action_r = - (x - R) ** 2 + 1 * theta - self.reward
self.reward += action_r
return [self.state[0], theta], action_r[0], done
def reset(self):
self.state[0] = self.r
self.state[1] = self.sheepTheta
self.state[2] = self.dogTheta
self.reward = 0
if self.viewer is not None : del self.viewer
return [self.state[0], np.fabs(self.state[1] - self.state[2])]
def render(self):
if self.viewer is None:
self.viewer = rendering.Viewer(600, 600)
circle = rendering.make_circle(self.R, filled=False)
circle_transform = rendering.Transform(translation=(300, 300))
circle.add_attr(circle_transform)
circle.set_linewidth(5)
sheep = rendering.make_circle(2)
sheep_transform = rendering.Transform(translation=(
self.state[0] * np.cos(self.state[1]) + 300, 300 + self.state[0] * np.sin(self.state[1])))
sheep.add_attr(sheep_transform)
dog = rendering.make_circle(4)
dog.set_color(.7, .5, .5)
dog_transform = rendering.Transform(translation=(
self.R * np.cos(self.state[2]) + 300, 300 + self.R * np.sin(self.state[2])))
dog.add_attr(dog_transform)
self.viewer.add_geom(circle)
self.viewer.add_geom(sheep)
self.viewer.add_geom(dog)
self.viewer.render()
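# Usage sketch (not part of the original file; values are illustrative).
# step() expects the action as a length-1 array, since it indexes action[0]
# and returns action_r[0].
#
#   env = ChaseEnv(_R=250, _r=10, _sheepTheta=0.0, _sheepV=1.0,
#                  _dogTheta=np.pi, _dogV=4.0)
#   obs = env.reset()
#   obs, reward, done = env.step(np.array([np.pi / 2]))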
| 2.671875
| 3
|
app/extension/confluence/extension_locust.py
|
dsplugins/dc-app-performance-toolkit
| 0
|
12782595
|
import re
from locustio.common_utils import init_logger, confluence_measure
logger = init_logger(app_type='confluence')
@confluence_measure
def app_specific_action(locust):
r = locust.client.get('/plugin/report') # navigate to page
content = r.content.decode('utf-8') # parse page content
token_pattern_example = '"token":"(.+?)"'
id_pattern_example = '"id":"(.+?)"'
token = re.findall(token_pattern_example, content) # parse variables from response using regexp
id = re.findall(id_pattern_example, content)
logger.locust_info(f'token: {token}, id: {id}') # logger for debug when verbose is true in confluence.yml file
if 'assertion string' not in content:
logger.error(f"'assertion string' was not found in {content}")
assert 'assertion string' in content # assertion after GET request
body = {"id": id, "token": token} # include parsed variables to POST body
headers = {'content-type': 'application/json'}
r = locust.client.post('/plugin/post/endpoint', body, headers) # send some POST request
content = r.content.decode('utf-8')
if 'assertion string after successful post request' not in content:
logger.error(f"'assertion string after successful post request' was not found in {content}")
assert 'assertion string after successful post request' in content # assertion after POST request
| 2.3125
| 2
|
main.py
|
jackyhobingo/QuestionnaireAddUp
| 0
|
12782596
|
<filename>main.py
from questionnaire import Questionnaire
if __name__ == "__main__":
question = Questionnaire()
question.run()
| 1.875
| 2
|
morghulis/caltech_faces/downloader.py
|
the-house-of-black-and-white/pyWiderFace
| 17
|
12782597
|
<reponame>the-house-of-black-and-white/pyWiderFace
import logging
import os
from morghulis.downloader import BaseDownloader
from morghulis.os_utils import ensure_dir
log = logging.getLogger(__name__)
IMAGES_URL = 'http://www.vision.caltech.edu/Image_Datasets/faces/faces.tar'
class CaltechFacesDownloader(BaseDownloader):
def __init__(self, target_dir):
super(CaltechFacesDownloader, self).__init__(target_dir)
def download(self):
ensure_dir(self.target_dir)
tar_file = self.download_file_from_web_server(IMAGES_URL, self.target_dir)
self.extract_tar_file(os.path.join(self.target_dir, tar_file), self.target_dir)
log.info('done')
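# Usage sketch (the target directory is a placeholder):
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    CaltechFacesDownloader('/tmp/caltech_faces').download()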
| 2.375
| 2
|
tests/ezros/test_exception.py
|
achillesrasquinha/rosutils
| 0
|
12782598
|
# imports - module imports
from ezros.exception import (
EzrosError
)
# imports - test imports
import pytest
def test_ezros_error():
with pytest.raises(EzrosError):
raise EzrosError
| 1.914063
| 2
|
py/ae/semantic/style.py
|
skepner/ae
| 0
|
12782599
|
<reponame>skepner/ae<filename>py/ae/semantic/style.py<gh_stars>0
import sys
import ae_backend
# ======================================================================
def style_with_one_modifier(chart: ae_backend.chart_v3.Chart, style_name: str, selector: dict[str, object], modifier: dict[str, object], priority: int) -> set[str]:
style = chart.styles()[style_name]
style.priority = priority
style.add_modifier(selector=selector, **modifier)
return set([style_name])
# ======================================================================
| 1.945313
| 2
|
scripts/schema.py
|
FlowersOfChina/You-Money
| 0
|
12782600
|
import MySQLdb
'''
A single database instance is enough for now; this script keeps its data in
sync. It inserts test data: run it after finishing a new module to add the
data dynamically.
'''
# Recreate mode: "create" to recreate, "append" to add data, "refresh" to drop and re-add
DB_OP_MODE = "append"
# Database connection username
MYSQL_CONN_NAME = "mysqlname"
# Remote database host
MYSQL_CONN_ADDR = "mysqllinkpath"
# Database login password
MYSQL_CONN_PASSWORD = "<PASSWORD>"
# Default connection charset
MYSQL_CONN_CHARSET = "utf8"
# Default database name
CREATE_DB_NAME = "you_money"
def check_db_exists(db_name,db):
'''
    Check whether the given database already exists
:param db_name:
:return:
'''
cursor = db.cursor()
cursor.execute("SHOW DATABASES")
    rows = cursor.fetchall()
for row in rows:
        if row[0] == db_name:
            return True
return False
# TODO: database creation script is not finished yet
def drop_db(db):
'''
    Drop the database if it exists, then re-create it
:return:
'''
cursor = db.cursor()
cursor.execute("DROP DATABASE IF EXISTS " + CREATE_DB_NAME)
cursor.execute("CREATE DATABASE IF NOT EXISTS " + CREATE_DB_NAME)
def create_table(tab_name,engine,charset):
'''
    Create a table
:param tab_name:
:param engine:
:param charset:
:return:
'''
pass
def append_data(sql_query):
'''
    Append data
:param sql_query:
:return:
'''
pass
def clean_table(tab_name):
'''
    Clear a table's data
:param tab_name:
:return:
'''
pass
if __name__ == '__main__':
db = MySQLdb.connect(MYSQL_CONN_ADDR,
MYSQL_CONN_NAME,
MYSQL_CONN_PASSWORD,
MYSQL_CONN_CHARSET
)
if check_db_exists(CREATE_DB_NAME,db):
pass
db.close()
| 3.46875
| 3
|
ecco_v4_py/test/test_plot_utils.py
|
owang01/ECCOv4-py
| 24
|
12782601
|
<reponame>owang01/ECCOv4-py
"""
Test routines for the tile plotting
"""
from __future__ import division, print_function
import warnings
from pathlib import Path
import numpy as np
import pytest
import ecco_v4_py as ecco
from .test_common import llc_mds_datadirs,get_test_array_2d
from ecco_v4_py.plot_utils import assign_colormap
@pytest.mark.parametrize("is_xda",[True,False])
@pytest.mark.parametrize("sequential_data, cmap_expected",
[(True,'viridis'),
(False,'RdBu_r'),
(False,'inferno')])
def test_cmap(get_test_array_2d,is_xda,sequential_data,cmap_expected):
test_arr = get_test_array_2d
test_arr = test_arr if is_xda else test_arr.values
if sequential_data:
test_arr = np.abs(test_arr)
    if cmap_expected in ('viridis', 'RdBu_r'):
        # no cmap passed: exercise the default sequential/divergent selection
        # (set(cmap_expected) iterated characters, so the old check was always False)
        cmap_test,_ = assign_colormap(test_arr)
else:
cmap_test,_ = assign_colormap(test_arr,cmap_expected)
assert cmap_test==cmap_expected
@pytest.mark.parametrize("is_xda",[True,False])
def test_cminmax_dtype(get_test_array_2d,is_xda):
"""make cmin/cmax are floats"""
test_arr = get_test_array_2d
test_arr = test_arr if is_xda else test_arr.values
_, (cmin,cmax) = assign_colormap(test_arr)
assert isinstance(cmin,float) or isinstance(cmin,np.float32)
assert isinstance(cmax,float) or isinstance(cmax,np.float32)
| 2.125
| 2
|
python_3/synthetic_data_generator/experiments/expr_generate_random_id_numbers.py
|
duttashi/valet
| 0
|
12782602
|
<filename>python_3/synthetic_data_generator/experiments/expr_generate_random_id_numbers.py
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 13 08:10:33 2020
@author: Ashish
"""
import random, string
n=10
def generate_random_id_numbers() -> list:
""" Generate dummy Health Service ID numbers similar to NHS 10 digit format
See: https://www.nhs.uk/using-the-nhs/about-the-nhs/what-is-an-nhs-number/
"""
DA_id_numbers = []
for _ in range(n):
DA_id = ''.join(random.choice(string.digits) for _ in range(3)) + '-'
DA_id += ''.join(random.choice(string.digits) for _ in range(3)) + '-'
DA_id += ''.join(random.choice(string.digits) for _ in range(4))
DA_id_numbers.append(DA_id)
return DA_id_numbers
x = generate_random_id_numbers()
print(x)
print("length: ", len(x))
| 3.609375
| 4
|
src/skmultiflow/data/stats/aggregate_stats_buffered.py
|
trajkova-elena/scikit-multiflow
| 1
|
12782603
|
<reponame>trajkova-elena/scikit-multiflow
import numpy as np
class StdDev:
"""
Taken from
https://math.stackexchange.com/questions/198336/how-to-calculate-standard-deviation-with-streaming-inputs
"""
def __init__(self, buffer):
self.buffer = buffer
def register_value(self, value):
values = self.buffer.register_value(value)
return np.std(np.array(values))
class Median:
def __init__(self, buffer):
self.buffer = buffer
def register_value(self, value):
values = self.buffer.register_value(value)
return np.median(np.array(values))
class Mean:
def __init__(self, buffer):
self.buffer = buffer
def register_value(self, value):
values = self.buffer.register_value(value)
return np.mean(np.array(values))
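# A truly streaming alternative (a sketch, not part of the original module):
# Welford's online algorithm from the StackExchange answer cited in StdDev
# avoids recomputing over the whole buffer on every update.
class StreamingStdDev:
    def __init__(self):
        self.n = 0
        self.mean = 0.0
        self.m2 = 0.0  # running sum of squared deviations from the mean
    def register_value(self, value):
        self.n += 1
        delta = value - self.mean
        self.mean += delta / self.n
        self.m2 += delta * (value - self.mean)
        # population standard deviation, matching np.std's default ddof=0
        return (self.m2 / self.n) ** 0.5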
| 3.1875
| 3
|
src/data/graph/gremlin.py
|
kabirkhan/cloud_compete_graph
| 1
|
12782604
|
import re
class GremlinQueryBuilder:
"""
Basic functions to build gremlin queries that add vertices and edges
"""
@classmethod
    def name_to_id(cls, name):
        if '(' in name:
            # drop the parenthesised suffix (str has no .idx method; use .index and slice)
            name = name[:name.index('(') - 1]
        return name.replace(' ', '-')
@classmethod
def gremlin_escape(cls, s):
return s.replace('"', '\\"').replace('$', '\\$')
@classmethod
def build_upsert_vertex_query(cls, entity_type, properties):
q = f"""g.V().has("label", "{entity_type}"){cls.get_properties_str(properties, False)}.
fold().
coalesce(unfold(),
addV("{entity_type}"){cls.get_properties_str(properties)})"""
return q
@classmethod
def build_upsert_edge_query(cls, from_id, to_id, edge_properties):
label = edge_properties["label"]
return f"""g.V("{from_id}").as('v').
V("{to_id}").
coalesce(__.inE("{label}").where(outV().as('v')),
addE("{label}").from('v'){cls.get_properties_str(edge_properties)})"""
@classmethod
def build_project_clause(cls, prop_names):
if len(prop_names) > 0:
project_output = f'.project("{prop_names[0]}"'
by_output = f'.by("{prop_names[0]}")'
for n in prop_names[1:]:
project_output += f', "{n}"'
by_output += f'.by("{n}")'
project_output += ')'
return project_output + by_output
@classmethod
def get_by_id_query(cls, _id):
return 'g.V("{}")'.format(_id)
@classmethod
def get_properties_str(cls, properties, create=True):
if create:
query_str = 'property'
else:
query_str = 'has'
properties_lower = {k.lower():v for k,v in properties.items()}
if "label" in properties_lower:
del properties_lower["label"]
output = ""
for k, v in properties_lower.items():
if isinstance(v, str):
output += '.{}("{}", "{}")'.format(query_str, k, v)
else:
output += '.{}("{}", {})'.format(query_str, k, v)
return output
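# Illustrative usage (an addition; the entity values are made up):
if __name__ == '__main__':
    query = GremlinQueryBuilder.build_upsert_vertex_query(
        'service', {'name': 'Example Service', 'category': 'compute'})
    print(query)
    print(GremlinQueryBuilder.get_by_id_query('example-service'))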
| 2.796875
| 3
|
alter_wrapper.py
|
anish-lu-yihe/MINERVA
| 0
|
12782605
|
<filename>alter_wrapper.py
import numpy as np
import alternative_implementations.minerva2 as dwhite54
from rpy2.robjects.packages import SignatureTranslatedAnonymousPackage
import rpy2.robjects.numpy2ri as rpyn
class model1(dwhite54.Minerva2):
def reset(self):
self.__init__(self.features_per_trace)
def get_memory_matrix(self):
return self.model
def learn(self, learning_data):
self.add_traces(np.reshape(learning_data, (-1, self.features_per_trace)), 0)
def respond(self, probes, recurrence = 1):
echo = probes[:]
for epoch in range(recurrence):
echo = self._echo(echo)[1]
return echo
def _echo(self, probes):
intensity, activation = self.get_echo_intensities(np.reshape(probes, (-1, self.features_per_trace)), 0)
content = np.dot(activation, self.model)
normalised_echo = content / np.amax(np.abs(content), axis = 1).reshape((-1, 1))
return intensity, normalised_echo
class model2:
def __init__(self, trace_size):
        with open('alternative_implementations/minerva-al.R') as file:
            string = ''.join(file.readlines())
        self.funs = SignatureTranslatedAnonymousPackage(string, 'functions')
# ###
self.model = 'Minerva2'
self.trace_size = trace_size
self.reset()
def reset(self):
self.memory = np.empty((0, self.trace_size))
def get_memory_matrix(self):
return self.memory
def learn(self, learning_data):
for row in np.reshape(learning_data, (-1, self.trace_size)):
past_memory, new_event = [self._py2ri(data) for data in [self.memory, row]]
new_memory = self.funs.learn(event = new_event, memory = past_memory, p_encode = 1, model = self.model)
self.memory = self._ri2py(new_memory)
def respond(self, probes, recurrence = 1):
echo = probes[:]
for epoch in range(recurrence):
echo = self._echo(echo)
return echo
def _echo(self, probes):
echo = []
for row in np.reshape(probes, (-1, self.trace_size)):
past_memory, new_probe, cueidx = [self._py2ri(data) for data in [self.memory, [row], np.arange(self.trace_size) + 1]]
r_echo = self.funs.probe_memory(probe = new_probe, memory = past_memory, cue_feature = cueidx, model = self.model)
echo.append(r_echo)
return np.asarray(echo)
def _py2ri(self, data):
return rpyn.py2ri(np.asarray(data))
def _ri2py(self, data):
return rpyn.ri2py(data)
| 2.25
| 2
|
alipay/create_direct_pay_by_user/dpn/urls.py
|
freeyoung/django-alipay3
| 0
|
12782606
|
from django.conf.urls import url
from alipay.create_direct_pay_by_user.dpn import views
urlpatterns = [
url(r'^$', views.dpn, {'item_check_callable': None}, name='alipay-dpn'),
]
| 1.484375
| 1
|
buyfree_mall/buyfree_mall/apps/areas/views.py
|
GalphaXie/E-commerce
| 0
|
12782607
|
from django.shortcuts import render
# Create your views here.
from rest_framework.viewsets import ReadOnlyModelViewSet
from rest_framework_extensions.cache.mixins import CacheResponseMixin
from areas.models import Area
from areas.serializers import AreaSerializer, SubAreaSerializer
class AreasViewSet(CacheResponseMixin, ReadOnlyModelViewSet):
"""
行政区划信息
# GET /areas/(?P<pk>\d+)/
request: pk(int)
response: id(int) name(str) subs(list)
定义 查询集 和 序列化器的类 后面的源码方法就是 get_queryset 和 get_serializer_class ,这里根据需要直接重写方法
"""
pagination_class = None # 区划信息不分页
def get_queryset(self):
"""
        Provide the queryset.
"""
if self.action == 'list':
return Area.objects.filter(parent=None)
else:
return Area.objects.all()
def get_serializer_class(self):
"""
        Provide the serializer class.
"""
if self.action == 'list':
return AreaSerializer
else:
return SubAreaSerializer
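# Hypothetical wiring sketch (an assumption; the project's actual urls.py is
# not shown in this file): a DRF router would expose this viewset at /areas/.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register(r'areas', AreasViewSet, basename='areas')
# urlpatterns = router.urls  # this registration belongs in the app's urls.py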
| 2.171875
| 2
|
lean/components/config/project_config_manager.py
|
InvestWeMust/lean-cli
| 76
|
12782608
|
<filename>lean/components/config/project_config_manager.py
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from pathlib import Path
from typing import List
from lean.components.config.storage import Storage
from lean.components.util.xml_manager import XMLManager
from lean.constants import PROJECT_CONFIG_FILE_NAME
from lean.models.config import CSharpLibrary
class ProjectConfigManager:
"""The ProjectConfigManager class manages the configuration of a single project."""
def __init__(self, xml_manager: XMLManager) -> None:
"""Creates a new ProjectConfigManager instance.
:param xml_manager: the XMLManager instance to use when parsing XML files
"""
self._xml_manager = xml_manager
def get_project_config(self, project_directory: Path) -> Storage:
"""Returns a Storage instance to get/set the configuration for a project.
:param project_directory: the path to the project to retrieve the configuration of
:return: the Storage instance containing the project-specific configuration of the given project
"""
return Storage(str(project_directory / PROJECT_CONFIG_FILE_NAME))
def get_local_id(self, project_directory: Path) -> int:
"""Returns the local id of a project.
Every Lean CLI project has a unique local id, regardless of whether the project is synchronized with the cloud.
:param project_directory: the path to the project to retrieve the local id of
:return: the local id of the given project
"""
project_config = self.get_project_config(project_directory)
if project_config.has("local-id"):
return project_config.get("local-id")
project_id = random.randint(100_000_000, 999_999_999)
project_config.set("local-id", project_id)
return project_id
def get_csharp_libraries(self, project_directory: Path) -> List[CSharpLibrary]:
"""Returns the custom C# libraries in a project.
:param project_directory: the path to the project to retrieve the C# libraries of
:return: a list containing the information of all PackageReferences in the project's .csproj file, if any
"""
csproj_file = next((p for p in project_directory.iterdir() if p.name.endswith(".csproj")), None)
if csproj_file is None:
return []
libraries = []
csproj_tree = self._xml_manager.parse(csproj_file.read_text(encoding="utf-8"))
for package_reference in csproj_tree.findall(".//PackageReference"):
name = package_reference.get("Include", None)
version = package_reference.get("Version", None)
if name is not None and version is not None:
libraries.append(CSharpLibrary(name=name, version=version))
return libraries
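# Minimal usage sketch (an addition; the project path is made up and a
# no-argument XMLManager constructor is assumed):
def _example_usage():
    manager = ProjectConfigManager(XMLManager())
    project = Path("/path/to/project")
    return manager.get_local_id(project), manager.get_csharp_libraries(project)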
| 2.234375
| 2
|
leetcode/python/easy/p953_isAlienSorted.py
|
kefirzhang/algorithms
| 0
|
12782609
|
class Solution:
def isAlienSorted(self, words, order) -> bool:
        def compareStr(word1, word2):
            # True if word1 <= word2 in the alien alphabet
            len1 = len(word1)
            len2 = len(word2)
            i = 0
            while i < len2:
                if i > len1 - 1:
                    # word1 is a proper prefix of word2, so it sorts first
                    return True
                if helper_order.index(word2[i]) == helper_order.index(word1[i]):
                    i += 1
                    continue
                elif helper_order.index(word2[i]) > helper_order.index(word1[i]):
                    return True
                else:
                    return False
            # word2 exhausted: in order only if word1 is no longer than word2
            return len1 <= len2
helper_order = list(order)
        pre_word = words[0]
        # iterate without mutating the caller's list
        for word in words[1:]:
if compareStr(pre_word, word) is False:
return False
pre_word = word
return True
slu = Solution()
print(slu.isAlienSorted(
["zirqhpfscx", "zrmvtxgelh", "vokopzrtc", "nugfyso", "rzdmvyf", "vhvqzkfqis", "dvbkppw", "ttfwryy", "dodpbbkp",
"akycwwcdog"], "khjzlicrmunogwbpqdetasyfvx"))
| 3.375
| 3
|
hexa/catalog/management/commands/sync_datasources_worker.py
|
qgerome/openhexa-app
| 4
|
12782610
|
<gh_stars>1-10
from dpq.commands import Worker
from hexa.catalog.queue import datasource_sync_queue
class Command(Worker):
queue = datasource_sync_queue
| 1.515625
| 2
|
tools_box/_hr/report/employee_advance_report/employee_advance_report.py
|
maisonarmani/Tools-Box
| 4
|
12782611
|
# Copyright (c) 2013, <EMAIL> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
columns = [
"Posting Date:Date:100",
"Employee:Link/Employee:100",
"Employee Name:Data:150",
"Advance Amount:Currency:120",
"Paid Amount:Currency:120",
"Retired Amount:Currency:130",
"Refunded Amount:Currency:130",
"Variance:Currency:120",
]
conditions = ""
if filters.get("from_date"):
conditions += "d.posting_date >= DATE('{from_date}')"
if filters.get("to_date"):
conditions += " AND d.posting_date <= DATE('{to_date}')"
if filters.get("status") and filters.get('status') != "Retired":
conditions += " AND d.status = '{status}'"
else:
conditions += " AND d.claimed_amount = d.advance_amount or AND d.refunded = d.advance_amount "
data = frappe.db.sql("SELECT d.posting_date, d.employee, d.employee_name , d.advance_amount, d.paid_amount, "
"d.claimed_amount, d.refund_amount, (d.refund_amount + d.claimed_amount - d.paid_amount) FROM "
"`tabEmployee Advance` d WHERE {0} ".format(conditions.format(**filters)), as_list=1)
return columns, data
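# Hardening sketch (an addition, not the original report): frappe.db.sql can
# bind filter values itself via %(name)s placeholders, which avoids
# interpolating user input into the SQL string.
def execute_parameterized(filters):
    return frappe.db.sql(
        "SELECT d.posting_date, d.employee, d.employee_name FROM "
        "`tabEmployee Advance` d WHERE d.posting_date >= %(from_date)s "
        "AND d.posting_date <= %(to_date)s", filters, as_list=1)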
| 2.21875
| 2
|
generate.py
|
mdsitton/pyogl
| 0
|
12782612
|
<filename>generate.py
from pglgen import pycodegen
# This is somewhat of a hack:
# it injects a variable into the builtins so that it's simple
# to check for code generation in the types system.
try:
import __builtin__ as builtins
except ImportError:
import builtins
builtins.genGL = True
def main():
apis = ['gl', 'wgl', 'glx', 'egl']
for api in apis:
apiClass = pycodegen.PyApiGen(api)
apiClass.gen_code()
if __name__ == '__main__':
main()
| 1.96875
| 2
|
src/main/python/data_pipeline/open_data_raw_material_price/core.py
|
meowpunch/bobsim-research
| 2
|
12782613
|
<reponame>meowpunch/bobsim-research
import pandas as pd
from data_pipeline.dtype import dtype, reduction_dtype
from data_pipeline.translate import translation
from data_pipeline.unit import get_unit
from utils.handle_null import NullHandler
from utils.logging import init_logger
from utils.s3_manager.manage import S3Manager
from utils.sparse import filter_sparse
from utils.visualize import draw_hist
class OpenDataRawMaterialPrice:
def __init__(self, bucket_name: str, date: str):
self.logger = init_logger()
self.date = date
# s3
# TODO: bucket_name -> parameterized
self.s3_manager = S3Manager(bucket_name=bucket_name)
self.load_key = "public_data/open_data_raw_material_price/origin/csv/{filename}.csv".format(
filename=self.date
)
self.save_key = "public_data/open_data_raw_material_price/process/csv/{filename}.csv".format(
filename=self.date
)
self.dtypes = dtype["raw_material_price"]
self.translate = translation["raw_material_price"]
# load filtered df
self.input_df = self.load()
def load(self):
"""
        fetch the DataFrame and validate it
:return: pd DataFrame
"""
# fetch
df = self.s3_manager.fetch_df_from_csv(key=self.load_key)
# TODO: no use index to get first element.
# validate (filter by column and check types)
return df[0][self.dtypes.keys()].astype(dtype=self.dtypes).rename(columns=self.translate, inplace=False)
def save(self, df: pd.DataFrame):
self.s3_manager.save_df_to_csv(df=df, key=self.save_key)
def clean(self, df: pd.DataFrame):
"""
clean null value
:return: cleaned DataFrame
"""
# pd Series represents the number of null values by column
nh = NullHandler()
df_null = nh.missing_values(df)
self.logger.info("missing values: \n {}".format(df_null))
if df_null is None:
return df
else:
return df.dropna(axis=0)
def standardize(self, s: pd.Series):
mean, std = s.mean(), s.std()
self.logger.info("{name}'s mean: {m}, std: {s}".format(name=s.name, m=mean, s=std))
stdized = s.apply(lambda x: (x - mean) / std).rename("stdized_price")
return stdized, mean, std
def save_hist(self, s: pd.Series, key):
draw_hist(s)
self.s3_manager.save_plt_to_png(key=key)
def transform(self, df: pd.DataFrame):
"""
        standardize the price column and save before/after histograms
:param df: cleaned pd DataFrame
:return: transformed pd DataFrame
"""
origin_price = df["price"]
self.save_hist(
origin_price, key="food_material_price_predict_model/image/origin_price_hist_{d}.png".format(d=self.date)
)
stdized_price, mean, std = self.standardize(origin_price)
self.save_hist(
stdized_price, key="food_material_price_predict_model/image/stdized_price_hist_{d}.png".format(d=self.date)
)
self.s3_manager.save_dump(
x=(mean, std), key="food_material_price_predict_model/price_(mean,std)_{date}.pkl".format(date=self.date))
return df.assign(price=stdized_price)
@staticmethod
def combine_categories(df: pd.DataFrame):
"""
combine 4 categories into one category 'item name'
:return: combined pd DataFrame
"""
return df.assign(
item_name=lambda
x: x.standard_item_name + x.survey_price_item_name + x.standard_breed_name + x.survey_price_type_name
).drop(
columns=["standard_item_name", "survey_price_item_name", "standard_breed_name", "survey_price_type_name"],
axis=1
)
@staticmethod
def convert_by_unit(df: pd.DataFrame):
"""
transform unit
:return: transformed pd DataFrame
"""
return df.assign(unit=lambda r: r.unit_name.map(
lambda x: get_unit(x)
            # TODO: not unit but standard unit name
)).assign(price=lambda x: x.price / x.unit).drop(columns=["unit_name", "unit"], axis=1)
def filter(self, df):
"""
ready to process
:param df: self.input_df
:return: filtered pd DataFrame
"""
        # keep only retail prices ("소비자가격" means consumer price)
retail = df[df["class"] == "소비자가격"].drop("class", axis=1)
# convert prices in standard unit
convert = self.convert_by_unit(retail)
# change sparse item name to 'others'
# TODO: solve the problem saving std_list in main of 'analysis/sparse_categories.py'
std_list = self.s3_manager.load_dump(key="food_material_price_predict_model/constants/std_list.pkl")
replaced = convert.assign(
standard_item_name=filter_sparse(column=convert["standard_item_name"], std_list=std_list)
)
# combine 4 categories into one
# combined = self.combine_categories(retail)
# prices divided by 'material grade'(grade) will be used on average.
return replaced.drop(["grade"], axis=1).groupby(
["date", "region", "standard_item_name"] # "item_name"]
).mean().reset_index()
def process(self):
"""
        process
        1. filter
        2. clean null values
        3. transform according to the data distribution
        4. add 'season' and 'is_weekend' columns
        5. save processed data to s3
        TODO: save to rdb
        :return: exit code (bool) 0:success 1:fail
"""
try:
filtered = self.filter(self.input_df)
cleaned = self.clean(filtered)
# transformed = self.transform(cleaned)
# decomposed = self.decompose_date(transformed)
self.save(cleaned)
except IOError as e:
# TODO: consider that it can repeat to save one more time
self.logger.critical(e, exc_info=True)
return 1
self.logger.info("success to process raw material price")
return 0
@staticmethod
def decompose_date(df: pd.DataFrame):
# TODO: do by argument
# add is_weekend & season column
return df.assign(
is_weekend=lambda x: x["date"].dt.dayofweek.apply(
lambda day: 1 if day > 4 else 0
),
season=lambda x: x["date"].dt.month.apply(
lambda month: (month % 12 + 3) // 3
)
)
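# Hypothetical driver sketch (bucket name and date are assumptions, not taken
# from the original pipeline):
if __name__ == "__main__":
    processor = OpenDataRawMaterialPrice(bucket_name="my-example-bucket", date="201908")
    exit_code = processor.process()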
| 2.421875
| 2
|
shop.py
|
Veikkosuhonen/craftify
| 0
|
12782614
|
<gh_stars>0
from app import db
from flask import abort
from datetime import datetime
import player
import item
class ShopItems(db.Model):
__tablename__ = "shop_item"
shopid = db.Column(db.Integer, db.ForeignKey("shop.id"), primary_key=True)
itemid = db.Column(db.Integer, db.ForeignKey("item.id"), primary_key=True)
amount = db.Column(db.Integer)
price = db.Column(db.Float)
def toDict(self):
return {
}
shop_owner = db.Table("shop_owner",
db.Column("shopid", db.Integer, db.ForeignKey("shop.id"), primary_key=True),
db.Column("playerid", db.Integer, db.ForeignKey("player.id"), primary_key=True)
)
class Shop(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), unique=True, nullable=False)
description = db.Column(db.String(50), nullable=False, default="")
creation_date = db.Column(db.Date, nullable=False, default=datetime.utcnow)
items = db.relationship("ShopItems")
owners = db.relationship("Player", secondary=shop_owner, lazy="subquery", backref=db.backref("shops", lazy=True))
def toDict(self):
return {
"id": self.id,
"name": self.name,
"description": self.description,
"creation_date": self.creation_date.strftime("%d/%m/%Y %H:%M:%S"),
"owners": list(map(lambda p: p.toDict(), self.owners))
}
def toDictAll(self):
return {
"id": self.id,
"name": self.name,
"description": self.description,
"creation_date": self.creation_date.strftime("%d/%m/%Y %H:%M:%S"),
"owners": list(map(lambda p: p.toDict(), self.owners)),
"items": list(map(lambda i: i.toDict(), self.items))
}
def createShop(name, playerName, description=""):
if name == "test":
shop = Shop(name=name, description=description, id=-1, creation_date=datetime.utcnow())
player.createPlayer(playerName)
owner = player.Player.query.filter_by(name=playerName).first()
shop.owners.append(owner)
return shop.toDict()
    if Shop.query.filter_by(name=name).first() is not None:
abort(409, "Name is taken")
shop = Shop(name=name, description=description)
owner = player.getOrCreatePlayer(playerName)
print(owner)
shop.owners.append(owner)
db.session.add(shop)
db.session.commit()
return shop.toDict()
def getShops():
return list(map(lambda s: s.toDict(), Shop.query.all()))
def getShopById(id):
s = Shop.query.filter_by(id=id).first()
    if s is None:
abort(404)
return s.toDict()
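# Illustrative usage sketch (an addition; the names are made up and a
# configured Flask app context plus database are assumed):
def demo_create_shop():
    shop = createShop("Diamond Deals", "Steve", description="Tools and ore")
    print(shop)
    print(getShops())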
| 2.484375
| 2
|
modox/command.py
|
lukpazera/modox
| 11
|
12782615
|
""" This module is a wrapper for lxu.command.BasicCommand.
It improves and simplifies command implementations including popups,
sPresetText fields, and Form Command Lists.
This is based on Adam O'Hern's Commander code but is vastly enhanced.
https://github.com/adamohern/commander
"""
import time
import traceback
import operator
import lx, lxu, lxifc
import modo
from xfrm import TransformUtils
from item import ItemUtils
from message import Message
from setup import SetupMode
from run import run
def bless(commandClass, commandName):
""" Custom bless function.
"""
commandClass.NAME = commandName
try:
lx.bless(commandClass, commandName)
except TypeError:
lx.out('Blessing failed: %s, %s' % (str(commandClass), str(commandName)))
class Argument(object):
""" Argument represents single command argument.
Arguments should be added as this class instances to the command.
"""
# These datatypes will be treated as Float values
sTYPE_FLOATs = [
'acceleration',
'angle',
'axis',
'color1',
'distance',
'float',
'force',
'light',
'mass',
'percent',
'speed',
'time',
'uvcoord'
]
# Treated as Str values
sTYPE_STRINGs = [
'date',
'datetime',
'filepath',
'string',
'vertmapname',
'&item'
]
# Treated as Str values in the MODO UI,
# but parsed into [Float, Float, Float] for use in the execute()
sTYPE_STRING_vectors = [
'angle3',
'color',
'float3',
'percent3'
]
# Treated as Int values
sTYPE_INTEGERs = [
'integer'
]
# Treated as Bool values
sTYPE_BOOLEANs = [
'boolean'
]
DATATYPES = sTYPE_FLOATs + sTYPE_STRINGs + sTYPE_STRING_vectors + sTYPE_INTEGERs + sTYPE_BOOLEANs
def __init__(self, name="", datatype=None):
self.name = name
self.label = None
self.defaultValue = None
self.datatype = None
if datatype is not None:
self.datatype = datatype.lower()
self.valuesList = None
self.valuesListUIType = None
self.flags = None
self.index = -1
self.hints = None
def __str__ (self):
""" Represent argument as its name and string datatype.
"""
reprString = "Command argument: " + self.name
if isinstance(self.datatype, str):
reprString += " type: "
reprString += self.datatype
return reprString
def __eq__(self, other):
if isinstance(other, str):
return self.name == other
elif isinstance(other, Argument):
return self.name == other.name
elif isinstance(other, int):
return self.index == other
else:
return False
class ArgumentPopupContent(object):
""" Use this class for filling contents of a popup.
"""
def __init__(self):
self._entries = []
self.iconWidth = None
self.iconHeight = None
def __len__(self):
return len(self._entries)
def __getitem__(self, key):
if not isinstance(key, int):
raise TypeError
if key >= len(self._entries):
raise KeyError
return self._entries[key]
def __iter__(self):
return iter(self._entries)
def addEntry(self, entry):
self._entries.append(entry)
def getEntry(self, index):
return self._entries[index]
@property
def entriesCount(self):
return len(self._entries)
class ArgumentPopupEntry(object):
def __init__(self, internalName="", userName=""):
self.internalName = internalName
self.userName = userName
self.iconImage = None
self.iconResource = None
class ArgumentItemsContent(object):
""" Use this class to define values for the item popup argument.
"""
def __init__(self):
self.noneOption = False
self.testOnRawItems = False # use lx.object.Item rather then modo.Item.
self.itemTestFunction = False
class ArgumentValuesListType(object):
""" When argument represents a list of values these can show up
in UI as Popup, sPresetText or Form Command List.
A popup with item list is also supported.
"""
POPUP = 1
S_PRESET_TEXT = 2
FORM_COMMAND_LIST = 3
ITEM_POPUP = 4
class Command(lxu.command.BasicCommand):
"""Wrapper for lxu.command.BasicCommand.
    Based on Adam O'Hern's commander code.
https://github.com/adamohern/commander
"""
# NAME is only used for debugging purposes.
NAME = ''
@property
def name(self):
return self.NAME
# --- Public methods, to be overriden by user.
def init(self):
""" Performs any extra initialisation steps that the command requires.
This is called from commands __init__() method.
"""
pass
def interact(self):
""" Perform interaction with user before command is actually executed.
Typically this means opening file dialogs, confirm messages, etc.
Interact() happens before command posts its dialog with arguments.
Returns
-------
bool
False if command should not be executed, True if it should go ahead.
"""
return True
def enable(self, msg):
""" Decides if the command should be enabled or disabled.
Parameters
----------
msg : modox.Message
Wrapper around lx.object.Message, use it to set disable/enable message.
Returns
-------
bool
True for enabled command, False otherwise.
"""
return True
def flags(self):
""" Command flags.
"""
return lx.symbol.fCMD_UNDO
def arguments(self):
""" Gets a list of arguments for a command.
Returns
-------
list of Argument or single Argument
Return either single or a list of Argument objects, one for each argument.
"""
return []
def getArgument(self, ident):
""" Gets argument by index or name.
Parameters
----------
ident : str or int
Either argument name or its index.
Returns
-------
Argument
Raises
------
LookupError?
"""
if type(ident) == str:
ident = self._argumentsByName[ident]
return self._argumentsList[ident]
def isArgumentSet(self, ident):
""" Returns whether given argument is set in a command or not.
Parameters
----------
ident : str or int
Either argument name or its index.
Returns
-------
bool
"""
arg = self.getArgument(ident)
return self.dyna_IsSet(arg.index)
def getArgumentValue(self, ident):
"""Return a command argument value by index.
If no argument value exists, returns the default parameter.
NOTE: The commander_args() method is simpler to use than this method.
You should probably use that one unless you have a reason to find a specific
argument by index.
:param index: (int) index of argument to retrieve
:param default: value to return if argument is not set
:returns: argument value (str, int, float, or boolean as appropriate)
"""
arg = self.getArgument(ident)
# If no value is set, return the default.
if not self.dyna_IsSet(arg.index):
return self._resolveDefaultValue(arg.defaultValue)
# TODO: I think it's about variable argument value?
#if 'variable' in self.commander_arguments()[index].get(FLAGS, []):
#datatype = self.basic_ArgType(index)
#else:
#datatype = self.commander_arguments()[index][DATATYPE].lower()
# If it's a string, use dyna_String to grab it.
if arg.datatype in Argument.sTYPE_STRINGs:
return self.dyna_String(arg.index)
        # If the value is a vector, use dyna_String to grab it, then parse it
        # into a list of float values.
elif arg.datatype in Argument.sTYPE_STRING_vectors:
return [float(i) for i in self.dyna_String(arg.index).split(" ")]
# If the value is an integer, use dyna_Int to grab it.
elif arg.datatype in Argument.sTYPE_INTEGERs:
return self.dyna_Int(arg.index)
# If the value is a float, use dyna_Float to grab it.
elif arg.datatype in Argument.sTYPE_FLOATs:
return self.dyna_Float(arg.index)
# If the value is a boolean, use dyna_Bool to grab it.
elif arg.datatype in Argument.sTYPE_BOOLEANs:
return self.dyna_Bool(arg.index)
elif arg.datatype == '&item':
return self.dyna_String(arg.index)
# If something bonkers is going on, use the default.
return self._resolveDefaultValue(arg.defaultValue)
def uiHints(self, argument, hints):
""" Set UI hints for a given argument by calling methods
on the given hints object.
"""
pass
def icon(self):
""" Returns string with icon name for command button.
"""
return None
def notifiers(self):
""" Returns a list of notifiers for a command.
Should return a list of tuples, for example:
[('notifier.editAction',''), ("select.event", "item +ldt"), ("tagger.notifier", "")]
"""
return []
def setupMode(self):
""" Sets setup mode for the command.
This will be set at the beginning of execute.
Returns
-------
bool or None
True/False to switch Setup Mode to a given state.
None to not affect setup mode (this is default).
"""
return None
def restoreSetupMode(self):
"""
Restores setup mode to its previous value once command is executed.
Returns
-------
bool
Return True to restore setup mode to its state prior to command execution.
"""
return False
def preExecute(self):
""" Called after interact() but before execute block is called.
Use this if you want to verify the command is ok to run after dialog
with command arguments was closed by user.
Returns
-------
bool
False if command should not be executed, True if it should go ahead.
"""
return True
def executeStart(self):
""" Called from within basic_Execute at the very beginning of execution code.
Use this function to perform actions from within the actual execute block
but right before execute() is called.
"""
pass
def execute(self, msg, flags):
""" This is the place for main command execution code.
"""
pass
def executeEnd(self):
""" Called from basic_Execute, after execute() was called.
Typically used for clean up/restore operations.
"""
pass
def query(self, argument):
""" Returns a value based on and argument being queried.
This method can return string, boolean, integer or float."""
return None
def enableTimersOn(self):
""" Enable/disable log output that says how long enable() takes.
This can help with optimising performance of enable().
This method should be as fast as possible so it doesn't slow down UI.
Returns
-------
bool
True to enable timers log output.
"""
return False
def queryTimersOn(self):
""" Enable/disable log output that says how long query() method takes.
This can help with optimising performance of query().
This method should be as fast as possible so it doesn't slow down UI.
Returns
-------
bool
True to enable log output.
"""
return False
def restoreItemSelection(self):
""" Restores item selection after command is executed.
Returns
-------
bool
True if item selection should be restored to a state prior to firing the command.
"""
return False
def autoFocusItemListWhenDone(self):
""" Automatically focuses item list on selected items when command execution is over.
"""
return False
def applyEditActionPre(self):
""" Applies edit action before the command is executed so there are no 'hanging' edits.
Returns
-------
bool
True if edit action should be applied.
Default is False.
"""
return False
def applyEditActionPost(self):
""" Applies edit action after the command is executed so there are no 'hanging' edits left.
Returns
-------
bool
True if edit action should be applied.
Default is False.
"""
return False
def dropToolPre(self):
"""
Drops any active tool before command execution starts.
Returns
-------
bool
True to drop a tool (if any is active).
"""
return False
# --- Private methods, do not touch.
def cmd_Flags(self):
""" Command is scene altering, undoable by default.
"""
return self.flags()
def cmd_Interact(self):
result = self.interact()
if not result:
msg = lx.object.Message(self.cmd_Message())
msg.SetCode(lx.symbol.e_ABORT)
def cmd_PreExecute(self):
result = self.preExecute()
if not result:
msg = lx.object.Message(self.cmd_Message())
msg.SetCode(lx.symbol.e_ABORT)
def cmd_Icon(self):
return self.icon()
def basic_Enable(self, msg):
if self.enableTimersOn():
timeStart = time.clock()
msgWrap = Message(msg)
enabled = self.enable(msgWrap)
if self.enableTimersOn():
timeEnd = time.clock()
lx.out("ENABLE (%s) : %f s." % (self.NAME, (timeEnd - timeStart)))
return enabled
def basic_ArgType(self, index):
pass
def cmd_DialogInit(self):
""" Sets default values for arguments in command dialogs.
Once this method is implemented MODO's default mechanism for storing
argument values is not used.
This method is called right before command's dialog pops up.
Note that this method uses command argument's .defaultValue property.
This property can be a function (or callable as a matter of fact).
If you set a function as default value it'll always be called to retrieve the
actual default value and used instead of the stored value in the dialog.
        Sadly, using a function as the default, due to the way MODO seems to work
        (possible bug), makes it impossible to set the argument in the command
        string; it will always be overridden by what the default function returns.
"""
arguments = self.arguments()
for n, argument in enumerate(arguments):
datatype = argument.datatype
defaultValue = arguments[n].defaultValue
# Default value can be a function.
# If it's a function we always want to call this function
# to get the default value. This is because sometimes MODO seems
# to report that the dyna_IsSet() for an argument even if it's not set
# and should be pulled from default value.
# In this case we do not want to miss retrieving value from function.
if hasattr(defaultValue, '__call__'):
storedValue = defaultValue()
else:
# If we already have a value, use it.
# This is especially important when a command is run with args
# via command line or form button.
if self.dyna_IsSet(n):
continue
storedValue = self._argumentValuesCache[n]
# If there's no stored value, we're done.
if not storedValue:
continue
# The correct attr_Set... method depends on datatype.
if datatype in Argument.sTYPE_STRINGs + Argument.sTYPE_STRING_vectors:
self.attr_SetString(n, str(storedValue))
elif datatype in Argument.sTYPE_INTEGERs + Argument.sTYPE_BOOLEANs:
self.attr_SetInt(n, int(storedValue))
elif datatype in Argument.sTYPE_FLOATs:
self.attr_SetFlt(n, float(storedValue))
def basic_Execute(self, msg, flags):
"""Stores recent command values for next run and wraps commander_execute
in a try/except statement with traceback.
Do NOT override this method. Use commander_execute() instead.
You should never need to touch this.
CRUCIAL: When turning off listening never just turn it back on!
Set it to whatever the state was prior to executing this command.
Otherwise, firing rs command from within other rs command is going
to mess things up. Listening will be back to True as soon as first
sub command is done.
Returns
-------
bool, None
Return False to exit command with ABORT message code.
"""
scene = modo.Scene()
self.executeStart()
if self.dropToolPre():
run('!tool.drop')
if self.restoreItemSelection():
selection = scene.selected
setupMode = SetupMode()
if self.restoreSetupMode():
setupMode.store()
if self.setupMode() is not None and setupMode.state != self.setupMode():
setupMode.state = self.setupMode()
if self.applyEditActionPre():
TransformUtils.applyEdit()
msgWrap = Message(msg)
try:
cmdResult = self.execute(msgWrap, flags)
except:
cmdResult = False
lx.out(traceback.format_exc())
if self.applyEditActionPost():
TransformUtils.applyEdit()
if self.restoreItemSelection():
scene.select(selection, add=False)
if self.restoreSetupMode():
setupMode.restore()
self.executeEnd()
if not cmdResult and cmdResult is not None:
msgWrap.setCode(Message.Code.ABORT)
return
# This is executed only when command did not abort
if self.autoFocusItemListWhenDone():
ItemUtils.autoFocusItemListOnSelection()
def cmd_Query(self, index, vaQuery):
if self.queryTimersOn():
timeStart = time.clock()
# Create the ValueArray object
va = lx.object.ValueArray()
va.set(vaQuery)
# To keep things simpler for commander users, let them return
# a value using only an index (no ValueArray nonsense)
commander_query_result = self.query(self._argumentsList[index])
# Need to add the proper datatype based on result from commander_query
if isinstance(commander_query_result, basestring):
va.AddString(commander_query_result)
elif isinstance(commander_query_result, int):
va.AddInt(commander_query_result)
elif isinstance(commander_query_result, float):
va.AddFloat(commander_query_result)
elif isinstance(commander_query_result, (modo.Item, lx.object.Item, lxu.object.Item)):
valRef = lx.object.ValueReference(va.AddEmptyValue())
valRef.SetObject(commander_query_result)
if self.queryTimersOn():
timeEnd = time.clock()
lx.out("QUERY (%s) : %f s." % (self.NAME, (timeEnd - timeStart)))
return lx.result.OK
def arg_UIHints(self, index, hints):
"""Adds pretty labels to arguments in command dialogs. If no label parameter
is explicitly included, we create a pseudo-label by capitalizing the
argument name and replacing underscores with spaces.
Labels can either be literal strings or method/function objects. In the
latter case, the method or function will be called when needed.
If any popup fields of type sPresetText are present,
adds the appropriate hint.
You should never need to touch this."""
try:
arg = self._argumentsList[index]
except IndexError:
return
# If an explicit label is provided, use it.
if arg.label is not None:
label = ""
if isinstance(arg.label, str):
label = arg.label
elif type(arg.label) == bool and arg.label:
label = arg.name.replace("_", " ").title()
# Labels can be functions. If so, run the function to get the string.
elif hasattr(arg.label, '__call__'):
                label = arg.label()  # call the label function, not the empty placeholder
# Apply the label.
if (label):
hints.Label(label)
# If the popup type is sPresetText, apply the appropriate class.
if arg.valuesListUIType == ArgumentValuesListType.S_PRESET_TEXT:
hints.Class("sPresetText")
# Allow command implementation to do its custom work.
self.uiHints(arg, hints)
def arg_UIValueHints(self, index):
"""Popups and sPresetText arguments fire this method whenever
they update. Note that the 'hints' parameter can be a literal list
or tuple, but can also be a method or function.
For dynamic lists, be sure to pass in the generator method or function object itself,
not its result. (i.e. pass in 'myGreatFunction', NOT 'myGreatFunction()')
You should never need to touch this."""
try:
arg = self._argumentsList[index]
except IndexError:
return
arg_data = None
# Try to grab the values_list for the argument.
if arg.valuesList is not None:
arg_data = arg.valuesList
# If our values_list is empty, don't bother.
if not arg_data:
return
# If the values_list is a list/tuple, use it as-is.
if isinstance(arg_data, (list, tuple)):
values = arg_data
# This is very hacky here for the time being.
# It's testing values against being the items popup content object.
elif isinstance(arg_data, ArgumentItemsContent):
values = arg_data
# If the values_list is a method/function, fire it and use the result.
elif hasattr(arg_data, '__call__'):
values = arg_data()
# In some rare cases you may want to manually instantiate your own
# popup class as a subclass of UIValueHints. In those cases, we
# ignore the below and just use yours.
# isinstance(arg_data, type) tests whether arg_data is class
# TODO: Think whether this logic has the best flow.
# the return statement here doesn't fit and breaks the flow.
if isinstance(arg_data, type) and issubclass(arg_data, lxifc.UIValueHints):
return arg_data()
# If values is None or "" or someother nonsense, return an empty list.
if not values:
values = []
# Argument can be a normal popup, an sPresetText popup, or a
# Form Command List. We'll need to return a different class
# depending on the 'values_list_type'.
if arg.valuesListUIType == ArgumentValuesListType.POPUP:
return PopupClass(values)
elif arg.valuesListUIType == ArgumentValuesListType.S_PRESET_TEXT:
return PopupClass(values)
elif arg.valuesListUIType == ArgumentValuesListType.FORM_COMMAND_LIST:
return FormCommandListClass(values)
elif arg.valuesListUIType == ArgumentValuesListType.ITEM_POPUP:
return ItemPopupClass(arg_data)
def cmd_NotifyAddClient(self, argument, object):
"""Add notifier clients as needed.
You should never need to touch this."""
for i, tup in enumerate(self._notifier_tuples):
if self._notifiers[i] is None:
self._notifiers[i] = self.not_svc.Spawn (self._notifier_tuples[i][0], self._notifier_tuples[i][1])
self._notifiers[i].AddClient(object)
def cmd_NotifyRemoveClient(self, object):
"""Remove notifier clients as needed.
You should never need to touch this."""
for i, tup in enumerate(self._notifier_tuples):
if self._notifiers[i] is not None:
self._notifiers[i].RemoveClient(object)
# -------- Private methods
def _resolveDefaultValue(self, defaultValue):
""" Resolves default value in case default value is a function.
"""
if hasattr(defaultValue, '__call__'):
return defaultValue()
return defaultValue
def _setupNotifiers(self):
# CommandClass can implement the commander_notifiers() method to update
# FormCommandLists and Popups. If implemented, add the notifiers.
self.not_svc = lx.service.NotifySys()
self._notifiers = []
self._notifier_tuples = tuple([i for i in self.notifiers()])
for i in self._notifier_tuples:
self._notifiers.append(None)
@classmethod
def _setupArgumentValuesCache(cls):
""" We manually cache all argument values between command executions during single session.
"""
try:
cls._argumentValuesCache
except AttributeError:
cls._argumentValuesCache = []
@classmethod
def _cacheArgumentDefaultValue(cls, value):
"""Add an argument to the class variable _commander_stored_values.
You should never need to touch this.
"""
cls._argumentValuesCache.append(value)
def _setupArguments(self):
""" Setup command arguments based on arguments() method.
Parse the list of Argument objects that the arguments method returns.
"""
arguments = self.arguments()
# The command does not have arguments
if not arguments:
return True
result = True
if not isinstance(arguments, list):
arguments = [arguments]
for argument in arguments:
if not isinstance(argument, Argument):
continue
if not self._addArgument(argument):
result = False
return result
def _addArgument(self, argument):
if argument.datatype is None or argument.name is None:
return False
datatype = self._resolveArgumentDatatype(argument.datatype)
if not datatype:
return False
argument.index = len(self._argumentsList)
self.dyna_Add(argument.name, datatype)
# This is setting up default value for this argument.
# If this is the first time running the command, the class variable
# _argumentValuesCache will be empty. In that case, populate it.
# This should really go on the argument level, not command class level.
if argument.index >= len(self._argumentValuesCache):
# The default value can be a function. If it's a function
# it will be called each time the command dialog is about to be opened.
# In such case do not cache the default value, just make it a None.
if hasattr(argument.defaultValue, '__call__'):
self._cacheArgumentDefaultValue(None)
else:
self._cacheArgumentDefaultValue(argument.defaultValue)
flags = self._resolveArgumentFlagsList(argument.flags)
if flags:
self.basic_SetFlags(argument.index, reduce(operator.ior, flags))
if argument.hints is not None:
self.dyna_SetHint(argument.index, argument.hints)
self._argumentsList.append(argument)
self._argumentsByName[argument.name] = argument.index
return True
def _resolveArgumentDatatype(self, datatype):
""" Resolve argument datatype into proper string that can be used by raw API.
Args:
datatype: (str) one of command argument type constants or
one of lx.symbol.sTYPE_ raw API constants.
"""
try:
resolvedDatatype = getattr(lx.symbol, 'sTYPE_' + datatype.upper())
except AttributeError:
resolvedDatatype = datatype
return resolvedDatatype
def _resolveArgumentFlagsList(self, flagsList):
if not isinstance(flagsList, list):
flagsList = [flagsList]
flags = []
for flag in flagsList:
if flag is None:
continue
try:
flags.append(getattr(lx.symbol, 'fCMDARG_' + flag.upper()))
except AttributeError:
flags.append(flag)
return flags
def __init__(self):
lxu.command.BasicCommand.__init__(self)
self._name = ""
self._argumentsList = []
self._argumentsByName = {}
self._setupArgumentValuesCache()
self._setupArguments()
self._setupNotifiers()
self.init()
class FormCommandListClass(lxifc.UIValueHints):
"""Special class for creating Form Command Lists. This is instantiated
by CommanderClass objects if an FCL argument provided.
Expects a list of valid MODO commands to be provided to init.
NOTE: Any invalid command will crash MODO.
You should never need to touch this."""
def __init__(self, items):
self._items = items
def uiv_Flags(self):
return lx.symbol.fVALHINT_FORM_COMMAND_LIST
def uiv_FormCommandListCount(self):
return len(self._items)
def uiv_FormCommandListByIndex(self,index):
return self._items[index]
class PopupClass(lxifc.UIValueHints):
"""Special class for creating popups and sPresetText fields. Accepts
either a simple list of values, or a list of (internal, user facing) tuples:
[1, 2, 3]
or
[(1, "The Number One"), (2, "The Number Two"), (3, "The Number Three")]
You should never need to touch this."""
def __init__(self, items):
self._content = ArgumentPopupContent()
if isinstance(items, (list, tuple)):
for item in items:
# If the list item is a list or tuple, assume the format (ugly, pretty)
if isinstance(item, (list, tuple)):
entry = ArgumentPopupEntry(str(item[0]), str(item[1]))
self._content.addEntry(entry)
# Otherwise just use the value for both Ugly and Pretty
else:
entry = ArgumentPopupEntry(str(item), str(item))
self._content.addEntry(entry)
elif isinstance(items, ArgumentPopupContent):
self._content = items
def uiv_Flags(self):
return lx.symbol.fVALHINT_POPUPS
def uiv_PopCount(self):
return len(self._content)
def uiv_PopUserName(self, index):
return self._content[index].userName
def uiv_PopInternalName(self,index):
return self._content[index].internalName
def uiv_PopIconSize(self):
if self._content.iconWidth is not None and self._content.iconHeight is not None:
            return (1, self._content.iconWidth, self._content.iconHeight)
lx.notimpl()
def uiv_PopIconImage(self, index):
iconImage = self._content[index].iconImage
if iconImage is not None:
return iconImage
lx.notimpl()
def uiv_PopIconResource(self, index):
iconResource = self._content[index].iconResource
if iconResource is not None:
return iconResource
lx.notimpl()
class ItemPopupClass(lxu.command.BasicHints):
"""Special class for creating popup with item list.
"""
def __init__(self, itemContent):
self._itemContent = itemContent
def uiv_Flags(self):
flags = lx.symbol.fVALHINT_ITEMS
if self._itemContent.noneOption:
flags |= lx.symbol.fVALHINT_ITEMS_NONE
return flags
def uiv_ItemTest(self, item):
# item comes here as lx.object.Unknown.
# Cast it to lx.object.Item by default.
item = lx.object.Item(item)
if not self._itemContent.testOnRawItems:
item = modo.Item(item)
return self._itemContent.itemTestFunction(item)
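# Minimal end-to-end sketch (an addition, not part of the module): defining
# and blessing a command with a single integer argument using the classes above.
class ExampleCommand(Command):
    def arguments(self):
        count = Argument(name="count", datatype="integer")
        count.defaultValue = 1
        count.label = "Count"
        return [count]
    def execute(self, msg, flags):
        count = self.getArgumentValue("count")
        lx.out("example.command ran with count=%d" % count)

# bless(ExampleCommand, "example.command")  # uncomment inside a MODO plugin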
| 2.75
| 3
|
sbin/autograder/grade_item_main_runner.py
|
scopeInfinity/Submitty
| 0
|
12782616
|
<reponame>scopeInfinity/Submitty
import configparser
import json
import os
import tempfile
import shutil
import subprocess
import stat
import time
import dateutil
import dateutil.parser
import urllib.parse
import string
import random
import socket
import zipfile
import traceback
from submitty_utils import dateutils, glob
from . import grade_items_logging, write_grade_history, CONFIG_PATH
with open(os.path.join(CONFIG_PATH, 'submitty.json')) as open_file:
OPEN_JSON = json.load(open_file)
SUBMITTY_INSTALL_DIR = OPEN_JSON['submitty_install_dir']
SUBMITTY_DATA_DIR = OPEN_JSON['submitty_data_dir']
with open(os.path.join(CONFIG_PATH, 'submitty_users.json')) as open_file:
OPEN_JSON = json.load(open_file)
DAEMON_UID = OPEN_JSON['daemon_uid']
def executeTestcases(complete_config_obj, tmp_logs, tmp_work, queue_obj, submission_string, item_name, USE_DOCKER, container, which_untrusted):
queue_time_longstring = queue_obj["queue_time"]
waittime = queue_obj["waittime"]
is_batch_job = queue_obj["regrade"]
job_id = queue_obj["job_id"]
is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"
runner_success = -1
# run the run.out as the untrusted user
with open(os.path.join(tmp_logs,"runner_log.txt"), 'w') as logfile:
print ("LOGGING BEGIN my_runner.out",file=logfile)
logfile.flush()
testcases = complete_config_obj["testcases"]
for testcase_num in range(len(testcases)):
try:
if USE_DOCKER:
                    # subprocess.call requires string arguments, so testcase_num must be converted
                    runner_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                                      os.path.join(tmp_work, 'my_runner.out'), queue_obj['gradeable'],
                                                      queue_obj['who'], str(queue_obj['version']), submission_string, str(testcase_num)], stdout=logfile)
else:
runner_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR, "sbin", "untrusted_execute"),
which_untrusted,
os.path.join(tmp_work,"my_runner.out"),
queue_obj["gradeable"],
queue_obj["who"],
str(queue_obj["version"]),
submission_string,
str(testcase_num)],
stdout=logfile)
logfile.flush()
except Exception as e:
print ("ERROR caught runner.out exception={0}".format(str(e.args[0])).encode("utf-8"),file=logfile)
logfile.flush()
print ("LOGGING END my_runner.out",file=logfile)
logfile.flush()
killall_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR, "sbin", "untrusted_execute"),
which_untrusted,
os.path.join(SUBMITTY_INSTALL_DIR, "sbin", "killall.py")],
stdout=logfile)
print ("KILLALL COMPLETE my_runner.out",file=logfile)
logfile.flush()
if killall_success != 0:
msg='RUNNER ERROR: had to kill {} process(es)'.format(killall_success)
print ("pid",os.getpid(),msg)
grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,item_name,"","",msg)
print ("execute test cases finished",file=logfile)
logfile.flush()
return runner_success
| 1.804688
| 2
|
tests/test_parsingtools.py
|
raymanP/matlab2python
| 49
|
12782617
|
<reponame>raymanP/matlab2python
import numpy as np
import unittest
from matlabparser.parsing_tools import *
# --------------------------------------------------------------------------------}
# ---
# --------------------------------------------------------------------------------{
class TestParsingTools(unittest.TestCase):
def assertEqual(self, first, second, msg=None):
#print('\n>',first,'<',' >',second,'<')
super(TestParsingTools, self).assertEqual(first, second, msg)
def test_strings(self):
self.assertEqual(string_contains_charset('g' ,r'[a-z]'),True)
self.assertEqual(string_contains_charset('09' ,r'[a-z]'),False)
self.assertEqual(string_contains_charset('0g9',r'[a-z]'),True)
self.assertEqual(previous_nonspace_pos ('01 8',8),1 )
self.assertEqual(previous_nonspace_pos (' 8',8),-1)
self.assertEqual(previous_nonspace_char('01 8',8),'1')
self.assertEqual(previous_nonspace_char(' 8',8),'')
def test_quotes(self):
# self.assertEqual(is_in_quotes("""0 '345' 7 """ ,4) ,True)
# self.assertEqual(is_in_quotes("""01'345' 7 """ ,2) ,False)
#self.assertEqual(is_in_quotes("""01'345' 7 """ ,6) ,False)
self.assertEqual(replace_inquotes("""''""" ,'X') ,'XX')
self.assertEqual(replace_inquotes("""0'23'5""" ,'X') ,'0XXXX5')
self.assertEqual(replace_inquotes("""0'2"'5""" ,'X') ,'0XXXX5')
self.assertEqual(replace_inquotes("""0"23"5""" ,'X') ,'0XXXX5')
self.assertEqual(replace_inquotes("""0'2''5'7""" ,'X') ,'0XXXXXX7')
self.assertEqual(replace_inquotes("""0'23""" ,'X') ,'0XXX')
self.assertEqual(replace_inquotes("""0"23""" ,'X') ,'0XXX')
self.assertEqual(extract_quotedstring("""''""") ,'')
self.assertEqual(extract_quotedstring("""'a'""") ,'a')
self.assertEqual(extract_quotedstring("""'a'b""") ,'a')
self.assertEqual(extract_quotedstring("""'a""") ,'a')
self.assertEqual(extract_quotedstring("""'a''a'""") ,'a\'\'a')
self.assertEqual(extract_quotedstring("""'a"a'""") ,'a"a')
self.assertEqual(extract_quotedstring('""') ,'')
#self.assertEqual(separate_comment('s='i'),(' ',' '))
| 2.78125
| 3
|
iotronic/wamp/functions.py
|
smartmeio/stack4things-openstack-iotronic
| 1
|
12782618
|
# Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from iotronic.common import rpc
from iotronic.common import states
from iotronic.conductor import rpcapi
from iotronic import objects
from iotronic.wamp import wampmessage as wm
from oslo_config import cfg
from oslo_log import log
LOG = log.getLogger(__name__)
CONF = cfg.CONF
CONF(project='iotronic')
rpc.init(CONF)
topic = 'iotronic.conductor_manager'
c = rpcapi.ConductorAPI(topic)
class cont(object):
def to_dict(self):
return {}
ctxt = cont()
def echo(data):
LOG.info("ECHO: %s" % data)
return data
def wamp_alive(board_uuid, board_name):
LOG.debug("Alive board: %s (%s)", board_uuid, board_name)
return "Iotronic alive @ " + datetime.now().strftime(
'%Y-%m-%dT%H:%M:%S.%f')
# to be removed
def alive():
LOG.debug("Alive")
return "Iotronic alive @ " + datetime.now().strftime(
'%Y-%m-%dT%H:%M:%S.%f')
def update_sessions(session_list, agent):
session_list = set(session_list)
list_from_db = objects.SessionWP.valid_list(ctxt, agent)
list_db = set([int(elem.session_id) for elem in list_from_db])
LOG.debug('Wamp session list: %s', session_list)
LOG.debug('DB session list: %s', list_db)
if session_list == list_db:
LOG.debug('Sessions on the database are updated.')
return
# list of board not connected anymore
old_connected = list_db.difference(session_list)
LOG.debug('no more valid session list: %s', old_connected)
for elem in old_connected:
old_session = objects.SessionWP.get(ctxt, elem)
if old_session.valid:
old_session.valid = False
old_session.save()
board = objects.Board.get_by_uuid(ctxt, old_session.board_uuid)
board.status = states.OFFLINE
board.save()
LOG.debug('Session updated. Board %s is now %s', board.uuid,
states.OFFLINE)
if old_connected:
LOG.warning('Some boards have been updated: status offline')
# list of board still connected
keep_connected = list_db.intersection(session_list)
LOG.debug('still valid session list: %s', keep_connected)
for elem in keep_connected:
for x in list_from_db:
if x.session_id == str(elem):
LOG.debug('%s need to be restored.', x.board_uuid)
break
if keep_connected:
LOG.warning('Some boards need to be restored.')
def board_on_leave(session_id):
    LOG.debug('A board with session %s disconnected', session_id)
try:
old_session = objects.SessionWP.get(ctxt, session_id)
if old_session.valid:
old_session.valid = False
old_session.save()
board = objects.Board.get_by_uuid(ctxt, old_session.board_uuid)
board.status = states.OFFLINE
board.save()
LOG.debug('Session updated. Board %s is now %s', board.uuid,
states.OFFLINE)
return
LOG.debug('Session %s already set to not valid', session_id)
except Exception:
LOG.debug('session %s not found', session_id)
def connection(uuid, session, info=None):
LOG.debug('Received registration from %s with session %s',
uuid, session)
try:
board = objects.Board.get_by_uuid(ctxt, uuid)
except Exception as exc:
msg = exc.message % {'board': uuid}
LOG.error(msg)
return wm.WampError(msg).serialize()
try:
old_ses = objects.SessionWP(ctxt)
old_ses = old_ses.get_session_by_board_uuid(ctxt, board.uuid,
valid=True)
old_ses.valid = False
old_ses.save()
LOG.debug('old session for %s found: %s', board.uuid,
old_ses.session_id)
except Exception:
LOG.debug('valid session for %s not found', board.uuid)
session_data = {'board_id': board.id,
'board_uuid': board.uuid,
'session_id': session}
session = objects.SessionWP(ctxt, **session_data)
session.create()
LOG.debug('new session for %s saved %s', board.uuid,
session.session_id)
board.status = states.ONLINE
if info:
LOG.debug('board infos %s', info)
if 'lr_version' in info:
if board.lr_version != info['lr_version']:
board.lr_version = info['lr_version']
if 'connectivity' in info:
board.connectivity = info['connectivity']
if 'mac_addr' in info:
board.connectivity = {"mac_addr": info['mac_addr']}
board.save()
LOG.info('Board %s (%s) is now %s', board.uuid,
board.name, states.ONLINE)
return wm.WampSuccess('').serialize()
def registration(code, session):
return c.registration(ctxt, code, session)
def board_on_join(session_id):
LOG.debug('A board with %s joined', session_id['session'])
def notify_result(board_uuid, wampmessage):
wmsg = wm.deserialize(wampmessage)
    LOG.info('Board %s completed its request %s with result: %s',
             board_uuid, wmsg.req_id, wmsg.result)
res = objects.Result.get(ctxt, board_uuid, wmsg.req_id)
res.result = wmsg.result
res.message = wmsg.message
res.save()
filter = {"result": objects.result.RUNNING,
"request_uuid": wmsg.req_id}
list_result = objects.Result.get_results_list(ctxt,
filter)
if len(list_result) == 0:
req = objects.Request.get_by_uuid(ctxt, wmsg.req_id)
req.status = objects.request.COMPLETED
req.save()
if req.main_request_uuid:
mreq = objects.Request.get_by_uuid(ctxt, req.main_request_uuid)
mreq.pending_requests = mreq.pending_requests - 1
if mreq.pending_requests == 0:
mreq.status = objects.request.COMPLETED
mreq.save()
return wm.WampSuccess('notification_received').serialize()
| 1.804688
| 2
|
test/test_encryption.py
|
tunapro1234/DBEX
| 0
|
12782619
|
<filename>test/test_encryption.py
from dbex.encryption import DBEXMetaEncrypter
from dbex.encryption import DebugEncrypter
from dbex.__init__ import Decoder # sonunda
from dbex.__init__ import Encoder
import unittest
import os
enc, dec = Encoder(), Decoder()
class TestEncryption(unittest.TestCase):
test_file = "dbex/test/TestEncryption.json"
def setUp(self):
with open(self.test_file, "w+") as file:
file.write("")
def tearDown(self):
os.remove(self.test_file)
def test_encryption(self):
kwargs = {"path": self.test_file, "encryption_obj": EmptyEncrypter()}
enc1 = Encoder(**kwargs)
dec1 = Decoder(**kwargs)
tester = []
enc1.dump(tester)
read = dec1.load()
self.assertEqual(tester, read)
def test_encryption_debug(self):
kwargs = {"path": self.test_file, "encryption_obj": DebugEncrypter()}
enc1 = Encoder(**kwargs)
dec1 = Decoder(**kwargs)
tester = ["deneme", 123]
enc1.dump(tester, max_depth="all")
read = dec1.load()
self.assertEqual(tester, read)
def test_encrypter1_dump_load(self):
kwargs = {
"path": self.test_file,
"encryption_obj": TestEncrypter1("3")
}
enc1 = Encoder(**kwargs)
dec1 = Decoder(**kwargs)
tester = ["deneme", 123]
enc1.dump(tester, max_depth="all")
read = dec1.load()
self.assertEqual(tester, read)
def test_encrypter1_dumps_loads(self):
kwargs = {
"path": self.test_file,
"encryption_obj": TestEncrypter1("3")
}
enc1 = Encoder(**kwargs)
dec1 = Decoder(**kwargs)
tester = ["deneme", 123]
result = dec1.loads(enc1.dumps(tester))
self.assertEqual(tester, result)
def test_encrypter1_1(self):
tester = "['deneme', 123]"
enx = TestEncrypter1("3")
encrypted = "".join([i for i in enx.encrypter(tester)])
decrypted = "".join([i for i in enx.decrypter(encrypted)])
self.assertEqual(decrypted, tester)
class EmptyEncrypter(metaclass=DBEXMetaEncrypter):
gen_encryption = True
gen_encrypter = None
gen_decrypter = None
def __init__(self):
self.gen_encrypter = self.encrypter
self.gen_decrypter = self.decrypter
def encrypter(self, generator, *args, **kwargs):
for i in generator:
yield i
def decrypter(self, generator, *args, **kwargs):
for i in generator:
yield i
class TestEncrypter1(metaclass=DBEXMetaEncrypter):
gen_encryption = True
gen_decrypter = None
gen_encrypter = None
def __init__(self, password=None, sep="."):
        self.password = password
self.gen_decrypter = self.decrypter
self.gen_encrypter = self.encrypter
self.sep = sep
def encrypter(self, generator, *args, **kwargs):
self.password = kwargs[
"password"] if self.password is None else self.password
for i in generator:
for char in i:
yield str(ord(char) + int(str(self.password)[0]))
yield self.sep
def decrypter(self, generator, *args, **kwargs):
self.password = kwargs[
"password"] if self.password is None else self.password
temp = ""
for i in generator:
for char in i:
if char == self.sep:
yield chr(int(temp) - int(str(self.password)[0]))
temp = ""
else:
temp += char
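# Illustrative round trip (not part of the original tests), with password "3":
#   "".join(TestEncrypter1("3").encrypter("AB")) == "68.69."
# and feeding "68.69." back through decrypter() yields "AB" again.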
| 2.890625
| 3
|
examples/00-basic/07_composite.py
|
normanrichardson/section-properties
| 1
|
12782620
|
r"""
.. _ref_ex_composite:
Creating a Composite Section
----------------------------
Create a section of mixed materials.
The following example demonstrates how to create a composite cross-section by assigning
different material properties to various regions of the mesh. A steel 310UB40.4 is modelled
with a 50Dx600W timber panel placed on its top flange.
The geometry and mesh are plotted, and the mesh information printed to the terminal
before the analysis is carried out. All types of cross-section analyses are carried
out, with an axial force, bending moment and shear force applied during the stress
analysis. Once the analysis is complete, the cross-section properties are printed
to the terminal, and plots of the centroids and cross-section stresses are generated.
"""
# sphinx_gallery_thumbnail_number = 2
import sectionproperties.pre.library.primitive_sections as sections
import sectionproperties.pre.library.steel_sections as steel_sections
from sectionproperties.pre.geometry import CompoundGeometry
from sectionproperties.pre.pre import Material
from sectionproperties.analysis.section import Section
# %%
# Create material properties
steel = Material(
name="Steel",
elastic_modulus=200e3,
poissons_ratio=0.3,
yield_strength=500,
density=8.05e-6,
color="grey",
)
timber = Material(
name="Timber",
elastic_modulus=8e3,
poissons_ratio=0.35,
yield_strength=20,
density=0.78e-6,
color="burlywood",
)
# %%
# Create 310UB40.4
ub = steel_sections.i_section(
d=304, b=165, t_f=10.2, t_w=6.1, r=11.4, n_r=8, material=steel
)
# %%
# Create timber panel on top of the UB
panel = sections.rectangular_section(d=50, b=600, material=timber)
panel = panel.align_center(ub).align_to(ub, on="top")
# Create intermediate nodes in panel to match nodes in ub
panel = (panel - ub) | panel
# %%
# Merge the two sections into one geometry object
section_geometry = CompoundGeometry([ub, panel])
# %%
# Create a mesh and a Section object. For the mesh use a mesh size of 5 for
# the UB, 20 for the panel
section_geometry.create_mesh(mesh_sizes=[5, 20])
comp_section = Section(section_geometry, time_info=True)
comp_section.display_mesh_info() # display the mesh information
# %%
# Plot the mesh with coloured materials and a line transparency of 0.6
comp_section.plot_mesh(materials=True, alpha=0.6)
# %%
# Perform a geometric, warping and plastic analysis
comp_section.calculate_geometric_properties()
comp_section.calculate_warping_properties()
comp_section.calculate_plastic_properties(verbose=True)
# %%
# Perform a stress analysis with N = 100 kN, Mxx = 120 kN.m and Vy = 75 kN
stress_post = comp_section.calculate_stress(N=-100e3, Mxx=-120e6, Vy=-75e3)
# %%
# Print the results to the terminal
comp_section.display_results()
# %%
# Plot the centroids
comp_section.plot_centroids()
# %%
# Plot the axial stress
stress_post.plot_stress_n_zz(pause=False)
# %%
# Plot the bending stress
stress_post.plot_stress_m_zz(pause=False)
# %%
# Plot the shear stress
stress_post.plot_stress_v_zxy()
| 3.015625
| 3
|
leetcode/Array/1380. Lucky Numbers in a Matrix.py
|
yanshengjia/algorithm
| 23
|
12782621
|
"""
Given a m * n matrix of distinct numbers, return all lucky numbers in the matrix in any order.
A lucky number is an element of the matrix such that it is the minimum element in its row and maximum in its column.
Example 1:
Input: matrix = [[3,7,8],[9,11,13],[15,16,17]]
Output: [15]
Explanation: 15 is the only lucky number since it is the minimum in its row and the maximum in its column
Example 2:
Input: matrix = [[1,10,4,2],[9,3,8,7],[15,16,17,12]]
Output: [12]
Explanation: 12 is the only lucky number since it is the minimum in its row and the maximum in its column.
Example 3:
Input: matrix = [[7,8],[1,2]]
Output: [7]
Solution:
3 Pass
"""
# Time: O(mn)
# Space: O(m+n)
from typing import List
class Solution:
    def luckyNumbers(self, matrix: List[List[int]]) -> List[int]:
row = len(matrix)
col = len(matrix[0])
row_min = []
col_max = []
for i in range(row):
mi = float('inf')
for j in range(col):
mi = min(matrix[i][j], mi)
row_min.append(mi)
for j in range(col):
ma = float('-inf')
for i in range(row):
ma = max(matrix[i][j], ma)
col_max.append(ma)
res = []
for i in range(row):
for j in range(col):
if matrix[i][j] == row_min[i] and matrix[i][j] == col_max[j]:
res.append(matrix[i][j])
return res
# 3 Pass
class Solution:
    def luckyNumbers(self, matrix: List[List[int]]) -> List[int]:
rmin = [min(x) for x in matrix]
cmax = [max(x) for x in zip(*matrix)]
return [matrix[i][j] for i in range(len(matrix)) for j in range(len(matrix[0])) if rmin[i] == cmax[j]]
| 3.9375
| 4
|
src/api_segura/data/__init__.py
|
PythonistaMX/py261
| 0
|
12782622
|
<gh_stars>0
CARRERAS = ['Arquitectura',
'Diseño',
'Sistemas',
'Derecho',
'Actuaría']
| 1.25
| 1
|
otree_manager/otree_manager/om/processors.py
|
chkgk/otree_manager
| 2
|
12782623
|
<filename>otree_manager/otree_manager/om/processors.py
from django.conf import settings as django_conf
def settings(request):
"""Provides easy access to settings stored in django conf"""
return {
'DEMO': django_conf.DEMO,
'MIN_WORKERS': django_conf.MIN_WORKERS,
'MAX_WORKERS': django_conf.MAX_WORKERS,
'MAX_WEB': django_conf.MAX_WEB,
}
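# Illustrative wiring (hypothetical settings.py snippet, not part of this repo):
# TEMPLATES = [{
#     ...
#     'OPTIONS': {'context_processors': [
#         ...,
#         'otree_manager.om.processors.settings',
#     ]},
# }]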
| 1.6875
| 2
|
disqus/get_posts.py
|
tonnpa/opleaders
| 1
|
12782624
|
<reponame>tonnpa/opleaders
#!/usr/bin/env python3
"""
This script retrieves all the posts pertaining to the threads in
the .json files.
Constraints
1. the files in SRC_DIR_PATH have to follow a specific naming convention:
   FROM_DATE in (yyyy-mm-dd) _ file_number [0-9999] .json
2. the maximum number of queries is 1000 (Disqus API limit)
3. only threads with more than MIN_POST_CNT posts are queried
4. DST_DIR_PATH, the directory where retrieved files are stored, has to exist
To continue from previous run, specify
1. FROM_DATE
2. FIRST_FILE: the number of the first file to process
3. LAST_THREAD_ID: the ID of the last thread that was processed
"""
__author__ = 'tonnpa'
import os
from disqus.fetch import *
FROM_DATE = '2014-01-01'
SRC_DIR_PATH = '/home/tonnpa/hvghu/2014/threads/'
DST_DIR_PATH = '/home/tonnpa/hvghu/2014/posts/'
FIRST_FILE = 207
LAST_THREAD_ID = 3418529550
MAX_QUERY_WARNING = 995
MIN_POST_CNT = 5
# count the number of files in source directory
num_files = len(os.listdir(SRC_DIR_PATH))
num_queries = 0
for file_num in range(FIRST_FILE, num_files+1):
# open JSON file and read threads into data
with open(SRC_DIR_PATH + FROM_DATE + '_' + str(file_num).zfill(4) + '.json') as file:
data = json.loads(file.read())
# process each thread
for thread in data['response']:
# skip previously processed files
if file_num == FIRST_FILE and int(thread['id']) <= LAST_THREAD_ID:
continue
# if thread has more than 5 posts, then query for all its posts
if thread['posts'] > MIN_POST_CNT:
# get url
url_posts = get_url_list_posts(thread=thread['id'])
# query url to get json data
json_posts = get_json(url_posts)
num_queries += 1
# save json data
            outfile_path = (DST_DIR_PATH + FROM_DATE + '_' +
                            str(file_num).zfill(4) + '_' +
                            str(thread['id']) + '.json')
with open(outfile_path, 'w') as outfile:
json.dump(json_posts, outfile)
segment_num = 1
# save all further comments
while json_posts['cursor']['hasNext']:
cursor_next = json_posts['cursor']['next']
url_posts = get_url_list_posts(thread=thread['id'], cursor=cursor_next)
json_posts = get_json(url_posts)
num_queries += 1
segment_num += 1
                outfile_path = (DST_DIR_PATH + FROM_DATE + '_' +
                                str(file_num).zfill(4) + '_' +
                                str(thread['id']) + '_' +
                                str(segment_num) + '.json')
with open(outfile_path, 'w') as outfile:
json.dump(json_posts, outfile)
if num_queries % 20 == 0:
print('File: ' + str(file_num).zfill(4) + ' Iteration: ' + str(num_queries))
if num_queries > MAX_QUERY_WARNING:
print('Ending process. Last Thread ID: ' + str(thread['id']))
break # looping at threads in a file
if num_queries > MAX_QUERY_WARNING:
print('Ending process. Last File Number: ' + str(file_num))
break # looping at files
| 2.59375
| 3
|
pylxd/deprecated/tests/utils.py
|
AdamIsrael/pylxd
| 0
|
12782625
|
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pylxd import api
from pylxd import exceptions as lxd_exceptions
def upload_image(image):
alias = '{}/{}/{}/{}'.format(image['os'],
image['release'],
image['arch'],
image['variant'])
lxd = api.API()
imgs = api.API(host='images.linuxcontainers.org')
d = imgs.alias_show(alias)
meta = d[1]['metadata']
tgt = meta['target']
try:
lxd.alias_update(meta)
except lxd_exceptions.APIError as ex:
if ex.status_code == 404:
lxd.alias_create(meta)
return tgt
def delete_image(image):
lxd = api.API()
lxd.image_delete(image)
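# Illustrative usage (alias fields assumed from upload_image above):
#   fingerprint = upload_image({'os': 'ubuntu', 'release': 'xenial',
#                               'arch': 'amd64', 'variant': 'default'})
#   delete_image(fingerprint)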
| 1.828125
| 2
|
asana/resources/project_memberships.py
|
FiyaFly/python-asana
| 266
|
12782626
|
<filename>asana/resources/project_memberships.py
from .gen.project_memberships import _ProjectMemberships
class ProjectMemberships(_ProjectMemberships):
"""Project Memberships resource"""
def find_by_project(self, project, params={}, **options):
"""Returns the compact project membership records for the project.
Parameters
----------
project : {Gid} The project for which to fetch memberships.
[params] : {Object} Parameters for the request
- [user] : {String} If present, the user to filter the memberships to.
"""
path = "/projects/%s/project_memberships" % (project)
return self.client.get_collection(path, params, **options)
def find_by_id(self, project_membership, params={}, **options):
"""Returns the project membership record.
Parameters
----------
project_membership : {Gid} Globally unique identifier for the project membership.
[params] : {Object} Parameters for the request
"""
path = "/project_memberships/%s" % (project_membership)
return self.client.get(path, params, **options)
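# Illustrative usage (assumes an authenticated `client` exposing this resource):
#   memberships = client.project_memberships.find_by_project("12345")
#   membership = client.project_memberships.find_by_id("67890")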
| 2.6875
| 3
|
Python_Arcade/Exercises/Platformer/platformer.py
|
npinak/Python-Projects
| 1
|
12782627
|
<reponame>npinak/Python-Projects
"""
https://learn.arcade.academy/en/latest/chapters/29_platformers/platformers.html
"""
import random
import arcade
SPRITE_SCALING = 0.5
TILE_SCALING = 0.5
GRAVITY = 0.5
DEFAULT_SCREEN_WIDTH = 800
DEFAULT_SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprite Move with Scrolling Screen Example"
# How many pixels to keep as a minimum margin between the character
# and the edge of the screen.
VIEWPORT_MARGIN = 220
# How fast the camera pans to the player. 1.0 is instant.
CAMERA_SPEED = 0.1
# How fast the character moves
PLAYER_MOVEMENT_SPEED = 7
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title, resizable=True)
# Sprite lists
self.player_list = None
self.wall_list = None
# Set up the player
self.player_sprite = None
# Physics engine.
self.physics_engine = None
        # Tile map, loaded in setup()
        self.tile_map = None
# Create the cameras. One for the GUI, one for the sprites.
self.camera_sprites = arcade.Camera(DEFAULT_SCREEN_WIDTH, DEFAULT_SCREEN_HEIGHT)
self.camera_gui = arcade.Camera(DEFAULT_SCREEN_WIDTH, DEFAULT_SCREEN_HEIGHT)
def setup(self):
""" Set up the game and initialize the variables. """
# Sprite lists
self.player_list = arcade.SpriteList()
self.wall_list = arcade.SpriteList()
# Set up the player
self.player_sprite = arcade.Sprite(":resources:images/animated_characters/female_person/femalePerson_idle.png",
scale=0.4)
self.player_sprite.center_x = 256
self.player_sprite.center_y = 512
self.player_list.append(self.player_sprite)
# Setting the map
map_name = "map.json"
# Reading in the tiled map
self.tile_map = arcade.load_tilemap(map_name, scaling=TILE_SCALING)
# Set wall SpriteList and any others that you have.
self.wall_list = self.tile_map.sprite_lists["Walls"]
# self.coin_list = self.tile_map.sprite_lists["Coins"]
# Set the background color to what is specified in the map
if self.tile_map.background_color:
arcade.set_background_color(self.tile_map.background_color)
# Physics Engine
self.physics_engine = arcade.PhysicsEnginePlatformer(
self.player_sprite, self.wall_list, gravity_constant=GRAVITY)
# Set the background color
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Select the camera we'll use to draw all our sprites
self.camera_sprites.use()
# Draw all the sprites.
self.wall_list.draw()
self.player_list.draw()
# Select the (unscrolled) camera for our GUI
self.camera_gui.use()
# Draw the GUI
arcade.draw_rectangle_filled(self.width // 2,
20,
self.width,
40,
arcade.color.ALMOND)
text = f"Scroll value: ({self.camera_sprites.position[0]:5.1f}, " \
f"{self.camera_sprites.position[1]:5.1f})"
arcade.draw_text(text, 10, 10, arcade.color.BLACK_BEAN, 20)
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
if key == arcade.key.UP:
self.player_sprite.change_y = PLAYER_MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player_sprite.change_y = -PLAYER_MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player_sprite.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player_sprite.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
# Call update on all sprites (The sprites don't do much in this
# example though.)
self.physics_engine.update()
# Scroll the screen to the player
self.scroll_to_player()
def scroll_to_player(self):
"""
Scroll the window to the player.
if CAMERA_SPEED is 1, the camera will immediately move to the desired position.
Anything between 0 and 1 will have the camera move to the location with a smoother
pan.
"""
position = self.player_sprite.center_x - self.width / 2, \
self.player_sprite.center_y - self.height / 2
self.camera_sprites.move_to(position, CAMERA_SPEED)
def on_resize(self, width, height):
"""
Resize window
Handle the user grabbing the edge and resizing the window.
"""
self.camera_sprites.resize(int(width), int(height))
self.camera_gui.resize(int(width), int(height))
def main():
""" Main function """
window = MyGame(DEFAULT_SCREEN_WIDTH, DEFAULT_SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| 3.890625
| 4
|
alembic/versions/71d46639309e_create_words_and_results_tables.py
|
TutorExilius/pyWordle
| 1
|
12782628
|
"""create_words_and_results_tables
Revision ID: 71d46639309e
Revises:
Create Date: 2022-03-29 15:45:02.382574
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "71d46639309e"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"words",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("word", sa.String(length=5, collation="NOCASE"), nullable=False),
sa.Column("enabled", sa.Boolean(), nullable=True),
sa.Column("nsfw", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id", name=op.f("pk_words")),
sa.UniqueConstraint("word", name=op.f("uq_words_word")),
)
op.create_table(
"results",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("word_id", sa.Integer(), nullable=True),
sa.Column("guessed_in_run", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["word_id"],
["words.id"],
name=op.f("fk_results_word_id_words"),
ondelete="cascade",
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_results")),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("results")
op.drop_table("words")
# ### end Alembic commands ###
| 2
| 2
|
App/ui_updates.py
|
Wizard-collab/wizard
| 0
|
12782629
|
<filename>App/ui_updates.py
# coding: utf8
# Import PyQt5 libraries
from PyQt5 import QtWidgets, QtCore, QtGui
# Import wizard gui libraries
from gui.updates import Ui_Form
# Import wizard core libraries
from wizard.vars import defaults
from wizard.prefs.main import prefs
from wizard.tools import log
from wizard.vars import updates
# Import python base libraries
import webbrowser
import os
from markdown import markdown
# Init the main logger and prefs module
logger = log.pipe_log(__name__)
prefs = prefs()
class Main(QtWidgets.QWidget):
def __init__(self):
super(Main, self).__init__()
self.ui = Ui_Form()
self.ui.setupUi(self)
'''
self.shadow = QtWidgets.QGraphicsDropShadowEffect()
self.shadow.setBlurRadius(8)
self.shadow.setColor(QtGui.QColor(0, 0, 0, 180))
self.shadow.setXOffset(0)
self.shadow.setYOffset(0)
self.setGraphicsEffect(self.shadow)
'''
self.ui.updates_history_plainTextEdit.setVisible(0)
self.connect_functions()
self.fill_updates()
def show_updates(self):
prefs.set_show_updates(self.ui.show_startup_checkBox.isChecked())
def show_history(self):
if not self.ui.updates_history_plainTextEdit.isVisible():
self.ui.updates_history_plainTextEdit.setVisible(1)
text=''
for key in updates.updates.keys():
text += updates.updates[key]
self.ui.updates_history_plainTextEdit.setText(markdown(text))
else:
self.ui.updates_history_plainTextEdit.setVisible(0)
self.ui.updates_history_plainTextEdit.clear()
def fill_updates(self):
if defaults._wizard_version_ in updates.updates.keys():
updates_text = updates.updates[defaults._wizard_version_]
else:
updates_text = "No updates"
self.ui.update_updates_plainTextEdit.setText(markdown(updates_text))
def connect_functions(self):
self.ui.update_doc_pushButton.clicked.connect(self.show_doc)
self.ui.update_web_pushButton.clicked.connect(self.show_web)
self.ui.show_startup_checkBox.stateChanged.connect(self.show_updates)
self.ui.updates_history_pushButton.clicked.connect(self.show_history)
def show_doc(self):
os.startfile(os.path.abspath(defaults._doc_index_path_))
def show_web(self):
webbrowser.open(defaults._wizard_url_, new=0, autoraise=True)
| 2.3125
| 2
|
rtf.py
|
Rafiot/viper-modules
| 5
|
12782630
|
# -*- coding: utf-8 -*-
'''
Code based on the python-oletools package by <NAME> 2012-10-18
http://www.decalage.info/python/oletools
'''
import os
import tempfile
from viper.common.abstracts import Module
from viper.core.session import __sessions__
try:
from oletools.rtfobj import RtfObjParser
from oletools import oleobj
HAVE_RTF = True
except ImportError:
HAVE_RTF = False
class Rtf(Module):
cmd = 'rtf'
description = 'RTF Parser'
authors = ['xorhex']
categories = ["document"]
def __init__(self):
super(Rtf, self).__init__()
        self.parser.add_argument('-l', "--list", action='store_true', help='List embedded objects')
self.parser.add_argument('-s', "--save", metavar='item_index', help='Save object')
def parse_rtf(self, filename, data):
'''
        The bulk of this function is taken from python-oletools: https://github.com/decalage2/oletools/blob/master/oletools/rtfobj.py
See link for license
'''
self.log('success', 'File: {name} - size: {size} bytes'.format(name=filename, size=hex(len(data))))
table = []
h = ['id', 'index', 'OLE Object']
rtfp = RtfObjParser(data)
rtfp.parse()
for rtfobj in rtfp.objects:
row = []
obj_col = []
if rtfobj.is_ole:
obj_col.append('format_id: {id} '.format(id=rtfobj.format_id))
if rtfobj.format_id == oleobj.OleObject.TYPE_EMBEDDED:
obj_col.append('(Embedded)')
elif rtfobj.format_id == oleobj.OleObject.TYPE_LINKED:
obj_col.append('(Linked)')
else:
obj_col.append('(Unknown)')
obj_col.append('class name: {cls}'.format(cls=rtfobj.class_name))
# if the object is linked and not embedded, data_size=None:
if rtfobj.oledata_size is None:
obj_col.append('data size: N/A')
else:
obj_col.append('data size: %d' % rtfobj.oledata_size)
if rtfobj.is_package:
obj_col.append('OLE Package object:')
obj_col.append('Filename: {name}'.format(name=rtfobj.filename))
obj_col.append('Source path: {path}'.format(path=rtfobj.src_path))
obj_col.append('Temp path = {path}'.format(path=rtfobj.temp_path))
obj_col.append('MD5 = {md5}'.format(md5=rtfobj.olepkgdata_md5))
# check if the file extension is executable:
_, temp_ext = os.path.splitext(rtfobj.temp_path)
self.log('debug', 'Temp path extension: {ext}'.format(ext=temp_ext))
_, file_ext = os.path.splitext(rtfobj.filename)
self.log('debug', 'File extension: %r' % file_ext)
if temp_ext != file_ext:
obj_col.append("MODIFIED FILE EXTENSION")
else:
obj_col.append('MD5 = {md5}'.format(md5=rtfobj.oledata_md5))
if rtfobj.clsid is not None:
obj_col.append('CLSID: {clsid}'.format(clsid=rtfobj.clsid))
obj_col.append(rtfobj.clsid_desc)
# Detect OLE2Link exploit
# http://www.kb.cert.org/vuls/id/921560
if rtfobj.class_name == b'OLE2Link':
obj_col.append('Possibly an exploit for the OLE2Link vulnerability (VU#921560, CVE-2017-0199)')
# Detect Equation Editor exploit
# https://www.kb.cert.org/vuls/id/421280/
elif rtfobj.class_name.lower() == b'equation.3':
obj_col.append('Possibly an exploit for the Equation Editor vulnerability (VU#421280, CVE-2017-11882)')
else:
obj_col.append('Not a well-formed OLE object')
row.append(rtfp.objects.index(rtfobj))
row.append('%08Xh' % rtfobj.start)
row.append('\n'.join(obj_col))
table.append(row)
self.log('table', dict(rows=table, header=h))
def list(self):
self.parse_rtf(__sessions__.current.file.name, __sessions__.current.file.data)
def save_ole_objects(self, data, save_object, filename):
'''
        The bulk of this function is taken from python-oletools: https://github.com/decalage2/oletools/blob/master/oletools/rtfobj.py
See link for license
'''
rtfp = RtfObjParser(data)
rtfp.parse()
try:
i = int(save_object)
objects = [rtfp.objects[i]]
except Exception as ex:
self.log('error', 'The -s option must be followed by an object index, such as "-s 2"\n{ex}'.format(ex=ex))
return
for rtfobj in objects:
i = objects.index(rtfobj)
tmp = tempfile.NamedTemporaryFile(delete=False)
if rtfobj.is_package:
self.log('info', 'Saving file from OLE Package in object #%d:' % i)
self.log('info', ' Filename = %r' % rtfobj.filename)
self.log('info', ' Source path = %r' % rtfobj.src_path)
self.log('info', ' Temp path = %r' % rtfobj.temp_path)
self.log('info', ' saving to file %s' % tmp.name)
self.log('info', ' md5 %s' % rtfobj.olepkgdata_md5)
tmp.write(rtfobj.olepkgdata)
tmp.close()
# When format_id=TYPE_LINKED, oledata_size=None
elif rtfobj.is_ole and rtfobj.oledata_size is not None:
self.log('info', 'Saving file embedded in OLE object #%d:' % i)
self.log('info', ' format_id = %d' % rtfobj.format_id)
self.log('info', ' class name = %r' % rtfobj.class_name)
self.log('info', ' data size = %d' % rtfobj.oledata_size)
# set a file extension according to the class name:
self.log('info', ' saving to file %s' % tmp.name)
self.log('info', ' md5 %s' % rtfobj.oledata_md5)
tmp.write(rtfobj.oledata)
tmp.close()
else:
self.log('info', 'Saving raw data in object #%d:' % i)
self.log('info', ' saving object to file %s' % tmp.name)
self.log('info', ' md5 %s' % rtfobj.rawdata_md5)
tmp.write(rtfobj.rawdata)
tmp.close()
if not save_object == 'all':
__sessions__.new(tmp.name)
def save(self, idx):
self.save_ole_objects(__sessions__.current.file.data, idx, __sessions__.current.file.name)
# Main starts here
def run(self):
super(Rtf, self).run()
if self.args is None:
return
if not __sessions__.is_set():
self.log('error', 'No open session. This command expects a file to be open.')
return
if not HAVE_RTF:
            self.log('error', 'Missing dependency. Install oletools (pip install oletools).')
return
if self.args.list:
self.list()
elif self.args.save:
self.save(self.args.save)
else:
self.parser.print_usage()
| 2.390625
| 2
|
papers/tests/test_views.py
|
anujmittal94/spiresearch
| 1
|
12782631
|
<reponame>anujmittal94/spiresearch
from django.test import TestCase, Client
from django.urls import reverse
from papers.models import UserRead, UserProject
from accounts.models import CustomUser
class IndexViewTest(TestCase):
def test_index(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'papers/index.html')
class ReadlistViewTest(TestCase):
def setUp(self):
u1 = CustomUser.objects.create(username="user1")
u1.set_password('password')
u1.save()
def test_readlist_not_logged_in(self):
response = self.client.get(reverse('readlist'))
self.assertRedirects(response, '/accounts/login/?next=/papers/readlist')
def test_readlist_logged_in(self):
login = self.client.login(username='user1', password='password')
response = self.client.get(reverse('readlist'))
self.assertEqual(str(response.context['user']), 'user1')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'papers/readlist.html')
def test_readlist_context(self):
login = self.client.login(username='user1', password='password')
response = self.client.get(reverse('readlist'))
self.assertEqual(str(response.context['user']), 'user1')
self.assertFalse('readlist' in response.context)
u1 = CustomUser.objects.get(username='user1')
UserRead.objects.create(user = u1, urls = 'test1.test,')
response = self.client.get(reverse('readlist'))
self.assertTrue('readlist' in response.context)
self.assertEqual(response.context['readlist'], 'test1.test')
class PaperViewTest(TestCase):
def test_paper_direct_access(self):
response = self.client.get(reverse('paper'))
self.assertRedirects(response, reverse('index'))
class ProjectsViewTest(TestCase):
def setUp(self):
u1 = CustomUser.objects.create(username="user1")
u1.set_password('password')
u1.save()
def test_projects_not_logged_in(self):
response = self.client.get(reverse('projects'))
self.assertRedirects(response, '/accounts/login/?next=/papers/projects')
def test_projects_logged_in(self):
login = self.client.login(username='user1', password='password')
response = self.client.get(reverse('projects'))
self.assertEqual(str(response.context['user']), 'user1')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'papers/projects.html')
def test_projects_context(self):
login = self.client.login(username='user1', password='password')
response = self.client.get(reverse('projects'))
self.assertEqual(str(response.context['user']), 'user1')
self.assertFalse(response.context['projects'].exists())
u1 = CustomUser.objects.get(username='user1')
up1 = UserProject.objects.create(user = u1, name = 'test1 name', description = 'test1 description', urls = 'test1.test,')
response = self.client.get(reverse('projects'))
self.assertTrue(response.context['projects'].exists())
self.assertTrue(up1 in response.context['projects'])
class ProjectViewTest(TestCase):
def setUp(self):
u1 = CustomUser.objects.create(username="user1")
u1.set_password('password')
u1.save()
u2 = CustomUser.objects.create(username="user2")
u2.set_password('password')
u2.save()
up1 = UserProject.objects.create(user = u1, name = 'test1 name', description = 'test1 description', urls = 'test1.test,')
up1.save()
up2 = UserProject.objects.create(user = u2, name = 'test2 name', description = 'test2 description', urls = 'test2.test,')
up2.save()
def test_project_not_logged_in(self):
u1 = CustomUser.objects.get(username = 'user1')
up1 = UserProject.objects.get(user = u1)
response = self.client.get(reverse('project', kwargs={'project_id':up1.id}))
self.assertRedirects(response, '/accounts/login/?next=/papers/project/'+str(up1.id))
def test_project_logged_in(self):
login = self.client.login(username='user1', password='password')
u1 = CustomUser.objects.get(username = 'user1')
up1 = UserProject.objects.get(user = u1)
response = self.client.get(reverse('project', kwargs={'project_id':up1.id}))
self.assertEqual(str(response.context['user']), 'user1')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'papers/project.html')
def test_project_context(self):
login = self.client.login(username='user1', password='password')
u1 = CustomUser.objects.get(username = 'user1')
up1 = UserProject.objects.get(user = u1)
response = self.client.get(reverse('project', kwargs={'project_id':up1.id}))
self.assertEqual(str(response.context['user']), 'user1')
self.assertTrue('projectlist' in response.context)
self.assertTrue('test1' in response.context['projectlist'])
self.assertEqual(response.context['project'], up1)
def test_other_user_project(self):
login = self.client.login(username='user1', password='password')
u2 = CustomUser.objects.get(username = 'user2')
up2 = UserProject.objects.get(user = u2)
response = self.client.get(reverse('project', kwargs={'project_id':up2.id}))
self.assertRedirects(response, reverse('projects'))
| 2.390625
| 2
|
phantasy/library/operation/lattice.py
|
archman/phantasy
| 0
|
12782632
|
<filename>phantasy/library/operation/lattice.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Lattice operations, including:
1. loading lattice
2. creating lattice
"""
import logging
import os
import re
import time
from fnmatch import fnmatch
from phantasy.facility.frib import INI_DICT
from phantasy.library.lattice import CaElement
from phantasy.library.lattice import Lattice
from phantasy.library.misc import simplify_data
from phantasy.library.misc import create_tempdir
from phantasy.library.parser import find_machine_config
from phantasy.library.parser import read_polarity
from phantasy.library.parser import read_alignment_data
from phantasy.library.pv import DataSource
#from phantasy.library.layout import build_layout
from phantasy.library.parser import Configuration
#from phantasy.library.settings import Settings
from unicorn.utils import UnicornData
__authors__ = "<NAME>"
__copyright__ = "(c) 2016-2017, Facility for Rare Isotope beams, "\
"Michigan State University"
__contact__ = "<NAME> <<EMAIL>>"
_LOGGER = logging.getLogger(__name__)
DEFAULT_MODEL_DATA_DIR = 'model_data'
def load_lattice(machine, segment=None, **kws):
"""Load segment lattice(s) from machine.
Parameters
----------
machine : str
The exact name of machine.
segment : str
Unix shell pattern to define segment of machine, if not defined,
will use default segment defined in configuration file.
Keyword Arguments
-----------------
use_cache : bool
Use cache or not, ``False`` by default.
save_cache : bool
Save cache or not, ``False`` by default.
verbose : int
If not 0, show output, 0 by default.
sort : True or False
Sort lattice with s-position or not, default is False.
prefix : str
String prefix to all channels, this parameter is crucial to the
virtual accelerator (VA) modeling, when '--pv-prefix' argument is
used when starting up the VA rather than the one defined in the
configuration file (e.g. phantasy.cfg). If this parameter is not
defined, will use the one defined by 'machine' in 'DEFAULT' section
of configuration file.
auto_monitor : bool
If set True, initialize all channels auto subscribe, default is False.
Returns
-------
ret : dict
Keys or returned dict:
- lat0name: name of lattice, default_segment or first sorted key;
- lattices: dict of loaded lattice(s);
- machname: name of the machine;
- machpath: full path of machine;
- machconf: loaded machine configuration object.
Note
----
*machine* can be a path to config dir.
"""
lat_dict = {}
use_cache = kws.get('use_cache', False)
save_cache = kws.get('save_cache', False)
verbose = kws.get('verbose', 0)
sort_flag = kws.get('sort', False)
pv_prefix = kws.get('prefix', None)
auto_monitor = kws.get('auto_monitor', False)
# if use_cache:
# try:
# loadCache(machine)
# except:
# _LOGGER.error('Lattice initialization using cache failed. ' +
# 'Will attempt initialization with other method(s).')
# save_cache = True
# else:
# # Loading from cache was successful.
# return
mconfig, mdir, mname = find_machine_config(machine, verbose=verbose,
filename=INI_DICT['INI_NAME'])
d_common = dict(mconfig.items(INI_DICT['COMMON_SECTION_NAME']))
root_data_dir = d_common.get(INI_DICT['KEYNAME_ROOT_DATA_DIR'],
INI_DICT['DEFAULT_ROOT_DATA_DIR'])
# create root_data_dir/today(fmt. YYYY-MM-DD)
today_dir_name = os.path.expanduser(os.path.join(
root_data_dir, time.strftime("%Y%m%d", time.localtime())))
if not os.path.exists(today_dir_name):
os.makedirs(today_dir_name)
work_dir = today_dir_name
# default segment and all segments defined in phantasy.ini file
default_segment = d_common.get(INI_DICT['KEYNAME_DEFAULT_SEGMENT'],
INI_DICT['DEFAULT_DEFAULT_SEGMENT'])
all_segments = d_common.get(INI_DICT['KEYNAME_SEGMENTS'],
INI_DICT['DEFAULT_SEGMENTS'])
if segment is None:
segment = default_segment
_LOGGER.info("Loading segment: '{}'".format(segment))
# filter out valid segment(s) from 'segment' string or pattern.
msects = [s for s in re.findall(r'\w+', all_segments)
if fnmatch(s, segment)]
for msect in msects:
d_msect = dict(mconfig.items(msect))
# scan server
scan_svr_url = d_msect.get(INI_DICT['KEYNAME_SCAN_SVR_URL'],
INI_DICT['DEFAULT_SCAN_SVR_URL'])
# model: code
simulation_code = d_msect.get(INI_DICT['KEYNAME_SIMULATION_CODE'],
INI_DICT['DEFAULT_SIMULATION_CODE'])
if simulation_code is not None:
simulation_code = simulation_code.upper()
# model: data
model_data_dir = d_msect.get(INI_DICT['KEYNAME_MODEL_DATA_DIR'],
DEFAULT_MODEL_DATA_DIR)
if model_data_dir is not None:
model_data_dir = os.path.expanduser(
os.path.join(work_dir, model_data_dir))
# config file
config_file = d_msect.get(INI_DICT['KEYNAME_CONFIG_FILE'],
INI_DICT['DEFAULT_CONFIG_FILE'])
if config_file is not None:
if not os.path.isabs(config_file):
config_file = os.path.join(mdir, config_file)
config = Configuration(config_file)
else:
raise RuntimeError("Lattice configuration for '%s' not specified" %
(msect,))
# # layout file
# layout_file = d_msect.get(INI_DICT['KEYNAME_LAYOUT_FILE'],
# INI_DICT['DEFAULT_LAYOUT_FILE'])
# if layout_file is not None:
# if not os.path.isabs(layout_file):
# layout_file = os.path.join(mdir, layout_file)
# layout = build_layout(layoutPath=layout_file)
# else:
# raise RuntimeError("Layout for '%s' not specified" % (msect,))
# # settings file
# settings_file = d_msect.get(INI_DICT['KEYNAME_SETTINGS_FILE'],
# INI_DICT['DEFAULT_SETTINGS_FILE'])
# if settings_file is not None:
# if not os.path.isabs(settings_file):
# settings_file = os.path.join(mdir, settings_file)
# settings = Settings(settingsPath=settings_file)
# else:
# raise RuntimeError("Settings for '%s' not specified" % (msect,))
# unicorn_file
udata_file = d_msect.get('unicorn_file', None)
if udata_file is not None:
if not os.path.isabs(udata_file):
udata_file = os.path.join(mdir, udata_file)
udata = {}
for f in UnicornData(udata_file).functions:
_d = udata.setdefault(f.ename, {})
_d[(f.from_field, f.to_field)] = f.code
_LOGGER.info("UNICORN policy will be loaded from {}.".format(
os.path.abspath(udata_file)))
else:
udata = None # no unicorn data provided
_LOGGER.warning("Default UNICORN policy will be applied.")
# misalignment_file
alignment_data_file = d_msect.get('alignment_file', None)
if alignment_data_file is not None:
if not os.path.isabs(alignment_data_file):
alignment_data_file = os.path.join(mdir, alignment_data_file)
alignment_data = read_alignment_data(alignment_data_file)
_LOGGER.info("Read alignment data from {}.".format(
os.path.abspath(alignment_data_file)))
else:
alignment_data = None
_LOGGER.warning("No aligment data is read.")
# polarity_file
pdata_file = d_msect.get('polarity_file', None)
if pdata_file is not None:
if not os.path.isabs(pdata_file):
pdata_file = os.path.join(mdir, pdata_file)
pdata = read_polarity(pdata_file)
_LOGGER.info("Device polarity data is loaded from {}.".format(
os.path.abspath(pdata_file)))
else:
pdata = None
_LOGGER.warning("Default device polarity will be applied.")
# machine type, linear (non-loop) or ring (loop)
mtype = int(d_msect.get(INI_DICT['KEYNAME_MTYPE'],
INI_DICT['DEFAULT_MTYPE']))
# channel finder service: address
cf_svr_url = d_msect.get(INI_DICT['KEYNAME_CF_SVR_URL'],
INI_DICT['DEFAULT_CF_SVR_URL'])
if cf_svr_url is None:
raise RuntimeError(
"No accelerator data source (cfs_url) available")
ds_sql_path = os.path.join(mdir, cf_svr_url)
# channel finder service: tag, and property names
cf_svr_tag0 = d_msect.get(INI_DICT['KEYNAME_CF_SVR_TAG'],
INI_DICT['DEFAULT_CF_SVR_TAG'](msect))
cf_svr_prop0 = d_msect.get(INI_DICT['KEYNAME_CF_SVR_PROP'],
INI_DICT['DEFAULT_CF_SVR_PROP'])
cf_svr_tag = [s.strip() for s in cf_svr_tag0.split(',')]
cf_svr_prop = [s.strip() for s in cf_svr_prop0.split(',')]
if re.match(r"https?://.*", cf_svr_url, re.I):
# pv data source is cfs
_LOGGER.info("Loading PV data from CFS: '%s' for '%s'" %
(cf_svr_url, msect))
ds = DataSource(source=cf_svr_url)
elif os.path.isfile(ds_sql_path):
# pv data source is sqlite/csv file
_LOGGER.info("Loading PV data from CSV/SQLite: {}".format(
os.path.abspath(ds_sql_path)))
ds = DataSource(source=ds_sql_path)
else:
_LOGGER.warning("Invalid PV data source is defined.")
raise RuntimeError("Unknown PV data source '%s'" %
cf_svr_url)
ds.get_data(tag_filter=cf_svr_tag, prop_filter=cf_svr_prop)
ds.map_property_name(INI_DICT['CF_NAMEMAP'])
# model data temp directory
if not os.path.exists(model_data_dir):
os.makedirs(model_data_dir)
data_dir = create_tempdir(prefix="data_", dir=model_data_dir)
_LOGGER.info("Model data directory: {}".format(data_dir))
# build lattice from PV data
latname = msect
pv_data = simplify_data(ds.pvdata)
tag = cf_svr_tag
src = ds.source
lat = create_lattice(latname,
pv_data,
tag,
source=src,
mtype=mtype,
mname=mname,
mpath=mdir,
mconf=mconfig,
model=simulation_code,
#layout=layout,
config=config,
#settings=settings,
udata=udata,
pdata=pdata,
alignment_data=alignment_data,
data_dir=data_dir,
sort=sort_flag,
prefix=pv_prefix,
auto_monitor=auto_monitor)
# if IMPACT_ELEMENT_MAP is not None:
# lat.createLatticeModelMap(IMPACT_ELEMENT_MAP)
lat.loop = bool(d_msect.get(INI_DICT['KEYNAME_MTYPE'],
INI_DICT['DEFAULT_MTYPE']))
lat_dict[msect] = lat
        # show more information if requested
if verbose:
n_elems = len(
[e for e in lat._get_element_list('*') if e.virtual == 0])
if msect == default_segment:
print("%s (*): %d elements" % (msect, n_elems))
else:
print("%s: %d elements" % (msect, n_elems))
print(
" BPM: %d, PM: %s, HCOR: %d, VCOR: %d, BEND: %d, QUAD: %d, SEXT: %d, SOL: %d, CAV: %d"
% (len(lat._get_element_list('BPM')),
len(lat._get_element_list('PM')),
len(lat._get_element_list('HCOR')),
len(lat._get_element_list('VCOR')),
len(lat._get_element_list('BEND')),
len(lat._get_element_list('QUAD')),
len(lat._get_element_list('SEXT')),
len(lat._get_element_list('SOL')),
len(lat._get_element_list('CAV'))))
if default_segment in lat_dict:
lat0name = default_segment
else:
lat0name = sorted(lat_dict.keys())[0]
return {'lat0name': lat0name,
'lattices': lat_dict,
'machname': mname,
'machpath': mdir,
'machconf': mconfig}
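# Illustrative usage (machine and segment names are made up):
#   ret = load_lattice('FRIB', segment='LEBT', verbose=1)
#   lat = ret['lattices'][ret['lat0name']]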
def create_lattice(latname, pv_data, tag, **kws):
"""Create high-level lattice object from PV data source.
Parameters
-----------
latname : str
Name of segment of machine, e.g. 'LINAC', 'LS1'.
pv_data : list
List of PV data, for each PV data, should be of list as:
``string of PV name, dict of properties, list of tags``.
tag : str
Only select PV data according to defined tag. e.g.
`phantasy.sys.LS1`.
Keyword Arguments
-----------------
source : str
Source of PV data, URL of channel finder service, file name of SQLite
database or csv spreadsheet.
mtype : int
Machine type, 0 for linear (default), 1 for a ring.
model : str
Model code, 'FLAME' or 'IMPACT', 'FLAME' by default.
udata : dict
Scaling law functions, ename as the keys (1st level), (from_field, to_field) as 2nd level
keys, function object as the values, i.e. {ename: {(f1, f2): fn1, ...}, ...}
pdata : dict
Device polarity, key-value pairs of device polarity.
alignment_data : DataFrame
Dataframe for alignment info, indexed by element name.
data_dir: str
Path of directory to host data generated from model, including input
lattice files, output files and other related files, if not defined,
random folder will be created in system temporary directory,
e.g.'/tmp/model_hGe1sq'.
#layout :
# Lattice layout object.
config :
Lattice configuration object.
settings :
Lattice settings object.
sort : True or False
Sort lattice with s-position or not, default is False.
prefix : str
String prefix to all channels, this parameter is crucial to the
virtual accelerator (VA) modeling, when '--pv-prefix' argument is
used when starting up the VA rather than the one defined in the
configuration file (e.g. phantasy.cfg). If this parameter is not
defined, will use the one defined by 'machine' in 'DEFAULT' section
of configuration file.
auto_monitor : bool
If set True, initialize all channels auto subscribe, default is False.
Returns
---------
lat :
High-level lattice object.
Note
----
Usually, *src* could be obtained from *source* attribute of ``DataSource``
instance, which can handle general PV data source type, including: channel
finder service, SQLite database, CSV file, etc.
See Also
--------
:class:`~phantasy.library.lattice.lattice.Lattice`
High-level lattice.
:class:`~phantasy.library.pv.datasource.DataSource`
Unified data source class for PVs.
"""
udata = kws.get('udata', None)
pdata = kws.get('pdata', None)
alignment_data = kws.get('alignment_data', None)
data_source = kws.get('source', None)
prefix = kws.get('prefix', None)
auto_monitor = kws.get('auto_monitor', False)
config = kws.get('config', None)
if config is not None:
pv_prefix = config.get_default('machine')
if prefix is not None:
pv_prefix = prefix
if data_source is None:
_LOGGER.warning("PV data source type should be explicitly defined.")
_LOGGER.debug("Creating lattice '{0}' from '{1}'.".format(latname, data_source))
_LOGGER.info("Found {0:d} PVs in '{1}'.".format(len(pv_data), latname))
if isinstance(tag, str):
tag = tag,
# create a new lattice
lat = Lattice(latname, **kws)
# set up lattice
for pv_name, pv_props, pv_tags in pv_data:
_LOGGER.debug("Processing {0}.".format(pv_name))
# skip if property is None
if pv_props is None:
continue
# skip if tag does not match
if pv_name and not set(tag).issubset(set(pv_tags)):
_LOGGER.debug("{0} is not tagged as {1}.".format(pv_name, tag))
continue
# element name is mandatory ('elemName' -> 'name')
if 'name' not in pv_props:
continue
name = pv_props.get('name')
# begin and end s position
if 'se' in pv_props:
pv_props['sb'] = float(pv_props['se']) \
- float(pv_props.get('length', 0.0))
elem = lat._find_exact_element(name=name)
if elem is None:
try:
elem = CaElement(**pv_props, auto_monitor=auto_monitor)
            except Exception:
_LOGGER.error(
"Error: creating element '{0}' with '{1}'.".format(
name, pv_props))
raise RuntimeError("Creating element ERROR.")
_LOGGER.debug("Created new element: '{0}'".format(name))
lat.insert(elem, trust=True)
_LOGGER.debug("Inserted {}".format(elem.name))
else:
_LOGGER.debug(
"Element '{0}' exists, only update properties.".format(name))
# update element
if pv_name:
# add prefix
pv_name_prefixed = prefix_pv(pv_name, pv_prefix)
# add 'u_policy' as keyword argument
            # this policy should be created from unicorn_policy
# new u_policy: {(f1, f2): fn1, ...} or None
if udata is None:
u_policy = {}
else:
u_policy = udata.get(elem.name, {})
# polarity info
polarity = get_polarity(elem.name, pdata)
# alignment info
alignment_series = get_alignment_series(elem.name, alignment_data)
elem.process_pv(pv_name_prefixed, pv_props, pv_tags,
u_policy=u_policy, polarity=polarity,
alignment_series=alignment_series,
auto_monitor=auto_monitor)
# update group
lat.update_groups()
# init design settings for all elements
lat.load_settings(stype='design')
# sort lattice or not
if kws.get('sort', False):
lat.sort(inplace=True)
# lattice length
lat.s_begin, lat.s_end, lat.length = lat.get_layout_length()
# link layout elements to lattice elements
lat.refresh_with_layout_info()
_LOGGER.info("'{0:s}' has {1:d} elements".format(lat.name, lat.size()))
return lat
def prefix_pv(pv, prefix):
"""Prefix *pv* with *prefix:* if *prefix* is not empty and None.
"""
if pv.startswith('_#_'):
return pv[3:]
m = re.match("(.*:)?(.*):(.*):(.*)", pv)
if m is None:
chanprefix = prefix
elif m.group(1) is None:
chanprefix = prefix
else:
chanprefix = ''
if chanprefix != '':
return '{}:{}'.format(chanprefix, pv)
else:
return pv
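# Illustrative behaviour (PV names are made up):
#   prefix_pv('SYS:DEV:FLD', 'VA')     -> 'VA:SYS:DEV:FLD'   (prefix added)
#   prefix_pv('VA:SYS:DEV:FLD', 'XX')  -> 'VA:SYS:DEV:FLD'   (already prefixed, unchanged)
#   prefix_pv('_#_SYS:DEV:FLD', 'VA')  -> 'SYS:DEV:FLD'      ('_#_' marker stripped)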
def get_polarity(ename, pdata=None):
"""Get device polarity from *pdata*.
"""
if pdata is None:
return 1
else:
return pdata.get(ename, 1)
def get_alignment_series(ename, alignment_data=None):
"""Get a Series of alignment data from *alignment_data*.
"""
if alignment_data is None:
return None
else:
try:
r = alignment_data.loc[ename]
except KeyError:
r = None
finally:
return r
| 2.171875
| 2
|
aria/utils/process.py
|
enricorusso/incubator-ariatosca
| 1
|
12782633
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Process utilities.
"""
import os
def append_to_path(*args, **kwargs):
"""
Appends one or more paths to the system path of an environment.
The environment will be that of the current process unless another is passed using the
'env' keyword argument.
:param args: paths to append
:param kwargs: 'env' may be used to pass a custom environment to use
"""
_append_to_path('PATH', *args, **kwargs)
def append_to_pythonpath(*args, **kwargs):
"""
Appends one or more paths to the python path of an environment.
The environment will be that of the current process unless another is passed using the
'env' keyword argument.
:param args: paths to append
:param kwargs: 'env' may be used to pass a custom environment to use
"""
_append_to_path('PYTHONPATH', *args, **kwargs)
def _append_to_path(path, *args, **kwargs):
env = kwargs.get('env') or os.environ
env[path] = '{0}{1}{2}'.format(
os.pathsep.join(args),
os.pathsep,
env.get(path, '')
)
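# Illustrative usage (paths are made up):
#   env = dict(os.environ)
#   append_to_path('/opt/tools/bin', env=env)
#   append_to_pythonpath('/opt/tools/lib/python', env=env)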
| 2.109375
| 2
|
notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/Interview-Problems/LeetCode/ReverseBits.py
|
side-projects-42/INTERVIEW-PREP-COMPLETE
| 13
|
12782634
|
<reponame>side-projects-42/INTERVIEW-PREP-COMPLETE<filename>notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/Interview-Problems/LeetCode/ReverseBits.py
class Solution:
def reverseBits(self, n: int) -> int:
s = str(bin(n))
s = s[2:]
s = "0" * (32 - len(s)) + s
s = int(s[::-1], 2)
return s
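# Illustrative check: reversing n = 1 over 32 bits gives
# 0b1000...0 == 2147483648 (2**31).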
| 3.609375
| 4
|
addons/network/model/digitalocean/Size.py
|
nhomar/odoo-network
| 0
|
12782635
|
class Size(object):
def __init__(self, client_id="", api_key=""):
self.client_id = client_id
self.api_key = api_key
self.name = None
self.id = None
self.memory = None
self.cpu = None
self.disk = None
self.cost_per_hour = None
self.cost_per_month = None
| 2.640625
| 3
|
backend/instruments/api/v1/urls.py
|
codepanda64/logs-and-metas-for-stations
| 0
|
12782636
|
<reponame>codepanda64/logs-and-metas-for-stations
from django.urls import path, include
from rest_framework.routers import DefaultRouter, SimpleRouter
from . import views
app_name = "instruments"
router = SimpleRouter()
router.register(r"models", views.InstrumentModelViewSet)
router.register(r"entities", views.InstrumentEntityViewSet)
# router.register(
# r"models/(?P<instrument_model_id>[0-9]+)/entities", views.InstrumentEntityViewSet
# )
urlpatterns = []
urlpatterns += router.urls
| 2.109375
| 2
|
7-assets/past-student-repos/_Individual-Projects/Computer-Architecture-Notes-master/lectureII/beejMachine.py
|
eengineergz/Lambda
| 1
|
12782637
|
import sys
PRINT_BEEJ = 1
HALT = 2
PRINT_NUM = 3
SAVE = 4
PRINT_REGISTER = 5
ADD = 6
'''
SAVE takes 2 arguments
saves value in [ARG1] to register [ARG2]
'''
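# Illustrative program file (hypothetical input to load_memory), one integer
# per line; it saves 42 to register 0, prints it, then halts:
#   4    # SAVE
#   42   # value
#   0    # register 0
#   5    # PRINT_REGISTER
#   0    # register 0
#   2    # HALT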
register = [0] * 8
memory = [0] * 128 # 128 bytes of RAM
def load_memory(filename):
try:
address = 0
with open(filename) as f:
for line in f:
# Split before and after any comment symbols
comment_split = line.split("#")
num = comment_split[0].strip()
# Ignore blanks
if num == "":
continue
value = int(num)
memory[address] = value
address += 1
except FileNotFoundError:
print(f"{sys.argv[0]}: {sys.argv[1]} not found")
sys.exit(2)
if len(sys.argv) != 2:
print("usage: simple.py <filename>", file=sys.stderr)
sys.exit(1)
filepath = sys.argv[1]
load_memory(filepath)
pc = 0
running = True
while running:
command = memory[pc]
if command == PRINT_BEEJ:
print("Beej!")
pc += 1
elif command == PRINT_NUM:
num = memory[pc + 1]
print(num)
pc += 2
elif command == SAVE:
num = memory[pc + 1]
reg = memory[pc + 2]
register[reg] = num
pc += 3
elif command == PRINT_REGISTER:
reg = memory[pc + 1]
print(register[reg])
pc += 2
elif command == ADD:
reg_a = memory[pc + 1]
reg_b = memory[pc + 2]
register[reg_a] += register[reg_b]
pc += 3
elif command == HALT:
running = False
pc += 1
else:
print(f"Unknown instruction: {command}")
sys.exit(1)
| 3.578125
| 4
|
authentik/outposts/migrations/0001_initial.py
|
BeryJu/passbook
| 15
|
12782638
|
<gh_stars>10-100
# Generated by Django 3.1 on 2020-08-25 20:45
import uuid
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("authentik_core", "0008_auto_20200824_1532"),
]
operations = [
migrations.CreateModel(
name="Outpost",
fields=[
(
"uuid",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("name", models.TextField()),
(
"channels",
django.contrib.postgres.fields.ArrayField(
base_field=models.TextField(), size=None
),
),
("providers", models.ManyToManyField(to="authentik_core.Provider")),
],
),
]
| 1.898438
| 2
|
juggling_management/config/juggling_management.py
|
geniusupgrader/juggling_management
| 1
|
12782639
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label":_("Juggling Management"),
"items": [
{
"type": "doctype",
"name": "Jugglingtrick_juggling_management",
"label": _("Jugglingtrick"),
"description": _("Description of Jugglingtricks"),
},
{
"type": "doctype",
"name": "Routine_juggling_management",
"label": _("Routine"),
"description": _("Description of Lists"),
},
{
"type": "doctype",
"name": "Category_juggling_management",
"label": _("Category"),
"description": _("Description of Categories"),
},
{
"type": "doctype",
"name": "Prop_juggling_management",
"label": _("Prop"),
"description": _("Description of Props"),
}
]
}
]
| 1.875
| 2
|
Enhance Main Window (877182321) by Arthur-Milchior/consts.py
|
kb1900/Anki-Addons
| 1
|
12782640
|
QUEUE_SCHED_BURIED = -3
QUEUE_USER_BURIED = -2
QUEUE_SUSPENDED = -1
QUEUE_NEW_CRAM = 0
QUEUE_LRN = 1
QUEUE_REV = 2
QUEUE_DAY_LRN = 3
QUEUE_PREVIEW = 4
| 1
| 1
|
n_factorial.py
|
leantab/algorithms
| 0
|
12782641
|
# find the k-th lexicographic permutation of 1..N (via the factorial number system)
n = 4
k = 22
def find_k_fact(n, k):
k -= 1 # we are using base 0
permutation = []
unused = list(range(1, n+1))
fact = [1]*(n+1)
for i in range(1, n+1):
fact[i] = i*fact[i-1]
# fact = 0: 1, 1:1, 2: 2*1=2, 3: 3*2=6, 4: 4*6=24, 5: 5*24=120....
while n > 0:
parts = fact[n]//n # number of sections 24//4=6
i = k//parts
permutation.append(unused[i])
unused.pop(i)
n -= 1
k = k % parts
return ''.join(map(str, permutation))
print(find_k_fact(n, k))
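# Worked check: with n=4 and k=22 (1-based), this prints '4231',
# the 22nd permutation of [1, 2, 3, 4] in lexicographic order.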
| 3.3125
| 3
|
tests/adapters/event_store/message_db_event_store/tests.py
|
mpsiva89/protean
| 0
|
12782642
|
import pytest
from protean import Domain
from protean.adapters.event_store.message_db import MessageDBStore
from protean.exceptions import ConfigurationError
@pytest.mark.message_db
class TestMessageDBEventStore:
def test_retrieving_message_store_from_domain(self, test_domain):
assert test_domain.event_store is not None
assert test_domain.event_store.store is not None
assert isinstance(test_domain.event_store.store, MessageDBStore)
def test_error_on_message_db_initialization(self):
domain = Domain()
domain.config["EVENT_STORE"][
"PROVIDER"
] = "protean.adapters.event_store.message_db.MessageDBStore"
domain.config["EVENT_STORE"][
"DATABASE_URI"
] = "postgresql://message_store@localhost:5433/dummy"
with pytest.raises(ConfigurationError) as exc:
domain.event_store.store._write(
"testStream-123", "Event1", {"foo": "bar"}, {"kind": "EVENT"}
)
assert 'FATAL: database "dummy" does not exist' in str(exc.value)
# Reset config value. # FIXME Config should be an argument to the domain
domain.config["EVENT_STORE"][
"PROVIDER"
] = "protean.adapters.event_store.memory.MemoryEventStore"
domain.config["EVENT_STORE"].pop("DATABASE_URI")
def test_write_to_event_store(self, test_domain):
position = test_domain.event_store.store._write(
"testStream-123", "Event1", {"foo": "bar"}
)
assert position == 0
def test_multiple_writes_to_event_store(self, test_domain):
for i in range(5):
position = test_domain.event_store.store._write(
"testStream-123", "Event1", {"foo": f"bar{i}"}
)
position = test_domain.event_store.store._write(
"testStream-123", "Event1", {"foo": "bar"}
)
assert position == 5
def test_reading_stream_message(self, test_domain):
test_domain.event_store.store._write("testStream-123", "Event1", {"foo": "bar"})
messages = test_domain.event_store.store._read("testStream-123")
assert len(messages) == 1
assert messages[0]["position"] == 0
assert messages[0]["data"] == {"foo": "bar"}
def test_reading_multiple_stream_messages(self, test_domain):
for i in range(5):
test_domain.event_store.store._write(
"testStream-123", "Event1", {"foo": f"bar{i}"}
)
messages = test_domain.event_store.store._read("testStream-123")
assert len(messages) == 5
assert messages[4]["data"] == {"foo": "bar4"}
def test_reading_category_message(self, test_domain):
test_domain.event_store.store._write("testStream-123", "Event1", {"foo": "bar"})
messages = test_domain.event_store.store._read("testStream")
assert len(messages) == 1
assert messages[0]["position"] == 0
assert messages[0]["data"] == {"foo": "bar"}
def test_reading_multiple_category_messages(self, test_domain):
for i in range(5):
test_domain.event_store.store._write(
"testStream-123", "Event1", {"foo": f"bar{i}"}
)
messages = test_domain.event_store.store._read("testStream")
assert len(messages) == 5
assert messages[4]["data"] == {"foo": "bar4"}
def test_reading_targeted_stream_messages(self, test_domain):
for i in range(5):
test_domain.event_store.store._write(
"testStream-123", "Event1", {"foo": f"bar{i}"}
)
for i in range(5):
test_domain.event_store.store._write(
"testStream-456", "Event1", {"foo": f"baz{i}"}
)
messages = test_domain.event_store.store._read("testStream-456")
assert len(messages) == 5
assert messages[4]["data"] == {"foo": "baz4"}
def test_read_last_message(self, test_domain):
for i in range(5):
test_domain.event_store.store._write(
"testStream-123", "Event1", {"foo": f"bar{i}"}
)
message = test_domain.event_store.store._read_last_message("testStream-123")
assert message["position"] == 4
def test_read_last_message_when_there_are_no_messages(self, test_domain):
message = test_domain.event_store.store._read_last_message("foo-bar")
assert message is None
| 2.03125
| 2
|
pymeta/boot.py
|
set-soft/pymeta3
| 12
|
12782643
|
# -*- test-case-name: pymeta.test.test_grammar -*-
"""
The definition of PyMeta's language is itself a PyMeta grammar, but something
has to be able to read that. Most of the code in this module is generated from
that grammar (in future versions, it will hopefully all be generated).
"""
import string
from pymeta.runtime import OMetaBase, ParseError, EOFError, expected
class BootOMetaGrammar(OMetaBase):
"""
The bootstrap grammar, generated from L{pymeta.grammar.OMetaGrammar} via
L{pymeta.builder.PythonBuilder}.
"""
globals = globals()
def __init__(self, input):
OMetaBase.__init__(self, input)
self._ruleNames = []
def parseGrammar(self, name, builder, *args):
"""
Entry point for converting a grammar to code (of some variety).
@param name: The name for this grammar.
@param builder: A class that implements the grammar-building interface
(interface to be explicitly defined later)
"""
self.builder = builder(name, self, *args)
res, err = self.apply("grammar")
try:
x = self.input.head()
except EOFError:
pass
else:
raise err
return res
def applicationArgs(self):
args = []
while True:
try:
(arg, endchar), err = self.pythonExpr(" >")
if not arg:
break
args.append(self.builder.expr(arg))
if endchar == '>':
break
except ParseError:
break
if args:
return args
else:
raise ParseError(self.input.position, expected("python expression"))
def ruleValueExpr(self):
(expr, endchar), err = self.pythonExpr(endChars="\r\n)]")
if str(endchar) in ")]":
self.input = self.input.prev()
return self.builder.expr(expr)
def semanticActionExpr(self):
return self.builder.action(self.pythonExpr(')')[0][0])
def semanticPredicateExpr(self):
expr = self.builder.expr(self.pythonExpr(')')[0][0])
return self.builder.pred(expr)
def eatWhitespace(self):
"""
Consume input until a non-whitespace character is reached.
"""
consumingComment = False
e = None
while True:
try:
c, e = self.input.head()
except EOFError:
break
t = self.input.tail()
if c.isspace() or consumingComment:
self.input = t
if c == '\n':
consumingComment = False
elif c == '#':
consumingComment = True
else:
break
return True, e
rule_spaces = eatWhitespace
def rule_number(self):
_locals = {'self': self}
self.locals['number'] = _locals
_G_apply_1, lastError = self._apply(self.rule_spaces, "spaces", [])
self.considerError(lastError)
def _G_or_2():
_G_exactly_1, lastError = self.exactly('-')
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_barenumber, "barenumber", [])
self.considerError(lastError)
_locals['x'] = _G_apply_2
_G_python_3, lastError = eval('self.builder.exactly(-x)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def _G_or_3():
_G_apply_1, lastError = self._apply(self.rule_barenumber, "barenumber", [])
self.considerError(lastError)
_locals['x'] = _G_apply_1
_G_python_2, lastError = eval('self.builder.exactly(x)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
_G_or_4, lastError = self._or([_G_or_2, _G_or_3])
self.considerError(lastError)
return (_G_or_4, self.currentError)
def rule_barenumber(self):
_locals = {'self': self}
self.locals['barenumber'] = _locals
def _G_or_1():
_G_exactly_1, lastError = self.exactly('0')
self.considerError(lastError)
def _G_or_2():
def _G_or_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
def _G_or_2():
_G_exactly_1, lastError = self.exactly('X')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
def _G_many_4():
_G_apply_1, lastError = self._apply(self.rule_hexdigit, "hexdigit", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_5, lastError = self.many(_G_many_4)
self.considerError(lastError)
_locals['hs'] = _G_many_5
_G_python_6, lastError = eval("int(''.join(hs), 16)", self.globals, _locals), None
self.considerError(lastError)
return (_G_python_6, self.currentError)
def _G_or_3():
def _G_many_1():
_G_apply_1, lastError = self._apply(self.rule_octaldigit, "octaldigit", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_2, lastError = self.many(_G_many_1)
self.considerError(lastError)
_locals['ds'] = _G_many_2
_G_python_3, lastError = eval("int('0'+''.join(ds), 8)", self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
_G_or_4, lastError = self._or([_G_or_2, _G_or_3])
self.considerError(lastError)
return (_G_or_4, self.currentError)
def _G_or_2():
def _G_many1_1():
_G_apply_1, lastError = self._apply(self.rule_digit, "digit", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many1_2, lastError = self.many(_G_many1_1, _G_many1_1())
self.considerError(lastError)
_locals['ds'] = _G_many1_2
_G_python_3, lastError = eval("int(''.join(ds))", self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
def rule_octaldigit(self):
_locals = {'self': self}
self.locals['octaldigit'] = _locals
_G_apply_1, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['x'] = _G_apply_1
def _G_pred_2():
_G_python_1, lastError = eval('x in string.octdigits', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_pred_3, lastError = self.pred(_G_pred_2)
self.considerError(lastError)
_G_python_4, lastError = eval('x', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
def rule_hexdigit(self):
_locals = {'self': self}
self.locals['hexdigit'] = _locals
_G_apply_1, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['x'] = _G_apply_1
def _G_pred_2():
_G_python_1, lastError = eval('x in string.hexdigits', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_pred_3, lastError = self.pred(_G_pred_2)
self.considerError(lastError)
_G_python_4, lastError = eval('x', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
def rule_escapedChar(self):
_locals = {'self': self}
self.locals['escapedChar'] = _locals
_G_exactly_1, lastError = self.exactly('\\')
self.considerError(lastError)
def _G_or_2():
_G_exactly_1, lastError = self.exactly('n')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\n"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_3():
_G_exactly_1, lastError = self.exactly('r')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\r"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_4():
_G_exactly_1, lastError = self.exactly('t')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\t"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_5():
_G_exactly_1, lastError = self.exactly('b')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\b"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_6():
_G_exactly_1, lastError = self.exactly('f')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\f"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_7():
_G_exactly_1, lastError = self.exactly('"')
self.considerError(lastError)
_G_python_2, lastError = eval('\'"\'', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_8():
_G_exactly_1, lastError = self.exactly("'")
self.considerError(lastError)
_G_python_2, lastError = eval('"\'"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_9():
_G_exactly_1, lastError = self.exactly('\\')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\\\"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
_G_or_10, lastError = self._or([_G_or_2, _G_or_3, _G_or_4, _G_or_5, _G_or_6, _G_or_7, _G_or_8, _G_or_9])
self.considerError(lastError)
return (_G_or_10, self.currentError)
def rule_character(self):
_locals = {'self': self}
self.locals['character'] = _locals
_G_python_1, lastError = eval('"\'"', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
def _G_or_3():
_G_apply_1, lastError = self._apply(self.rule_escapedChar, "escapedChar", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_4():
_G_apply_1, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_or_5, lastError = self._or([_G_or_3, _G_or_4])
self.considerError(lastError)
_locals['c'] = _G_or_5
_G_python_6, lastError = eval('"\'"', self.globals, _locals), None
self.considerError(lastError)
_G_apply_7, lastError = self._apply(self.rule_token, "token", [_G_python_6])
self.considerError(lastError)
_G_python_8, lastError = eval('self.builder.exactly(c)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_8, self.currentError)
def rule_string(self):
_locals = {'self': self}
self.locals['string'] = _locals
_G_python_1, lastError = eval('\'"\'', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
def _G_many_3():
def _G_or_1():
_G_apply_1, lastError = self._apply(self.rule_escapedChar, "escapedChar", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_2():
def _G_not_1():
_G_exactly_1, lastError = self.exactly('"')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_not_2, lastError = self._not(_G_not_1)
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
return (_G_apply_3, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
_G_many_4, lastError = self.many(_G_many_3)
self.considerError(lastError)
_locals['c'] = _G_many_4
_G_python_5, lastError = eval('\'"\'', self.globals, _locals), None
self.considerError(lastError)
_G_apply_6, lastError = self._apply(self.rule_token, "token", [_G_python_5])
self.considerError(lastError)
_G_python_7, lastError = eval("self.builder.exactly(''.join(c))", self.globals, _locals), None
self.considerError(lastError)
return (_G_python_7, self.currentError)
def rule_name(self):
_locals = {'self': self}
self.locals['name'] = _locals
_G_apply_1, lastError = self._apply(self.rule_letter, "letter", [])
self.considerError(lastError)
_locals['x'] = _G_apply_1
def _G_many_2():
_G_apply_1, lastError = self._apply(self.rule_letterOrDigit, "letterOrDigit", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_3, lastError = self.many(_G_many_2)
self.considerError(lastError)
_locals['xs'] = _G_many_3
_G_python_4, lastError = eval('xs.insert(0, x)', self.globals, _locals), None
self.considerError(lastError)
_G_python_5, lastError = eval("''.join(xs)", self.globals, _locals), None
self.considerError(lastError)
return (_G_python_5, self.currentError)
def rule_application(self):
_locals = {'self': self}
self.locals['application'] = _locals
_G_python_1, lastError = eval("'<'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_spaces, "spaces", [])
self.considerError(lastError)
_G_apply_4, lastError = self._apply(self.rule_name, "name", [])
self.considerError(lastError)
_locals['name'] = _G_apply_4
def _G_or_5():
_G_exactly_1, lastError = self.exactly(' ')
self.considerError(lastError)
_G_python_2, lastError = eval('self.applicationArgs()', self.globals, _locals), None
self.considerError(lastError)
_locals['args'] = _G_python_2
_G_python_3, lastError = eval('self.builder.apply(name, self.name, *args)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def _G_or_6():
_G_python_1, lastError = eval("'>'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_python_3, lastError = eval('self.builder.apply(name, self.name)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
_G_or_7, lastError = self._or([_G_or_5, _G_or_6])
self.considerError(lastError)
return (_G_or_7, self.currentError)
def rule_expr1(self):
_locals = {'self': self}
self.locals['expr1'] = _locals
def _G_or_1():
_G_apply_1, lastError = self._apply(self.rule_application, "application", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_2():
_G_apply_1, lastError = self._apply(self.rule_ruleValue, "ruleValue", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_3():
_G_apply_1, lastError = self._apply(self.rule_semanticPredicate, "semanticPredicate", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_4():
_G_apply_1, lastError = self._apply(self.rule_semanticAction, "semanticAction", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_5():
_G_apply_1, lastError = self._apply(self.rule_number, "number", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_6():
_G_apply_1, lastError = self._apply(self.rule_character, "character", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_7():
_G_apply_1, lastError = self._apply(self.rule_string, "string", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_8():
_G_python_1, lastError = eval("'('", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_expr, "expr", [])
self.considerError(lastError)
_locals['e'] = _G_apply_3
_G_python_4, lastError = eval("')'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_5, lastError = self._apply(self.rule_token, "token", [_G_python_4])
self.considerError(lastError)
_G_python_6, lastError = eval('e', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_6, self.currentError)
def _G_or_9():
_G_python_1, lastError = eval("'['", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_expr, "expr", [])
self.considerError(lastError)
_locals['e'] = _G_apply_3
_G_python_4, lastError = eval("']'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_5, lastError = self._apply(self.rule_token, "token", [_G_python_4])
self.considerError(lastError)
_G_python_6, lastError = eval('self.builder.listpattern(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_6, self.currentError)
_G_or_10, lastError = self._or([_G_or_1, _G_or_2, _G_or_3, _G_or_4, _G_or_5, _G_or_6, _G_or_7, _G_or_8, _G_or_9])
self.considerError(lastError)
return (_G_or_10, self.currentError)
def rule_expr2(self):
_locals = {'self': self}
self.locals['expr2'] = _locals
def _G_or_1():
_G_python_1, lastError = eval("'~'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
def _G_or_3():
_G_python_1, lastError = eval("'~'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_expr2, "expr2", [])
self.considerError(lastError)
_locals['e'] = _G_apply_3
_G_python_4, lastError = eval('self.builder.lookahead(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
def _G_or_4():
_G_apply_1, lastError = self._apply(self.rule_expr2, "expr2", [])
self.considerError(lastError)
_locals['e'] = _G_apply_1
_G_python_2, lastError = eval('self.builder._not(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
_G_or_5, lastError = self._or([_G_or_3, _G_or_4])
self.considerError(lastError)
return (_G_or_5, self.currentError)
def _G_or_2():
_G_apply_1, lastError = self._apply(self.rule_expr1, "expr1", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
def rule_expr3(self):
_locals = {'self': self}
self.locals['expr3'] = _locals
def _G_or_1():
_G_apply_1, lastError = self._apply(self.rule_expr2, "expr2", [])
self.considerError(lastError)
_locals['e'] = _G_apply_1
def _G_or_2():
_G_exactly_1, lastError = self.exactly('*')
self.considerError(lastError)
_G_python_2, lastError = eval('self.builder.many(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_3():
_G_exactly_1, lastError = self.exactly('+')
self.considerError(lastError)
_G_python_2, lastError = eval('self.builder.many1(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_4():
_G_exactly_1, lastError = self.exactly('?')
self.considerError(lastError)
_G_python_2, lastError = eval('self.builder.optional(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_5():
_G_python_1, lastError = eval('e', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_or_6, lastError = self._or([_G_or_2, _G_or_3, _G_or_4, _G_or_5])
self.considerError(lastError)
_locals['r'] = _G_or_6
def _G_or_7():
_G_exactly_1, lastError = self.exactly(':')
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_name, "name", [])
self.considerError(lastError)
_locals['n'] = _G_apply_2
_G_python_3, lastError = eval('self.builder.bind(r, n)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def _G_or_8():
_G_python_1, lastError = eval('r', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_or_9, lastError = self._or([_G_or_7, _G_or_8])
self.considerError(lastError)
return (_G_or_9, self.currentError)
def _G_or_2():
_G_python_1, lastError = eval("':'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_name, "name", [])
self.considerError(lastError)
_locals['n'] = _G_apply_3
_G_python_4, lastError = eval('self.builder.bind(self.builder.apply("anything", self.name), n)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
def rule_expr4(self):
_locals = {'self': self}
self.locals['expr4'] = _locals
def _G_many_1():
_G_apply_1, lastError = self._apply(self.rule_expr3, "expr3", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_2, lastError = self.many(_G_many_1)
self.considerError(lastError)
_locals['es'] = _G_many_2
_G_python_3, lastError = eval('self.builder.sequence(es)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def rule_expr(self):
_locals = {'self': self}
self.locals['expr'] = _locals
_G_apply_1, lastError = self._apply(self.rule_expr4, "expr4", [])
self.considerError(lastError)
_locals['e'] = _G_apply_1
def _G_many_2():
_G_python_1, lastError = eval("'|'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_expr4, "expr4", [])
self.considerError(lastError)
return (_G_apply_3, self.currentError)
_G_many_3, lastError = self.many(_G_many_2)
self.considerError(lastError)
_locals['es'] = _G_many_3
_G_python_4, lastError = eval('es.insert(0, e)', self.globals, _locals), None
self.considerError(lastError)
_G_python_5, lastError = eval('self.builder._or(es)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_5, self.currentError)
def rule_ruleValue(self):
_locals = {'self': self}
self.locals['ruleValue'] = _locals
_G_python_1, lastError = eval('"=>"', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_python_3, lastError = eval('self.ruleValueExpr()', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def rule_semanticPredicate(self):
_locals = {'self': self}
self.locals['semanticPredicate'] = _locals
_G_python_1, lastError = eval('"?("', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_python_3, lastError = eval('self.semanticPredicateExpr()', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def rule_semanticAction(self):
_locals = {'self': self}
self.locals['semanticAction'] = _locals
_G_python_1, lastError = eval('"!("', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_python_3, lastError = eval('self.semanticActionExpr()', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def rule_rulePart(self):
_locals = {'self': self}
self.locals['rulePart'] = _locals
_G_apply_1, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['requiredName'] = _G_apply_1
_G_apply_2, lastError = self._apply(self.rule_spaces, "spaces", [])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_name, "name", [])
self.considerError(lastError)
_locals['n'] = _G_apply_3
def _G_pred_4():
_G_python_1, lastError = eval('n == requiredName', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_pred_5, lastError = self.pred(_G_pred_4)
self.considerError(lastError)
_G_python_6, lastError = eval('setattr(self, "name", n)', self.globals, _locals), None
self.considerError(lastError)
_G_apply_7, lastError = self._apply(self.rule_expr4, "expr4", [])
self.considerError(lastError)
_locals['args'] = _G_apply_7
def _G_or_8():
_G_python_1, lastError = eval('"::="', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_expr, "expr", [])
self.considerError(lastError)
_locals['e'] = _G_apply_3
_G_python_4, lastError = eval('self.builder.sequence([args, e])', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
def _G_or_9():
_G_python_1, lastError = eval('args', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_or_10, lastError = self._or([_G_or_8, _G_or_9])
self.considerError(lastError)
return (_G_or_10, self.currentError)
def rule_rule(self):
_locals = {'self': self}
self.locals['rule'] = _locals
_G_apply_1, lastError = self._apply(self.rule_spaces, "spaces", [])
self.considerError(lastError)
def _G_lookahead_2():
_G_apply_1, lastError = self._apply(self.rule_name, "name", [])
self.considerError(lastError)
_locals['n'] = _G_apply_1
return (_locals['n'], self.currentError)
_G_lookahead_3, lastError = self.lookahead(_G_lookahead_2)
self.considerError(lastError)
_G_python_4, lastError = eval('n', self.globals, _locals), None
self.considerError(lastError)
_G_apply_5, lastError = self._apply(self.rule_rulePart, "rulePart", [_G_python_4])
self.considerError(lastError)
_locals['r'] = _G_apply_5
def _G_or_6():
def _G_many1_1():
_G_python_1, lastError = eval('n', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_rulePart, "rulePart", [_G_python_1])
self.considerError(lastError)
return (_G_apply_2, self.currentError)
_G_many1_2, lastError = self.many(_G_many1_1, _G_many1_1())
self.considerError(lastError)
_locals['rs'] = _G_many1_2
_G_python_3, lastError = eval('self.builder.rule(n, self.builder._or([r] + rs))', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def _G_or_7():
_G_python_1, lastError = eval('self.builder.rule(n, r)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_or_8, lastError = self._or([_G_or_6, _G_or_7])
self.considerError(lastError)
return (_G_or_8, self.currentError)
def rule_grammar(self):
_locals = {'self': self}
self.locals['grammar'] = _locals
def _G_many_1():
_G_apply_1, lastError = self._apply(self.rule_rule, "rule", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_2, lastError = self.many(_G_many_1)
self.considerError(lastError)
_locals['rs'] = _G_many_2
_G_apply_3, lastError = self._apply(self.rule_spaces, "spaces", [])
self.considerError(lastError)
_G_python_4, lastError = eval('self.builder.makeGrammar(rs)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
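# Rough usage sketch. The builder interface is not defined in this module;
# TreeBuilder from pymeta.builder is assumed here to implement it, and the
# grammar text follows the bootstrap's `name ::= expr` syntax:
#
#     from pymeta.builder import TreeBuilder
#     g = BootOMetaGrammar("digit ::= <anything>:x ?(x.isdigit()) => x")
#     tree = g.parseGrammar('MyGrammar', TreeBuilder)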
| 2.953125
| 3
|
amlb/utils/serialization.py
|
PGijsbers/automlbenchmark
| 282
|
12782644
|
import logging
import math
import os
import pickle
import re
from typing import Optional
from .core import Namespace as ns, json_dump, json_load
from .process import profile
log = logging.getLogger(__name__)
def _import_data_libraries():
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
try:
import scipy.sparse as sp
except ImportError:
sp = None
return np, pd, sp
ser_config = ns(
# the serializer to use when there's no specific serializer available.
# mainly intended to serialize simple data structures like lists.
# allowed=['pickle', 'json']
fallback_serializer='json',
    # whether numpy is allowed to use pickle when serializing ndarrays.
numpy_allow_pickle=True,
# format used to serialize pandas dataframes/series between processes.
# allowed=['pickle', 'parquet', 'hdf', 'json']
pandas_serializer='parquet',
# the compression format used when serializing pandas dataframes/series.
# allowed=[None, 'infer', 'bz2', 'gzip']
    # 'infer' (equivalent to None here) is the fastest but applies no compression;
    # 'gzip' gives fast writes and reads with good compression;
    # 'bz2' offers the best compression/time ratio (faster writes, sometimes slightly slower reads).
pandas_compression='infer',
# the compression format used when serializing pandas dataframes/series to parquet.
# allowed=[None, 'snappy', 'gzip', 'brotli']
pandas_parquet_compression=None,
# if sparse matrices should be compressed during serialization.
sparse_matrix_compression=True,
# if sparse matrices should be deserialized to some specific format:
# allowed=[None, 'array', 'dense']
# None (no change), 'array' (numpy), 'dense' (dense matrix).
sparse_matrix_deserialized_format=None,
# if sparse dataframes should be deserialized to some specific format:
# allowed=[None, 'array', 'dense']
# None (no change), 'array' (numpy), 'dense' (dense dataframe/series).
sparse_dataframe_deserialized_format=None,
)
__series__ = '_series_'
class SerializationError(Exception):
pass
def is_serializable_data(data):
    np, pd, sp = _import_data_libraries()
    # guard against uninstalled libraries (None) before building the type tuple
    types = ((np.ndarray,) if np else ()) + ((sp.spmatrix,) if sp else ()) + ((pd.DataFrame, pd.Series) if pd else ())
    return isinstance(data, types)
def is_sparse(data):
np, pd, sp = _import_data_libraries()
return ((sp and isinstance(data, sp.spmatrix)) # sparse matrix
or (pd and isinstance(data, pd.Series) and pd.api.types.is_sparse(data.dtype)) # sparse Series
or (pd and isinstance(data, pd.DataFrame) # if one column is sparse, the dataframe is considered as sparse
and any(pd.api.types.is_sparse(dt) for dt in data.dtypes)))
def unsparsify(*data, fmt='dense'):
if len(data) == 1:
return _unsparsify(data[0], fmt=fmt)
else:
return tuple(_unsparsify(d, fmt=fmt) for d in data)
def _unsparsify(data, fmt=None):
"""
:param data: the matrix to process.
:param fmt: one of None, 'array', 'dense'
    :return: the original matrix if fmt is None,
a numpy array if fmt is 'array',
a dense version of the data type if fmt is 'dense'.
"""
if fmt is None:
return data
np, pd, sp = _import_data_libraries()
if sp and isinstance(data, sp.spmatrix):
return (data.toarray() if fmt == 'array'
else data.todense() if fmt == 'dense'
else data)
elif pd and isinstance(data, (pd.DataFrame, pd.Series)):
return (data.to_numpy(copy=False) if fmt == 'array'
else _pd_to_dense(pd, data) if fmt == 'dense' and is_sparse(data)
else data)
else:
return data
def _pd_to_dense(pd, df):
if hasattr(df, 'sparse'):
return df.sparse.to_dense()
data = {k: (v.sparse.to_dense() if hasattr(v, 'sparse') else v) for k, v in df.items()}
return pd.DataFrame(data, index=df.index, columns=df.columns)
def _pd_dtypes_to_str(pd, df):
return {k: str(v) for k, v in df.dtypes.items()}
def _pd_dtypes_from_str(pd, dt):
def dt_from_str(s):
m_sparse = re.match(r"Sparse\[(.*)]", s)
if m_sparse:
sub_type, fill_value = [t.strip() for t in m_sparse.group(1).split(",", 1)]
try:
fill_value = eval(fill_value, {'nan': math.nan, '<NA>': pd.NA})
            except (ValueError, SyntaxError, NameError):
                # e.g. a fill_value repr like '<NA>' is not evaluable; keep it as-is
pass
dt = pd.api.types.pandas_dtype(f"Sparse[{sub_type}]")
return pd.SparseDtype(dt, fill_value=fill_value)
else:
return pd.api.types.pandas_dtype(s)
return {k: dt_from_str(v) for k, v in dt.items()}
@profile(log)
def serialize_data(data, path, config: Optional[ns] = None):
config = (config | ser_config) if config else ser_config
root, ext = os.path.splitext(path)
np, pd, sp = _import_data_libraries()
if np and isinstance(data, np.ndarray):
path = f"{root}.npy"
np.save(path, data, allow_pickle=config.numpy_allow_pickle)
elif sp and isinstance(data, sp.spmatrix):
        # use a custom extension to recognize sparse matrices from the file name.
# .npz is automatically appended if missing, and can also potentially be used for numpy arrays.
path = f"{root}.spy.npz"
sp.save_npz(path, data, compressed=config.sparse_matrix_compression)
elif pd and isinstance(data, (pd.DataFrame, pd.Series)):
path = f"{root}.pd"
if isinstance(data, pd.DataFrame):
# pandas has this habit of inferring value types when data are loaded from file,
# for example, 'true' and 'false' are converted automatically to booleans, even for column names…
data.rename(str, axis='columns', inplace=True)
ser = config.pandas_serializer
if ser == 'pickle':
data.to_pickle(path, compression=config.pandas_compression)
elif ser == 'parquet':
if isinstance(data, pd.Series):
data = pd.DataFrame({__series__: data})
# parquet serialization doesn't support sparse dataframes
if is_sparse(data):
path = f"{root}.sparse.pd"
dtypes = _pd_dtypes_to_str(pd, data)
json_dump(dtypes, f"{path}.dtypes", style='compact')
data = unsparsify(data)
data.to_parquet(path, compression=config.pandas_parquet_compression)
elif ser == 'hdf':
data.to_hdf(path, os.path.basename(path), mode='w', format='table')
elif ser == 'json':
data.to_json(path, compression=config.pandas_compression)
else: # fallback serializer
if config.fallback_serializer == 'json':
path = f"{root}.json"
json_dump(data, path, style='compact')
else:
path = f"{root}.pkl"
with open(path, 'wb') as f:
pickle.dump(data, f)
return path
@profile(log)
def deserialize_data(path, config: Optional[ns] = None):
config = (config | ser_config) if config else ser_config
np, pd, sp = _import_data_libraries()
base, ext = os.path.splitext(path)
if ext == '.npy':
if np is None:
raise SerializationError(f"Numpy is required to deserialize {path}.")
return np.load(path, allow_pickle=config.numpy_allow_pickle)
elif ext == '.npz':
_, ext2 = os.path.splitext(base)
if ext2 == '.spy':
if sp is None:
raise SerializationError(f"Scipy is required to deserialize {path}.")
sp_matrix = sp.load_npz(path)
return unsparsify(sp_matrix, fmt=config.sparse_matrix_deserialized_format)
else:
if np is None:
raise SerializationError(f"Numpy is required to deserialize {path}.")
            with np.load(path, allow_pickle=config.numpy_allow_pickle) as loaded:
return loaded
elif ext == '.pd':
if pd is None:
raise SerializationError(f"Pandas is required to deserialize {path}.")
ser = config.pandas_serializer
df = None
if ser == 'pickle':
df = pd.read_pickle(path, compression=config.pandas_compression)
elif ser == 'parquet':
df = pd.read_parquet(path)
if len(df.columns) == 1 and df.columns[0] == __series__:
df = df.squeeze()
_, ext2 = os.path.splitext(base)
if config.sparse_dataframe_deserialized_format is None and ext2 == '.sparse':
# trying to restore dataframe as sparse if it was as such before serialization
# and if the dataframe format should remain unchanged
j_dtypes = json_load(f"{path}.dtypes")
dtypes = _pd_dtypes_from_str(pd, j_dtypes)
df = df.astype(dtypes, copy=False)
elif ser == 'hdf':
df = pd.read_hdf(path, os.path.basename(path))
elif ser == 'json':
df = pd.read_json(path, compression=config.pandas_compression)
return unsparsify(df, fmt=config.sparse_dataframe_deserialized_format)
elif ext == '.json':
return json_load(path)
elif ext == '.pkl':
with open(path, 'rb') as f:
return pickle.load(f)
else:
raise SerializationError(f"Can not deserialize file `{path}` in unknown format.")
| 2.1875
| 2
|
nanak_customization/nanak_customization/sales_invoice.py
|
JitendraSAW/Nanak-Stores
| 0
|
12782645
|
import frappe
def after_submit(self,method):
if self.picklist_reference:
frappe.db.set_value("Nanak Pick List", self.picklist_reference, "sales_invoice", self.name)
frappe.db.set_value("Nanak Pick List", self.picklist_reference, "sales_invoice_status", "Created")
| 1.773438
| 2
|
src/main/python/transectdata/transectdata.py
|
boom-roasted/ImageWAO
| 1
|
12782646
|
import json
from pathlib import Path
from typing import Dict
from countdata import CountData
from drawingdata import DrawingDataList
class TransectData:
"""
Manages transect save data in a primitive
data state, such that it can be easily
serialized.
"""
def __init__(self, transectData: Dict[str, Dict[str, list]], fp: Path):
"""
{
'ImageName.jpg':
{
"drawings":
[
DrawingData1.toDict(),
DrawingData2.toDict(),
...
]
}
}
Include `fp` for traceability.
"""
self._transectData: Dict[str, Dict[str, list]] = transectData
self.fp = fp
@staticmethod
def load(fp):
"""
        Loads a serialized file. If the data cannot be decoded,
        the save data is initialized with a blank dict.
"""
try:
with open(fp, "r") as f:
data = json.load(f)
except json.decoder.JSONDecodeError:
print(
f"Badly formed JSON file. Data will be overwritten when file is saved: {fp}"
)
data = {}
return TransectData(data, fp)
def dump(self, fp):
"""
Serialize save data and save to specified path.
Writes this data on top of already existing data.
"""
with open(fp, "w") as f:
json.dump(self._transectData, f, indent=4)
def addImage(self, imageName):
"""
Ensure that an image with the name
imageName is in this save data.
"""
if imageName not in self._transectData.keys():
self._transectData[imageName] = {}
def addDrawings(self, imageName, drawings: DrawingDataList):
"""
Add drawings (for a specific image) to the
save data. This will replace any drawings currently
in this save data instance associated with this
image.
"""
# Ensure image name is present
self.addImage(imageName)
        # Add these drawings to the image dict
self._transectData[imageName]["drawings"] = drawings.toDict()
def removeDrawings(self, imageName: str):
"""
Remove the drawings associated with an image.
"""
if imageName in self._transectData.keys():
try:
self._transectData[imageName].pop("drawings")
# There might not have been this data saved yet
except KeyError:
pass
def imageHasDrawings(self, imageName: str, otherDrawings: DrawingDataList):
"""
Compares the drawings associated with `imageName`,
and returns `True` if those drawings match `otherDrawings`.
"""
# Check if image has no drawings or data
if imageName not in self._transectData.keys():
return False
# Check if image has no drawings
if "drawings" not in self._transectData[imageName].keys():
return False
# Check if image drawings are the same as the input
        # TODO: avoid this JSON round-trip madness
drawings = DrawingDataList.loads(
json.dumps(self._transectData[imageName]["drawings"])
)
return drawings == otherDrawings
def drawings(self):
"""
Generator yielding a tuple of images
with corresponding drawings.
(imageName:str, drawings:DrawingDataList)
"""
for imageName, imageData in self._transectData.items():
if "drawings" in imageData.keys():
yield imageName, DrawingDataList.load(imageData["drawings"])
def imageCounts(self):
"""
Generator yielding tuple of images
and their counts.
(imageName:str, counts:CountData)
"""
for imageName, imageData in self._transectData.items():
if "drawings" in imageData.keys():
drawings = imageData["drawings"]
for drawing in drawings:
countData = CountData.fromDict(drawing["CountData"])
if not countData.isEmpty():
yield imageName, countData
def uniqueSpecies(self):
"""
Returns a list of all the different species in this save file
"""
species = []
for _, countData in self.imageCounts():
if countData.species not in species:
species.append(countData.species)
return species
def uniqueAnimals(self):
"""
Returns a list of the animals in this data set, excluding those
marked as "duplicates". The length of this list is the total number of animals counted
in this data set.
"""
animals = []
for _, countData in self.imageCounts():
if not countData.isDuplicate:
animals.extend([countData.species] * countData.number)
return animals
def uniqueImages(self):
"""
Returns a list of unique images in this data set.
"""
imageNames = []
for imageName, _ in self.imageCounts():
if imageName not in imageNames:
imageNames.append(imageName)
return imageNames
def __repr__(self):
return f"TransectData({super().__repr__()})"
def sorted(self):
"""Sort by key values (image names)"""
return TransectData(
dict(sorted(self._transectData.items(), key=lambda t: t[0])), self.fp
)
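# Minimal load/save sketch (paths are illustrative; DrawingDataList
# construction is project-specific and assumed to happen elsewhere):
#
#     data = TransectData.load(Path('transect.json'))
#     data.addImage('DSC_0001.jpg')
#     data.dump(Path('transect.json'))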
| 3.171875
| 3
|
shorttext/metrics/wasserstein/wordmoverdist.py
|
trendmicro/PyShortTextCategorization
| 0
|
12782647
|
from itertools import product
import pulp
from scipy.spatial.distance import euclidean
from shorttext.utils.gensim_corpora import tokens_to_fracdict
# use PuLP
def word_mover_distance_probspec(first_sent_tokens, second_sent_tokens, wvmodel, distancefunc=euclidean, lpFile=None):
""" Compute the Word Mover's distance (WMD) between the two given lists of tokens, and return the LP problem class.
Using methods of linear programming, supported by PuLP, calculate the WMD between two lists of words. A word-embedding
model has to be provided. The problem class is returned, containing all the information about the LP.
Reference: <NAME>, <NAME>, <NAME>, <NAME>, "From Word Embeddings to Document Distances," *ICML* (2015).
:param first_sent_tokens: first list of tokens.
:param second_sent_tokens: second list of tokens.
:param wvmodel: word-embedding models.
:param distancefunc: distance function that takes two numpy ndarray.
:param lpFile: log file to write out.
    :return: a linear programming problem containing the solution
:type first_sent_tokens: list
:type second_sent_tokens: list
:type wvmodel: gensim.models.keyedvectors.KeyedVectors
:type distancefunc: function
:type lpFile: str
:rtype: pulp.LpProblem
"""
all_tokens = list(set(first_sent_tokens+second_sent_tokens))
wordvecs = {token: wvmodel[token] for token in all_tokens}
first_sent_buckets = tokens_to_fracdict(first_sent_tokens)
second_sent_buckets = tokens_to_fracdict(second_sent_tokens)
T = pulp.LpVariable.dicts('T_matrix', list(product(all_tokens, all_tokens)), lowBound=0)
prob = pulp.LpProblem('WMD', sense=pulp.LpMinimize)
prob += pulp.lpSum([T[token1, token2]*distancefunc(wordvecs[token1], wordvecs[token2])
for token1, token2 in product(all_tokens, all_tokens)])
for token2 in second_sent_buckets:
prob += pulp.lpSum([T[token1, token2] for token1 in first_sent_buckets])==second_sent_buckets[token2]
for token1 in first_sent_buckets:
prob += pulp.lpSum([T[token1, token2] for token2 in second_sent_buckets])==first_sent_buckets[token1]
    if lpFile is not None:
prob.writeLP(lpFile)
prob.solve()
return prob
def word_mover_distance(first_sent_tokens, second_sent_tokens, wvmodel, distancefunc=euclidean, lpFile=None):
""" Compute the Word Mover's distance (WMD) between the two given lists of tokens.
Using methods of linear programming, supported by PuLP, calculate the WMD between two lists of words. A word-embedding
model has to be provided. WMD is returned.
Reference: <NAME>, <NAME>, <NAME>, <NAME>, "From Word Embeddings to Document Distances," *ICML* (2015).
:param first_sent_tokens: first list of tokens.
:param second_sent_tokens: second list of tokens.
:param wvmodel: word-embedding models.
:param distancefunc: distance function that takes two numpy ndarray.
:param lpFile: log file to write out.
:return: Word Mover's distance (WMD)
:type first_sent_tokens: list
:type second_sent_tokens: list
:type wvmodel: gensim.models.keyedvectors.KeyedVectors
:type distancefunc: function
:type lpFile: str
:rtype: float
"""
prob = word_mover_distance_probspec(first_sent_tokens, second_sent_tokens, wvmodel,
distancefunc=distancefunc, lpFile=lpFile)
return pulp.value(prob.objective)
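# Usage sketch (assumes a gensim KeyedVectors model is available; the
# model file below is illustrative):
#
#     from gensim.models import KeyedVectors
#     wvmodel = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
#     dist = word_mover_distance(['obama', 'speaks', 'media', 'illinois'],
#                                ['president', 'greets', 'press', 'chicago'], wvmodel)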
| 2.921875
| 3
|
damera/cmd/conductor.py
|
klonhj2015/damera
| 0
|
12782648
|
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def main():
pass
| 1.070313
| 1
|
scripts/automation/trex_control_plane/interactive/trex/emu/emu_plugins/emu_plugin_ipv6.py
|
jmguzmanc/trex-core
| 0
|
12782649
|
from trex.emu.api import *
from trex.emu.emu_plugins.emu_plugin_base import *
from trex.emu.trex_emu_conversions import Ipv6
from trex.emu.trex_emu_validator import EMUValidator
import trex.utils.parsing_opts as parsing_opts
class IPV6Plugin(EMUPluginBase):
'''Defines ipv6 plugin
RFC 4443: Internet Control Message Protocol (ICMPv6) for the Internet Protocol Version 6 (IPv6)
RFC 4861: Neighbor Discovery for IP Version 6 (IPv6)
RFC 4862: IPv6 Stateless Address Autoconfiguration.
not implemented:
RFC4941: random local ipv6 using md5
'''
plugin_name = 'IPV6'
IPV6_STATES = {
16: 'Learned',
17: 'Incomplete',
18: 'Complete',
19: 'Refresh'
}
# init jsons example for SDK
INIT_JSON_NS = {'ipv6': {'mtu': 1500, 'dmac': [1, 2, 3, 4, 5 ,6], 'vec': [[244, 0, 0, 0], [244, 0, 0, 1]], 'version': 1}}
"""
:parameters:
mtu: uint16
            Maximum transmission unit.
dmac: [6]byte
Designator mac. IMPORTANT !!
vec: list of [16]byte
            IPv6 vector representing multicast addresses.
version: uint16
The init version, 1 or 2 (default). It learns the version from the first Query.
"""
INIT_JSON_CLIENT = {'ipv6': {'nd_timer': 29, 'nd_timer_disable': False}}
"""
:parameters:
nd_timer: uint32
IPv6-nd timer.
nd_timer_disable: bool
Enable/Disable IPv6-nd timer.
"""
def __init__(self, emu_client):
super(IPV6Plugin, self).__init__(emu_client, 'ipv6_ns_cnt')
# API methods
@client_api('getter', True)
def get_cfg(self, ns_key):
"""
Get IPv6 configuration from namespace.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
:return:
| dict: IPv6 configuration like:
| {'dmac': [0, 0, 0, 112, 0, 1], 'version': 2, 'mtu': 1500}
"""
ver_args = [{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey}]
EMUValidator.verify(ver_args)
return self.emu_c._send_plugin_cmd_to_ns('ipv6_mld_ns_get_cfg', ns_key)
@client_api('command', True)
def set_cfg(self, ns_key, mtu, dmac):
"""
        Set IPv6 configuration on namespace.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
mtu: int
MTU for ipv6 plugin.
dmac: list of bytes
Designator mac for ipv6 plugin.
"""
ver_args = [{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey},
{'name': 'mtu', 'arg': mtu, 't': 'mtu'},
{'name': 'dmac', 'arg': dmac, 't': 'mac'},]
EMUValidator.verify(ver_args)
dmac = Mac(dmac)
return self.emu_c._send_plugin_cmd_to_ns('ipv6_mld_ns_set_cfg', ns_key, mtu = mtu, dmac = dmac.V())
@client_api('command', True)
def add_mld(self, ns_key, ipv6_vec):
"""
Add mld to ipv6 plugin.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
ipv6_vec: list of lists of bytes
                List of ipv6 addresses. Each must be a valid ipv6 mld address, e.g. [[0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]
"""
ver_args = [{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey},
{'name': 'ipv6_vec', 'arg': ipv6_vec, 't': 'ipv6_mc', 'allow_list': True},]
EMUValidator.verify(ver_args)
ipv6_vec = [Ipv6(ip, mc = True) for ip in ipv6_vec]
ipv6_vec = [ipv6.V() for ipv6 in ipv6_vec]
return self.emu_c._send_plugin_cmd_to_ns('ipv6_mld_ns_add', ns_key, vec = ipv6_vec)
@client_api('command', True)
def add_gen_mld(self, ns_key, ipv6_start, ipv6_count = 1):
"""
        Add mld to ipv6 plugin, generating a sequence of addresses.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
ipv6_start: lists of bytes
ipv6 addresses to start from. Must be a valid ipv6 mld addresses.
ipv6_count: int
| ipv6 addresses to add
| i.e -> `ipv6_start` = [0, .., 0] and `ipv6_count` = 2 ->[[0, .., 0], [0, .., 1]].
"""
ver_args = [{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey},
{'name': 'ipv6_start', 'arg': ipv6_start, 't': 'ipv6_mc'},
{'name': 'ipv6_count', 'arg': ipv6_count, 't': int}]
EMUValidator.verify(ver_args)
ipv6_vec = self._create_ip_vec(ipv6_start, ipv6_count, 'ipv6', True)
ipv6_vec = [ip.V() for ip in ipv6_vec]
return self.emu_c._send_plugin_cmd_to_ns('ipv6_mld_ns_add', ns_key, vec = ipv6_vec)
@client_api('command', True)
def remove_mld(self, ns_key, ipv6_vec):
"""
Remove mld from ipv6 plugin.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
ipv6_vec: list of lists of bytes
List of ipv6 addresses. Must be a valid ipv6 mld address.
"""
ver_args = [{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey},
{'name': 'ipv6_vec', 'arg': ipv6_vec, 't': 'ipv6_mc', 'allow_list': True},]
EMUValidator.verify(ver_args)
ipv6_vec = [Ipv6(ip, mc = True) for ip in ipv6_vec]
ipv6_vec = [ipv6.V() for ipv6 in ipv6_vec]
return self.emu_c._send_plugin_cmd_to_ns('ipv6_mld_ns_remove', ns_key, vec = ipv6_vec)
@client_api('command', True)
def remove_gen_mld(self, ns_key, ipv6_start, ipv6_count = 1):
"""
Remove mld from ipv6 plugin.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
ipv6_start: lists of bytes
ipv6 address to start from.
ipv6_count: int
                | ipv6 addresses to remove
| i.e -> `ipv6_start` = [0, .., 0] and `ipv6_count` = 2 ->[[0, .., 0], [0, .., 1]].
"""
ver_args = [{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey},
{'name': 'ipv6_start', 'arg': ipv6_start, 't': 'ipv6_mc'},
{'name': 'ipv6_count', 'arg': ipv6_count, 't': int}]
EMUValidator.verify(ver_args)
ipv6_vec = self._create_ip_vec(ipv6_start, ipv6_count, 'ipv6', True)
ipv6_vec = [ip.V() for ip in ipv6_vec]
return self.emu_c._send_plugin_cmd_to_ns('ipv6_mld_ns_remove', ns_key, vec = ipv6_vec)
@client_api('command', True)
def iter_mld(self, ns_key, ipv6_amount = None):
"""
Iterates over current mld's in ipv6 plugin.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
ipv6_amount: int
                Amount of ipv6 addresses to fetch; defaults to None, meaning all.
:returns:
| list: List of ipv6 addresses dict:
| {'refc': 100, 'management': False, 'ipv6': [255, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]}
"""
ver_args = [{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey},
{'name': 'ipv6_amount', 'arg': ipv6_amount, 't': int, 'must': False},]
EMUValidator.verify(ver_args)
params = ns_key.conv_to_dict(add_tunnel_key = True)
return self.emu_c._get_n_items(cmd = 'ipv6_mld_ns_iter', amount = ipv6_amount, **params)
@client_api('command', True)
def remove_all_mld(self, ns_key):
'''
Remove all user created mld(s) from ipv6 plugin.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
'''
ver_args = [{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey}]
EMUValidator.verify(ver_args)
mlds = self.iter_mld(ns_key)
mlds = [m['ipv6'] for m in mlds if m['management']]
if mlds:
self.emu_c._send_plugin_cmd_to_ns('ipv6_mld_ns_remove', ns_key, vec = mlds)
@client_api('getter', True)
def show_cache(self, ns_key):
"""
Return ipv6 cache for a given namespace.
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
:returns:
| list: list of ipv6 cache records
| [{
| 'ipv6': list of 16 bytes,
| 'refc': int,
| 'state': string,
| 'resolve': bool,
| 'mac': list of 6 bytes}
| ].
"""
ver_args = [{'name': 'ns_key', 'arg': ns_key, 't': EMUNamespaceKey},]
EMUValidator.verify(ver_args)
params = ns_key.conv_to_dict(add_tunnel_key = True)
res = self.emu_c._get_n_items(cmd = 'ipv6_nd_ns_iter', **params)
for r in res:
if 'state' in r:
r['state'] = IPV6Plugin.IPV6_STATES.get(r['state'], 'Unknown state')
return res
# Plugins methods
@plugin_api('ipv6_show_counters', 'emu')
def ipv6_show_counters_line(self, line):
'''Show IPV6 counters data from ipv6 table.\n'''
parser = parsing_opts.gen_parser(self,
"ipv6_show_counters",
self.ipv6_show_counters_line.__doc__,
parsing_opts.EMU_SHOW_CNT_GROUP,
parsing_opts.EMU_ALL_NS,
parsing_opts.EMU_NS_GROUP_NOT_REQ,
parsing_opts.EMU_DUMPS_OPT
)
opts = parser.parse_args(line.split())
self.emu_c._base_show_counters(self.data_c, opts, req_ns = True)
return True
# cfg
@plugin_api('ipv6_get_cfg', 'emu')
def ipv6_get_cfg_line(self, line):
'''IPV6 get configuration command\n'''
parser = parsing_opts.gen_parser(self,
"ipv6_get_cfg",
self.ipv6_get_cfg_line.__doc__,
parsing_opts.EMU_NS_GROUP_NOT_REQ,
parsing_opts.EMU_ALL_NS
)
opts = parser.parse_args(line.split())
keys_to_headers = [{'key': 'dmac', 'header': 'Designator Mac'},
{'key': 'mtu', 'header': 'MTU'},
{'key': 'version', 'header': 'Version'},]
args = {'title': 'Ipv6 configuration', 'empty_msg': 'No ipv6 configurations', 'keys_to_headers': keys_to_headers}
if opts.all_ns:
self.run_on_all_ns(self.get_cfg, print_ns_info = True, func_on_res = self.print_table_by_keys, func_on_res_args = args)
else:
self._validate_port(opts)
ns_key = EMUNamespaceKey(opts.port, opts.vlan, opts.tpid)
res = self.get_cfg(ns_key)
self.print_table_by_keys(data = res, **args)
return True
@plugin_api('ipv6_set_cfg', 'emu')
def ipv6_set_cfg_line(self, line):
'''IPV6 set configuration command\n'''
parser = parsing_opts.gen_parser(self,
"ipv6_set_cfg",
self.ipv6_set_cfg_line.__doc__,
parsing_opts.EMU_NS_GROUP_NOT_REQ,
parsing_opts.EMU_ALL_NS,
parsing_opts.MTU,
parsing_opts.MAC_ADDRESS
)
opts = parser.parse_args(line.split())
if opts.all_ns:
self.run_on_all_ns(self.set_cfg, mtu = opts.mtu, dmac = opts.mac)
else:
self._validate_port(opts)
ns_key = EMUNamespaceKey(opts.port, opts.vlan, opts.tpid)
self.set_cfg(ns_key, mtu = opts.mtu, dmac = opts.mac)
return True
# mld
@plugin_api('ipv6_add_mld', 'emu')
def ipv6_add_mld_line(self, line):
'''IPV6 add mld command\n'''
parser = parsing_opts.gen_parser(self,
"ipv6_add_mld",
self.ipv6_add_mld_line.__doc__,
parsing_opts.EMU_NS_GROUP_NOT_REQ,
parsing_opts.EMU_ALL_NS,
parsing_opts.IPV6_START,
parsing_opts.IPV6_COUNT
)
opts = parser.parse_args(line.split())
if opts.all_ns:
self.run_on_all_ns(self.add_gen_mld, ipv6_start = opts.ipv6_start, ipv6_count = opts.ipv6_count)
else:
self._validate_port(opts)
ns_key = EMUNamespaceKey(opts.port, opts.vlan, opts.tpid)
res = self.add_gen_mld(ns_key, ipv6_start = opts.ipv6_start, ipv6_count = opts.ipv6_count)
return True
@plugin_api('ipv6_remove_mld', 'emu')
def ipv6_remove_mld_line(self, line):
'''IPV6 remove mld command\n'''
parser = parsing_opts.gen_parser(self,
"ipv6_remove_mld",
self.ipv6_remove_mld_line.__doc__,
parsing_opts.EMU_NS_GROUP_NOT_REQ,
parsing_opts.EMU_ALL_NS,
parsing_opts.IPV6_START,
parsing_opts.IPV6_COUNT
)
opts = parser.parse_args(line.split())
if opts.all_ns:
self.run_on_all_ns(self.remove_gen_mld, ipv6_start = opts.ipv6_start, ipv6_count = opts.ipv6_count)
else:
self._validate_port(opts)
ns_key = EMUNamespaceKey(opts.port, opts.vlan, opts.tpid)
res = self.remove_gen_mld(ns_key, ipv6_start = opts.ipv6_start, ipv6_count = opts.ipv6_count)
return True
@plugin_api('ipv6_show_mld', 'emu')
def ipv6_show_mld_line(self, line):
'''IPV6 show mld command\n'''
parser = parsing_opts.gen_parser(self,
"ipv6_show_mld",
self.ipv6_show_mld_line.__doc__,
parsing_opts.EMU_NS_GROUP_NOT_REQ,
parsing_opts.EMU_ALL_NS
)
opts = parser.parse_args(line.split())
keys_to_headers = [{'key': 'ipv6', 'header': 'IPv6'},
{'key': 'refc', 'header': 'Ref.Count'},
{'key': 'management', 'header': 'From RPC'}]
args = {'title': 'Current mld:', 'empty_msg': 'There are no mld in namespace', 'keys_to_headers': keys_to_headers}
if opts.all_ns:
self.run_on_all_ns(self.iter_mld, print_ns_info = True, func_on_res = self.print_table_by_keys, func_on_res_args = args)
else:
self._validate_port(opts)
ns_key = EMUNamespaceKey(opts.port, opts.vlan, opts.tpid)
res = self.iter_mld(ns_key)
self.print_table_by_keys(data = res, **args)
return True
@plugin_api('ipv6_remove_all_mld', 'emu')
def ipv6_remove_all_mld_line(self, line):
'''IPV6 remove all mld command\n'''
parser = parsing_opts.gen_parser(self,
"ipv6_remove_all_mld",
self.ipv6_remove_all_mld_line.__doc__,
parsing_opts.EMU_NS_GROUP_NOT_REQ,
parsing_opts.EMU_ALL_NS,
)
opts = parser.parse_args(line.split())
if opts.all_ns:
self.run_on_all_ns(self.remove_all_mld)
else:
self._validate_port(opts)
ns_key = EMUNamespaceKey(opts.port, opts.vlan, opts.tpid)
            self.remove_all_mld(ns_key)
return True
# cache
@plugin_api('ipv6_show_cache', 'emu')
def ipv6_show_cache_line(self, line):
'''IPV6 show cache command\n'''
parser = parsing_opts.gen_parser(self,
"ipv6_show_cache",
self.ipv6_show_cache_line.__doc__,
parsing_opts.EMU_NS_GROUP_NOT_REQ,
parsing_opts.EMU_ALL_NS
)
opts = parser.parse_args(line.split())
keys_to_headers = [{'key': 'mac', 'header': 'MAC'},
{'key': 'ipv6', 'header': 'IPv6'},
{'key': 'refc', 'header': 'Ref.Count'},
{'key': 'resolve', 'header': 'Resolve'},
{'key': 'state', 'header': 'State'},
]
args = {'title': 'Ipv6 cache', 'empty_msg': 'No ipv6 cache in namespace', 'keys_to_headers': keys_to_headers}
if opts.all_ns:
self.run_on_all_ns(self.show_cache, print_ns_info = True, func_on_res = self.print_table_by_keys, func_on_res_args = args)
else:
self._validate_port(opts)
ns_key = EMUNamespaceKey(opts.port, opts.vlan, opts.tpid)
res = self.show_cache(ns_key)
self.print_table_by_keys(data = res, **args)
return True
# Override functions
@client_api('getter', True)
def tear_down_ns(self, ns_key):
'''
This function will be called before removing this plugin from namespace
:parameters:
ns_key: EMUNamespaceKey
see :class:`trex.emu.trex_emu_profile.EMUNamespaceKey`
'''
        try:
            self.remove_all_mld(ns_key)
        except Exception:
            # Best-effort cleanup: the namespace may already be gone or empty.
            pass
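# Illustrative console usage for the @plugin_api commands above (a sketch: the
# option spellings below are placeholders for whatever EMU_NS_GROUP_NOT_REQ,
# IPV6_START and IPV6_COUNT actually register):
#
#   emu> ipv6_add_mld -p 0 --ipv6-start ff02::1:3 --ipv6-count 4
#   emu> ipv6_show_mld -p 0
#   emu> ipv6_remove_all_mld --all-ns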
1.976563
2
src/quantrt/api/auth.py
ardieb/quantrt
0
12782650
import base64
import hashlib
import hmac
import json
import time
import quantrt.common.log
from typing import Union, Dict
__all__ = ["load_credentials", "sign_websocket_request"]
def load_credentials(credentials: Union[str, Dict]) -> Dict:
    """Load coinbasepro API credentials from a dictionary or a json file.

    :param credentials: Either the path to a json file with the key, secret, and
        passphrase, or a dictionary mimicking this json structure.
    :return: A dictionary with the values of `secret`, `key`, and `passphrase`.
    """
    if isinstance(credentials, str):
        if not credentials.endswith(".json"):
            quantrt.common.log.QuantrtLog.exception(
                "The file {} is invalid. Must be a JSON file.".format(credentials))
            raise ValueError("The file {} is invalid. Must be a JSON file.".format(credentials))
        with open(credentials, "r") as fhandle:
            try:
                credentials = json.load(fhandle)
            except json.JSONDecodeError as err:
                quantrt.common.log.QuantrtLog.exception(
                    "Error encountered parsing {}. {}.".format(credentials, err))
                raise err
    return {
        "key": credentials.get("key", ""),
        "secret": credentials.get("secret", ""),
        "passphrase": credentials.get("passphrase", "")
    }
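# Illustrative calls (a sketch; "credentials.json" is a placeholder path, not a
# file shipped with this module):
#
#   creds = load_credentials("credentials.json")
#   creds = load_credentials({"key": "...", "secret": "...", "passphrase": "..."})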
def sign_websocket_request(secret: str, key: str, passphrase: str, request: Dict) -> Dict:
    """Sign a websocket request to the coinbasepro websocket feed.

    :param secret: str - the API secret.
    :param key: str - the API key.
    :param passphrase: str - the API passphrase.
    :param request: Dict - the request to sign.
    :return: Dict - the request, augmented with key, passphrase, timestamp, and signature.
    """
    timestamp = str(time.time())
    # Coinbase Pro websocket authentication signs the fixed request path
    # "/users/self/verify" (method GET, empty body) with HMAC-SHA256 keyed by
    # the base64-decoded API secret.
    message = timestamp + "GET" + "/users/self/verify"
    message = message.encode('ascii')
    hmac_key = base64.b64decode(secret)
    signature = hmac.new(hmac_key, message, hashlib.sha256)
    signature_b64 = base64.b64encode(signature.digest())
    request["signature"] = signature_b64.decode("ascii")
    request["key"] = key
    request["passphrase"] = passphrase
    request["timestamp"] = timestamp
    return request
2.65625
3