| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| string, 5-100 chars | string, 4-231 chars | string, 1 class | string, 15 classes | int64, 6-947k | float64, 0-0.34 | string, 0-8.16k chars | string, 3-512 chars | string, 0-8.17k chars |
pattisdr/osf.io | framework/auth/oauth_scopes.py | Python | apache-2.0 | 18,447 | 0.005746
"""
Define a set of scopes to be used by COS Internal OAuth implementation, specifically tailored to work with APIv2.
List of scopes, nomenclature, and rationale can be found in the relevant "Login as OSF- phase 2" proposal document
"""
from collections import namedtuple
from website import settings
# Public scopes are described with 3 pieces of information: list of constituent scopes, a description, and whether or
# not this scope is available to be requested by the general public
class scope(namedtuple('scope', ['parts_', 'description', 'is_public'])):
""" Patch to add `ALWAYS_PUBLIC` scope to every selectable scope,
ensuring that public endpoints are accessible with any token.
"""
@property
def parts(self):
return frozenset((CoreScopes.ALWAYS_PUBLIC, )).union(self.parts_)
class CoreScopes(object):
"""
The smallest units of permission that can be granted; all other scopes are built out of these.
Each named constant is a single string."""
# IMPORTANT: All views should be based on the smallest number of Core scopes required to describe
# the data in that view
USERS_READ = 'users_read'
USERS_WRITE = 'users_write'
USERS_CREATE = 'users_create'
USER_SETTINGS_READ = 'user.settings_read'
USER_SETTINGS_WRITE = 'user.settings_write'
USER_EMAIL_READ = 'users.email_read'
USER_ADDON_READ = 'users.addon_read'
SUBSCRIPTIONS_READ = 'subscriptions_read'
SUBSCRIPTIONS_WRITE = 'subscriptions_write'
MEETINGS_READ = 'meetings.base_read'
NODE_BASE_READ = 'nodes.base_read'
NODE_BASE_WRITE = 'nodes.base_write'
NODE_CHILDREN_READ = 'nodes.children_read'
NODE_CHILDREN_WRITE = 'nodes.children_write'
NODE_FORKS_READ = 'nodes.forks_read'
NODE_FORKS_WRITE = 'nodes.forks_write'
NODE_CONTRIBUTORS_READ = 'nodes.contributors_read'
NODE_CONTRIBUTORS_WRITE = 'nodes.contributors_write'
PREPRINT_CONTRIBUTORS_READ = 'preprints.contributors_read'
PREPRINT_CONTRIBUTORS_WRITE = 'preprints.contributors_write'
NODE_FILE_READ = 'nodes.files_read'
NODE_FILE_WRITE = 'nodes.files_write'
PREPRINT_FILE_READ = 'preprints.files_read'
PREPRINT_FILE_WRITE = 'preprints.files_write'
NODE_ADDON_READ = 'nodes.addon_read'
NODE_ADDON_WRITE = 'nodes.addon_write'
NODE_LINKS_READ = 'nodes.links_read'
NODE_LINKS_WRITE = 'nodes.links_write'
NODE_VIEW_ONLY_LINKS_READ = 'node.view_only_links_read'
NODE_VIEW_ONLY_LINKS_WRITE = 'node.view_only_links_write'
NODE_PREPRINTS_READ = 'node.preprints_read'
NODE_PREPRINTS_WRITE = 'node.preprints_write'
PREPRINTS_READ = 'preprint.preprints_read'
PREPRINTS_WRITE = 'preprint.preprints_write'
REGISTRATION_VIEW_ONLY_LINKS_READ = 'registration.view_only_links_read'
REGISTRATION_VIEW_ONLY_LINKS_WRITE = 'registration.view_only_links_write'
SCHEMA_READ = 'schemas.read'
NODE_DRAFT_REGISTRATIONS_READ = 'nodes.draft_registrations_read'
NODE_DRAFT_REGISTRATIONS_WRITE = 'nodes.draft_registrations_write'
NODE_REGISTRATIONS_READ = 'nodes.registrations_read'
NODE_REGISTRATIONS_WRITE = 'nodes.registrations_write'
NODE_CITATIONS_READ = 'nodes.citations_read'
NODE_CITATIONS_WRITE = 'nodes.citations_write'
PREPRINT_CITATIONS_READ = 'preprints.citations_read'
PREPRINT_CITATIONS_WRITE = 'preprints.citations_write'
NODE_COMMENTS_READ = 'comments.data_read'
NODE_COMMENTS_WRITE = 'comments.data_write'
LICENSE_READ = 'license.data_read'
COMMENT_REPORTS_READ = 'comments.reports_read'
COMMENT_REPORTS_WRITE = 'comments.reports_write'
APPLICATIONS_READ = 'applications_read'
APPLICATIONS_WRITE = 'applications_write'
NODE_LOG_READ = 'nodes.logs_read'
TOKENS_READ = 'tokens_read'
TOKENS_WRITE = 'tokens_write'
ALERTS_READ = 'alerts_read'
ALERTS_WRITE = 'alerts_write'
INSTITUTION_READ = 'institutions_read'
SCOPES_READ = 'scopes_read'
SEARCH = 'search_read'
ACTIONS_READ = 'actions_read'
ACTIONS_WRITE = 'actions_write'
MODERATORS_READ = 'moderators_read'
MODERATORS_WRITE = 'moderators_write'
NODE_REQUESTS_READ = 'node_requests_read'
NODE_REQUESTS_WRITE = 'node_requests_write'
NODE_SETTINGS_READ = 'node_settings_read'
NODE_SETTINGS_WRITE = 'node_settings_write'
PREPRINT_REQUESTS_READ = 'preprint_requests_read'
PREPRINT_REQUESTS_WRITE = 'preprint_requests_write'
PROVIDERS_WRITE = 'providers_write'
CHRONOS_SUBMISSION_READ = 'chronos_submission_read'
CHRONOS_SUBMISSION_WRITE = 'chronos_submission_write'
WAFFLE_READ = 'waffle_read'
NULL = 'null'
# NOTE: Use with extreme caution.
# This should NEVER be assigned to endpoints:
# - with mutable data,
# - that might contain *anything* that could be personally-identifiable,
# - as a write scope
ALWAYS_PUBLIC = 'always_public'
ORGANIZER_COLLECTIONS_BASE_READ = 'collections.base_read'
ORGANIZER_COLLECTIONS_BASE_WRITE = 'collections.base_write'
COLLECTED_META_READ = 'collected_meta_read'
COLLECTED_META_WRITE = 'collected_meta_write'
GUIDS_READ = 'guids.base_read'
WIKI_BASE_READ = 'wikis.base_read'
WIKI_BASE_WRITE = 'wikis.base_write'
IDENTIFIERS_READ = 'identifiers.data_read'
IDENTIFIERS_WRITE = 'identifiers.data_write'
METRICS_BASIC = 'metrics_basic'
METRICS_RESTRICTED = 'metrics_restricted'
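# A brief, hedged illustration of the `scope` patch above (the description below is
# invented for the example): whatever constituent scopes a selectable scope declares,
# its `parts` property also contains CoreScopes.ALWAYS_PUBLIC.
example_scope = scope(parts_=frozenset([CoreScopes.USERS_READ]),
                      description='illustrative read-only scope', is_public=True)
assert CoreScopes.ALWAYS_PUBLIC in example_scope.parts
assert CoreScopes.USERS_READ in example_scope.parts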
class ComposedScopes(object):
"""
Composed scopes, listed in increasing order of access (most restrictive first). Each named constant is a tuple.
"""
# IMPORTANT: Composed scopes exist only as an internal implementation detail.
# All views should be based on selections from CoreScopes, above
# Users collection
USERS_READ = (CoreScopes.USERS_READ, CoreScopes.SUBSCRIPTIONS_READ, CoreScopes.ALERTS_READ, CoreScopes.USER_SETTINGS_READ)
USERS_WRITE = USERS_READ + (CoreScopes.USERS_WRITE, CoreScopes.SUBSCRIPTIONS_WRITE, CoreScopes.ALERTS_WRITE, CoreScopes.USER_SETTINGS_WRITE)
USERS_CREATE = USERS_READ + (CoreScopes.USERS_CREATE, )
# User extensions
USER_EMAIL_READ = (CoreScopes.USER_EMAIL_READ, )
# Applications collection
APPLICATIONS_READ = (CoreScopes.APPLICATIONS_READ, )
APPLICATIONS_WRITE = APPLICATIONS_READ + (CoreScopes.APPLICATIONS_WRITE,)
# Tokens collection
TOKENS_READ = (CoreScopes.TOKENS_READ,)
TOKENS_WRITE = TOKENS_READ + (CoreScopes.TOKENS_WRITE,)
# Guid redirect view
GUIDS_READ = (CoreScopes.GUIDS_READ, )
# Metaschemas collection
METASCHEMAS_READ = (CoreScopes.SCHEMA_READ, )
# Draft registrations
DRAFT_READ = (CoreScopes.NODE_DRAFT_REGISTRATIONS_READ, )
DRAFT_WRITE = (CoreScopes.NODE_DRAFT_REGISTRATIONS_WRITE, )
# Identifier views
IDENTIFIERS_READ = (CoreScopes.IDENTIFIERS_READ, )
IDENTIFIERS_WRITE = (CoreScopes.IDENTIFIERS_WRITE, )
# Comment reports collection
COMMENT_REPORTS_READ = (CoreScopes.COMMENT_REPORTS_READ,)
COMMENT_REPORTS_WRITE = COMMENT_REPORTS_READ + (CoreScopes.COMMENT_REPORTS_WRITE,)
# Nodes collection.
# Base node data includes node metadata, links, children, and preprints.
NODE_METADATA_READ = (CoreScopes.NODE_BASE_READ, CoreScopes.NODE_CHILDREN_READ, CoreScopes.NODE_LINKS_READ,
CoreScopes.NODE_CITATIONS_READ, CoreScopes.NODE_COMMENTS_READ, CoreScopes.NODE_LOG_READ,
CoreScopes.NODE_FORKS_READ, CoreScopes.WIKI_BASE_READ, CoreScopes.LICENSE_READ,
CoreScopes.IDENTIFIERS_READ, CoreScopes.NODE_PREPRINTS_READ, CoreScopes.PREPRINT_REQUESTS_READ)
NODE_METADATA_WRITE = NODE_METADATA_READ + \
(CoreScopes.NODE_BASE_WRITE, CoreScopes.NODE_CHILDREN_WRITE, CoreScopes.NODE_LINKS_WRITE, CoreScopes.IDENTIFIERS_WRITE,
CoreScopes.NODE_CITATIONS_WRITE, CoreScopes.NODE_COMMENTS_WRITE, CoreScopes.NODE_FORKS_WRITE,
CoreScopes.NODE_PREPRINTS_WRITE, CoreScopes.PREPRINT_REQUESTS_WRITE, CoreScopes.WIKI_BASE_WRITE)
# Preprints collection
# TODO: Move Metrics scopes to their own restricted compose
Bestoa/py-brainfuck | nbfi/__init__.py | Python | mit | 2,726 | 0.002935
'''Brainfuck interpreter'''
VERSION = '0.1.2.1103'
def __static_vars():
'''Decorate, add static attr'''
def decorate(func):
'''The decorate'''
setattr(func, 'stdin_buffer', [])
return func
return decorate
@__static_vars()
def __getchar() -> int:
'''Return one char from stdin'''
buffer_len = len(__getchar.stdin_buffer)
if buffer_len == 0:
__getchar.stdin_buffer = list(input().encode('ascii'))
__getchar.stdin_buffer.append(10) # Append this newline so behaviour matches getchar from libc.
ret_c, __getchar.stdin_buffer = __getchar.stdin_buffer[0], __getchar.stdin_buffer[1:]
return ret_c
def __pre_execute(raw_code: str) -> list:
'''Replace the [] with paired code pointer'''
iptr = 0
bracket = list()
code = list(raw_code)
code_len = len(code)
while iptr < code_len:
code[iptr] = [code[iptr], '']
if code[iptr][0] == '[':
bracket.append(iptr)
elif code[iptr][0] == ']':
piptr = bracket.pop()
code[piptr][1], code[iptr][1] = iptr, piptr
iptr += 1
bracket_len = len(bracket)
if bracket_len != 0:
code = []
return code
def __execute(code: list, stack_size: int) -> list:
'''Run bf code'''
iptr = 0
sptr = 0
stack = list(0 for _ in range(stack_size))
code_len = len(code)
while iptr < code_len:
instruction = code[iptr][0]
if instruction == '>':
sptr += 1
elif instruction == '<':
sptr -= 1
elif instruction == '+':
stack[sptr] += 1
if stack[sptr] == 256:
stack[sptr] = 0
elif instruction == '-':
stack[sptr] -= 1
if stack[sptr] == -1:
stack[sptr] = 255
elif instruction == '.':
print(chr(stack[sptr]), end='')
elif instruction == ',':
stack[sptr] = __getchar()
elif instruction == '[' and stack[sptr] == 0:
iptr = code[iptr][1]
elif instruction == ']' and stack[sptr] != 0:
iptr = code[iptr][1]
iptr += 1
# Clean the buffer, otherwise it will affect next round result.
__getchar.stdin_buffer = []
return stack
def run(raw_code: str = '', stack_size: int = 128) -> list:
'''Interpret the raw_code.
Input:
- raw_code: the string of brainfuck code.
if this is empty, the program will wait for user input.
- stack_size: the size of the stack; default is 128 bytes.
Return value:
- The whole stack.
'''
if raw_code == '':
raw_code = input('% ')
code = __pre_execute(raw_code)
return __execute(code, stack_size)
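# A minimal usage sketch for the interpreter above; the cell-doubling program is a
# standard Brainfuck snippet (not from this repo) that should print the letter A.
tape = run('++++++++[>++++++++<-]>+.', stack_size=8)  # prints 'A' (no trailing newline)
print()
print(tape[:2])  # -> [0, 65]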
maxsocl/django-tvdb | setup.py | Python | mit | 1,193 | 0.000838
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-tvdb',
version='0.1',
packages=['tvdb'],
include_package_data=True,
license='The MIT License: http://www.opensource.org/licenses/mit-license.php',
description='A simple Django app for TV channels DB.',
long_description=README,
author='Maksym Sokolsky',
author_email='misokolsky@gmail.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
CG-F16-16-Rutgers/steersuite-rutgers | steerstats/steersuitedb/Scenario.py | Python | gpl-3.0 | 2,417 | 0.009516
import psycopg2
from Sequence import ScenarioSequence
class Scenario(object):
"""A simple example class"""
_id_name = "scenario_id"
_table_name = "scenario"
_insert_order = """
(scenario_id ,
algorithm_type ,
benchmark_type ,
config_id,
scenario_description )"""
#scenario
#(
# scenario_id integer NOT NULL primary key,
# algorithm_type integer NOT NULL references algorithm(algorithm_id),
# benchmark_type integer NOT NULL references benchmark(benchmark_id),
# config_id integer NOT NULL references config(config_id),
# scenario_description tsvector
#) ;
def __init__(self, algorithm_type=0, benchmark_type=0, config_id=0, scenario_description=""):
self._id = id
self._algorithm_type = algorithm_type
self._benchmark_type = benchmark_type
self._config_id = config_id
self._scenario_description = scenario_description
def getScenarioData(self, cur, n):
cur.execute("SELECT * FROM " + self._table_name + " where " + self._id_name + " = "+ str(n))
row = cur.fetchone()
return row
def insertScenario(self, cur, algorithm_type, benchmark_type, config_id, scenario_description):
try:
alDataSeq = ScenarioSequence()
next_id = alDataSeq.getNextVal(cur)
scenario_description=str(scenario_description.replace(',', ' '))
# scenario_description=scenario_description[100:110]
# print scenario_description
cur.execute("INSERT INTO " + self._table_name + " " +
self._insert_order + " " +
"VALUES(%s,%s,%s,%s,%s)",
(
str(next_id),
str(algorithm_type),
str(benchmark_type),
str(config_id),
str(scenario_description),
)
)
self._id = next_id
return next_id
except psycopg2.DatabaseError, e:
print 'Scenario Error %s' % e
def insertScenario2(self, cur):
return self.insertScenario(cur, self._algorithm_type, self._benchmark_type, self._config_id, self._scenario_description)
# sys.exit(1)
odoousers2014/odoo | addons/website_version/__openerp__.py | Python | agpl-3.0 | 724 | 0.005525
{
'name': 'Website Versioning',
'category': 'Website',
'summary': 'Allow to save all the versions of your website and allow to perform AB testing.',
'version': '1.0',
'description': """
OpenERP Website CMS
===================
""",
'author': 'OpenERP SA',
'depends': ['website','marketing','google_account'],
'installable': True,
'data': [
'security/ir.model.access.csv',
'views/website_version_templates.xml',
'views/marketing_view.xml',
'views/website_version_views.xml',
'views/res_config.xml',
'data/data.xml',
],
'demo': [
'data/demo.xml',
],
'qweb': ['static/src/xml/*.xml'],
'application': True,
}
ddico/odoo | addons/survey/tests/test_survey.py | Python | agpl-3.0 | 3,979 | 0.002011
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _
from odoo.addons.survey.tests import common
from odoo.tests.common import users
class TestSurveyInternals(common.TestSurveyCommon):
@users('survey_manager')
def test_answer_validation_mandatory(self):
""" For each type of question check that mandatory questions correctly check for complete answers """
for (question_type, text) in self.env['survey.question']._fields['question_type'].selection:
kwargs = {}
if question_type == 'multiple_choice':
kwargs['labels'] = [{'value': 'MChoice0'}, {'value': 'MChoice1'}]
elif question_type == 'simple_choice':
kwargs['labels'] = []
elif question_type == 'matrix':
kwargs['labels'] = [{'value': 'Column0'}, {'value': 'Column1'}]
kwargs['labels_2'] = [{'value': 'Row0'}, {'value': 'Row1'}]
question = self._add_question(self.page_0, 'Q0', question_type, **kwargs)
self.assertDictEqual(
question.validate_question(''),
{question.id: 'TestError'}
)
@users('survey_manager')
def test_answer_validation_date(self):
question = self._add_question(
self.page_0, 'Q0', 'date', validation_required=True,
validation_min_date='2015-03-20', validation_max_date='2015-03-25', validation_error_msg='ValidationError')
self.assertEqual(
question.validate_question('Is Alfred an answer ?'),
{question.id: _('This is not a date')}
)
self.assertEqual(
question.validate_question('2015-03-19'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('2015-03-26'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('2015-03-25'),
{}
)
@users('survey_manager')
def test_answer_validation_numerical(self):
question = self._add_question(
self.page_0, 'Q0', 'numerical_box', validation_required=True,
validation_min_float_value=2.2, validation_max_float_value=3.3, validation_error_msg='ValidationError')
self.assertEqual(
question.validate_question('Is Alfred an answer ?'),
{question.id: _('This is not a number')}
)
self.assertEqual(
question.validate_question('2.0'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('4.0'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('2.9'),
{}
)
@users('survey_manager')
def test_answer_validation_char_box_email(self):
question = self._add_question(self.page_0, 'Q0', 'char_box', validation_email=True)
self.assertEqual(
question.validate_question('not an email'),
{question.id: _('This answer must be an email address')}
)
self.assertEqual(
question.validate_question('email@example.com'),
{}
)
@users('survey_manager')
def test_answer_validation_char_box_length(self):
question = self._add_question(
self.page_0, 'Q0', 'char_box', validation_required=True,
validation_length_min=2, validation_length_max=8, validation_error_msg='ValidationError')
self.assertEqual(
question.validate_question('l'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('waytoomuchlonganswer'),
{question.id: 'ValidationError'}
)
self.assertEqual(
question.validate_question('valid'),
{}
)
srio/shadow3-scripts | METROLOGY/surface2d_to_hdf5.py | Python | mit | 6,588 | 0.007286
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
from srxraylib.plot.gol import plot
from oasys.util.oasys_util import write_surface_file
from srxraylib.metrology.profiles_simulation import slopes
# def transform_data(file_name):
#
# """First chapuza to create a file similar to FEA"""
#
# df = pd.read_csv(file_name, sep=';', header=None, skiprows=23)
# # new columns #
# df.columns = ['x(m)', 'y(m)', 'uz(m)']
#
# new_col = ['z(m)','ux(m)','uy(m)']
# # adding zeros for each new column
# for col in new_col:
# df[col] = 0.0
#
# # reordering the columns #
#
# cols = df.columns.tolist()
#
# # order to be like FEA ESRF #
# cols = cols[:2]+cols[3:4]+cols[-2:]+cols[2:3]
#
# df = df[cols]
#
# return df
#
# def get_line(file_name, row = 'central'):
# """Function to get a profile file for a given Sagittal line
# of a mirror 2D measurements"""
#
# df = pd.read_csv(file_name, sep=';', header=None, skiprows=23)
#
# df.columns = ['x(m)', 'y(m)', 'z(m)']
#
# #sagittal_rows = df[df.duplicated(['y(m)'])]
# #print(sagittal_rows)
#
# rows_shape = df.pivot_table(columns=['y(m)'], aggfunc='size')
#
# n_rows = rows_shape.size
#
# if row == 'central':
# n = int(n_rows/2)
# elif (isinstance(row, int) == True) and (row < n_rows):
# n = row
# else:
# raise RuntimeError(f'ERROR: {row} is not an integer number or is higher than the number of rows {n_rows}')
#
# #print(rows_shape.index[n])
#
# sub_df = df[df['y(m)'] == rows_shape.index[n]]
#
# return sub_df
def get_shadow_h5(file_name):
"""Function to get an h5 file with OASYS structure
from 2D measurements """
df = pd.read_csv(file_name, sep=';', header=None, comment='#', skiprows=1)
df.columns = ['x(m)', 'y(m)', 'z(m)']
# this part is to get the ordinates and the number of abscissas for each
rows_shape = df.pivot_table(columns=['y(m)'], aggfunc='size')
#print(rows_shape)
#n_rows = rows_shape.size
#print(n_rows)
x_coors = []
x_mins = []
x_maxs = []
z_heights = []
for i,y in enumerate(rows_shape.index):
sub_df = df[df['y(m)'] == y]
x_coors.append(np.array(sub_df['x(m)']))
x_mins.append(x_coors[i][0])
x_maxs.append(x_coors[i][-1])
z_heights.append(np.array(sub_df['z(m)']))
# checking that all coordinates along the mirror have the same steps #
if (all(x==x_mins[0] for x in x_mins)) and (all(x==x_maxs[0] for x in x_maxs)):
print("All elements in x_coors are the same")
x = x_coors[0]
y = rows_shape.index
else:
#TODO: define coordinates along the mirror and interpolate all#
#z for all y coord #
pass
#print(z_heights)
return np.array(x), np.array(y), np.array(z_heights)
# def app_gaussian(z, sigma_0= 10, sigma_1 = 10):
#
# """Copy paste of Manolos filtering function"""
#
# filtered_z = gaussian_filter(z, (sigma_0,sigma_1), order=0, output=None, mode='nearest', cval=0.0, truncate=4.0)
#
# return filtered_z
#
# def scale_profile(surface, factor):
# """Brief function just to rescale the full surface"""
# z2 = np.copy(surface)
# z2 *= factor
#
# return z2
#
#
# def detrend_best_circle(x,y,z,fitting_domain_ratio=0.5, plotting = False):
#
# """Almost copy paste of Manolos detrend best circle function"""
#
# xm = x.copy()
# zm = z[y.size//2,:]
# print(f'Medium line at {y.size//2}')
# zm.shape = -1
#
# icut = np.argwhere(np.abs(xm) <= fitting_domain_ratio)
# if len(icut) <=5:
# raise Exception("Not enough points for fitting.")
#
# xcut = xm[icut]
# #print(len(xm),len(xcut))
# zmcut = zm[icut]
#
# #print(len(zm), len(zmcut))
#
# xcut.shape = -1
# zmcut.shape = -1
#
# if plotting:
# plot(xm, zm, legend=["original"])
#
# print( np.argwhere(np.isnan(z)))
# print("Fitting interval: [%g,%g] (using %d points)" % (xcut[0],xcut[-1],xcut.size))
#
# coeff = np.polyfit(xcut, np.gradient(zmcut,xcut), deg=1)
#
# # # zfit = coeff[0] * xm + coeff[1]
# radius = 1 / coeff[0]
# #print("Detrending straight line on sloped (axis=%d): zfit = %g * coordinate + %g " % (axis, coeff[1], coeff[0]))
# print("Radius of curvature: %g m" % (1.0 / coeff[0]))
#
# if radius >= 0:
# zfit = radius - np.sqrt(radius ** 2 - xm ** 2)
# else:
# zfit = radius + np.sqrt(radius ** 2 - xm ** 2)
# if plotting:
# plot(xm, zfit, legend=["fit"])
# #plot(xcut, zmcut, xm, zfit, legend=["cut","fit"])
#
# #print(len(zfit))
#
# plot(xm, zm-zfit, legend=["detrended"])
#
# for i in range(z.shape[0]):
# z[i,:] -= zfit
#
#
# nx, ny = z.shape
# z = z - (z[nx//2,ny//2])
#
# # print(f" Slope error is {round(z[:, 0].std(), 6)}")
#
# return xm, z
def plot2d(x,y,data):
plt.pcolormesh(x,y,data, cmap=plt.cm.viridis)
plt.colorbar().ax.tick_params(axis='y',labelsize=12)
plt.ylabel("Vertical [mm]",fontsize=12)
plt.xlabel("Horizontal [mm]",fontsize=12)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.show()
if __name__ == '__main__':
file_name = 'ring256_TypbeB_F127001_frontside_ontissue_meas2__avg_2D.txt'
x, y, z = get_shadow_h5(file_name)
print(z.shape, x.shape, y.shape, z.min(), z.max())
from srxraylib.plot.gol import plot_image
plot_image(z*1e6, y*1e3, x*1e3, aspect="auto")
# x,z = detrend_best_circle(x,y,z,fitting_domain_ratio=0.5, plotting=True)
#
# print(z.shape)
# #plot2d(x,y,z)
#
# z2 = app_gaussian(z, sigma_0= 6, sigma_1 = 2)
#
# z3 = scale_profile(z2,1)
#
# #plot2d(x,y,z)
slp = slopes(z, y, x, silent=0, return_only_rms=0)
#
# slp_y = np.round(slp[1][1]*1e6, 3)
output_filename = f'ring256.h5'
# plot(x,z[y.size//2,:],x,z[y.size//2,:],legend=["detrended","Gauss_filtered"])
#
# plot(x,np.gradient(z[y.size//2,:],x), legend=["Slope errors"])
write_surface_file(z.T, y, x, output_filename, overwrite=True)
print("write_h5_surface: File for OASYS " + output_filename + " written to disk.")
print(">>>>>", z.T.shape, y.shape, x.shape,)
scalable-networks/ext | uhd/host/apps/omap_debug/set_debug_pins.py | Python | gpl-2.0 | 712 | 0.008427
#!/usr/bin/python
import os
# Memory Map
misc_base = 0
uart_base = 1
spi_base = 2
i2c_base = 3
gpio_base = 4 * 128
settings_base = 5
# GPIO offset
gpio_pins = 0
gpio_ddr = 4
gpio_ctrl_lo = 8
gpio_ctrl_hi = 12
def set_reg(reg, val):
os.system("./usrp1-e-ctl w %d 1 %d" % (reg,val))
def get_reg(reg):
fin,fout = os.popen4("./usrp1-e-ctl r %d 1" % (reg,))
print fout.read()
# Set DDRs to output
set_reg(gpio_base+gpio_ddr, 0xFFFF)
set_reg(gpio_base+gpio_ddr+2, 0xFFFF)
# Set CTRL to Debug #0 ( A is for debug 0, F is for debug 1 )
set_reg(gpio_base+gpio_ctrl_lo, 0xAAAA)
set_reg(gpio_base+gpio_ctrl_lo+2, 0xAAAA)
set_reg(gpio_base+gpio_ctrl_hi, 0xAAAA)
set_reg(gpio_base+gpio_ctrl_hi+2, 0xAAAA)
jpn--/pines | pines/zipdir.py | Python | mit | 3,774 | 0.037096
#!/usr/bin/env python
import os
import zipfile
import hashlib
def _rec_split(s):
rest, tail = os.path.split(s)
if rest in ('', os.path.sep):
return tail,
return _rec_split(rest) + (tail,)
def _any_dot(s):
for i in _rec_split(s):
if len(i)>0 and i[0]=='.':
return True
return False
def _zipdir(path, ziph, skip_dots=True, extra_layer=True):
# ziph is zipfile handle
keep_dots = not skip_dots
for root, dirs, files in os.walk(path):
folder = os.path.basename(root)
if keep_dots or not _any_dot(folder):
print('zipping folder:', folder, "in", root)
for file in files:
if keep_dots or not _any_dot(file):
ziph.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), os.path.join(path, '..' if extra_layer else '.')))
else:
print('not zipping folder:', folder, "in", root)
def zipdir(source_dir, zip_file_name=None, skip_dots=True, extra_layer=False):
"""
Parameters
----------
source_dir
zip_file_name : str
If not given, uses the name of the sourcedir.
skip_dots : bool, defaults True
Ignore files and dirs that start with a dot.
Returns
-------
str
zip_file_name
"""
if zip_file_name is None:
if source_dir[-1] in ('/', '\\'):
usepath = source_dir[:-1]
else:
usepath = source_dir
zip_file_name = usepath + '.zip'
with zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) as zipf:
_zipdir(source_dir, zipf, skip_dots=skip_dots, extra_layer=extra_layer)
return zip_file_name
def zipmod(module, zip_file_name, skip_dots=True):
"""
Create a zipfile from a module
Parameters
----------
module
zip_file_name
skip_dots
Returns
-------
"""
with zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) as zipf:
_zipdir(module.__path__[0], zipf, skip_dots=skip_dots)
def zipmod_temp(module, skip_dots=True):
import tempfile
tempdir = tempfile.TemporaryDirectory()
zip_file_name = os.path.join(tempdir.name, module.__name__+".zip")
zipmod(module, zip_file_name, skip_dots=skip_dots)
return zip_file_name, tempdir
def make_hash_file(fname):
hash256 = hashlib.sha256()
if fname[-3:]=='.gz':
import gzip
with gzip.open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash256.update(chunk)
h = hash256.hexdigest()
with open(fname[:-3] + ".sha256.txt", "w") as fh:
fh.write(h)
else:
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash256.update(chunk)
h = hash256.hexdigest()
with open( fname+".sha256.txt" , "w") as fh:
fh.write(h)
def verify_hash_file(fname, hash_dir=None, max_retries=5):
hash256 = hashlib.sha256()
retries = 0
while retries < max_retries:
try:
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash256.update(chunk)
except PermissionError:
import time
time.sleep(5)
retries += 1
except:
raise
else:
break
h = hash256.hexdigest()
if hash_dir is None:
with open( fname+".sha256.txt" , "r") as fh:
h_x = fh.read()
else:
with open( os.path.join(hash_dir, os.path.basename(fname)+".sha256.txt" ) , "r") as fh:
h_x = fh.read()
if h != h_x:
if hash_dir:
raise ValueError(f"bad hash on {fname} with hash_dir={hash_dir}")
else:
raise ValueError(f"bad hash on {fname}")
def gzip_dir(source_dir, pattern="*.*", make_hash=True, exclude=".sha256.txt"):
"""Individually gzip every file matching pattern in source_dir."""
import gzip, glob
import shutil, os
for f in glob.glob(os.path.join(source_dir, pattern)):
if exclude in f:
continue # don't re-gzip the hash files by default
if make_hash:
make_hash_file(f)
if f[-3:]!='.gz':
with open(f, 'rb') as f_in:
with gzip.open(f + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(f)
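# A small, hedged usage sketch for the helpers above; the directory and file names
# are made up for the example.
import tempfile
workdir = tempfile.mkdtemp()
with open(os.path.join(workdir, 'example.txt'), 'w') as fh:
    fh.write('hello')
archive = zipdir(workdir)    # creates '<workdir>.zip' next to the directory
make_hash_file(archive)      # writes '<workdir>.zip.sha256.txt'
verify_hash_file(archive)    # raises ValueError if the archive no longer matches its hash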
munikes/loteria_lgb | loteria/web/views.py | Python | agpl-3.0 | 891 | 0.001122
from django.views import generic
from django.core.urlresolvers import reverse_lazy
from .models import LotteryUser
class LotteryUserList(generic.ListView):
template_name = 'index.html'
context_object_name = 'number_list'
def get_queryset(self):
"""Return all numbers"""
return LotteryUser.objects.all().order_by('number')
class LotteryUserCreate(generic.edit.CreateView):
model = LotteryUser
fields = ['name', 'number']
template_name_suffix = '_create_form'
success_url = reverse_lazy('lotteryuser-list')
class LotteryUserUpdate(generic.edit.UpdateView):
model = LotteryUser
fields = ['name', 'number', 'prize']
template_name_suffix = '_update_form'
success_url = reverse_lazy('lotteryuser-list')
class LotteryUserDelete(generic.edit.DeleteView):
model = LotteryUser
success_url = reverse_lazy('lotteryuser-list')
NextHub/drf-expander | rest_framework_expander/optimizers.py | Python | isc | 5,150 | 0.001748
from collections import OrderedDict
from copy import deepcopy
from django.utils import six
from rest_framework.utils.serializer_helpers import BindingDict
from rest_framework_expander import utils
from rest_framework_expander.exceptions import ExpanderContextMissing
class ExpanderOptimizer(object):
"""
Provides a minimal class for implementing optimizations.
"""
_creation_counter = 0
def __init__(self, adapter=None):
self._creation_counter = ExpanderOptimizer._creation_counter
ExpanderOptimizer._creation_counter += 1
self.parent = None
self.adapter = adapter
def bind(self, parent, field_name):
self.parent = parent
self.field_name = field_name
self.adapter = parent.adapter
@property
def expander(self):
if not hasattr(self, '_expander'):
if self.parent:
self._expander = self.parent.expander.children[self.field_name]
elif self.adapter:
self._expander = self.adapter.context['expander']
else:
raise ExpanderContextMissing()
return self._expander
def to_optimized_queryset(self, queryset):
"""
Performs optimizations before the queryset has been evaluated.
"""
return queryset
def to_optimized_objects(self, objects):
"""
Performs optimizations after the queryset has been evaluated.
"""
return objects
class ExpanderOptimizerSetMeta(type):
"""
Handles field declarations for ExpanderOptimizerSet.
Based on Django REST Framework's SerializerMetaclass.
"""
@classmethod
def _get_declared_optimizers(cls, bases, attrs):
optimizers = [
(optimizer_name, attrs.pop(optimizer_name))
for optimizer_name, obj in list(attrs.items())
if isinstance(obj, ExpanderOptimizer)
]
optimizers.sort(key=lambda x: x[1]._creation_counter)
for base in reversed(bases):
if hasattr(base, '_declared_optimizers'):
optimizers = list(base._declared_optimizers.items()) + optimizers
return OrderedDict(optimizers)
def __new__(cls, name, bases, attrs):
attrs['_declared_optimizers'] = cls._get_declared_optimizers(bases, attrs)
return super(ExpanderOptimizerSetMeta, cls).__new__(cls, name, bases, attrs)
@six.add_metaclass(ExpanderOptimizerSetMeta)
class ExpanderOptimizerSet(ExpanderOptimizer):
"""
Provides a minimal class for combining several optimizers.
"""
def get_optimizers(self):
return deepcopy(self._declared_optimizers)
@property
def optimizers(self):
if not hasattr(self, '_optimizers'):
self._optimizers = BindingDict(self)
for key, value in six.iteritems(self.get_optimizers()):
self._optimizers[key] = value
return self._optimizers
def to_optimized_queryset(self, queryset):
for name, optimizer in six.iteritems(self.optimizers):
if name in self.expander.children:
queryset = optimizer.to_optimized_queryset(queryset)
return queryset
def to_optimized_objects(self, objects):
for name, optimizer in six.iteritems(self.optimizers):
if name in self.expander.children:
objects = optimizer.to_optimized_objects(objects)
return objects
class PrefetchExpanderOptimizerSet(ExpanderOptimizerSet):
"""
ExpanderOptimizerSet which defaults to calling prefetch related.
"""
def get_optimizers(self):
optimizers = deepcopy(self._declared_optimizers)
for name in self.expander.children.keys():
if name not in optimizers:
optimizers[name] = PrefetchExpanderOptimizerSet()
return optimizers
def to_optimized_queryset(self, queryset):
if hasattr(queryset, 'model'):
source_path = utils.get_serializer_source_path(self.expander.serializer)
source_name = utils.get_model_source_name(source_path, queryset.model)
if source_name:
queryset = queryset.prefetch_related(source_name)
return super(PrefetchExpanderOptimizerSet, self).to_optimized_queryset(queryset)
class SelectExpanderOptimizerSet(ExpanderOptimizerSet):
"""
ExpanderOptimizerSet which defaults to calling select related.
"""
def get_optimizers(self):
optimizers = deepcopy(self._declared_optimizers)
for name in self.expander.children.keys():
if name not in optimizers:
optimizers[name] = SelectExpanderOptimizerSet()
return optimizers
def to_optimized_queryset(self, queryset):
if hasattr(queryset, 'model'):
source_path = utils.get_serializer_source_path(self.expander.serializer)
source_name = utils.get_model_source_name(source_path, queryset.model)
if source_name:
queryset = queryset.select_related(source_name)
return super(SelectExpanderOptimizerSet, self).to_optimized_queryset(queryset)
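# A hedged sketch of how these optimizer sets might be declared; the 'author' and
# 'comments' relation names are hypothetical, and wiring the set to a serializer
# depends on the expander/adapter context, which this sample does not show.
class PostExpanderOptimizerSet(ExpanderOptimizerSet):
    # Single-valued relation: follow it with select_related() when it is expanded.
    author = SelectExpanderOptimizerSet()
    # Multi-valued relation: fetch it with prefetch_related() when it is expanded.
    comments = PrefetchExpanderOptimizerSet()
# optimizer = PostExpanderOptimizerSet(adapter=adapter)  # adapter supplies the 'expander' context
# queryset = optimizer.to_optimized_queryset(Post.objects.all())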
le4ndro/homeinventory | homeinventory/dashboard/views.py | Python | mit | 1,435 | 0
import logging
from django.shortcuts import render
from django.http import JsonResponse
from django.db.models import Count
from homeinventory.inventory.models import Item, Category, Location, ItemLoan
logger = logging.getLogger(__name__)
def dashboard(request):
# get loans
item_loan = ItemLoan.objects \
.filter(item__user=request.user, returned=False) \
.order_by('-when')[:5]
logger.debug(item_loan)
# get next warranty expiring items
item_warranty = Item.objects.filter(user=request.user, warranty=True) \
.order_by('warranty_expiration')[:5]
logger.debug(item_warranty)
return render(request,
'dashboard/dashboard.html',
{'item_loan': item_loan, 'item_warranty': item_warranty})
def total_item_by_category(request):
q = Category.objects.filter(user=request.user) \
.annotate(data=Count('item')) \
.values('name', 'data').filter(data__gt=0)
logger.debug(q)
q_list = list(q)
return JsonResponse(q_list, safe=False)
def total_item_by_location(request):
q = Location.objects.filter(user=request.user) \
.annotate(data=Count('item')) \
.values('name', 'data').filter(data__gt=0)
logger.debug(q)
q_list = list(q)
return JsonResponse(q_list, safe=False)
philipn/localwiki-geocode-pagenames | geocode_pagenames/utils.py | Python | mit | 688 | 0
MAX_RESULTS_PER_PAGE = 100
def all(listf, **kwargs):
"""
Simple generator to page through all results of function `listf`.
"""
if not kwargs.get('limit'):
kwargs['limit'] = MAX_RESULTS_PER_PAGE
resp = listf(**kwargs)
for obj in resp['objects']:
yield obj
while resp['meta']['next']:
limit = resp['meta']['limit']
offset = resp['meta']['offset']
resp = listf(offset=(offset + limit), limit=limit)
for obj in resp['objects']:
yield obj
def clean_pagename(name):
# Pagenames can't contain a slash with spaces surrounding it.
name = '/'.join([part.strip() for part in name.split('/')])
return name
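# A self-contained, hedged sketch of the pager above: `fake_list` stands in for a
# real list endpoint and simply serves pages of integers.
def fake_list(limit=None, offset=0, **kwargs):
    items = list(range(5))
    if limit is None:
        limit = len(items)
    return {'objects': items[offset:offset + limit],
            'meta': {'limit': limit, 'offset': offset,
                     'next': offset + limit < len(items)}}
print(list(all(fake_list, limit=3)))         # -> [0, 1, 2, 3, 4]
print(clean_pagename('Parks / Recreation'))  # -> 'Parks/Recreation'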
PirateLearner/pi | PirateLearner/blogging/db_migrate.py | Python | gpl-2.0 | 2,332 | 0.009434
from blogging.tag_lib import parse_content
from blogging.models import BlogContent, BlogParent, BlogContentType
import json
import os
def convert_tags(blog,tag_name,fd):
tag = {}
# tag['name'] = tag_name + '_tag'
tag['name'] = tag_name
content = parse_content(blog,tag)
if len(content) > 0:
fd.write("\nConverting "+ blog.title + "\n")
tmp = {}
# tmp[tag_name] = content
tmp['content'] = content
tag['name'] = 'pid_count_tag'
content = parse_content(blog,tag)
if len(content) > 0:
tmp['pid_count'] = content
else:
tmp['pid_count'] = '0'
fd.write(json.dumps(tmp) + "\n\n")
blog.data = json.dumps(tmp)
return True
else:
return False
def migrate():
blogs = BlogParent.objects.all()
content_type = BlogContentType.objects.get(content_type='DefaultSection')
form_filename = os.path.abspath(os.path.dirname(__file__))+"/custom/"+"migrate_sections.txt"
fd = os.fdopen(os.open(form_filename,os.O_CREAT| os.O_RDWR , 0555),'w')
for blog in blogs:
if(convert_tags(blog, 'Body', fd)):
blog.content_type = content_type
blog.save()
continue
elif (convert_tags(blog, 'content', fd)):
blog.content_type = content_type
blog.save()
continue
elif(convert_tags(blog, 'Content', fd)):
blog.content_type = content_type
blog.save()
continue
elif(convert_tags(blog, 'Summary', fd)):
blog.content_type = content_type
blog.save()
continue
elif(convert_tags(blog, 'Preface', fd)):
blog.content_type = content_type
blog.save()
continue
else:
print "NO TAGs FOUND in " + blog.title
tmp = {}
tmp['content'] = blog.data
tmp['pid_count'] = '0'
fd.write("\nAdding "+ blog.title + "\n")
fd.write(json.dumps(tmp) + "\n\n")
blog.data = json.dumps(tmp)
blog.content_type = content_type
print " Going to save " , blog , blog.content_type
blog.save()
fd.close()
if __name__ == "__main__":
migrate()
silenius/amnesia | amnesia/modules/folder/views/__init__.py | Python | bsd-2-clause | 235 | 0
# -*- coding: utf-8 -*-
from .browser import FolderBrowserView
def includeme(config):
config.include('.order')
config.include('.admin')
config.include('.browser')
config.include('.crud')
config.include('.paste')
voutilad/courtlistener | cl/corpus_importer/import_columbia/convert_columbia_html.py | Python | agpl-3.0 | 2,132 | 0.007036
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 15 16:32:17 2016
@author: elliott
"""
import re
def convert_columbia_html(text):
conversions = [('italic', 'em'),
('block_quote', 'blockquote'),
('bold', 'strong'),
('underline', 'u'),
('strikethrough', 'strike'),
('superscript', 'sup'),
('subscript', 'sub'),
('heading', 'h3'),
('table', 'pre')]
for (pattern, replacement) in conversions:
text = re.sub('<'+pattern+'>', '<'+replacement+'>', text)
text = re.sub('</'+pattern+'>', '</'+replacement+'>', text)
# grayed-out page numbers
text = re.sub('<page_number>', ' <span class="star-pagination">*', text)
text = re.sub('</page_number>', '</span> ', text)
# footnotes
foot_references = re.findall('<footnote_reference>.*?</footnote_reference>', text)
for ref in foot_references:
try:
fnum = re.search('[\*\d]+', ref).group()
except AttributeError:
fnum = re.search('\[fn(.+)\]', ref).group(1)
rep = '<sup id="ref-fn%s"><a href="#fn%s">%s</a></sup>' % (fnum, fnum, fnum)
text = text.replace(ref, rep)
foot_numbers = re.findall('<footnote_number>.*?</footnote_number>',text)
for ref in foot_numbers:
try:
fnum = re.search('[\*\d]+', ref).group()
except:
fnum = re.search('\[fn(.+)\]', ref).group(1)
rep = r'<sup id="fn%s"><a href="#ref-fn%s">%s</a></sup>' % (fnum, fnum, fnum)
text = text.replace(ref, rep)
# Make nice paragraphs. This replaces double newlines with paragraphs, then
# nests paragraphs inside blockquotes, rather than vice versa. The former
# looks good. The latter is bad.
text = '<p>' + text + '</p>'
text = re.sub('</blockquote>\s*<blockquote>', '\n\n', text)
text = re.sub('\n\n', '</p>\n<p>', text)
text = re.sub('<p>\s*<blockquote>', '<blockquote><p>', text, flags=re.M)
text = re.sub('</blockquote></p>', '</p></blockquote>', text, flags=re.M)
return text
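# A short, hedged round trip of the converter above on made-up Columbia-style markup;
# the expected output in the comments follows directly from the substitutions above.
sample = '<bold>Syllabus</bold>\n\nSee footnote.<footnote_reference>[fn1]</footnote_reference>'
print(convert_columbia_html(sample))
# -> <p><strong>Syllabus</strong></p>
#    <p>See footnote.<sup id="ref-fn1"><a href="#fn1">1</a></sup></p>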
bwduncan/Suncalendar | Sun.py | Python | gpl-2.0 | 19,058 | 0.00105
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
SUNRISET.C - computes Sun rise/set times, start/end of twilight, and
the length of the day at any date and latitude
Written as DAYLEN.C, 1989-08-16
Modified to SUNRISET.C, 1992-12-01
(c) Paul Schlyter, 1989, 1992
Released to the public domain by Paul Schlyter, December 1992
Direct conversion to Java
Sean Russell <ser@germane-software.com>
Conversion to Python Class, 2002-03-21
Henrik Härkönen <radix@kortis.to>
Solar Altitude added by Miguel Tremblay 2005-01-16
Solar flux, equation of time and import of python library
added by Miguel Tremblay 2007-11-22
2007-12-12 - v1.5 by Miguel Tremblay: bug fix to solar flux calculation
"""
__all__ = ['SUN_PY_VERSION', 'Sun']
SUN_PY_VERSION = 1.5
import math
import calendar
class Sun:
# Following are some macros around the "workhorse" function __daylen
# They mainly fill in the desired values for the reference altitude
# below the horizon, and also selects whether this altitude should
# refer to the Sun's center or its upper limb.
@classmethod
def dayLength(cls, year, month, day, lon, lat):
"""
This macro computes the length of the day, from sunrise to sunset.
Sunrise/set is considered to occur when the Sun's upper limb is
35 arc minutes below the horizon (this accounts for the refraction
of the Earth's atmosphere).
"""
return cls.__daylen(year, month, day, lon, lat, -35.0 / 60.0, 1)
@classmethod
def dayCivilTwilightLength(cls, year, month, day, lon, lat):
"""
This macro computes the length of the day, including civil twilight.
Civil twilight starts/ends when the Sun's center is 6 degrees below
the horizon.
"""
return cls.__daylen(year, month, day, lon, lat, -6.0, 0)
@classmethod
def dayNauticalTwilightLength(cls, year, month, day, lon, lat):
"""
This macro computes the length of the day, incl. nautical twilight.
Nautical twilight starts/ends when the Sun's center is 12 degrees
below the horizon.
"""
return cls.__daylen(year, month, day, lon, lat, -12.0, 0)
@classmethod
def dayAstronomicalTwilightLength(cls, year, month, day, lon, lat):
"""
This macro computes the length of the day, incl. astronomical twilight.
Astronomical twilight starts/ends when the Sun's center is 18 degrees
below the horizon.
"""
return cls.__daylen(year, month, day, lon, lat, -18.0, 0)
@classmethod
def sunRiseSet(cls, year, month, day, lon, lat):
"""
This macro computes times for sunrise/sunset.
Sunrise/set is considered to occur when the Sun's upper limb is
35 arc minutes below the horizon (this accounts for the refraction
of the Earth's atmosphere).
"""
return cls.__sunriset(year, month, day, lon, lat, -35.0 / 60.0, 1)
@classmethod
def aviationTime(cls, year, month, day, lon, lat):
"""
This macro computes the start and end times as considered by UK law.
First launch is 30 minutes before sunrise and last landing is 30
minutes after sunset.
"""
r, s = cls.__sunriset(year, month, day, lon, lat, -35.0 / 60.0, 1)
return r - 0.5, s + 0.5
@classmethod
def civilTwilight(cls, year, month, day, lon, lat):
"""
This macro computes the start and end times of civil twilight.
Civil twilight starts/ends when the Sun's center is 6 degrees below
the horizon.
"""
return cls.__sunriset(year, month, day, lon, lat, -6.0, 0)
@classmethod
def nauticalTwilight(cls, year, month, day, lon, lat):
"""
This macro computes the start and end times of nautical twilight.
Nautical twilight starts/ends when the Sun's center is 12 degrees
below the horizon.
"""
return cls.__sunriset(year, month, day, lon, lat, -12.0, 0)
@classmethod
def astronomicalTwilight(cls, year, month, day, lon, lat):
"""
This macro computes the start and end times of astronomical twilight.
Astronomical twilight starts/ends when the Sun's center is 18 degrees
below the horizon.
"""
return cls.__sunriset(year, month, day, lon, lat, -18.0, 0)
# The "workhorse" function for sun rise/set times
@classmethod
def __sunriset(cls, year, month, day, lon, lat, altit, upper_limb):
"""
Note: year,month,date = calendar date, 1801-2099 only.
Eastern longitude positive, Western longitude negative
Northern latitude positive, Southern latitude negative
The longitude value IS critical in this function!
altit = the altitude which the Sun should cross
Set to -35/60 degrees for rise/set, -6 degrees
for civil, -12 degrees for nautical and -18
degrees for astronomical twilight.
upper_limb: non-zero -> upper limb, zero -> center
Set to non-zero (e.g. 1) when computing rise/set
times, and to zero when computing start/end of
twilight.
*rise = where to store the rise time
*set = where to store the set time
Both times are relative to the specified altitude,
and thus this function can be used to compute
various twilight times, as well as rise/set times
Return value: 0 = sun rises/sets this day, times stored at
*trise and *tset.
+1 = sun above the specified 'horizon' 24 hours.
*trise set to time when the sun is at south,
minus 12 hours while *tset is set to the south
time plus 12 hours. 'Day' length = 24 hours
-1 = sun is below the specified 'horizon' 24 hours
'Day' length = 0 hours, *trise and *tset are
both set to the time when the sun is at south.
"""
# Compute d of 12h local mean solar time
d = cls.__daysSince2000Jan0(year, month, day) + 0.5 - (lon / 360.0)
# Compute local sidereal time of this moment
sidtime = cls.__revolution(cls.__GMST0(d) + 180.0 + lon)
# Compute Sun's RA + Decl at this moment
sRA, sdec, sr = cls.__sunRADec(d)
# Compute time when Sun is at south - in hours UT
tsouth = 12.0 - cls.__rev180(sidtime - sRA) / 15.0
# Compute the Sun's apparent radius, degrees
sradius = 0.2666 / sr
# Do correction to upper limb, if necessary
if upper_limb:
altit = altit - sradius
# Compute the diurnal arc that the Sun traverses to reach
# the specified altitude altit:
cost = (cls.__sind(altit) - cls.__sind(lat) * cls.__sind(sdec)) / \
(cls.__cosd(lat) * cls.__cosd(sdec))
if cost >= 1.0:
t = 0.0 # Sun always below altit
elif cost <= -1.0:
t = 12.0 # Sun always above altit
else:
t = cls.__acosd(cost) / 15.0 # The diurnal arc, hours
# Store rise and set times - in hours UT
return (tsouth - t, tsouth + t)
@classmethod
def __daylen(cls, year, month, day, lon, lat, altit, upper_limb):
"""
Note: year,month,date = calendar date, 1801-2099 only.
Eastern longitude positive, Western longitude negative
Northern latitude positive, Southern latitude negative
The longitude value is not critical. Set it to the correct
longitude if you're picky, otherwise set to, say, 0.0
The latitude however IS critical - be sure to get it correct
altit = the altitude which the Sun should cross
Set to -35/60 degrees for rise/set, -6 degrees
for civil, -12 degrees for nautical and
alexanderfefelov/nav | python/nav/metrics/errors.py | Python | gpl-2.0 | 926 | 0
#
# Copyright (C) 2014 UNINETT
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Graphite related exception classes"""
class GraphiteUnreachableError(Exception):
"""The graphite-web API is unreachable"""
def __init__(self, msg, cause=None):
super(GraphiteUnreachableError, self).__init__(msg + " (%s)" % cause)
self.cause = cause
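# A tiny, hedged sketch of how a caller might raise and handle this exception; the
# message and cause below are invented for the example.
try:
    raise GraphiteUnreachableError('graphite-web did not respond', cause=OSError('timed out'))
except GraphiteUnreachableError as err:
    print(err)        # -> graphite-web did not respond (timed out)
    print(err.cause)  # -> timed out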
Kortemme-Lab/klab | klab/fcm/fcm.py | Python | mit | 18,877 | 0.00731
#!/usr/bin/python
blank_datafile = '/home/kyleb/Dropbox/UCSF/cas9/FCS/150916-3.1/kyleb/150916-rfp-cas9/96 Well - Flat bottom_002/Specimen_001_F1_F01_046.fcs'
script_output_dir = 'script_output'
sample_directory = '/home/kyleb/Dropbox/UCSF/cas9/FCS/150916-3.1/kyleb/150916-rfp-cas9/96 Well - Flat bottom_002'
rows_in_plate = 'ABCDEFGH'
cols_in_plate = list(range(1, 13))
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from FlowCytometryTools import FCMeasurement, PolyGate, ThresholdGate
import os, FlowCytometryTools
import pylab as P
import numpy as np
import scipy
use_multiprocessing = True
if use_multiprocessing:
import multiprocessing as mp
class PlatePos:
def __init__ (self, plate_position_str):
self.row = plate_position_str[0]
assert( self.row in rows_in_plate )
self.col = int(plate_position_str[1:])
# Returns the next position on the plate
@property
def next_pos(self):
if self.row_index == len(rows_in_plate)-1:
if self.col == cols_in_plate[-1]:
return None
if self.col == cols_in_plate[-1]:
next_pos_row = rows_in_plate[ self.row_index+1 ]
next_pos_col = 1
else:
next_pos_row = self.row
next_pos_col = self.col + 1
return PlatePos( '%s%d' % (next_pos_row, next_pos_col) )
@property
def row_index(self):
return rows_in_plate.index(self.row)
def __repr__(self):
return '%s%02d' % (self.row, self.col)
def __lt__ (self, other):
if self.row == other.row:
return self.col < other.col
else:
return self.row < other.row
def __hash__(self):
return hash( str(self) )
def __eq__(self, other):
return self.row == other.row and self.col == other.col
def __ne__(self, other):
return not self.__eq__(other)
class PlateInfo:
def __init__ (self, name, value, new_positions):
self.name = name
if value == None:
self.value = np.nan
else:
self.value = value
self.positions = []
if isinstance(new_positions, list):
for new_position_range in new_positions:
self.add_position_range(new_position_range)
elif isinstance(new_positions, str):
self.add_position_range(new_positions)
else:
raise Exception('Input new positions must be a list or string')
def add_position_range(self, pos_range):
if '-' in pos_range:
first_pos_str, second_pos_str = pos_range.split('-')
first_pos = PlatePos(first_pos_str)
second_pos = PlatePos(second_pos_str)
first_pos_char_index = rows_in_plate.index(first_pos.row)
second_pos_char_index = rows_in_plate.index(second_pos.row)
for char_index in range(first_pos_char_index, second_pos_char_index + 1):
row = rows_in_plate[char_index]
for col in range(first_pos.col, second_pos.col + 1):
self.add_position( '%s%d' % (row, col) )
else:
self.add_position(pos_range)
def add_position(self, pos_str):
pos = PlatePos(pos_str)
if pos not in self.positions:
self.positions.append(pos)
self.positions.sort()
@property
def position_set(self):
return_set = set()
for pos in self.positions:
return_set.add(pos)
return return_set
def __repr__(self):
return str( self.positions )
class Plate:
def __init__ (self, plate_info_list, sample_dir=None, verbose=False, name=None):
self.name = name
self.info_dict = {}
self.samples = {}
self.sample_dir = sample_dir
for plate_info in plate_info_list:
if plate_info.name not in self.info_dict:
self.info_dict[plate_info.name] = {}
assert( plate_info.value not in self.info_dict[plate_info.name] )
self.info_dict[plate_info.name][plate_info.value] = plate_info
if sample_dir != None:
self.load_fcs_dir(sample_dir, verbose=verbose)
def __repr__(self):
return str(self.info_dict)
@property
def all_position_set(self):
s = set()
for name in self.info_dict:
for value in self.info_dict[name]:
s = s.union(self.info_dict[name][value].position_set)
return s
def get_by_well(self, well_pos):
search_pos = PlatePos(well_pos)
for pos in self.all_position_set:
if pos == search_pos:
return self.samples[pos]
def parameter_values(self, parameter_name):
return sorted( self.info_dict[parameter_name].keys() )
def well_set(self, parameter_name, parameter_value=np.nan):
if parameter_name not in self.info_dict or parameter_value not in self.info_dict[parameter_name]:
return set()
else:
return self.info_dict[parameter_name][parameter_value].position_set
def single_well_from_set(self, well_set):
well_list = list(well_set)
assert( len(well_list) == 1 )
return self.samples[well_list[0]]
@property
def experimental_parameters(self):
experimental_parameters = []
for parameter_name in list(self.info_dict.keys()):
if 'blank' not in parameter_name.lower():
if len(self.info_dict[parameter_name]) == 1 and np.nan in self.info_dict[parameter_name]:
experimental_parameters.append(parameter_name)
return experimental_parameters
def gate(self, gate):
if use_multiprocessing:
pool = mp.Pool()
for pos in self.samples:
pool.apply_async(gate_data, (pos, self.samples[pos], gate), callback=self.set_gate)
pool.close()
pool.join()
else:
for pos in self.samples:
self.samples[pos] = self.samples[pos].gate(gate)
def gate_sample(self, gate, pos):
self.samples[pos] = self.samples[pos].gate(gate)
def set_gate(self, tup):
pos, fcs_data = tup
self.samples[pos] = fcs_data
def load_fcs_dir(self, sample_directory, verbose=False):
fcs_files = find_fcs_files(sample_directory)
for plate_pos, filepath in fcs_files:
assert(plate_pos not in self.samples)
self.samples[plate_pos] = FCMeasurement(ID=str(plate_pos), datafile=filepath)
if verbose:
print('Loaded %d FCS files from directory %s' % (len(fcs_files), sample_directory))
def gate_data(pos, fcs_data, gate):
return (pos, fcs_data.gate(gate))
class FCSFile:
def __init__ (self, filepath, plate_position_str):
self.filepath = filepath
self.plate_position_obj = PlatePos(plate_position_str)
@property
def plate_position(self):
return str( self.plate_position_obj )
@property
def plate_row(self):
return self.plate_position_obj.row
@property
def plate_col(self):
return self.plate_position_obj.col
def __lt__ (self, other):
return self.plate_position < other.plate_position
def __repr__(self):
return self.plate_position
def find_fcs_files(sample_directory):
fcs_files = []
for filename in os.listdir(sample_directory):
if filename.endswith('.fcs'):
full_filename = os.path.join(sample_directory, filename)
fcs_files.append( (PlatePos(filename.split('_')[2]), full_filename) )
fcs_files.sort()
return fcs_files
def ticks_format(value, index):
"""
get the value and returns the value as:
integer: [0,99]
1 digit float: [0.1, 0.99]
n*10^m: otherwise
To have all the number of the same size they are all returned as latex strings
http://stackoverflow.com/questions/17165435/matplotlib-show-labels-for-minor-ticks-also
"""
exp = np.floor(np.log10(value))
base = value/10**exp
if exp == 0 or exp == 1:
return '${0:d}$'.format(int(value))
if exp == -1:
return '${0:.1f}$'.for
zackproser/WealthEngine-Python-SDK | wealthengine_python_sdk/setup.py | Python | mit | 361 | 0.094183
from setuptools import setup
setup(name='wealthengine_python_sdk',
version='0.1',
description='A Python SDK for WealthEngine\'s Public API',
url='https://github.com/zackproser/wealthengine-python-sdk',
author='Zack Proser',
author_email='zackproser@gmail.com',
license='MIT',
packages='wealthengine_python_sdk',
zip_safe=False)
interDist/pasportaservo | core/templatetags/utils.py | Python | agpl-3.0 | 1,270 | 0.001575
import random
from hashlib import sha256
from django import template
register = template.Library()
@register.simple_tag
def random_identifier(length=None):
try:
length = int(length)
except Exception:
length = None
if length is None or length <= 0:
length = random.randint(16, 48)
return ''.join(random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123456789_')
for n in range(length))
@register.filter(is_safe=True)
def public_id(account):
try:
return sha256(str(account.pk).encode() + str(account.date_joined).encode()).hexdigest()
except Exception:
return ''
register.simple_tag(func=lambda *args: list(args), name='list')
register.simple_tag(func=lambda **kwargs: dict(kwargs), name='dict')
@register.filter(is_safe=True)
def are_any(iterable):
try:
return any(iterable)
except (ValueError, TypeError):
return bool(iterable)
@register.filter(is_safe=True)
def are_all(iterable):
try:
return all(iterable)
except (ValueError, TypeError):
return bool(iterable)
@register.filter(is_safe=False)
def mult(value, by):
try:
return value * int(by)
except (ValueError, TypeError):
return ''
suvit/scrapy-megafon-phones | megafon_phones/megafon_phones/pipelines.py | Python | mit | 662 | 0.001511
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class PhonePipeline(object):
def __init__(self):
self.file = None
def create_exporter(self, spider):
file = open('%s_data.txt' % spider.name, 'w+b')
self.file = file
def process_item(self, item, spider):
if self.file is None:
self.create_exporter(spider)
datafile = self.file
datafile.write(item['phone'])
datafile.write('\n')
return item
def close_spider(self, spider):
self.file.close()
5monkeys/django-enumfield | django_enumfield/contrib/drf.py | Python | mit | 1,336 | 0.000749
import six
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class EnumField(serializers.ChoiceField):
default_error_messages = {"invalid_choice": _('"{input}" is not a valid choice.')}
def __init__(self, enum, **kwargs):
self.enum = enum
choices = (
(self.get_choice_value(enum_value), enum_value.label)
for _, enum_value in enum.choices()
)
super(EnumField, self).__init__(choices, **kwargs)
def get_choice_value(self, enum_value):
return enum_value.value
def to_internal_value(self, data):
if isinstance(data, six.string_types) and data.isdigit():
data = int(data)
try:
value = self.enum.get(data).value
except AttributeError: # .get() returned None
if not self.required:
raise serializers.SkipField()
self.fail("invalid_choice", input=data)
return value
def to_representation(self, value):
enum_value = self.enum.get(value)
if enum_value is not None:
return self.get_choice_value(enum_value)
class NamedEnumField(EnumField):
def get_choice_value(self, enum_value):
return enum_value.name
class Meta:
swagger_schema_fields = {"type": "string"}
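# A hedged usage sketch: BeerStyle is a hypothetical django-enumfield enum, and the
# serializer below follows the ordinary DRF declaration pattern.
from django_enumfield import enum
class BeerStyle(enum.Enum):
    LAGER = 0
    STOUT = 1
class BeerSerializer(serializers.Serializer):
    style = EnumField(BeerStyle)                            # accepts/returns the integer value
    style_name = NamedEnumField(BeerStyle, required=False)  # accepts/returns the name, e.g. "LAGER"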
mjmvisser/adl3 | adl3/adl_defines.py | Python | mit | 39,284 | 0.005957
# Copyright (C) 2011 by Mark Visser <mjmvisser@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# This code is based on the AMD Display Library 3.0 SDK
ADL_TRUE = 1 # ADL_SDK_3.0/include/adl_defines.h:52
ADL_FALSE = 0 # ADL_SDK_3.0/include/adl_defines.h:55
ADL_MAX_CHAR = 4096 # ADL_SDK_3.0/include/adl_defines.h:59
ADL_MAX_PATH = 256 # ADL_SDK_3.0/include/adl_defines.h:62
ADL_MAX_ADAPTERS = 150 # ADL_SDK_3.0/include/adl_defines.h:65
ADL_MAX_DISPLAYS = 150 # ADL_SDK_3.0/include/adl_defines.h:68
ADL_MAX_DEVICENAME = 32 # ADL_SDK_3.0/include/adl_defines.h:71
ADL_ADAPTER_INDEX_ALL = -1 # ADL_SDK_3.0/include/adl_defines.h:74
ADL_MAIN_API_OPTION_NONE = 0 # ADL_SDK_3.0/include/adl_defines.h:77
ADL_DDC_OPTION_SWITCHDDC2 = 1 # ADL_SDK_3.0/include/adl_defines.h:90
ADL_DDC_OPTION_RESTORECOMMAND = 2 # ADL_SDK_3.0/include/adl_defines.h:93
ADL_DL_I2C_ACTIONREAD = 1 # ADL_SDK_3.0/include/adl_defines.h:104
ADL_DL_I2C_ACTIONWRITE = 2 # ADL_SDK_3.0/include/adl_defines.h:105
ADL_DL_I2C_ACTIONREAD_REPEATEDSTART = 3 # ADL_SDK_3.0/include/adl_defines.h:106
ADL_OK_WAIT = 4 # ADL_SDK_3.0/include/adl_defines.h:122
ADL_OK_RESTART = 3 # ADL_SDK_3.0/include/adl_defines.h:125
ADL_OK_MODE_CHANGE = 2 # ADL_SDK_3.0/include/adl_defines.h:128
ADL_OK_WARNING = 1 # ADL_SDK_3.0/include/adl_defines.h:131
ADL_OK = 0 # ADL_SDK_3.0/include/adl_defines.h:134
ADL_ERR = -1 # ADL_SDK_3.0/include/adl_defines.h:137
ADL_ERR_NOT_INIT = -2 # ADL_SDK_3.0/include/adl_defines.h:140
ADL_ERR_INVALID_PARAM = -3 # ADL_SDK_3.0/include/adl_defines.h:143
ADL_ERR_INVALID_PARAM_SIZE = -4 # ADL_SDK_3.0/include/adl_defines.h:146
ADL_ERR_INVALID_ADL_IDX = -5 # ADL_SDK_3.0/include/adl_defines.h:149
ADL_ERR_INVALID_CONTROLLER_IDX = -6 # ADL_SDK_3.0/include/adl_defines.h:152
ADL_ERR_INVALID_DIPLAY_IDX = -7 # ADL_SDK_3.0/include/adl_defines.h:155
ADL_ERR_NOT_SUPPORTED = -8 # ADL_SDK_3.0/include/adl_defines.h:158
ADL_ERR_NULL_POINTER = -9 # ADL_SDK_3.0/include/adl_defines.h:161
ADL_ERR_DISABLED_ADAPTER = -10 # ADL_SDK_3.0/include/adl_defines.h:164
ADL_ERR_INVALID_CALLBACK = -11 # ADL_SDK_3.0/include/adl_defines.h:167
ADL_ERR_RESOURCE_CONFLICT = -12 # ADL_SDK_3.0/include/adl_defines.h:170
ADL_DT_MONITOR = 0 # ADL_SDK_3.0/include/adl_defines.h:185
ADL_DT_TELEVISION = 1 # ADL_SDK_3.0/include/adl_defines.h:188
ADL_DT_LCD_PANEL = 2 # ADL_SDK_3.0/include/adl_defines.h:191
ADL_DT_DIGITAL_FLAT_PANEL = 3 # ADL_SDK_3.0/include/adl_defines.h:194
ADL_DT_COMPONENT_VIDEO = 4 # ADL_SDK_3.0/include/adl_defines.h:197
ADL_DT_PROJECTOR = 5 # ADL_SDK_3.0/include/adl_defines.h:200
ADL_DOT_UNKNOWN = 0 # ADL_SDK_3.0/include/adl_defines.h:210
ADL_DOT_COMPOSITE = 1 # ADL_SDK_3.0/include/adl_defines.h:213
ADL_DOT_SVIDEO = 2 # ADL_SDK_3.0/include/adl_defines.h:216
ADL_DOT_ANALOG = 3 # ADL_SDK_3.0/include/adl_defines.h:219
ADL_DOT_DIGITAL = 4 # ADL_SDK_3.0/include/adl_defines.h:222
ADL_DISPLAY_COLOR_BRIGHTNESS = 1 # ADL_SDK_3.0/include/adl_defines.h:232
ADL_DISPLAY_COLOR_CONTRAST = 2 # ADL_SDK_3.0/include/adl_defines.h:233
ADL_DISPLAY_COLOR_SATURATION = 4 # ADL_SDK_3.0/include/adl_defines.h:234
ADL_DISPLAY_COLOR_HUE = 8 # ADL_SDK_3.0/include/adl_defines.h:235
ADL_DISPLAY_COLOR_TEMPERATURE = 16 # ADL_SDK_3.0/include/adl_defines.h:236
ADL_DISPLAY_COLOR_TEMPERATURE_SOURCE_EDID = 32 # ADL_SDK_3.0/include/adl_defines.h:240
ADL_DISPLAY_COLOR_TEMPERATURE_SOURCE_USER = 64 # ADL_SDK_3.0/include/adl_defines.h:243
ADL_DISPLAY_ADJUST_OVERSCAN = 1 # ADL_SDK_3.0/include/adl_defines.h:253
ADL_DISPLAY_ADJUST_VERT_POS = 2 # ADL_SDK_3.0/include/adl_defines.h:254
ADL_DISPLAY_ADJUST_HOR_POS = 4 # ADL_SDK_3.0/include/adl_defines.h:255
ADL_DISPLAY_ADJUST_VERT_SIZE = 8 # ADL_SDK_3.0/include/adl_defines.h:256
ADL_DISPLAY_ADJUST_HOR_SIZE = 16 # ADL_SDK_3.0/include/adl_defines.h:257
ADL_DISPLAY_ADJUST_SIZEPOS = 30 # ADL_SDK_3.0/include/adl_defines.h:258
ADL_DISPLAY_CUSTOMMODES = 32 # ADL_SDK_3.0/include/adl_defines.h:259
ADL_DISPLAY_ADJUST_UNDERSCAN = 64 # ADL_SDK_3.0/include/adl_defines.h:260
ADL_DESKTOPCONFIG_UNKNOWN = 0 # ADL_SDK_3.0/include/adl_defines.h:271
ADL_DESKTOPCONFIG_SINGLE = 1 # ADL_SDK_3.0/include/adl_defines.h:272
ADL_DESKTOPCONFIG_CLONE = 4 # ADL_SDK_3.0/include/adl_defines.h:273
ADL_DESKTOPCONFIG_BIGDESK_H = 16 # ADL_SDK_3.0/include/adl_defines.h:274
ADL_DESKTOPCONFIG_BIGDESK_V = 32 # ADL_SDK_3.0/include/adl_defines.h:275
ADL_DESKTOPCONFIG_BIGDESK_HR = 64 # ADL_SDK_3.0/include/adl_defines.h:276
ADL_DESKTOPCONFIG_BIGDESK_VR = 128 # ADL_SDK_3.0/include/adl_defines.h:277
ADL_DESKTOPCONFIG_RANDR12 = 256 # ADL_SDK_3.0/include/adl_defines.h:278
ADL_MAX_DISPLAY_NAME = 256 # ADL_SDK_3.0/include/adl_defines.h:284
ADL_DISPLAYDDCINFOEX_FLAG_PROJECTORDEVICE = 1 # ADL_SDK_3.0/include/adl_defines.h:292
ADL_DISPLAYDDCINFOEX_FLAG_EDIDEXTENSION = 2 # ADL_SDK_3.0/include/adl_defines.h:293
ADL_DISPLAYDDCINFOEX_FLAG_DIGITALDEVICE = 4 # ADL_SDK_3.0/include/adl_defines.h:294
ADL_DISPLAYDDCINFOEX_FLAG_HDMIAUDIODEVICE = 8 # ADL_SDK_3.0/include/adl_defines.h:295
ADL_DISPLAYDDCINFOEX_FLAG_SUPPORTS_AI = 16 # ADL_SDK_3.0/include/adl_defines.h:296
ADL_DISPLAYDDCINFOEX_FLAG_SUPPORT_xvYCC601 = 32 # ADL_SDK_3.0/include/adl_defines.h:297
ADL_DISPLAYDDCINFOEX_FLAG_SUPPORT_xvYCC709 = 64 # ADL_SDK_3.0/include/adl_defines.h:298
ADL_DISPLAY_CONTYPE_UNKNOWN = 0 # ADL_SDK_3.0/include/adl_defines.h:308
ADL_DISPLAY_CONTYPE_VGA = 1 # ADL_SDK_3.0/include/adl_defines.h:309
ADL_DISPLAY_CONTYPE_DVI_D = 2 # ADL_SDK_3.0/include/adl_defines.h:310
ADL_DISPLAY_CONTYPE_DVI_I = 3 # ADL_SDK_3.0/include/adl_defines.h:311
ADL_DISPLAY_CONTYPE_ATICVDONGLE_NTSC = 4 # ADL_SDK_3.0/include/adl_defines.h:312
ADL_DISPLAY_CONTYPE_ATICVDONGLE_JPN = 5 # ADL_SDK_3.0/include/adl_defines.h:313
ADL_DISPLAY_CONTYPE_ATICVDONGLE_NONI2C_JPN = 6 # ADL_SDK_3.0/include/adl_defines.h:314
ADL_DISPLAY_CONTYPE_ATICVDONGLE_NONI2C_NTSC = 7 # ADL_SDK_3.0/include/adl_defines.h:315
ADL_DISPLAY_CONTYPE_HDMI_TYPE_A = 10 # ADL_SDK_3.0/include/adl_defines.h:316
ADL_DISPLAY_CONTYPE_HDMI_TYPE_B = 11 # ADL_SDK_3.0/include/adl_defines.h:317
ADL_DISPLAY_CONTYPE_SVIDEO = 12 # ADL_SDK_3.0/include/adl_defines.h:318
ADL_DISPLAY_CONTYPE_COMPOSITE = 13 # ADL_SDK_3.0/include/adl_defines.h:319
ADL_DISPLAY_CONTYPE_RCA_3COMPONENT = 14 # ADL_SDK_3.0/include/adl_defines.h:320
ADL_DISPLAY_CONTYPE_DISPLAYPORT = 15 # ADL_SDK_3.0/include/adl_defines.h:321
ADL_TV_STANDARDS = 1 # ADL_SDK_3.0/include/adl_defines.h:331
ADL_TV_SCART = 2 # ADL_SDK_3.0/include/adl_defines.h:332
ADL_STANDARD_NTSC_M = 1 # ADL_SDK_3.0/include/adl_defines.h:336
ADL_STANDARD_NTSC_JPN = 2 # ADL_SDK_3.0/include/adl_defines.h:337
ADL_STANDARD_NTSC_N = 4 # ADL_SDK_3.0/include/adl_defines.h:338
ADL_STANDARD_PAL_B = 8 # ADL_SDK_3.0/include/adl_defines.h:339
ADL_STANDARD_PAL_COMB_N = 16 # ADL_SDK_3.0/include/adl_defines.h:340
ADL_STANDARD_PAL_D = 32 # ADL_SDK_3.0/include/adl_defines.h:341
ADL_STANDARD
|
videlec/sage-flatsurf
|
flatsurf/geometry/straight_line_trajectory.py
|
Python
|
gpl-2.0
| 31,149
| 0.003307
|
from __future__ import absolute_import, print_function, division
from six.moves import range, map, filter, zip
from six import iteritems
from collections import deque, defaultdict
from .polygon import is_same_direction, line_intersection
from .surface_objects import SaddleConnection
# Vincent question:
# using deque has the disadvantage of losing the initial points
# ideally doing
# my_line[i]
# we should always access the same element
# I wanted to be able to flow backward thus inserting at the beginning of a list.
# Perhaps it would be better to model this on a deque-like class that is indexed by
# all integers rather than just the non-negative ones? Do you know of such
# a class? Alternately, we could store an offset.
def get_linearity_coeff(u, v):
r"""
Given the two 2-dimensional vectors ``u`` and ``v``, return ``a`` so that
``v = a*u``
If the vectors are not colinear, a ``ValueError`` is raised.
EXAMPLES::
sage: from flatsurf.geometry.straight_line_trajectory import get_linearity_coeff
sage: V = VectorSpace(QQ,2)
sage: get_linearity_coeff(V((1,0)), V((2,0)))
2
sage: get_linearity_coeff(V((2,0)), V((1,0)))
1/2
sage: get_linearity_coeff(V((0,1)), V((0,2)))
2
sage: get_linearity_coeff(V((0,2)), V((0,1)))
1/2
sage: get_linearity_coeff(V((1,2)), V((-2,-4)))
-2
sage: get_linearity_coeff(V((1,1)), V((-1,1)))
Traceback (most recent call last):
...
ValueError: non colinear
"""
if u[0]:
a = v[0]/u[0]
if v[1] != a*u[1]:
raise ValueError("non colinear")
return a
elif v[0]:
raise ValueError("non colinear")
elif u[1]:
return v[1]/u[1]
else:
raise ValueError("zero vector")
class SegmentInPolygon:
r"""
Maximal segment in a polygon of a similarity surface
EXAMPLES::
sage: from flatsurf import *
sage: from flatsurf.geometry.straight_line_trajectory import SegmentInPolygon
sage: s = similarity_surfaces.example()
sage: v = s.tangent_vector(0, (1/3,-1/4), (0,1))
sage: SegmentInPolygon(v)
Segment in polygon 0 starting at (1/3, -1/3) and ending at (1/3, 0)
"""
def __init__(self, start, end=None):
if not end is None:
# WARNING: here we assume that both start and end are on the
# boundary
self._start = start
self._end = end
else:
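# Flow the given vector forward to the polygon boundary to get the end; the end vector points backward, so flowing it forward again lands on the segment's start.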
self._end = start.forward_to_polygon_boundary()
self._start = self._end.forward_to_polygon_boundary()
def __eq__(self, other):
return type(self) is type(other) and \
self._start == other._start and \
self._end == other._end
def __ne__(self, other):
return type(self) is not type(other) or \
self._start != other._start or \
self._end != other._end
def __repr__(self):
r"""
TESTS::
sage: from flatsurf import *
sage: from flatsurf.geometry.straight_line_trajectory import SegmentInPolygon
sage: s = similarity_surfaces.example()
sage: v = s.tangent_vector(0, (0,0), (3,-1))
sage: SegmentInPolygon(v)
Segment in polygon 0 starting at (0, 0) and ending at (2, -2/3)
"""
return "Segment in polygon {} starting at {} and ending at {}".format(
self.polygon_label(), self.start().point(), self.end().point())
def start(self):
r"""
Return the tangent vector associated to the start of a trajectory pointed forward.
"""
return self._start
def start_is_singular(self):
return self._start.is_based_at_singularity()
def end(self):
r"""
Return a TangentVector associated to the end of a trajectory, pointed backward.
"""
return self._end
def end_is_singular(self):
return self._end.is_based_at_singularity()
def is_edge(self):
if not self.start_is_singular() or not self.end_is_singular():
return False
vv=self.start().vector()
vertex=self.start().vertex()
ww=self.start().polygon().edge(vertex)
from flatsurf.geometry.polygon import is_same_direction
return is_same_direction(vv,ww)
def edge(self):
if not self.is_edge():
raise ValueError("Segment asked for edge when not an edge")
return self.start().vertex()
def polygon_label(self):
return self._start.polygon_label()
def invert(self):
return SegmentInPolygon(self._end, self._start)
def next(self):
r"""
Return the next segment obtained by continuing straight through the end point.
EXAMPLES::
sage: from flatsurf import *
sage: from flatsurf.geometry.straight_line_trajectory import SegmentInPolygon
sage: s = similarity_surfaces.example()
sage: s.polygon(0)
Polygon: (0, 0), (2, -2), (2, 0)
sage: s.polygon(1)
Polygon: (0, 0), (2, 0), (1, 3)
sage: v = s.tangent_vector(0, (0,0), (3,-1))
sage: seg = SegmentInPolygon(v)
sage: seg
Segment in polygon 0 starting at (0, 0) and ending at (2, -2/3)
sage: seg.next()
Segment in polygon 1 starting at (2/3, 2) and ending at (14/9, 4/3)
"""
if self.end_is_singular():
raise ValueError("Cannot continue from singularity")
return SegmentInPolygon(self._end.invert())
def previous(self):
if self.start_is_singular():
raise ValueError("Cannot continue from singularity")
return SegmentInPolygon(self._start.invert()).invert()
# DEPRECATED STUFF THAT WILL BE REMOVED
def start_point(self):
from sage.misc.superseded import deprecation
deprecation(1, "do not use start_point but start().point()")
return self._start.point()
def start_direction(self):
from sage.misc.superseded import deprecation
deprecation(1, "do not use start_direction but start().vector()")
return self._start.vector()
def end_point(self):
from sage.misc.superseded import deprecation
deprecation(1, "do not use end_point but end().point()")
return self._end.point()
def end_direction(self):
from sage.misc.superseded import deprecation
deprecation(1, "do not use end_direction but end().vector()")
return self._end.vector()
class AbstractStraightLineTrajectory:
r"""
You need to implement:
- ``def segment(self, i)``
- ``def segments(self)``
"""
def surface(self):
raise NotImplementedError
def __repr__(self):
start = self.segment(0).start()
end = self.segment(-1).end()
return "Straight line trajectory made of {} segments from {} in polygon {} to {} in polygon {}".format(
self.combinatorial_length(),
start.point(), start.polygon_label(),
end.point(), end.polygon_label())
def plot(self, *args, **options):
r"""
Plot this trajectory by converting to a graphical trajectory.
If any arguments are provided in `*args` it must be only one argument containing a GraphicalSurface.
The keyword arguments in `**options` are passed on to :func:`GraphicalStraightLineTrajectory.plot`.
EXAMPLES::
sage: from flatsurf import *
sage: T = translation_surfaces.square_torus()
sage: v = T.tangent_vector(0, (0,0), (5,7))
sage: L = v.straight_line_trajectory()
sage: L.plot() # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 1 graphics primitive
sage: L.plot(color='red') # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 1 graphics primitive
"""
if len(args) > 1:
raise ValueError("SimilaritySurfa
|
Jozhogg/iris
|
lib/iris/tests/unit/fileformats/grib/load_convert/test_time_range_unit.py
|
Python
|
lgpl-3.0
| 1,951
| 0
|
# (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test function :func:`iris.fileformats.grib._load_convert.time_range_unit.
"""
from __future__ import (absolute_import, division, print_function)
# import iris tests first so that some things can be initialised
# before importing anything else.
import iris.tests as tests
from iris.exceptions import TranslationError
from iris.fileformats.grib._load_convert import time_range_unit
from iris.unit import Unit
class Test(tests.IrisTest):
def setUp(self):
self.unit_by_indicator = {0: Unit('minutes'),
1: Unit('hours'),
2: Unit('days'),
10: Unit('3 hours'),
11: Unit('6 hours'),
12: Unit('12 hours'),
13: Unit('seconds')}
def test_units(self):
for indicator, unit in self.unit_by_indicator.items():
result = time_range_unit(indicator)
self.assertEqual(result, unit)
def test_bad_indicator(self):
emsg = 'unsupported time range'
with self.assertRaisesRegexp(TranslationError, emsg):
time_range_unit(-1)
if __name__ == '__main__':
tests.main()
|
dhcrzf/zulip
|
zerver/tests/test_create_video_call.py
|
Python
|
apache-2.0
| 1,733
| 0.001731
|
import mock
from zerver.lib.test_classes import ZulipTestCase
from typing import Dict
class TestFeedbackBot(ZulipTestCase):
def setUp(self) -> None:
user_profile = self.example_user('hamlet')
self.login(user_profile.email, realm=user_profile.realm)
def test_create_video_call_success(self) -> None:
with mock.patch('zerver.lib.actions.request_zoom_video_call_url', return_value={'join_url': 'example.com'}):
result = self.client_get("/json/calls/create")
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
content = result.json()
self.assertEqual(content['zoom_url'], 'example.com')
def test_create_video_call_failure(self) -> None:
with mock.patch('zerver.lib.actions.request_zoom_video_call_url', return_value=None):
result = self.client_get("/json/calls/create")
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
content = result.json()
self.assertEqual(content['zoom_url'], '')
def test_create_video_request_success(self) -> None:
class MockResponse:
def __init__(self) -> None:
self.status_code = 200
def json(self) -> Dict[str, str]:
return {"join_url": "example.com"}
with mock.patch('requests.post', return_value=MockResponse()):
result = self.client_get("/json/calls/create")
self.assert_json_success(result)
def test_create_video_request(self) -> None:
with mock.patch('requests.post'):
result = self.client_get("/json/calls/create")
self.assert_json_success(result)
|
Roshan2017/spinnaker
|
dev/dev_runner.py
|
Python
|
apache-2.0
| 10,050
| 0.00796
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
import signal
import stat
import subprocess
import sys
import time
from spinnaker.configurator import InstallationParameters
from spinnaker.fetch import AWS_METADATA_URL
from spinnaker.fetch import GOOGLE_METADATA_URL
from spinnaker.fetch import GOOGLE_INSTANCE_METADATA_URL
from spinnaker.fetch import is_aws_instance
from spinnaker.fetch import is_google_instance
from spinnaker.fetch import check_fetch
from spinnaker.fetch import fetch
from spinnaker.yaml_util import YamlBindings
from spinnaker.validate_configuration import ValidateConfig
from spinnaker import spinnaker_runner
def populate_aws_yml(content):
aws_dict = {'enabled': False}
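# Enable the AWS provider when running on an EC2 instance or when local AWS credentials are present.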
if is_aws_instance():
zone = (check_fetch(AWS_METADATA_URL + '/placement/availability-zone')
.content)
aws_dict['enabled'] = 'true'
aws_dict['defaultRegion'] = zone[:-1]
elif os.path.exists(os.path.join(os.environ['HOME'], '.aws/credentials')):
aws_dict['enabled'] = 'true'
aws_dict['defaultRegion'] = 'us-east-1'
bindings = YamlBindings()
bindings.import_dict({'providers': {'aws': aws_dict}})
content = bindings.transform_yaml_source(content, 'providers.aws.enabled')
content = bindings.transform_yaml_source(content,
'providers.aws.defaultRegion')
return content
def populate_google_yml(content):
credentials = {'project': '', 'jsonPath': ''}
google_dict = {'enabled': False,
'defaultRegion': 'us-central1',
'defaultZone': 'us-central1-f',}
google_dict['primaryCredentials'] = credentials
if is_google_instance():
zone = os.path.basename(
check_fetch(GOOGLE_INSTANCE_METADATA_URL + '/zone',
google=True).content)
google_dict['enabled'] = 'true'
google_dict['defaultRegion'] = zone[:-2]
google_dict['defaultZone'] = zone
credentials['project'] = check_fetch(
GOOGLE_METADATA_URL + '/project/project-id', google=True).content
bindings = YamlBindings()
bindings.import_dict({'providers': {'google': google_dict}})
content = bindings.transform_yaml_source(content, 'providers.google.enabled')
content = bindings.transform_yaml_source(
content, 'providers.google.defaultRegion')
content = bindings.transform_yaml_source(
content, 'providers.google.defaultZone')
content = bindings.transform_yaml_source(
content, 'providers.google.primaryCredentials.project')
content = bindings.transform_yaml_source(
content, 'providers.google.primaryCredentials.jsonPath')
return content
class DevInstallationParameters(InstallationParameters):
"""Specialization of the normal production InstallationParameters.
This is a developer deployment where the paths are set up to run directly
out of this repository rather than a standard system installation.
Also, custom configuration parameters come from the $HOME/.spinnaker
rather than the normal installation location of /opt/spinnaker/config.
"""
DEV_SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SUBSYSTEM_ROOT_DIR = os.getcwd()
USER_CONFIG_DIR = os.path.join(os.environ['HOME'], '.spinnaker')
LOG_DIR = os.path.join(SUBSYSTEM_ROOT_DIR, 'logs')
SPINNAKER_INSTALL_DIR = os.path.abspath(
os.path.join(DEV_SCRIPT_DIR, '..'))
INSTALLED_CONFIG_DIR = os.path.abspath(
os.path.join(DEV_SCRIPT_DIR, '../config'))
UTILITY_SCRIPT_DIR = os.path.abspath(
os.path.join(DEV_SCRIPT_DIR, '../runtime'))
EXTERNAL_DEPENDENCY_SCRIPT_DIR = os.path.abspath(
os.path.join(DEV_SCRIPT_DIR, '../runtime'))
DECK_INSTALL_DIR = os.path.join(SUBSYSTEM_ROOT_DIR, 'deck')
HACK_DECK_SETTINGS_FILENAME = 'settings.js'
DECK_PORT = 9000
class DevRunner(spinnaker_runner.Runner):
"""Specialization of the normal spinnaker runner for development use.
This class has different behaviors than the normal runner.
It follows similar heuristics for launching and stopping jobs,
however, the details differ in fundamental ways.
* The subsystems are run from their source (using gradle)
and will attempt to rebuild before running.
* Spinnaker will be reconfigured on each invocation.
The runner will display all the events to the subsystem error logs
to the console for as long as this script is running. When the script
terminates, the console will no longer show the error log, but the processes
will remain running, and continue logging to the logs directory.
"""
@staticmethod
def maybe_generate_clean_user_local():
"""Generate a spinnaker-local.yml file without environment variables refs"""
user_dir = DevInstallationParameters.USER_CONFIG_DIR
user_config_path = os.path.join(user_dir, 'spinnaker-local.yml')
if os.path.exists(user_config_path):
return
if not os.path.exists(user_dir):
os.mkdir(user_dir)
with open('{config_dir}/default-spinnaker-local.yml'.format(
config_dir=DevInstallationParameters.INSTALLED_CONFIG_DIR),
'r') as f:
content = f.read()
content = populate_aws_yml(content)
content = populate_google_yml(content)
with open(user_config_path, 'w') as f:
f.write(content)
os.chmod(user_config_path, 0600)
def __init__(self, installation_parameters=None):
self.maybe_generate_clean_user_local()
installation = installation_parameters or DevInstallationParameters
super(DevRunner, self).__init__(installation)
def start_subsystem(self, subsystem, environ=None):
"""Starts the specified subsystem.
Args:
subsystem [string]: The repository name of the subsystem to run.
"""
print 'Starting {subsystem}'.format(subsystem=subsystem)
command = os.path.join(
self.installation.SUBSYSTEM_ROOT_DIR,
subsystem,
'start_dev.sh')
return self.run_daemon(command, [command], environ=environ)
def tail_error_logs(self):
"""Start a background tail job of all the component error logs."""
log_dir = self.installation.LOG_DIR
try:
os.makedirs(log_dir)
except OSError:
pass
tail_jobs = []
for subsystem in self.get_all_subsystem_names():
path = os.path.join(log_dir, subsystem + '.err')
open(path, 'w').close()
tail_jobs.append(self.start_tail(path))
return tail_jobs
def get_deck_pid(self):
"""Return the process id for deck, or None."""
program='node ./node_modules/webpack-dev-server/bin/webpack-dev-server.js'
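# Parse 'ps' output for the webpack-dev-server node process and capture its pid (second column).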
stdout, stderr = subprocess.Popen(
'ps -fwwwC node', stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, close_fds=True).communicate()
match = re.search('(?m)^[^ ]+ +([0-9]+) .* {program}'.format(
program=program), stdout)
return int(match.group(1)) if match else None
def start_deck(self):
"""Start subprocess for deck."""
pid = self.get_deck_pid()
if pid:
print 'Deck is already running as pid={pid}'.format(pid=pid)
return pid
path = os.path.join(self.installation.SUBSYSTEM_ROOT_DIR,
'deck/start_dev.sh')
return self.run_daemon(path, [path])
def stop_deck(self):
"""Stop subprocess for deck."""
pid = self.get_deck_pid()
if pid:
print 'Terminating deck in pid={pid}'.format(pid=pid)
os.kill(pid, signal.SIGTERM)
def start_all(self, options):
"""Starts all the components then logs stderr to the console forever.
The subsystems are in forked processes disassociated from this, so will
continue
|
zamattiac/ROSIEBot
|
tests_verifier.py
|
Python
|
mit
| 895
| 0
|
import os
from verifier import Verifier
import verifier
import unittest
# Verification tests
import json
import codecs
TASK_FILE = '201606231548.json'
with codecs.open(TASK_FILE, mode='r', encoding='utf-8') as file:
run_info = json.load(file)
v = Verifier()
class TestVerifer(unittest.TestCase):
def test_handle_errors(self):
l1 = verifier.send_to_retry
verifier.handle_errors()
l2 = verifier.send_to_retry
self.assertGreater(len(l1), len(l2))
self.assertEqual(len(l2), len(l1) + len(run_info['error_list']))
def test_get_path_from_url(self):
path = v.get_path_from_url('http://staging.osf.io/mst3k')
self.assertTrue(os.path.exists(path))
def test_generate_page_dictionary(self):
d1 = v.generate_page_dictionary('wiki/')
self.assertGreater(len(d1), 0)
if __name__ == '__main__':
unittest.main()
|
brakhane/panda3d
|
direct/src/showbase/PythonUtil.py
|
Python
|
bsd-3-clause
| 86,071
| 0.005763
|
"""Contains miscellaneous utility functions and classes."""
__all__ = ['indent',
'doc', 'adjust', 'difference', 'intersection', 'union',
'sameElements', 'makeList', 'makeTuple', 'list2dict', 'invertDict',
'invertDictLossless', 'uniqueElements', 'disjoint', 'contains',
'replace', 'reduceAngle', 'fitSrcAngle2Dest', 'fitDestAngle2Src',
'closestDestAngle2', 'closestDestAngle', 'getSetterName',
'getSetter', 'Functor', 'Stack', 'Queue',
'bound', 'clamp', 'lerp', 'average', 'addListsByValue',
'boolEqual', 'lineupPos', 'formatElapsedSeconds', 'solveQuadratic',
'findPythonModule', 'mostDerivedLast',
'weightedChoice', 'randFloat', 'normalDistrib',
'weightedRand', 'randUint31', 'randInt32',
'SerialNumGen', 'serialNum', 'uniqueName', 'Enum', 'Singleton',
'SingletonError', 'printListEnum', 'safeRepr',
'fastRepr', 'isDefaultValue',
'ScratchPad', 'Sync', 'itype', 'getNumberedTypedString',
'getNumberedTypedSortedString',
'printNumberedTyped', 'DelayedCall', 'DelayedFunctor',
'FrameDelayedCall', 'SubframeCall', 'getBase', 'GoldenRatio',
'GoldenRectangle', 'rad90', 'rad180', 'rad270', 'rad360',
'nullGen', 'loopGen', 'makeFlywheelGen', 'flywheel',
'listToIndex2item', 'listToItem2index',
'formatTimeCompact','deeptype','StdoutCapture','StdoutPassthrough',
'Averager', 'getRepository', 'formatTimeExact', 'startSuperLog', 'endSuperLog',
'typeName', 'safeTypeName', 'histogramDict', 'unescapeHtmlString']
if __debug__:
__all__ += ['StackTrace', 'traceFunctionCall', 'traceParentCall', 'printThisCall',
'stackEntryInfo', 'lineInfo', 'callerInfo', 'lineTag',
'profileFunc', 'profiled', 'startProfile', 'printProfile',
'getProfileResultString', 'printStack', 'printReverseStack']
import types
import math
import os
import sys
import random
import time
import importlib
__report_indent = 3
from panda3d.core import ConfigVariableBool
if sys.version_info >= (3, 0):
import builtins
xrange = range
else:
import __builtin__ as builtins
"""
# with one integer positional arg, this uses about 4/5 of the memory of the Functor class below
def Functor(function, *args, **kArgs):
argsCopy = args[:]
def functor(*cArgs, **ckArgs):
kArgs.update(ckArgs)
return function(*(argsCopy + cArgs), **kArgs)
return functor
"""
class Functor:
def __init__(self, function, *args, **kargs):
assert callable(function), "function should be a callable obj"
self._function = function
self._args = args
self._kargs = kargs
if hasattr(self._function, '__name__'):
self.__name__ = self._function.__name__
else:
self.__name__ = str(itype(self._function))
if hasattr(self._function, '__doc__'):
self.__doc__ = self._function.__doc__
else:
self.__doc__ = self.__name__
def destroy(self):
del self._function
del self._args
del self._kargs
del self.__name__
del self.__doc__
def _do__call__(self, *args, **kargs):
_kargs = self._kargs.copy()
_kargs.update(kargs)
return self._function(*(self._args + args), **_kargs)
__call__ = _do__call__
def __repr__(self):
s = 'Functor(%s' % self._function.__name__
for arg in self._args:
try:
argStr = repr(arg)
except:
argStr = 'bad repr: %s' % arg.__class__
s += ', %s' % argStr
for karg, value in list(self._kargs.items()):
s += ', %s=%s' % (karg, repr(value))
s += ')'
return s
class Stack:
def __init__(self):
self.__list = []
def push(self, item):
self.__list.append(item)
def top(self):
# return the item on the top of the stack without popping it off
return self.__list[-1]
def pop(self):
return self.__list.pop()
def clear(self):
self.__list = []
def isEmpty(self):
return len(self.__list) == 0
def __len__(self):
return len(self.__list)
class Queue:
# FIFO queue
# interface is intentionally identical to Stack (LIFO)
def __init__(self):
self.__list = []
def push(self, item):
self.__list.append(item)
def top(self):
# return the next item at the front of the queue without popping it off
return self.__list[0]
def front(self):
return self.__list[0]
def back(self):
return self.__list[-1]
def pop(self):
return self.__list.pop(0)
def clear(self):
self.__list = []
def isEmpty(self):
return len(self.__list) == 0
def __len__(self):
return len(self.__list)
if __debug__ and __name__ == '__main__':
q = Queue()
assert q.isEmpty()
q.clear()
assert q.isEmpty()
q.push(10)
assert not q.isEmpty()
q.push(20)
assert not q.isEmpty()
assert len(q) == 2
assert q.front() == 10
assert q.back() == 20
assert q.top() == 10
assert q.top() == 10
assert q.pop() == 10
assert len(q) == 1
assert not q.isEmpty()
assert q.pop() == 20
assert len(q) == 0
assert q.isEmpty()
def indent(stream, numIndents, str):
"""
Write str to stream with numIndents in front of it
"""
# To match emacs, instead of a tab character we will use 4 spaces
stream.write(' ' * numIndents + str)
if __debug__:
import traceback
import marshal
class StackTrace:
def __init__(self, label="", start=0, limit=None):
"""
label is a string (or anything that can be a string)
that is printed as part of the trace back.
This is just to make it easier to tell what the
stack trace is referring to.
start is an integer number of stack frames back
from the most recent. (This is automatically
bumped up by one to skip the __init__ call
to the StackTrace).
limit is an integer number of stack frames
to record (or None for unlimited).
"""
self.label = label
if limit is not None:
self.trace = traceback.extract_stack(sys._getframe(1+start),
limit=limit)
else:
self.trace = traceback.extract_stack(sys._getframe(1+start))
def compact(self):
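# Render each frame as 'module.function:line', comma separated, outermost frame first (reverseCompact reverses the order).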
r = ''
comma = ','
for filename, lineNum, funcName, text in self.trace:
r += '%s.%s:%s%s' % (filename[:filename.rfind('.py')][filename.rfind('\\')+1:], funcName, lineNum, comma)
if len(r):
r = r[:-len(comma)]
return r
def reverseCompact(self):
r = ''
comma = ','
for filename, lineNum, funcName, text in self.trace:
r = '%s.%s:%s%s%s' % (filename[:filename.rfind('.py')][filename.rfind('\\')+1:], funcName, lineNum, comma, r)
if len(r):
r = r[:-len(comma)]
return r
def __str__(self):
r = "Debug stack trace of %s (back %s frames):\n"%(
self.label, len(self.trace),)
for i in traceback.format_list(self.trace):
r+=i
r+="***** NOTE: This is not a crash. This is a debug stack trace. *****"
return r
def printStack():
print(StackTrace(start=1).compact())
return True
def printReverseStack():
print(StackTrace(start=1).reverseCompact())
return True
def printVerboseStack():
print(StackTrace(start=1))
return True
#-----------------------------------------------------------------------------
def traceFunctionCall(frame):
"""
return a string that shows the call frame with calling arguments.
e.g.
foo(x=234, y=135)
"""
f = frame
co = f.f_code
dict = f.f_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
r=''
if 'self' in dict:
r = '%s.'%(dict['self'].__c
|
rex-xxx/mt6572_x201
|
sdk/monkeyrunner/jython/test/all_tests.py
|
Python
|
gpl-2.0
| 1,636
| 0.00978
|
#!/usr/bin/python2.4
#
# Copyright 2010, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test runner to run all the tests in this package."""
import os
import re
import sys
import unittest
TESTCASE_RE = re.compile('_test\.py$')
def AllTestFilesInDir(path):
"""Finds all the unit test files in the given path."""
return filter(TESTCASE_RE.search, os.listdir(path))
def suite(loader=unittest.defaultTestLoader):
"""Creates the all_tests TestSuite."""
script_parent_path = os.path.abspath(os.path.dirname(sys.argv[0]))
# Find all the _test.py files in the same directory we are in
test_files = AllTestFilesInDir(script_parent_path)
# Convert them into module names
module_names = [os.path.splitext(f)[0] for f in test_files]
# And import them
modules = map(__import__, module_names)
# And create the test suite for all these modules
return unittest.TestSuite([loader.loadTestsFromModule(m) for m in modules])
if __name__ == '__main__':
result = unittest.TextTestRunner().run(suite())
if not result.wasSuccessful():
# On failure return an error code
sys.exit(1)
|
Zlash65/erpnext
|
erpnext/patches/v8_9/update_billing_gstin_for_indian_account.py
|
Python
|
gpl-3.0
| 534
| 0.026217
|
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
company = frappe.get_all('Company', filters = {'country': 'India'})
if company:
for doctype in ['Sales Invoice', 'Delivery Note']:
frappe.db.sql(""" update `tab{0}`
set billing_address_gstin = (select gstin from `tabAddress`
where name = customer_address)
where customer_address is not null and customer_address != ''""".format(doctype))
|
repotvsupertuga/tvsupertuga.repository
|
script.module.universalscrapers/lib/universalscrapers/scraperplugins/watchfree.py
|
Python
|
gpl-2.0
| 7,916
| 0.006822
|
import base64
import re,time
import urllib
import urlparse
from BeautifulSoup import BeautifulSoup
from ..import proxy
from ..common import replaceHTMLCodes, clean_title
from ..scraper import Scraper
import xbmcaddon
import xbmc
class Watchfree(Scraper):
domains = ['watchfree.to']
name = "watchfree"
def __init__(self):
self.base_link = xbmcaddon.Addon('script.module.universalscrapers').getSetting("%s_baseurl" % (self.name))
self.moviesearch_link = '/?keyword=%s&search_section=1'
self.tvsearch_link = '/?keyword=%s&search_section=2'
def scrape_movie(self, title, year, imdb, debrid = False):
try:
query = self.moviesearch_link % urllib.quote_plus(title.replace('\'', '').rsplit(':', 1)[0])
query = urlparse.urljoin(self.base_link, query)
html = proxy.get(query, 'item')
page = 1
while True:
sources = self.scrape_movie_page(html, title, year)
if sources is not None:
return sources
else:
page +=1
if 'page=%s' % page in html or 'page%3D' + '%s' % page in html:
html2 = proxy.get(query + '&page=%s' % page, 'item')
html = html2
else:
break
except:
pass
return []
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
query = urlparse.urljoin(self.base_link,
self.tvsearch_link % urllib.quote_plus(title.replace('\'', '').rsplit(':', 1)[0]))
html = proxy.get(query, 'item')
if 'page=2' in html or 'page%3D2' in html:
html2 = proxy.get(query + '&page=2', 'item')
html += html2
html = BeautifulSoup(html)
cleaned_title = 'watchputlocker' + clean_title(title)
years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
items = html.findAll('div', attrs={'class': 'item'})
show_url = None
for item in items:
links = item.findAll('a')
for link in links:
href = link['href']
link_title = link['title']
try:
href = urlparse.parse_qs(urlparse.urlparse(href).query)['u'][0]
except:
pass
try:
href = urlparse.parse_qs(urlparse.urlparse(href).query)['q'][0]
except:
pass
if cleaned_title == clean_title(link_title) and show_year in link_title:
url = re.findall('(?://.+?|)(/.+)', href)[0]
show_url = urlparse.urljoin(self.base_link, replaceHTMLCodes(url))
else:
continue
html = BeautifulSoup(proxy.get(show_url, 'tv_episode_item'))
season_items = html.findAll('div', attrs={'class': 'show_season'})
for season_item in season_items:
if season_item["data-id"] != season:
continue
episode_items = season_item.findAll('div', attrs={'class': 'tv_episode_item'})
for episode_item in episode_items:
link = episode_item.findAll('a')[-1]
href = link["href"]
link_episode = link.contents[0].strip()
if link_episode != "E%s" % (episode):
continue
link_airdate = link.findAll('span', attrs={'class': 'tv_num_versions'})[-1] # WTF
link_airdate = link_airdate.contents[0]
if any(candidate_year in link_airdate for candidate_year in years):
return self.sources(href)
except:
pass
return []
def sources(self, url):
sources = []
try:
if url == None: return sources
absolute_url = urlparse.urljoin(self.base_link, url)
html = BeautifulSoup(proxy.get(absolute_url, 'link_ite'))
tables = html.findAll('table', attrs={'class': re.compile('link_ite.+?')})
for table in tables:
rows = table.findAll('tr')
for row in rows:
link = row.findAll('a')[-1]
href = link['href']
if not 'gtfo' in href:
continue
try:
href = urlparse.parse_qs(urlparse.urlparse(href).query)['u'][0]
except:
pass
try:
href = urlparse.parse_qs(urlparse.urlparse(href).query)['q'][0]
except:
pass
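# The target URL is base64-encoded in the link's 'gtfo' query parameter.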
href = base64.b64decode(urlparse.parse_qs(urlparse.urlparse(href).query)['gtfo'][0])
href = replaceHTMLCodes(href)
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(href.strip().lower()).netloc)[0]
host = replaceHTMLCodes(host)
host = host.encode('utf-8')
if "qertewrt" in host:
continue
quality = row.findAll('div', attrs={'class': 'quality'})[0].text
if "CAM" in quality or 'TS' in quality:
quality = 'CAM'
if 'HD' in quality:
pass
else:
quality = 'SD'
sources.append(
{'source': host, 'quality': quality, 'scraper': self.name, 'url': href, 'direct': False})
end_time = time.time()
total_time = end_time - self.start_time
print (repr(total_time))+"<<<<<<<<<<<<<<<<<<<<<<<<<"+self.name+">>>>>>>>>>>>>>>>>>>>>>>>>total_time"
except:
pass
return sources
def scrape_movie_page(self, html, title, year):
try:
html = BeautifulSoup(html)
cleaned_title = 'watchputlocker' + clean_title(title)
years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]
items = html.findAll('div', attrs={'class': 'item'})
for item in items:
links = item.findAll('a')
for link in links:
href = link['href']
link_title = link['title']
if any(candidate_year in link_title for candidate_year in years):
try:
href = urlparse.parse_qs(urlparse.urlparse(href).query)['u'][0]
except:
pass
try:
href = urlparse.parse_qs(urlparse.urlparse(href).query)['q'][0]
except:
pass
if cleaned_title == clean_title(link_title):
url = re.findall('(?://.+?|)(/.+)', href)[0]
url = replaceHTMLCodes(url)
return self.sources(url)
except:
pass
@classmethod
def get_settings_xml(clas):
xml = [
'<setting id="%s_enabled" ''type="bool" label="Enabled" default="true"/>' % (clas.name),
'<setting id= "%s_baseurl" type="text" label="Base Url" default="http://www.gowatchfreemovies.to"/>' % (clas.name)
]
return xml
|
webgeodatavore/pyqgis-samples
|
gui/qgis-sample-QgsColorWheel.py
|
Python
|
gpl-2.0
| 214
| 0.004673
|
# coding: utf-8
from qgis.gui import QgsColorWheel
color_wheel = QgsColorWheel()
def on_color_wheel_changed(color):
print(color)
color_wheel.colorChanged.connect(on_color_wheel_changed)
color_wheel.show()
|
shengqh/ngsperl
|
lib/Annotation/annovarSplicing.py
|
Python
|
apache-2.0
| 5,184
| 0.016397
|
import subprocess
import os.path
import re
import argparse
parser = argparse.ArgumentParser(description="annotate splicing with protein position by annovar.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input annovar _multianno.txt file', required=True)
parser.add_argument('-d', '--database_folder', action='store', nargs='?', help='Annovar database folder', required=True)
parser.add_argument('-b', '--buildver', action='store', nargs='?', help='Annovar buildver', required=True)
parser.add_argument('-s', '--splicing_threshold', action='store', nargs='?', default=2, help='Annovar splicing threshold (default=2)')
parser.add_argument('-o', '--output', action='store', nargs='?', help='Output annovar _multianno.txt file', required=True)
args = parser.parse_args()
inputfile=args.input
annovar_db=args.database_folder
outputfile=args.output
annovar_buildver=args.buildver
splicing_distance=int(args.splicing_threshold)
# inputfile="/scratch/cqs/shengq1/dnaseq/20160829_liuqi_gene_panel/bwa_refine_hc_gvcf_vqsr_annovar/result/liuqi_gene/liuqi_gene.pass.annovar.hg19_multianno.txt"
# annovar_db="/scratch/cqs/shengq1/references/annovar/humandb/"
# outputfile="/scratch/cqs/shengq1/dnaseq/20160829_liuqi_gene_panel/bwa_refine_hc_gvcf_vqsr_annovar/result/liuqi_gene/liuqi_gene.final.annovar.hg19_multianno.txt"
# annovar_buildver="hg19"
# splicing_distance=2
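# Candidate offsets inside the splicing window around each variant (0, the variant position itself, is excluded).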
possible_positions = [j for j in range(-splicing_distance, splicing_distance + 1) if j != 0]
with open(inputfile, 'r') as f:
headers = f.readline().rstrip().split('\t')
funcRefGeneIndex=headers.index("Func.refGene")
geneDetailRefGeneIndex = headers.index("GeneDetail.refGene")
aachangeRefGeneIndex=headers.index("AAChange.refGene")
annovarinputfile = outputfile + ".avinput"
with open(inputfile, 'r') as f:
with open(annovarinputfile, 'w') as snvw:
for line in f:
parts = line.rstrip('\r\n').split('\t')
if parts[funcRefGeneIndex] != "splicing":
continue
chro = parts[0]
start = int(parts[1])
for pos in possible_positions:
snvw.write("%s\t%d\t%d\tA\tG\t%d\n" % (chro, start + pos, start + pos, start))
args=["table_annovar.pl",
annovarinputfile,
annovar_db,
"-buildver",
annovar_buildver,
"-protocol",
"refGene",
"-operation",
"g",
"--outfile",
outputfile,
"--remove",
"--otherinfo"]
subprocess.call(args)
annovar_outputfile = outputfile + "." + annovar_buildver + "_multianno.txt"
if os.path.isfile(annovar_outputfile):
splicing_map = {}
prog = re.compile("p\.\w(\d+)[\w|\?]")
with open(annovar_outputfile, "r") as f:
splicingHeaders = f.readline().rstrip().split('\t')
splicingFuncRefGeneIndex=splicingHeaders.index("Func.refGene")
splicingAAChangeRefGeneIndex=splicingHeaders.index("AAChange.refGene")
for line in f:
parts = line.rstrip('\r\n').split('\t')
funcRefGene = parts[splicingFuncRefGeneIndex]
if(funcRefGene == "splicing" or funcRefGene == "intronic"):
continue
chrom = parts[0]
originalposition = int(parts[-1])
position = int(parts[1])
distance = abs(position - originalposition)
if funcRefGene != "exonic":
anno = funcRefGene
else:
anno = {}
for a in parts[splicingAAChangeRefGeneIndex].split(','):
if a != 'UNKNOWN':
aparts = a.split(':')
ma = prog.match(aparts[-1])
if ma == None:
print(line)
else:
anno[aparts[1]] = ':'.join(aparts[0:3]) + ':p.X' + ma.group(1) + 'X'
locus = parts[0] + ":" + str(originalposition)
if locus in splicing_map:
oldone = splicing_map[locus]
if oldone[0] > distance:
splicing_map[locus] = [distance, funcRefGene, anno]
else:
splicing_map[locus] = [distance, funcRefGene, anno]
outputTemp = outputfile + ".tmp"
with open(inputfile, 'r') as f:
with open(outputTemp, 'w') as w:
for line in f:
parts = line.rstrip('\r\n').split('\t')
if parts[funcRefGeneIndex] != "splicing":
w.write(line)
continue
locus= parts[0] + ":" + parts[1]
if locus not in splicing_map:
w.write(line)
continue
values = splicing_map[locus]
if values[1] != 'exonic':
parts[aachangeRefGeneIndex] = values[2]
else:
anno = values[2]
lst = []
for detail in parts[geneDetailRefGeneIndex].split(','):
detailparts = detail.split(':')
trans = detailparts[0]
if trans in anno :
lst.append(anno[trans])
else:
lst.append('')
parts[aachangeRefGeneIndex] = ','.join(lst)
w.write("%s\n" % ("\t".join(parts)))
os.remove(annovarinputfile)
os.remove(annovar_outputfile)
if os.path.isfile(outputfile):
os.remove(outputfile)
os.rename(outputTemp, outputfile)
print("annotate splicing by annovar done.")
|
mgeisler/satori
|
satori/sysinfo/ohai_solo.py
|
Python
|
apache-2.0
| 6,696
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# pylint: disable=W0622
"""Ohai Solo Data Plane Discovery Module."""
import json
import logging
import ipaddress as ipaddress_module
import six
from satori import bash
from satori import errors
from satori import utils
LOG = logging.getLogger(__name__)
def get_systeminfo(ipaddress, config, interactive=False):
"""Run data plane discovery using this module against a host.
:param ipaddress: address to the host to discover.
:param config: arguments and configuration supplied to satori.
:keyword interactive: whether to prompt the user for information.
"""
if (ipaddress in utils.get_local_ips() or
ipaddress_module.ip_address(six.text_type(ipaddress)).is_loopback):
client = bash.LocalShell()
client.host = "localhost"
client.port = 0
perform_install(client)
return system_info(client)
else:
with bash.RemoteShell(
ipaddress, username=config['host_username'],
private_key=config['host_key'],
interactive=interactive) as client:
perform_install(client)
return system_info(client)
def system_info(client, with_install=False):
"""Run ohai-solo on a remote system and gather the output.
:param client: :class:`ssh.SSH` instance
:returns: dict -- system information from ohai-solo
:raises: SystemInfoCommandMissing, SystemInfoCommandOld, SystemInfoNotJson
SystemInfoMissingJson
SystemInfoCommandMissing if `ohai` is not installed.
SystemInfoCommandOld if `ohai` is not the latest.
SystemInfoNotJson if `ohai` does not return valid JSON.
SystemInfoMissingJson if `ohai` does not return any JSON.
"""
if with_install:
perform_install(client)
if client.is_windows():
raise errors.UnsupportedPlatform(
"ohai-solo is a linux-only sytem info provider. "
"Target platform was %s", client.platform_info['dist'])
else:
command = "unset GEM_CACHE GEM_HOME GEM_PATH && sudo ohai-solo"
output = client.execute(command, escalate=True, allow_many=False)
not_found_msgs = ["command not found", "Could not find ohai"]
if any(m in k for m in not_found_msgs
for k in list(output.values()) if isinstance(k,
six.string_types)):
LOG.warning("SystemInfoCommandMissing on host: [%s]", client.host)
raise errors.SystemInfoCommandMissing("ohai-solo missing on %s" %
client.host)
# use string formatting to handle unicode
unicode_output = "%s" % output['stdout']
try:
results = json.loads(unicode_output)
except ValueError as exc:
try:
clean_output = get_json(unicode_output)
results = json.loads(clean_output)
except ValueError as exc:
raise errors.SystemInfoNotJson(exc)
return results
def perform_install(client):
"""Install ohai-solo on remote system."""
LOG.info("Installing (or updating) ohai-solo on device %s at %s:%d",
client.host, client.host, client.port)
# Check if it a windows box, but fail safely to Linux
is_windows = False
try:
is_windows = client.is_windows()
except Exception:
pass
if is_windows:
raise errors.UnsupportedPlatform(
"ohai-solo is a linux-only sytem info provider. "
"Target platform was %s", client.platform_info['dist'])
else:
# Download to host
command = ("wget -N http://readonly.configdiscovery.rackspace.com"
"/install.sh")
output = client.execute(command, cwd='/tmp', escalate=True,
allow_many=False)
LOG.debug("Downloaded ohai-solo | %s", output['stdout'])
# Run install
command = "bash install.sh"
install_output = client.execute(command, cwd='/tmp',
with_exit_code=True,
escalate=True, allow_many=False)
LOG.debug("Ran ohai-solo install script. | %s.",
install_output['stdout'])
# Be a good citizen and clean up your tmp data
command = "rm install.sh"
client.execute(command, cwd='/tmp', escalate=True, allow_many=False)
# Process install command output
if install_output['exit_code'] != 0:
raise errors.SystemInfoCommandInstallFailed(
install_output['stderr'][:256])
else:
return install_output
def remove_remote(client):
"""Remove ohai-solo from specifc remote system.
Currently supports:
- ubuntu [10.x, 12.x]
- debian [6.x, 7.x]
- redhat [5.x, 6.x]
- centos [5.x, 6.x]
"""
if client.is_windows():
raise errors.UnsupportedPlatform(
"ohai-solo is a linux-only sytem info provider. "
"Target platform was %s", client.platform_info['dist'])
else:
platform_info = client.platform_info
if client.is_debian():
remove = "dpkg --purge ohai-solo"
elif client.is_fedora():
remove = "yum -y erase ohai-solo"
else:
raise errors.UnsupportedPlatform("Unknown distro: %s" %
platform_info['dist'])
command = "%s" % remove
output = client.execute(command, cwd='/tmp', escalate=True)
return output
def get_json(data):
"""Find the JSON string in data and return a string.
:param data: :string:
:returns: string -- JSON string stripped of non-JSON data
:raises: SystemInfoMissingJson
SystemInfoMissingJson if `ohai` does not return any JSON.
"""
try:
first = data.index('{')
last = data.rindex('}')
return data[first:last + 1]
except ValueError as exc:
context = {"ValueError": "%s" % exc}
raise errors.SystemInfoMissingJson(context)
|
lizardsystem/flooding-lib
|
flooding_lib/util/flshinc.py
|
Python
|
gpl-3.0
| 10,414
| 0.000288
|
"""Tool to loop over fls_h.inc files. Based on nens/asc.py and NumPy
masked arrays. Stripped out all unnecessary flexibility.
Usage:
# Opens zipfile if path ends with zip; inside it opens the only file,
# or raises ValueError if there are several. Currently we need the no
# data value passed in because we don't get it from the file; you may
# need to use some asc file present to get one.
flsh = flshinc.Flsh(path, no_data_value=-999.0)
geo_transform = flsh.geo_transform() # Format same as GDAL's, in
# Rijksdriehoek probably
cellsize_in_m2 = geo_transform[1]*geo_transform[1]
for timestamp, grid in flsh:
print("Total inundated area at timestamp {0}: {1} m2".format(
timestamp, numpy.greater(grid, 0).sum() * cellsize_in_m2))
Extra boolean options to Flsh:
one_per_hour: only yield the first grid of each hour (assumes
timestamp is in hours)
mutate: constantly yield the same grid object. Means that previously
yielded grids change. Faster because no copies are made, but
only use when you understand the risk.
If anything unexpected is encountered in a file, a possibly cryptic
ValueError is raised.
"""
# Python 3 is coming to town
from __future__ import print_function, unicode_literals
from __future__ import absolute_import, division
import logging
import math
import numpy
import numpy.ma
import zipfile
from flooding_lib.util import files
logger = logging.getLogger(__name__)
def splitline(f):
return f.readline().decode('utf8').strip().split()
def ints(f):
return [int(i) for i in splitline(f)]
def floats(f):
return [float(fl) for fl in splitline(f)]
def distance(p1, p2):
return math.sqrt((p1[0] - p2[0]) ** 2 +
(p1[1] - p2[1]) ** 2)
def check(line, expected):
if line[:len(expected)] != expected:
raise ValueError("line {0} was expected to start with {1}".
format(line, expected))
def y0_is_south(header, helper_geotransform):
if helper_geotransform:
helper_y0 = helper_geotransform[3]
# In old FLS files, header['y0'] is the y value of the
# southwest corner, in newer ones it's the y of the northwest
# corner. We have no way to distinguish them based on the FLS
# file alone.
# The helper geotransform's y0 is always at the north of the
# region. If it is sufficiently northwards of the FLS' y0,
# the y0 must be to the south. "Sufficient" is defined as at
# least 10% of the FLS height -- I'm afraid that without that
# margin, we're going to find maxwaterdepth grids that are a
# tiny bit to the north of the FLS, that would cause false
# souths.
north_of_fls_y0 = (
header['y0'] + 0.1 * (header['nrows'] * header['dx']))
if helper_y0 > north_of_fls_y0:
return True
return False
class Flsh(object):
def __init__(
self, path, no_data_value=-999.0, one_per_hour=False,
mutate=False, helper_geotransform=None):
self.path = path
self.no_data_value = no_data_value
self.one_per_hour = one_per_hour
self.mutate = mutate
self.helper_geotransform = helper_geotransform
def geo_transform(self):
header = self._parse_header()
# y0 can be north or south, dy is positive or negative depending
if y0_is_south(header, self.helper_geotransform):
y0 = header['y0'] + (header['nrows'] * header['dx'])
else:
y0 = header['y0']
return [header['x0'], header['dx'], 0.0,
y0, 0.0, -header['dx']]
def get_classes(self):
header = self._parse_header()
return header['classes']
def _open_path(self):
if self.path.endswith('.zip'):
try:
zipf = zipfile.ZipFile(self.path)
namelist = zipf.namelist()
if len(namelist) != 1:
raise ValueError(
"Can only open .zip files with 1 file inside, "
"{p} has {n}.".format(p=self.path, n=len(namelist)))
return zipf.open(namelist[0], mode='rU')
except zipfile.BadZipfile:
raise ValueError(
"{} ends in .zip but can't be opened as one."
.format(self.path))
else:
return file(self.path, 'rU')
@property
def ncols(self):
return self._parse_header()['ncols']
@property
def nrows(self):
return self._parse_header()['nrows']
def _parse_header(self):
if hasattr(self, '_header'):
return self._header
self.f = self._open_path()
# 1: dimensions
while True:
try:
check(
splitline(self.f),
['MAIN', 'DIMENSIONS', 'MMAX', 'NMAX'])
break
except ValueError:
pass
colrowline = splitline(self.f)
try:
ncols, nrows = [int(c) for c in colrowline]
except ValueError:
if colrowline[0] == '***':
nrows, ncols = self.find_max_col()
# logger.debug("nrows={0} ncols={1}".format(nrows, ncols))
# 2: grid
while True:
try:
spl = splitline(self.f)
check(spl, ['GRID'])
break
except ValueError:
pass
grid = floats(self.f)
spl = spl[1:]
dx = grid[spl.index('DX')]
x0 = grid[spl.index('X0')]
y0 = grid[spl.index('Y0')]
# logger.debug("dx={0} x0={1} y0={2}".format(dx, x0, y0))
# 3: classes
while True:
try:
check(
splitline(self.f),
['CLASSES', 'OF', 'INCREMENTAL', 'FILE'])
break
except ValueError:
pass
classes = []
line = splitline(self.f)
while line != ['ENDCLASSES']:
classes += [[float(fl) for fl in line]]
line = splitline(self.f)
# logger.debug("classes: {0}".format(classes))
self._header = {
'nrows': nrows,
'ncols': ncols,
'dx': dx,
'x0': x0,
'y0': y0,
'classes': classes,
}
return self._header
def find_max_col(self):
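# Fallback for files whose dimension line is unreadable ('***'): scan the whole data section for the largest row and column indices.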
opened = self._open_path()
maxcol = 0
maxrow = 0
for line in opened:
line = line.strip().decode('utf8').split()
if not line or '.' in line[0]:
continue
try:
row, col, value = [int(elem) for elem in line]
except ValueError:
continue
maxcol = max(maxcol, col)
maxrow = max(maxrow, row)
logger.debug("Found max col: {}".format(maxcol))
logger.debug("Found max row: {}".format(maxrow))
return maxcol, maxrow
def __iter__(self):
header = self._parse_header()
the_array = numpy.zeros((header['nrows'] + 1, header['ncols'] + 1))
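# A single working grid is reused for every timestamp; with mutate=True the same array object is yielded each iteration.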
current_timestamp = False
yield_this_grid = False
last_yielded_hour = None
for line in self.f:
line = line.strip().decode('utf8').split()
if not line or '.' in line[0]:
if yield_this_grid:
if self.mutate:
yield current_timestamp, the_array
else:
yield current_timestamp, numpy.array(the_array)
last_yielded_hour = int(current_timestamp)
if not line:
# End of file
return
# Start of a new timestamp
timestamp, _, class_column = line[:3]
current_timestamp = float(timestamp)
class_column = int(class_column) - 1
yield_this_grid = (
not self.one_per_hour
or int(current_timestamp) != last_yielded_hour)
|
dna2github/dna2oldmemory
|
PyLangParser/source/walker.py
|
Python
|
mit
| 2,919
| 0.012676
|
"""
@author: Seven Lju
@date: 2016.04.27
"""
constStops = [
'\n', '\t', ' ', '~', '!', '#', '$', '%',
'@', '&', '*', '(', ')', '-', '=', '+', '[',
']', '{', '}', '\\', '|', '\'', '"', ';',
':', ',', '<', '.', '>', '/', '?', '^', '`'
]
class TextWalker(object):
def __init__(self, text, stops=constStops):
self.text = text
      self.cursor = 0
self.n = len(text)
self.stops = stops
self.token = ""
self.stop = '\n'
def __iter__(self):
return self
def __next__(self):
      if self.cursor >= self.n:
raise StopIteration()
i = self.cursor
while True:
if i >= self.n:
self.stop = '\0'
break
self.stop = self.text[i]
if self.stop in self.stops:
break
i += 1
self.token = self.text[self.cursor:i]
self.cursor = i + 1
return (self.token, self.stop)
def next(self):
return self.__next__()
def skipString(self, pair=None, markEscape='\\'):
_stops = self.stops
if pair is None:
pair = self.stop
self.stops = [pair, markEscape]
i = self.cursor
token = ""
while True:
string = self.__next__()
token += string[0]
stop = string[1]
if stop == pair:
break
elif stop == markEscape:
token += stop
token += self.text[self.cursor]
self.cursor += 1
if stop == '\0':
break
if self.cursor >= self.n or self.cursor <= 0:
token += self.stop
break
self.stops = _stops
self.token = token
return (self.token, self.stop)
def skipLongString(self, start, end=None, markEscape='\\'):
if end is None:
end = start
if self.cursor >= self.n:
return ("", '\0')
if not (start and end):
return ("", self.stop)
if self.stop != start[0]:
return ("", self.stop)
if self.text[self.cursor:self.cursor+len(start)-1] != start[1:]:
return ("", self.stop)
_stops = self.stops
token = ""
end1st = end[0]
endlen = len(end)
self.stops = [end1st, markEscape]
self.cursor += len(start) - 1
while True:
string = self.__next__()
token += string[0]
stop = string[1]
if stop == end1st and self.cursor + endlen - 1 <= self.n:
if self.text[self.cursor:self.cursor+endlen-1] == end[1:]:
self.stop = end
break
else:
token += stop
elif stop == markEscape:
token += stop
token += self.text[self.cursor]
self.cursor += 1
if stop == '\0':
break
if self.cursor >= self.n or self.cursor <= 0:
token += self.stop
break
self.cursor += endlen - 1
self.stops = _stops
self.token = token
return (self.token, self.stop)
def seeForward(self, n):
return self.text[self.cursor:self.cursor + n]
def seeBackward(self, n):
start = self.cursor - n
return self.text[start:self.cursor]
|
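A short usage sketch for the TextWalker tokenizer in PyLangParser/source/walker.py above. It assumes the class is importable as defined; the input string and the token/stop pairs it yields are illustrative only.

# Each iteration yields (token, stop_character); skipString() consumes a
# quoted literal when the walker halts on the opening quote.
walker = TextWalker("foo(bar, 'a b')")
for token, stop in walker:
    if stop == "'":
        token, stop = walker.skipString()
    if token:
        print(repr(token), '->', repr(stop))
# 'foo' -> '('
# 'bar' -> ','
# 'a b' -> "'"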
elliotthill/django-oscar
|
oscar/apps/shipping/admin.py
|
Python
|
bsd-3-clause
| 576
| 0
|
from django.contrib import admin
from oscar.apps.shipping.models import (
OrderAndItemCharges, WeightBand, WeightBased)
class OrderChargesAdmin(admin.ModelAdmin):
exclude = ('code',)
list_display = ('name', 'description', 'price_per_order', 'price_per_item',
'free_shipping_threshold')
class WeightBandAdmin(admin.ModelAdmin):
list_display = ('method', 'weight_from', 'weight_to', 'charge')
admin.site.register(OrderAndItemCharges, OrderChargesAdmin)
admin.site.register(WeightBased)
admin.site.register(WeightBand, WeightBandAdmin)
|
|
oleduc/ferrymang
|
ferrymang/modules/__init__.py
|
Python
|
bsd-3-clause
| 22
| 0
|
__author__ = 'oleduc'
|
tux-00/ansible
|
lib/ansible/module_utils/redhat.py
|
Python
|
gpl-3.0
| 10,236
| 0.001563
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), James Laska
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import types
from ansible.module_utils.six.moves import configparser
class RegistrationBase(object):
def __init__(self, module, username=None, password=None):
self.module = module
self.username = username
self.password = password
def configure(self):
raise NotImplementedError("Must be implemented by a sub-class")
def enable(self):
# Remove any existing redhat.repo
redhat_repo = '/etc/yum.repos.d/redhat.repo'
        if os.path.isfile(redhat_repo):
            os.unlink(redhat_repo)
def register(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unregister(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unsubscribe(self):
raise NotImplementedError("Must be implemented by a sub-class")
def update_plugin_conf(self, plugin, enabled=True):
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
if os.path.isfile(plugin_conf):
cfg = configparser.ConfigParser()
cfg.read([plugin_conf])
if enabled:
cfg.set('main', 'enabled', 1)
else:
cfg.set('main', 'enabled', 0)
fd = open(plugin_conf, 'rwa+')
cfg.write(fd)
fd.close()
def subscribe(self, **kwargs):
raise NotImplementedError("Must be implemented by a sub-class")
class Rhsm(RegistrationBase):
def __init__(self, module, username=None, password=None):
RegistrationBase.__init__(self, module, username, password)
self.config = self._read_config()
self.module = module
def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
'''
Load RHSM configuration from /etc/rhsm/rhsm.conf.
Returns:
* ConfigParser object
'''
# Read RHSM defaults ...
cp = configparser.ConfigParser()
cp.read(rhsm_conf)
# Add support for specifying a default value w/o having to standup some configuration
# Yeah, I know this should be subclassed ... but, oh well
def get_option_default(self, key, default=''):
sect, opt = key.split('.', 1)
if self.has_section(sect) and self.has_option(sect, opt):
return self.get(sect, opt)
else:
return default
cp.get_option = types.MethodType(get_option_default, cp, configparser.ConfigParser)
return cp
def enable(self):
'''
Enable the system to receive updates from subscription-manager.
This involves updating affected yum plugins and removing any
conflicting yum repositories.
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', True)
def configure(self, **kwargs):
'''
Configure the system as directed for registration with RHN
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'config']
# Pass supplied **kwargs as parameters to subscription-manager. Ignore
# non-configuration parameters and replace '_' with '.'. For example,
# 'server_hostname' becomes '--system.hostname'.
for k, v in kwargs.items():
if re.search(r'^(system|rhsm)_', k):
args.append('--%s=%s' % (k.replace('_', '.'), v))
self.module.run_command(args, check_rc=True)
@property
def is_registered(self):
'''
        Determine whether the current system is registered.
Returns:
* Boolean - whether the current system is currently registered to
RHN.
'''
# Quick version...
if False:
return os.path.isfile('/etc/pki/consumer/cert.pem') and \
os.path.isfile('/etc/pki/consumer/key.pem')
args = ['subscription-manager', 'identity']
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
if rc == 0:
return True
else:
return False
def register(self, username, password, autosubscribe, activationkey):
'''
Register the current system to the provided RHN server
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'register']
# Generate command arguments
if activationkey:
args.append('--activationkey "%s"' % activationkey)
else:
if autosubscribe:
args.append('--autosubscribe')
if username:
args.extend(['--username', username])
if password:
args.extend(['--password', password])
# Do the needful...
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
def unsubscribe(self):
'''
Unsubscribe a system from all subscribed channels
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unsubscribe', '--all']
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
def unregister(self):
'''
Unregister a currently registered system
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unregister']
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
def subscribe(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
for pool in available_pools.filter(regexp):
pool.subscribe()
class RhsmPool(object):
'''
Convenience class for housing subscription information
'''
def __init__(self, module, **kwargs):
self.module = module
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return str(self.__getattribute__('_name'))
def subscribe(self):
args = "subscription-manager subscribe --pool %s" % s
|
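Rhsm.configure() above builds the subscription-manager command line by keeping only system_*/rhsm_* keyword arguments and replacing '_' with '.'. A standalone sketch of just that mapping; the option names are illustrative, not a statement about which options subscription-manager accepts:

import re

def to_config_args(**kwargs):
    # Same filtering/renaming as Rhsm.configure() above.
    args = ['subscription-manager', 'config']
    for k, v in kwargs.items():
        if re.search(r'^(system|rhsm)_', k):
            args.append('--%s=%s' % (k.replace('_', '.'), v))
    return args

print(to_config_args(rhsm_baseurl='https://cdn.example.com', server_insecure='0'))
# ['subscription-manager', 'config', '--rhsm.baseurl=https://cdn.example.com']
# server_insecure is dropped: it does not match the system_/rhsm_ prefix filter.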
masashi-y/depccg
|
depccg/printer/my_json.py
|
Python
|
mit
| 1,555
| 0.001286
|
from typing import Dict, Any
from depccg.tree import Tree
from depccg.cat import Category
def _json_of_category(category: Category) -> Dict[str, Any]:
def rec(node):
if node.is_functor:
return {
'slash': node.slash,
'left': rec(node.left),
'right': rec(node.right)
}
else:
feature = node.features
return {
'base': node.base,
'feature': feature if len(feature) > 0 else None
}
return rec(category)
def json_of(
tree: Tree,
full: bool = False
) -> Dict[str, Any]:
"""a tree in Python dict object.
Args:
tree (Tree): tree object
        full (bool): whether to decompose categories into their components, i.e.,
{
'slash': '/',
'left': {'base': 'S', 'feature': 'adj'},
'right': {'base': 'NP', 'feature': None},
},
or just as a string "S[adj]/NP".
Returns:
        Dict[str, Any]: the tree as a Python dict object
"""
def rec(node: Tree) -> Dict[str, Any]:
if node.is_leaf:
res = dict(node.token)
res['cat'] = _json_of_category(node.cat) if full else str(node.cat)
return res
else:
return {
'type': node.op_string,
'cat': _json_of_category(node.cat) if full else str(node.cat),
'children': [rec(child) for child in node.children]
}
return rec(tree)
|
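_json_of_category above decomposes a CCG category recursively into {'slash', 'left', 'right'} nodes for functors and {'base', 'feature'} nodes for atoms. depccg's Category objects come from its parser, so the toy below drives the same recursion with hand-built stand-in classes; it illustrates the output shape described in the json_of docstring and is not depccg API.

class Atom:
    is_functor = False
    def __init__(self, base, features=''):
        self.base, self.features = base, features

class Functor:
    is_functor = True
    def __init__(self, left, slash, right):
        self.left, self.slash, self.right = left, slash, right

def to_dict(node):
    # Same recursion as _json_of_category() above.
    if node.is_functor:
        return {'slash': node.slash,
                'left': to_dict(node.left),
                'right': to_dict(node.right)}
    feature = node.features
    return {'base': node.base, 'feature': feature if len(feature) > 0 else None}

# "S[adj]/NP" decomposed into components, as in the docstring above.
print(to_dict(Functor(Atom('S', 'adj'), '/', Atom('NP'))))
# {'slash': '/', 'left': {'base': 'S', 'feature': 'adj'},
#  'right': {'base': 'NP', 'feature': None}}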
aitgon/wopmars
|
wopmars/tests/resource/wrapper/fooPackage/FooBase2P.py
|
Python
|
mit
| 325
| 0.003077
|
"""
Example of module documentation which can be
multiple-lined
"""
from sqlalchemy import Column, Integer, String
from wopmars.Base import Base
class FooBase2P(Base):
"""
Documentation for the class
"""
__tablename__ = "FooBase2P"
id = Column(Integer, primary_key=True)
name = Column(String(255))
|
pupeno/bonvortaro
|
vortaro/views.py
|
Python
|
agpl-3.0
| 324
| 0.003086
|
from django.shortcuts import render_to_response
from bonvortaro.vortaro import forms
def search(request):
if request.method == 'POST':
form = forms.SearchForm(request.POST)
else:
form = forms.SearchForm(request.GET)
return render_to_response("vortaro/search.html", {
"form": form
})
| ||
tensorflow/tensorboard
|
tensorboard/plugins/scalar/scalars_plugin.py
|
Python
|
apache-2.0
| 6,802
| 0
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard Scalars plugin.
See `http_api.md` in this directory for specifications of the routes for
this plugin.
"""
import csv
import io
import werkzeug.exceptions
from werkzeug import wrappers
from tensorboard import errors
from tensorboard import plugin_util
from tensorboard.backend import http_util
from tensorboard.data import provider
from tensorboard.plugins import base_plugin
from tensorboard.plugins.scalar import metadata
_DEFAULT_DOWNSAMPLING = 1000 # scalars per time series
class OutputFormat(object):
"""An enum used to list the valid output formats for API calls."""
JSON = "json"
CSV = "csv"
class ScalarsPlugin(base_plugin.TBPlugin):
"""Scalars Plugin for TensorBoard."""
plugin_name = metadata.PLUGIN_NAME
def __init__(self, context):
"""Instantiates ScalarsPlugin via TensorBoard core.
Args:
context: A base_plugin.TBContext instance.
"""
self._downsample_to = (context.sampling_hints or {}).get(
self.plugin_name, _DEFAULT_DOWNSAMPLING
)
self._data_provider = context.data_provider
self._version_checker = plugin_util._MetadataVersionChecker(
data_kind="scalar",
latest_known_version=0,
)
def get_plugin_apps(self):
return {
"/scalars": self.scalars_route,
"/scalars_multirun": self.scalars_multirun_route,
"/tags": self.tags_route,
}
def is_active(self):
return False # `list_plugins` as called by TB core suffices
def frontend_metadata(self):
return base_plugin.FrontendMetadata(element_name="tf-scalar-dashboard")
def index_impl(self, ctx, experiment=None):
"""Return {runName: {tagName: {displayName: ..., description:
...}}}."""
mapping = self._data_provider.list_scalars(
ctx,
experiment_id=experiment,
plugin_name=metadata.PLUGIN_NAME,
)
result = {run: {} for run in mapping}
for (run, tag_to_content) in mapping.items():
for (tag, metadatum) in tag_to_content.items():
md = metadata.parse_plugin_metadata(metadatum.plugin_content)
if not self._version_checker.ok(md.version, run, tag):
continue
description = plugin_util.markdown_to_safe_html(
metadatum.description
)
result[run][tag] = {
"displayName": metadatum.display_name,
"description": description,
}
return result
def scalars_impl(self, ctx, tag, run, experiment, output_format):
"""Result of the form `(body, mime_typ
|
e)`."""
all_scalars = self._data_provider.read_scalars(
ctx,
experiment_id=experiment,
plugin_name=metadata.PLUGIN_NAME,
downsample=self._downsample_to,
run_tag_filter=provider.RunTagFilter(runs=[run], tags=[tag]),
)
        scalars = all_scalars.get(run, {}).get(tag, None)
if scalars is None:
raise errors.NotFoundError(
"No scalar data for run=%r, tag=%r" % (run, tag)
)
values = [(x.wall_time, x.step, x.value) for x in scalars]
if output_format == OutputFormat.CSV:
string_io = io.StringIO()
writer = csv.writer(string_io)
writer.writerow(["Wall time", "Step", "Value"])
writer.writerows(values)
return (string_io.getvalue(), "text/csv")
else:
return (values, "application/json")
def scalars_multirun_impl(self, ctx, tag, runs, experiment):
"""Result of the form `(body, mime_type)`."""
all_scalars = self._data_provider.read_scalars(
ctx,
experiment_id=experiment,
plugin_name=metadata.PLUGIN_NAME,
downsample=self._downsample_to,
run_tag_filter=provider.RunTagFilter(runs=runs, tags=[tag]),
)
body = {
run: [(x.wall_time, x.step, x.value) for x in run_data[tag]]
for (run, run_data) in all_scalars.items()
}
return (body, "application/json")
@wrappers.Request.application
def tags_route(self, request):
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
index = self.index_impl(ctx, experiment=experiment)
return http_util.Respond(request, index, "application/json")
@wrappers.Request.application
def scalars_route(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
tag = request.args.get("tag")
run = request.args.get("run")
if tag is None or run is None:
raise errors.InvalidArgumentError(
"Both run and tag must be specified: tag=%r, run=%r"
% (tag, run)
)
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
output_format = request.args.get("format")
(body, mime_type) = self.scalars_impl(
ctx, tag, run, experiment, output_format
)
return http_util.Respond(request, body, mime_type)
@wrappers.Request.application
def scalars_multirun_route(self, request):
"""Given a tag and list of runs, return dict of ScalarEvent arrays."""
if request.method != "POST":
raise werkzeug.exceptions.MethodNotAllowed(["POST"])
tags = request.form.getlist("tag")
runs = request.form.getlist("runs")
if len(tags) != 1:
raise errors.InvalidArgumentError(
"tag must be specified exactly once"
)
tag = tags[0]
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
(body, mime_type) = self.scalars_multirun_impl(
ctx, tag, runs, experiment
)
return http_util.Respond(request, body, mime_type)
|
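scalars_impl above returns (wall_time, step, value) tuples either as JSON or as CSV under a 'Wall time,Step,Value' header. A standalone sketch of the CSV branch with made-up sample rows:

import csv
import io

# Sample rows shaped like the ones scalars_impl() builds; values are made up.
values = [(1625000000.0, 0, 0.69), (1625000060.0, 100, 0.42)]

string_io = io.StringIO()
writer = csv.writer(string_io)
writer.writerow(["Wall time", "Step", "Value"])
writer.writerows(values)
body, mime_type = string_io.getvalue(), "text/csv"
print(mime_type)
print(body)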
kg-bot/SupyBot
|
plugins/Python/config.py
|
Python
|
gpl-3.0
| 2,695
| 0.000742
|
###
# Copyright (c) 2003-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
#
#   * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Python', True)
if yn("""This plugin provides a snarfer for ASPN Python Recipe URLs;
it will output the name of the Recipe when it sees such a URL.
Would you like to enable this snarfer?"""):
conf.supybot.plugins.Python.aspnSnarfer.setValue(True)
Python = conf.registerPlugin('Python')
conf.registerChannelValue(Python, 'aspnSnarfer',
registry.Boolean(False, """Determines whether the ASPN Python recipe
snarfer is enabled. If so, it will message the channel with the name of
    the recipe when it sees an ASPN Python recipe link on the channel."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
SymbiFlow/prjuray
|
fuzzers/002-tilegrid/clel_int/top.py
|
Python
|
isc
| 2,848
| 0.001053
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import os
import random
random.seed(int(os.getenv("SEED"), 16))
from utils import util
from prjuray.db import Database
def gen_sites():
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
tile_type = gridinfo.tile_type
if tile_type in ['CLEL_L', 'CLEL_R']:
# Don't fuzz the top and bottom of the grid, the interconnect
# there behaves differently.
_, _, y_min, y_max = grid.dims()
if loc.grid_y <= y_min + 1 or loc.grid_y >= y_max - 1:
continue
site_name = sorted(gridinfo.sites.keys())[0]
if gridinfo.tile_type[-1] == 'L':
int_tile_loc = (loc.grid_x + 1, loc.grid_y)
else:
int_tile_loc = (loc.grid_x - 1, loc.grid_y)
int_tile_name = grid.tilename_at_loc(int_tile_loc)
if not int_tile_name.startswith('INT_'):
continue
yield int_tile_name, site_name
def write_params(params):
pinstr = 'tile,val\n'
for tile, (site, val) in sorted(params.items()):
pinstr += '%s,%s,%s\n' % (tile, val, site)
open('params.csv', 'w').write(pinstr)
def run():
print('''
module top();
''')
params = {}
sites = sorted(list(gen_sites()))
for (tile_name, site_name), isone in zip(sites,
util.gen_fuzz_states(len(sites))):
params[tile_name] = (site_name, isone)
print('''
(* KEEP, DONT_TOUCH, LOC = "{loc}", LOCK_PINS="I0:A1 I1:A2 I2:A3 I3:A4 I4:A5 I5:A6" *)
wire loop_{loc};
LUT6 #(.INIT(64'hFFFFFFFFFFFFFFFF) ) luta_{loc} (
.I0(1),
.I1(1),
.I2(loop_{loc}),
.I3({loop}),
.I4(1),
.I5(1),
.O(loop_{loc})
);
'''.format(loc=site_name, loop=1 if isone else ('loop_' + site_name)))
print("endmodule")
write_params(params)
if __name__ == '__main__':
run()
|
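write_params above flattens the {tile: (site, val)} dict into params.csv. A small sketch of the text it produces for a toy dict (tile and site names are invented); note that the header names two columns while each data row carries three fields, exactly as in the function:

# Same formatting as write_params() above, printed instead of written to disk.
params = {
    'INT_X10Y42': ('SLICE_X10Y42', 1),   # invented tile/site names
    'INT_X11Y42': ('SLICE_X11Y42', 0),
}
pinstr = 'tile,val\n'
for tile, (site, val) in sorted(params.items()):
    pinstr += '%s,%s,%s\n' % (tile, val, site)
print(pinstr)
# tile,val
# INT_X10Y42,1,SLICE_X10Y42
# INT_X11Y42,0,SLICE_X11Y42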
lulf/qpid-dispatch
|
python/qpid_dispatch_internal/tools/command.py
|
Python
|
apache-2.0
| 9,029
| 0.006534
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""
Utilities for command-line programs.
"""
import sys, json, optparse, os
from collections import Sequence, Mapping
from qpid_dispatch_site import VERSION
from proton import SSLDomain, Url
from proton.utils import SyncRequestResponse, BlockingConnection
class UsageError(Exception):
"""
Raise this exception to indicate the usage message should be printed.
Handled by L{main}
"""
pass
def main(run, argv=sys.argv, op=None):
"""
Call run(argv) with exception handling, do appropriate sys.exit().
    @param op: an OptionParser to use for usage related error messages.
@return: exit value for sys.exit
"""
try:
run(argv)
return 0
except KeyboardInterrupt:
print
except UsageError, e:
op.error(e)
except Exception, e:
if "_QPID_DISPATCH_TOOLS_DEBUG_" in os.environ:
raise
print "%s: %s" % (type(e).__name__, e)
return 1
def check_args(args, maxargs=0, minargs=0):
"""
    Check number of arguments, raise UsageError if incorrect.
@param maxargs: max number of allowed args after command or None to skip check.
@param minargs: min number of allowed args after command or None to skip check.
@return args padded with None to maxargs.
"""
if minargs is not None and len(args) < minargs:
raise UsageError("Not enough arguments, got %s need %s" % (len(args), minargs))
if maxargs is not None and len(args) > maxargs:
raise UsageError("Unexpected arguments: %s" % (" ".join(args[maxargs:])))
return args + [None] * (maxargs - len(args))
def connection_options(options, title="Connection Options"):
"""Return an OptionGroup for connection options."""
group = optparse.OptionGroup(options, title)
group.add_option("-b", "--bus", action="store", type="string", default="0.0.0.0",
metavar="URL", help="URL of the messaging bus to connect to (default %default)")
group.add_option("-r", "--router", action="store", type="string", default=None,
metavar="ROUTER-ID", help="Router to be queried")
group.add_option("-t", "--timeout", action="store", type="float", default=5, metavar="SECS",
help="Maximum time to wait for connection in seconds (default %default)")
group.add_option("--ssl-certificate", action="store", type="string", metavar="CERT",
help="Client SSL certificate (PEM Format)")
group.add_option("--ssl-key", action="store", type="string", metavar="KEY",
help="Client SSL private key (PEM Format)")
group.add_option("--ssl-trustfile", action="store", type="string", metavar="TRUSTED-CA-DB",
help="Trusted Certificate Authority Database file (PEM Format)")
group.add_option("--ssl-password", action="store", type="string", metavar="PASSWORD",
help="Certificate password, will be prompted if not specifed.")
# Use the --ssl-password-file option to avoid having the --ssl-password in history or scripts.
group.add_option("--ssl-password-file", action="store", type="string", metavar="SSL-PASSWORD-FILE",
help="Certificate password, will be prompted if not specifed.")
group.add_option("--sasl-mechanisms", action="store", type="string", metavar="SASL-MECHANISMS",
help="Allowed sasl mechanisms to be supplied during the sasl handshake.")
group.add_option("--sasl-username", action="store", type="string", metavar="SASL-USERNAME",
help="User name for SASL plain authentication")
group.add_option("--sasl-password", action="store", type="string", metavar="SASL-PASSWORD",
help="Password for SASL plain authentication")
# Use the --sasl-password-file option to avoid having the --sasl-password in history or scripts.
group.add_option("--sasl-password-file", action="store", type="string", metavar="SASL-PASSWORD-FILE",
help="Password for SASL plain authentication")
group.add_option("--ssl-disable-peer-name-verify", action="store_true", default=False,
help="Disables SSL peer name verification. WARNING - This option is insecure and must not be used "
"in production environments")
return group
def get_password(file=None):
if file:
with open(file, 'r') as password_file:
return str(password_file.read()).strip() # Remove leading and trailing characters
return None
class Sasl(object):
"""
A simple object to hold sasl mechanisms, sasl username and password
"""
def __init__(self, mechs=None, user=None, password=None, sasl_password_file=None):
self.mechs = mechs
self.user = user
self.password = password
self.sasl_password_file = sasl_password_file
if self.sasl_password_file:
self.password = get_password(self.sasl_password_file)
def opts_url(opts):
"""Fix up default URL settings based on options"""
url = Url(opts.bus)
# If the options indicate SSL, make sure we use the amqps scheme.
if opts.ssl_certificate or opts.ssl_trustfile:
url.scheme = "amqps"
return url
def opts_sasl(opts):
url = Url(opts.bus)
mechs, user, password, sasl_password_file = opts.sasl_mechanisms, (opts.sasl_username or url.username), (opts.sasl_password or url.password), opts.sasl_password_file
if not (mechs or user or password or sasl_password_file):
return None
return Sasl(mechs, user, password, sasl_password_file)
def opts_ssl_domain(opts, mode=SSLDomain.MODE_CLIENT):
"""Return proton.SSLDomain from command line options or None if no SSL options specified.
    @param opts: Parsed options including connection_options()
"""
certificate, key, trustfile, password, password_file, ssl_disable_peer_name_verify = opts.ssl_certificate,\
opts.ssl_key,\
opts.ssl_trustfile,\
opts.ssl_password,\
opts.ssl_password_file, \
opts.ssl_disable_peer_name_verify
if not (certificate or trustfile):
return None
if password_file:
password = get_password(password_file)
domain = SSLDomain(mode)
if trustfile:
domain.set_trusted_ca_db(str(trustfile))
if ssl_disable_peer_name_verify:
domain.set_peer_authentication(SSLDomain.VERIFY_PEER, str(trustfile))
else:
domain.set_peer_authentication(SSLDomain.VERIFY_PEER_NAME, str(trustfile))
if certificate:
domain.set_credentials(str(certificate), str(key), str(password))
return domain
class Option(optparse.Option):
"""Addes two new types to optparse.Option: json_map, json_list"""
def check_json(option, opt, value):
"""Validate a json value, for use with L{Option}"""
try:
result = json.loads(value)
if option.type == 'json_list' and not isinstance(result, Sequence) or
|
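check_args above pads the positional arguments out to maxargs with None and raises UsageError outside the allowed range. A quick sketch of that behaviour, assuming check_args and UsageError from command.py above are in scope (the argument values are illustrative):

# Two positional slots allowed, one required.
print(check_args(['query'], maxargs=2, minargs=1))
# ['query', None]  <- padded with None up to maxargs

try:
    check_args([], maxargs=2, minargs=1)
except UsageError as exc:
    print("usage error:", exc)   # Not enough arguments, got 0 need 1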
kyrelos/bauth
|
settings/production.py
|
Python
|
gpl-2.0
| 640
| 0.009375
|
from .base import *
DEBUG = False
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',#'django.db.backends.postgresql_psycopg2',
'NAME': 'bauth',
'USER': 'postgres',
'ADMINUSER':'postgres',
'PASSWORD': 'C7TS*+dp~-9JHwb*7rzP',
        'HOST': '127.0.0.1',
'PORT': '',
}
}
# Add raven to the list of installed apps
INSTALLED_APPS = INSTALLED_APPS + (
# ...
'raven.contrib.django.raven_compat',
)
RAVEN_CONFIG = {
'dsn': 'https://5fa65a7464454dcbadff8a7587d1eaa0:205b12d200e24b39b4c586f7df3965ba@app.getsentry.com/29978',
}
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/parties/CalendarGuiDay.py
|
Python
|
apache-2.0
| 30,399
| 0.003388
|
import datetime
import time
from pandac.PandaModules import TextNode, Vec3, Vec4, PlaneNode, Plane, Point3
from toontown.pgui.DirectGui import DirectFrame, DirectLabel, DirectButton, DirectScrolledList, DGG
from direct.directnotify import DirectNotifyGlobal
from toontown.pgui import DirectGuiGlobals
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.parties.PartyInfo import PartyInfo
from toontown.parties import PartyGlobals
from toontown.ai.NewsManager import NewsManager
def myStrftime(myTime):
result = ''
result = myTime.strftime('%I')
if result[0] == '0':
result = result[1:]
result += myTime.strftime(':%M %p')
return result
class CalendarGuiDay(DirectFrame):
notify = directNotify.newCategory('CalendarGuiDay')
ScrollListTextSize = 0.03
def __init__(self, parent, myDate, startDate, dayClickCallback = None, onlyFutureDaysClickable = False):
self.origParent = parent
self.startDate = startDate
self.myDate = myDate
self.dayClickCallback = dayClickCallback
self.onlyFutureDaysClickable = onlyFutureDaysClickable
DirectFrame.__init__(self, parent=parent)
self.timedEvents = []
self.partiesInvitedToToday = []
self.hostedPartiesToday = []
self.yearlyHolidaysToday = []
self.showMarkers = config.GetBool('show-calendar-markers', 0)
self.filter = ToontownGlobals.CalendarFilterShowAll
self.load()
self.createGuiObjects()
self.update()
def createDummyLocators(self):
self.dayButtonLocator = self.attachNewNode('dayButtonLocator')
self.dayButtonLocator.setX(0.1)
self.dayButtonLocator.setZ(-0.05)
self.numberLocator = self.attachNewNode('numberLocator')
self.numberLocator.setX(0.09)
self.scrollLocator = self.attachNewNode('scrollLocator')
self.selectedLocator = self.attachNewNode('selectedLocator')
self.selectedLocator.setX(0.11)
self.selectedLocator.setZ(-0.06)
def load(self):
dayAsset = loader.loadModel('phase_4/models/parties/tt_m_gui_sbk_calendar_box')
dayAsset.reparentTo(self)
self.dayButtonLocator = self.find('**/loc_origin')
self.numberLocator = self.find('**/loc_number')
self.scrollLocator = self.find('**/loc_topLeftList')
self.selectedLocator = self.find('**/loc_origin')
self.todayBox = self.find('**/boxToday')
self.todayBox.hide()
self.selectedFrame = self.find('**/boxHover')
self.selectedFrame.hide()
self.defaultBox = self.find('**/boxBlank')
self.scrollBottomRightLocator = self.find('**/loc_bottomRightList')
self.scrollDownLocator = self.find('**/loc_scrollDown')
self.attachMarker(self.scrollDownLocator)
self.scrollUpLocator = self.find('**/loc_scrollUp')
self.attachMarker(self.scrollUpLocator)
def attachMarker(self, parent, scale = 0.005, color = (1, 0, 0)):
if self.showMarkers:
marker = loader.loadModel('phase_3/models/misc/sphere')
marker.reparentTo(parent)
marker.setScale(scale)
marker.setColor(*color)
def createGuiObjects(self):
        self.dayButton = DirectButton(parent=self.dayButtonLocator, image=self.selectedFrame, relief=None, command=self.__clickedOnDay, pressEffect=1, rolloverSound=None, clickSound=None)
self.numberWidget = DirectLabel(parent=self.numberLocator, relief=None, text=str(self.myDate.day), text_scale=0.04, text_align=TextNode.ACenter, text_font=ToontownGlobals.getInterfaceFont(), text_fg=Vec4(110 / 255.0, 126 / 255.0, 255 / 255.0, 1))
        self.attachMarker(self.numberLocator)
self.listXorigin = 0
self.listFrameSizeX = self.scrollBottomRightLocator.getX() - self.scrollLocator.getX()
self.scrollHeight = self.scrollLocator.getZ() - self.scrollBottomRightLocator.getZ()
self.listZorigin = self.scrollBottomRightLocator.getZ()
self.listFrameSizeZ = self.scrollLocator.getZ() - self.scrollBottomRightLocator.getZ()
self.arrowButtonXScale = 1
self.arrowButtonZScale = 1
self.itemFrameXorigin = 0
self.itemFrameZorigin = 0
self.buttonXstart = self.itemFrameXorigin + 0.21
self.gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
buttonOffSet = -0.01
incButtonPos = (0.0, 0, 0)
decButtonPos = (0.0, 0, 0)
itemFrameMinZ = self.listZorigin
itemFrameMaxZ = self.listZorigin + self.listFrameSizeZ
arrowUp = self.find('**/downScroll_up')
arrowDown = self.find('**/downScroll_down')
arrowHover = self.find('**/downScroll_hover')
self.scrollList = DirectScrolledList(parent=self.scrollLocator, relief=None, pos=(0, 0, 0), incButton_image=(arrowUp,
arrowDown,
arrowHover,
arrowUp), incButton_relief=None, incButton_scale=(self.arrowButtonXScale, 1, self.arrowButtonZScale), incButton_pos=incButtonPos, incButton_image3_color=Vec4(1, 1, 1, 0.2), decButton_image=(arrowUp,
arrowDown,
arrowHover,
arrowUp), decButton_relief=None, decButton_scale=(self.arrowButtonXScale, 1, -self.arrowButtonZScale), decButton_pos=decButtonPos, decButton_image3_color=Vec4(1, 1, 1, 0.2), itemFrame_pos=(self.itemFrameXorigin, 0, -0.03), numItemsVisible=4, incButtonCallback=self.scrollButtonPressed, decButtonCallback=self.scrollButtonPressed)
itemFrameParent = self.scrollList.itemFrame.getParent()
self.scrollList.incButton.reparentTo(self.scrollDownLocator)
self.scrollList.decButton.reparentTo(self.scrollUpLocator)
arrowUp.removeNode()
arrowDown.removeNode()
arrowHover.removeNode()
clipper = PlaneNode('clipper')
clipper.setPlane(Plane(Vec3(-1, 0, 0), Point3(0.23, 0, 0)))
clipNP = self.scrollList.component('itemFrame').attachNewNode(clipper)
self.scrollList.component('itemFrame').setClipPlane(clipNP)
return
def scrollButtonPressed(self):
self.__clickedOnDay()
def adjustForMonth(self):
curServerDate = base.cr.toontownTimeManager.getCurServerDateTime()
if self.onlyFutureDaysClickable:
if self.myDate.year < curServerDate.year or self.myDate.year == curServerDate.year and self.myDate.month < curServerDate.month or self.myDate.year == curServerDate.year and self.myDate.month == curServerDate.month and self.myDate.day < curServerDate.day:
self.numberWidget.setColorScale(0.5, 0.5, 0.5, 0.5)
self.numberWidget['state'] = DirectGuiGlobals.DISABLED
else:
self.numberWidget.setColorScale(1, 1, 1, 1)
if self.myDate.month != self.startDate.month:
self.setColorScale(0.75, 0.75, 0.75, 1.0)
if self.dayClickCallback is not None:
self.numberWidget['state'] = DirectGuiGlobals.DISABLED
else:
self.setColorScale(1, 1, 1, 1)
if self.myDate.date() == curServerDate.date():
self.defaultBox.hide()
self.todayBox.show()
else:
self.defaultBox.show()
self.todayBox.hide()
return
def destroy(self):
if self.dayClickCallback is not None:
self.numberWidget.destroy()
self.dayClickCallback = None
        self.notify.debug('destroying %s' % self.myDate)
try:
for item in self.scrollList['items']:
if hasattr(item, 'description') and item.description and hasattr(item.description, 'destroy'):
                    self.notify.debug('destroying description of item %s' % item)
item.unbind(DGG.ENTER)
item.unbind(DGG.EXIT)
item.description.destroy()
except e:
self.notify.debug('pass %s' % self.myDate)
self.scrollList.removeAndDestroyAllItems()
self.scrollList.destroy()
self.dayButton.destroy()
DirectFrame.destroy(self)
return
def addWeeklyHolidays(self):
if not self.filter == Toontow
|
Pulgama/supriya
|
supriya/patterns/EventPattern.py
|
Python
|
mit
| 1,545
| 0.003236
|
import uuid
from uqbar.objects import new
from supriya.patterns.Pattern import Pattern
class EventPattern(Pattern):
### CLASS VARIABLES ###
__slots__ = ()
### SPECIAL METHODS ###
def _coerce_iterator_output(self, expr, state=None):
import supriya.patterns
if not isinstance(expr, supriya.patterns.Event):
expr = supriya.patterns.NoteEvent(**expr)
if expr.get("uuid") is None:
expr = new(expr, uuid=uuid.uuid4())
return expr
### PUBLIC METHODS ###
def play(self, clock=None, server=None):
import supriya.patterns
import supriya.realtime
        event_player = supriya.patterns.RealtimeEventPlayer(
self, clock=clock, server=server or supriya.realtime.Server.default()
)
event_player.start()
return event_player
def with_bus(self, calculation_rate="audio", channel_count=None, release_time=0.25):
import supriya.patterns
return supriya.patterns.Pbus(
self,
calculation_rate=calculation_rate,
channel_count=channel_count,
release_time=release_time,
)
def with_effect(self, synthdef, release_time=0.25, **settings):
import supriya.patterns
return supriya.patterns.Pfx(
self, synthdef=synthdef, release_time=release_time, **settings
)
def with_group(self, release_time=0.25):
import supriya.patterns
return supriya.patterns.Pgroup(self, release_time=release_time)
|
zokis/mapa_do_cidadao
|
mapa_cidadao/mapa_cidadao/core/migrations/0003_auto_20150525_1937.py
|
Python
|
mit
| 457
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20150525_1743'),
]
operations = [
migrations.AlterField(
model_name='categoria',
name='nome',
field=models.CharField(max_length=75, verbose_name=b'nome'),
preserve_default=True,
),
]
|
|
jbaek7023/CustomEcommerce
|
src/products/mixins.py
|
Python
|
mit
| 1,045
| 0.002871
|
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import Http404
class StaffRequiredMixin(object):
@classmethod
def as_view(self, *args, **kwargs):
view = super(StaffRequiredMixin, self).as_view(*args, **kwargs)
return staff_member_required(view)
@method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
if request.user.is_staff:
return super(StaffRequiredMixin, self).dispatch(request, *args, **kwargs)
else:
return Http404
class LoginRequiredMixin(object):
@classmethod
def as_view(self, *args, **kwargs):
view = super(LoginRequiredMixin, self).as_view(*args, **kwargs)
return login_required(view)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
|
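A hedged usage sketch for the mixins above: a staff-only Django class-based list view. The Product model and template name are hypothetical; only StaffRequiredMixin comes from the file above.

from django.views.generic import ListView

from products.mixins import StaffRequiredMixin
from products.models import Product   # hypothetical model


class StaffProductListView(StaffRequiredMixin, ListView):
    """Only reachable by staff users, per the mixin's dispatch() above."""
    model = Product
    template_name = 'products/staff_list.html'   # hypothetical template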
ArtemVavilov88/php4dvd_tests
|
php4dvd/model/user.py
|
Python
|
apache-2.0
| 512
| 0.005859
|
class User(object):
def __init__(self, username=None, password=None, email=None):
        self.username = username
self.password = password
self.email = email
@classmethod
def admin(cls):
return cls(username="admin", password="admin")
#random values for username and password
@classmethod
def random_data(cls):
from random import randint
        return cls(username="user" + str(randint(0, 1000)), password="pass" + str(randint(0, 1000)))
|
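A short sketch of how the User helpers above might be used in a test: admin() for the known account and random_data() for throwaway credentials. Only attributes defined in the class above are touched.

# Assuming the User class from php4dvd/model/user.py above is importable.
admin = User.admin()
assert admin.username == "admin" and admin.password == "admin"

visitor = User.random_data()
assert visitor.username.startswith("user")
assert visitor.password.startswith("pass")
assert visitor.email is None   # random_data() leaves email unset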
SimonGreenhill/ABVDGet
|
abvdget/abvd_download.py
|
Python
|
bsd-3-clause
| 1,118
| 0.004472
|
#!/usr/bin/env python3
#coding=utf-8
import sys
import argparse
from .ABVD import DATABASES, Downloader
from . import __version__
import json
def parse_args(args):
"""
Parses command line arguments
Returns a tuple of (inputfile, method, outputfile)
"""
parser = argparse.ArgumentParser(description='Downloads data from the ABVD')
    parser.add_argument('--version', action='version', version='%s' % __version__)
parser.add_argument("database", help="database", choices=DATABASES)
parser.add_argument("language", help="language", type=int)
parser.add_argument(
'-o', "--output", dest='output', default=None,
help="output file", action='store'
)
args = parser.parse_args(args)
return (args.database, args.language, args.output)
def main(args=None): # pragma: no cover
if args is None:
args = sys.argv[1:]
database, language, outfile = parse_args(args)
d = Downloader(database)
d.get(language)
if outfile:
d.write(outfile)
else:
print(json.dumps(d.data, sort_keys=True, indent=2, ensure_ascii=False))
|
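parse_args above returns a (database, language, output) tuple. A sketch of calling it directly; 'austronesian' is assumed to be one of the DATABASES choices and the language id is made up, so treat the values as placeholders.

# Assuming parse_args from abvd_download.py above is importable.
database, language, output = parse_args(
    ['austronesian', '261', '-o', 'language_261.json']   # illustrative values
)
print(database, language, output)   # austronesian 261 language_261.json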
bdero/edx-platform
|
lms/djangoapps/instructor/tests/test_legacy_xss.py
|
Python
|
agpl-3.0
| 2,400
| 0.000833
|
"""
Tests of various instructor dashboard features that include lists of students
"""
from django.conf import settings
from django.test.client import RequestFactory
from django.test.utils import override_settings
from markupsafe import escape
from courseware.tests.tests import TEST_DATA_MIXED_MODULESTORE
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from edxmako.tests import mako_middleware_process_request
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from instructor.views import legacy
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestXss(ModuleStoreTestCase):
def setUp(self):
self._request_factory = RequestFactory()
self._course = CourseFactory.create()
self._evil_student = UserFactory.create(
email="robot+evil@edx.org",
username="evil-robot",
profile__name='<span id="evil">Evil Robot</span>',
)
self._instructor = UserFactory.create(
email="robot+instructor@edx.org",
username="instructor",
is_staff=True
)
CourseEnrollmentFactory.create(
user=self._evil_student,
course_id=self._course.id
)
def _test_action(self, action):
"""
Test for XSS vulnerability in the given action
Build a request with the given action, call the instructor dashboard
view, and check that HTML code in a user's name is properly escaped.
"""
        req = self._request_factory.post(
"dummy_url",
data={"action": action}
)
req.user = self._instructor
req.session = {}
mako_middleware_process_request(req)
resp = legacy.instructor_dashboard(req, self._course.id.to_deprecated_string())
respUnicode = resp.content.decode(settings.DEFAULT_CHARSET)
self.assertNotIn(self._evil_student.profile.name, respUnicode)
self.assertIn(escape(self._evil_student.profile.name), respUnicode)
def test_list_enrolled(self):
self._test_action("List enrolled students")
def test_dump_list_of_enrolled(self):
self._test_action("Dump list of enrolled students")
def test_dump_grades(self):
self._test_action("Dump Grades for all students in this course")
|
cwimbrow/veganeyes-api
|
app/api_1_0/errors.py
|
Python
|
mit
| 1,089
| 0.000918
|
"""
The MIT License (MIT)
Copyright (c) 2014 Chris Wimbrow
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to w
|
hom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
|
erobinson/cloop
|
device/processes/test/__init__.py
|
Python
|
gpl-2.0
| 25
| 0
|
__author__ = 'erobinson'
|
yamt/neutron
|
quantum/tests/unit/test_servicetype.py
|
Python
|
apache-2.0
| 20,304
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Salvatore Orlando, VMware
#
import contextlib
import logging
import mock
from oslo.config import cfg
import webob.exc as webexc
import webtest
from quantum.api import extensions
from quantum import context
from quantum.db import api as db_api
from quantum.db import servicetype_db
from quantum.extensions import servicetype
from quantum import manager
from quantum.plugins.common import constants
from quantum.tests.unit import dummy_plugin as dp
from quantum.tests.unit import test_api_v2
from quantum.tests.unit import test_db_plugin
from quantum.tests.unit import test_extensions
from quantum.tests.unit import testlib_api
LOG = logging.getLogger(__name__)
DEFAULT_SERVICE_DEFS = [{'service_class': constants.DUMMY,
'plugin': dp.DUMMY_PLUGIN_NAME}]
_uuid = test_api_v2._uuid
_get_path = test_api_v2._get_path
class TestServiceTypeExtensionManager(object):
"""Mock extensions manager."""
def get_resources(self):
return (servicetype.Servicetype.get_resources() +
dp.Dummy.get_resources())
def get_actions(self):
return []
def get_request_extensions(self):
return []
class ServiceTypeTestCaseBase(testlib_api.WebTestCase):
fmt = 'json'
def setUp(self):
# This is needed because otherwise a failure will occur due to
# nonexisting core_plugin
cfg.CONF.set_override('core_plugin', test_db_plugin.DB_PLUGIN_KLASS)
cfg.CONF.set_override('service_plugins',
["%s.%s" % (dp.__name__,
dp.DummyServicePlugin.__name__)])
self.addCleanup(cfg.CONF.reset)
# Make sure at each test a new instance of the plugin is returned
manager.QuantumManager._instance = None
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
ext_mgr = TestServiceTypeExtensionManager()
self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
self.api = webtest.TestApp(self.ext_mdw)
self.resource_name = servicetype.RESOURCE_NAME.replace('-', '_')
super(ServiceTypeTestCaseBase, self).setUp()
class ServiceTypeExtensionTestCase(ServiceTypeTestCaseBase):
def setUp(self):
self._patcher = mock.patch(
"%s.%s" % (servicetype_db.__name__,
servicetype_db.ServiceTypeManager.__name__),
autospec=True)
self.addCleanup(self._patcher.stop)
self.mock_mgr = self._patcher.start()
self.mock_mgr.get_instance.return_value = self.mock_mgr.return_value
super(ServiceTypeExtensionTestCase, self).setUp()
def _test_service_type_create(self, env=None,
expected_status=webexc.HTTPCreated.code):
tenant_id = 'fake'
if env and 'quantum.context' in env:
tenant_id = env['quantum.context'].tenant_id
data = {self.resource_name:
{'name': 'test',
'tenant_id': tenant_id,
'service_definitions':
[{'service_class': constants.DUMMY,
'plugin': dp.DUMMY_PLUGIN_NAME}]}}
return_value = data[self.resource_name].copy()
svc_type_id = _uuid()
return_value['id'] = svc_type_id
instance = self.mock_mgr.return_value
instance.create_service_type.return_value = return_value
expect_errors = expected_status >= webexc.HTTPBadRequest.code
res = self.api.post(_get_path('service-types', fmt=self.fmt),
self.serialize(data),
extra_environ=env,
expect_errors=expect_errors,
content_type='application/%s' % self.fmt)
self.assertEqual(res.status_int, expected_status)
if not expect_errors:
instance.create_service_type.assert_called_with(mock.ANY,
service_type=data)
res = self.deserialize(res)
self.assertTrue(self.resource_name in res)
svc_type = res[self.resource_name]
self.assertEqual(svc_type['id'], svc_type_id)
# NOTE(salvatore-orlando): The following two checks are
# probably not essential
self.assertEqual(svc_type['service_definitions'],
data[self.resource_name]['service_definitions'])
def _test_service_type_update(self, env=None,
expected_status=webexc.HTTPOk.code):
svc_type_name = 'updated'
data = {self.resource_name: {'name': svc_type_name}}
svc_type_id = _uuid()
return_value = {'id': svc_type_id,
'name': svc_type_name}
instance = self.mock_mgr.return_value
expect_errors = expected_status >= webexc.HTTPBadRequest.code
instance.update_service_type.return_value = return_value
res = self.api.put(_get_path('service-types/%s' % svc_type_id,
fmt=self.fmt),
self.serialize(data))
if not expect_errors:
instance.update_service_type.assert_called_with(mock.ANY,
svc_type_id,
service_type=data)
self.assertEqual(res.status_int, webexc.HTTPOk.code)
res = self.deserialize(res)
self.assertTrue(self.resource_name in res)
svc_type = res[self.resource_name]
self.assertEqual(svc_type['id'], svc_type_id)
self.assertEqual(svc_type['name'],
data[self.resource_name]['name'])
def test_service_type_create(self):
self._test_service_type_create()
def test_service_type_update(self):
self._test_service_type_update()
def test_service_type_delete(self):
svctype_id = _uuid()
instance = self.mock_mgr.return_value
res = self.api.delete(_get_path('service-types/%s' % svctype_id,
fmt=self.fmt))
instance.delete_service_type.assert_called_with(mock.ANY,
svctype_id)
self.assertEqual(res.status_int, webexc.HTTPNoContent.code)
def test_service_type_get(self):
svctype_id = _uuid()
return_value = {self.resource_name: {'name': 'test',
'service_definitions': [],
'id': svctype_id}}
instance = self.mock_mgr.return_value
instance.get_service_type.return_value = return_value
res = self.api.get(_get_path('service-types/%s' % svctype_id,
fmt=self.fmt))
instance.get_service_type.assert_called_with(mock.ANY,
svctype_id,
fields=mock.ANY)
self.assertEqual(res.status_int, webexc.HTTPOk.code)
def test_service_type_list(self):
svctype_id = _uuid()
return_value = [{self.resource_name: {'name': 'test',
'service_definitions': [],
'id': svctype_id}}]
instance = self.mock_mgr.return
|
lddubeau/glerbl
|
glerbl/check/__init__.py
|
Python
|
gpl-3.0
| 461
| 0
|
class CheckBase(object):
"""
Base class for checks.
"""
hooks = []
# pylint: disable=W0105
"""Git hooks to which this class applies. A
|
list of strings."""
def execute(self, hook):
"""
Executes the check.
:param hook: The name of the hook being run.
:type hook: :class:`str`
:returns: ``True`` if the check passed, ``False`` if not.
:rtype: :class:`bool`
"""
pass
|
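The CheckBase docstrings above fix the contract: a hooks list naming the git hooks a check applies to, and execute(hook) returning True or False. A hedged sketch of a concrete check; the hook name and the whitespace rule are illustrative, not part of glerbl.

import subprocess


class NoTrailingWhitespace(CheckBase):
    """Illustrative check: fail the commit if staged lines add whitespace errors."""

    hooks = ["pre-commit"]

    def execute(self, hook):
        # `git diff --cached --check` exits non-zero when whitespace errors
        # are present in the staged changes.
        return subprocess.call(["git", "diff", "--cached", "--check"]) == 0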
NDAR/NITRC-Pipeline-for-NDAR
|
unsupported/tests/test_nifti.py
|
Python
|
bsd-2-clause
| 1,410
| 0.004965
|
import os
import nose.tools
import ndar
def test_nifti_nifti():
"""image is already a NIfTI-1 file"""
im = ndar.Image('test_data/06025B_mprage.nii.gz')
assert im.nifti_1 == im.path(im.files['NIfTI-1'][0])
def test_nifti_unzipped_nifti():
"""image is already an uncompressed NIfTI-1 file"""
im = ndar.Image('test_data/a.nii')
assert im.nifti_1 == im.path(im.files['NIfTI-1'][0])
def test_nifti_nonnifti():
"""image is not a NIfTI-1 file"""
    im = ndar.Image('test_data/NDAR_INVZU049GXV_image03_1326225820791.zip')
assert os.path.exists(im.nifti_1)
def test_nifti_nonvolume():
"""image is not a volume"""
    im = ndar.Image('test_data/10_425-02_li1_146.png')
nose.tools.assert_raises(AttributeError, lambda: im.nifti_1)
def test_nifti_mcfail():
"""mri_convert failure (by way of a bad image)"""
im = ndar.Image('test_data/bogus.mnc')
nose.tools.assert_raises(AttributeError, lambda: im.nifti_1)
def test_nifti_nifti_gz():
"""image is a gzipped NIfTI-1 file"""
im = ndar.Image('test_data/06025B_mprage.nii.gz')
assert im.nifti_1_gz == im.path(im.files['NIfTI-1'][0])
def test_nifti_unzipped_nifti_gz():
"""image is an unzipped NIfTI-1 file"""
im = ndar.Image('test_data/a.nii')
assert im.nifti_1_gz != im.path(im.files['NIfTI-1'][0])
assert im.nifti_1_gz.endswith('.nii.gz')
assert os.path.exists(im.nifti_1_gz)
# eof
|
KMK-ONLINE/ansible-modules-core
|
network/openswitch/ops_config.py
|
Python
|
gpl-3.0
| 7,945
| 0.001133
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: ops_config
version_added: "2.1"
author: "Peter sprygada (@privateip)"
short_description: Manage OpenSwitch configuration using CLI
description:
- OpenSwitch configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ops configuration sections in
a deterministic way.
extends_documentation_fragment: openswitch
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: true
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
before:
description:
      - The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
        stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. Finally if match is set to I(exact), command lines
must be an equal match.
required: false
default: line
choices: ['line', 'strict', 'exact']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block']
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
required: false
default: false
choices: ['true', 'false']
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
"""
EXAMPLES = """
- name: configure hostname over cli
ops_config:
lines:
- "hostname {{ inventory_hostname }}"
- name: configure vlan 10 over cli
ops_config:
lines:
- no shutdown
parents:
- vlan 10
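# The task below is a hedged sketch for illustration only: it combines the
# parents, before and replace options described above. The commands are
# placeholders and have not been verified against a real OpenSwitch device.
- name: rebuild the vlan 10 section, clearing it first
  ops_config:
    lines:
      - no shutdown
    parents:
      - vlan 10
    before:
      - no vlan 10
    replace: block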
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
responses:
description: The set of responses from issuing the commands on the device
  returned: when not check_mode
type: list
sample: ['...', '...']
"""
import re
import itertools
def get_config(module):
config = module.params['config'] or dict()
if not config and not module.params['force']:
config = module.config
return config
def build_candidate(lines, parents, config, strategy):
candidate = list()
if strategy == 'strict':
for index, cmd in enumerate(lines):
try:
if cmd != config[index]:
candidate.append(cmd)
except IndexError:
candidate.append(cmd)
elif strategy == 'exact':
if len(lines) != len(config):
candidate = list(lines)
else:
for cmd, cfg in itertools.izip(lines, config):
if cmd != cfg:
candidate = list(lines)
break
else:
for cmd in lines:
if cmd not in config:
candidate.append(cmd)
return candidate
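# Worked illustration (hypothetical values): with lines=['a', 'b'] and
# config=['a', 'c'], the default 'line' strategy returns ['b'] (membership
# test only), 'strict' returns ['b'] (position 1 differs), and 'exact'
# returns ['a', 'b'] because the two lists are not exactly equal.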
def main():
argument_spec = dict(
lines=dict(aliases=['commands'], required=True, type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact']),
replace=dict(default='line', choices=['line', 'block']),
force=dict(default=False, type='bool'),
config=dict(),
transport=dict(default='cli', choices=['cli'])
)
module = get_module(argument_spec=argument_spec,
supports_check_mode=True)
lines = module.params['lines']
parents = module.params['parents'] or list()
before = module.params['before']
after = module.params['after']
match = module.params['match']
replace = module.params['replace']
contents = get_config(module)
config = module.parse_config(contents)
if parents:
for parent in parents:
for item in config:
if item.text == parent:
config = item
try:
children = [c.text for c in config.children]
except AttributeError:
children = [c.text for c in config]
else:
children = [c.text for c in config if not c.parents]
result = dict(changed=False)
candidate = build_candidate(lines, parents, children, match)
if candidate:
if replace == 'line':
candidate[:0] = parents
else:
candidate = list(parents)
candidate.extend(lines)
if before:
candidate[:0] = before
if after:
candidate.extend(after)
if not module.check_mode:
response = module.configure(candidate)
result['responses'] = response
result['changed'] = True
result['updates'] = candidate
return module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.openswitch import *
if __name__ == '__main__':
main()
|
lupyuen/RaspberryPiImage | usr/share/pyshared/ajenti/plugins/bind9/__init__.py | Python | apache-2.0 | 313 | 0
|
from ajenti.api import *
from ajenti.plugins import *
info = PluginInfo(
title='BIND9',
description='BIND9 DNS server',
icon='globe',
dependencies=[
PluginDependency('main'),
        PluginDependency('services'),
BinaryDependency('named'),
],
)
def init():
import main
|
chrisspen/dtree | setup.py | Python | lgpl-3.0 | 1,398 | 0.007153
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from distutils.core import setup, Command # pylint: disable=no-name-in-module
import dtree
class TestCommand(Command):
description = "Runs unittests."
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('python dtree.py')
setup(
name='dtree',
version=dtree.__version__,
description='A simple pure-Python batch decision tree construction algorithm.',
author='Chris Spencer',
author_email='chrisspen@gmail.com',
url='https://github.com/chrisspen/dtree',
license='LGPL',
py_modules=['dtree'],
#https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Development Status :: 5 - Production/St
|
able",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Li
|
cense :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
platforms=['OS Independent'],
# test_suite='dtree',
cmdclass={
'test': TestCommand,
},
)
|
camponez/importescala | test/test_escala.py | Python | gpl-3.0 | 3,876 | 0
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# coding=utf-8
import unittest
from datetime import datetime
from lib.escala import Escala
import dirs
dirs.DEFAULT_DIR = dirs.TestDir()
class FrameTest(unittest.TestCase):
def setUp(self):
self.escala = Escala('fixtures/escala.xml')
self.dir = dirs.TestDir()
self.maxDiff = None
def tearDown(self):
pass
def test_attributos_voo_1(self):
p_voo = self.escala.escalas[0]
self.assertEqual(p_voo.activity_date, datetime(2013, 3, 1, 11, 36))
self.assertEqual(p_voo.present_location, 'VCP')
self.assertEqual(p_voo.flight_no, '4148')
self.assertEqual(p_voo.origin, 'VCP')
self.assertEqual(p_voo.destination, 'GYN')
self.assertEqual(p_voo.actype, 'E95')
self.assertTrue(p_voo.checkin)
self.assertEqual(p_voo.checkin_time, datetime(2013, 3, 1, 10, 36))
self.assertEqual(p_voo.std, datetime(2013, 3, 1, 13, 13))
self.assertEqual(p_voo.sta, datetime(2013, 3, 1, 11, 36))
self.assertEqual(p_voo.activity_info, 'AD4148')
self.assertFalse(p_voo.duty_design)
def test_attributos_voo_17(self):
p_voo = self.escala.escalas[17]
self.assertEqual(p_voo.activity_date, datetime(2013, 10, 28, 3, 0))
self.assertEqual(p_voo.present_location, 'VCP')
self.assertEqual(p_voo.flight_no, None)
self.assertEqual(p_voo.origin, 'VCP')
self.assertEqual(p_voo.destination, 'VCP')
self.assertEqual(p_voo.activity_info, 'P04')
self.assertEqual(p_voo.actype, None)
self.assertEqual(p_voo.sta, datetime(2013, 10, 28, 3, 0))
self.assertEqual(p_voo.std, datetime(2013, 10, 28, 15, 0))
self.assertFalse(p_voo.checkin)
self.assertEqual(p_voo.checkin_time, None)
self.assertFalse(p_voo.duty_design)
def test_attributos_voo_18(self):
p_voo = self.escala.escalas[18]
self.assertEqual(p_voo.activity_date, datetime(2013, 10, 29, 4, 58))
self.assertEqual(p_voo.present_location, 'VCP')
self.assertEqual(p_voo.flight_no, '4050')
self.assertEqual(p_voo.origin, 'VCP')
self.assertEqual(p_voo.destination, 'FLN')
self.assertEqual(p_voo.activity_info, 'AD4050')
self.assertEqual(p_voo.actype, 'E95')
self.assertEqual(p_voo.sta, datetime(2013, 10, 29, 4, 58))
self.assertEqual(p_voo.std, datetime(2013, 10, 29, 6, 15))
self.assertTrue(p_voo.checkin)
self.assertEqual(p_voo.checkin_time, datetime(2013, 10, 29, 5, 8))
self.assertFalse(p_voo.duty_design)
self.assertEqual(p_voo.horas_de_voo, '1:17')
def test_attributos_quarto_voo(self):
p_voo = self.escala.escalas[25]
self.assertFalse(p_voo.checkin)
self.assertEqual(p_voo.checkin_time, None)
self.assertEqual(p_voo.flight_no, '2872')
self.assertEqual(p_voo.activity_info, 'AD2872')
def test_calculo_horas_voadas(self):
s_horas = {
'h_diurno': '6:40',
'h_noturno': '6:47',
'h_total_voo': '13:27',
'h_faixa2': '0:00',
'h_sobreaviso': '40:00',
'h_reserva': '29:13'
}
        self.assertEqual(self.escala.soma_horas(), s_horas)
def test_ics(self):
"""
Check ICS output
"""
escala = Escala('fixtures/escala_ics.xml')
f_result = open(self.dir.get_data_dir() + 'fixtures/escala.ics')
self.assertEqual(escala.ics(), f_result.read())
f_result.close()
def test_csv(self):
"""
Check CSV output
"""
f_result = open(self.dir.get_data_dir() + 'fixtures/escala.csv')
self.assertEqual(self.escala.csv(), f_result.read())
f_result.close()
def main():
unittest.main()
if __name__ == '__main__':
main()
|
futurepr0n/Books-solutions | Python-For-Everyone-Horstmann/Chapter9-Objects-and-Classes/test_24.py | Python | mit | 718 | 0
|
# Unit tests for p24.py
# IMPORTS
from S24 import Item
import unittest
# main
class ItemTests(unittest.TestCase):
def test_empty_constructor(self):
item = Item()
self.assertEqual("", item.get_name())
self.assertEqual(0.0, item.get_price())
def test_constructor_with_name(self):
item = Item("Corn Flakes")
self.assertEqual("Corn Flakes", item.get_name())
self.assertEqual(0.0, item.get_price())
def test_constructor_with_name_and_price(self):
item = Item("Corn Flakes", 3.95)
self.assertEqual("Corn Flakes", item.get_name())
self.assertEqual(3.95, item.get_price())
# PROGRAM RUN
if __name__ == '__main__':
unittest.main()
|
rohitranjan1991/home-assistant | homeassistant/components/nexia/entity.py | Python | mit | 4,354 | 0.000689
|
"""The nexia integration base entity."""
from nexia.thermostat import NexiaThermostat
from nexia.zone import NexiaThermostatZone
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTRIBUTION,
DOMAIN,
MANUFACTURER,
SIGNAL_THERMOSTAT_UPDATE,
SIGNAL_ZONE_UPDATE,
)
from .coordinator import NexiaDataUpdateCoordinator
class NexiaEntity(CoordinatorEntity):
"""Base class for nexia entities."""
def __init__(self, coordinator, name, unique_id):
"""Initialize the entity."""
super().__init__(coordinator)
self._unique_id = unique_id
self._name = name
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name."""
return self._name
@property
def extra_state_attributes(self):
"""Return the device specific state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
}
class NexiaThermostatEntity(NexiaEntity):
"""Base class for nexia devices attached to a thermostat."""
def __init__(self, coordinator, thermostat, name, unique_id):
"""Initialize the entity."""
super().__init__(coordinator, name, unique_id)
self._thermostat: NexiaThermostat = thermostat
@property
def device_info(self) -> DeviceInfo:
"""Return the device_info of the device."""
assert isinstance(self.coordinator, NexiaDataUpdateCoordinator)
return DeviceInfo(
configuration_url=self.coordinator.nexia_home.root_url,
identifiers={(DOMAIN, self._thermostat.thermostat_id)},
manufacturer=MANUFACTURER,
model=self._thermostat.get_model(),
name=self._thermostat.get_name(),
sw_version=self._thermostat.get_firmware(),
)
async def async_added_to_hass(self):
"""Listen for signals for services."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SIGNAL_THERMOSTAT_UPDATE}-{self._thermostat.thermostat_id}",
self.async_write_ha_state,
)
)
def _signal_thermostat_update(self):
"""Signal a thermostat update.
        Whenever the underlying library does an action against
        a thermostat, the data for the thermostat and all
        connected zones are updated.
Update all the zones on the thermostat.
"""
dispatcher_send(
self.hass, f"{SIGNAL_THERMOSTAT_UPDATE}-{self._thermostat.thermostat_id}"
)
class NexiaThermostatZoneEntity(NexiaThermostatEntity):
""
|
"Base class for nexia devices attached to a thermostat."""
def __init__(self, coordinator, zone, name, unique_id):
"""Initialize the entity."""
super().__init__(coordinator, zone.thermostat, name, unique_id)
self._zone: NexiaThermostatZone = zone
@property
def device_info(self):
"""Return the device_info of the device."""
data = super().device_info
zone_name = self._zone.get_name()
data.update(
{
"identifiers": {(DOMAIN, self._zone.zone_id)},
"name": zone_name,
"suggested_area": zone_name,
"via_device": (DOMAIN, self._zone.thermostat.thermostat_id),
}
)
return data
async def async_added_to_hass(self):
"""Listen for signals for services."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}",
self.async_write_ha_state,
)
)
def _signal_zone_update(self):
"""Signal a zone update.
Whenever the underlying library does an action against
a zone, the data for the zone is updated.
Update a single zone.
"""
dispatcher_send(self.hass, f"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}")
|
Princessgladys/googleresourcefinder | lib/feedlib/geo.py | Python | apache-2.0 | 2,777 | 0.001801
|
# Copyright 2009-2010 by Ka-Ping Yee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Geographical functions. All measurements are in metres."""
from math import asin, cos, pi, sin, sqrt
EARTH_RADIUS = 6371009
def hav(theta):
"""Computes the haversine of an angle given in radians."""
return sin(theta/2)**2
def central_angle((phi_s, lam_s), (phi_f, lam_f)):
"""Returns the central angle between two points on a sphere, whose
locations are given as (latitude, longitude) pairs in radians."""
d_phi = phi_s - phi_f
d_lam = lam_s - lam_f
return 2*asin(sqrt(hav(d_phi) + cos(phi_s)*cos(phi_f)*hav(d_lam)))
def distance(start, finish):
"""Approximates the distance in metres between two points on the Earth,
which are given as {'lat':y, 'lon':x} objects in degrees."""
start_rad = (start['lat']*pi/180, start['lon']*pi/180)
finish_rad = (finish['lat']*pi/180, finish['lon']*pi/180)
return central_angle(start_rad, finish_rad)*EARTH_RADIUS
def point_inside_polygon(point, poly):
"""Returns true if the given point is inside the given polygon.
point is given as an {'lat':y, 'lon':x} object in degrees
poly is given as a list of (longitude, latitude) tuples. The last vertex
is assumed to be the same as the first vertex.
TODO(shakusa): poly should probably be expressed in a less-confusing way"""
lat = point['lat']
lon = point['lon']
n = len(poly)
inside = False
# Count the parity of intersections of a horizontal eastward ray starting
    # at (lon, lat). If even, the point is outside; if odd, it is inside
lon1, lat1 = poly[0]
for i in range(n + 1):
lon2, lat2 = poly[i % n]
# if our ray falls within the vertical coords of the edge
if min(lat1, lat2) < lat <= max(lat1, lat2):
# if our (eastward) ray starts before the edge and the edge is not
# horizontal
if lon <= max(lon1, lon2) and lat1 != lat2:
lon_inters = lon1 + (lat - lat1) * (lon2 - lon1) / (lat2 - lat1)
                # if the intersection is beyond the start of the ray,
                # we've crossed it
if lon <= lon_inters:
inside = not inside
lon1, lat1 = lon2, lat2
return inside
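# Minimal usage sketch (hypothetical coordinates, for illustration only; not
# part of the original module's API or tests).
if __name__ == '__main__':
    # Roughly Paris and London; expect a distance on the order of 340 km.
    paris = {'lat': 48.8566, 'lon': 2.3522}
    london = {'lat': 51.5074, 'lon': -0.1278}
    print 'distance (m):', distance(paris, london)
    # A unit square given as (lon, lat) tuples with the first vertex repeated.
    square = [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]
    print 'inside:', point_inside_polygon({'lat': 0.5, 'lon': 0.5}, square)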
|
dungeonsnd/forwarding | EChat/pack.py | Python | bsd-3-clause | 3,621 | 0.023497
|
# -*- coding: utf-8 -*-
import hashlib
import math
import struct
import base64
import json
import zlib
import binascii
from Crypto.Cipher import AES
from Crypto import Random
salt ='__E3S$hH%&*KL:"II<UG=_!@fc9}021jFJ|KDI.si81&^&%%^*(del?%)))+__'
fingerprint_len =4
iv_len =16
randomiv_len =4
print_log =False
# 输入密码,输出其hash值的前两个字节的16进制表示.
def fingerprintSimple(input_str):
return binascii.hexlify(hashlib.sha256(input_str).digest()[0:2])
def hash(input):
return hashlib.sha256(input).digest()
def fingerprint(input):
return struct.pack('!i',zlib.adler32(input))
def pack(pwd, dict_input):
try:
if print_log:
print 'pack pwd=', pwd
print 'pack dict_input=', dict_input
input =json.dumps(dict_input)
l =len(input)
output =input.ljust(int(math.ceil(l/16.0)*16), ' ')
rndfile = Random.new()
randomiv =rndfile.read(randomiv_len)
iv =hash(randomiv)[0:iv_len]
if print_log:
print 'pack iv=', repr(iv)
key =hash(salt+pwd)
encryptor =AES.new(key, AES.MODE_CBC, iv)
encrypted_str = encryptor.encrypt(output)
output =randomiv+encrypted_str
fp =fingerprint(output)
# body_len + fp + randomiv + encrypted_msg + padding
body_len =struct.pack('!i', l)
output =body_len+fp+output
if print_log:
print 'pack body_len=', l
            print 'pack randomiv=', repr(randomiv)
            print 'pack fingerprint=', repr(fp)
            print 'pack encrypted_str=%s, len=%d' % (repr(encrypted_str), len(encrypted_str))
output =base64.b64encode(output)
if print_log:
print 'pack result:%s, len=%d' %(output, len(output))
output =output+'\r\n'
return output
except:
return ''
def unpack(pwd, input_str_utf8):
try:
if input_str_utf8[-2: ]=='\r\n':
input =input_str_utf8[0: len(input_str_utf8)-2]
else :
input =input_str_utf8
if print_log:
print 'unpack input:%s, len=%d' %(input, len(input))
input =base64.b64decode(input)
# body_len + fp + randomiv + encrypted_msg + padding
l, =struct.unpack('!i', input[0:4])
if print_log:
print 'unpack body_len=', l
input =input[4:]
if print_log:
print 'unpack input fingerprint=', repr(input[0:fingerprint_len])
print 'unpack cal fingerprint=', repr(fingerprint(input[fingerprint_len:]))
if fingerprint(input[fingerprint_len:])!=input[0:fingerprint_len]:
return {}
input =input[fingerprint_len:]
randomiv =input[0:randomiv_len]
iv =hash(randomiv)[0:iv_len]
input =input[randomiv_len:]
if print_log:
print 'unpack randomiv=', repr(randomiv)
print 'unpack iv=', repr(iv)
key =hash(salt+pwd)
decryptor =AES.new(key, AES.MODE_CBC, iv)
output = decryptor.decrypt(input)
output =output[0:l]
if print_log:
print 'unpack, json.loads data:', output
d =json.loads(output)
if print_log:
print 'unpack result:', d
return d
except:
return {}
if __name__=='__main__':
d ={'k':u'大神好'}
print 'pack input=',d
enc =pack('qwert',d)
print 'pack result=',enc
d =unpack('qwert',enc)
print 'unpack result=',d
|
ldemailly/wdt | test/wdt_port_block_test.py | Python | bsd-3-clause | 3,215 | 0.000622
|
#! /usr/bin/env python
import re
from threading import Thread
from common_utils import *
# Todo: refactor using more of common_utils
receiver_end_time = 0
receiver_status = 0
def wait_for_receiver_finish(receiver_process):
global receiver_end_time
global receiver_status
receiver_status = receiver_process.wait()
receiver_end_time = time.clock()
def test(resumption):
global receiver_end_time
global receiver_status
environment_variable_name = 'WDT_TEST_IPV6_CLIENT'
if (
environment_variable_name in os.environ and
os.environ[environment_variable_name] == "0"
):
print("Test with ipv6 client is disabled in this system")
return
receiver_cmd = get_receiver_binary() + " -skip_writes -num_ports=1 -v 1"
    print(receiver_cmd)
receiver_process = subprocess.Popen(
receiver_cmd.split(),
stdout=subprocess.PIPE
)
connection_url = receiver_process.stdout.readline().strip()
print(connection_url)
    # wdt url can be of two kinds:
# 1. wdt://localhost?ports=1,2,3,4
# 2. wdt://localhost:1?num_ports=4
# the second kind of url is another way of expressing the first one
url_match = re.search('\?(.*&)?ports=([0-9]+).*', connection_url)
if not url_match:
url_match = re.search(':([0-9]+)(\?.*)', connection_url)
rest_of_url = url_match.group(2)
port_to_block = url_match.group(1)
start_port = ":" + port_to_block
else:
rest_of_url = url_match.group(0)
start_port = ""
port_to_block = url_match.group(2)
print(rest_of_url + " " + port_to_block)
# start a thread to wait for receiver finish
thread = Thread(target=wait_for_receiver_finish, args=[receiver_process])
thread.start()
# we hack the url to be ::1 instead of hostname to increase chances
# it works on machines which do have ipv6 but no dns entry
sender_cmd = (
"(sleep 20 | nc -4 localhost {0}) &> /dev/null & "
"(sleep 20 | nc -4 localhost {0}) &> /dev/null & "
"sleep 1; {3} -directory wdt/ -ipv6 "
"-num_ports=1 "
"-connection_url \"wdt://[::1]{1}{2}\""
).format(
port_to_block, start_port, rest_of_url, get_sender_binary()
)
if resumption:
sender_cmd = sender_cmd + " -enable_download_resumption"
print(sender_cmd)
status = os.system(sender_cmd)
status >>= 8
sender_end_time = time.clock()
# wait for receiver finish
thread.join()
status |= receiver_status
if status != 0:
print("Test failed, exiting with {0}".format(status))
exit(status)
diff = abs(sender_end_time - receiver_end_time) * 1000
max_allowed_diff = 200
if diff > max_allowed_diff:
print(
(
"Sender and Receiver end time difference {0} is more than "
"allowed diff {1}"
).format(diff, max_allowed_diff)
)
exit(1)
print(
(
"Test passed - Sender and Receiver"
" end time diff {0} ms"
).format(diff)
)
print("Testing without download resumption")
test(False)
print("Testing with download resumption")
test(True)
|
yamstudio/mysite | personal/apps.py | Python | mit | 91 | 0
|
from django.apps import AppConfig
class PersonalConfig(AppConfig):
name = 'personal'
|
adykstra/mne-python | mne/datasets/misc/_misc.py | Python | bsd-3-clause | 697 | 0.001435
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD Style.
from ...utils import verbose
from ..utils import _data_path, _data_path_doc
@verbose
def data_path(path=None, force_update=False, update_path=True,
download=True, verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='misc',
download=download)
data_path.__doc__ = _data_path_doc.format(name='misc',
conf='MNE_DATASETS_MISC_PATH')
|
coruus/pyasn1 | pyasn1/debug.py | Python | bsd-2-clause | 1,667 | 0.011398
|
import sys
from pyasn1.compat.octets import octs2ints
from pyasn1 import error
from pyasn1 import __version__
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff
flagMap = {
'encoder': flagEncoder,
'decoder': flagDecoder,
'all': flagAll
}
class Debug:
defaultPrinter = sys.stderr and sys.stderr.write or None
def __init__(self, *flags):
        self._flags = flagNone
if not self.defaultPrinter:
raise error.PyAsn1Error('Null debug writer specified')
self._printer = self.defaultPrinter
self('running pyasn1 version %s' % __version__)
for f in flags:
if f not in flagMap:
raise error.PyAsn1Error('bad debug flag %s' % (f,))
self._flags = self._flags | flagMap[f]
self('debug category \'%s\' enabled' % f)
def __str__(self):
return 'logger %s, flags %x' % (self._printer, self._flags)
def __call__(self, msg):
self._printer('DBG: %s\n' % msg)
def __and__(self, flag):
return self._flags & flag
def __rand__(self, flag):
return flag & self._flags
logger = 0
def setLogger(l):
global logger
logger = l
def hexdump(octets):
return ' '.join(
[ '%s%.2X' % (n%16 == 0 and ('\n%.5d: ' % n) or '', x)
for n,x in zip(range(len(octets)), octs2ints(octets)) ]
)
class Scope:
def __init__(self):
self._list = []
def __str__(self): return '.'.join(self._list)
def push(self, token):
self._list.append(token)
def pop(self):
return self._list.pop()
scope = Scope()
|
RexFuzzle/sfepy | sfepy/discrete/fem/linearizer.py | Python | bsd-3-clause | 4,428 | 0.001807
|
"""
Linearization of higher order solutions for the purposes of visualization.
"""
import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.discrete.fem.refine import refine_reference
def get_eval_dofs(dofs, dof_conn, ps, ori=None):
"""
Get default function for evaluating field DOFs given a list of elements and
reference element coordinates.
"""
def _eval(iels, rx):
edofs = dofs[dof_conn[iels]]
if ori is not None:
eori = ori[iels]
else:
eori = None
bf = ps.eval_base(rx, ori=eori, force_axis=True)[...,0,:]
rvals = dot_sequences(bf, edofs)
return rvals
return _eval
def get_eval_coors(coors, conn, ps):
"""
Get default function for evaluating physical coordinates given a list of
elements and reference element coordinates.
"""
def _eval(iels, rx):
ecoors = coors[conn[iels]]
aux = ecoors.transpose((0, 2, 1))
bf = ps.eval_base(rx).squeeze()
phys_coors = nm.dot(aux, bf.T).transpose((0, 2, 1))
return phys_coors
return _eval
def create_output(eval_dofs, eval_coors, n_el, ps, min_level=0, max_level=2,
eps=1e-4):
"""
Create mesh with linear elements that approximates DOFs returned by
`eval_dofs()` corresponding to a higher order approximation with a relative
precision given by `eps`. The DOFs are evaluated in physical coordinates
returned by `eval_coors()`.
"""
def _get_msd(iels, rx, ree):
rvals = eval_dofs(iels, rx)
rng = rvals.max() - rvals.min()
n_components = rvals.shape[-1]
msd = 0.0
for ic in range(n_components):
rval = rvals[..., ic]
sd = rval[:, ree]
# ~ max. second derivative.
msd += nm.abs(sd[..., 0] + sd[..., 2]
- 2.0 * sd[..., 1]).max(axis=-1)
msd /= n_components
return msd, rng
rx0 = ps.geometry.coors
rc0 = ps.geometry.conn[None, :]
rx, rc, ree = refine_reference(ps.geometry, 1)
factor = rc.shape[0] / rc0.shape[0]
iels = nm.arange(n_el)
msd, rng = _get_msd(iels, rx, ree)
eps_r = rng * eps
flag = msd > eps_r
iels0 = flag0 = None
coors = []
conns = []
vdofs = []
inod = 0
for level in range(max_level + 1):
if level < min_level:
flag.fill(True) # Force refinement everywhere.
elif level == max_level:
# Last level - take everything.
|
flag.fill(F
|
alse)
# Deal with finished elements.
if flag0 is not None:
ii = nm.searchsorted(iels0, iels)
expand_flag0 = flag0[ii].repeat(factor, axis=1)
else:
expand_flag0 = nm.ones_like(flag)
ie, ir = nm.where((flag == False) & (expand_flag0 == True))
if len(ie):
uie, iies = nm.unique(ie, return_inverse=True)
# Each (sub-)element has own coordinates - no shared vertices.
xes = eval_coors(iels[uie], rx0)
des = eval_dofs(iels[uie], rx0)
# Vectorize (how??) or use cython?
cc = []
vd = []
for ii, iie in enumerate(iies):
ce = rc0[ir[ii]]
xe = xes[iie]
cc.append(xe[ce])
de = des[iie]
vd.append(de[ce])
cc = nm.vstack(cc)
vd = nm.vstack(vd)
nc = cc.shape[0]
np = rc0.shape[1]
conn = nm.arange(nc, dtype=nm.int32).reshape((nc / np, np))
coors.append(cc)
conns.append(conn + inod)
vdofs.append(vd)
inod += nc
if not flag.any():
break
iels0 = iels
flag0 = flag
# Deal with elements to refine.
if level < max_level:
eflag = flag.sum(axis=1, dtype=nm.bool)
iels = iels[eflag]
rc0 = rc
rx0 = rx
rx, rc, ree = refine_reference(ps.geometry, level + 2)
msd, rng = _get_msd(iels, rx, ree)
eps_r = rng * eps
flag = msd > eps_r
all_coors = nm.concatenate(coors, axis=0)
conn = nm.concatenate(conns, axis=0)
all_vdofs = nm.concatenate(vdofs, axis=0)
mat_ids = nm.zeros(conn.shape[0], dtype=nm.int32)
return level, all_coors, conn, all_vdofs, mat_ids
|
mikkylok/mikky.lu | migrations/versions/f045592adab0_add_follow_table.py | Python | mit | 964 | 0.006224
|
"""add follow table
Revision ID: f045592adab0
Revises: 56a3d184ac27
Create Date: 2017-10-06 00:38:24.001488
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f045592adab0'
down_revision = '56a3d184ac27'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('follows',
sa.Column('follower_id', sa.Integer(), nullable=False),
sa.Column('followed_id', sa.Integer(), nullable=False),
    sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['followed_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['follower_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('follower_id', 'followed_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('follows')
# ### end Alembic commands ###
|
sthysel/sedge | sedge/templates.py | Python | gpl-3.0 | 177 | 0.00565
|
sedge_config_header = """
# :sedge:
#
# this configuration generated from `sedge' file:
# {}
#
# do not edit this file manually, edit the source file and re-run `sedge'
#
"""
|
KatolaZ/mammult | models/growth/node_deg_over_time.py | Python | gpl-3.0 | 2,400 | 0.01
|
# This file is part of MAMMULT: Metrics And Models for Multilayer Networks
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
####
##
## Get an edgelist, a file of arrival times and a node ID and return
## the degree of that node as a function of time (we suppose that IDs
## are sequential)
##
##
import sys
if len(sys.argv) < 4:
print "Usage: %s <netfile> <timefile> <nodeid1> [<nodeid2> <nodeid3>...]" % sys.argv[0]
sys.exit(1)
node_id = int(sys.argv[3])
lines = open(sys.argv[2], "r").readlines()
arrival_time = {}
#### WARNING!!!! THIS WORKS ONLY FOR M0=3
arrival_time[0] = 0
arrival_time[1] = 1
arrival_time[2] = 2
neigh_by_time = {}
max_t = -1
for l in lines:
if l[0] == "#":
continue
t,node = [int(x) for x in l.strip(" \n").split(" ")]
arrival_time[node] = t
if t > max_t :
max_t = t
lines = open(sys.argv[1], "r").readlines()
for l in lines:
if l[0] == "#":
continue
n1,n2 = [int(x) for x in l.strip(" \n").split(" ")]
if neigh_by_time.has_key(n1):
neigh_by_time[n1].append(arrival_time[n2])
else:
neigh_by_time[n1] = [arrival_time[n2]]
if neigh_by_time.has_key(n2):
neigh_by_time[n2].append(arrival_time[n1])
else:
        neigh_by_time[n2] = [arrival_time[n1]]
#print neigh_by_time[node_id]
for node_id in sys.argv[3:]:
node_id = int(node_id)
neigh_by_time[node_id].sort()
last_time = neigh_by_time[node_id][0]
#### changed here
k = 1
print "####
|
", node_id
for t in neigh_by_time[node_id][1:]:
if t != last_time:
if last_time < arrival_time[node_id]:
print arrival_time[node_id], k
else:
print last_time, k
last_time = t
k += 1
print max_t, k-1
print
print
|
snemes/pype32 | setup.py | Python | bsd-3-clause | 4,235 | 0.004959
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Nahuel Riva
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__revision__ = "$Id$"
__all__ = ['metadata', 'setup']
from distutils.core import setup
from distutils import version
from warnings import warn
import re
import os
import sys
import glob
# Distutils hack: in order to be able to build MSI installers with loose
# version numbers, we subclass StrictVersion to accept loose version numbers
# and convert them to the strict format. This works because Distutils will
# happily reinstall a package even if the version number matches exactly the
# one already installed on the system - so we can simply strip all extraneous
# characters and beta/postrelease version numbers will be treated just like
# the base version number.
if __name__ == '__main__':
StrictVersion = version.StrictVersion
class NotSoStrictVersion (StrictVersion):
def parse (self, vstring):
components = []
for token in vstring.split('.'):
token = token.strip()
match = re.search('^[0-9]+', token)
if match:
number = token[ match.start() : match.end() ]
components.append(number)
vstring = '.'.join(components)
return StrictVersion.parse(self, vstring)
version.StrictVersion = NotSoStrictVersion
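    # Illustration: with this patch, a loose version string such as
    # '0.1-alpha4' is reduced token by token to its leading digits and
    # parsed as the strict version '0.1'.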
# Get the base directory
here = os.path.dirname(__file__)
if not here:
here = os.path.curdir
# Text describing the module (reStructured text)
try:
readme = os.path.join(here, 'README')
long_description = open(readme, 'r').read()
except Exception:
warn("README file not found or unreadable!")
long_description = """pype32 is python library to read and write PE/PE+ binary files."""
# Get the list of scripts in the "tools" folder
scripts = glob.glob(os.path.join(here, 'tools', '*.py'))
# Set the parameters for the setup script
metadata = {
# Setup instructions
'provides' : ['pype32'],
'packages' : ['pype32'],
'scripts' : scripts,
# Metadata
'name' : 'pype32',
'version' : '0.1-alpha4',
'description' : 'Yet another Python library to read and write PE/PE+ files.',
'long_description' : long_description,
'author' : 'Nahuel Riva',
'author_email' : 'crackinglandia'+chr(64)+'gmail'+chr(0x2e)+'com',
'url' : 'https://github.com/crackinglandia/pype32',
'keywords' : ['pecoff', 'x86', 'x64', '.net', 'parser'],
'download_url' : 'https://github.com/crackinglandia/pype32/tarball/v0.1-alpha4',
}
# Execute the setup script
if __name__ == '__main__':
setup(**metadata)
|
mikedingjan/wagtail | wagtail/documents/urls.py | Python | bsd-3-clause | 298 | 0.003356
|
from django.conf.urls import url
from wagtail.documents.views import serve
urlpatterns = [
url(r'^(\d+)/(.*)$', serve.serve, name='wagtaildocs_serve'),
    url(r'^authenticate_with_password/(\d+)/$', serve.authenticate_with_password,
name='wagtaildocs_authenticate_with_password'),
]
|
drix00/leepstools | leepstools/file/elastic.py | Python | apache-2.0 | 1,272 | 0.000786
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: leepstools.file.elastic
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Read and generate LEEPS elastic file .ees.
"""
###############################################################################
# Copyright 2017 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import os.path
# Third party modules.
# Local modules.
# Project modules.
# Globals and constants variables.
class Elastic():
pass
def generate_elastic_file(program_file_path, elastic_file_path, composition):
if not os.path.isfile(program_file_path):
raise ValueError
|
ramuta/python101 | slide4.py | Python | gpl-2.0 | 193 | 0.010363
|
__author__ = 'ramuta'
a = 1
b = 2
if a < b:
a = b
print a
print b
"""
Java equivalent
if (a < b) {
a = b;
}
If you delete the parentheses, brackets and semicolons you get Python.
"""
|
Elico-Corp/odoo-addons | website_captcha_nogoogle/website.py | Python | agpl-3.0 | 2,704 | 0
|
# -*- coding: utf-8 -*-
# © 2015 Elico corp (www.elico-corp.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import base64
import random
import string
from binascii import hexlify, unhexlify
from openerp import api, fields, models
try:
from captcha.image import ImageCaptcha
except ImportError:
pass
try:
from simplecrypt import decrypt, encrypt
except ImportError:
pass
class Website(models.Model):
_inherit = 'website'
captcha = fields.Text('Captcha', compute="_captcha", store=False)
captcha_crypt_challenge = fields.Char(
'Crypt', compute="_captcha", store=False)
captcha_crypt_password = fields.Char(
default=lambda self: self._default_salt(),
required=True, help='''
The secret value used as the basis for a key.
        This should be as long and varied as possible.
Try to avoid common words.''')
captcha_length = fields.Selection(
'_captcha_length', default='4', required=True)
captcha_chars = fields.Selection(
'_captcha_chars', default='digits', required=True)
def is_captcha_valid(self, crypt_challenge, response):
challenge = decrypt(
self.captcha_crypt_password, unhexlify(crypt_challenge))
if response.upper() == challenge:
return True
return False
@api.depends('captcha_length', 'captcha_chars')
    @api.one
def _captcha(self):
captcha = ImageCaptcha()
captcha_challenge = self._generate_random_str(
self._get_captcha_chars(), int(self.captcha_length))
self.captcha_crypt_challenge = hexlify(
encrypt(self.captcha_crypt_password, captcha_challenge))
out = captcha.generate(captcha_challenge).getvalue()
self.captcha = base64.b64encode(out)
def _generate_random_str(self, chars, size):
        return ''.join(random.choice(chars) for _ in range(size))
def _default_salt(self):
return self._generate_random_str(
string.digits + string.letters + string.punctuation, 100)
# generate a random salt
def _captcha_length(self):
return [(str(i), str(i)) for i in range(1, 11)]
def _captcha_chars(self):
return [
('digits', 'Digits only'),
('hexadecimal', 'Hexadecimal'),
('all', 'Letters and Digits')]
def _get_captcha_chars(self):
chars = string.digits
if self.captcha_chars == 'hexadecimal':
# do not use the default string.hexdigits because it contains
# lowercase
chars += 'ABCDEF'
elif self.captcha_chars == 'all':
chars += string.uppercase
return chars
|
MadsJensen/agency_connectivity | sorted_scripts/python_processing/preprocessing.py | Python | bsd-3-clause | 9,616 | 0.000416
|
"""
Preprocessing function for the bdf.
@author: mje
@email: mads [] cnru.dk
"""
import mne
from mne.preprocessing import ICA, create_eog_epochs
import matplotlib.pyplot as plt
import numpy as np
# SETTINGS
n_jobs = 1
reject = dict(eeg=300e-6) # uVolts (EEG)
l_freq, h_freq, n_freq = 0.5, 90, 50 # Frequency setting for high, low, Noise
decim = 10 # decim value
montage = mne.channels.read_montage("biosemi64")
# data_folder = "/home/mje/Projects/agency_connectivity/data/"
# Functions
def convert_bdf2fif(subject, data_folder):
"""Convert bdf data to fiff.
Parameters
----------
subject: string
The subject to convert.
Returns
-------
    None, but saves a fiff file.
"""
raw = mne.io.read_raw_edf(
data_folder + "%s.bdf" % subject,
montage=montage,
eog=["EXG3", "EXG4", "EXG5", "EXG6"],
misc=["EXG1", "EXG2", "EXG7", "EXG8"],
preload=True)
raw.set_eeg_reference()
raw.save(data_folder + "%s-raw.fif" % subject, overwrite=True)
def filter_raw(subject, data_folder):
"""Filter raw fifs.
Parameters
----------
subject : string
the subject id to be loaded
"""
raw = mne.io.Raw(data_folder + "%s-raw.fif" % subject, preload=True)
raw.set_montage = montage
raw.apply_proj()
raw.notch_filter(n_freq)
raw.filter(l_freq, None)
raw.filter(None, h_freq)
raw.save(data_folder + "%s_bp-raw.fif" % subject, overwrite=True)
def compute_ica(subject, data_folder):
"""Function will compute ICA on raw and apply the ICA.
Parameters
----------
subject : string
the subject id to be loaded
"""
raw = mne.io.Raw(data_folder + "%s_bp-raw.fif" % subject, preload=True)
raw.set_montage = montage
raw.apply_proj()
# raw.resample(512, n_jobs=2)
# ICA Part
ica = ICA(n_components=None,
max_pca_components=40,
method='fastica',
max_iter=256)
picks = mne.pick_types(
raw.info, meg=False, eeg=True, stim=False, exclude='bads')
ica.fit(raw, picks=picks, decim=decim, reject=reject)
# maximum number of components to reject
n_max_eog = 1
##########################################################################
# 2) identify bad components by analyzing latent sources.
title = 'Sources related to %s artifacts (red) for sub: %s'
#
# # generate ECG epochs use detection via phase statistics
# ecg_epochs = create_ecg_epochs(raw, ch_name="Ext4",
# tmin=-.5, tmax=.5, picks=picks)
# n_ecg_epochs_found = len(ecg_epochs.events)
# sel_ecg_epochs = np.arange(0, n_ecg_epochs_found, 10)
# ecg_epochs = ecg_epochs[sel_ecg_epochs]
#
# ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, method='ctps')
# fig = ica.plot_scores(scores, exclude=ecg_inds,
# title=title % ('ecg', subject))
# fig.savefig(data_folder + "pics/%s_ecg_scores.png" % subject)
#
# if ecg_inds:
# show_picks = np.abs(scores).argsort()[::-1][:5]
#
# fig = ica.plot_sources(raw, show_picks, exclude=ecg_inds,
# title=title % ('ecg', subject), show=False)
# fig.savefig(data_folder + "pics/%s_ecg_sources.png" % subject)
# fig = ica.plot_components(ecg_inds, title=title % ('ecg', subject),
# colorbar=True)
# fig.savefig(data_folder + "pics/%s_ecg_component.png" % subject)
#
# ecg_inds = ecg_inds[:n_max_ecg]
# ica.exclude += ecg_inds
#
# # estimate average artifact
# ecg_evoked = ecg_epochs.average()
# del ecg_epochs
#
# # plot ECG sources + selection
# fig = ica.plot_sources(ecg_evoked, exclude=ecg_inds)
# fig.savefig(data_folder + "pics/%s_ecg_sources_ave.png" % subject)
#
# # plot ECG cleaning
# ica.plot_overlay(ecg_evoked, exclude=ecg_inds)
# fig.savefig(data_folder + "pics/%s_ecg_sources_clean_ave.png" % subject)
# DETECT EOG BY CORRELATION
# HORIZONTAL EOG
eog_epochs = create_eog_epochs(raw, ch_name="EXG4")
eog_indices, scores = ica.find_bads_eog(raw, ch_name="EXG4")
if eog_indices:
fig = ica.plot_scores(
scores, exclude=eog_indices, title=title % ('eog', subject))
fig.savefig(data_folder + "pics/%s_eog_scores.png" % subject)
# fig = ica.plot_components(picks=np.arange(0,20, 1),
# title=title % ('eog', subject), colorbar=True)
# fig.savefig(data_folder + "pics/%s_eog_component.png" % subject)
fig = ica.plot_properties(raw, picks=eog_indices)[0]
fig.savefig(data_folder + "pics/%s_eog_properties.png" % subject)
eog_indices = eog_indices[:n_max_eog]
ica.exclude += eog_indices
del eog_epochs
##########################################################################
# Apply the solution to Raw, Epochs or Evoked like this:
raw_ica = ica.apply(raw)
ica.save(data_folder + "%s-ica.fif" % subject) # save ICA componenets
# Save raw with ICA removed
raw_ica.save(data_folder + "%s_bp_ica-raw.fif" % subject, overwrite=True)
plt.close("all")
def epoch_data(subject, data_folder, save=True):
"""Epoch a raw data set.
Parameters
----------
subject : str
The subject to be epoched.
save : bool
Whether to save the epochs or not.
Returns
-------
epochs
"""
# SETTINGS
tmin, tmax = -2, 2
event_id = {'voluntary': 243, 'involuntary': 219}
raw = mne.io.Raw(data_folder + "%s_bp_ica-raw.fif" % subject)
events = mne.find_events(raw)
# Setup for reading the raw data
picks = mne.pick_types(
raw.info, meg=False, eeg=True, stim=True, eog=True, exclude='bads')
# Read epochs
epochs = mne.Epochs(
raw,
events,
event_id,
tmin,
tmax,
picks=picks,
baseline=(None, -1.8),
reject=None,
preload=True)
if save:
epochs.save(data_folder + "%s_ds_bp_ica-epo.fif" % subject)
return epochs
def hilbert_process(raw, bands, return_evoked=False):
"""Make hilbert transform of raw data and epoch it.
Parameters
----------
raw : ???
The raw data to be transformed.
bands : dict
Dictionary with frequencies to calculate. Shape "band": [low, high]
return_evoked : bool
If true, an evoked data set will be returned, if False epochs will be
returned.
Returns
-------
Epochs if return_evoked is False. These are complex number!
if return_evoked is True, an evoked object is returned. This does only
have the envelope.
"""
tmin, tmax = -2, 2
event_id = {'voluntary': 243, 'involuntary': 219}
picks = mne.pick_types(
raw.info, meg=False, eeg=True, stim=False, exclude='bads')
events = mne.find_events(raw)
results_dict = {}
for band in bands.keys():
raw_tmp = raw.copy()
raw_tmp.filter(bands[band][0], bands[band][1])
if return_evoked:
evokeds = []
raw_tmp.apply_hilbert(picks=picks, envelope=True)
epochs = mne.Epochs(
raw_tmp,
events,
event_id,
tmin,
tmax,
picks=picks,
baseline=(None, -1.8),
reject=reject)
for cond in epochs.event_id.keys():
evokeds.append(epochs[cond].average())
results_dict[band] = evokeds
else:
raw_tmp.apply_hilbert(picks=picks, envelope=False)
epochs = mne.Epochs(
raw_tmp,
events,
event_id,
tmin,
tmax,
picks=picks,
baseline=None,
reject=reject)
results_dict[band] = epochs
return results_dict
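# Hypothetical usage sketch (file names and band edges are placeholders):
#
#   raw = mne.io.Raw(data_folder + "%s_bp_ica-raw.fif" % subject, preload=True)
#   bands = {"alpha": [8, 12], "beta": [13, 30]}
#   hilbert_epochs = hilbert_process(raw, bands, return_evoked=False)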
def save_event_file(subject, data_folder):
"""
Parameters
----------
subject : subject name
data_folder : string
Returns
-------
"""
|
sussexstudent/falmer | falmer/banners/migrations/0001_initial.py | Python | mit | 924 | 0.002165
|
# Generated by Django 2.0.8 on 2018-08-14 10:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Banner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('outlet', models.CharField(max_length=64)),
('display_from', models.DateTimeField(blank=True, null=True)),
('display_to', models.DateTimeField(blank=True, null=True)),
('purpose', models.CharField(choices=[('NOTICE', 'Notice')], max_length=12)),
('heading', models.CharField(blank=True, max_length=256)),
('body', models.TextField()),
('disabled', models.BooleanField(default=False)),
],
),
]
|
simontakite/sysadmin | pythonscripts/headfirst/hfpy_code/01-MeetPython-Everyone-Loves-Lists/page30.py | Python | gpl-2.0 | 399 | 0.010025
|
movies = ["The Ho
|
ly Grail", 1975, "Terry Jones & Terry Gilliam", 91,
["Graham Chapman", ["Michael Palin", "John Cleese",
"Terry Gilliam", "Eric Idle", "Terry Jones"]]]
def print_lol(a_list):
for each_item in a_list:
if isinstance(each_item, list):
print_lol(each_item)
else:
print(each_item)
print_lol(movies)
|
kbase/metrics | source/custom_scripts/dump_query_results.py | Python | mit | 4,387 | 0.004559
|
#!/usr/local/bin/python
import os
import mysql.connector as mysql
metrics_mysql_password = os.environ["METRICS_MYSQL_PWD"]
sql_host = os.environ["SQL_HOST"]
metrics = os.environ["QUERY_ON"]
def dump_query_results():
"""
It is a simple SQL table dump of a given query so we can supply users with custom tables.
Note that the SQL query itself and column headers portion need to be changed if you want to change
the query/results. Otherwise it is good to go.
It can be called simply with the bin shell script.
Read the README at the top level for an example.
docker-compose run --rm metrics ../bin/custom_scripts/dump_query_results.sh > query_results.txt
"""
# connect to mysql
db_connection = mysql.connect(
host=sql_host, # "mysql1", #"localhost",
user="metrics", # "root",
passwd=metrics_mysql_password,
database="metrics", # "datacamp"
)
cursor = db_connection.cursor()
query = "use " + metrics
cursor.execute(query)
# CHANGE QUERY HERE
# query = "select username, display_name, email, orcid, kb_internal_user, institution, country, signup_date, last_signin_date from user_info order by signup_date"
# Query for Adam Narratives dump of information:
# select wc.* from metrics.user_info ui inner join metrics_reporting.workspaces_current wc on ui.username = wc.username
    # where ui.kb_internal_user = 0 and wc.narrative_version > 0 and is_deleted = 0 and is_temporary = 0;
#query = ("select * from metrics_reporting.narrative_app_flows")
query = ("select * from metrics_reporting.user_super_summary")
# CHANGE COLUMN HEADERS HERE TO MATCH QUERY HEADERS
# print("username\temail\tlast_signin_date\tmax_last_seen\tHasBeenSeen")
# print("ws_id\tusername\tmod_date\tinitial_save_date\trecord_date\ttop_lvl_object_count\ttotal_object_count\tvisible_app_cells_count\tnarrative_version\thidden_object_count\tdeleted_object_count\ttotal_size\ttop_lvl_size\tis_public\tis_temporary\tnumber_of_shares")
# Headers for Adam's narratives query (Note if more columns added, may need to update this
# print(
# "ws_id\tusername\tmod_date\tinitial_save_date\trecord_date\ttop_lvl_object_count\ttotal_object_count\tvisible_app_cells_count\tcode_cells_count\t"
# "narrative_version\thidden_object_count\tdeleted_object_count\ttotal_size\ttop_lvl_size\tis_public\tis_temporary\tis_deleted\tnumber_of_shares\t"
# "num_nar_obj_ids\tstatic_narratives_count"
# )
# HEADERS FOR user_super_summary
print(
"username\tdisplay_name\temail\tkb_internal_user\tuser_id\tglobus_login\tgoogle_login\torcid\tsession_info_country\tcountry\tstate\t"
"institution\tdepartment\tjob_title\thow_u_hear_selected\thow_u_hear_other\tsignup_date\tlast_signin_date\tdays_signin_minus_signup\t"
"days_since_last_signin\tnum_orgs\tnarrative_count\tshared_count\tnarratives_shared\tfirst_narrative_made_date\tlast_narrative_made_date\t"
"last_narrative_modified_date\ttotal_narrative_objects_count\ttop_lvl_narrative_objects_count\ttotal_narrative_objects_size\t"
"top_lvl_narrative_objects_size\ttotal_narrative_count\ttotal_public_narrative_count\tdistinct_static_narratives_count\t"
"static_narratives_created_count\ttotal_visible_app_cells\ttotal_code_cells_count\tfirst_file_date\tlast_file_date\t"
"total_file_sizes_MB\ttotal_file_count\tmost_used_app\tdistinct_apps_used\ttotal_apps_run_all_time\ttotal_apps_run_last365\t"
"total_apps_run_last90\ttotal_apps_run_last30\ttotal_app_errors_all_time\tfirst_app_run\tlast_app_run\ttotal_run_time_hours\t"
"total_queue_time_hours\ttotal_CPU_hours\tsession_count_all_time\tsession_count_last_year\tsession_count_last_90\tsession_count_last_30"
)
#Header for Adam's narrative_app_flow
#print("ws_id\tusername\tapp_name\tfunc_name\tstart_date\tfinish_date")
cursor.execute(query)
row_values = list()
for row_values in cursor:
temp_string = ""
for i in range(len(row_values) - 1):
if row_values[i] is not None:
temp_string += str(row_values[i])
temp_string += "\t"
if row_values[-1] is not None:
temp_string += str(row_values[-1])
print(temp_string)
return 1
dump_query_results()
|
aferr/LatticeMemCtl | configs/example/memtest.py | Python | bsd-3-clause | 7,651 | 0.01307
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
import optparse
import sys
import m5
from m5.objects import *
parser = optparse.OptionParser()
parser.add_option("-a", "--atomic", action="store_true",
help="Use atomic (non-timing) mode")
parser.add_option("-b", "--blocking", action="store_true",
help="Use blocking caches")
parser.add_option("-l", "--maxloads", metavar="N", default=0,
help="Stop after N loads")
parser.add_option("-m", "--maxtick", type="int", default=m5.MaxTick,
metavar="T",
help="Stop after T ticks")
#
# The "tree" specification is a colon-separated list of one or more
# integers. The first integer is the number of caches/testers
# connected directly to main memory. The last integer in the list is
# the number of testers associated with the uppermost level of memory
# (L1 cache, if there are caches, or main memory if no caches). Thus
# if there is only one integer, there are no caches, and the integer
# specifies the number of testers connected directly to main memory.
# The other integers (if any) specify the number of caches at each
# level of the hierarchy between.
#
# Examples:
#
# "2:1" Two caches connected to memory with a single tester behind each
# (single-level hierarchy, two testers total)
#
# "2:2:1" Two-level hierarchy, 2 L1s behind each of 2 L2s, 4 testers total
#
parser.add_option("-t", "--treespec", type="string", default="8:1",
help="Colon-separated multilevel tree specification, "
"see script comments for details "
"[default: %default]")
parser.add_option("--force-bus", action="store_true",
help="Use bus between levels even with single cache")
parser.add_option("-f", "--functional", type="int", default=0,
metavar="PCT",
help="Target percentage of functional accesses "
"[default: %default]")
parser.add_option("-u", "--uncacheable", type="int", default=0,
metavar="PCT",
help="Target percentage of uncacheable accesses "
"[default: %default]")
parser.add_option("--progress", type="int", default=1000,
metavar="NLOADS",
help="Progress message interval "
"[default: %default]")
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
block_size = 64
try:
treespec = [int(x) for x in options.treespec.split(':')]
numtesters = reduce(lambda x,y: x*y, treespec)
except:
print "Error parsing treespec option"
sys.exit(1)
if numtesters > block_size:
print "Error: Number of testers limited to %s because of false sharing" \
% (block_size)
sys.exit(1)
if len(treespec) < 1:
print "Error parsing treespec"
sys.exit(1)
# define prototype L1 cache
proto_l1 = BaseCache(size = '32kB', assoc = 4, block_size = block_size,
latency = '1ns', tgts_per_mshr = 8)
if options.blocking:
proto_l1.mshrs = 1
else:
proto_l1.mshrs = 4
# build a list of prototypes, one for each level of treespec, starting
# at the end (last entry is tester objects)
prototypes = [ MemTest(atomic=options.atomic, max_loads=options.maxloads,
percent_functional=options.functional,
percent_uncacheable=options.uncacheable,
progress_interval=options.progress) ]
# next comes L1 cache, if any
if len(treespec) > 1:
prototypes.insert(0, proto_l1)
# now add additional cache levels (if any) by scaling L1 params
for scale in treespec[:-2]:
# clone previous level and update params
prev = prototypes[0]
next = prev()
next.size = prev.size * scale
next.latency = prev.latency * 10
next.assoc = prev.assoc * scale
next.mshrs = prev.mshrs * scale
prototypes.insert(0, next)
# system simulated
system = System(funcmem = SimpleMemory(in_addr_map = False),
funcbus = NoncoherentBus(),
physmem = SimpleMemory(latency = "100ns"))
def make_level(spec, prototypes, attach_obj, attach_port):
fanout = spec[0]
parent = attach_obj # use attach obj as config parent too
if len(spec) > 1 and (fanout > 1 or options.force_bus):
port = getattr(attach_obj, attach_port)
new_bus = CoherentBus(clock="500MHz", width=16)
if (port.role == 'MASTER'):
new_bus.slave = port
attach_port = "master"
else:
new_bus.master = port
attach_port = "slave"
parent.cpu_side_bus = new_bus
attach_obj = new_bus
objs = [prototypes[0]() for i in xrange(fanout)]
if len(spec) > 1:
# we just built caches, more levels to go
parent.cache = objs
for cache in objs:
cache.mem_side = getattr(attach_obj, attach_port)
make_level(spec[1:], prototypes[1:], cache, "cpu_side")
else:
# we just built the MemTest objects
parent.cpu = objs
for t in objs:
t.test = getattr(attach_obj, attach_port)
t.functional = system.funcbus.slave
make_level(treespec, prototypes, system.physmem, "port")
# connect reference memory to funcbus
system.funcbus.master = system.funcmem.port
# -----------------------
# run simulation
# -----------------------
root = Root( full_system = False, system = system )
if options.atomic:
root.system.mem_mode = 'atomic'
else:
root.system.mem_mode = 'timing'
# The system port is never used in the tester so merely connect it
# to avoid problems
root.system.system_port = root.system.physmem.port
# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')
# instantiate configuration
m5.instantiate()
# simulate until program terminates
exit_event = m5.simulate(options.maxtick)
print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
|
DOE-NEPA/geonode_2.0_to_2.4_migration
|
migrate_base_topiccategory.py
|
Python
|
gpl-2.0
| 1,677
| 0.023256
|
#!/usr/bin/python
import os
import psycopg2
import sys
file = open("/home/" + os.getlogin() + "/.pgpass", "r")
pgpasses = []
for line in file:
pgpasses.append(line.rstrip("\n").split(":"))
file.close()
for pgpass in pgpasses:
#print str(pgpass)
if pgpass[0] == "54.236.235.110" and pgpass[3] == "geonode":
src_pgpass = pgpass
if pgpass[0] == "54.197.226.56" and pgpass[3] == "geonode":
dst_pgpass = pgpass
src = psycopg2.connect(host=src_pgpass[0], database="geonode2", user=src_pgpass[3], password=src_pgpass[4])
dst = psycopg2.connect(host=dst_pgpass[0], database="geonode", user=dst_pgpass[3], password=dst_pgpass[4])
src_cur = src.cursor()
dst_cur = dst.cursor()
src_cur.execute("select id, identifier, description, gn_description, is_choice from base_topiccategory")
for src_row in src_cur:
assignments = []
#id
assignments.append(src_row[0])
#identifier
assignments.append(src_row[1])
#description
assignments.append(src_row[2])
#description_en
assignments.append(None)
#gn_description
assignments.append(src_row[3])
#gn_description_en
assignments.append(None)
#is_choice
assignments.append(src_row[4])
try:
dst_cur.execute("insert into base_topiccategory(id, identifier, description, description_en, gn_description, gn_description_en, is_choice)
|
values (%s, %s, %s, %s, %s, %s, %s)", assignments)
dst.commit()
except Exception as error:
print
print type(error)
print str(error) + "select id, identifier, description, gn_description, is_choice from base_topiccategory"
print str(src_row)
dst.rollback()
dst.commit()
src_cur.close()
dst_cur.close()
src.close()
dst.close()
|
mikebryant/tsumufs
|
lib/tsumufs/metrics.py
|
Python
|
gpl-3.0
| 1,997
| 0.008513
|
# Copyright (C) 2008 Google, Inc. All Rights Reserved.
# Copyright (C) 2012 Michael Bryant.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''TsumuFS, a NFS-based caching filesystem.'''
import errno
import time
import threading
from extendedattributes import extendedattribute
_metrics_lock = threading.RLock()
_metrics = {}
def benchmark(func):
'''
Decorator method to help gather metrics.
'''
global _metrics
def wrapper(*__args, **__kwargs):
name = func.__name__
start_time = time.time()
result = func.__call__(*__args, **__kwargs)
delta_t = time.time() - start_time
try:
_metrics_lock.acquire()
if not _metrics.has_key(name):
_metrics[name] = [ 1, delta_t ]
else:
_metrics[name][0] += 1
_metrics[name][1] += delta_t
finally:
_metrics_lock.release()
return result
return wrapper
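# Illustrative usage (hypothetical function, not part of the original
# module): any callable decorated with @benchmark has its call count and
# cumulative wall-clock time accumulated under its name in _metrics.
#
# @benchmark
# def _example_read(path):
#   return open(path).read()
#
# After two calls, _metrics['_example_read'] would be [2, total_seconds].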
@extendedattribute('root', 'tsumufs.metrics')
def xattr_metrics(type_, path, value=None):
global _metrics
if value:
return -errno.EOPNOTSUPP
try:
_metrics_lock.acquire()
if len(_metrics.keys()) == 0:
return '{}'
result = '{ '
for name in _metrics.keys():
result += ("'%s': %f (%d), " %
(name, _metrics[name][1] / _metrics[name][0],
_metrics[name][0]))
result = result[:-2]
result += ' }'
return result
finally:
_metrics_lock.release()
|
Ormod/pghoard
|
pghoard/common.py
|
Python
|
apache-2.0
| 6,515
| 0.001995
|
"""
pghoard - common utility functions
Copyright (c) 2015 Ohmu Ltd
See LICENSE for details
"""
import fcntl
import logging.handlers
import os
try:
from backports import lzma # pylint: disable=import-error, unused-import
except ImportError:
import lzma # pylint: disable=import-error, unused-import
try:
from urllib.parse import urlparse, parse_qs # pylint: disable=no-name-in-module, import-error
except ImportError:
from urlparse import urlparse, parse_qs # pylint: disable=no-name-in-module, import-error
if hasattr(lzma, "open"):
lzma_open = lzma.open # pylint: disable=no-member, maybe-no-member
lzma_open_read = lzma.open # pylint: disable=no-member, maybe-no-member
lzma_compressor = lzma.LZMACompressor # pylint: disable=no-member
lzma_decompressor = lzma.LZMADecompressor # pylint: disable=no-member
elif not hasattr(lzma, "options"):
def lzma_open(filepath, mode, preset):
return lzma.LZMAFile(filepath, mode=mode, preset=preset)
def lzma_open_read(filepath, mode):
return lzma.LZMAFile(filepath, mode=mode)
def lzma_compressor(preset):
return lzma.LZMACompressor(preset=preset) # pylint: disable=no-member
def lzma_decompressor():
return lzma.LZMADecompressor() # pylint: disable=no-member
else:
def lzma_open(filepath, mode, preset):
return lzma.LZMAFile(filepath, mode=mode, options={"level": preset}) # pylint: disable=unexpected-keyword-arg
def lzma_open_read(filepath, mode):
return lzma.LZMAFile(filepath, mode=mode)
def lzma_compressor(preset):
return lzma.LZMACompressor(options={"level": preset}) # pylint: disable=no-member
def lzma_decompressor():
return lzma.LZMADecompressor() # pylint: disable=no-member
try:
from Queue import Queue, Empty # pylint: disable=import-error, unused-import
except ImportError:
from queue import Queue, Empty # pylint: disable=import-error, unused-import
default_log_format_str = "%(asctime)s\t%(name)s\t%(threadName)s\t%(levelname)s\t%(message)s"
syslog_format_str = '%(name)s %(levelname)s: %(message)s'
def create_connection_string(connection_info):
return " ".join("{}='{}'".format(k, str(v).replace("'", "\\'"))
for k, v in sorted(connection_info.items()))
def get_connection_info(info):
"""turn a connection info object into a dict or return it if it was a
dict already. supports both the traditional libpq format and the new
url format"""
if isinstance(info, dict):
return info.copy()
elif info.startswith("postgres://") or info.startswith("postgresql://"):
return parse_connection_string_url(info)
else:
return parse_connection_string_libpq(info)
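# Illustrative example (assumed values, not in the original module): both of
# these calls yield an equivalent dict such as
# {"host": "db1", "port": "5432", "user": "backup", "dbname": "mydb"}:
#
# get_connection_info("host=db1 port=5432 user=backup dbname=mydb")
# get_connection_info("postgres://backup@db1:5432/mydb")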
def parse_connection_string_url(url):
p = urlparse(url)
fields = {}
if p.hostname:
fields["host"] = p.hostname
if p.port:
fields["port"] = str(p.port)
if p.username:
fields["user"] = p.username
if p.password is not None:
fields["password"] = p.password
if p.path and p.path != "/":
fields["dbname"] = p.path[1:]
for k, v in parse_qs(p.query).items():
fields[k] = v[-1]
return fields
def parse_connection_string_libpq(connection_string):
"""parse a postgresql connection string as defined in
http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING"""
fields = {}
while True:
connection_string = connection_string.strip()
if not connection_string:
break
if "=" not in connection_string:
raise ValueError("expecting key=value format in connection_string fragment {!r}".format(connection_string))
key, rem = connection_string.split("=", 1)
if rem.startswith("'"):
asis, value = False, ""
for i in range(1, len(rem)):
if asis:
value += rem[i]
asis = False
elif rem[i] == "'":
break # end of entry
elif rem[i] == "\\":
asis = True
else:
value += rem[i]
else:
raise ValueError("invalid connection_string fragment {!r}".format(rem))
connection_string = rem[i + 1:] # pylint: disable=undefined-loop-variable
else:
res = rem.split(None, 1)
if len(res) > 1:
value, connection_string = res
else:
value, connection_string = rem, ""
fields[key] = value
return fields
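# Illustrative example (assumed input, not in the original module): quoted
# values are unescaped by the loop above, so
# parse_connection_string_libpq("user=u password='p\\'w'") returns
# {"user": "u", "password": "p'w"}.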
def create_pgpass_file(log, connection_string_or_info):
"""Look up password from the given object which can be a dict or a
string and write a possible password in a pgpass file;
returns a connection_string without a password in it"""
info = get_connection_info(connection_string_or_info)
if "password" not in info:
return create_connection_string(info)
content = "{host}:{port}:{dbname}:{user}:{password}\n".format(
host=info.get("host", ""), port=info.get("port", 5432),
user=info.get("user", ""), password=info.pop("password"),
dbname=info.get("dbname", "*"))
pgpass_path = os.path.join(os.environ.get("HOME"), ".pgpass")
if os.path.exists(pgpass_path):
with open(pgpass_path, "r") as fp:
pgpass_data = fp.read()
else:
pgpass_data = ""
if content in pgpass_data:
log.debug("Not adding authentication data to: %s since it's already there", pgpass_path)
else:
with open(pgpass_path, "a") as fp:
os.fchmod(fp.fileno(), 0o600)
fp.write(content)
log.debug("Wrote %r to %r", content, pgpass_path)
return create_connection_string(info)
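# Illustrative sketch (assumed values, not in the original module):
# create_pgpass_file(log, "host=db1 user=backup password=secret dbname=mydb")
# appends "db1:5432:mydb:backup:secret" to ~/.pgpass (created with mode 0600)
# and returns "dbname='mydb' host='db1' user='backup'" with the password
# stripped.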
def set_syslog_handler(syslog_address, syslog_facility, logger):
syslog_handler = logging.handlers.SysLogHandler(address=syslog_address, facility=syslog_facility)
logger.addHandler(syslog_handler)
formatter = logging.Formatter(syslog_format_str)
syslog_handler.setFormatter(formatter)
return syslog_handler
def set_subprocess_stdout_and_stderr_nonblocking(proc):
for fd in [proc.stdout.fileno(), proc.stderr.fileno()]:
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
def convert_pg_version_number_to_numeric(version_string):
parts = version_string.split(".")
return int(parts[0]) * 10000 + int(parts[1]) * 100 + int(parts[2])
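# For example (illustrative, not part of the original module):
# convert_pg_version_number_to_numeric("9.4.1") == 9 * 10000 + 4 * 100 + 1 == 90401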
|
r39132/airflow
|
airflow/task/task_runner/__init__.py
|
Python
|
apache-2.0
| 1,803
| 0.001109
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow import configuration
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
from airflow.exceptions import AirflowException
_TASK_RUNNER = configuration.conf.get('core', 'TASK_RUNNER')
def get_task_runner(local_task_job):
"""
Get the task runner that can be used to run the given job.
:param local_task_job: The LocalTaskJob associated with the TaskInstance
that needs to be executed.
:type local_task_job: airflow.jobs.LocalTaskJob
:return: The task runner to use to run the task.
:rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
"""
if _TASK_RUNNER == "StandardTaskRunner":
return StandardTaskRunner(local_task_job)
elif _TASK_RUNNER == "CgroupTaskRunner":
from airflow.contrib.task_runner.cgroup_task_runner import CgroupTaskRunner
return CgroupTaskRunner(local_task_job)
else:
raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER))
|
gylian/sickrage
|
sickbeard/providers/ezrss.py
|
Python
|
gpl-3.0
| 5,370
| 0.003538
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import urllib
import re
try:
import xml.etree.cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
import sickbeard
import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
class EZRSSProvider(generic.TorrentProvider):
def __init__(self):
self.urls = {'base_url': 'https://www.ezrss.it/'}
self.url = self.urls['base_url']
generic.TorrentProvider.__init__(self, "EZRSS")
self.supportsBacklog = True
self.supportsFrench = False
self.enabled = False
self.ratio = None
self.cache = EZRSSCache(self)
def isEnabled(self):
return self.enabled
def imageName(self):
return 'ezrss.png'
def getQuality(self, item, anime=False):
try:
quality = Quality.sceneQuality(item.filename, anime)
except:
quality = Quality.UNKNOWN
return quality
def findSearchResults(self, show, episodes, search_mode, manualSearch=False, downCurQuality=False):
self.show = show
results = {}
if show.air_by_date or show.sports:
logger.log(self.name + u" doesn't support air-by-date or sports backloging because of limitations on their RSS search.",
logger.WARNING)
return results
results = generic.TorrentProvider.findSearchResults(self, show, episodes, search_mode, manualSearch, downCurQuality)
return results
def _get_season_search_strings(self, ep_obj):
params = {}
params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
if ep_obj.show.air_by_date or ep_obj.show.sports:
params['season'] = str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
params['season'] = "%d" % ep_obj.scene_absolute_number
else:
params['season'] = ep_obj.scene_season
return [params]
def _get_episode_search_strings(self, ep_obj, add_string=''):
params = {}
if not ep_obj:
return params
params['show_name'] = helpers.sanitizeSceneName(self.show.name, ezrss=True).replace('.', ' ').encode('utf-8')
if self.show.air_by_date or self.show.sports:
params['date'] = str(ep_obj.airdate)
elif self.show.anime:
params['episode'] = "%i" % int(ep_obj.scene_absolute_number)
else:
params['season'] = ep_obj.scene_season
params['episode'] = ep_obj.scene_episode
return [params]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
params = {"mode": "rss"}
if search_params:
params.update(search_params)
search_url = self.url + 'search/index.php?' + urllib.urlencode(params)
logger.log(u"Search string: " + search_url, logger.DEBUG)
results = []
for curItem in self.cache.getRSSFeed(search_url, items=['entries'])['entries'] or []:
(title, url) = self._get_title_and_url(curItem)
if title and url:
logger.log(u"RSS Feed provider: [" + self.name + "] Attempting to add item to cache: " + title, logger.DEBUG)
results.append(curItem)
return results
def _get_title_and_url(self, item):
(title, url) = generic.TorrentProvider._get_title_and_url(self, item)
try:
new_title = self._extract_name_from_filename(item.filename)
except:
new_title = None
if new_title:
title = new_title
logger.log(u"Extracted the name " + title + " from the torrent link", logger.DEBUG)
return (title, url)
def _extract_name_from_filename(self, filename):
name_regex = '(.*?)\.?(\[.*]|\d+\.TPB)\.torrent$'
logger.log(u"Comparing " + name_regex + " against " + filename, logger.DEBUG)
match = re.match(name_regex, filename, re.I)
if match:
return match.group(1)
return None
def seedRatio(self):
return self.ratio
class EZRSSCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll EZRSS every 15 minutes max
self.minTime = 15
def _getRSSData(self):
rss_url = self.provider.url + 'feed/'
logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG)
return self.getRSSFeed(rss_url)
provider = EZRSSProvider()
|
bd808/tools-stashbot
|
stashbot/__init__.py
|
Python
|
gpl-3.0
| 845
| 0
|
# -*- coding: utf-8 -*-
#
# This file is part of bd808's stashbot application
# Copyright (C) 2015 Bryan Davis and contributors
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from .bot import Stashbot
__all__ = (
'Stashbot',
)
any((
Stashbot,
))
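# Referencing Stashbot in any() above is presumably just a way to mark the
# import as used so linters do not flag it; the class itself is re-exported
# via __all__.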
|
DInnaD/CS50
|
pset6/caesar.py
|
Python
|
apache-2.0
| 2,532
| 0.011453
|
import cs50
import sys
def main():
if len(sys.argv) != 2:
print("You should provide cmd line arguments!")
exit(1)
#if sys.argv[1].isalpha() == False:
#print("You should provide valid key!")
#exit(1)
kplainText = int(sys.argv[1])
cipher = []
plainText = cs50.get_string()
for symbol in plainText:
if symbol.isalpha():
cipher.append(caesar(symbol, kplainText))
else:
cipher.append(symbol)
print("".join(cipher))
exit(0)
def caesar(char, kplainText):
if char.isupper():
return chr(((ord(char) - 65 + kplainText) % 26) + 65)
else:
return chr(((ord(char) - 97 + kplainText) % 26) + 97)
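# Worked example (illustrative): with a key of 3, caesar('Y', 3) computes
# chr(((ord('Y') - 65 + 3) % 26) + 65) = chr((24 + 3) % 26 + 65) = chr(66) = 'B',
# wrapping around the end of the alphabet.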
if __name__ == "__main__":
main()
# #include <ctype.h>
# #include <string.h>
# #include <cs50.h>
# #include <stdio.h>
# #include <stdlib.h>
# //define my caesarCipher
# void caesarCipher(char* plainText,int key);
# def int main(int argc, char* argv[]): # //{//????????????????/char*
# if argc is not 2:
# # {
# print("Usage: ./caesar k\n")
# #return 1
# #}
# #//printf(" %s\n", argv[1]);
# int key = atoi(sys.argv[1])
# char plainText[101]
# print("plaintext: ")#;//ask user
# fgets(plainText, sizeof(plainText), stdin);//get user input & store it in planText var++++++++
# print("ciphertext: ")#;//print the ciphered text
# caesarCipher(plainText,key)
# //system(pause);//connect out if not use wind---------------------------???????????????
# # return 0;
# #}
# void caesarCipher(char* plainText, int key){//key pomen mestami on first plaiiiiiiiiiin
# int i = 0
# char cipher
# int cipherValue
# while plainText[i] != '\0' and strlen(plainText) -1 > i :break#// for(int i=1,len=strlen(name);i<len;i++)
# if isalpha(plainText[i]) and islower(plainText[i]):
# cipherValue = ((int)((plainText[i]) - 97 + key) % 26 + 97)
# cipher = (char)(cipherValue);printf("%c", cipher)
# i++
# else:
# if isalpha(plainText[i]) and isupper(plainText[i]):# // if isaph char
# cipherValue = ((int)(plainText[i] - 65 + key) % 26 + 65)
# cipher = (char)(cipherValue)
# print("%c", cipher)
# i++
# else: #//if not isaplha low or up
# print("%c", plainText[i])
# i++
# print("\n")
#}
|
akvo/akvo-rsr
|
akvo/rsr/models/organisation_document.py
|
Python
|
agpl-3.0
| 6,312
| 0.003485
|
# -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ..fields import ValidXMLCharField
from akvo.codelists.models import Country, DocumentCategory, Language
from akvo.codelists.store.default_codelists import COUNTRY, DOCUMENT_CATEGORY, FILE_FORMAT, LANGUAGE
from akvo.utils import codelist_choices, codelist_value
def document_path(self, filename):
return 'db/org/%s/document/%s' % (str(self.organisation.pk), filename)
class OrganisationDocument(models.Model):
organisation = models.ForeignKey(
'Organisation', on_delete=models.CASCADE, related_name='documents', verbose_name=_('organisation')
)
url = models.URLField(
_('document url'), blank=True,
help_text=_('Enter the online location of your document. The URL should start with '
'\'http://\' or \'https://\'.')
)
document = models.FileField(
_('document'), blank=True, upload_to=document_path,
help_text=_('You can upload a document to your organisation. These documents will be '
'stored on the RSR server and will be publicly available for users to '
'download and view to gain further insight in the organisation.')
)
format = ValidXMLCharField(
_('document format'), max_length=85, blank=True, choices=codelist_choices(FILE_FORMAT),
help_text=_('This provides the code for the Internet Media Type ("MIME type") of the '
'document, and includes pdf, msword, rtf, xml, csv, etc.')
)
title = ValidXMLCharField(
_('document title'), max_length=100, blank=True,
help_text=_('Enter the title of your document.')
)
title_language = ValidXMLCharField(
_('title language'), max_length=2, blank=True, choices=codelist_choices(LANGUAGE),
help_text=_('Select the language of the document title.')
)
language = ValidXMLCharField(
_('document language'), max_length=2, blank=True, choices=codelist_choices(LANGUAGE),
help_text=_('Select the language that the document is written in.')
)
document_date = models.DateField(
_('document date'), null=True, blank=True,
help_text=_('Enter the date (DD/MM/YYYY) to be used for the production or publishing date '
'of the relevant document to identify the specific document version.')
)
def __str__(self):
return self.show_link()
def clean(self):
# Check if the user has at least uploaded a document or indicated an URL.
if not (self.url or self.document or self.title):
raise ValidationError(
_('It is required to have at least a title, an uploaded document or indicate an '
'URL.')
)
# Strip non-ASCII characters from the uploaded file name
if self.document:
self.document.name = self.document.name.encode('ascii', 'ignore')
def show_link(self):
title = self.title if self.title else '%s' % _('Untitled document')
if self.url:
return '<a href="%s">%s</a>' % (self.url, title,)
elif self.document:
return '<a href="%s">%s</a>' % (self.document.url, title,)
else:
return title
def iati_language(self):
return codelist_value(Language, self, 'language')
def iati_language_unicode(self):
return str(self.iati_language())
class Meta:
app_label = 'rsr'
verbose_name = _('organisation document')
verbose_name_plural = _('organisation documents')
ordering = ['-id', ]
class OrganisationDocumentCategory(models.Model):
document = models.ForeignKey(OrganisationDocument, on_delete=models.CASCADE, related_name='categories',
verbose_name=_('document'))
category = ValidXMLCharField(_('document category'), max_length=3, blank=True,
choices=codelist_choices(DOCUMENT_CATEGORY),
help_text=_('The description of the type of content contained '
'within the document.'))
class Meta:
app_label = 'rsr'
verbose_name = _('document category')
verbose_name_plural = _('document categories')
ordering = ['-id', ]
def __str__(self):
if self.category:
try:
return self.iati_category().name
except AttributeError:
return self.iati_category()
else:
return '%s' % _('No category specified')
def iati_category(self):
return codelist_value(DocumentCategory, self, 'category')
def iati_category_unicode(self):
return str(self.iati_category())
class OrganisationDocumentCountry(models.Model):
document = models.ForeignKey(OrganisationDocument, on_delete=models.CASCADE, related_name='countries',
verbose_name=_('document'))
country = ValidXMLCharField(
_('recipient country'), blank=True, max_length=2, choices=codelist_choices(COUNTRY, show_code=False),
help_text=_('This identifies the country which concerns the organisation document.')
)
text = ValidXMLCharField(
_('description'), blank=True, max_length=100,
help_text=_('Optionally enter a short description.')
)
class Meta:
app_label = 'rsr'
verbose_name = _('document country')
verbose_name_plural = _('document countries')
ordering = ['-id', ]
def __str__(self):
if self.country:
try:
return self.iati_country().name
except AttributeError:
return self.iati_country()
else:
return '%s' % _('No country specified')
def iati_country(self):
return codelist_value(Country, self, 'country')
def iati_country_unicode(self):
return str(self.iati_country())
|
Tocknicsu/nctuoj_contest
|
test/api/contest/put_contest.py
|
Python
|
apache-2.0
| 3,747
| 0.004003
|
import datetime
data = [
{
"name": "test_put_contest_no_login",
"url": "/api/contest/",
"method": "put",
"payload": {
"title": "change",
"start": "2001-01-01 00:00:00",
"end": "2001-01-01 00:00:00",
"freeze": "0",
"description": "XD"
},
"response_status": 403,
"response_data":{
"msg": "Permission Denied"
}
},
{
"name": "test_put_contest_test",
"url": "/api/contest/",
"method": "put",
"payload": {
"token": "TEST@TOKEN",
"title": "change",
"start": "2001-01-01 00:00:00",
"end": "2001-01-01 00:00:00",
"freeze": "0",
"description": "XD"
},
"response_status": 403,
"response_data":{
"msg": "Permission Denied"
}
},
{
"name": "test_put_contest_unofficial",
"url": "/api/contest/",
"method": "put",
"payload": {
"token": "UNOFFICIAL@TOKEN",
"title": "change",
"start": "2001-01-01 00:00:00",
"end": "2001-01-01 00:00:00",
"freeze": "0",
"description": "XD"
},
"response_status": 403,
"response_data":{
"msg": "Permission Denied"
}
},
{
"name": "test_put_contest_official",
"url": "/api/contest/",
"method": "put",
"payload": {
"token": "OFFICIAL@TOKEN",
"title": "change",
"start": "2001-01-01 00:00:00",
"end": "2001-01-01 00:00:00",
"freeze": "0",
"description": "XD"
},
"response_status": 403,
"response_data":{
"msg": "Permission Denied"
}
},
{
"name": "test_put_contest_admin",
"url": "/api/contest/",
"method": "put",
"payload": {
"token": "ADMIN@TOKEN",
"title": "change",
"start": str(datetime.datetime
|
.now())[:-7],
"end": str(datetime.datetime.now() + dat
|
etime.timedelta(hours=3))[:-7],
"freeze": 0,
"description": "XD"
},
"response_status": 200,
"response_data":{
"msg": {
"title": "change",
"start": str(datetime.datetime.now())[:-7],
"end": str(datetime.datetime.now() + datetime.timedelta(hours=3))[:-7],
"freeze": 0,
"description": "XD"
}
}
},
{
"name": "test_put_contest_admin_wrong_time(start > end)",
"url": "/api/contest/",
"method": "put",
"payload": {
"token": "ADMIN@TOKEN",
"title": "change",
"start": str(datetime.datetime.now())[:-7],
"end": str(datetime.datetime.now() + datetime.timedelta(hours=-3))[:-7],
"freeze": 0,
"description": "XD"
},
"response_status": 400,
"response_data":{
"msg": "start cannot larger than end"
}
},
{
"name": "test_put_contest_admin_wrong_time(start + freeze > end)",
"url": "/api/contest/",
"method": "put",
"payload": {
"token": "ADMIN@TOKEN",
"title": "change",
"start": str(datetime.datetime.now())[:-7],
"end": str(datetime.datetime.now() + datetime.timedelta(hours=3))[:-7],
"freeze": 200,
"description": "XD"
},
"response_status": 400,
"response_data":{
"msg": "start + freeze cannot larger than end"
}
}
]
|
bjuvensjo/scripts
|
vang/misc/tests/test_wc.py
|
Python
|
apache-2.0
| 2,185
| 0.000915
|
from unittest.mock import mock_open, patch, call
import pytest
from pytest import raises
from vang.misc.wc import is_excluded, is_included, count_words, count_letters, count, count_all, get_files, parse_args
@pytest.mark.parametrize('excluded, expected', [
[('foo.txt',), True],
[('.*.txt',), True],
[('.*.txt', 'bar.txt'), True],
[('foo.txtx',), False],
])
def test_is_excluded(excluded, expected):
assert is_excluded('foo.txt', excluded) == expected
@pytest.mark.parametrize('included, expected', [
[('foo.txt',), True],
[('.*.txt',), True],
[('.*.txt', 'bar.txt'), True],
[('foo.txtx',), False],
])
def test_is_included(included, expected):
assert is_included('foo.txt', included) == expected
@patch('vang.misc.wc.walk')
def test_get_files(mock_walk):
mock_walk.return_value = iter([['root', 'dir', ('f1', 'f2')]])
files = list(get_files('root_dir'))
assert files == [('root', 'f1'), ('root', 'f2')]
def test_count_words():
assert count_words(' foo bar baz ') == 3
def test_count_letters():
assert count_letters(' foo bar baz ') == 11
@patch('builtins.open',
new_callable=mock_open,
read_data='Nobody inspects the \n spammish repetition')
def test_count(mock_file):
assert count('root', 'file') == (2, 5, 38)
assert call('root/file', 'rt', encoding='utf-8') in mock_file.mock_calls
@patch('vang.misc.wc.get_files')
@patch('vang.misc.wc.count')
def test_count_all(mock_count, mock_get_files):
mock_get_files.return_value = ('f1', 'f2')
mock_count.return_value = (1, 2, 3)
assert count_all() == {'files': 2, 'letters': 6, 'lines': 2, 'words': 4}
@pytest.mark.parametrize("args", [
'foo',
'-x bar',
])
def test_parse_args_raises(args):
with raises(SystemExit):
parse_args(args.split(' ') if args else args)
@pytest.mark.parametrize("args, expected", [
['', {'dirs': ['.'], 'excluded': [], 'included': ['.*']}],
['-d d1 d2 -e e1 e2 -i i1 i2', {'dirs': ['d1', 'd2'], 'excluded': ['e1', 'e2'], 'included': ['i1', 'i2']}],
])
def test_parse_args_valid(args, expected):
assert expected == parse_args(args.split(' ') if args else '').__dict__
|
hofschroeer/shinysdr
|
shinysdr/test/test_devices.py
|
Python
|
gpl-3.0
| 5,929
| 0.004891
|
# Copyright 2014, 2015 Kevin Reid <kpreid@switchb.org>
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division
from twisted.trial import unittest
from zope.interface import implements # available via Twisted
# Note: not testing _ConstantVFOCell, it's just a useful utility
from shinysdr.devices import _ConstantVFOCell, AudioDevice, Device, FrequencyShift, IDevice, IRXDriver, ITXDriver, PositionedDevice, merge_devices
from shinysdr.signals import SignalType
from shinysdr.test.testutil import DeviceTestCase
from shinysdr.types import Range
from shinysdr.values import ExportedState, LooseCell, nullExportedState
class TestDevice(unittest.TestCase):
def test_name(self):
self.assertEqual(u'x', Device(name='x').get_name())
self.assertEqual(None, Device().get_name())
def test_rx_absent(self):
d = Device()
self.assertEqual(False, d.can_receive())
self.assertEqual(nullExportedState, d.get_rx_driver())
def test_rx_present(self):
rxd = _TestRXDriver()
d = Device(rx_driver=rxd)
self.assertEqual(True, d.can_receive())
self.assertEqual(rxd, d.get_rx_driver())
def test_tx_absent(self):
d = Device()
self.assertEqual(False, d.can_receive())
self.assertEqual(nullExportedState, d.get_tx_driver())
def test_tx_present(self):
txd = _TestTXDriver([])
d = Device(tx_driver=txd)
self.assertEqual(True, d.can_transmit())
self.assertEqual(txd, d.get_tx_driver())
def test_tx_mode_noop(self):
'''
With no TX driver, set_transmitting is a noop.
This was chosen as the most robust handling of the erroneous operation.
'''
d = Device(rx_driver=_TestRXDriver())
d.set_transmitting(True)
d.set_transmitting(False)
def test_tx_mode_actual(self):
log = []
txd = _TestTXDriver(log)
d = Device(rx_driver=_TestRXDriver(), tx_driver=txd)
def midpoint_hook():
log.append('H')
# Either TX driver receives the hook (!= case) or the hook is called directly (== case)
d.set_transmitting(True, midpoint_hook)
self.assertEqual(log, [(True, midpoint_hook)])
d.set_transmitting(True, midpoint_hook)
self.assertEqual(log, [(True, midpoint_hook), 'H'])
d.set_transmitting(False, midpoint_hook)
self.assertEqual(log, [(True, midpoint_hook), 'H', (False, midpoint_hook)])
d.set_transmitting(False, midpoint_hook)
self.assertEqual(log, [(True, midpoint_hook), 'H', (False, midpoint_hook), 'H'])
# TODO VFO tests
# TODO components tests
# close() is tested in test_top
class _TestRXDriver(ExportedState):
implements(IRXDriver)
def get_output_type(self):
return SignalType('IQ', 1)
def get_tune_delay(self):
return 0.0
def get_usable_bandwidth(self):
return Range([(-1, 1)])
def notify_reconnecting_or_restarting(self):
pass
class _TestTXDriver(ExportedState):
implements(ITXDriver)
def __init__(self, log):
self.log = log
def get_input_type(self):
return SignalType('IQ', 1)
def notify_reconnecting_or_restarting(self):
pass
def set_transmitting(self, value, midpoint_hook):
self.log.append((value, midpoint_hook))
class TestMergeDevices(unittest.TestCase):
def test_name(self):
self.assertEqual('a', merge_devices([Device(), Device(name='a')]).get_name())
self.assertEqual('a', merge_devices([Device(name='a'), Device()]).get_name())
self.assertEqual('a+b', merge_devices([Device(name='a'), Device(name='b')]).get_name())
def test_components_disjoint(self):
d = merge_devices([
Device(components={'a': ExportedState()}),
Device(components={'b': ExportedState()})
])
self.assertEqual(d, IDevice(d))
self.assertEqual(sorted(d.get_components_dict().keys()), ['a', 'b'])
def test_components_conflict(self):
d = merge_devices([
Device(components={'a': ExportedState()}),
Device(components={'a': ExportedState()})
])
self.assertEqual(d, IDevice(d))
self.assertEqual(sorted(d.get_components_dict().keys()), ['0-a', '1-a'])
def test_vfos(self):
d = merge_devices([
Device(vfo_cell=_ConstantVFOCell(1)),
Device(vfo_cell=LooseCell(key='freq', value=0, type=Range([(10, 20)]), writable=True))
])
self.assertTrue(d.get_vfo_cell().isWritable())
# TODO more testing
class TestAudioDevice(DeviceTestCase):
def setUp(self):
super(TestAudioDevice, self).setUpFor(
device=AudioDevice(''))
# Test methods provided by DeviceTestCase
class TestFrequencyShift(DeviceTestCase):
def setUp(self):
super(TestFrequencyShift, self).setUpFor(
device=FrequencyShift(100.0))
# Test methods provided by DeviceTestCase
class TestPositionedDevice(DeviceTestCase):
def setUp(self):
super(TestPositionedDevice, self).setUpFor(
device=PositionedDevice(10.0, 20.0))
# Test methods provided by DeviceTestCase
|
metomi/rose
|
metomi/rose/task_run.py
|
Python
|
gpl-3.0
| 6,607
| 0
|
# Copyright (C) British Crown (Met Office) & Contributors.
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Implement "rose task-run"."""
import os
import sys
import traceback
from metomi.rose.app_run import AppRunner
from metomi.rose.env import env_export
from metomi.rose.opt_parse import RoseOptionParser
from metomi.rose.popen import RosePopenError
from metomi.rose.reporter import Reporter
from metomi.rose.run import Runner
from metomi.rose.task_env import get_prepend_paths
class TaskAppNotFoundError(Exception):
"""Error: a task has no associated application configuration."""
def __str__(self):
return "%s (key=%s): task has no associated application." % self.args
class TaskRunner(Runner):
"""A wrapper to a Rose task."""
NAME = "task"
OPTIONS = AppRunner.OPTIONS + [
"app_key",
"cycle",
"cycle_offsets",
"path_globs",
"prefix_delim",
"suffix_delim",
]
def __init__(self, *args, **kwargs):
Runner.__init__(self, *args, **kwargs)
self.app_runner = AppRunner(
event_handler=self.event_handler,
popen=self.popen,
config_pm=self.config_pm,
fs_util=self.fs_util,
suite_engine_proc=self.suite_engine_proc,
)
def run_impl(self, opts, args, uuid, work_files):
"""Run application configuration as a suite task."""
# "rose task-env"
t_prop = self.suite_engine_proc.get_task_props(
cycle=opts.cycle,
cycle_offsets=opts.cycle_offsets,
prefix_delim=opts.prefix_delim,
suffix_delim=opts.suffix_delim,
)
is_changed = False
for key, value in t_prop:
if os.getenv(key) != value:
env_export(key, value, self.event_handler)
is_changed = True
path_globs = opts.path_globs
if path_globs is None:
path_globs = []
prepend_paths_map = get_prepend_paths(
self.event_handler,
t_prop.suite_dir,
path_globs,
full_mode=is_changed,
)
for key, prepend_paths in prepend_paths_map.items():
orig_paths = []
orig_v = os.getenv(key, "")
if orig_v:
orig_paths = orig_v.split(os.pathsep)
value = os.pathsep.join(prepend_paths + orig_paths)
env_export(key, value, self.event_handler)
# Name association with builtin applications
builtin_app = None
if opts.app_mode is None:
builtin_apps_manager = self.app_runner.builtins_manager
builtin_app = builtin_apps_manager.guess_handler(t_prop.task_name)
if builtin_app is not None:
opts.app_mode = builtin_app.SCHEME
# Determine what app config to use
if not opts.conf_dir:
for app_key in [opts.app_key, os.getenv("ROSE_TASK_APP")]:
if app_key is not None:
conf_dir = os.path.join(t_prop.suite_dir, "app", app_key)
if not os.path.isdir(conf_dir):
raise TaskAppNotFoundError(t_prop.task_name, app_key)
break
else:
app_key = t_prop.task_name
conf_dir = os.path.join(
t_prop.suite_dir, "app", t_prop.task_name
)
if (
not os.path.isdir(conf_dir)
and builtin_app is not None
and builtin_app.get_app_key(t_prop.task_name)
):
# A builtin application may select a different app_key
# based on the task name.
app_key = builtin_app.get_app_key(t_prop.task_name)
conf_dir = os.path.join(t_prop.suite_dir, "app", app_key)
if not os.path.isdir(conf_dir):
raise TaskAppNotFoundError(t_prop.task_name, app_key)
opts.conf_dir = conf_dir
return self.app_runner(opts, args)
def main():
"""Launcher for the CLI."""
opt_parser = RoseOptionParser(
usage='rose task-run [OPTIONS] [--] [APP-COMMAND ...]',
description='''
Provide an environment to run a suite task.
Provides environment variables documented in `rose task-env`. It is worth
noting that if the environment variables are already provided by
`rose task-env`, this command will not override them.
Normally, the suite task will select a Rose application configuration
that has the same name as the task. This can be overridden by the
`--app-key=KEY` option or the `ROSE_TASK_APP` environment variable.
SHORT OPTIONS
All options of `rose app-run` and `rose task-env` are supported.
Additional options are:
--app-key=KEY
Specify a named application configuration.
''',
epilog='''
ENVIRONMENT VARIABLES
All environment variables of `rose app-run` and `rose task-env` are
supported. All environment variables documented in `rose task-env` are
passed to the application `rose task-run` runs.
The following environment variables are used by `rose task-run`:
ROSE_TASK_APP
Specify a named application configuration.
SEE ALSO
* `rose app-run`
* `rose task-env`
''',
)
option_keys = TaskRunner.OPTIONS
opt_parser.add_my_options(*option_keys)
opts, args = opt_parser.parse_args()
event_handler = Reporter(opts.verbosity - opts.quietness)
runner = TaskRunner(event_handler)
try:
sys.exit(runner(opts, args))
except Exception as exc:
runner.handle_event(exc)
if opts.debug_mode:
traceback.print_exc()
if isinstance(exc, RosePopenError):
sys.exit(exc.ret_code)
else:
sys.exit(1)
if __name__ == "__main__":
main()
|
google/loaner
|
loaner/web_app/backend/api/messages/template_messages.py
|
Python
|
apache-2.0
| 2,625
| 0.007619
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Template messages for Template API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from protorpc import messages
class TemplateType(messages.Enum):
TITLE = 1
BODY = 2
class Template(messages.Message):
"""ConfigResponse response for ProtoRPC message.
Attributes:
name: str, The name of the name being requested.
body: str, the text of the body.
title: str, the subject line or title of the template.
"""
name = messages.StringField(1)
body = messages.StringField(2)
title = messages.StringField(3)
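# Illustrative usage (hypothetical values, not part of the original module):
# Template(name='loaner_due', title='Device due soon',
# body='Your loaner device is due for return.')
# builds a single template message of the kind carried in
# ListTemplatesResponse.templates.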
class ListTemplatesResponse(messages.Message):
"""ListTemplatesResponse response for ProtoRPC message.
Attributes:
templates: Template, The list of templates being returned.
"""
templates = messages.MessageField(Template, 1, repeated=True)
class UpdateTemplate(messages.Message):
"""UpdateConfig request for ProtoRPC message.
Attributes:
name: str, The name of the name being requested.
body: str, the text of the body.
title: str, the subject line or title of the template.
"""
name = messages.StringField(1)
body = messages.StringField(2)
title = messages.StringField(3)
class UpdateTemplateRequest(messages.Message):
"""UpdateTemplateRequest request for ProtoRPC message.
Attributes:
name: str, The name of the template.
body: str, the text of the body.
title: str, the subject line or title of the template.
"""
name = messages.StringField(1)
body = messages.StringField(2)
title = messages.StringField(3)
class RemoveTemplateRequest(messages.Message):
"""UpdateTemplateRequest request for ProtoRPC message.
Attributes:
name: The template to remove / delete.
"""
name = messages.StringField(1)
class CreateTemplateRequest(messages.Message):
"""CreateTemplateRequest ProtoRPC message.
Attributes:
template: Template, A Template to create.
"""
template = messages.MessageField(Template, 1)
|